Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

+863 -351
+18 -18
MAINTAINERS
··· 1091 1091 F: drivers/*/*aspeed* 1092 1092 1093 1093 ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT 1094 - M: Nicolas Ferre <nicolas.ferre@atmel.com> 1094 + M: Nicolas Ferre <nicolas.ferre@microchip.com> 1095 1095 M: Alexandre Belloni <alexandre.belloni@free-electrons.com> 1096 1096 M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com> 1097 1097 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) ··· 1773 1773 F: include/linux/soc/renesas/ 1774 1774 1775 1775 ARM/SOCFPGA ARCHITECTURE 1776 - M: Dinh Nguyen <dinguyen@opensource.altera.com> 1776 + M: Dinh Nguyen <dinguyen@kernel.org> 1777 1777 S: Maintained 1778 1778 F: arch/arm/mach-socfpga/ 1779 1779 F: arch/arm/boot/dts/socfpga* ··· 1783 1783 T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git 1784 1784 1785 1785 ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT 1786 - M: Dinh Nguyen <dinguyen@opensource.altera.com> 1786 + M: Dinh Nguyen <dinguyen@kernel.org> 1787 1787 S: Maintained 1788 1788 F: drivers/clk/socfpga/ 1789 1789 ··· 2175 2175 F: include/uapi/linux/atm* 2176 2176 2177 2177 ATMEL AT91 / AT32 MCI DRIVER 2178 - M: Ludovic Desroches <ludovic.desroches@atmel.com> 2178 + M: Ludovic Desroches <ludovic.desroches@microchip.com> 2179 2179 S: Maintained 2180 2180 F: drivers/mmc/host/atmel-mci.c 2181 2181 2182 2182 ATMEL AT91 SAMA5D2-Compatible Shutdown Controller 2183 - M: Nicolas Ferre <nicolas.ferre@atmel.com> 2183 + M: Nicolas Ferre <nicolas.ferre@microchip.com> 2184 2184 S: Supported 2185 2185 F: drivers/power/reset/at91-sama5d2_shdwc.c 2186 2186 2187 2187 ATMEL SAMA5D2 ADC DRIVER 2188 - M: Ludovic Desroches <ludovic.desroches@atmel.com> 2188 + M: Ludovic Desroches <ludovic.desroches@microchip.com> 2189 2189 L: linux-iio@vger.kernel.org 2190 2190 S: Supported 2191 2191 F: drivers/iio/adc/at91-sama5d2_adc.c 2192 2192 2193 2193 ATMEL Audio ALSA driver 2194 - M: Nicolas Ferre <nicolas.ferre@atmel.com> 2194 + M: Nicolas Ferre <nicolas.ferre@microchip.com> 2195 2195 L: 
alsa-devel@alsa-project.org (moderated for non-subscribers) 2196 2196 S: Supported 2197 2197 F: sound/soc/atmel 2198 2198 2199 2199 ATMEL XDMA DRIVER 2200 - M: Ludovic Desroches <ludovic.desroches@atmel.com> 2200 + M: Ludovic Desroches <ludovic.desroches@microchip.com> 2201 2201 L: linux-arm-kernel@lists.infradead.org 2202 2202 L: dmaengine@vger.kernel.org 2203 2203 S: Supported 2204 2204 F: drivers/dma/at_xdmac.c 2205 2205 2206 2206 ATMEL I2C DRIVER 2207 - M: Ludovic Desroches <ludovic.desroches@atmel.com> 2207 + M: Ludovic Desroches <ludovic.desroches@microchip.com> 2208 2208 L: linux-i2c@vger.kernel.org 2209 2209 S: Supported 2210 2210 F: drivers/i2c/busses/i2c-at91.c 2211 2211 2212 2212 ATMEL ISI DRIVER 2213 - M: Ludovic Desroches <ludovic.desroches@atmel.com> 2213 + M: Ludovic Desroches <ludovic.desroches@microchip.com> 2214 2214 L: linux-media@vger.kernel.org 2215 2215 S: Supported 2216 2216 F: drivers/media/platform/soc_camera/atmel-isi.c 2217 2217 F: include/media/atmel-isi.h 2218 2218 2219 2219 ATMEL LCDFB DRIVER 2220 - M: Nicolas Ferre <nicolas.ferre@atmel.com> 2220 + M: Nicolas Ferre <nicolas.ferre@microchip.com> 2221 2221 L: linux-fbdev@vger.kernel.org 2222 2222 S: Maintained 2223 2223 F: drivers/video/fbdev/atmel_lcdfb.c 2224 2224 F: include/video/atmel_lcdc.h 2225 2225 2226 2226 ATMEL MACB ETHERNET DRIVER 2227 - M: Nicolas Ferre <nicolas.ferre@atmel.com> 2227 + M: Nicolas Ferre <nicolas.ferre@microchip.com> 2228 2228 S: Supported 2229 2229 F: drivers/net/ethernet/cadence/ 2230 2230 ··· 2236 2236 F: drivers/mtd/nand/atmel_nand* 2237 2237 2238 2238 ATMEL SDMMC DRIVER 2239 - M: Ludovic Desroches <ludovic.desroches@atmel.com> 2239 + M: Ludovic Desroches <ludovic.desroches@microchip.com> 2240 2240 L: linux-mmc@vger.kernel.org 2241 2241 S: Supported 2242 2242 F: drivers/mmc/host/sdhci-of-at91.c 2243 2243 2244 2244 ATMEL SPI DRIVER 2245 - M: Nicolas Ferre <nicolas.ferre@atmel.com> 2245 + M: Nicolas Ferre <nicolas.ferre@microchip.com> 2246 2246 S: Supported 
2247 2247 F: drivers/spi/spi-atmel.* 2248 2248 2249 2249 ATMEL SSC DRIVER 2250 - M: Nicolas Ferre <nicolas.ferre@atmel.com> 2250 + M: Nicolas Ferre <nicolas.ferre@microchip.com> 2251 2251 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 2252 2252 S: Supported 2253 2253 F: drivers/misc/atmel-ssc.c 2254 2254 F: include/linux/atmel-ssc.h 2255 2255 2256 2256 ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS 2257 - M: Nicolas Ferre <nicolas.ferre@atmel.com> 2257 + M: Nicolas Ferre <nicolas.ferre@microchip.com> 2258 2258 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 2259 2259 S: Supported 2260 2260 F: drivers/misc/atmel_tclib.c 2261 2261 F: drivers/clocksource/tcb_clksrc.c 2262 2262 2263 2263 ATMEL USBA UDC DRIVER 2264 - M: Nicolas Ferre <nicolas.ferre@atmel.com> 2264 + M: Nicolas Ferre <nicolas.ferre@microchip.com> 2265 2265 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 2266 2266 S: Supported 2267 2267 F: drivers/usb/gadget/udc/atmel_usba_udc.* ··· 9765 9765 F: drivers/pinctrl/pinctrl-at91.* 9766 9766 9767 9767 PIN CONTROLLER - ATMEL AT91 PIO4 9768 - M: Ludovic Desroches <ludovic.desroches@atmel.com> 9768 + M: Ludovic Desroches <ludovic.desroches@microchip.com> 9769 9769 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 9770 9770 L: linux-gpio@vger.kernel.org 9771 9771 S: Supported
+1 -1
arch/arm/boot/dts/Makefile
··· 617 617 orion5x-lacie-ethernet-disk-mini-v2.dtb \ 618 618 orion5x-linkstation-lsgl.dtb \ 619 619 orion5x-linkstation-lswtgl.dtb \ 620 - orion5x-lschl.dtb \ 620 + orion5x-linkstation-lschl.dtb \ 621 621 orion5x-lswsgl.dtb \ 622 622 orion5x-maxtor-shared-storage-2.dtb \ 623 623 orion5x-netgear-wnr854t.dtb \
+8
arch/arm/boot/dts/imx1.dtsi
··· 18 18 / { 19 19 #address-cells = <1>; 20 20 #size-cells = <1>; 21 + /* 22 + * The decompressor and also some bootloaders rely on a 23 + * pre-existing /chosen node to be available to insert the 24 + * command line and merge other ATAGS info. 25 + * Also for U-Boot there must be a pre-existing /memory node. 26 + */ 27 + chosen {}; 28 + memory { device_type = "memory"; reg = <0 0>; }; 21 29 22 30 aliases { 23 31 gpio0 = &gpio1;
+8
arch/arm/boot/dts/imx23.dtsi
··· 16 16 #size-cells = <1>; 17 17 18 18 interrupt-parent = <&icoll>; 19 + /* 20 + * The decompressor and also some bootloaders rely on a 21 + * pre-existing /chosen node to be available to insert the 22 + * command line and merge other ATAGS info. 23 + * Also for U-Boot there must be a pre-existing /memory node. 24 + */ 25 + chosen {}; 26 + memory { device_type = "memory"; reg = <0 0>; }; 19 27 20 28 aliases { 21 29 gpio0 = &gpio0;
+8
arch/arm/boot/dts/imx25.dtsi
··· 14 14 / { 15 15 #address-cells = <1>; 16 16 #size-cells = <1>; 17 + /* 18 + * The decompressor and also some bootloaders rely on a 19 + * pre-existing /chosen node to be available to insert the 20 + * command line and merge other ATAGS info. 21 + * Also for U-Boot there must be a pre-existing /memory node. 22 + */ 23 + chosen {}; 24 + memory { device_type = "memory"; reg = <0 0>; }; 17 25 18 26 aliases { 19 27 ethernet0 = &fec;
+8
arch/arm/boot/dts/imx27.dtsi
··· 19 19 / { 20 20 #address-cells = <1>; 21 21 #size-cells = <1>; 22 + /* 23 + * The decompressor and also some bootloaders rely on a 24 + * pre-existing /chosen node to be available to insert the 25 + * command line and merge other ATAGS info. 26 + * Also for U-Boot there must be a pre-existing /memory node. 27 + */ 28 + chosen {}; 29 + memory { device_type = "memory"; reg = <0 0>; }; 22 30 23 31 aliases { 24 32 ethernet0 = &fec;
+8
arch/arm/boot/dts/imx28.dtsi
··· 17 17 #size-cells = <1>; 18 18 19 19 interrupt-parent = <&icoll>; 20 + /* 21 + * The decompressor and also some bootloaders rely on a 22 + * pre-existing /chosen node to be available to insert the 23 + * command line and merge other ATAGS info. 24 + * Also for U-Boot there must be a pre-existing /memory node. 25 + */ 26 + chosen {}; 27 + memory { device_type = "memory"; reg = <0 0>; }; 20 28 21 29 aliases { 22 30 ethernet0 = &mac0;
+8
arch/arm/boot/dts/imx31.dtsi
··· 12 12 / { 13 13 #address-cells = <1>; 14 14 #size-cells = <1>; 15 + /* 16 + * The decompressor and also some bootloaders rely on a 17 + * pre-existing /chosen node to be available to insert the 18 + * command line and merge other ATAGS info. 19 + * Also for U-Boot there must be a pre-existing /memory node. 20 + */ 21 + chosen {}; 22 + memory { device_type = "memory"; reg = <0 0>; }; 15 23 16 24 aliases { 17 25 serial0 = &uart1;
+8
arch/arm/boot/dts/imx35.dtsi
··· 13 13 / { 14 14 #address-cells = <1>; 15 15 #size-cells = <1>; 16 + /* 17 + * The decompressor and also some bootloaders rely on a 18 + * pre-existing /chosen node to be available to insert the 19 + * command line and merge other ATAGS info. 20 + * Also for U-Boot there must be a pre-existing /memory node. 21 + */ 22 + chosen {}; 23 + memory { device_type = "memory"; reg = <0 0>; }; 16 24 17 25 aliases { 18 26 ethernet0 = &fec;
+8
arch/arm/boot/dts/imx50.dtsi
··· 17 17 / { 18 18 #address-cells = <1>; 19 19 #size-cells = <1>; 20 + /* 21 + * The decompressor and also some bootloaders rely on a 22 + * pre-existing /chosen node to be available to insert the 23 + * command line and merge other ATAGS info. 24 + * Also for U-Boot there must be a pre-existing /memory node. 25 + */ 26 + chosen {}; 27 + memory { device_type = "memory"; reg = <0 0>; }; 20 28 21 29 aliases { 22 30 ethernet0 = &fec;
+8
arch/arm/boot/dts/imx51.dtsi
··· 19 19 / { 20 20 #address-cells = <1>; 21 21 #size-cells = <1>; 22 + /* 23 + * The decompressor and also some bootloaders rely on a 24 + * pre-existing /chosen node to be available to insert the 25 + * command line and merge other ATAGS info. 26 + * Also for U-Boot there must be a pre-existing /memory node. 27 + */ 28 + chosen {}; 29 + memory { device_type = "memory"; reg = <0 0>; }; 22 30 23 31 aliases { 24 32 ethernet0 = &fec;
+8
arch/arm/boot/dts/imx53.dtsi
··· 19 19 / { 20 20 #address-cells = <1>; 21 21 #size-cells = <1>; 22 + /* 23 + * The decompressor and also some bootloaders rely on a 24 + * pre-existing /chosen node to be available to insert the 25 + * command line and merge other ATAGS info. 26 + * Also for U-Boot there must be a pre-existing /memory node. 27 + */ 28 + chosen {}; 29 + memory { device_type = "memory"; reg = <0 0>; }; 22 30 23 31 aliases { 24 32 ethernet0 = &fec;
+1 -1
arch/arm/boot/dts/imx6dl.dtsi
··· 137 137 &gpio4 { 138 138 gpio-ranges = <&iomuxc 5 136 1>, <&iomuxc 6 145 1>, <&iomuxc 7 150 1>, 139 139 <&iomuxc 8 146 1>, <&iomuxc 9 151 1>, <&iomuxc 10 147 1>, 140 - <&iomuxc 11 151 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>, 140 + <&iomuxc 11 152 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>, 141 141 <&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16 39 7>, 142 142 <&iomuxc 23 56 1>, <&iomuxc 24 61 7>, <&iomuxc 31 46 1>; 143 143 };
+8
arch/arm/boot/dts/imx6qdl.dtsi
··· 16 16 / { 17 17 #address-cells = <1>; 18 18 #size-cells = <1>; 19 + /* 20 + * The decompressor and also some bootloaders rely on a 21 + * pre-existing /chosen node to be available to insert the 22 + * command line and merge other ATAGS info. 23 + * Also for U-Boot there must be a pre-existing /memory node. 24 + */ 25 + chosen {}; 26 + memory { device_type = "memory"; reg = <0 0>; }; 19 27 20 28 aliases { 21 29 ethernet0 = &fec;
+8
arch/arm/boot/dts/imx6sl.dtsi
··· 14 14 / { 15 15 #address-cells = <1>; 16 16 #size-cells = <1>; 17 + /* 18 + * The decompressor and also some bootloaders rely on a 19 + * pre-existing /chosen node to be available to insert the 20 + * command line and merge other ATAGS info. 21 + * Also for U-Boot there must be a pre-existing /memory node. 22 + */ 23 + chosen {}; 24 + memory { device_type = "memory"; reg = <0 0>; }; 17 25 18 26 aliases { 19 27 ethernet0 = &fec;
+8
arch/arm/boot/dts/imx6sx.dtsi
··· 15 15 / { 16 16 #address-cells = <1>; 17 17 #size-cells = <1>; 18 + /* 19 + * The decompressor and also some bootloaders rely on a 20 + * pre-existing /chosen node to be available to insert the 21 + * command line and merge other ATAGS info. 22 + * Also for U-Boot there must be a pre-existing /memory node. 23 + */ 24 + chosen {}; 25 + memory { device_type = "memory"; reg = <0 0>; }; 18 26 19 27 aliases { 20 28 can0 = &flexcan1;
+8
arch/arm/boot/dts/imx6ul.dtsi
··· 15 15 / { 16 16 #address-cells = <1>; 17 17 #size-cells = <1>; 18 + /* 19 + * The decompressor and also some bootloaders rely on a 20 + * pre-existing /chosen node to be available to insert the 21 + * command line and merge other ATAGS info. 22 + * Also for U-Boot there must be a pre-existing /memory node. 23 + */ 24 + chosen {}; 25 + memory { device_type = "memory"; reg = <0 0>; }; 18 26 19 27 aliases { 20 28 ethernet0 = &fec1;
+8
arch/arm/boot/dts/imx7s.dtsi
··· 50 50 / { 51 51 #address-cells = <1>; 52 52 #size-cells = <1>; 53 + /* 54 + * The decompressor and also some bootloaders rely on a 55 + * pre-existing /chosen node to be available to insert the 56 + * command line and merge other ATAGS info. 57 + * Also for U-Boot there must be a pre-existing /memory node. 58 + */ 59 + chosen {}; 60 + memory { device_type = "memory"; reg = <0 0>; }; 53 61 54 62 aliases { 55 63 gpio0 = &gpio1;
+2 -2
arch/arm/boot/dts/orion5x-lschl.dts arch/arm/boot/dts/orion5x-linkstation-lschl.dts
··· 2 2 * Device Tree file for Buffalo Linkstation LS-CHLv3 3 3 * 4 4 * Copyright (C) 2016 Ash Hughes <ashley.hughes@blueyonder.co.uk> 5 - * Copyright (C) 2015, 2016 5 + * Copyright (C) 2015-2017 6 6 * Roger Shimizu <rogershimizu@gmail.com> 7 7 * 8 8 * This file is dual-licensed: you can use it either under the terms ··· 52 52 #include <dt-bindings/gpio/gpio.h> 53 53 54 54 / { 55 - model = "Buffalo Linkstation Live v3 (LS-CHL)"; 55 + model = "Buffalo Linkstation LiveV3 (LS-CHL)"; 56 56 compatible = "buffalo,lschl", "marvell,orion5x-88f5182", "marvell,orion5x"; 57 57 58 58 memory { /* 128 MB */
+1
arch/arm/boot/dts/stih407-family.dtsi
··· 680 680 phy-names = "usb2-phy", "usb3-phy"; 681 681 phys = <&usb2_picophy0>, 682 682 <&phy_port2 PHY_TYPE_USB3>; 683 + snps,dis_u3_susphy_quirk; 683 684 }; 684 685 }; 685 686
+2 -2
arch/arm/configs/ezx_defconfig
··· 64 64 CONFIG_NETFILTER_NETLINK_QUEUE=m 65 65 CONFIG_NF_CONNTRACK=m 66 66 CONFIG_NF_CONNTRACK_EVENTS=y 67 - CONFIG_NF_CT_PROTO_SCTP=m 68 - CONFIG_NF_CT_PROTO_UDPLITE=m 67 + CONFIG_NF_CT_PROTO_SCTP=y 68 + CONFIG_NF_CT_PROTO_UDPLITE=y 69 69 CONFIG_NF_CONNTRACK_AMANDA=m 70 70 CONFIG_NF_CONNTRACK_FTP=m 71 71 CONFIG_NF_CONNTRACK_H323=m
+2 -2
arch/arm/configs/imote2_defconfig
··· 56 56 CONFIG_NETFILTER_NETLINK_QUEUE=m 57 57 CONFIG_NF_CONNTRACK=m 58 58 CONFIG_NF_CONNTRACK_EVENTS=y 59 - CONFIG_NF_CT_PROTO_SCTP=m 60 - CONFIG_NF_CT_PROTO_UDPLITE=m 59 + CONFIG_NF_CT_PROTO_SCTP=y 60 + CONFIG_NF_CT_PROTO_UDPLITE=y 61 61 CONFIG_NF_CONNTRACK_AMANDA=m 62 62 CONFIG_NF_CONNTRACK_FTP=m 63 63 CONFIG_NF_CONNTRACK_H323=m
+1 -1
arch/arm/kernel/ptrace.c
··· 600 600 const void *kbuf, const void __user *ubuf) 601 601 { 602 602 int ret; 603 - struct pt_regs newregs; 603 + struct pt_regs newregs = *task_pt_regs(target); 604 604 605 605 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 606 606 &newregs,
+1 -1
arch/arm/mach-imx/mmdc.c
··· 60 60 61 61 #define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu) 62 62 63 - static enum cpuhp_state cpuhp_mmdc_state; 64 63 static int ddr_type; 65 64 66 65 struct fsl_mmdc_devtype_data { ··· 81 82 82 83 #ifdef CONFIG_PERF_EVENTS 83 84 85 + static enum cpuhp_state cpuhp_mmdc_state; 84 86 static DEFINE_IDA(mmdc_ida); 85 87 86 88 PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
+2 -2
arch/arm/mm/fault.c
··· 610 610 611 611 void __init early_abt_enable(void) 612 612 { 613 - fsr_info[22].fn = early_abort_handler; 613 + fsr_info[FSR_FS_AEA].fn = early_abort_handler; 614 614 local_abt_enable(); 615 - fsr_info[22].fn = do_bad; 615 + fsr_info[FSR_FS_AEA].fn = do_bad; 616 616 } 617 617 618 618 #ifndef CONFIG_ARM_LPAE
+4
arch/arm/mm/fault.h
··· 11 11 #define FSR_FS5_0 (0x3f) 12 12 13 13 #ifdef CONFIG_ARM_LPAE 14 + #define FSR_FS_AEA 17 15 + 14 16 static inline int fsr_fs(unsigned int fsr) 15 17 { 16 18 return fsr & FSR_FS5_0; 17 19 } 18 20 #else 21 + #define FSR_FS_AEA 22 22 + 19 23 static inline int fsr_fs(unsigned int fsr) 20 24 { 21 25 return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
+18
arch/arm64/boot/dts/amlogic/meson-gx.dtsi
··· 55 55 #address-cells = <2>; 56 56 #size-cells = <2>; 57 57 58 + reserved-memory { 59 + #address-cells = <2>; 60 + #size-cells = <2>; 61 + ranges; 62 + 63 + /* 16 MiB reserved for Hardware ROM Firmware */ 64 + hwrom_reserved: hwrom@0 { 65 + reg = <0x0 0x0 0x0 0x1000000>; 66 + no-map; 67 + }; 68 + 69 + /* 2 MiB reserved for ARM Trusted Firmware (BL31) */ 70 + secmon_reserved: secmon@10000000 { 71 + reg = <0x0 0x10000000 0x0 0x200000>; 72 + no-map; 73 + }; 74 + }; 75 + 58 76 cpus { 59 77 #address-cells = <0x2>; 60 78 #size-cells = <0x0>;
+12
arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
··· 151 151 status = "okay"; 152 152 pinctrl-0 = <&eth_rgmii_pins>; 153 153 pinctrl-names = "default"; 154 + phy-handle = <&eth_phy0>; 155 + 156 + mdio { 157 + compatible = "snps,dwmac-mdio"; 158 + #address-cells = <1>; 159 + #size-cells = <0>; 160 + 161 + eth_phy0: ethernet-phy@0 { 162 + reg = <0>; 163 + eee-broken-1000t; 164 + }; 165 + }; 154 166 }; 155 167 156 168 &ir {
+2 -1
arch/powerpc/include/asm/reg.h
··· 649 649 #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ 650 650 #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ 651 651 #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ 652 - #define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */ 652 + #define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 and 9 */ 653 653 #define SRR1_WAKESYSERR 0x00300000 /* System error */ 654 654 #define SRR1_WAKEEE 0x00200000 /* External interrupt */ 655 + #define SRR1_WAKEHVI 0x00240000 /* Hypervisor Virtualization Interrupt (P9) */ 655 656 #define SRR1_WAKEMT 0x00280000 /* mtctrl */ 656 657 #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ 657 658 #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
+1
arch/powerpc/include/asm/xics.h
··· 44 44 45 45 #ifdef CONFIG_PPC_POWERNV 46 46 extern int icp_opal_init(void); 47 + extern void icp_opal_flush_interrupt(void); 47 48 #else 48 49 static inline int icp_opal_init(void) { return -ENODEV; } 49 50 #endif
+5 -16
arch/powerpc/mm/fault.c
··· 253 253 if (unlikely(debugger_fault_handler(regs))) 254 254 goto bail; 255 255 256 - /* On a kernel SLB miss we can only check for a valid exception entry */ 257 - if (!user_mode(regs) && (address >= TASK_SIZE)) { 256 + /* 257 + * The kernel should never take an execute fault nor should it 258 + * take a page fault to a kernel address. 259 + */ 260 + if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) { 258 261 rc = SIGSEGV; 259 262 goto bail; 260 263 } ··· 393 390 #endif /* CONFIG_8xx */ 394 391 395 392 if (is_exec) { 396 - /* 397 - * An execution fault + no execute ? 398 - * 399 - * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we 400 - * deliberately create NX mappings, and use the fault to do the 401 - * cache flush. This is usually handled in hash_page_do_lazy_icache() 402 - * but we could end up here if that races with a concurrent PTE 403 - * update. In that case we need to fall through here to the VMA 404 - * check below. 405 - */ 406 - if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) && 407 - (regs->msr & SRR1_ISI_N_OR_G)) 408 - goto bad_area; 409 - 410 393 /* 411 394 * Allow execution from readable areas if the MMU does not 412 395 * provide separate controls over reading and executing.
+1 -5
arch/powerpc/mm/tlb-radix.c
··· 50 50 for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { 51 51 __tlbiel_pid(pid, set, ric); 52 52 } 53 - if (cpu_has_feature(CPU_FTR_POWER9_DD1)) 54 - asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); 55 - return; 53 + asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory"); 56 54 } 57 55 58 56 static inline void _tlbie_pid(unsigned long pid, unsigned long ric) ··· 83 85 asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) 84 86 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); 85 87 asm volatile("ptesync": : :"memory"); 86 - if (cpu_has_feature(CPU_FTR_POWER9_DD1)) 87 - asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); 88 88 } 89 89 90 90 static inline void _tlbie_va(unsigned long va, unsigned long pid,
+10 -2
arch/powerpc/platforms/powernv/smp.c
··· 155 155 wmask = SRR1_WAKEMASK_P8; 156 156 157 157 idle_states = pnv_get_supported_cpuidle_states(); 158 + 158 159 /* We don't want to take decrementer interrupts while we are offline, 159 - * so clear LPCR:PECE1. We keep PECE2 enabled. 160 + * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9) 161 + * enabled as to let IPIs in. 160 162 */ 161 163 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); 162 164 ··· 208 206 * contains 0. 209 207 */ 210 208 if (((srr1 & wmask) == SRR1_WAKEEE) || 209 + ((srr1 & wmask) == SRR1_WAKEHVI) || 211 210 (local_paca->irq_happened & PACA_IRQ_EE)) { 212 - icp_native_flush_interrupt(); 211 + if (cpu_has_feature(CPU_FTR_ARCH_300)) 212 + icp_opal_flush_interrupt(); 213 + else 214 + icp_native_flush_interrupt(); 213 215 } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) { 214 216 unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); 215 217 asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); ··· 227 221 if (srr1 && !generic_check_cpu_restart(cpu)) 228 222 DBG("CPU%d Unexpected exit while offline !\n", cpu); 229 223 } 224 + 225 + /* Re-enable decrementer interrupts */ 230 226 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1); 231 227 DBG("CPU%d coming online...\n", cpu); 232 228 }
+33 -2
arch/powerpc/sysdev/xics/icp-opal.c
··· 120 120 { 121 121 int hw_cpu = get_hard_smp_processor_id(cpu); 122 122 123 + kvmppc_set_host_ipi(cpu, 1); 123 124 opal_int_set_mfrr(hw_cpu, IPI_PRIORITY); 124 125 } 125 126 126 127 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id) 127 128 { 128 - int hw_cpu = hard_smp_processor_id(); 129 + int cpu = smp_processor_id(); 129 130 130 - opal_int_set_mfrr(hw_cpu, 0xff); 131 + kvmppc_set_host_ipi(cpu, 0); 132 + opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff); 131 133 132 134 return smp_ipi_demux(); 135 + } 136 + 137 + /* 138 + * Called when an interrupt is received on an off-line CPU to 139 + * clear the interrupt, so that the CPU can go back to nap mode. 140 + */ 141 + void icp_opal_flush_interrupt(void) 142 + { 143 + unsigned int xirr; 144 + unsigned int vec; 145 + 146 + do { 147 + xirr = icp_opal_get_xirr(); 148 + vec = xirr & 0x00ffffff; 149 + if (vec == XICS_IRQ_SPURIOUS) 150 + break; 151 + if (vec == XICS_IPI) { 152 + /* Clear pending IPI */ 153 + int cpu = smp_processor_id(); 154 + kvmppc_set_host_ipi(cpu, 0); 155 + opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff); 156 + } else { 157 + pr_err("XICS: hw interrupt 0x%x to offline cpu, " 158 + "disabling\n", vec); 159 + xics_mask_unknown_vec(vec); 160 + } 161 + 162 + /* EOI the interrupt */ 163 + } while (opal_int_eoi(xirr) > 0); 133 164 } 134 165 135 166 #endif /* CONFIG_SMP */
-2
arch/x86/kernel/apic/io_apic.c
··· 1875 1875 .irq_ack = irq_chip_ack_parent, 1876 1876 .irq_eoi = ioapic_ack_level, 1877 1877 .irq_set_affinity = ioapic_set_affinity, 1878 - .irq_retrigger = irq_chip_retrigger_hierarchy, 1879 1878 .flags = IRQCHIP_SKIP_SET_WAKE, 1880 1879 }; 1881 1880 ··· 1886 1887 .irq_ack = irq_chip_ack_parent, 1887 1888 .irq_eoi = ioapic_ir_ack_level, 1888 1889 .irq_set_affinity = ioapic_set_affinity, 1889 - .irq_retrigger = irq_chip_retrigger_hierarchy, 1890 1890 .flags = IRQCHIP_SKIP_SET_WAKE, 1891 1891 }; 1892 1892
+4 -5
block/blk-lib.c
··· 306 306 if (ret == 0 || (ret && ret != -EOPNOTSUPP)) 307 307 goto out; 308 308 309 - ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, 310 - ZERO_PAGE(0), biop); 311 - if (ret == 0 || (ret && ret != -EOPNOTSUPP)) 312 - goto out; 313 - 314 309 ret = 0; 315 310 while (nr_sects != 0) { 316 311 bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES), ··· 363 368 BLKDEV_DISCARD_ZERO)) 364 369 return 0; 365 370 } 371 + 372 + if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, 373 + ZERO_PAGE(0))) 374 + return 0; 366 375 367 376 blk_start_plug(&plug); 368 377 ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
-3
drivers/char/hw_random/core.c
··· 92 92 mutex_unlock(&reading_mutex); 93 93 if (bytes_read > 0) 94 94 add_device_randomness(rng_buffer, bytes_read); 95 - memset(rng_buffer, 0, size); 96 95 } 97 96 98 97 static inline void cleanup_rng(struct kref *kref) ··· 287 288 } 288 289 } 289 290 out: 290 - memset(rng_buffer, 0, rng_buffer_size()); 291 291 return ret ? : err; 292 292 293 293 out_unlock_reading: ··· 425 427 /* Outside lock, sure, but y'know: randomness. */ 426 428 add_hwgenerator_randomness((void *)rng_fillbuf, rc, 427 429 rc * current_quality * 8 >> 10); 428 - memset(rng_fillbuf, 0, rng_buffer_size()); 429 430 } 430 431 hwrng_fill = NULL; 431 432 return 0;
+3 -1
drivers/gpu/drm/i915/i915_drv.c
··· 213 213 } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { 214 214 dev_priv->pch_type = PCH_KBP; 215 215 DRM_DEBUG_KMS("Found KabyPoint PCH\n"); 216 - WARN_ON(!IS_KABYLAKE(dev_priv)); 216 + WARN_ON(!IS_SKYLAKE(dev_priv) && 217 + !IS_KABYLAKE(dev_priv)); 217 218 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || 218 219 (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || 219 220 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && ··· 2428 2427 * we can do is to hope that things will still work (and disable RPM). 2429 2428 */ 2430 2429 i915_gem_init_swizzling(dev_priv); 2430 + i915_gem_restore_fences(dev_priv); 2431 2431 2432 2432 intel_runtime_pm_enable_interrupts(dev_priv); 2433 2433
+11 -3
drivers/gpu/drm/i915/i915_gem.c
··· 2010 2010 for (i = 0; i < dev_priv->num_fence_regs; i++) { 2011 2011 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 2012 2012 2013 - if (WARN_ON(reg->pin_count)) 2014 - continue; 2013 + /* Ideally we want to assert that the fence register is not 2014 + * live at this point (i.e. that no piece of code will be 2015 + * trying to write through fence + GTT, as that both violates 2016 + * our tracking of activity and associated locking/barriers, 2017 + * but also is illegal given that the hw is powered down). 2018 + * 2019 + * Previously we used reg->pin_count as a "liveness" indicator. 2020 + * That is not sufficient, and we need a more fine-grained 2021 + * tool if we want to have a sanity check here. 2022 + */ 2015 2023 2016 2024 if (!reg->vma) 2017 2025 continue; ··· 3486 3478 vma->display_alignment = max_t(u64, vma->display_alignment, alignment); 3487 3479 3488 3480 /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */ 3489 - if (obj->cache_dirty) { 3481 + if (obj->cache_dirty || obj->base.write_domain == I915_GEM_DOMAIN_CPU) { 3490 3482 i915_gem_clflush_object(obj, true); 3491 3483 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB); 3492 3484 }
+6 -6
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 1181 1181 if (exec[i].offset != 1182 1182 gen8_canonical_addr(exec[i].offset & PAGE_MASK)) 1183 1183 return -EINVAL; 1184 - 1185 - /* From drm_mm perspective address space is continuous, 1186 - * so from this point we're always using non-canonical 1187 - * form internally. 1188 - */ 1189 - exec[i].offset = gen8_noncanonical_addr(exec[i].offset); 1190 1184 } 1185 + 1186 + /* From drm_mm perspective address space is continuous, 1187 + * so from this point we're always using non-canonical 1188 + * form internally. 1189 + */ 1190 + exec[i].offset = gen8_noncanonical_addr(exec[i].offset); 1191 1191 1192 1192 if (exec[i].alignment && !is_power_of_2(exec[i].alignment)) 1193 1193 return -EINVAL;
+10 -2
drivers/gpu/drm/i915/i915_gem_internal.c
··· 66 66 67 67 max_order = MAX_ORDER; 68 68 #ifdef CONFIG_SWIOTLB 69 - if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */ 70 - max_order = min(max_order, ilog2(IO_TLB_SEGPAGES)); 69 + if (swiotlb_nr_tbl()) { 70 + unsigned int max_segment; 71 + 72 + max_segment = swiotlb_max_segment(); 73 + if (max_segment) { 74 + max_segment = max_t(unsigned int, max_segment, 75 + PAGE_SIZE) >> PAGE_SHIFT; 76 + max_order = min(max_order, ilog2(max_segment)); 77 + } 78 + } 71 79 #endif 72 80 73 81 gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
+2 -2
drivers/gpu/drm/i915/intel_display.c
··· 4262 4262 drm_crtc_vblank_put(&intel_crtc->base); 4263 4263 4264 4264 wake_up_all(&dev_priv->pending_flip_queue); 4265 - queue_work(dev_priv->wq, &work->unpin_work); 4266 - 4267 4265 trace_i915_flip_complete(intel_crtc->plane, 4268 4266 work->pending_flip_obj); 4267 + 4268 + queue_work(dev_priv->wq, &work->unpin_work); 4269 4269 } 4270 4270 4271 4271 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+2 -1
drivers/gpu/drm/i915/intel_dpll_mgr.c
··· 1730 1730 return NULL; 1731 1731 1732 1732 if ((encoder->type == INTEL_OUTPUT_DP || 1733 - encoder->type == INTEL_OUTPUT_EDP) && 1733 + encoder->type == INTEL_OUTPUT_EDP || 1734 + encoder->type == INTEL_OUTPUT_DP_MST) && 1734 1735 !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state)) 1735 1736 return NULL; 1736 1737
+1 -1
drivers/gpu/drm/vc4/vc4_plane.c
··· 858 858 } 859 859 } 860 860 plane = &vc4_plane->base; 861 - ret = drm_universal_plane_init(dev, plane, 0xff, 861 + ret = drm_universal_plane_init(dev, plane, 0, 862 862 &vc4_plane_funcs, 863 863 formats, num_formats, 864 864 type, NULL);
+1 -2
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
··· 481 481 mode_cmd.height = var->yres; 482 482 mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width; 483 483 mode_cmd.pixel_format = 484 - drm_mode_legacy_fb_format(var->bits_per_pixel, 485 - ((var->bits_per_pixel + 7) / 8) * mode_cmd.width); 484 + drm_mode_legacy_fb_format(var->bits_per_pixel, depth); 486 485 487 486 cur_fb = par->set_fb; 488 487 if (cur_fb && cur_fb->width == mode_cmd.width &&
+8 -6
drivers/i2c/busses/i2c-piix4.c
··· 58 58 #define SMBSLVDAT (0xC + piix4_smba) 59 59 60 60 /* count for request_region */ 61 - #define SMBIOSIZE 8 61 + #define SMBIOSIZE 9 62 62 63 63 /* PCI Address Constants */ 64 64 #define SMBBA 0x090 ··· 592 592 u8 port; 593 593 int retval; 594 594 595 + mutex_lock(&piix4_mutex_sb800); 596 + 595 597 /* Request the SMBUS semaphore, avoid conflicts with the IMC */ 596 598 smbslvcnt = inb_p(SMBSLVCNT); 597 599 do { ··· 607 605 usleep_range(1000, 2000); 608 606 } while (--retries); 609 607 /* SMBus is still owned by the IMC, we give up */ 610 - if (!retries) 608 + if (!retries) { 609 + mutex_unlock(&piix4_mutex_sb800); 611 610 return -EBUSY; 612 - 613 - mutex_lock(&piix4_mutex_sb800); 611 + } 614 612 615 613 outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); 616 614 smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); ··· 625 623 626 624 outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1); 627 625 628 - mutex_unlock(&piix4_mutex_sb800); 629 - 630 626 /* Release the semaphore */ 631 627 outb_p(smbslvcnt | 0x20, SMBSLVCNT); 628 + 629 + mutex_unlock(&piix4_mutex_sb800); 632 630 633 631 return retval; 634 632 }
+5 -3
drivers/infiniband/sw/rxe/rxe_mr.c
··· 59 59 60 60 case RXE_MEM_TYPE_MR: 61 61 case RXE_MEM_TYPE_FMR: 62 - return ((iova < mem->iova) || 63 - ((iova + length) > (mem->iova + mem->length))) ? 64 - -EFAULT : 0; 62 + if (iova < mem->iova || 63 + length > mem->length || 64 + iova > mem->iova + mem->length - length) 65 + return -EFAULT; 66 + return 0; 65 67 66 68 default: 67 69 return -EFAULT;
+1 -1
drivers/infiniband/sw/rxe/rxe_resp.c
··· 479 479 goto err2; 480 480 } 481 481 482 - resid = mtu; 482 + qp->resp.resid = mtu; 483 483 } else { 484 484 if (pktlen != resid) { 485 485 state = RESPST_ERR_LENGTH;
+14 -6
drivers/input/misc/uinput.c
··· 263 263 return -EINVAL; 264 264 } 265 265 266 - if (test_bit(ABS_MT_SLOT, dev->absbit)) { 267 - nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1; 268 - error = input_mt_init_slots(dev, nslot, 0); 269 - if (error) 266 + if (test_bit(EV_ABS, dev->evbit)) { 267 + input_alloc_absinfo(dev); 268 + if (!dev->absinfo) { 269 + error = -EINVAL; 270 270 goto fail1; 271 - } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { 272 - input_set_events_per_packet(dev, 60); 271 + } 272 + 273 + if (test_bit(ABS_MT_SLOT, dev->absbit)) { 274 + nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1; 275 + error = input_mt_init_slots(dev, nslot, 0); 276 + if (error) 277 + goto fail1; 278 + } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { 279 + input_set_events_per_packet(dev, 60); 280 + } 273 281 } 274 282 275 283 if (test_bit(EV_FF, dev->evbit) && !udev->ff_effects_max) {
+7 -1
drivers/input/rmi4/Kconfig
··· 42 42 config RMI4_F03 43 43 bool "RMI4 Function 03 (PS2 Guest)" 44 44 depends on RMI4_CORE 45 - depends on SERIO=y || RMI4_CORE=SERIO 46 45 help 47 46 Say Y here if you want to add support for RMI4 function 03. 48 47 49 48 Function 03 provides PS2 guest support for RMI4 devices. This 50 49 includes support for TrackPoints on TouchPads. 50 + 51 + config RMI4_F03_SERIO 52 + tristate 53 + depends on RMI4_CORE 54 + depends on RMI4_F03 55 + default RMI4_CORE 56 + select SERIO 51 57 52 58 config RMI4_2D_SENSOR 53 59 bool
+25 -7
drivers/mmc/host/mmci.c
··· 1023 1023 if (!host->busy_status && busy_resp && 1024 1024 !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) && 1025 1025 (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) { 1026 - /* Unmask the busy IRQ */ 1026 + 1027 + /* Clear the busy start IRQ */ 1028 + writel(host->variant->busy_detect_mask, 1029 + host->base + MMCICLEAR); 1030 + 1031 + /* Unmask the busy end IRQ */ 1027 1032 writel(readl(base + MMCIMASK0) | 1028 1033 host->variant->busy_detect_mask, 1029 1034 base + MMCIMASK0); ··· 1043 1038 1044 1039 /* 1045 1040 * At this point we are not busy with a command, we have 1046 - * not received a new busy request, mask the busy IRQ and 1047 - * fall through to process the IRQ. 1041 + * not received a new busy request, clear and mask the busy 1042 + * end IRQ and fall through to process the IRQ. 1048 1043 */ 1049 1044 if (host->busy_status) { 1045 + 1046 + writel(host->variant->busy_detect_mask, 1047 + host->base + MMCICLEAR); 1048 + 1050 1049 writel(readl(base + MMCIMASK0) & 1051 1050 ~host->variant->busy_detect_mask, 1052 1051 base + MMCIMASK0); ··· 1292 1283 } 1293 1284 1294 1285 /* 1295 - * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's 1296 - * enabled) since the HW seems to be triggering the IRQ on both 1297 - * edges while monitoring DAT0 for busy completion. 1286 + * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's 1287 + * enabled) in mmci_cmd_irq() function where ST Micro busy 1288 + * detection variant is handled. Considering the HW seems to be 1289 + * triggering the IRQ on both edges while monitoring DAT0 for 1290 + * busy completion and that same status bit is used to monitor 1291 + * start and end of busy detection, special care must be taken 1292 + * to make sure that both start and end interrupts are always 1293 + * cleared one after the other. 
1298 1294 */ 1299 1295 status &= readl(host->base + MMCIMASK0); 1300 - writel(status, host->base + MMCICLEAR); 1296 + if (host->variant->busy_detect) 1297 + writel(status & ~host->variant->busy_detect_mask, 1298 + host->base + MMCICLEAR); 1299 + else 1300 + writel(status, host->base + MMCICLEAR); 1301 1301 1302 1302 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); 1303 1303
+96 -12
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
··· 31 31 u8 lmac_type; 32 32 u8 lane_to_sds; 33 33 bool use_training; 34 + bool autoneg; 34 35 bool link_up; 35 36 int lmacid; /* ID within BGX */ 36 37 int lmacid_bd; /* ID on board */ ··· 462 461 /* power down, reset autoneg, autoneg enable */ 463 462 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL); 464 463 cfg &= ~PCS_MRX_CTL_PWR_DN; 465 - cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN); 464 + cfg |= PCS_MRX_CTL_RST_AN; 465 + if (lmac->phydev) { 466 + cfg |= PCS_MRX_CTL_AN_EN; 467 + } else { 468 + /* In scenarios where PHY driver is not present or it's a 469 + * non-standard PHY, FW sets AN_EN to inform Linux driver 470 + * to do auto-neg and link polling or not. 471 + */ 472 + if (cfg & PCS_MRX_CTL_AN_EN) 473 + lmac->autoneg = true; 474 + } 466 475 bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); 467 476 468 477 if (lmac->lmac_type == BGX_MODE_QSGMII) { ··· 483 472 return 0; 484 473 } 485 474 486 - if (lmac->lmac_type == BGX_MODE_SGMII) { 475 + if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) { 487 476 if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, 488 477 PCS_MRX_STATUS_AN_CPT, false)) { 489 478 dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n"); ··· 689 678 return -1; 690 679 } 691 680 681 + static void bgx_poll_for_sgmii_link(struct lmac *lmac) 682 + { 683 + u64 pcs_link, an_result; 684 + u8 speed; 685 + 686 + pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid, 687 + BGX_GMP_PCS_MRX_STATUS); 688 + 689 + /*Link state bit is sticky, read it again*/ 690 + if (!(pcs_link & PCS_MRX_STATUS_LINK)) 691 + pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid, 692 + BGX_GMP_PCS_MRX_STATUS); 693 + 694 + if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS, 695 + PCS_MRX_STATUS_AN_CPT, false)) { 696 + lmac->link_up = false; 697 + lmac->last_speed = SPEED_UNKNOWN; 698 + lmac->last_duplex = DUPLEX_UNKNOWN; 699 + goto next_poll; 700 + } 701 + 702 + lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? 
true : false; 703 + an_result = bgx_reg_read(lmac->bgx, lmac->lmacid, 704 + BGX_GMP_PCS_ANX_AN_RESULTS); 705 + 706 + speed = (an_result >> 3) & 0x3; 707 + lmac->last_duplex = (an_result >> 1) & 0x1; 708 + switch (speed) { 709 + case 0: 710 + lmac->last_speed = 10; 711 + break; 712 + case 1: 713 + lmac->last_speed = 100; 714 + break; 715 + case 2: 716 + lmac->last_speed = 1000; 717 + break; 718 + default: 719 + lmac->link_up = false; 720 + lmac->last_speed = SPEED_UNKNOWN; 721 + lmac->last_duplex = DUPLEX_UNKNOWN; 722 + break; 723 + } 724 + 725 + next_poll: 726 + 727 + if (lmac->last_link != lmac->link_up) { 728 + if (lmac->link_up) 729 + bgx_sgmii_change_link_state(lmac); 730 + lmac->last_link = lmac->link_up; 731 + } 732 + 733 + queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3); 734 + } 735 + 692 736 static void bgx_poll_for_link(struct work_struct *work) 693 737 { 694 738 struct lmac *lmac; 695 739 u64 spu_link, smu_link; 696 740 697 741 lmac = container_of(work, struct lmac, dwork.work); 742 + if (lmac->is_sgmii) { 743 + bgx_poll_for_sgmii_link(lmac); 744 + return; 745 + } 698 746 699 747 /* Receive link is latching low. 
Force it high and verify it */ 700 748 bgx_reg_modify(lmac->bgx, lmac->lmacid, ··· 845 775 (lmac->lmac_type != BGX_MODE_XLAUI) && 846 776 (lmac->lmac_type != BGX_MODE_40G_KR) && 847 777 (lmac->lmac_type != BGX_MODE_10G_KR)) { 848 - if (!lmac->phydev) 849 - return -ENODEV; 850 - 778 + if (!lmac->phydev) { 779 + if (lmac->autoneg) { 780 + bgx_reg_write(bgx, lmacid, 781 + BGX_GMP_PCS_LINKX_TIMER, 782 + PCS_LINKX_TIMER_COUNT); 783 + goto poll; 784 + } else { 785 + /* Default to below link speed and duplex */ 786 + lmac->link_up = true; 787 + lmac->last_speed = 1000; 788 + lmac->last_duplex = 1; 789 + bgx_sgmii_change_link_state(lmac); 790 + return 0; 791 + } 792 + } 851 793 lmac->phydev->dev_flags = 0; 852 794 853 795 if (phy_connect_direct(&lmac->netdev, lmac->phydev, ··· 868 786 return -ENODEV; 869 787 870 788 phy_start_aneg(lmac->phydev); 871 - } else { 872 - lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND | 873 - WQ_MEM_RECLAIM, 1); 874 - if (!lmac->check_link) 875 - return -ENOMEM; 876 - INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link); 877 - queue_delayed_work(lmac->check_link, &lmac->dwork, 0); 789 + return 0; 878 790 } 791 + 792 + poll: 793 + lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND | 794 + WQ_MEM_RECLAIM, 1); 795 + if (!lmac->check_link) 796 + return -ENOMEM; 797 + INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link); 798 + queue_delayed_work(lmac->check_link, &lmac->dwork, 0); 879 799 880 800 return 0; 881 801 }
+5
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
··· 153 153 #define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14) 154 154 #define PCS_MRX_CTL_RESET BIT_ULL(15) 155 155 #define BGX_GMP_PCS_MRX_STATUS 0x30008 156 + #define PCS_MRX_STATUS_LINK BIT_ULL(2) 156 157 #define PCS_MRX_STATUS_AN_CPT BIT_ULL(5) 158 + #define BGX_GMP_PCS_ANX_ADV 0x30010 157 159 #define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020 160 + #define BGX_GMP_PCS_LINKX_TIMER 0x30040 161 + #define PCS_LINKX_TIMER_COUNT 0x1E84 158 162 #define BGX_GMP_PCS_SGM_AN_ADV 0x30068 159 163 #define BGX_GMP_PCS_MISCX_CTL 0x30078 164 + #define PCS_MISC_CTL_MODE BIT_ULL(8) 160 165 #define PCS_MISC_CTL_DISP_EN BIT_ULL(13) 161 166 #define PCS_MISC_CTL_GMX_ENO BIT_ULL(11) 162 167 #define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
+1 -1
drivers/net/ethernet/hisilicon/hns/hns_enet.c
··· 305 305 struct hns_nic_ring_data *ring_data) 306 306 { 307 307 struct hns_nic_priv *priv = netdev_priv(ndev); 308 - struct device *dev = priv->dev; 309 308 struct hnae_ring *ring = ring_data->ring; 309 + struct device *dev = ring_to_dev(ring); 310 310 struct netdev_queue *dev_queue; 311 311 struct skb_frag_struct *frag; 312 312 int buf_num;
+2 -2
drivers/net/hamradio/mkiss.c
··· 648 648 { 649 649 /* Finish setting up the DEVICE info. */ 650 650 dev->mtu = AX_MTU; 651 - dev->hard_header_len = 0; 652 - dev->addr_len = 0; 651 + dev->hard_header_len = AX25_MAX_HEADER_LEN; 652 + dev->addr_len = AX25_ADDR_LEN; 653 653 dev->type = ARPHRD_AX25; 654 654 dev->tx_queue_len = 10; 655 655 dev->header_ops = &ax25_header_ops;
+1
drivers/net/loopback.c
··· 163 163 { 164 164 dev->mtu = 64 * 1024; 165 165 dev->hard_header_len = ETH_HLEN; /* 14 */ 166 + dev->min_header_len = ETH_HLEN; /* 14 */ 166 167 dev->addr_len = ETH_ALEN; /* 6 */ 167 168 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ 168 169 dev->flags = IFF_LOOPBACK;
+2 -4
drivers/net/phy/mdio-bcm-iproc.c
··· 81 81 if (rc) 82 82 return rc; 83 83 84 - iproc_mdio_config_clk(priv->base); 85 - 86 84 /* Prepare the read operation */ 87 85 cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | 88 86 (reg << MII_DATA_RA_SHIFT) | ··· 109 111 rc = iproc_mdio_wait_for_idle(priv->base); 110 112 if (rc) 111 113 return rc; 112 - 113 - iproc_mdio_config_clk(priv->base); 114 114 115 115 /* Prepare the write operation */ 116 116 cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | ··· 158 162 bus->parent = &pdev->dev; 159 163 bus->read = iproc_mdio_read; 160 164 bus->write = iproc_mdio_write; 165 + 166 + iproc_mdio_config_clk(priv->base); 161 167 162 168 rc = of_mdiobus_register(bus, pdev->dev.of_node); 163 169 if (rc) {
+20 -8
drivers/net/phy/phy_device.c
··· 908 908 struct module *ndev_owner = dev->dev.parent->driver->owner; 909 909 struct mii_bus *bus = phydev->mdio.bus; 910 910 struct device *d = &phydev->mdio.dev; 911 + bool using_genphy = false; 911 912 int err; 912 913 913 914 /* For Ethernet device drivers that register their own MDIO bus, we ··· 918 917 */ 919 918 if (ndev_owner != bus->owner && !try_module_get(bus->owner)) { 920 919 dev_err(&dev->dev, "failed to get the bus module\n"); 921 - return -EIO; 922 - } 923 - 924 - if (!try_module_get(d->driver->owner)) { 925 - dev_err(&dev->dev, "failed to get the device driver module\n"); 926 920 return -EIO; 927 921 } 928 922 ··· 934 938 d->driver = 935 939 &genphy_driver[GENPHY_DRV_1G].mdiodrv.driver; 936 940 941 + using_genphy = true; 942 + } 943 + 944 + if (!try_module_get(d->driver->owner)) { 945 + dev_err(&dev->dev, "failed to get the device driver module\n"); 946 + err = -EIO; 947 + goto error_put_device; 948 + } 949 + 950 + if (using_genphy) { 937 951 err = d->driver->probe(d); 938 952 if (err >= 0) 939 953 err = device_bind_driver(d); 940 954 941 955 if (err) 942 - goto error; 956 + goto error_module_put; 943 957 } 944 958 945 959 if (phydev->attached_dev) { ··· 986 980 return err; 987 981 988 982 error: 983 + /* phy_detach() does all of the cleanup below */ 989 984 phy_detach(phydev); 990 - put_device(d); 985 + return err; 986 + 987 + error_module_put: 991 988 module_put(d->driver->owner); 989 + error_put_device: 990 + put_device(d); 992 991 if (ndev_owner != bus->owner) 993 992 module_put(bus->owner); 994 993 return err; ··· 1056 1045 1057 1046 phy_led_triggers_unregister(phydev); 1058 1047 1048 + module_put(phydev->mdio.dev.driver->owner); 1049 + 1059 1050 /* If the device had no specific driver before (i.e. 
- it 1060 1051 * was using the generic driver), we unbind the device 1061 1052 * from the generic driver so that there's a chance a ··· 1078 1065 bus = phydev->mdio.bus; 1079 1066 1080 1067 put_device(&phydev->mdio.dev); 1081 - module_put(phydev->mdio.dev.driver->owner); 1082 1068 if (ndev_owner != bus->owner) 1083 1069 module_put(bus->owner); 1084 1070 }
+80 -49
drivers/net/usb/sierra_net.c
··· 73 73 /* Private data structure */ 74 74 struct sierra_net_data { 75 75 76 - u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */ 77 - 78 76 u16 link_up; /* air link up or down */ 79 77 u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */ 80 78 ··· 120 122 121 123 /* LSI Protocol types */ 122 124 #define SIERRA_NET_PROTOCOL_UMTS 0x01 125 + #define SIERRA_NET_PROTOCOL_UMTS_DS 0x04 123 126 /* LSI Coverage */ 124 127 #define SIERRA_NET_COVERAGE_NONE 0x00 125 128 #define SIERRA_NET_COVERAGE_NOPACKET 0x01 ··· 128 129 /* LSI Session */ 129 130 #define SIERRA_NET_SESSION_IDLE 0x00 130 131 /* LSI Link types */ 131 - #define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00 132 + #define SIERRA_NET_AS_LINK_TYPE_IPV4 0x00 133 + #define SIERRA_NET_AS_LINK_TYPE_IPV6 0x02 132 134 133 135 struct lsi_umts { 134 136 u8 protocol; ··· 137 137 __be16 length; 138 138 /* eventually use a union for the rest - assume umts for now */ 139 139 u8 coverage; 140 - u8 unused2[41]; 140 + u8 network_len; /* network name len */ 141 + u8 network[40]; /* network name (UCS2, bigendian) */ 141 142 u8 session_state; 142 143 u8 unused3[33]; 144 + } __packed; 145 + 146 + struct lsi_umts_single { 147 + struct lsi_umts lsi; 143 148 u8 link_type; 144 149 u8 pdp_addr_len; /* NW-supplied PDP address len */ 145 150 u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */ ··· 163 158 u8 reserved[8]; 164 159 } __packed; 165 160 161 + struct lsi_umts_dual { 162 + struct lsi_umts lsi; 163 + u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */ 164 + u8 pdp_addr4[4]; /* NW-supplied PDP IPv4 address (bigendian)) */ 165 + u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */ 166 + u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */ 167 + u8 unused4[23]; 168 + u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */ 169 + u8 dns1_addr4[4]; /* NW-supplied 1st DNS v4 address */ 170 + u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */ 171 + u8 
dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/ 172 + u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */ 173 + u8 dns2_addr4[4]; /* NW-supplied 2nd DNS v4 address */ 174 + u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */ 175 + u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/ 176 + u8 unused5[68]; 177 + } __packed; 178 + 166 179 #define SIERRA_NET_LSI_COMMON_LEN 4 167 - #define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) 180 + #define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts_single)) 168 181 #define SIERRA_NET_LSI_UMTS_STATUS_LEN \ 169 182 (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN) 183 + #define SIERRA_NET_LSI_UMTS_DS_LEN (sizeof(struct lsi_umts_dual)) 184 + #define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \ 185 + (SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN) 170 186 171 187 /* Forward definitions */ 172 188 static void sierra_sync_timer(unsigned long syncdata); ··· 216 190 dev->data[0] = (unsigned long)priv; 217 191 } 218 192 219 - /* is packet IPv4 */ 193 + /* is packet IPv4/IPv6 */ 220 194 static inline int is_ip(struct sk_buff *skb) 221 195 { 222 - return skb->protocol == cpu_to_be16(ETH_P_IP); 196 + return skb->protocol == cpu_to_be16(ETH_P_IP) || 197 + skb->protocol == cpu_to_be16(ETH_P_IPV6); 223 198 } 224 199 225 200 /* ··· 376 349 static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) 377 350 { 378 351 struct lsi_umts *lsi = (struct lsi_umts *)data; 352 + u32 expected_length; 379 353 380 - if (datalen < sizeof(struct lsi_umts)) { 381 - netdev_err(dev->net, "%s: Data length %d, exp %Zu\n", 382 - __func__, datalen, 383 - sizeof(struct lsi_umts)); 354 + if (datalen < sizeof(struct lsi_umts_single)) { 355 + netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n", 356 + __func__, datalen, sizeof(struct lsi_umts_single)); 384 357 return -1; 385 - } 386 - 387 - if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) { 388 - 
netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n", 389 - __func__, be16_to_cpu(lsi->length), 390 - (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN); 391 - return -1; 392 - } 393 - 394 - /* Validate the protocol - only support UMTS for now */ 395 - if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) { 396 - netdev_err(dev->net, "Protocol unsupported, 0x%02x\n", 397 - lsi->protocol); 398 - return -1; 399 - } 400 - 401 - /* Validate the link type */ 402 - if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) { 403 - netdev_err(dev->net, "Link type unsupported: 0x%02x\n", 404 - lsi->link_type); 405 - return -1; 406 - } 407 - 408 - /* Validate the coverage */ 409 - if (lsi->coverage == SIERRA_NET_COVERAGE_NONE 410 - || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) { 411 - netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage); 412 - return 0; 413 358 } 414 359 415 360 /* Validate the session state */ 416 361 if (lsi->session_state == SIERRA_NET_SESSION_IDLE) { 417 362 netdev_err(dev->net, "Session idle, 0x%02x\n", 418 - lsi->session_state); 363 + lsi->session_state); 364 + return 0; 365 + } 366 + 367 + /* Validate the protocol - only support UMTS for now */ 368 + if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) { 369 + struct lsi_umts_single *single = (struct lsi_umts_single *)lsi; 370 + 371 + /* Validate the link type */ 372 + if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 && 373 + single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) { 374 + netdev_err(dev->net, "Link type unsupported: 0x%02x\n", 375 + single->link_type); 376 + return -1; 377 + } 378 + expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN; 379 + } else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) { 380 + expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN; 381 + } else { 382 + netdev_err(dev->net, "Protocol unsupported, 0x%02x\n", 383 + lsi->protocol); 384 + return -1; 385 + } 386 + 387 + if (be16_to_cpu(lsi->length) != expected_length) { 388 + netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp 
%u\n", 389 + __func__, be16_to_cpu(lsi->length), expected_length); 390 + return -1; 391 + } 392 + 393 + /* Validate the coverage */ 394 + if (lsi->coverage == SIERRA_NET_COVERAGE_NONE || 395 + lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) { 396 + netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage); 419 397 return 0; 420 398 } 421 399 ··· 684 652 u8 numendpoints; 685 653 u16 fwattr = 0; 686 654 int status; 687 - struct ethhdr *eth; 688 655 struct sierra_net_data *priv; 689 656 static const u8 sync_tmplate[sizeof(priv->sync_msg)] = { 690 657 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00}; ··· 720 689 /* change MAC addr to include, ifacenum, and to be unique */ 721 690 dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter); 722 691 dev->net->dev_addr[ETH_ALEN-1] = ifacenum; 723 - 724 - /* we will have to manufacture ethernet headers, prepare template */ 725 - eth = (struct ethhdr *)priv->ethr_hdr_tmpl; 726 - memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN); 727 - eth->h_proto = cpu_to_be16(ETH_P_IP); 728 692 729 693 /* prepare shutdown message template */ 730 694 memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg)); ··· 850 824 851 825 skb_pull(skb, hh.hdrlen); 852 826 853 - /* We are going to accept this packet, prepare it */ 854 - memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl, 855 - ETH_HLEN); 827 + /* We are going to accept this packet, prepare it. 828 + * In case protocol is IPv6, keep it, otherwise force IPv4. 829 + */ 830 + skb_reset_mac_header(skb); 831 + if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6)) 832 + eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP); 833 + eth_zero_addr(eth_hdr(skb)->h_source); 834 + memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); 856 835 857 836 /* Last packet in batch handled by usbnet */ 858 837 if (hh.payload_len.word == skb->len)
+24 -22
drivers/net/xen-netfront.c
··· 281 281 { 282 282 RING_IDX req_prod = queue->rx.req_prod_pvt; 283 283 int notify; 284 + int err = 0; 284 285 285 286 if (unlikely(!netif_carrier_ok(queue->info->netdev))) 286 287 return; ··· 296 295 struct xen_netif_rx_request *req; 297 296 298 297 skb = xennet_alloc_one_rx_buffer(queue); 299 - if (!skb) 298 + if (!skb) { 299 + err = -ENOMEM; 300 300 break; 301 + } 301 302 302 303 id = xennet_rxidx(req_prod); 303 304 ··· 323 320 324 321 queue->rx.req_prod_pvt = req_prod; 325 322 326 - /* Not enough requests? Try again later. */ 327 - if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) { 323 + /* Try again later if there are not enough requests or skb allocation 324 + * failed. 325 + * Enough requests is quantified as the sum of newly created slots and 326 + * the unconsumed slots at the backend. 327 + */ 328 + if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN || 329 + unlikely(err)) { 328 330 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); 329 331 return; 330 332 } ··· 1385 1377 for (i = 0; i < num_queues && info->queues; ++i) { 1386 1378 struct netfront_queue *queue = &info->queues[i]; 1387 1379 1380 + del_timer_sync(&queue->rx_refill_timer); 1381 + 1388 1382 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) 1389 1383 unbind_from_irqhandler(queue->tx_irq, queue); 1390 1384 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { ··· 1741 1731 1742 1732 if (netif_running(info->netdev)) 1743 1733 napi_disable(&queue->napi); 1744 - del_timer_sync(&queue->rx_refill_timer); 1745 1734 netif_napi_del(&queue->napi); 1746 1735 } 1747 1736 ··· 1829 1820 xennet_destroy_queues(info); 1830 1821 1831 1822 err = xennet_create_queues(info, &num_queues); 1832 - if (err < 0) 1833 - goto destroy_ring; 1823 + if (err < 0) { 1824 + xenbus_dev_fatal(dev, err, "creating queues"); 1825 + kfree(info->queues); 1826 + info->queues = NULL; 1827 + goto out; 1828 + } 1834 1829 1835 1830 /* Create shared ring, alloc event channel -- for each queue */ 1836 1831 for (i 
= 0; i < num_queues; ++i) { 1837 1832 queue = &info->queues[i]; 1838 1833 err = setup_netfront(dev, queue, feature_split_evtchn); 1839 - if (err) { 1840 - /* setup_netfront() will tidy up the current 1841 - * queue on error, but we need to clean up 1842 - * those already allocated. 1843 - */ 1844 - if (i > 0) { 1845 - rtnl_lock(); 1846 - netif_set_real_num_tx_queues(info->netdev, i); 1847 - rtnl_unlock(); 1848 - goto destroy_ring; 1849 - } else { 1850 - goto out; 1851 - } 1852 - } 1834 + if (err) 1835 + goto destroy_ring; 1853 1836 } 1854 1837 1855 1838 again: ··· 1931 1930 xenbus_transaction_end(xbt, 1); 1932 1931 destroy_ring: 1933 1932 xennet_disconnect_backend(info); 1934 - kfree(info->queues); 1935 - info->queues = NULL; 1933 + xennet_destroy_queues(info); 1936 1934 out: 1935 + unregister_netdev(info->netdev); 1936 + xennet_free_netdev(info->netdev); 1937 1937 return err; 1938 1938 } 1939 1939
-6
drivers/pci/hotplug/pciehp_ctrl.c
··· 31 31 #include <linux/kernel.h> 32 32 #include <linux/types.h> 33 33 #include <linux/slab.h> 34 - #include <linux/pm_runtime.h> 35 34 #include <linux/pci.h> 36 35 #include "../pci.h" 37 36 #include "pciehp.h" ··· 98 99 pciehp_green_led_blink(p_slot); 99 100 100 101 /* Check link training status */ 101 - pm_runtime_get_sync(&ctrl->pcie->port->dev); 102 102 retval = pciehp_check_link_status(ctrl); 103 103 if (retval) { 104 104 ctrl_err(ctrl, "Failed to check link status\n"); ··· 118 120 if (retval != -EEXIST) 119 121 goto err_exit; 120 122 } 121 - pm_runtime_put(&ctrl->pcie->port->dev); 122 123 123 124 pciehp_green_led_on(p_slot); 124 125 pciehp_set_attention_status(p_slot, 0); 125 126 return 0; 126 127 127 128 err_exit: 128 - pm_runtime_put(&ctrl->pcie->port->dev); 129 129 set_slot_off(ctrl, p_slot); 130 130 return retval; 131 131 } ··· 137 141 int retval; 138 142 struct controller *ctrl = p_slot->ctrl; 139 143 140 - pm_runtime_get_sync(&ctrl->pcie->port->dev); 141 144 retval = pciehp_unconfigure_device(p_slot); 142 - pm_runtime_put(&ctrl->pcie->port->dev); 143 145 if (retval) 144 146 return retval; 145 147
+10
drivers/pci/msi.c
··· 1206 1206 if (flags & PCI_IRQ_AFFINITY) { 1207 1207 if (!affd) 1208 1208 affd = &msi_default_affd; 1209 + 1210 + if (affd->pre_vectors + affd->post_vectors > min_vecs) 1211 + return -EINVAL; 1212 + 1213 + /* 1214 + * If there aren't any vectors left after applying the pre/post 1215 + * vectors don't bother with assigning affinity. 1216 + */ 1217 + if (affd->pre_vectors + affd->post_vectors == min_vecs) 1218 + affd = NULL; 1209 1219 } else { 1210 1220 if (WARN_ON(affd)) 1211 1221 affd = NULL;
+6 -6
drivers/pci/pci.c
··· 2241 2241 return false; 2242 2242 2243 2243 /* 2244 - * Hotplug ports handled by firmware in System Management Mode 2244 + * Hotplug interrupts cannot be delivered if the link is down, 2245 + * so parents of a hotplug port must stay awake. In addition, 2246 + * hotplug ports handled by firmware in System Management Mode 2245 2247 * may not be put into D3 by the OS (Thunderbolt on non-Macs). 2248 + * For simplicity, disallow in general for now. 2246 2249 */ 2247 - if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge)) 2250 + if (bridge->is_hotplug_bridge) 2248 2251 return false; 2249 2252 2250 2253 if (pci_bridge_d3_force) ··· 2279 2276 !pci_pme_capable(dev, PCI_D3cold)) || 2280 2277 2281 2278 /* If it is a bridge it must be allowed to go to D3. */ 2282 - !pci_power_manageable(dev) || 2283 - 2284 - /* Hotplug interrupts cannot be delivered if the link is down. */ 2285 - dev->is_hotplug_bridge) 2279 + !pci_power_manageable(dev)) 2286 2280 2287 2281 *d3cold_ok = false; 2288 2282
+1
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
··· 3816 3816 static const struct target_core_fabric_ops ibmvscsis_ops = { 3817 3817 .module = THIS_MODULE, 3818 3818 .name = "ibmvscsis", 3819 + .max_data_sg_nents = MAX_TXU / PAGE_SIZE, 3819 3820 .get_fabric_name = ibmvscsis_get_fabric_name, 3820 3821 .tpg_get_wwn = ibmvscsis_get_fabric_wwn, 3821 3822 .tpg_get_tag = ibmvscsis_get_tag,
+1 -3
drivers/staging/lustre/lustre/llite/llite_mmap.c
··· 390 390 result = VM_FAULT_LOCKED; 391 391 break; 392 392 case -ENODATA: 393 + case -EAGAIN: 393 394 case -EFAULT: 394 395 result = VM_FAULT_NOPAGE; 395 396 break; 396 397 case -ENOMEM: 397 398 result = VM_FAULT_OOM; 398 - break; 399 - case -EAGAIN: 400 - result = VM_FAULT_RETRY; 401 399 break; 402 400 default: 403 401 result = VM_FAULT_SIGBUS;
+9 -1
drivers/target/target_core_device.c
··· 352 352 kfree(new); 353 353 return -EINVAL; 354 354 } 355 - BUG_ON(orig->se_lun_acl != NULL); 355 + if (orig->se_lun_acl != NULL) { 356 + pr_warn_ratelimited("Detected existing explicit" 357 + " se_lun_acl->se_lun_group reference for %s" 358 + " mapped_lun: %llu, failing\n", 359 + nacl->initiatorname, mapped_lun); 360 + mutex_unlock(&nacl->lun_entry_mutex); 361 + kfree(new); 362 + return -EINVAL; 363 + } 356 364 357 365 rcu_assign_pointer(new->se_lun, lun); 358 366 rcu_assign_pointer(new->se_lun_acl, lun_acl);
+6 -2
drivers/target/target_core_sbc.c
··· 451 451 int *post_ret) 452 452 { 453 453 struct se_device *dev = cmd->se_dev; 454 + sense_reason_t ret = TCM_NO_SENSE; 454 455 455 456 /* 456 457 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through ··· 459 458 * sent to the backend driver. 460 459 */ 461 460 spin_lock_irq(&cmd->t_state_lock); 462 - if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) { 461 + if (cmd->transport_state & CMD_T_SENT) { 463 462 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 464 463 *post_ret = 1; 464 + 465 + if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION) 466 + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 465 467 } 466 468 spin_unlock_irq(&cmd->t_state_lock); 467 469 ··· 474 470 */ 475 471 up(&dev->caw_sem); 476 472 477 - return TCM_NO_SENSE; 473 + return ret; 478 474 } 479 475 480 476 static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
+58 -28
drivers/target/target_core_transport.c
··· 457 457 { 458 458 struct se_node_acl *nacl = container_of(kref, 459 459 struct se_node_acl, acl_kref); 460 + struct se_portal_group *se_tpg = nacl->se_tpg; 460 461 461 - complete(&nacl->acl_free_comp); 462 + if (!nacl->dynamic_stop) { 463 + complete(&nacl->acl_free_comp); 464 + return; 465 + } 466 + 467 + mutex_lock(&se_tpg->acl_node_mutex); 468 + list_del(&nacl->acl_list); 469 + mutex_unlock(&se_tpg->acl_node_mutex); 470 + 471 + core_tpg_wait_for_nacl_pr_ref(nacl); 472 + core_free_device_list_for_node(nacl, se_tpg); 473 + kfree(nacl); 462 474 } 463 475 464 476 void target_put_nacl(struct se_node_acl *nacl) ··· 511 499 void transport_free_session(struct se_session *se_sess) 512 500 { 513 501 struct se_node_acl *se_nacl = se_sess->se_node_acl; 502 + 514 503 /* 515 504 * Drop the se_node_acl->nacl_kref obtained from within 516 505 * core_tpg_get_initiator_node_acl(). 517 506 */ 518 507 if (se_nacl) { 508 + struct se_portal_group *se_tpg = se_nacl->se_tpg; 509 + const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo; 510 + unsigned long flags; 511 + 519 512 se_sess->se_node_acl = NULL; 513 + 514 + /* 515 + * Also determine if we need to drop the extra ->cmd_kref if 516 + * it had been previously dynamically generated, and 517 + * the endpoint is not caching dynamic ACLs. 
518 + */ 519 + mutex_lock(&se_tpg->acl_node_mutex); 520 + if (se_nacl->dynamic_node_acl && 521 + !se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 522 + spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 523 + if (list_empty(&se_nacl->acl_sess_list)) 524 + se_nacl->dynamic_stop = true; 525 + spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 526 + 527 + if (se_nacl->dynamic_stop) 528 + list_del(&se_nacl->acl_list); 529 + } 530 + mutex_unlock(&se_tpg->acl_node_mutex); 531 + 532 + if (se_nacl->dynamic_stop) 533 + target_put_nacl(se_nacl); 534 + 520 535 target_put_nacl(se_nacl); 521 536 } 522 537 if (se_sess->sess_cmd_map) { ··· 557 518 void transport_deregister_session(struct se_session *se_sess) 558 519 { 559 520 struct se_portal_group *se_tpg = se_sess->se_tpg; 560 - const struct target_core_fabric_ops *se_tfo; 561 - struct se_node_acl *se_nacl; 562 521 unsigned long flags; 563 - bool drop_nacl = false; 564 522 565 523 if (!se_tpg) { 566 524 transport_free_session(se_sess); 567 525 return; 568 526 } 569 - se_tfo = se_tpg->se_tpg_tfo; 570 527 571 528 spin_lock_irqsave(&se_tpg->session_lock, flags); 572 529 list_del(&se_sess->sess_list); ··· 570 535 se_sess->fabric_sess_ptr = NULL; 571 536 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 572 537 573 - /* 574 - * Determine if we need to do extra work for this initiator node's 575 - * struct se_node_acl if it had been previously dynamically generated. 
576 - */ 577 - se_nacl = se_sess->se_node_acl; 578 - 579 - mutex_lock(&se_tpg->acl_node_mutex); 580 - if (se_nacl && se_nacl->dynamic_node_acl) { 581 - if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 582 - list_del(&se_nacl->acl_list); 583 - drop_nacl = true; 584 - } 585 - } 586 - mutex_unlock(&se_tpg->acl_node_mutex); 587 - 588 - if (drop_nacl) { 589 - core_tpg_wait_for_nacl_pr_ref(se_nacl); 590 - core_free_device_list_for_node(se_nacl, se_tpg); 591 - se_sess->se_node_acl = NULL; 592 - kfree(se_nacl); 593 - } 594 538 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 595 539 se_tpg->se_tpg_tfo->get_fabric_name()); 596 540 /* 597 541 * If last kref is dropping now for an explicit NodeACL, awake sleeping 598 542 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 599 543 * removal context from within transport_free_session() code. 544 + * 545 + * For dynamic ACL, target_put_nacl() uses target_complete_nacl() 546 + * to release all remaining generate_node_acl=1 created ACL resources. 
600 547 */ 601 548 602 549 transport_free_session(se_sess); ··· 3127 3110 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3128 3111 goto check_stop; 3129 3112 } 3130 - cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3131 3113 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3132 3114 3133 3115 cmd->se_tfo->queue_tm_rsp(cmd); ··· 3139 3123 struct se_cmd *cmd) 3140 3124 { 3141 3125 unsigned long flags; 3126 + bool aborted = false; 3142 3127 3143 3128 spin_lock_irqsave(&cmd->t_state_lock, flags); 3144 - cmd->transport_state |= CMD_T_ACTIVE; 3129 + if (cmd->transport_state & CMD_T_ABORTED) { 3130 + aborted = true; 3131 + } else { 3132 + cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3133 + cmd->transport_state |= CMD_T_ACTIVE; 3134 + } 3145 3135 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3136 + 3137 + if (aborted) { 3138 + pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d" 3139 + "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function, 3140 + cmd->se_tmr_req->ref_task_tag, cmd->tag); 3141 + transport_cmd_check_stop_to_fabric(cmd); 3142 + return 0; 3143 + } 3146 3144 3147 3145 INIT_WORK(&cmd->work, target_tmr_work); 3148 3146 queue_work(cmd->se_dev->tmr_wq, &cmd->work);
+1 -1
drivers/target/target_core_xcopy.c
··· 864 864 " CHECK_CONDITION -> sending response\n", rc); 865 865 ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 866 866 } 867 - target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION); 867 + target_complete_cmd(ec_cmd, ec_cmd->scsi_status); 868 868 } 869 869 870 870 sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
+22
drivers/vfio/vfio_iommu_spapr_tce.c
··· 1245 1245 static long tce_iommu_take_ownership_ddw(struct tce_container *container, 1246 1246 struct iommu_table_group *table_group) 1247 1247 { 1248 + long i, ret = 0; 1249 + 1248 1250 if (!table_group->ops->create_table || !table_group->ops->set_window || 1249 1251 !table_group->ops->release_ownership) { 1250 1252 WARN_ON_ONCE(1); ··· 1255 1253 1256 1254 table_group->ops->take_ownership(table_group); 1257 1255 1256 + /* Set all windows to the new group */ 1257 + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { 1258 + struct iommu_table *tbl = container->tables[i]; 1259 + 1260 + if (!tbl) 1261 + continue; 1262 + 1263 + ret = table_group->ops->set_window(table_group, i, tbl); 1264 + if (ret) 1265 + goto release_exit; 1266 + } 1267 + 1258 1268 return 0; 1269 + 1270 + release_exit: 1271 + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) 1272 + table_group->ops->unset_window(table_group, i); 1273 + 1274 + table_group->ops->release_ownership(table_group); 1275 + 1276 + return ret; 1259 1277 } 1260 1278 1261 1279 static int tce_iommu_attach_group(void *iommu_data,
+60 -37
fs/nfsd/vfs.c
··· 332 332 } 333 333 } 334 334 335 + static __be32 336 + nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp, 337 + struct iattr *iap) 338 + { 339 + struct inode *inode = d_inode(fhp->fh_dentry); 340 + int host_err; 341 + 342 + if (iap->ia_size < inode->i_size) { 343 + __be32 err; 344 + 345 + err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry, 346 + NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE); 347 + if (err) 348 + return err; 349 + } 350 + 351 + host_err = get_write_access(inode); 352 + if (host_err) 353 + goto out_nfserrno; 354 + 355 + host_err = locks_verify_truncate(inode, NULL, iap->ia_size); 356 + if (host_err) 357 + goto out_put_write_access; 358 + return 0; 359 + 360 + out_put_write_access: 361 + put_write_access(inode); 362 + out_nfserrno: 363 + return nfserrno(host_err); 364 + } 365 + 335 366 /* 336 367 * Set various file attributes. After this call fhp needs an fh_put. 337 368 */ ··· 377 346 __be32 err; 378 347 int host_err; 379 348 bool get_write_count; 349 + int size_change = 0; 380 350 381 351 if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE)) 382 352 accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE; ··· 390 358 /* Get inode */ 391 359 err = fh_verify(rqstp, fhp, ftype, accmode); 392 360 if (err) 393 - return err; 361 + goto out; 394 362 if (get_write_count) { 395 363 host_err = fh_want_write(fhp); 396 364 if (host_err) 397 - goto out_host_err; 365 + return nfserrno(host_err); 398 366 } 399 367 400 368 dentry = fhp->fh_dentry; ··· 405 373 iap->ia_valid &= ~ATTR_MODE; 406 374 407 375 if (!iap->ia_valid) 408 - return 0; 376 + goto out; 409 377 410 378 nfsd_sanitize_attrs(inode, iap); 411 379 412 - if (check_guard && guardtime != inode->i_ctime.tv_sec) 413 - return nfserr_notsync; 414 - 415 380 /* 416 381 * The size case is special, it changes the file in addition to the 417 - * attributes, and file systems don't expect it to be mixed with 418 - * "random" attribute changes. 
We thus split out the size change 419 - * into a separate call for vfs_truncate, and do the rest as a 420 - * a separate setattr call. 382 + * attributes. 421 383 */ 422 384 if (iap->ia_valid & ATTR_SIZE) { 423 - struct path path = { 424 - .mnt = fhp->fh_export->ex_path.mnt, 425 - .dentry = dentry, 426 - }; 427 - bool implicit_mtime = false; 385 + err = nfsd_get_write_access(rqstp, fhp, iap); 386 + if (err) 387 + goto out; 388 + size_change = 1; 428 389 429 390 /* 430 - * vfs_truncate implicity updates the mtime IFF the file size 431 - * actually changes. Avoid the additional seattr call below if 432 - * the only other attribute that the client sends is the mtime. 391 + * RFC5661, Section 18.30.4: 392 + * Changing the size of a file with SETATTR indirectly 393 + * changes the time_modify and change attributes. 394 + * 395 + * (and similar for the older RFCs) 433 396 */ 434 - if (iap->ia_size != i_size_read(inode) && 435 - ((iap->ia_valid & ~(ATTR_SIZE | ATTR_MTIME)) == 0)) 436 - implicit_mtime = true; 437 - 438 - host_err = vfs_truncate(&path, iap->ia_size); 439 - if (host_err) 440 - goto out_host_err; 441 - 442 - iap->ia_valid &= ~ATTR_SIZE; 443 - if (implicit_mtime) 444 - iap->ia_valid &= ~ATTR_MTIME; 445 - if (!iap->ia_valid) 446 - goto done; 397 + if (iap->ia_size != i_size_read(inode)) 398 + iap->ia_valid |= ATTR_MTIME; 447 399 } 448 400 449 401 iap->ia_valid |= ATTR_CTIME; 450 402 403 + if (check_guard && guardtime != inode->i_ctime.tv_sec) { 404 + err = nfserr_notsync; 405 + goto out_put_write_access; 406 + } 407 + 451 408 fh_lock(fhp); 452 409 host_err = notify_change(dentry, iap, NULL); 453 410 fh_unlock(fhp); 454 - if (host_err) 455 - goto out_host_err; 411 + err = nfserrno(host_err); 456 412 457 - done: 458 - host_err = commit_metadata(fhp); 459 - out_host_err: 460 - return nfserrno(host_err); 413 + out_put_write_access: 414 + if (size_change) 415 + put_write_access(inode); 416 + if (!err) 417 + err = nfserrno(commit_metadata(fhp)); 418 + out: 419 + 
return err; 461 420 } 462 421 463 422 #if defined(CONFIG_NFSD_V4)
+1 -1
fs/pstore/ram.c
··· 280 280 1, id, type, PSTORE_TYPE_PMSG, 0); 281 281 282 282 /* ftrace is last since it may want to dynamically allocate memory. */ 283 - if (!prz_ok(prz)) { 283 + if (!prz_ok(prz) && cxt->fprzs) { 284 284 if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)) { 285 285 prz = ramoops_get_next_prz(cxt->fprzs, 286 286 &cxt->ftrace_read_cnt, 1, id, type,
+1 -3
include/linux/buffer_head.h
··· 243 243 { 244 244 if (err == 0) 245 245 return VM_FAULT_LOCKED; 246 - if (err == -EFAULT) 246 + if (err == -EFAULT || err == -EAGAIN) 247 247 return VM_FAULT_NOPAGE; 248 248 if (err == -ENOMEM) 249 249 return VM_FAULT_OOM; 250 - if (err == -EAGAIN) 251 - return VM_FAULT_RETRY; 252 250 /* -ENOSPC, -EDQUOT, -EIO ... */ 253 251 return VM_FAULT_SIGBUS; 254 252 }
+4 -4
include/linux/cpumask.h
··· 560 560 static inline int cpumask_parse_user(const char __user *buf, int len, 561 561 struct cpumask *dstp) 562 562 { 563 - return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids); 563 + return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); 564 564 } 565 565 566 566 /** ··· 575 575 struct cpumask *dstp) 576 576 { 577 577 return bitmap_parselist_user(buf, len, cpumask_bits(dstp), 578 - nr_cpu_ids); 578 + nr_cpumask_bits); 579 579 } 580 580 581 581 /** ··· 590 590 char *nl = strchr(buf, '\n'); 591 591 unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); 592 592 593 - return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids); 593 + return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); 594 594 } 595 595 596 596 /** ··· 602 602 */ 603 603 static inline int cpulist_parse(const char *buf, struct cpumask *dstp) 604 604 { 605 - return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids); 605 + return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); 606 606 } 607 607 608 608 /**
+4
include/linux/netdevice.h
··· 1508 1508 * @max_mtu: Interface Maximum MTU value 1509 1509 * @type: Interface hardware type 1510 1510 * @hard_header_len: Maximum hardware header length. 1511 + * @min_header_len: Minimum hardware header length 1511 1512 * 1512 1513 * @needed_headroom: Extra headroom the hardware may need, but not in all 1513 1514 * cases can this be guaranteed ··· 1725 1724 unsigned int max_mtu; 1726 1725 unsigned short type; 1727 1726 unsigned short hard_header_len; 1727 + unsigned short min_header_len; 1728 1728 1729 1729 unsigned short needed_headroom; 1730 1730 unsigned short needed_tailroom; ··· 2690 2688 { 2691 2689 if (likely(len >= dev->hard_header_len)) 2692 2690 return true; 2691 + if (len < dev->min_header_len) 2692 + return false; 2693 2693 2694 2694 if (capable(CAP_SYS_RAWIO)) { 2695 2695 memset(ll_header + len, 0, dev->hard_header_len - len);
+4 -1
include/net/lwtunnel.h
··· 178 178 } 179 179 static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len) 180 180 { 181 - return -EOPNOTSUPP; 181 + /* return 0 since we are not walking attr looking for 182 + * RTA_ENCAP_TYPE attribute on nexthops. 183 + */ 184 + return 0; 182 185 } 183 186 184 187 static inline int lwtunnel_build_state(u16 encap_type,
+1
include/target/target_core_base.h
··· 538 538 char initiatorname[TRANSPORT_IQN_LEN]; 539 539 /* Used to signal demo mode created ACL, disabled by default */ 540 540 bool dynamic_node_acl; 541 + bool dynamic_stop; 541 542 u32 queue_depth; 542 543 u32 acl_index; 543 544 enum target_prot_type saved_prot_type;
+8 -3
include/uapi/rdma/ib_user_verbs.h
··· 37 37 #define IB_USER_VERBS_H 38 38 39 39 #include <linux/types.h> 40 - #include <rdma/ib_verbs.h> 41 40 42 41 /* 43 42 * Increment this value if any changes that break userspace ABI ··· 547 548 }; 548 549 549 550 enum { 550 - IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN 551 + /* 552 + * This value is equal to IB_QP_DEST_QPN. 553 + */ 554 + IB_USER_LEGACY_LAST_QP_ATTR_MASK = 1ULL << 20, 551 555 }; 552 556 553 557 enum { 554 - IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT 558 + /* 559 + * This value is equal to IB_QP_RATE_LIMIT. 560 + */ 561 + IB_USER_LAST_QP_ATTR_MASK = 1ULL << 25, 555 562 }; 556 563 557 564 struct ib_uverbs_ex_create_qp {
+1 -2
kernel/ucount.c
··· 227 227 * properly. 228 228 */ 229 229 user_header = register_sysctl("user", empty); 230 + kmemleak_ignore(user_header); 230 231 BUG_ON(!user_header); 231 232 BUG_ON(!setup_userns_sysctls(&init_user_ns)); 232 233 #endif 233 234 return 0; 234 235 } 235 236 subsys_initcall(user_namespace_sysctl_init); 236 - 237 -
+4
mm/slub.c
··· 1422 1422 int err; 1423 1423 unsigned long i, count = oo_objects(s->oo); 1424 1424 1425 + /* Bailout if already initialised */ 1426 + if (s->random_seq) 1427 + return 0; 1428 + 1425 1429 err = cache_random_seq_create(s, count, GFP_KERNEL); 1426 1430 if (err) { 1427 1431 pr_err("SLUB: Unable to initialize free list for %s\n",
+1
net/dsa/dsa2.c
··· 271 271 if (err) { 272 272 dev_warn(ds->dev, "Failed to create slave %d: %d\n", 273 273 index, err); 274 + ds->ports[index].netdev = NULL; 274 275 return err; 275 276 } 276 277
+1
net/ethernet/eth.c
··· 356 356 dev->header_ops = &eth_header_ops; 357 357 dev->type = ARPHRD_ETHER; 358 358 dev->hard_header_len = ETH_HLEN; 359 + dev->min_header_len = ETH_HLEN; 359 360 dev->mtu = ETH_DATA_LEN; 360 361 dev->min_mtu = ETH_MIN_MTU; 361 362 dev->max_mtu = ETH_DATA_LEN;
+1
net/ipv4/igmp.c
··· 1172 1172 psf->sf_crcount = im->crcount; 1173 1173 } 1174 1174 in_dev_put(pmc->interface); 1175 + kfree(pmc); 1175 1176 } 1176 1177 spin_unlock_bh(&im->lock); 1177 1178 }
+2
net/ipv4/ping.c
··· 642 642 { 643 643 struct sk_buff *skb = skb_peek(&sk->sk_write_queue); 644 644 645 + if (!skb) 646 + return 0; 645 647 pfh->wcheck = csum_partial((char *)&pfh->icmph, 646 648 sizeof(struct icmphdr), pfh->wcheck); 647 649 pfh->icmph.checksum = csum_fold(pfh->wcheck);
+6
net/ipv6/addrconf.c
··· 4022 4022 4023 4023 if (bump_id) 4024 4024 rt_genid_bump_ipv6(dev_net(dev)); 4025 + 4026 + /* Make sure that a new temporary address will be created 4027 + * before this temporary address becomes deprecated. 4028 + */ 4029 + if (ifp->flags & IFA_F_TEMPORARY) 4030 + addrconf_verify_rtnl(); 4025 4031 } 4026 4032 4027 4033 static void addrconf_dad_run(struct inet6_dev *idev)
+1
net/ipv6/mcast.c
··· 779 779 psf->sf_crcount = im->mca_crcount; 780 780 } 781 781 in6_dev_put(pmc->idev); 782 + kfree(pmc); 782 783 } 783 784 spin_unlock_bh(&im->mca_lock); 784 785 }
+1
net/ipv6/sit.c
··· 1380 1380 err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL); 1381 1381 if (err) { 1382 1382 free_percpu(dev->tstats); 1383 + dev->tstats = NULL; 1383 1384 return err; 1384 1385 } 1385 1386
+23 -19
net/kcm/kcmsock.c
··· 929 929 goto out_error; 930 930 } 931 931 932 - /* New message, alloc head skb */ 933 - head = alloc_skb(0, sk->sk_allocation); 934 - while (!head) { 935 - kcm_push(kcm); 936 - err = sk_stream_wait_memory(sk, &timeo); 937 - if (err) 938 - goto out_error; 939 - 932 + if (msg_data_left(msg)) { 933 + /* New message, alloc head skb */ 940 934 head = alloc_skb(0, sk->sk_allocation); 935 + while (!head) { 936 + kcm_push(kcm); 937 + err = sk_stream_wait_memory(sk, &timeo); 938 + if (err) 939 + goto out_error; 940 + 941 + head = alloc_skb(0, sk->sk_allocation); 942 + } 943 + 944 + skb = head; 945 + 946 + /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling 947 + * csum_and_copy_from_iter from skb_do_copy_data_nocache. 948 + */ 949 + skb->ip_summed = CHECKSUM_UNNECESSARY; 941 950 } 942 - 943 - skb = head; 944 - 945 - /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling 946 - * csum_and_copy_from_iter from skb_do_copy_data_nocache. 947 - */ 948 - skb->ip_summed = CHECKSUM_UNNECESSARY; 949 951 950 952 start: 951 953 while (msg_data_left(msg)) { ··· 1020 1018 if (eor) { 1021 1019 bool not_busy = skb_queue_empty(&sk->sk_write_queue); 1022 1020 1023 - /* Message complete, queue it on send buffer */ 1024 - __skb_queue_tail(&sk->sk_write_queue, head); 1025 - kcm->seq_skb = NULL; 1026 - KCM_STATS_INCR(kcm->stats.tx_msgs); 1021 + if (head) { 1022 + /* Message complete, queue it on send buffer */ 1023 + __skb_queue_tail(&sk->sk_write_queue, head); 1024 + kcm->seq_skb = NULL; 1025 + KCM_STATS_INCR(kcm->stats.tx_msgs); 1026 + } 1027 1027 1028 1028 if (msg->msg_flags & MSG_BATCH) { 1029 1029 kcm->tx_wait_more = true;
+1
net/l2tp/l2tp_core.h
··· 263 263 int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, 264 264 const struct l2tp_nl_cmd_ops *ops); 265 265 void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); 266 + int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg); 266 267 267 268 /* Session reference counts. Incremented when code obtains a reference 268 269 * to a session.
+26 -1
net/l2tp/l2tp_ip.c
··· 11 11 12 12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13 13 14 + #include <asm/ioctls.h> 14 15 #include <linux/icmp.h> 15 16 #include <linux/module.h> 16 17 #include <linux/skbuff.h> ··· 561 560 return err ? err : copied; 562 561 } 563 562 563 + int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg) 564 + { 565 + struct sk_buff *skb; 566 + int amount; 567 + 568 + switch (cmd) { 569 + case SIOCOUTQ: 570 + amount = sk_wmem_alloc_get(sk); 571 + break; 572 + case SIOCINQ: 573 + spin_lock_bh(&sk->sk_receive_queue.lock); 574 + skb = skb_peek(&sk->sk_receive_queue); 575 + amount = skb ? skb->len : 0; 576 + spin_unlock_bh(&sk->sk_receive_queue.lock); 577 + break; 578 + 579 + default: 580 + return -ENOIOCTLCMD; 581 + } 582 + 583 + return put_user(amount, (int __user *)arg); 584 + } 585 + EXPORT_SYMBOL(l2tp_ioctl); 586 + 564 587 static struct proto l2tp_ip_prot = { 565 588 .name = "L2TP/IP", 566 589 .owner = THIS_MODULE, ··· 593 568 .bind = l2tp_ip_bind, 594 569 .connect = l2tp_ip_connect, 595 570 .disconnect = l2tp_ip_disconnect, 596 - .ioctl = udp_ioctl, 571 + .ioctl = l2tp_ioctl, 597 572 .destroy = l2tp_ip_destroy_sock, 598 573 .setsockopt = ip_setsockopt, 599 574 .getsockopt = ip_getsockopt,
+1 -1
net/l2tp/l2tp_ip6.c
··· 731 731 .bind = l2tp_ip6_bind, 732 732 .connect = l2tp_ip6_connect, 733 733 .disconnect = l2tp_ip6_disconnect, 734 - .ioctl = udp_ioctl, 734 + .ioctl = l2tp_ioctl, 735 735 .destroy = l2tp_ip6_destroy_sock, 736 736 .setsockopt = ipv6_setsockopt, 737 737 .getsockopt = ipv6_getsockopt,
+4 -3
net/packet/af_packet.c
··· 2776 2776 struct virtio_net_hdr vnet_hdr = { 0 }; 2777 2777 int offset = 0; 2778 2778 struct packet_sock *po = pkt_sk(sk); 2779 - int hlen, tlen; 2779 + int hlen, tlen, linear; 2780 2780 int extra_len = 0; 2781 2781 2782 2782 /* ··· 2837 2837 err = -ENOBUFS; 2838 2838 hlen = LL_RESERVED_SPACE(dev); 2839 2839 tlen = dev->needed_tailroom; 2840 - skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, 2841 - __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len), 2840 + linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); 2841 + linear = max(linear, min_t(int, len, dev->hard_header_len)); 2842 + skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, 2842 2843 msg->msg_flags & MSG_DONTWAIT, &err); 2843 2844 if (skb == NULL) 2844 2845 goto out_unlock;
+1 -1
security/selinux/hooks.c
··· 5888 5888 return error; 5889 5889 5890 5890 /* Obtain a SID for the context, if one was specified. */ 5891 - if (size && str[1] && str[1] != '\n') { 5891 + if (size && str[0] && str[0] != '\n') { 5892 5892 if (str[size-1] == '\n') { 5893 5893 str[size-1] = 0; 5894 5894 size--;
+1 -8
sound/core/seq/seq_memory.c
··· 419 419 { 420 420 unsigned long flags; 421 421 struct snd_seq_event_cell *ptr; 422 - int max_count = 5 * HZ; 423 422 424 423 if (snd_BUG_ON(!pool)) 425 424 return -EINVAL; ··· 431 432 if (waitqueue_active(&pool->output_sleep)) 432 433 wake_up(&pool->output_sleep); 433 434 434 - while (atomic_read(&pool->counter) > 0) { 435 - if (max_count == 0) { 436 - pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter)); 437 - break; 438 - } 435 + while (atomic_read(&pool->counter) > 0) 439 436 schedule_timeout_uninterruptible(1); 440 - max_count--; 441 - } 442 437 443 438 /* release all resources */ 444 439 spin_lock_irqsave(&pool->lock, flags);
+20 -13
sound/core/seq/seq_queue.c
··· 181 181 } 182 182 } 183 183 184 + static void queue_use(struct snd_seq_queue *queue, int client, int use); 185 + 184 186 /* allocate a new queue - 185 187 * return queue index value or negative value for error 186 188 */ ··· 194 192 if (q == NULL) 195 193 return -ENOMEM; 196 194 q->info_flags = info_flags; 195 + queue_use(q, client, 1); 197 196 if (queue_list_add(q) < 0) { 198 197 queue_delete(q); 199 198 return -ENOMEM; 200 199 } 201 - snd_seq_queue_use(q->queue, client, 1); /* use this queue */ 202 200 return q->queue; 203 201 } 204 202 ··· 504 502 return result; 505 503 } 506 504 507 - 508 - /* use or unuse this queue - 509 - * if it is the first client, starts the timer. 510 - * if it is not longer used by any clients, stop the timer. 511 - */ 512 - int snd_seq_queue_use(int queueid, int client, int use) 505 + /* use or unuse this queue */ 506 + static void queue_use(struct snd_seq_queue *queue, int client, int use) 513 507 { 514 - struct snd_seq_queue *queue; 515 - 516 - queue = queueptr(queueid); 517 - if (queue == NULL) 518 - return -EINVAL; 519 - mutex_lock(&queue->timer_mutex); 520 508 if (use) { 521 509 if (!test_and_set_bit(client, queue->clients_bitmap)) 522 510 queue->clients++; ··· 521 529 } else { 522 530 snd_seq_timer_close(queue); 523 531 } 532 + } 533 + 534 + /* use or unuse this queue - 535 + * if it is the first client, starts the timer. 536 + * if it is not longer used by any clients, stop the timer. 537 + */ 538 + int snd_seq_queue_use(int queueid, int client, int use) 539 + { 540 + struct snd_seq_queue *queue; 541 + 542 + queue = queueptr(queueid); 543 + if (queue == NULL) 544 + return -EINVAL; 545 + mutex_lock(&queue->timer_mutex); 546 + queue_use(queue, client, use); 524 547 mutex_unlock(&queue->timer_mutex); 525 548 queuefree(queue); 526 549 return 0;
+1
sound/pci/hda/patch_hdmi.c
··· 3639 3639 HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi), 3640 3640 HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi), 3641 3641 HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi), 3642 + HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP", patch_nvhdmi), 3642 3643 HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi), 3643 3644 HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi), 3644 3645 HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
+2 -1
sound/usb/line6/driver.c
··· 754 754 goto error; 755 755 } 756 756 757 + line6_get_interval(line6); 758 + 757 759 if (properties->capabilities & LINE6_CAP_CONTROL) { 758 - line6_get_interval(line6); 759 760 ret = line6_init_cap_control(line6); 760 761 if (ret < 0) 761 762 goto error;