Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull in the 'net' tree to get Daniel Borkmann's flow dissector
infrastructure change.

Signed-off-by: David S. Miller <davem@davemloft.net>

+2907 -1806
+8
CREDITS
··· 1510 1510 D: Cobalt Networks (x86) support 1511 1511 D: This-and-That 1512 1512 1513 + N: Mark M. Hoffman 1514 + E: mhoffman@lightlink.com 1515 + D: asb100, lm93 and smsc47b397 hardware monitoring drivers 1516 + D: hwmon subsystem core 1517 + D: hwmon subsystem maintainer 1518 + D: i2c-sis96x and i2c-stub SMBus drivers 1519 + S: USA 1520 + 1513 1521 N: Dirk Hohndel 1514 1522 E: hohndel@suse.de 1515 1523 D: The XFree86[tm] Project
+1 -5
Documentation/devicetree/bindings/mfd/ab8500.txt
··· 13 13 4 = active high level-sensitive 14 14 8 = active low level-sensitive 15 15 16 - Optional parent device properties: 17 - - reg : contains the PRCMU mailbox address for the AB8500 i2c port 18 - 19 16 The AB8500 consists of a large and varied group of sub-devices: 20 17 21 18 Device IRQ Names Supply Names Description ··· 83 86 - stericsson,amic2-bias-vamic1 : Analoge Mic wishes to use a non-standard Vamic 84 87 - stericsson,earpeice-cmv : Earpeice voltage (only: 950 | 1100 | 1270 | 1580) 85 88 86 - ab8500@5 { 89 + ab8500 { 87 90 compatible = "stericsson,ab8500"; 88 - reg = <5>; /* mailbox 5 is i2c */ 89 91 interrupts = <0 40 0x4>; 90 92 interrupt-controller; 91 93 #interrupt-cells = <2>;
+3
Documentation/devicetree/bindings/tty/serial/of-serial.txt
··· 11 11 - "nvidia,tegra20-uart" 12 12 - "nxp,lpc3220-uart" 13 13 - "ibm,qpace-nwp-serial" 14 + - "altr,16550-FIFO32" 15 + - "altr,16550-FIFO64" 16 + - "altr,16550-FIFO128" 14 17 - "serial" if the port type is unknown. 15 18 - reg : offset and length of the register set for the device. 16 19 - interrupts : should contain uart interrupt.
+1 -1
Documentation/hwmon/lm75
··· 23 23 Datasheet: Publicly available at the Maxim website 24 24 http://www.maxim-ic.com/ 25 25 * Microchip (TelCom) TCN75 26 - Prefix: 'lm75' 26 + Prefix: 'tcn75' 27 27 Addresses scanned: none 28 28 Datasheet: Publicly available at the Microchip website 29 29 http://www.microchip.com/
+7
Documentation/networking/ipvs-sysctl.txt
··· 15 15 enabled and the variable is automatically set to 2, otherwise 16 16 the strategy is disabled and the variable is set to 1. 17 17 18 + backup_only - BOOLEAN 19 + 0 - disabled (default) 20 + not 0 - enabled 21 + 22 + If set, disable the director function while the server is 23 + in backup mode to avoid packet loops for DR/TUN methods. 24 + 18 25 conntrack - BOOLEAN 19 26 0 - disabled (default) 20 27 not 0 - enabled
+27 -15
MAINTAINERS
··· 1338 1338 F: drivers/platform/x86/asus*.c 1339 1339 F: drivers/platform/x86/eeepc*.c 1340 1340 1341 - ASUS ASB100 HARDWARE MONITOR DRIVER 1342 - M: "Mark M. Hoffman" <mhoffman@lightlink.com> 1343 - L: lm-sensors@lm-sensors.org 1344 - S: Maintained 1345 - F: drivers/hwmon/asb100.c 1346 - 1347 1341 ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API 1348 1342 M: Dan Williams <djbw@fb.com> 1349 1343 W: http://sourceforge.net/projects/xscaleiop ··· 3845 3851 F: Documentation/i2c/busses/i2c-ismt 3846 3852 3847 3853 I2C/SMBUS STUB DRIVER 3848 - M: "Mark M. Hoffman" <mhoffman@lightlink.com> 3854 + M: Jean Delvare <khali@linux-fr.org> 3849 3855 L: linux-i2c@vger.kernel.org 3850 3856 S: Maintained 3851 3857 F: drivers/i2c/i2c-stub.c ··· 3998 4004 M: Stanislaw Gruszka <stf_xl@wp.pl> 3999 4005 S: Maintained 4000 4006 F: drivers/usb/atm/ueagle-atm.c 4007 + 4008 + INA209 HARDWARE MONITOR DRIVER 4009 + M: Guenter Roeck <linux@roeck-us.net> 4010 + L: lm-sensors@lm-sensors.org 4011 + S: Maintained 4012 + F: Documentation/hwmon/ina209 4013 + F: Documentation/devicetree/bindings/i2c/ina209.txt 4014 + F: drivers/hwmon/ina209.c 4015 + 4016 + INA2XX HARDWARE MONITOR DRIVER 4017 + M: Guenter Roeck <linux@roeck-us.net> 4018 + L: lm-sensors@lm-sensors.org 4019 + S: Maintained 4020 + F: Documentation/hwmon/ina2xx 4021 + F: drivers/hwmon/ina2xx.c 4022 + F: include/linux/platform_data/ina2xx.h 4001 4023 4002 4024 INDUSTRY PACK SUBSYSTEM (IPACK) 4003 4025 M: Samuel Iglesias Gonsalvez <siglesias@igalia.com> ··· 5107 5097 S: Maintained 5108 5098 F: Documentation/hwmon/max6650 5109 5099 F: drivers/hwmon/max6650.c 5100 + 5101 + MAX6697 HARDWARE MONITOR DRIVER 5102 + M: Guenter Roeck <linux@roeck-us.net> 5103 + L: lm-sensors@lm-sensors.org 5104 + S: Maintained 5105 + F: Documentation/hwmon/max6697 5106 + F: Documentation/devicetree/bindings/i2c/max6697.txt 5107 + F: drivers/hwmon/max6697.c 5108 + F: include/linux/platform_data/max6697.h 5110 5109 5111 5110 MAXIRADIO FM RADIO RECEIVER DRIVER 5112 5111 
M: Hans Verkuil <hverkuil@xs4all.nl> ··· 7192 7173 S: Maintained 7193 7174 F: drivers/net/ethernet/sis/sis900.* 7194 7175 7195 - SIS 96X I2C/SMBUS DRIVER 7196 - M: "Mark M. Hoffman" <mhoffman@lightlink.com> 7197 - L: linux-i2c@vger.kernel.org 7198 - S: Maintained 7199 - F: Documentation/i2c/busses/i2c-sis96x 7200 - F: drivers/i2c/busses/i2c-sis96x.c 7201 - 7202 7176 SIS FRAMEBUFFER DRIVER 7203 7177 M: Thomas Winischhofer <thomas@winischhofer.net> 7204 7178 W: http://www.winischhofer.net/linuxsisvga.shtml ··· 7269 7257 F: drivers/hwmon/sch5627.c 7270 7258 7271 7259 SMSC47B397 HARDWARE MONITOR DRIVER 7272 - M: "Mark M. Hoffman" <mhoffman@lightlink.com> 7260 + M: Jean Delvare <khali@linux-fr.org> 7273 7261 L: lm-sensors@lm-sensors.org 7274 7262 S: Maintained 7275 7263 F: Documentation/hwmon/smsc47b397
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 9 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc2 4 + EXTRAVERSION = -rc3 5 5 NAME = Unicycling Gorilla 6 6 7 7 # *DOCUMENTATION*
-7
arch/Kconfig
··· 319 319 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION 320 320 bool 321 321 322 - config HAVE_VIRT_TO_BUS 323 - bool 324 - help 325 - An architecture should select this if it implements the 326 - deprecated interface virt_to_bus(). All new architectures 327 - should probably not select this. 328 - 329 322 config HAVE_ARCH_SECCOMP_FILTER 330 323 bool 331 324 help
+1 -1
arch/alpha/Kconfig
··· 9 9 select HAVE_PERF_EVENTS 10 10 select HAVE_DMA_ATTRS 11 11 select HAVE_GENERIC_HARDIRQS 12 - select HAVE_VIRT_TO_BUS 12 + select VIRT_TO_BUS 13 13 select GENERIC_IRQ_PROBE 14 14 select AUTO_IRQ_AFFINITY if SMP 15 15 select GENERIC_IRQ_SHOW
+11 -11
arch/arm/Kconfig
··· 49 49 select HAVE_REGS_AND_STACK_ACCESS_API 50 50 select HAVE_SYSCALL_TRACEPOINTS 51 51 select HAVE_UID16 52 - select HAVE_VIRT_TO_BUS 53 52 select KTIME_SCALAR 54 53 select PERF_USE_VMALLOC 55 54 select RTC_LIB ··· 555 556 config ARCH_DOVE 556 557 bool "Marvell Dove" 557 558 select ARCH_REQUIRE_GPIOLIB 558 - select COMMON_CLK_DOVE 559 559 select CPU_V7 560 560 select GENERIC_CLOCKEVENTS 561 561 select MIGHT_HAVE_PCI ··· 742 744 select NEED_MACH_IO_H 743 745 select NEED_MACH_MEMORY_H 744 746 select NO_IOPORT 747 + select VIRT_TO_BUS 745 748 help 746 749 On the Acorn Risc-PC, Linux can support the internal IDE disk and 747 750 CD-ROM interface, serial and parallel port, and the floppy drive. ··· 878 879 select ISA_DMA 879 880 select NEED_MACH_MEMORY_H 880 881 select PCI 882 + select VIRT_TO_BUS 881 883 select ZONE_DMA 882 884 help 883 885 Support for the StrongARM based Digital DNARD machine, also known ··· 1006 1006 bool 1007 1007 1008 1008 config ARCH_MULTI_V6 1009 - bool "ARMv6 based platforms (ARM11, Scorpion, ...)" 1009 + bool "ARMv6 based platforms (ARM11)" 1010 1010 select ARCH_MULTI_V6_V7 1011 1011 select CPU_V6 1012 1012 1013 1013 config ARCH_MULTI_V7 1014 - bool "ARMv7 based platforms (Cortex-A, PJ4, Krait)" 1014 + bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)" 1015 1015 default y 1016 1016 select ARCH_MULTI_V6_V7 1017 1017 select ARCH_VEXPRESS ··· 1462 1462 bool 1463 1463 select ISA_DMA_API 1464 1464 1465 - config ARCH_NO_VIRT_TO_BUS 1466 - def_bool y 1467 - depends on !ARCH_RPC && !ARCH_NETWINDER && !ARCH_SHARK 1468 - 1469 1465 # Select ISA DMA interface 1470 1466 config ISA_DMA_API 1471 1467 bool ··· 1653 1657 accounting to be spread across the timer interval, preventing a 1654 1658 "thundering herd" at every timer tick. 1655 1659 1660 + # The GPIO number here must be sorted by descending number. In case of 1661 + # a multiplatform kernel, we just want the highest value required by the 1662 + # selected platforms. 
1656 1663 config ARCH_NR_GPIO 1657 1664 int 1658 1665 default 1024 if ARCH_SHMOBILE || ARCH_TEGRA 1659 - default 355 if ARCH_U8500 1660 - default 264 if MACH_H4700 1661 1666 default 512 if SOC_OMAP5 1667 + default 355 if ARCH_U8500 1662 1668 default 288 if ARCH_VT8500 || ARCH_SUNXI 1669 + default 264 if MACH_H4700 1663 1670 default 0 1664 1671 help 1665 1672 Maximum number of GPIOs in the system. ··· 1886 1887 1887 1888 config XEN 1888 1889 bool "Xen guest support on ARM (EXPERIMENTAL)" 1889 - depends on ARM && OF 1890 + depends on ARM && AEABI && OF 1890 1891 depends on CPU_V7 && !CPU_V6 1892 + depends on !GENERIC_ATOMIC64 1891 1893 help 1892 1894 Say Y if you want to run Linux in a Virtual Machine on Xen on ARM. 1893 1895
+1 -1
arch/arm/Kconfig.debug
··· 492 492 DEBUG_IMX31_UART || \ 493 493 DEBUG_IMX35_UART || \ 494 494 DEBUG_IMX51_UART || \ 495 - DEBUG_IMX50_IMX53_UART || \ 495 + DEBUG_IMX53_UART || \ 496 496 DEBUG_IMX6Q_UART 497 497 default 1 498 498 help
+1 -1
arch/arm/boot/Makefile
··· 115 115 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ 116 116 $(obj)/Image System.map "$(INSTALL_PATH)" 117 117 118 - subdir- := bootp compressed 118 + subdir- := bootp compressed dts
+8
arch/arm/boot/dts/armada-370-rd.dts
··· 64 64 status = "okay"; 65 65 /* No CD or WP GPIOs */ 66 66 }; 67 + 68 + usb@d0050000 { 69 + status = "okay"; 70 + }; 71 + 72 + usb@d0051000 { 73 + status = "okay"; 74 + }; 67 75 }; 68 76 };
+2 -3
arch/arm/boot/dts/armada-370-xp.dtsi
··· 31 31 mpic: interrupt-controller@d0020000 { 32 32 compatible = "marvell,mpic"; 33 33 #interrupt-cells = <1>; 34 - #address-cells = <1>; 35 34 #size-cells = <1>; 36 35 interrupt-controller; 37 36 }; ··· 53 54 reg = <0xd0012000 0x100>; 54 55 reg-shift = <2>; 55 56 interrupts = <41>; 56 - reg-io-width = <4>; 57 + reg-io-width = <1>; 57 58 status = "disabled"; 58 59 }; 59 60 serial@d0012100 { ··· 61 62 reg = <0xd0012100 0x100>; 62 63 reg-shift = <2>; 63 64 interrupts = <42>; 64 - reg-io-width = <4>; 65 + reg-io-width = <1>; 65 66 status = "disabled"; 66 67 }; 67 68
+2 -2
arch/arm/boot/dts/armada-xp.dtsi
··· 46 46 reg = <0xd0012200 0x100>; 47 47 reg-shift = <2>; 48 48 interrupts = <43>; 49 - reg-io-width = <4>; 49 + reg-io-width = <1>; 50 50 status = "disabled"; 51 51 }; 52 52 serial@d0012300 { ··· 54 54 reg = <0xd0012300 0x100>; 55 55 reg-shift = <2>; 56 56 interrupts = <44>; 57 - reg-io-width = <4>; 57 + reg-io-width = <1>; 58 58 status = "disabled"; 59 59 }; 60 60
+26 -2
arch/arm/boot/dts/at91sam9x5.dtsi
··· 238 238 nand { 239 239 pinctrl_nand: nand-0 { 240 240 atmel,pins = 241 - <3 4 0x0 0x1 /* PD5 gpio RDY pin pull_up */ 242 - 3 5 0x0 0x1>; /* PD4 gpio enable pin pull_up */ 241 + <3 0 0x1 0x0 /* PD0 periph A Read Enable */ 242 + 3 1 0x1 0x0 /* PD1 periph A Write Enable */ 243 + 3 2 0x1 0x0 /* PD2 periph A Address Latch Enable */ 244 + 3 3 0x1 0x0 /* PD3 periph A Command Latch Enable */ 245 + 3 4 0x0 0x1 /* PD4 gpio Chip Enable pin pull_up */ 246 + 3 5 0x0 0x1 /* PD5 gpio RDY/BUSY pin pull_up */ 247 + 3 6 0x1 0x0 /* PD6 periph A Data bit 0 */ 248 + 3 7 0x1 0x0 /* PD7 periph A Data bit 1 */ 249 + 3 8 0x1 0x0 /* PD8 periph A Data bit 2 */ 250 + 3 9 0x1 0x0 /* PD9 periph A Data bit 3 */ 251 + 3 10 0x1 0x0 /* PD10 periph A Data bit 4 */ 252 + 3 11 0x1 0x0 /* PD11 periph A Data bit 5 */ 253 + 3 12 0x1 0x0 /* PD12 periph A Data bit 6 */ 254 + 3 13 0x1 0x0>; /* PD13 periph A Data bit 7 */ 255 + }; 256 + 257 + pinctrl_nand_16bits: nand_16bits-0 { 258 + atmel,pins = 259 + <3 14 0x1 0x0 /* PD14 periph A Data bit 8 */ 260 + 3 15 0x1 0x0 /* PD15 periph A Data bit 9 */ 261 + 3 16 0x1 0x0 /* PD16 periph A Data bit 10 */ 262 + 3 17 0x1 0x0 /* PD17 periph A Data bit 11 */ 263 + 3 18 0x1 0x0 /* PD18 periph A Data bit 12 */ 264 + 3 19 0x1 0x0 /* PD19 periph A Data bit 13 */ 265 + 3 20 0x1 0x0 /* PD20 periph A Data bit 14 */ 266 + 3 21 0x1 0x0>; /* PD21 periph A Data bit 15 */ 243 267 }; 244 268 }; 245 269
+1 -1
arch/arm/boot/dts/bcm2835.dtsi
··· 105 105 compatible = "fixed-clock"; 106 106 reg = <1>; 107 107 #clock-cells = <0>; 108 - clock-frequency = <150000000>; 108 + clock-frequency = <250000000>; 109 109 }; 110 110 }; 111 111 };
+1 -2
arch/arm/boot/dts/dbx5x0.dtsi
··· 319 319 }; 320 320 }; 321 321 322 - ab8500@5 { 322 + ab8500 { 323 323 compatible = "stericsson,ab8500"; 324 - reg = <5>; /* mailbox 5 is i2c */ 325 324 interrupt-parent = <&intc>; 326 325 interrupts = <0 40 0x4>; 327 326 interrupt-controller;
+5
arch/arm/boot/dts/dove.dtsi
··· 197 197 status = "disabled"; 198 198 }; 199 199 200 + rtc@d8500 { 201 + compatible = "marvell,orion-rtc"; 202 + reg = <0xd8500 0x20>; 203 + }; 204 + 200 205 crypto: crypto@30000 { 201 206 compatible = "marvell,orion-crypto"; 202 207 reg = <0x30000 0x10000>,
+9
arch/arm/boot/dts/exynos4.dtsi
··· 275 275 compatible = "arm,pl330", "arm,primecell"; 276 276 reg = <0x12680000 0x1000>; 277 277 interrupts = <0 35 0>; 278 + #dma-cells = <1>; 279 + #dma-channels = <8>; 280 + #dma-requests = <32>; 278 281 }; 279 282 280 283 pdma1: pdma@12690000 { 281 284 compatible = "arm,pl330", "arm,primecell"; 282 285 reg = <0x12690000 0x1000>; 283 286 interrupts = <0 36 0>; 287 + #dma-cells = <1>; 288 + #dma-channels = <8>; 289 + #dma-requests = <32>; 284 290 }; 285 291 286 292 mdma1: mdma@12850000 { 287 293 compatible = "arm,pl330", "arm,primecell"; 288 294 reg = <0x12850000 0x1000>; 289 295 interrupts = <0 34 0>; 296 + #dma-cells = <1>; 297 + #dma-channels = <8>; 298 + #dma-requests = <1>; 290 299 }; 291 300 }; 292 301 };
+6
arch/arm/boot/dts/exynos5440.dtsi
··· 142 142 compatible = "arm,pl330", "arm,primecell"; 143 143 reg = <0x120000 0x1000>; 144 144 interrupts = <0 34 0>; 145 + #dma-cells = <1>; 146 + #dma-channels = <8>; 147 + #dma-requests = <32>; 145 148 }; 146 149 147 150 pdma1: pdma@121B0000 { 148 151 compatible = "arm,pl330", "arm,primecell"; 149 152 reg = <0x121000 0x1000>; 150 153 interrupts = <0 35 0>; 154 + #dma-cells = <1>; 155 + #dma-channels = <8>; 156 + #dma-requests = <32>; 151 157 }; 152 158 }; 153 159
+1 -1
arch/arm/boot/dts/href.dtsi
··· 221 221 }; 222 222 }; 223 223 224 - ab8500@5 { 224 + ab8500 { 225 225 ab8500-regulators { 226 226 ab8500_ldo_aux1_reg: ab8500_ldo_aux1 { 227 227 regulator-name = "V-DISPLAY";
+1 -1
arch/arm/boot/dts/hrefv60plus.dts
··· 158 158 }; 159 159 }; 160 160 161 - ab8500@5 { 161 + ab8500 { 162 162 ab8500-regulators { 163 163 ab8500_ldo_aux1_reg: ab8500_ldo_aux1 { 164 164 regulator-name = "V-DISPLAY";
+1 -2
arch/arm/boot/dts/imx53-mba53.dts
··· 42 42 fsl,pins = <689 0x10000 /* DISP1_DRDY */ 43 43 482 0x10000 /* DISP1_HSYNC */ 44 44 489 0x10000 /* DISP1_VSYNC */ 45 - 684 0x10000 /* DISP1_DAT_0 */ 46 45 515 0x10000 /* DISP1_DAT_22 */ 47 46 523 0x10000 /* DISP1_DAT_23 */ 48 - 543 0x10000 /* DISP1_DAT_21 */ 47 + 545 0x10000 /* DISP1_DAT_21 */ 49 48 553 0x10000 /* DISP1_DAT_20 */ 50 49 558 0x10000 /* DISP1_DAT_19 */ 51 50 564 0x10000 /* DISP1_DAT_18 */
-2
arch/arm/boot/dts/kirkwood-dns320.dts
··· 42 42 43 43 ocp@f1000000 { 44 44 serial@12000 { 45 - clock-frequency = <166666667>; 46 45 status = "okay"; 47 46 }; 48 47 49 48 serial@12100 { 50 - clock-frequency = <166666667>; 51 49 status = "okay"; 52 50 }; 53 51 };
-1
arch/arm/boot/dts/kirkwood-dns325.dts
··· 50 50 }; 51 51 }; 52 52 serial@12000 { 53 - clock-frequency = <200000000>; 54 53 status = "okay"; 55 54 }; 56 55 };
-1
arch/arm/boot/dts/kirkwood-dockstar.dts
··· 37 37 }; 38 38 }; 39 39 serial@12000 { 40 - clock-frequency = <200000000>; 41 40 status = "ok"; 42 41 }; 43 42
-1
arch/arm/boot/dts/kirkwood-dreamplug.dts
··· 38 38 }; 39 39 }; 40 40 serial@12000 { 41 - clock-frequency = <200000000>; 42 41 status = "ok"; 43 42 }; 44 43
-1
arch/arm/boot/dts/kirkwood-goflexnet.dts
··· 73 73 }; 74 74 }; 75 75 serial@12000 { 76 - clock-frequency = <200000000>; 77 76 status = "ok"; 78 77 }; 79 78
-1
arch/arm/boot/dts/kirkwood-ib62x0.dts
··· 51 51 }; 52 52 }; 53 53 serial@12000 { 54 - clock-frequency = <200000000>; 55 54 status = "okay"; 56 55 }; 57 56
-1
arch/arm/boot/dts/kirkwood-iconnect.dts
··· 78 78 }; 79 79 }; 80 80 serial@12000 { 81 - clock-frequency = <200000000>; 82 81 status = "ok"; 83 82 }; 84 83
-1
arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
··· 115 115 }; 116 116 117 117 serial@12000 { 118 - clock-frequency = <200000000>; 119 118 status = "ok"; 120 119 }; 121 120
-1
arch/arm/boot/dts/kirkwood-km_kirkwood.dts
··· 34 34 }; 35 35 36 36 serial@12000 { 37 - clock-frequency = <200000000>; 38 37 status = "ok"; 39 38 }; 40 39
-1
arch/arm/boot/dts/kirkwood-lschlv2.dts
··· 13 13 14 14 ocp@f1000000 { 15 15 serial@12000 { 16 - clock-frequency = <166666667>; 17 16 status = "okay"; 18 17 }; 19 18 };
-1
arch/arm/boot/dts/kirkwood-lsxhl.dts
··· 13 13 14 14 ocp@f1000000 { 15 15 serial@12000 { 16 - clock-frequency = <200000000>; 17 16 status = "okay"; 18 17 }; 19 18 };
-1
arch/arm/boot/dts/kirkwood-mplcec4.dts
··· 90 90 }; 91 91 92 92 serial@12000 { 93 - clock-frequency = <200000000>; 94 93 status = "ok"; 95 94 }; 96 95
-1
arch/arm/boot/dts/kirkwood-ns2-common.dtsi
··· 23 23 }; 24 24 25 25 serial@12000 { 26 - clock-frequency = <166666667>; 27 26 status = "okay"; 28 27 }; 29 28
-1
arch/arm/boot/dts/kirkwood-nsa310.dts
··· 117 117 }; 118 118 119 119 serial@12000 { 120 - clock-frequency = <200000000>; 121 120 status = "ok"; 122 121 }; 123 122
-2
arch/arm/boot/dts/kirkwood-openblocks_a6.dts
··· 18 18 19 19 ocp@f1000000 { 20 20 serial@12000 { 21 - clock-frequency = <200000000>; 22 21 status = "ok"; 23 22 }; 24 23 25 24 serial@12100 { 26 - clock-frequency = <200000000>; 27 25 status = "ok"; 28 26 }; 29 27
-1
arch/arm/boot/dts/kirkwood-topkick.dts
··· 108 108 }; 109 109 110 110 serial@12000 { 111 - clock-frequency = <200000000>; 112 111 status = "ok"; 113 112 }; 114 113
+3 -2
arch/arm/boot/dts/kirkwood.dtsi
··· 38 38 interrupt-controller; 39 39 #interrupt-cells = <2>; 40 40 interrupts = <35>, <36>, <37>, <38>; 41 + clocks = <&gate_clk 7>; 41 42 }; 42 43 43 44 gpio1: gpio@10140 { ··· 50 49 interrupt-controller; 51 50 #interrupt-cells = <2>; 52 51 interrupts = <39>, <40>, <41>; 52 + clocks = <&gate_clk 7>; 53 53 }; 54 54 55 55 serial@12000 { ··· 59 57 reg-shift = <2>; 60 58 interrupts = <33>; 61 59 clocks = <&gate_clk 7>; 62 - /* set clock-frequency in board dts */ 63 60 status = "disabled"; 64 61 }; 65 62 ··· 68 67 reg-shift = <2>; 69 68 interrupts = <34>; 70 69 clocks = <&gate_clk 7>; 71 - /* set clock-frequency in board dts */ 72 70 status = "disabled"; 73 71 }; 74 72 ··· 75 75 compatible = "marvell,kirkwood-rtc", "marvell,orion-rtc"; 76 76 reg = <0x10300 0x20>; 77 77 interrupts = <53>; 78 + clocks = <&gate_clk 7>; 78 79 }; 79 80 80 81 spi@10600 {
+1 -1
arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts
··· 11 11 12 12 / { 13 13 model = "LaCie Ethernet Disk mini V2"; 14 - compatible = "lacie,ethernet-disk-mini-v2", "marvell-orion5x-88f5182", "marvell,orion5x"; 14 + compatible = "lacie,ethernet-disk-mini-v2", "marvell,orion5x-88f5182", "marvell,orion5x"; 15 15 16 16 memory { 17 17 reg = <0x00000000 0x4000000>; /* 64 MB */
+1 -1
arch/arm/boot/dts/snowball.dts
··· 298 298 }; 299 299 }; 300 300 301 - ab8500@5 { 301 + ab8500 { 302 302 ab8500-regulators { 303 303 ab8500_ldo_aux1_reg: ab8500_ldo_aux1 { 304 304 regulator-name = "V-DISPLAY";
+3
arch/arm/boot/dts/socfpga.dtsi
··· 75 75 compatible = "arm,pl330", "arm,primecell"; 76 76 reg = <0xffe01000 0x1000>; 77 77 interrupts = <0 180 4>; 78 + #dma-cells = <1>; 79 + #dma-channels = <8>; 80 + #dma-requests = <32>; 78 81 }; 79 82 }; 80 83
+1
arch/arm/boot/dts/tegra20.dtsi
··· 118 118 compatible = "arm,cortex-a9-twd-timer"; 119 119 reg = <0x50040600 0x20>; 120 120 interrupts = <1 13 0x304>; 121 + clocks = <&tegra_car 132>; 121 122 }; 122 123 123 124 intc: interrupt-controller {
+1
arch/arm/boot/dts/tegra30.dtsi
··· 119 119 compatible = "arm,cortex-a9-twd-timer"; 120 120 reg = <0x50040600 0x20>; 121 121 interrupts = <1 13 0xf04>; 122 + clocks = <&tegra_car 214>; 122 123 }; 123 124 124 125 intc: interrupt-controller {
+1
arch/arm/configs/mxs_defconfig
··· 116 116 CONFIG_SND_MXS_SOC=y 117 117 CONFIG_SND_SOC_MXS_SGTL5000=y 118 118 CONFIG_USB=y 119 + CONFIG_USB_EHCI_HCD=y 119 120 CONFIG_USB_CHIPIDEA=y 120 121 CONFIG_USB_CHIPIDEA_HOST=y 121 122 CONFIG_USB_STORAGE=y
+2
arch/arm/configs/omap2plus_defconfig
··· 126 126 CONFIG_INPUT_TWL4030_PWRBUTTON=y 127 127 CONFIG_VT_HW_CONSOLE_BINDING=y 128 128 # CONFIG_LEGACY_PTYS is not set 129 + CONFIG_SERIAL_8250=y 130 + CONFIG_SERIAL_8250_CONSOLE=y 129 131 CONFIG_SERIAL_8250_NR_UARTS=32 130 132 CONFIG_SERIAL_8250_EXTENDED=y 131 133 CONFIG_SERIAL_8250_MANY_PORTS=y
+4 -21
arch/arm/include/asm/xen/events.h
··· 2 2 #define _ASM_ARM_XEN_EVENTS_H 3 3 4 4 #include <asm/ptrace.h> 5 + #include <asm/atomic.h> 5 6 6 7 enum ipi_vector { 7 8 XEN_PLACEHOLDER_VECTOR, ··· 16 15 return raw_irqs_disabled_flags(regs->ARM_cpsr); 17 16 } 18 17 19 - /* 20 - * We cannot use xchg because it does not support 8-byte 21 - * values. However it is safe to use {ldr,dtd}exd directly because all 22 - * platforms which Xen can run on support those instructions. 23 - */ 24 - static inline xen_ulong_t xchg_xen_ulong(xen_ulong_t *ptr, xen_ulong_t val) 25 - { 26 - xen_ulong_t oldval; 27 - unsigned int tmp; 28 - 29 - wmb(); 30 - asm volatile("@ xchg_xen_ulong\n" 31 - "1: ldrexd %0, %H0, [%3]\n" 32 - " strexd %1, %2, %H2, [%3]\n" 33 - " teq %1, #0\n" 34 - " bne 1b" 35 - : "=&r" (oldval), "=&r" (tmp) 36 - : "r" (val), "r" (ptr) 37 - : "memory", "cc"); 38 - return oldval; 39 - } 18 + #define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \ 19 + atomic64_t, \ 20 + counter), (val)) 40 21 41 22 #endif /* _ASM_ARM_XEN_EVENTS_H */
+1 -1
arch/arm/kernel/smp.c
··· 480 480 evt->features = CLOCK_EVT_FEAT_ONESHOT | 481 481 CLOCK_EVT_FEAT_PERIODIC | 482 482 CLOCK_EVT_FEAT_DUMMY; 483 - evt->rating = 400; 483 + evt->rating = 100; 484 484 evt->mult = 1; 485 485 evt->set_mode = broadcast_timer_set_mode; 486 486
+13 -20
arch/arm/lib/memset.S
··· 14 14 15 15 .text 16 16 .align 5 17 - .word 0 18 - 19 - 1: subs r2, r2, #4 @ 1 do we have enough 20 - blt 5f @ 1 bytes to align with? 21 - cmp r3, #2 @ 1 22 - strltb r1, [ip], #1 @ 1 23 - strleb r1, [ip], #1 @ 1 24 - strb r1, [ip], #1 @ 1 25 - add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) 26 - /* 27 - * The pointer is now aligned and the length is adjusted. Try doing the 28 - * memset again. 29 - */ 30 17 31 18 ENTRY(memset) 32 - /* 33 - * Preserve the contents of r0 for the return value. 34 - */ 35 - mov ip, r0 36 - ands r3, ip, #3 @ 1 unaligned? 37 - bne 1b @ 1 19 + ands r3, r0, #3 @ 1 unaligned? 20 + mov ip, r0 @ preserve r0 as return value 21 + bne 6f @ 1 38 22 /* 39 23 * we know that the pointer in ip is aligned to a word boundary. 40 24 */ 41 - orr r1, r1, r1, lsl #8 25 + 1: orr r1, r1, r1, lsl #8 42 26 orr r1, r1, r1, lsl #16 43 27 mov r3, r1 44 28 cmp r2, #16 ··· 111 127 tst r2, #1 112 128 strneb r1, [ip], #1 113 129 mov pc, lr 130 + 131 + 6: subs r2, r2, #4 @ 1 do we have enough 132 + blt 5b @ 1 bytes to align with? 133 + cmp r3, #2 @ 1 134 + strltb r1, [ip], #1 @ 1 135 + strleb r1, [ip], #1 @ 1 136 + strb r1, [ip], #1 @ 1 137 + add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) 138 + b 1b 114 139 ENDPROC(memset)
+1
arch/arm/mach-at91/board-foxg20.c
··· 176 176 /* If you choose to use a pin other than PB16 it needs to be 3.3V */ 177 177 .pin = AT91_PIN_PB16, 178 178 .is_open_drain = 1, 179 + .ext_pullup_enable_pin = -EINVAL, 179 180 }; 180 181 181 182 static struct platform_device w1_device = {
+1
arch/arm/mach-at91/board-stamp9g20.c
··· 188 188 static struct w1_gpio_platform_data w1_gpio_pdata = { 189 189 .pin = AT91_PIN_PA29, 190 190 .is_open_drain = 1, 191 + .ext_pullup_enable_pin = -EINVAL, 191 192 }; 192 193 193 194 static struct platform_device w1_device = {
+8
arch/arm/mach-at91/include/mach/gpio.h
··· 209 209 extern void at91_gpio_suspend(void); 210 210 extern void at91_gpio_resume(void); 211 211 212 + #ifdef CONFIG_PINCTRL_AT91 213 + extern void at91_pinctrl_gpio_suspend(void); 214 + extern void at91_pinctrl_gpio_resume(void); 215 + #else 216 + static inline void at91_pinctrl_gpio_suspend(void) {} 217 + static inline void at91_pinctrl_gpio_resume(void) {} 218 + #endif 219 + 212 220 #endif /* __ASSEMBLY__ */ 213 221 214 222 #endif
+8 -12
arch/arm/mach-at91/irq.c
··· 92 92 93 93 void at91_irq_suspend(void) 94 94 { 95 - int i = 0, bit; 95 + int bit = -1; 96 96 97 97 if (has_aic5()) { 98 98 /* disable enabled irqs */ 99 - while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) { 99 + while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) { 100 100 at91_aic_write(AT91_AIC5_SSR, 101 101 bit & AT91_AIC5_INTSEL_MSK); 102 102 at91_aic_write(AT91_AIC5_IDCR, 1); 103 - i = bit; 104 103 } 105 104 /* enable wakeup irqs */ 106 - i = 0; 107 - while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) { 105 + bit = -1; 106 + while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) { 108 107 at91_aic_write(AT91_AIC5_SSR, 109 108 bit & AT91_AIC5_INTSEL_MSK); 110 109 at91_aic_write(AT91_AIC5_IECR, 1); 111 - i = bit; 112 110 } 113 111 } else { 114 112 at91_aic_write(AT91_AIC_IDCR, *backups); ··· 116 118 117 119 void at91_irq_resume(void) 118 120 { 119 - int i = 0, bit; 121 + int bit = -1; 120 122 121 123 if (has_aic5()) { 122 124 /* disable wakeup irqs */ 123 - while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) { 125 + while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) { 124 126 at91_aic_write(AT91_AIC5_SSR, 125 127 bit & AT91_AIC5_INTSEL_MSK); 126 128 at91_aic_write(AT91_AIC5_IDCR, 1); 127 - i = bit; 128 129 } 129 130 /* enable irqs disabled for suspend */ 130 - i = 0; 131 - while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) { 131 + bit = -1; 132 + while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) { 132 133 at91_aic_write(AT91_AIC5_SSR, 133 134 bit & AT91_AIC5_INTSEL_MSK); 134 135 at91_aic_write(AT91_AIC5_IECR, 1); 135 - i = bit; 136 136 } 137 137 } else { 138 138 at91_aic_write(AT91_AIC_IDCR, *wakeups);
+8 -2
arch/arm/mach-at91/pm.c
··· 201 201 202 202 static int at91_pm_enter(suspend_state_t state) 203 203 { 204 - at91_gpio_suspend(); 204 + if (of_have_populated_dt()) 205 + at91_pinctrl_gpio_suspend(); 206 + else 207 + at91_gpio_suspend(); 205 208 at91_irq_suspend(); 206 209 207 210 pr_debug("AT91: PM - wake mask %08x, pm state %d\n", ··· 289 286 error: 290 287 target_state = PM_SUSPEND_ON; 291 288 at91_irq_resume(); 292 - at91_gpio_resume(); 289 + if (of_have_populated_dt()) 290 + at91_pinctrl_gpio_resume(); 291 + else 292 + at91_gpio_resume(); 293 293 return 0; 294 294 } 295 295
+3
arch/arm/mach-davinci/dma.c
··· 743 743 */ 744 744 int edma_alloc_slot(unsigned ctlr, int slot) 745 745 { 746 + if (!edma_cc[ctlr]) 747 + return -EINVAL; 748 + 746 749 if (slot >= 0) 747 750 slot = EDMA_CHAN_SLOT(slot); 748 751
+1
arch/arm/mach-footbridge/Kconfig
··· 67 67 select ISA 68 68 select ISA_DMA 69 69 select PCI 70 + select VIRT_TO_BUS 70 71 help 71 72 Say Y here if you intend to run this kernel on the Rebel.COM 72 73 NetWinder. Information about this machine can be found at:
+1
arch/arm/mach-imx/clk-imx35.c
··· 264 264 clk_prepare_enable(clk[gpio3_gate]); 265 265 clk_prepare_enable(clk[iim_gate]); 266 266 clk_prepare_enable(clk[emi_gate]); 267 + clk_prepare_enable(clk[max_gate]); 267 268 268 269 /* 269 270 * SCC is needed to boot via mmc after a watchdog reset. The clock code
+1 -1
arch/arm/mach-imx/clk-imx6q.c
··· 172 172 static struct clk_onecell_data clk_data; 173 173 174 174 static enum mx6q_clks const clks_init_on[] __initconst = { 175 - mmdc_ch0_axi, rom, 175 + mmdc_ch0_axi, rom, pll1_sys, 176 176 }; 177 177 178 178 static struct clk_div_table clk_enet_ref_table[] = {
+9 -9
arch/arm/mach-imx/headsmp.S
··· 26 26 27 27 #ifdef CONFIG_PM 28 28 /* 29 - * The following code is located into the .data section. This is to 30 - * allow phys_l2x0_saved_regs to be accessed with a relative load 31 - * as we are running on physical address here. 29 + * The following code must assume it is running from physical address 30 + * where absolute virtual addresses to the data section have to be 31 + * turned into relative ones. 32 32 */ 33 - .data 34 - .align 35 33 36 34 #ifdef CONFIG_CACHE_L2X0 37 35 .macro pl310_resume 38 - ldr r2, phys_l2x0_saved_regs 36 + adr r0, l2x0_saved_regs_offset 37 + ldr r2, [r0] 38 + add r2, r2, r0 39 39 ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0 40 40 ldr r1, [r2, #L2X0_R_AUX_CTRL] @ get aux_ctrl value 41 41 str r1, [r0, #L2X0_AUX_CTRL] @ restore aux_ctrl ··· 43 43 str r1, [r0, #L2X0_CTRL] @ re-enable L2 44 44 .endm 45 45 46 - .globl phys_l2x0_saved_regs 47 - phys_l2x0_saved_regs: 48 - .long 0 46 + l2x0_saved_regs_offset: 47 + .word l2x0_saved_regs - . 48 + 49 49 #else 50 50 .macro pl310_resume 51 51 .endm
+5
arch/arm/mach-imx/imx25-dt.c
··· 27 27 NULL 28 28 }; 29 29 30 + static void __init imx25_timer_init(void) 31 + { 32 + mx25_clocks_init_dt(); 33 + } 34 + 30 35 DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)") 31 36 .map_io = mx25_map_io, 32 37 .init_early = imx25_init_early,
-15
arch/arm/mach-imx/pm-imx6q.c
··· 22 22 #include "common.h" 23 23 #include "hardware.h" 24 24 25 - extern unsigned long phys_l2x0_saved_regs; 26 - 27 25 static int imx6q_suspend_finish(unsigned long val) 28 26 { 29 27 cpu_do_idle(); ··· 55 57 56 58 void __init imx6q_pm_init(void) 57 59 { 58 - /* 59 - * The l2x0 core code provides an infrastucture to save and restore 60 - * l2x0 registers across suspend/resume cycle. But because imx6q 61 - * retains L2 content during suspend and needs to resume L2 before 62 - * MMU is enabled, it can only utilize register saving support and 63 - * have to take care of restoring on its own. So we save physical 64 - * address of the data structure used by l2x0 core to save registers, 65 - * and later restore the necessary ones in imx6q resume entry. 66 - */ 67 - #ifdef CONFIG_CACHE_L2X0 68 - phys_l2x0_saved_regs = __pa(&l2x0_saved_regs); 69 - #endif 70 - 71 60 suspend_set_ops(&imx6q_pm_ops); 72 61 }
+1
arch/arm/mach-ixp4xx/vulcan-setup.c
··· 163 163 164 164 static struct w1_gpio_platform_data vulcan_w1_gpio_pdata = { 165 165 .pin = 14, 166 + .ext_pullup_enable_pin = -EINVAL, 166 167 }; 167 168 168 169 static struct platform_device vulcan_w1_gpio = {
+18 -7
arch/arm/mach-kirkwood/board-dt.c
··· 41 41 42 42 struct device_node *np = of_find_compatible_node( 43 43 NULL, NULL, "marvell,kirkwood-gating-clock"); 44 - 45 44 struct of_phandle_args clkspec; 45 + struct clk *clk; 46 46 47 47 clkspec.np = np; 48 48 clkspec.args_count = 1; 49 - 50 - clkspec.args[0] = CGC_BIT_GE0; 51 - orion_clkdev_add(NULL, "mv643xx_eth_port.0", 52 - of_clk_get_from_provider(&clkspec)); 53 49 54 50 clkspec.args[0] = CGC_BIT_PEX0; 55 51 orion_clkdev_add("0", "pcie", ··· 55 59 orion_clkdev_add("1", "pcie", 56 60 of_clk_get_from_provider(&clkspec)); 57 61 58 - clkspec.args[0] = CGC_BIT_GE1; 59 - orion_clkdev_add(NULL, "mv643xx_eth_port.1", 62 + clkspec.args[0] = CGC_BIT_SDIO; 63 + orion_clkdev_add(NULL, "mvsdio", 60 64 of_clk_get_from_provider(&clkspec)); 65 + 66 + /* 67 + * The ethernet interfaces forget the MAC address assigned by 68 + * u-boot if the clocks are turned off. Until proper DT support 69 + * is available we always enable them for now. 70 + */ 71 + clkspec.args[0] = CGC_BIT_GE0; 72 + clk = of_clk_get_from_provider(&clkspec); 73 + orion_clkdev_add(NULL, "mv643xx_eth_port.0", clk); 74 + clk_prepare_enable(clk); 75 + 76 + clkspec.args[0] = CGC_BIT_GE1; 77 + clk = of_clk_get_from_provider(&clkspec); 78 + orion_clkdev_add(NULL, "mv643xx_eth_port.1", clk); 79 + clk_prepare_enable(clk); 61 80 } 62 81 63 82 static void __init kirkwood_of_clk_init(void)
+1
arch/arm/mach-mmp/gplugd.c
··· 9 9 */ 10 10 11 11 #include <linux/init.h> 12 + #include <linux/platform_device.h> 12 13 #include <linux/gpio.h> 13 14 14 15 #include <asm/mach/arch.h>
+1 -1
arch/arm/mach-mxs/icoll.c
··· 100 100 .xlate = irq_domain_xlate_onecell, 101 101 }; 102 102 103 - void __init icoll_of_init(struct device_node *np, 103 + static void __init icoll_of_init(struct device_node *np, 104 104 struct device_node *interrupt_parent) 105 105 { 106 106 /*
+5 -5
arch/arm/mach-mxs/mach-mxs.c
··· 402 402 { 403 403 enable_clk_enet_out(); 404 404 update_fec_mac_prop(OUI_CRYSTALFONTZ); 405 + 406 + mxsfb_pdata.mode_list = cfa10049_video_modes; 407 + mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes); 408 + mxsfb_pdata.default_bpp = 32; 409 + mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; 405 410 } 406 411 407 412 static void __init cfa10037_init(void) 408 413 { 409 414 enable_clk_enet_out(); 410 415 update_fec_mac_prop(OUI_CRYSTALFONTZ); 411 - 412 - mxsfb_pdata.mode_list = cfa10049_video_modes; 413 - mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes); 414 - mxsfb_pdata.default_bpp = 32; 415 - mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; 416 416 } 417 417 418 418 static void __init apf28_init(void)
+1
arch/arm/mach-mxs/mm.c
··· 18 18 19 19 #include <mach/mx23.h> 20 20 #include <mach/mx28.h> 21 + #include <mach/common.h> 21 22 22 23 /* 23 24 * Define the MX23 memory map.
+1
arch/arm/mach-mxs/ocotp.c
··· 19 19 #include <asm/processor.h> /* for cpu_relax() */ 20 20 21 21 #include <mach/mxs.h> 22 + #include <mach/common.h> 22 23 23 24 #define OCOTP_WORD_OFFSET 0x20 24 25 #define OCOTP_WORD_COUNT 0x20
+2
arch/arm/mach-omap1/common.h
··· 31 31 32 32 #include <plat/i2c.h> 33 33 34 + #include <mach/irqs.h> 35 + 34 36 #if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850) 35 37 void omap7xx_map_io(void); 36 38 #else
-6
arch/arm/mach-omap2/Kconfig
··· 311 311 default y 312 312 select OMAP_PACKAGE_CBB 313 313 select REGULATOR_FIXED_VOLTAGE if REGULATOR 314 - select SERIAL_8250 315 - select SERIAL_8250_CONSOLE 316 - select SERIAL_CORE_CONSOLE 317 314 318 315 config MACH_OMAP_ZOOM3 319 316 bool "OMAP3630 Zoom3 board" ··· 318 321 default y 319 322 select OMAP_PACKAGE_CBP 320 323 select REGULATOR_FIXED_VOLTAGE if REGULATOR 321 - select SERIAL_8250 322 - select SERIAL_8250_CONSOLE 323 - select SERIAL_CORE_CONSOLE 324 324 325 325 config MACH_CM_T35 326 326 bool "CompuLab CM-T35/CM-T3730 modules"
+2
arch/arm/mach-omap2/board-generic.c
··· 102 102 .init_irq = omap_intc_of_init, 103 103 .handle_irq = omap3_intc_handle_irq, 104 104 .init_machine = omap_generic_init, 105 + .init_late = omap3_init_late, 105 106 .init_time = omap3_sync32k_timer_init, 106 107 .dt_compat = omap3_boards_compat, 107 108 .restart = omap3xxx_restart, ··· 120 119 .init_irq = omap_intc_of_init, 121 120 .handle_irq = omap3_intc_handle_irq, 122 121 .init_machine = omap_generic_init, 122 + .init_late = omap3_init_late, 123 123 .init_time = omap3_secure_sync32k_timer_init, 124 124 .dt_compat = omap3_gp_boards_compat, 125 125 .restart = omap3xxx_restart,
+2
arch/arm/mach-omap2/board-rx51.c
··· 17 17 #include <linux/io.h> 18 18 #include <linux/gpio.h> 19 19 #include <linux/leds.h> 20 + #include <linux/usb/phy.h> 20 21 #include <linux/usb/musb.h> 21 22 #include <linux/platform_data/spi-omap2-mcspi.h> 22 23 ··· 99 98 sdrc_params = nokia_get_sdram_timings(); 100 99 omap_sdrc_init(sdrc_params, sdrc_params); 101 100 101 + usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb"); 102 102 usb_musb_init(&musb_board_data); 103 103 rx51_peripherals_init(); 104 104
-1
arch/arm/mach-omap2/common.h
··· 108 108 void omap3630_init_late(void); 109 109 void am35xx_init_late(void); 110 110 void ti81xx_init_late(void); 111 - void omap4430_init_late(void); 112 111 int omap2_common_pm_late_init(void); 113 112 114 113 #if defined(CONFIG_SOC_OMAP2420) || defined(CONFIG_SOC_OMAP2430)
+3 -3
arch/arm/mach-omap2/gpmc.c
··· 1122 1122 /* TODO: remove, see function definition */ 1123 1123 gpmc_convert_ps_to_ns(gpmc_t); 1124 1124 1125 - /* Now the GPMC is initialised, unreserve the chip-selects */ 1126 - gpmc_cs_map = 0; 1127 - 1128 1125 return 0; 1129 1126 } 1130 1127 ··· 1379 1382 1380 1383 if (IS_ERR_VALUE(gpmc_setup_irq())) 1381 1384 dev_warn(gpmc_dev, "gpmc_setup_irq failed\n"); 1385 + 1386 + /* Now the GPMC is initialised, unreserve the chip-selects */ 1387 + gpmc_cs_map = 0; 1382 1388 1383 1389 rc = gpmc_probe_dt(pdev); 1384 1390 if (rc < 0) {
+5 -4
arch/arm/mach-omap2/mux.c
··· 211 211 return -EINVAL; 212 212 } 213 213 214 - pr_err("%s: Could not find signal %s\n", __func__, muxname); 215 - 216 214 return -ENODEV; 217 215 } 218 216 ··· 231 233 232 234 return mux_mode; 233 235 } 236 + 237 + pr_err("%s: Could not find signal %s\n", __func__, muxname); 234 238 235 239 return -ENODEV; 236 240 } ··· 739 739 list_for_each_entry(e, &partition->muxmodes, node) { 740 740 struct omap_mux *m = &e->mux; 741 741 742 - (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir, 743 - m, &omap_mux_dbg_signal_fops); 742 + (void)debugfs_create_file(m->muxnames[0], S_IWUSR | S_IRUGO, 743 + mux_dbg_dir, m, 744 + &omap_mux_dbg_signal_fops); 744 745 } 745 746 } 746 747
+1
arch/arm/mach-pxa/raumfeld.c
··· 505 505 .pin = GPIO_ONE_WIRE, 506 506 .is_open_drain = 0, 507 507 .enable_external_pullup = w1_enable_external_pullup, 508 + .ext_pullup_enable_pin = -EINVAL, 508 509 }; 509 510 510 511 struct platform_device raumfeld_w1_gpio_device = {
+18 -18
arch/arm/mach-s5pv210/clock.c
··· 214 214 .name = "pcmcdclk", 215 215 }; 216 216 217 - static struct clk dummy_apb_pclk = { 218 - .name = "apb_pclk", 219 - .id = -1, 220 - }; 221 - 222 217 static struct clk *clkset_vpllsrc_list[] = { 223 218 [0] = &clk_fin_vpll, 224 219 [1] = &clk_sclk_hdmi27m, ··· 300 305 301 306 static struct clk init_clocks_off[] = { 302 307 { 303 - .name = "dma", 304 - .devname = "dma-pl330.0", 305 - .parent = &clk_hclk_psys.clk, 306 - .enable = s5pv210_clk_ip0_ctrl, 307 - .ctrlbit = (1 << 3), 308 - }, { 309 - .name = "dma", 310 - .devname = "dma-pl330.1", 311 - .parent = &clk_hclk_psys.clk, 312 - .enable = s5pv210_clk_ip0_ctrl, 313 - .ctrlbit = (1 << 4), 314 - }, { 315 308 .name = "rot", 316 309 .parent = &clk_hclk_dsys.clk, 317 310 .enable = s5pv210_clk_ip0_ctrl, ··· 554 571 .parent = &clk_hclk_psys.clk, 555 572 .enable = s5pv210_clk_ip2_ctrl, 556 573 .ctrlbit = (1<<19), 574 + }; 575 + 576 + static struct clk clk_pdma0 = { 577 + .name = "pdma0", 578 + .parent = &clk_hclk_psys.clk, 579 + .enable = s5pv210_clk_ip0_ctrl, 580 + .ctrlbit = (1 << 3), 581 + }; 582 + 583 + static struct clk clk_pdma1 = { 584 + .name = "pdma1", 585 + .parent = &clk_hclk_psys.clk, 586 + .enable = s5pv210_clk_ip0_ctrl, 587 + .ctrlbit = (1 << 4), 557 588 }; 558 589 559 590 static struct clk *clkset_uart_list[] = { ··· 1072 1075 &clk_hsmmc1, 1073 1076 &clk_hsmmc2, 1074 1077 &clk_hsmmc3, 1078 + &clk_pdma0, 1079 + &clk_pdma1, 1075 1080 }; 1076 1081 1077 1082 /* Clock initialisation code */ ··· 1332 1333 CLKDEV_INIT(NULL, "spi_busclk0", &clk_p), 1333 1334 CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk), 1334 1335 CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk), 1336 + CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0), 1337 + CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1), 1335 1338 }; 1336 1339 1337 1340 void __init s5pv210_register_clocks(void) ··· 1362 1361 for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++) 1363 1362 s3c_disable_clocks(clk_cdev[ptr], 1); 1364 1363 
1365 - s3c24xx_register_clock(&dummy_apb_pclk); 1366 1364 s3c_pwmclk_init(); 1367 1365 }
+1 -1
arch/arm/mach-s5pv210/mach-goni.c
··· 845 845 .mux_id = 0, 846 846 .flags = V4L2_MBUS_PCLK_SAMPLE_FALLING | 847 847 V4L2_MBUS_VSYNC_ACTIVE_LOW, 848 - .bus_type = FIMC_BUS_TYPE_ITU_601, 848 + .fimc_bus_type = FIMC_BUS_TYPE_ITU_601, 849 849 .board_info = &noon010pc30_board_info, 850 850 .i2c_bus_num = 0, 851 851 .clk_frequency = 16000000UL,
+1
arch/arm/mach-shmobile/board-marzen.c
··· 32 32 #include <linux/smsc911x.h> 33 33 #include <linux/spi/spi.h> 34 34 #include <linux/spi/sh_hspi.h> 35 + #include <linux/mmc/host.h> 35 36 #include <linux/mmc/sh_mobile_sdhi.h> 36 37 #include <linux/mfd/tmio.h> 37 38 #include <linux/usb/otg.h>
+1 -1
arch/arm/mach-spear3xx/spear3xx.c
··· 14 14 #define pr_fmt(fmt) "SPEAr3xx: " fmt 15 15 16 16 #include <linux/amba/pl022.h> 17 - #include <linux/amba/pl08x.h> 17 + #include <linux/amba/pl080.h> 18 18 #include <linux/io.h> 19 19 #include <plat/pl080.h> 20 20 #include <mach/generic.h>
+3 -2
arch/arm/mm/dma-mapping.c
··· 342 342 { 343 343 struct dma_pool *pool = &atomic_pool; 344 344 pgprot_t prot = pgprot_dmacoherent(pgprot_kernel); 345 + gfp_t gfp = GFP_KERNEL | GFP_DMA; 345 346 unsigned long nr_pages = pool->size >> PAGE_SHIFT; 346 347 unsigned long *bitmap; 347 348 struct page *page; ··· 362 361 ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page, 363 362 atomic_pool_init); 364 363 else 365 - ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot, 366 - &page, atomic_pool_init); 364 + ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page, 365 + atomic_pool_init); 367 366 if (ptr) { 368 367 int i; 369 368
+1 -1
arch/arm/net/bpf_jit_32.c
··· 576 576 /* x = ((*(frame + k)) & 0xf) << 2; */ 577 577 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; 578 578 /* the interpreter should deal with the negative K */ 579 - if (k < 0) 579 + if ((int)k < 0) 580 580 return -1; 581 581 /* offset in r1: we might have to take the slow path */ 582 582 emit_mov_i(r_off, k, ctx);
+5 -2
arch/arm/plat-orion/addr-map.c
··· 157 157 u32 size = readl(ddr_window_cpu_base + DDR_SIZE_CS_OFF(i)); 158 158 159 159 /* 160 - * Chip select enabled? 160 + * We only take care of entries for which the chip 161 + * select is enabled, and that don't have high base 162 + * address bits set (devices can only access the first 163 + * 32 bits of the memory). 161 164 */ 162 - if (size & 1) { 165 + if ((size & 1) && !(base & 0xF)) { 163 166 struct mbus_dram_window *w; 164 167 165 168 w = &orion_mbus_dram_info.cs[cs++];
+1 -1
arch/arm/plat-spear/Kconfig
··· 10 10 11 11 config ARCH_SPEAR13XX 12 12 bool "ST SPEAr13xx with Device Tree" 13 - select ARCH_HAVE_CPUFREQ 13 + select ARCH_HAS_CPUFREQ 14 14 select ARM_GIC 15 15 select CPU_V7 16 16 select GPIO_SPEAR_SPICS
-1
arch/arm64/Kconfig
··· 9 9 select CLONE_BACKWARDS 10 10 select COMMON_CLK 11 11 select GENERIC_CLOCKEVENTS 12 - select GENERIC_HARDIRQS_NO_DEPRECATED 13 12 select GENERIC_IOMAP 14 13 select GENERIC_IRQ_PROBE 15 14 select GENERIC_IRQ_SHOW
-11
arch/arm64/Kconfig.debug
··· 6 6 bool 7 7 default y 8 8 9 - config DEBUG_ERRORS 10 - bool "Verbose kernel error messages" 11 - depends on DEBUG_KERNEL 12 - help 13 - This option controls verbose debugging information which can be 14 - printed when the kernel detects an internal error. This debugging 15 - information is useful to kernel hackers when tracking down problems, 16 - but mostly meaningless to other people. It's safe to say Y unless 17 - you are concerned with the code size or don't want to see these 18 - messages. 19 - 20 9 config DEBUG_STACK_USAGE 21 10 bool "Enable stack utilization instrumentation" 22 11 depends on DEBUG_KERNEL
-1
arch/arm64/configs/defconfig
··· 82 82 CONFIG_DEBUG_INFO=y 83 83 # CONFIG_FTRACE is not set 84 84 CONFIG_ATOMIC64_SELFTEST=y 85 - CONFIG_DEBUG_ERRORS=y
+1 -1
arch/arm64/include/asm/ucontext.h
··· 22 22 stack_t uc_stack; 23 23 sigset_t uc_sigmask; 24 24 /* glibc uses a 1024-bit sigset_t */ 25 - __u8 __unused[(1024 - sizeof(sigset_t)) / 8]; 25 + __u8 __unused[1024 / 8 - sizeof(sigset_t)]; 26 26 /* last for future expansion */ 27 27 struct sigcontext uc_mcontext; 28 28 };
+2
arch/arm64/kernel/arm64ksyms.c
··· 40 40 EXPORT_SYMBOL(__clear_user); 41 41 42 42 /* bitops */ 43 + #ifdef CONFIG_SMP 43 44 EXPORT_SYMBOL(__atomic_hash); 45 + #endif 44 46 45 47 /* physical memory */ 46 48 EXPORT_SYMBOL(memstart_addr);
-1
arch/arm64/kernel/signal32.c
··· 549 549 sigset_t *set, struct pt_regs *regs) 550 550 { 551 551 struct compat_rt_sigframe __user *frame; 552 - compat_stack_t stack; 553 552 int err = 0; 554 553 555 554 frame = compat_get_sigframe(ka, regs, sizeof(*frame));
+1 -1
arch/avr32/Kconfig
··· 7 7 select HAVE_OPROFILE 8 8 select HAVE_KPROBES 9 9 select HAVE_GENERIC_HARDIRQS 10 - select HAVE_VIRT_TO_BUS 10 + select VIRT_TO_BUS 11 11 select GENERIC_IRQ_PROBE 12 12 select GENERIC_ATOMIC64 13 13 select HARDIRQS_SW_RESEND
+1 -1
arch/blackfin/Kconfig
··· 33 33 select ARCH_HAVE_CUSTOM_GPIO_H 34 34 select ARCH_WANT_OPTIONAL_GPIOLIB 35 35 select HAVE_UID16 36 - select HAVE_VIRT_TO_BUS 36 + select VIRT_TO_BUS 37 37 select ARCH_WANT_IPC_PARSE_VERSION 38 38 select HAVE_GENERIC_HARDIRQS 39 39 select GENERIC_ATOMIC64
+1 -1
arch/cris/Kconfig
··· 43 43 select GENERIC_ATOMIC64 44 44 select HAVE_GENERIC_HARDIRQS 45 45 select HAVE_UID16 46 - select HAVE_VIRT_TO_BUS 46 + select VIRT_TO_BUS 47 47 select ARCH_WANT_IPC_PARSE_VERSION 48 48 select GENERIC_IRQ_SHOW 49 49 select GENERIC_IOMAP
+1 -1
arch/frv/Kconfig
··· 6 6 select HAVE_PERF_EVENTS 7 7 select HAVE_UID16 8 8 select HAVE_GENERIC_HARDIRQS 9 - select HAVE_VIRT_TO_BUS 9 + select VIRT_TO_BUS 10 10 select GENERIC_IRQ_SHOW 11 11 select HAVE_DEBUG_BUGVERBOSE 12 12 select ARCH_HAVE_NMI_SAFE_CMPXCHG
+1 -1
arch/h8300/Kconfig
··· 5 5 select HAVE_GENERIC_HARDIRQS 6 6 select GENERIC_ATOMIC64 7 7 select HAVE_UID16 8 - select HAVE_VIRT_TO_BUS 8 + select VIRT_TO_BUS 9 9 select ARCH_WANT_IPC_PARSE_VERSION 10 10 select GENERIC_IRQ_SHOW 11 11 select GENERIC_CPU_DEVICES
+1 -1
arch/ia64/Kconfig
··· 26 26 select HAVE_MEMBLOCK 27 27 select HAVE_MEMBLOCK_NODE_MAP 28 28 select HAVE_VIRT_CPU_ACCOUNTING 29 - select HAVE_VIRT_TO_BUS 29 + select VIRT_TO_BUS 30 30 select ARCH_DISCARD_MEMBLOCK 31 31 select GENERIC_IRQ_PROBE 32 32 select GENERIC_PENDING_IRQ if SMP
+1 -1
arch/m32r/Kconfig
··· 10 10 select ARCH_WANT_IPC_PARSE_VERSION 11 11 select HAVE_DEBUG_BUGVERBOSE 12 12 select HAVE_GENERIC_HARDIRQS 13 - select HAVE_VIRT_TO_BUS 13 + select VIRT_TO_BUS 14 14 select GENERIC_IRQ_PROBE 15 15 select GENERIC_IRQ_SHOW 16 16 select GENERIC_ATOMIC64
+2 -2
arch/m32r/include/uapi/asm/stat.h
··· 63 63 long long st_size; 64 64 unsigned long st_blksize; 65 65 66 - #if defined(__BIG_ENDIAN) 66 + #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) 67 67 unsigned long __pad4; /* future possible st_blocks high bits */ 68 68 unsigned long st_blocks; /* Number 512-byte blocks allocated. */ 69 - #elif defined(__LITTLE_ENDIAN) 69 + #elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) 70 70 unsigned long st_blocks; /* Number 512-byte blocks allocated. */ 71 71 unsigned long __pad4; /* future possible st_blocks high bits */ 72 72 #else
+1 -1
arch/m68k/Kconfig
··· 8 8 select GENERIC_IRQ_SHOW 9 9 select GENERIC_ATOMIC64 10 10 select HAVE_UID16 11 - select HAVE_VIRT_TO_BUS 11 + select VIRT_TO_BUS 12 12 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS 13 13 select GENERIC_CPU_DEVICES 14 14 select GENERIC_STRNCPY_FROM_USER if MMU
-1
arch/m68k/Kconfig.machine
··· 310 310 config SOM5282EM 311 311 bool "EMAC.Inc SOM5282EM board support" 312 312 depends on M528x 313 - select EMAC_INC 314 313 help 315 314 Support for the EMAC.Inc SOM5282EM module. 316 315
+5 -5
arch/m68k/include/asm/MC68328.h
··· 293 293 /* 294 294 * Here go the bitmasks themselves 295 295 */ 296 - #define IMR_MSPIM (1 << SPIM _IRQ_NUM) /* Mask SPI Master interrupt */ 296 + #define IMR_MSPIM (1 << SPIM_IRQ_NUM) /* Mask SPI Master interrupt */ 297 297 #define IMR_MTMR2 (1 << TMR2_IRQ_NUM) /* Mask Timer 2 interrupt */ 298 298 #define IMR_MUART (1 << UART_IRQ_NUM) /* Mask UART interrupt */ 299 299 #define IMR_MWDT (1 << WDT_IRQ_NUM) /* Mask Watchdog Timer interrupt */ ··· 327 327 #define IWR_ADDR 0xfffff308 328 328 #define IWR LONG_REF(IWR_ADDR) 329 329 330 - #define IWR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */ 330 + #define IWR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */ 331 331 #define IWR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */ 332 332 #define IWR_UART (1 << UART_IRQ_NUM) /* UART interrupt */ 333 333 #define IWR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */ ··· 357 357 #define ISR_ADDR 0xfffff30c 358 358 #define ISR LONG_REF(ISR_ADDR) 359 359 360 - #define ISR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */ 360 + #define ISR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */ 361 361 #define ISR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */ 362 362 #define ISR_UART (1 << UART_IRQ_NUM) /* UART interrupt */ 363 363 #define ISR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */ ··· 391 391 #define IPR_ADDR 0xfffff310 392 392 #define IPR LONG_REF(IPR_ADDR) 393 393 394 - #define IPR_SPIM (1 << SPIM _IRQ_NUM) /* SPI Master interrupt */ 394 + #define IPR_SPIM (1 << SPIM_IRQ_NUM) /* SPI Master interrupt */ 395 395 #define IPR_TMR2 (1 << TMR2_IRQ_NUM) /* Timer 2 interrupt */ 396 396 #define IPR_UART (1 << UART_IRQ_NUM) /* UART interrupt */ 397 397 #define IPR_WDT (1 << WDT_IRQ_NUM) /* Watchdog Timer interrupt */ ··· 757 757 758 758 /* 'EZ328-compatible definitions */ 759 759 #define TCN_ADDR TCN1_ADDR 760 - #define TCN TCN 760 + #define TCN TCN1 761 761 762 762 /* 763 763 * Timer Unit 1 and 2 Status Registers
+3
arch/m68k/kernel/setup_no.c
··· 57 57 void (*mach_halt)(void); 58 58 void (*mach_power_off)(void); 59 59 60 + #ifdef CONFIG_M68000 61 + #define CPU_NAME "MC68000" 62 + #endif 60 63 #ifdef CONFIG_M68328 61 64 #define CPU_NAME "MC68328" 62 65 #endif
+1 -1
arch/m68k/mm/init.c
··· 188 188 } 189 189 } 190 190 191 - #if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) 191 + #if defined(CONFIG_MMU) && !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) 192 192 /* insert pointer tables allocated so far into the tablelist */ 193 193 init_pointer_table((unsigned long)kernel_pg_dir); 194 194 for (i = 0; i < PTRS_PER_PGD; i++) {
+1 -1
arch/m68k/platform/coldfire/m528x.c
··· 69 69 u8 port; 70 70 71 71 /* make sure PUAPAR is set for UART0 and UART1 */ 72 - port = readb(MCF5282_GPIO_PUAPAR); 72 + port = readb(MCFGPIO_PUAPAR); 73 73 port |= 0x03 | (0x03 << 2); 74 74 writeb(port, MCFGPIO_PUAPAR); 75 75 }
+1 -1
arch/microblaze/Kconfig
··· 19 19 select HAVE_DEBUG_KMEMLEAK 20 20 select IRQ_DOMAIN 21 21 select HAVE_GENERIC_HARDIRQS 22 - select HAVE_VIRT_TO_BUS 22 + select VIRT_TO_BUS 23 23 select GENERIC_IRQ_PROBE 24 24 select GENERIC_IRQ_SHOW 25 25 select GENERIC_PCI_IOMAP
+1 -1
arch/mips/Kconfig
··· 38 38 select GENERIC_CLOCKEVENTS 39 39 select GENERIC_CMOS_UPDATE 40 40 select HAVE_MOD_ARCH_SPECIFIC 41 - select HAVE_VIRT_TO_BUS 41 + select VIRT_TO_BUS 42 42 select MODULES_USE_ELF_REL if MODULES 43 43 select MODULES_USE_ELF_RELA if MODULES && 64BIT 44 44 select CLONE_BACKWARDS
+1 -1
arch/mn10300/Kconfig
··· 8 8 select HAVE_ARCH_KGDB 9 9 select GENERIC_ATOMIC64 10 10 select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER 11 - select HAVE_VIRT_TO_BUS 11 + select VIRT_TO_BUS 12 12 select GENERIC_CLOCKEVENTS 13 13 select MODULES_USE_ELF_RELA 14 14 select OLD_SIGSUSPEND3
+1 -2
arch/openrisc/Kconfig
··· 9 9 select OF_EARLY_FLATTREE 10 10 select IRQ_DOMAIN 11 11 select HAVE_MEMBLOCK 12 - select ARCH_WANT_OPTIONAL_GPIOLIB 12 + select ARCH_REQUIRE_GPIOLIB 13 13 select HAVE_ARCH_TRACEHOOK 14 14 select HAVE_GENERIC_HARDIRQS 15 - select HAVE_VIRT_TO_BUS 16 15 select GENERIC_IRQ_CHIP 17 16 select GENERIC_IRQ_PROBE 18 17 select GENERIC_IRQ_SHOW
+1 -1
arch/parisc/Kconfig
··· 21 21 select GENERIC_STRNCPY_FROM_USER 22 22 select SYSCTL_ARCH_UNALIGN_ALLOW 23 23 select HAVE_MOD_ARCH_SPECIFIC 24 - select HAVE_VIRT_TO_BUS 24 + select VIRT_TO_BUS 25 25 select MODULES_USE_ELF_RELA 26 26 select CLONE_BACKWARDS 27 27 select TTY # Needed for pdc_cons.c
+2 -1
arch/powerpc/Kconfig
··· 90 90 config PPC 91 91 bool 92 92 default y 93 + select BINFMT_ELF 93 94 select OF 94 95 select OF_EARLY_FLATTREE 95 96 select HAVE_FTRACE_MCOUNT_RECORD ··· 99 98 select HAVE_FUNCTION_GRAPH_TRACER 100 99 select SYSCTL_EXCEPTION_TRACE 101 100 select ARCH_WANT_OPTIONAL_GPIOLIB 102 - select HAVE_VIRT_TO_BUS if !PPC64 101 + select VIRT_TO_BUS if !PPC64 103 102 select HAVE_IDE 104 103 select HAVE_IOREMAP_PROT 105 104 select HAVE_EFFICIENT_UNALIGNED_ACCESS
+66 -62
arch/powerpc/include/asm/mmu-hash64.h
··· 343 343 /* 344 344 * VSID allocation (256MB segment) 345 345 * 346 - * We first generate a 38-bit "proto-VSID". For kernel addresses this 347 - * is equal to the ESID | 1 << 37, for user addresses it is: 348 - * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1) 346 + * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated 347 + * from mmu context id and effective segment id of the address. 349 348 * 350 - * This splits the proto-VSID into the below range 351 - * 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range 352 - * 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range 353 - * 354 - * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1 355 - * That is, we assign half of the space to user processes and half 356 - * to the kernel. 349 + * For user processes max context id is limited to ((1ul << 19) - 5) 350 + * for kernel space, we use the top 4 context ids to map address as below 351 + * NOTE: each context only support 64TB now. 352 + * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ] 353 + * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ] 354 + * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ] 355 + * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ] 357 356 * 358 357 * The proto-VSIDs are then scrambled into real VSIDs with the 359 358 * multiplicative hash: ··· 362 363 * VSID_MULTIPLIER is prime, so in particular it is 363 364 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. 364 365 * Because the modulus is 2^n-1 we can compute it efficiently without 365 - * a divide or extra multiply (see below). 366 + * a divide or extra multiply (see below). The scramble function gives 367 + * robust scattering in the hash table (at least based on some initial 368 + * results). 366 369 * 367 - * This scheme has several advantages over older methods: 370 + * We also consider VSID 0 special. We use VSID 0 for slb entries mapping 371 + * bad address. 
This enables us to consolidate bad address handling in 372 + * hash_page. 368 373 * 369 - * - We have VSIDs allocated for every kernel address 370 - * (i.e. everything above 0xC000000000000000), except the very top 371 - * segment, which simplifies several things. 372 - * 373 - * - We allow for USER_ESID_BITS significant bits of ESID and 374 - * CONTEXT_BITS bits of context for user addresses. 375 - * i.e. 64T (46 bits) of address space for up to half a million contexts. 376 - * 377 - * - The scramble function gives robust scattering in the hash 378 - * table (at least based on some initial results). The previous 379 - * method was more susceptible to pathological cases giving excessive 380 - * hash collisions. 374 + * We also need to avoid the last segment of the last context, because that 375 + * would give a protovsid of 0x1fffffffff. That will result in a VSID 0 376 + * because of the modulo operation in vsid scramble. But the vmemmap 377 + * (which is what uses region 0xf) will never be close to 64TB in size 378 + * (it's 56 bytes per page of system memory). 381 379 */ 380 + 381 + #define CONTEXT_BITS 19 382 + #define ESID_BITS 18 383 + #define ESID_BITS_1T 6 384 + 385 + /* 386 + * 256MB segment 387 + * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments 388 + * available for user + kernel mapping. The top 4 contexts are used for 389 + * kernel mapping. Each segment contains 2^28 bytes. Each 390 + * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts 391 + * (19 == 37 + 28 - 46). 392 + */ 393 + #define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5) 382 394 383 395 /* 384 396 * This should be computed such that protovosid * vsid_mulitplier 385 397 * doesn't overflow 64 bits. 
It should also be co-prime to vsid_modulus 386 398 */ 387 399 #define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */ 388 - #define VSID_BITS_256M 38 400 + #define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS) 389 401 #define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1) 390 402 391 403 #define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */ 392 - #define VSID_BITS_1T 26 404 + #define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T) 393 405 #define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1) 394 406 395 - #define CONTEXT_BITS 19 396 - #define USER_ESID_BITS 18 397 - #define USER_ESID_BITS_1T 6 398 407 399 - #define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT)) 408 + #define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT)) 400 409 401 410 /* 402 411 * This macro generates asm code to compute the VSID scramble ··· 428 421 srdi rx,rt,VSID_BITS_##size; \ 429 422 clrldi rt,rt,(64-VSID_BITS_##size); \ 430 423 add rt,rt,rx; /* add high and low bits */ \ 431 - /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \ 424 + /* NOTE: explanation based on VSID_BITS_##size = 36 \ 425 + * Now, r3 == VSID (mod 2^36-1), and lies between 0 and \ 432 426 * 2^36-1+2^28-1. That in particular means that if r3 >= \ 433 427 * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \ 434 428 * the bit clear, r3 already has the answer we want, if it \ ··· 521 513 }) 522 514 #endif /* 1 */ 523 515 524 - /* 525 - * This is only valid for addresses >= PAGE_OFFSET 526 - * The proto-VSID space is divided into two class 527 - * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1 528 - * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1 529 - * 530 - * With KERNEL_START at 0xc000000000000000, the proto vsid for 531 - * the kernel ends up with 0xc00000000 (36 bits). With 64TB 532 - * support we need to have kernel proto-VSID in the 533 - * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS. 
534 - */ 535 - static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) 536 - { 537 - unsigned long proto_vsid; 538 - /* 539 - * We need to make sure proto_vsid for the kernel is 540 - * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T]) 541 - */ 542 - if (ssize == MMU_SEGSIZE_256M) { 543 - proto_vsid = ea >> SID_SHIFT; 544 - proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS)); 545 - return vsid_scramble(proto_vsid, 256M); 546 - } 547 - proto_vsid = ea >> SID_SHIFT_1T; 548 - proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T)); 549 - return vsid_scramble(proto_vsid, 1T); 550 - } 551 - 552 516 /* Returns the segment size indicator for a user address */ 553 517 static inline int user_segment_size(unsigned long addr) 554 518 { ··· 530 550 return MMU_SEGSIZE_256M; 531 551 } 532 552 533 - /* This is only valid for user addresses (which are below 2^44) */ 534 553 static inline unsigned long get_vsid(unsigned long context, unsigned long ea, 535 554 int ssize) 536 555 { 556 + /* 557 + * Bad address. 
We return VSID 0 for that 558 + */ 559 + if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) 560 + return 0; 561 + 537 562 if (ssize == MMU_SEGSIZE_256M) 538 - return vsid_scramble((context << USER_ESID_BITS) 563 + return vsid_scramble((context << ESID_BITS) 539 564 | (ea >> SID_SHIFT), 256M); 540 - return vsid_scramble((context << USER_ESID_BITS_1T) 565 + return vsid_scramble((context << ESID_BITS_1T) 541 566 | (ea >> SID_SHIFT_1T), 1T); 542 567 } 543 568 569 + /* 570 + * This is only valid for addresses >= PAGE_OFFSET 571 + * 572 + * For kernel space, we use the top 4 context ids to map address as below 573 + * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ] 574 + * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ] 575 + * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ] 576 + * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ] 577 + */ 578 + static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) 579 + { 580 + unsigned long context; 581 + 582 + /* 583 + * kernel take the top 4 context from the available range 584 + */ 585 + context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1; 586 + return get_vsid(context, ea, ssize); 587 + } 544 588 #endif /* __ASSEMBLY__ */ 545 589 546 590 #endif /* _ASM_POWERPC_MMU_HASH64_H_ */
+1 -1
arch/powerpc/kernel/cputable.c
··· 275 275 .cpu_features = CPU_FTRS_PPC970, 276 276 .cpu_user_features = COMMON_USER_POWER4 | 277 277 PPC_FEATURE_HAS_ALTIVEC_COMP, 278 - .mmu_features = MMU_FTR_HPTE_TABLE, 278 + .mmu_features = MMU_FTRS_PPC970, 279 279 .icache_bsize = 128, 280 280 .dcache_bsize = 128, 281 281 .num_pmcs = 8,
+25 -9
arch/powerpc/kernel/exceptions-64s.S
··· 1452 1452 _GLOBAL(do_stab_bolted) 1453 1453 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 1454 1454 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ 1455 + mfspr r11,SPRN_DAR /* ea */ 1455 1456 1457 + /* 1458 + * check for bad kernel/user address 1459 + * (ea & ~REGION_MASK) >= PGTABLE_RANGE 1460 + */ 1461 + rldicr. r9,r11,4,(63 - 46 - 4) 1462 + li r9,0 /* VSID = 0 for bad address */ 1463 + bne- 0f 1464 + 1465 + /* 1466 + * Calculate VSID: 1467 + * This is the kernel vsid, we take the top for context from 1468 + * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 1469 + * Here we know that (ea >> 60) == 0xc 1470 + */ 1471 + lis r9,(MAX_USER_CONTEXT + 1)@ha 1472 + addi r9,r9,(MAX_USER_CONTEXT + 1)@l 1473 + 1474 + srdi r10,r11,SID_SHIFT 1475 + rldimi r10,r9,ESID_BITS,0 /* proto vsid */ 1476 + ASM_VSID_SCRAMBLE(r10, r9, 256M) 1477 + rldic r9,r10,12,16 /* r9 = vsid << 12 */ 1478 + 1479 + 0: 1456 1480 /* Hash to the primary group */ 1457 1481 ld r10,PACASTABVIRT(r13) 1458 - mfspr r11,SPRN_DAR 1459 - srdi r11,r11,28 1482 + srdi r11,r11,SID_SHIFT 1460 1483 rldimi r10,r11,7,52 /* r10 = first ste of the group */ 1461 - 1462 - /* Calculate VSID */ 1463 - /* This is a kernel address, so protovsid = ESID | 1 << 37 */ 1464 - li r9,0x1 1465 - rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0 1466 - ASM_VSID_SCRAMBLE(r11, r9, 256M) 1467 - rldic r9,r11,12,16 /* r9 = vsid << 12 */ 1468 1484 1469 1485 /* Search the primary group for a free entry */ 1470 1486 1: ld r11,0(r10) /* Test valid bit of the current ste */
+7 -7
arch/powerpc/kernel/prom_init.c
··· 2832 2832 { 2833 2833 } 2834 2834 #else 2835 - static void __reloc_toc(void *tocstart, unsigned long offset, 2836 - unsigned long nr_entries) 2835 + static void __reloc_toc(unsigned long offset, unsigned long nr_entries) 2837 2836 { 2838 2837 unsigned long i; 2839 - unsigned long *toc_entry = (unsigned long *)tocstart; 2838 + unsigned long *toc_entry; 2839 + 2840 + /* Get the start of the TOC by using r2 directly. */ 2841 + asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry)); 2840 2842 2841 2843 for (i = 0; i < nr_entries; i++) { 2842 2844 *toc_entry = *toc_entry + offset; ··· 2852 2850 unsigned long nr_entries = 2853 2851 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); 2854 2852 2855 - /* Need to add offset to get at __prom_init_toc_start */ 2856 - __reloc_toc(__prom_init_toc_start + offset, offset, nr_entries); 2853 + __reloc_toc(offset, nr_entries); 2857 2854 2858 2855 mb(); 2859 2856 } ··· 2865 2864 2866 2865 mb(); 2867 2866 2868 - /* __prom_init_toc_start has been relocated, no need to add offset */ 2869 - __reloc_toc(__prom_init_toc_start, -offset, nr_entries); 2867 + __reloc_toc(-offset, nr_entries); 2870 2868 } 2871 2869 #endif 2872 2870 #endif
+1
arch/powerpc/kernel/ptrace.c
··· 1428 1428 1429 1429 brk.address = bp_info->addr & ~7UL; 1430 1430 brk.type = HW_BRK_TYPE_TRANSLATE; 1431 + brk.len = 8; 1431 1432 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) 1432 1433 brk.type |= HW_BRK_TYPE_READ; 1433 1434 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+2 -2
arch/powerpc/kvm/book3s_64_mmu_host.c
··· 326 326 vcpu3s->context_id[0] = err; 327 327 328 328 vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) 329 - << USER_ESID_BITS) - 1; 330 - vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; 329 + << ESID_BITS) - 1; 330 + vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS; 331 331 vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; 332 332 333 333 kvmppc_mmu_hpte_init(vcpu);
+17 -5
arch/powerpc/mm/hash_utils_64.c
··· 195 195 unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); 196 196 unsigned long tprot = prot; 197 197 198 + /* 199 + * If we hit a bad address return error. 200 + */ 201 + if (!vsid) 202 + return -1; 198 203 /* Make kernel text executable */ 199 204 if (overlaps_kernel_text(vaddr, vaddr + step)) 200 205 tprot &= ~HPTE_R_N; ··· 764 759 /* Initialize stab / SLB management */ 765 760 if (mmu_has_feature(MMU_FTR_SLB)) 766 761 slb_initialize(); 762 + else 763 + stab_initialize(get_paca()->stab_real); 767 764 } 768 765 769 766 #ifdef CONFIG_SMP ··· 929 922 DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", 930 923 ea, access, trap); 931 924 932 - if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) { 933 - DBG_LOW(" out of pgtable range !\n"); 934 - return 1; 935 - } 936 - 937 925 /* Get region & vsid */ 938 926 switch (REGION_ID(ea)) { 939 927 case USER_REGION_ID: ··· 959 957 } 960 958 DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); 961 959 960 + /* Bad address. */ 961 + if (!vsid) { 962 + DBG_LOW("Bad address!\n"); 963 + return 1; 964 + } 962 965 /* Get pgdir */ 963 966 pgdir = mm->pgd; 964 967 if (pgdir == NULL) ··· 1133 1126 /* Get VSID */ 1134 1127 ssize = user_segment_size(ea); 1135 1128 vsid = get_vsid(mm->context.id, ea, ssize); 1129 + if (!vsid) 1130 + return; 1136 1131 1137 1132 /* Hash doesn't like irqs */ 1138 1133 local_irq_save(flags); ··· 1242 1233 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); 1243 1234 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 1244 1235 1236 + /* Don't create HPTE entries for bad address */ 1237 + if (!vsid) 1238 + return; 1245 1239 ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr), 1246 1240 mode, HPTE_V_BOLTED, 1247 1241 mmu_linear_psize, mmu_kernel_ssize);
+1 -10
arch/powerpc/mm/mmu_context_hash64.c
··· 29 29 static DEFINE_SPINLOCK(mmu_context_lock); 30 30 static DEFINE_IDA(mmu_context_ida); 31 31 32 - /* 33 - * 256MB segment 34 - * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments 35 - * available for user mappings. Each segment contains 2^28 bytes. Each 36 - * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts 37 - * (19 == 37 + 28 - 46). 38 - */ 39 - #define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1) 40 - 41 32 int __init_new_context(void) 42 33 { 43 34 int index; ··· 47 56 else if (err) 48 57 return err; 49 58 50 - if (index > MAX_CONTEXT) { 59 + if (index > MAX_USER_CONTEXT) { 51 60 spin_lock(&mmu_context_lock); 52 61 ida_remove(&mmu_context_ida, index); 53 62 spin_unlock(&mmu_context_lock);
+1 -1
arch/powerpc/mm/pgtable_64.c
··· 61 61 #endif 62 62 63 63 #ifdef CONFIG_PPC_STD_MMU_64 64 - #if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) 64 + #if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT)) 65 65 #error TASK_SIZE_USER64 exceeds user VSID range 66 66 #endif 67 67 #endif
+25 -25
arch/powerpc/mm/slb_low.S
··· 31 31 * No other registers are examined or changed. 32 32 */ 33 33 _GLOBAL(slb_allocate_realmode) 34 - /* r3 = faulting address */ 34 + /* 35 + * check for bad kernel/user address 36 + * (ea & ~REGION_MASK) >= PGTABLE_RANGE 37 + */ 38 + rldicr. r9,r3,4,(63 - 46 - 4) 39 + bne- 8f 35 40 36 41 srdi r9,r3,60 /* get region */ 37 - srdi r10,r3,28 /* get esid */ 42 + srdi r10,r3,SID_SHIFT /* get esid */ 38 43 cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */ 39 44 40 45 /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */ ··· 61 56 */ 62 57 _GLOBAL(slb_miss_kernel_load_linear) 63 58 li r11,0 64 - li r9,0x1 65 59 /* 66 - * for 1T we shift 12 bits more. slb_finish_load_1T will do 67 - * the necessary adjustment 60 + * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 61 + * r9 = region id. 68 62 */ 69 - rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 63 + addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha 64 + addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l 65 + 66 + 70 67 BEGIN_FTR_SECTION 71 68 b slb_finish_load 72 69 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) ··· 98 91 _GLOBAL(slb_miss_kernel_load_io) 99 92 li r11,0 100 93 6: 101 - li r9,0x1 102 94 /* 103 - * for 1T we shift 12 bits more. slb_finish_load_1T will do 104 - * the necessary adjustment 95 + * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 96 + * r9 = region id. 105 97 */ 106 - rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 98 + addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha 99 + addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l 100 + 107 101 BEGIN_FTR_SECTION 108 102 b slb_finish_load 109 103 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) 110 104 b slb_finish_load_1T 111 105 112 - 0: /* user address: proto-VSID = context << 15 | ESID. First check 113 - * if the address is within the boundaries of the user region 114 - */ 115 - srdi. 
r9,r10,USER_ESID_BITS 116 - bne- 8f /* invalid ea bits set */ 117 - 118 - 106 + 0: 119 107 /* when using slices, we extract the psize off the slice bitmaps 120 108 * and then we need to get the sllp encoding off the mmu_psize_defs 121 109 * array. ··· 166 164 ld r9,PACACONTEXTID(r13) 167 165 BEGIN_FTR_SECTION 168 166 cmpldi r10,0x1000 169 - END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 170 - rldimi r10,r9,USER_ESID_BITS,0 171 - BEGIN_FTR_SECTION 172 167 bge slb_finish_load_1T 173 168 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 174 169 b slb_finish_load 175 170 176 171 8: /* invalid EA */ 177 172 li r10,0 /* BAD_VSID */ 173 + li r9,0 /* BAD_VSID */ 178 174 li r11,SLB_VSID_USER /* flags don't much matter */ 179 175 b slb_finish_load 180 176 ··· 221 221 222 222 /* get context to calculate proto-VSID */ 223 223 ld r9,PACACONTEXTID(r13) 224 - rldimi r10,r9,USER_ESID_BITS,0 225 - 226 224 /* fall through slb_finish_load */ 227 225 228 226 #endif /* __DISABLED__ */ ··· 229 231 /* 230 232 * Finish loading of an SLB entry and return 231 233 * 232 - * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET 234 + * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET 233 235 */ 234 236 slb_finish_load: 237 + rldimi r10,r9,ESID_BITS,0 235 238 ASM_VSID_SCRAMBLE(r10,r9,256M) 236 239 /* 237 240 * bits above VSID_BITS_256M need to be ignored from r10 ··· 297 298 /* 298 299 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return. 299 300 * 300 - * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9 301 + * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9 301 302 */ 302 303 slb_finish_load_1T: 303 - srdi r10,r10,40-28 /* get 1T ESID */ 304 + srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */ 305 + rldimi r10,r9,ESID_BITS_1T,0 304 306 ASM_VSID_SCRAMBLE(r10,r9,1T) 305 307 /* 306 308 * bits above VSID_BITS_1T need to be ignored from r10
+1 -1
arch/powerpc/mm/tlb_hash64.c
··· 82 82 if (!is_kernel_addr(addr)) { 83 83 ssize = user_segment_size(addr); 84 84 vsid = get_vsid(mm->context.id, addr, ssize); 85 - WARN_ON(vsid == 0); 86 85 } else { 87 86 vsid = get_kernel_vsid(addr, mmu_kernel_ssize); 88 87 ssize = mmu_kernel_ssize; 89 88 } 89 + WARN_ON(vsid == 0); 90 90 vpn = hpt_vpn(addr, vsid, ssize); 91 91 rpte = __real_pte(__pte(pte), ptep); 92 92
+3 -3
arch/powerpc/platforms/85xx/sgy_cts1000.c
··· 69 69 return IRQ_HANDLED; 70 70 }; 71 71 72 - static int __devinit gpio_halt_probe(struct platform_device *pdev) 72 + static int gpio_halt_probe(struct platform_device *pdev) 73 73 { 74 74 enum of_gpio_flags flags; 75 75 struct device_node *node = pdev->dev.of_node; ··· 128 128 return 0; 129 129 } 130 130 131 - static int __devexit gpio_halt_remove(struct platform_device *pdev) 131 + static int gpio_halt_remove(struct platform_device *pdev) 132 132 { 133 133 if (halt_node) { 134 134 int gpio = of_get_gpio(halt_node, 0); ··· 165 165 .of_match_table = gpio_halt_match, 166 166 }, 167 167 .probe = gpio_halt_probe, 168 - .remove = __devexit_p(gpio_halt_remove), 168 + .remove = gpio_halt_remove, 169 169 }; 170 170 171 171 module_platform_driver(gpio_halt_driver);
+2 -4
arch/powerpc/platforms/Kconfig.cputype
··· 124 124 select PPC_HAVE_PMU_SUPPORT 125 125 126 126 config POWER3 127 - bool 128 127 depends on PPC64 && PPC_BOOK3S 129 - default y if !POWER4_ONLY 128 + def_bool y 130 129 131 130 config POWER4 132 131 depends on PPC64 && PPC_BOOK3S ··· 144 145 but somewhat slower on other machines. This option only changes 145 146 the scheduling of instructions, not the selection of instructions 146 147 itself, so the resulting kernel will keep running on all other 147 - machines. When building a kernel that is supposed to run only 148 - on Cell, you should also select the POWER4_ONLY option. 148 + machines. 149 149 150 150 # this is temp to handle compat with arch=ppc 151 151 config 8xx
+1 -1
arch/s390/Kconfig
··· 134 134 select HAVE_SYSCALL_WRAPPERS 135 135 select HAVE_UID16 if 32BIT 136 136 select HAVE_VIRT_CPU_ACCOUNTING 137 - select HAVE_VIRT_TO_BUS 137 + select VIRT_TO_BUS 138 138 select INIT_ALL_POSSIBLE 139 139 select KTIME_SCALAR if 32BIT 140 140 select MODULES_USE_ELF_RELA
+5 -1
arch/s390/include/asm/eadm.h
··· 34 34 u32 reserved[4]; 35 35 } __packed; 36 36 37 + #define EQC_WR_PROHIBIT 22 38 + 37 39 struct msb { 38 40 u8 fmt:4; 39 41 u8 oc:4; ··· 98 96 #define OP_STATE_TEMP_ERR 2 99 97 #define OP_STATE_PERM_ERR 3 100 98 99 + enum scm_event {SCM_CHANGE, SCM_AVAIL}; 100 + 101 101 struct scm_driver { 102 102 struct device_driver drv; 103 103 int (*probe) (struct scm_device *scmdev); 104 104 int (*remove) (struct scm_device *scmdev); 105 - void (*notify) (struct scm_device *scmdev); 105 + void (*notify) (struct scm_device *scmdev, enum scm_event event); 106 106 void (*handler) (struct scm_device *scmdev, void *data, int error); 107 107 }; 108 108
-2
arch/s390/include/asm/tlbflush.h
··· 74 74 75 75 static inline void __tlb_flush_mm(struct mm_struct * mm) 76 76 { 77 - if (unlikely(cpumask_empty(mm_cpumask(mm)))) 78 - return; 79 77 /* 80 78 * If the machine has IDTE we prefer to do a per mm flush 81 79 * on all cpus instead of doing a local flush if the mm
+2 -1
arch/s390/kernel/entry.S
··· 636 636 UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER 637 637 mcck_skip: 638 638 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT 639 - mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA 639 + stm %r0,%r7,__PT_R0(%r11) 640 + mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 640 641 stm %r8,%r9,__PT_PSW(%r11) 641 642 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 642 643 l %r1,BASED(.Ldo_machine_check)
+3 -2
arch/s390/kernel/entry64.S
··· 678 678 UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER 679 679 LAST_BREAK %r14 680 680 mcck_skip: 681 - lghi %r14,__LC_GPREGS_SAVE_AREA 682 - mvc __PT_R0(128,%r11),0(%r14) 681 + lghi %r14,__LC_GPREGS_SAVE_AREA+64 682 + stmg %r0,%r7,__PT_R0(%r11) 683 + mvc __PT_R8(64,%r11),0(%r14) 683 684 stmg %r8,%r9,__PT_PSW(%r11) 684 685 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 685 686 lgr %r2,%r11 # pass pointer to pt_regs
+2
arch/s390/kernel/setup.c
··· 571 571 572 572 /* Split remaining virtual space between 1:1 mapping & vmemmap array */ 573 573 tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); 574 + /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */ 575 + tmp = SECTION_ALIGN_UP(tmp); 574 576 tmp = VMALLOC_START - tmp * sizeof(struct page); 575 577 tmp &= ~((vmax >> 11) - 1); /* align to page table level */ 576 578 tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
+1 -1
arch/score/Kconfig
··· 12 12 select GENERIC_CPU_DEVICES 13 13 select GENERIC_CLOCKEVENTS 14 14 select HAVE_MOD_ARCH_SPECIFIC 15 - select HAVE_VIRT_TO_BUS 15 + select VIRT_TO_BUS 16 16 select MODULES_USE_ELF_REL 17 17 select CLONE_BACKWARDS 18 18
+1 -7
arch/sparc/Kconfig
··· 84 84 default "arch/sparc/configs/sparc32_defconfig" if SPARC32 85 85 default "arch/sparc/configs/sparc64_defconfig" if SPARC64 86 86 87 - # CONFIG_BITS can be used at source level to get 32/64 bits 88 - config BITS 89 - int 90 - default 32 if SPARC32 91 - default 64 if SPARC64 92 - 93 87 config IOMMU_HELPER 94 88 bool 95 89 default y if SPARC64 ··· 191 197 192 198 config GENERIC_HWEIGHT 193 199 bool 194 - default y if !ULTRA_HAS_POPULATION_COUNT 200 + default y 195 201 196 202 config GENERIC_CALIBRATE_DELAY 197 203 bool
+1
arch/sparc/include/asm/spitfire.h
··· 45 45 #define SUN4V_CHIP_NIAGARA3 0x03 46 46 #define SUN4V_CHIP_NIAGARA4 0x04 47 47 #define SUN4V_CHIP_NIAGARA5 0x05 48 + #define SUN4V_CHIP_SPARC64X 0x8a 48 49 #define SUN4V_CHIP_UNKNOWN 0xff 49 50 50 51 #ifndef __ASSEMBLY__
+6
arch/sparc/kernel/cpu.c
··· 493 493 sparc_pmu_type = "niagara5"; 494 494 break; 495 495 496 + case SUN4V_CHIP_SPARC64X: 497 + sparc_cpu_type = "SPARC64-X"; 498 + sparc_fpu_type = "SPARC64-X integrated FPU"; 499 + sparc_pmu_type = "sparc64-x"; 500 + break; 501 + 496 502 default: 497 503 printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", 498 504 prom_cpu_compatible);
+23 -2
arch/sparc/kernel/head_64.S
··· 134 134 .asciz "SUNW,UltraSPARC-T" 135 135 prom_sparc_prefix: 136 136 .asciz "SPARC-" 137 + prom_sparc64x_prefix: 138 + .asciz "SPARC64-X" 137 139 .align 4 138 140 prom_root_compatible: 139 141 .skip 64 ··· 414 412 cmp %g2, 'T' 415 413 be,pt %xcc, 70f 416 414 cmp %g2, 'M' 417 - bne,pn %xcc, 4f 415 + bne,pn %xcc, 49f 418 416 nop 419 417 420 418 70: ldub [%g1 + 7], %g2 ··· 427 425 cmp %g2, '5' 428 426 be,pt %xcc, 5f 429 427 mov SUN4V_CHIP_NIAGARA5, %g4 430 - ba,pt %xcc, 4f 428 + ba,pt %xcc, 49f 431 429 nop 432 430 433 431 91: sethi %hi(prom_cpu_compatible), %g1 ··· 441 439 mov SUN4V_CHIP_NIAGARA2, %g4 442 440 443 441 4: 442 + /* Athena */ 443 + sethi %hi(prom_cpu_compatible), %g1 444 + or %g1, %lo(prom_cpu_compatible), %g1 445 + sethi %hi(prom_sparc64x_prefix), %g7 446 + or %g7, %lo(prom_sparc64x_prefix), %g7 447 + mov 9, %g3 448 + 41: ldub [%g7], %g2 449 + ldub [%g1], %g4 450 + cmp %g2, %g4 451 + bne,pn %icc, 49f 452 + add %g7, 1, %g7 453 + subcc %g3, 1, %g3 454 + bne,pt %xcc, 41b 455 + add %g1, 1, %g1 456 + mov SUN4V_CHIP_SPARC64X, %g4 457 + ba,pt %xcc, 5f 458 + nop 459 + 460 + 49: 444 461 mov SUN4V_CHIP_UNKNOWN, %g4 445 462 5: sethi %hi(sun4v_chip_type), %g2 446 463 or %g2, %lo(sun4v_chip_type), %g2
+26 -15
arch/sparc/kernel/leon_pci_grpci2.c
··· 186 186 #define CAP9_IOMAP_OFS 0x20 187 187 #define CAP9_BARSIZE_OFS 0x24 188 188 189 + #define TGT 256 190 + 189 191 struct grpci2_priv { 190 192 struct leon_pci_info info; /* must be on top of this structure */ 191 193 struct grpci2_regs *regs; ··· 239 237 if (where & 0x3) 240 238 return -EINVAL; 241 239 242 - if (bus == 0 && PCI_SLOT(devfn) != 0) 243 - devfn += (0x8 * 6); 240 + if (bus == 0) { 241 + devfn += (0x8 * 6); /* start at AD16=Device0 */ 242 + } else if (bus == TGT) { 243 + bus = 0; 244 + devfn = 0; /* special case: bridge controller itself */ 245 + } 244 246 245 247 /* Select bus */ 246 248 spin_lock_irqsave(&grpci2_dev_lock, flags); ··· 309 303 if (where & 0x3) 310 304 return -EINVAL; 311 305 312 - if (bus == 0 && PCI_SLOT(devfn) != 0) 313 - devfn += (0x8 * 6); 306 + if (bus == 0) { 307 + devfn += (0x8 * 6); /* start at AD16=Device0 */ 308 + } else if (bus == TGT) { 309 + bus = 0; 310 + devfn = 0; /* special case: bridge controller itself */ 311 + } 314 312 315 313 /* Select bus */ 316 314 spin_lock_irqsave(&grpci2_dev_lock, flags); ··· 378 368 unsigned int busno = bus->number; 379 369 int ret; 380 370 381 - if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) { 371 + if (PCI_SLOT(devfn) > 15 || busno > 255) { 382 372 *val = ~0; 383 373 return 0; 384 374 } ··· 416 406 struct grpci2_priv *priv = grpci2priv; 417 407 unsigned int busno = bus->number; 418 408 419 - if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) 409 + if (PCI_SLOT(devfn) > 15 || busno > 255) 420 410 return 0; 421 411 422 412 #ifdef GRPCI2_DEBUG_CFGACCESS ··· 588 578 REGSTORE(regs->ahbmst_map[i], priv->pci_area); 589 579 590 580 /* Get the GRPCI2 Host PCI ID */ 591 - grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid); 581 + grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid); 592 582 593 583 /* Get address to first (always defined) capability structure */ 594 - grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr); 584 + grpci2_cfg_r8(priv, 
TGT, 0, PCI_CAPABILITY_LIST, &capptr); 595 585 596 586 /* Enable/Disable Byte twisting */ 597 - grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map); 587 + grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map); 598 588 io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); 599 - grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map); 589 + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map); 600 590 601 591 /* Setup the Host's PCI Target BARs for other peripherals to access, 602 592 * and do DMA to the host's memory. The target BARs can be sized and ··· 627 617 pciadr = 0; 628 618 } 629 619 } 630 - grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz); 631 - grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); 632 - grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); 620 + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4, 621 + bar_sz); 622 + grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); 623 + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); 633 624 printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n", 634 625 i, pciadr, ahbadr); 635 626 } 636 627 637 628 /* set as bus master and enable pci memory responses */ 638 - grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data); 629 + grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data); 639 630 data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); 640 - grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data); 631 + grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data); 641 632 642 633 /* Enable Error respone (CPU-TRAP) on illegal memory access. */ 643 634 REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE);
+1 -1
arch/tile/Kconfig
··· 17 17 select GENERIC_IRQ_SHOW 18 18 select HAVE_DEBUG_BUGVERBOSE 19 19 select HAVE_SYSCALL_WRAPPERS if TILEGX 20 - select HAVE_VIRT_TO_BUS 20 + select VIRT_TO_BUS 21 21 select SYS_HYPERVISOR 22 22 select ARCH_HAVE_NMI_SAFE_CMPXCHG 23 23 select GENERIC_CLOCKEVENTS
+1 -1
arch/unicore32/Kconfig
··· 9 9 select GENERIC_ATOMIC64 10 10 select HAVE_KERNEL_LZO 11 11 select HAVE_KERNEL_LZMA 12 - select HAVE_VIRT_TO_BUS 12 + select VIRT_TO_BUS 13 13 select ARCH_HAVE_CUSTOM_GPIO_H 14 14 select GENERIC_FIND_FIRST_BIT 15 15 select GENERIC_IRQ_PROBE
+1 -1
arch/x86/Kconfig
··· 112 112 select GENERIC_STRNLEN_USER 113 113 select HAVE_CONTEXT_TRACKING if X86_64 114 114 select HAVE_IRQ_TIME_ACCOUNTING 115 - select HAVE_VIRT_TO_BUS 115 + select VIRT_TO_BUS 116 116 select MODULES_USE_ELF_REL if X86_32 117 117 select MODULES_USE_ELF_RELA if X86_64 118 118 select CLONE_BACKWARDS if X86_32
+10
arch/x86/kernel/cpu/perf_event_intel_ds.c
··· 729 729 } 730 730 } 731 731 } 732 + 733 + void perf_restore_debug_store(void) 734 + { 735 + struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); 736 + 737 + if (!x86_pmu.bts && !x86_pmu.pebs) 738 + return; 739 + 740 + wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds); 741 + }
+2
arch/x86/power/cpu.c
··· 11 11 #include <linux/suspend.h> 12 12 #include <linux/export.h> 13 13 #include <linux/smp.h> 14 + #include <linux/perf_event.h> 14 15 15 16 #include <asm/pgtable.h> 16 17 #include <asm/proto.h> ··· 229 228 do_fpu_end(); 230 229 x86_platform.restore_sched_clock_state(); 231 230 mtrr_bp_restore(); 231 + perf_restore_debug_store(); 232 232 } 233 233 234 234 /* Needed by apm.c */
+1 -1
arch/xtensa/Kconfig
··· 9 9 select HAVE_IDE 10 10 select GENERIC_ATOMIC64 11 11 select HAVE_GENERIC_HARDIRQS 12 - select HAVE_VIRT_TO_BUS 12 + select VIRT_TO_BUS 13 13 select GENERIC_IRQ_SHOW 14 14 select GENERIC_CPU_DEVICES 15 15 select MODULES_USE_ELF_RELA
+2 -2
drivers/acpi/processor_perflib.c
··· 465 465 return result; 466 466 } 467 467 468 - static int acpi_processor_get_performance_info(struct acpi_processor *pr) 468 + int acpi_processor_get_performance_info(struct acpi_processor *pr) 469 469 { 470 470 int result = 0; 471 471 acpi_status status = AE_OK; ··· 509 509 #endif 510 510 return result; 511 511 } 512 - 512 + EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info); 513 513 int acpi_processor_notify_smm(struct module *calling_module) 514 514 { 515 515 acpi_status status;
+1 -1
drivers/amba/tegra-ahb.c
··· 158 158 EXPORT_SYMBOL(tegra_ahb_enable_smmu); 159 159 #endif 160 160 161 - #ifdef CONFIG_PM_SLEEP 161 + #ifdef CONFIG_PM 162 162 static int tegra_ahb_suspend(struct device *dev) 163 163 { 164 164 int i;
+4
drivers/bluetooth/ath3k.c
··· 74 74 75 75 /* Atheros AR3012 with sflash firmware*/ 76 76 { USB_DEVICE(0x0CF3, 0x3004) }, 77 + { USB_DEVICE(0x0CF3, 0x3008) }, 77 78 { USB_DEVICE(0x0CF3, 0x311D) }, 78 79 { USB_DEVICE(0x13d3, 0x3375) }, 80 + { USB_DEVICE(0x04CA, 0x3004) }, 79 81 { USB_DEVICE(0x04CA, 0x3005) }, 80 82 { USB_DEVICE(0x04CA, 0x3006) }, 81 83 { USB_DEVICE(0x04CA, 0x3008) }, ··· 108 106 109 107 /* Atheros AR3012 with sflash firmware*/ 110 108 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, 109 + { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, 111 110 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, 112 111 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 112 + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, 113 113 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 114 114 { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, 115 115 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+2
drivers/bluetooth/btusb.c
··· 132 132 133 133 /* Atheros 3012 with sflash firmware */ 134 134 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, 135 + { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, 135 136 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, 136 137 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 138 + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, 137 139 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 138 140 { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, 139 141 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+11 -2
drivers/char/hw_random/virtio-rng.c
··· 92 92 { 93 93 int err; 94 94 95 + if (vq) { 96 + /* We only support one device for now */ 97 + return -EBUSY; 98 + } 95 99 /* We expect a single virtqueue. */ 96 100 vq = virtio_find_single_vq(vdev, random_recv_done, "input"); 97 - if (IS_ERR(vq)) 98 - return PTR_ERR(vq); 101 + if (IS_ERR(vq)) { 102 + err = PTR_ERR(vq); 103 + vq = NULL; 104 + return err; 105 + } 99 106 100 107 err = hwrng_register(&virtio_hwrng); 101 108 if (err) { 102 109 vdev->config->del_vqs(vdev); 110 + vq = NULL; 103 111 return err; 104 112 } 105 113 ··· 120 112 busy = false; 121 113 hwrng_unregister(&virtio_hwrng); 122 114 vdev->config->del_vqs(vdev); 115 + vq = NULL; 123 116 } 124 117 125 118 static int virtrng_probe(struct virtio_device *vdev)
+1 -1
drivers/clk/clk-vt8500.c
··· 157 157 divisor = parent_rate / rate; 158 158 159 159 /* If prate / rate would be decimal, incr the divisor */ 160 - if (rate * divisor < *prate) 160 + if (rate * divisor < parent_rate) 161 161 divisor++; 162 162 163 163 if (divisor == cdev->div_mask + 1)
-1
drivers/clk/tegra/clk-tegra20.c
··· 1292 1292 TEGRA_CLK_DUPLICATE(usbd, "tegra-ehci.0", NULL), 1293 1293 TEGRA_CLK_DUPLICATE(usbd, "tegra-otg", NULL), 1294 1294 TEGRA_CLK_DUPLICATE(cclk, NULL, "cpu"), 1295 - TEGRA_CLK_DUPLICATE(twd, "smp_twd", NULL), 1296 1295 TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* Must be the last entry */ 1297 1296 }; 1298 1297
-1
drivers/clk/tegra/clk-tegra30.c
··· 1931 1931 TEGRA_CLK_DUPLICATE(cml1, "tegra_sata_cml", NULL), 1932 1932 TEGRA_CLK_DUPLICATE(cml0, "tegra_pcie", "cml"), 1933 1933 TEGRA_CLK_DUPLICATE(pciex, "tegra_pcie", "pciex"), 1934 - TEGRA_CLK_DUPLICATE(twd, "smp_twd", NULL), 1935 1934 TEGRA_CLK_DUPLICATE(vcp, "nvavp", "vcp"), 1936 1935 TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* MUST be the last entry */ 1937 1936 };
+7
drivers/gpio/gpio-mvebu.c
··· 42 42 #include <linux/io.h> 43 43 #include <linux/of_irq.h> 44 44 #include <linux/of_device.h> 45 + #include <linux/clk.h> 45 46 #include <linux/pinctrl/consumer.h> 46 47 47 48 /* ··· 497 496 struct resource *res; 498 497 struct irq_chip_generic *gc; 499 498 struct irq_chip_type *ct; 499 + struct clk *clk; 500 500 unsigned int ngpios; 501 501 int soc_variant; 502 502 int i, cpu, id; ··· 530 528 dev_err(&pdev->dev, "Couldn't get OF id\n"); 531 529 return id; 532 530 } 531 + 532 + clk = devm_clk_get(&pdev->dev, NULL); 533 + /* Not all SoCs require a clock.*/ 534 + if (!IS_ERR(clk)) 535 + clk_prepare_enable(clk); 533 536 534 537 mvchip->soc_variant = soc_variant; 535 538 mvchip->chip.label = dev_name(&pdev->dev);
+2 -2
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
··· 544 544 static void 545 545 nv50_disp_base_vblank_enable(struct nouveau_event *event, int head) 546 546 { 547 - nv_mask(event->priv, 0x61002c, (1 << head), (1 << head)); 547 + nv_mask(event->priv, 0x61002c, (4 << head), (4 << head)); 548 548 } 549 549 550 550 static void 551 551 nv50_disp_base_vblank_disable(struct nouveau_event *event, int head) 552 552 { 553 - nv_mask(event->priv, 0x61002c, (1 << head), (0 << head)); 553 + nv_mask(event->priv, 0x61002c, (4 << head), 0); 554 554 } 555 555 556 556 static int
+5
drivers/gpu/drm/nouveau/nouveau_abi16.c
··· 116 116 { 117 117 struct nouveau_abi16_ntfy *ntfy, *temp; 118 118 119 + /* wait for all activity to stop before releasing notify object, which 120 + * may be still in use */ 121 + if (chan->chan && chan->ntfy) 122 + nouveau_channel_idle(chan->chan); 123 + 119 124 /* cleanup notifier state */ 120 125 list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) { 121 126 nouveau_abi16_ntfy_fini(chan, ntfy);
+2 -2
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 801 801 stride = 16 * 4; 802 802 height = amount / stride; 803 803 804 - if (new_mem->mem_type == TTM_PL_VRAM && 804 + if (old_mem->mem_type == TTM_PL_VRAM && 805 805 nouveau_bo_tile_layout(nvbo)) { 806 806 ret = RING_SPACE(chan, 8); 807 807 if (ret) ··· 823 823 BEGIN_NV04(chan, NvSubCopy, 0x0200, 1); 824 824 OUT_RING (chan, 1); 825 825 } 826 - if (old_mem->mem_type == TTM_PL_VRAM && 826 + if (new_mem->mem_type == TTM_PL_VRAM && 827 827 nouveau_bo_tile_layout(nvbo)) { 828 828 ret = RING_SPACE(chan, 8); 829 829 if (ret)
+1
drivers/gpu/drm/nouveau/nv50_display.c
··· 2276 2276 NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n", 2277 2277 dcbe->location, dcbe->type, 2278 2278 ffs(dcbe->or) - 1, ret); 2279 + ret = 0; 2279 2280 } 2280 2281 } 2281 2282
+2
drivers/hwmon/lineage-pem.c
··· 422 422 &sensor_dev_attr_in2_input.dev_attr.attr, 423 423 &sensor_dev_attr_curr1_input.dev_attr.attr, 424 424 &sensor_dev_attr_power1_input.dev_attr.attr, 425 + NULL 425 426 }; 426 427 427 428 static const struct attribute_group pem_input_group = { ··· 433 432 &sensor_dev_attr_fan1_input.dev_attr.attr, 434 433 &sensor_dev_attr_fan2_input.dev_attr.attr, 435 434 &sensor_dev_attr_fan3_input.dev_attr.attr, 435 + NULL 436 436 }; 437 437 438 438 static const struct attribute_group pem_fan_group = {
+1 -1
drivers/hwmon/lm75.h
··· 25 25 which contains this code, we don't worry about the wasted space. 26 26 */ 27 27 28 - #include <linux/hwmon.h> 28 + #include <linux/kernel.h> 29 29 30 30 /* straight from the datasheet */ 31 31 #define LM75_TEMP_MIN (-55000)
+8 -6
drivers/hwmon/pmbus/ltc2978.c
··· 59 59 struct ltc2978_data { 60 60 enum chips id; 61 61 int vin_min, vin_max; 62 - int temp_min, temp_max; 62 + int temp_min, temp_max[2]; 63 63 int vout_min[8], vout_max[8]; 64 64 int iout_max[2]; 65 65 int temp2_max; ··· 113 113 ret = pmbus_read_word_data(client, page, 114 114 LTC2978_MFR_TEMPERATURE_PEAK); 115 115 if (ret >= 0) { 116 - if (lin11_to_val(ret) > lin11_to_val(data->temp_max)) 117 - data->temp_max = ret; 118 - ret = data->temp_max; 116 + if (lin11_to_val(ret) 117 + > lin11_to_val(data->temp_max[page])) 118 + data->temp_max[page] = ret; 119 + ret = data->temp_max[page]; 119 120 } 120 121 break; 121 122 case PMBUS_VIRT_RESET_VOUT_HISTORY: ··· 267 266 break; 268 267 case PMBUS_VIRT_RESET_TEMP_HISTORY: 269 268 data->temp_min = 0x7bff; 270 - data->temp_max = 0x7c00; 269 + data->temp_max[page] = 0x7c00; 271 270 ret = ltc2978_clear_peaks(client, page, data->id); 272 271 break; 273 272 default: ··· 324 323 data->vin_min = 0x7bff; 325 324 data->vin_max = 0x7c00; 326 325 data->temp_min = 0x7bff; 327 - data->temp_max = 0x7c00; 326 + for (i = 0; i < ARRAY_SIZE(data->temp_max); i++) 327 + data->temp_max[i] = 0x7c00; 328 328 data->temp2_max = 0x7c00; 329 329 330 330 switch (data->id) {
+7 -5
drivers/hwmon/pmbus/pmbus_core.c
··· 766 766 static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr) 767 767 { 768 768 if (data->num_attributes >= data->max_attributes - 1) { 769 - data->max_attributes += PMBUS_ATTR_ALLOC_SIZE; 770 - data->group.attrs = krealloc(data->group.attrs, 771 - sizeof(struct attribute *) * 772 - data->max_attributes, GFP_KERNEL); 773 - if (data->group.attrs == NULL) 769 + int new_max_attrs = data->max_attributes + PMBUS_ATTR_ALLOC_SIZE; 770 + void *new_attrs = krealloc(data->group.attrs, 771 + new_max_attrs * sizeof(void *), 772 + GFP_KERNEL); 773 + if (!new_attrs) 774 774 return -ENOMEM; 775 + data->group.attrs = new_attrs; 776 + data->max_attributes = new_max_attrs; 775 777 } 776 778 777 779 data->group.attrs[data->num_attributes++] = attr;
+1 -1
drivers/i2c/Kconfig
··· 4 4 5 5 menuconfig I2C 6 6 tristate "I2C support" 7 - depends on !S390 8 7 select RT_MUTEXES 9 8 ---help--- 10 9 I2C (pronounce: I-squared-C) is a slow serial bus protocol used in ··· 75 76 76 77 config I2C_SMBUS 77 78 tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO 79 + depends on GENERIC_HARDIRQS 78 80 help 79 81 Say Y here if you want support for SMBus extensions to the I2C 80 82 specification. At the moment, the only supported extension is
+4 -2
drivers/i2c/busses/Kconfig
··· 114 114 115 115 config I2C_ISCH 116 116 tristate "Intel SCH SMBus 1.0" 117 - depends on PCI 117 + depends on PCI && GENERIC_HARDIRQS 118 118 select LPC_SCH 119 119 help 120 120 Say Y here if you want to use SMBus controller on the Intel SCH ··· 543 543 544 544 config I2C_OCORES 545 545 tristate "OpenCores I2C Controller" 546 + depends on GENERIC_HARDIRQS 546 547 help 547 548 If you say yes to this option, support will be included for the 548 549 OpenCores I2C controller. For details see ··· 778 777 779 778 config I2C_PARPORT 780 779 tristate "Parallel port adapter" 781 - depends on PARPORT 780 + depends on PARPORT && GENERIC_HARDIRQS 782 781 select I2C_ALGOBIT 783 782 select I2C_SMBUS 784 783 help ··· 803 802 804 803 config I2C_PARPORT_LIGHT 805 804 tristate "Parallel port adapter (light)" 805 + depends on GENERIC_HARDIRQS 806 806 select I2C_ALGOBIT 807 807 select I2C_SMBUS 808 808 help
+4 -5
drivers/iio/common/st_sensors/st_sensors_core.c
··· 62 62 int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr) 63 63 { 64 64 int err; 65 - struct st_sensor_odr_avl odr_out; 65 + struct st_sensor_odr_avl odr_out = {0, 0}; 66 66 struct st_sensor_data *sdata = iio_priv(indio_dev); 67 67 68 68 err = st_sensors_match_odr(sdata->sensor, odr, &odr_out); ··· 114 114 115 115 static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs) 116 116 { 117 - int err, i; 117 + int err, i = 0; 118 118 struct st_sensor_data *sdata = iio_priv(indio_dev); 119 119 120 120 err = st_sensors_match_fs(sdata->sensor, fs, &i); ··· 139 139 140 140 int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable) 141 141 { 142 - bool found; 143 142 u8 tmp_value; 144 143 int err = -EINVAL; 145 - struct st_sensor_odr_avl odr_out; 144 + bool found = false; 145 + struct st_sensor_odr_avl odr_out = {0, 0}; 146 146 struct st_sensor_data *sdata = iio_priv(indio_dev); 147 147 148 148 if (enable) { 149 - found = false; 150 149 tmp_value = sdata->sensor->pw.value_on; 151 150 if ((sdata->sensor->odr.addr == sdata->sensor->pw.addr) && 152 151 (sdata->sensor->odr.mask == sdata->sensor->pw.mask)) {
+38 -26
drivers/iio/dac/ad5064.c
··· 27 27 #define AD5064_ADDR(x) ((x) << 20) 28 28 #define AD5064_CMD(x) ((x) << 24) 29 29 30 - #define AD5064_ADDR_DAC(chan) (chan) 31 30 #define AD5064_ADDR_ALL_DAC 0xF 32 31 33 32 #define AD5064_CMD_WRITE_INPUT_N 0x0 ··· 130 131 } 131 132 132 133 static int ad5064_sync_powerdown_mode(struct ad5064_state *st, 133 - unsigned int channel) 134 + const struct iio_chan_spec *chan) 134 135 { 135 136 unsigned int val; 136 137 int ret; 137 138 138 - val = (0x1 << channel); 139 + val = (0x1 << chan->address); 139 140 140 - if (st->pwr_down[channel]) 141 - val |= st->pwr_down_mode[channel] << 8; 141 + if (st->pwr_down[chan->channel]) 142 + val |= st->pwr_down_mode[chan->channel] << 8; 142 143 143 144 ret = ad5064_write(st, AD5064_CMD_POWERDOWN_DAC, 0, val, 0); 144 145 ··· 168 169 mutex_lock(&indio_dev->mlock); 169 170 st->pwr_down_mode[chan->channel] = mode + 1; 170 171 171 - ret = ad5064_sync_powerdown_mode(st, chan->channel); 172 + ret = ad5064_sync_powerdown_mode(st, chan); 172 173 mutex_unlock(&indio_dev->mlock); 173 174 174 175 return ret; ··· 204 205 mutex_lock(&indio_dev->mlock); 205 206 st->pwr_down[chan->channel] = pwr_down; 206 207 207 - ret = ad5064_sync_powerdown_mode(st, chan->channel); 208 + ret = ad5064_sync_powerdown_mode(st, chan); 208 209 mutex_unlock(&indio_dev->mlock); 209 210 return ret ? 
ret : len; 210 211 } ··· 257 258 258 259 switch (mask) { 259 260 case IIO_CHAN_INFO_RAW: 260 - if (val > (1 << chan->scan_type.realbits) || val < 0) 261 + if (val >= (1 << chan->scan_type.realbits) || val < 0) 261 262 return -EINVAL; 262 263 263 264 mutex_lock(&indio_dev->mlock); ··· 291 292 { }, 292 293 }; 293 294 294 - #define AD5064_CHANNEL(chan, bits) { \ 295 + #define AD5064_CHANNEL(chan, addr, bits) { \ 295 296 .type = IIO_VOLTAGE, \ 296 297 .indexed = 1, \ 297 298 .output = 1, \ 298 299 .channel = (chan), \ 299 300 .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \ 300 301 IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \ 301 - .address = AD5064_ADDR_DAC(chan), \ 302 + .address = addr, \ 302 303 .scan_type = IIO_ST('u', (bits), 16, 20 - (bits)), \ 303 304 .ext_info = ad5064_ext_info, \ 304 305 } 305 306 306 307 #define DECLARE_AD5064_CHANNELS(name, bits) \ 307 308 const struct iio_chan_spec name[] = { \ 308 - AD5064_CHANNEL(0, bits), \ 309 - AD5064_CHANNEL(1, bits), \ 310 - AD5064_CHANNEL(2, bits), \ 311 - AD5064_CHANNEL(3, bits), \ 312 - AD5064_CHANNEL(4, bits), \ 313 - AD5064_CHANNEL(5, bits), \ 314 - AD5064_CHANNEL(6, bits), \ 315 - AD5064_CHANNEL(7, bits), \ 309 + AD5064_CHANNEL(0, 0, bits), \ 310 + AD5064_CHANNEL(1, 1, bits), \ 311 + AD5064_CHANNEL(2, 2, bits), \ 312 + AD5064_CHANNEL(3, 3, bits), \ 313 + AD5064_CHANNEL(4, 4, bits), \ 314 + AD5064_CHANNEL(5, 5, bits), \ 315 + AD5064_CHANNEL(6, 6, bits), \ 316 + AD5064_CHANNEL(7, 7, bits), \ 317 + } 318 + 319 + #define DECLARE_AD5065_CHANNELS(name, bits) \ 320 + const struct iio_chan_spec name[] = { \ 321 + AD5064_CHANNEL(0, 0, bits), \ 322 + AD5064_CHANNEL(1, 3, bits), \ 316 323 } 317 324 318 325 static DECLARE_AD5064_CHANNELS(ad5024_channels, 12); 319 326 static DECLARE_AD5064_CHANNELS(ad5044_channels, 14); 320 327 static DECLARE_AD5064_CHANNELS(ad5064_channels, 16); 328 + 329 + static DECLARE_AD5065_CHANNELS(ad5025_channels, 12); 330 + static DECLARE_AD5065_CHANNELS(ad5045_channels, 14); 331 + static 
DECLARE_AD5065_CHANNELS(ad5065_channels, 16); 321 332 322 333 static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { 323 334 [ID_AD5024] = { ··· 337 328 }, 338 329 [ID_AD5025] = { 339 330 .shared_vref = false, 340 - .channels = ad5024_channels, 331 + .channels = ad5025_channels, 341 332 .num_channels = 2, 342 333 }, 343 334 [ID_AD5044] = { ··· 347 338 }, 348 339 [ID_AD5045] = { 349 340 .shared_vref = false, 350 - .channels = ad5044_channels, 341 + .channels = ad5045_channels, 351 342 .num_channels = 2, 352 343 }, 353 344 [ID_AD5064] = { ··· 362 353 }, 363 354 [ID_AD5065] = { 364 355 .shared_vref = false, 365 - .channels = ad5064_channels, 356 + .channels = ad5065_channels, 366 357 .num_channels = 2, 367 358 }, 368 359 [ID_AD5628_1] = { ··· 438 429 { 439 430 struct iio_dev *indio_dev; 440 431 struct ad5064_state *st; 432 + unsigned int midscale; 441 433 unsigned int i; 442 434 int ret; 443 435 ··· 475 465 goto error_free_reg; 476 466 } 477 467 478 - for (i = 0; i < st->chip_info->num_channels; ++i) { 479 - st->pwr_down_mode[i] = AD5064_LDAC_PWRDN_1K; 480 - st->dac_cache[i] = 0x8000; 481 - } 482 - 483 468 indio_dev->dev.parent = dev; 484 469 indio_dev->name = name; 485 470 indio_dev->info = &ad5064_info; 486 471 indio_dev->modes = INDIO_DIRECT_MODE; 487 472 indio_dev->channels = st->chip_info->channels; 488 473 indio_dev->num_channels = st->chip_info->num_channels; 474 + 475 + midscale = (1 << indio_dev->channels[0].scan_type.realbits) / 2; 476 + 477 + for (i = 0; i < st->chip_info->num_channels; ++i) { 478 + st->pwr_down_mode[i] = AD5064_LDAC_PWRDN_1K; 479 + st->dac_cache[i] = midscale; 480 + } 489 481 490 482 ret = iio_device_register(indio_dev); 491 483 if (ret)
+1
drivers/iio/imu/inv_mpu6050/Kconfig
··· 5 5 config INV_MPU6050_IIO 6 6 tristate "Invensense MPU6050 devices" 7 7 depends on I2C && SYSFS 8 + select IIO_BUFFER 8 9 select IIO_TRIGGERED_BUFFER 9 10 help 10 11 This driver supports the Invensense MPU6050 devices.
+12
drivers/infiniband/hw/cxgb4/cm.c
··· 1598 1598 1599 1599 neigh = dst_neigh_lookup(ep->dst, 1600 1600 &ep->com.cm_id->remote_addr.sin_addr.s_addr); 1601 + if (!neigh) { 1602 + pr_err("%s - cannot alloc neigh.\n", __func__); 1603 + err = -ENOMEM; 1604 + goto fail4; 1605 + } 1606 + 1601 1607 /* get a l2t entry */ 1602 1608 if (neigh->dev->flags & IFF_LOOPBACK) { 1603 1609 PDBG("%s LOOPBACK\n", __func__); ··· 3088 3082 } 3089 3083 dst = &rt->dst; 3090 3084 neigh = dst_neigh_lookup_skb(dst, skb); 3085 + 3086 + if (!neigh) { 3087 + pr_err("%s - failed to allocate neigh!\n", 3088 + __func__); 3089 + goto free_dst; 3090 + } 3091 3091 3092 3092 if (neigh->dev->flags & IFF_LOOPBACK) { 3093 3093 pdev = ip_dev_find(&init_net, iph->daddr);
-1
drivers/infiniband/hw/mlx4/cm.c
··· 362 362 INIT_LIST_HEAD(&dev->sriov.cm_list); 363 363 dev->sriov.sl_id_map = RB_ROOT; 364 364 idr_init(&dev->sriov.pv_id_table); 365 - idr_pre_get(&dev->sriov.pv_id_table, GFP_KERNEL); 366 365 } 367 366 368 367 /* slave = -1 ==> all slaves */
+2 -6
drivers/input/joystick/analog.c
··· 158 158 #define GET_TIME(x) rdtscl(x) 159 159 #define DELTA(x,y) ((y)-(x)) 160 160 #define TIME_NAME "TSC" 161 - #elif defined(__alpha__) 161 + #elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_TILE) 162 162 #define GET_TIME(x) do { x = get_cycles(); } while (0) 163 163 #define DELTA(x,y) ((y)-(x)) 164 - #define TIME_NAME "PCC" 165 - #elif defined(CONFIG_MN10300) || defined(CONFIG_TILE) 166 - #define GET_TIME(x) do { x = get_cycles(); } while (0) 167 - #define DELTA(x, y) ((x) - (y)) 168 - #define TIME_NAME "TSC" 164 + #define TIME_NAME "get_cycles" 169 165 #else 170 166 #define FAKE_TIME 171 167 static unsigned long analog_faketime = 0;
+1 -1
drivers/irqchip/irq-gic.c
··· 648 648 649 649 /* Convert our logical CPU mask into a physical one. */ 650 650 for_each_cpu(cpu, mask) 651 - map |= 1 << cpu_logical_map(cpu); 651 + map |= gic_cpu_map[cpu]; 652 652 653 653 /* 654 654 * Ensure that stores to Normal memory are visible to the
+4 -2
drivers/isdn/hisax/Kconfig
··· 237 237 238 238 config HISAX_NETJET 239 239 bool "NETjet card" 240 - depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) 240 + depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) 241 + depends on VIRT_TO_BUS 241 242 help 242 243 This enables HiSax support for the NetJet from Traverse 243 244 Technologies. ··· 249 248 250 249 config HISAX_NETJET_U 251 250 bool "NETspider U card" 252 - depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) 251 + depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) 252 + depends on VIRT_TO_BUS 253 253 help 254 254 This enables HiSax support for the Netspider U interface ISDN card 255 255 from Traverse Technologies.
+1
drivers/mfd/Kconfig
··· 858 858 config AB8500_CORE 859 859 bool "ST-Ericsson AB8500 Mixed Signal Power Management chip" 860 860 depends on GENERIC_HARDIRQS && ABX500_CORE && MFD_DB8500_PRCMU 861 + select POWER_SUPPLY 861 862 select MFD_CORE 862 863 select IRQ_DOMAIN 863 864 help
+13 -4
drivers/mfd/ab8500-gpadc.c
··· 594 594 static int ab8500_gpadc_runtime_resume(struct device *dev) 595 595 { 596 596 struct ab8500_gpadc *gpadc = dev_get_drvdata(dev); 597 + int ret; 597 598 598 - regulator_enable(gpadc->regu); 599 - return 0; 599 + ret = regulator_enable(gpadc->regu); 600 + if (ret) 601 + dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret); 602 + return ret; 600 603 } 601 604 602 605 static int ab8500_gpadc_runtime_idle(struct device *dev) ··· 646 643 } 647 644 648 645 /* VTVout LDO used to power up ab8500-GPADC */ 649 - gpadc->regu = regulator_get(&pdev->dev, "vddadc"); 646 + gpadc->regu = devm_regulator_get(&pdev->dev, "vddadc"); 650 647 if (IS_ERR(gpadc->regu)) { 651 648 ret = PTR_ERR(gpadc->regu); 652 649 dev_err(gpadc->dev, "failed to get vtvout LDO\n"); ··· 655 652 656 653 platform_set_drvdata(pdev, gpadc); 657 654 658 - regulator_enable(gpadc->regu); 655 + ret = regulator_enable(gpadc->regu); 656 + if (ret) { 657 + dev_err(gpadc->dev, "Failed to enable vtvout LDO: %d\n", ret); 658 + goto fail_enable; 659 + } 659 660 660 661 pm_runtime_set_autosuspend_delay(gpadc->dev, GPADC_AUDOSUSPEND_DELAY); 661 662 pm_runtime_use_autosuspend(gpadc->dev); ··· 670 663 list_add_tail(&gpadc->node, &ab8500_gpadc_list); 671 664 dev_dbg(gpadc->dev, "probe success\n"); 672 665 return 0; 666 + 667 + fail_enable: 673 668 fail_irq: 674 669 free_irq(gpadc->irq, gpadc); 675 670 fail:
+3 -3
drivers/mfd/omap-usb-host.c
··· 460 460 461 461 switch (omap->usbhs_rev) { 462 462 case OMAP_USBHS_REV1: 463 - omap_usbhs_rev1_hostconfig(omap, reg); 463 + reg = omap_usbhs_rev1_hostconfig(omap, reg); 464 464 break; 465 465 466 466 case OMAP_USBHS_REV2: 467 - omap_usbhs_rev2_hostconfig(omap, reg); 467 + reg = omap_usbhs_rev2_hostconfig(omap, reg); 468 468 break; 469 469 470 470 default: /* newer revisions */ 471 - omap_usbhs_rev2_hostconfig(omap, reg); 471 + reg = omap_usbhs_rev2_hostconfig(omap, reg); 472 472 break; 473 473 } 474 474
+33 -3
drivers/mfd/palmas.c
··· 257 257 PALMAS_INT1_MASK), 258 258 }; 259 259 260 - static void palmas_dt_to_pdata(struct device_node *node, 260 + static int palmas_set_pdata_irq_flag(struct i2c_client *i2c, 261 261 struct palmas_platform_data *pdata) 262 262 { 263 + struct irq_data *irq_data = irq_get_irq_data(i2c->irq); 264 + if (!irq_data) { 265 + dev_err(&i2c->dev, "Invalid IRQ: %d\n", i2c->irq); 266 + return -EINVAL; 267 + } 268 + 269 + pdata->irq_flags = irqd_get_trigger_type(irq_data); 270 + dev_info(&i2c->dev, "Irq flag is 0x%08x\n", pdata->irq_flags); 271 + return 0; 272 + } 273 + 274 + static void palmas_dt_to_pdata(struct i2c_client *i2c, 275 + struct palmas_platform_data *pdata) 276 + { 277 + struct device_node *node = i2c->dev.of_node; 263 278 int ret; 264 279 u32 prop; 265 280 ··· 298 283 pdata->power_ctrl = PALMAS_POWER_CTRL_NSLEEP_MASK | 299 284 PALMAS_POWER_CTRL_ENABLE1_MASK | 300 285 PALMAS_POWER_CTRL_ENABLE2_MASK; 286 + if (i2c->irq) 287 + palmas_set_pdata_irq_flag(i2c, pdata); 301 288 } 302 289 303 290 static int palmas_i2c_probe(struct i2c_client *i2c, ··· 321 304 if (!pdata) 322 305 return -ENOMEM; 323 306 324 - palmas_dt_to_pdata(node, pdata); 307 + palmas_dt_to_pdata(i2c, pdata); 325 308 } 326 309 327 310 if (!pdata) ··· 361 344 } 362 345 } 363 346 347 + /* Change interrupt line output polarity */ 348 + if (pdata->irq_flags & IRQ_TYPE_LEVEL_HIGH) 349 + reg = PALMAS_POLARITY_CTRL_INT_POLARITY; 350 + else 351 + reg = 0; 352 + ret = palmas_update_bits(palmas, PALMAS_PU_PD_OD_BASE, 353 + PALMAS_POLARITY_CTRL, PALMAS_POLARITY_CTRL_INT_POLARITY, 354 + reg); 355 + if (ret < 0) { 356 + dev_err(palmas->dev, "POLARITY_CTRL updat failed: %d\n", ret); 357 + goto err; 358 + } 359 + 364 360 /* Change IRQ into clear on read mode for efficiency */ 365 361 slave = PALMAS_BASE_TO_SLAVE(PALMAS_INTERRUPT_BASE); 366 362 addr = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE, PALMAS_INT_CTRL); ··· 382 352 regmap_write(palmas->regmap[slave], addr, reg); 383 353 384 354 ret = 
regmap_add_irq_chip(palmas->regmap[slave], palmas->irq, 385 - IRQF_ONESHOT | IRQF_TRIGGER_LOW, 0, &palmas_irq_chip, 355 + IRQF_ONESHOT | pdata->irq_flags, 0, &palmas_irq_chip, 386 356 &palmas->irq_data); 387 357 if (ret < 0) 388 358 goto err;
+1
drivers/mfd/tps65912-core.c
··· 169 169 void tps65912_device_exit(struct tps65912 *tps65912) 170 170 { 171 171 mfd_remove_devices(tps65912->dev); 172 + tps65912_irq_exit(tps65912); 172 173 kfree(tps65912); 173 174 } 174 175
+1 -1
drivers/mfd/twl4030-audio.c
··· 118 118 * Disable the resource. 119 119 * The function returns with error or the content of the register 120 120 */ 121 - int twl4030_audio_disable_resource(unsigned id) 121 + int twl4030_audio_disable_resource(enum twl4030_audio_res id) 122 122 { 123 123 struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev); 124 124 int val;
+1 -1
drivers/mfd/twl4030-madc.c
··· 800 800 801 801 static struct platform_driver twl4030_madc_driver = { 802 802 .probe = twl4030_madc_probe, 803 - .remove = __exit_p(twl4030_madc_remove), 803 + .remove = twl4030_madc_remove, 804 804 .driver = { 805 805 .name = "twl4030_madc", 806 806 .owner = THIS_MODULE,
+33 -19
drivers/mtd/bcm47xxpart.c
··· 19 19 /* 10 parts were found on sflash on Netgear WNDR4500 */ 20 20 #define BCM47XXPART_MAX_PARTS 12 21 21 22 + /* 23 + * Amount of bytes we read when analyzing each block of flash memory. 24 + * Set it big enough to allow detecting partition and reading important data. 25 + */ 26 + #define BCM47XXPART_BYTES_TO_READ 0x404 27 + 22 28 /* Magics */ 23 29 #define BOARD_DATA_MAGIC 0x5246504D /* MPFR */ 24 30 #define POT_MAGIC1 0x54544f50 /* POTT */ ··· 63 57 struct trx_header *trx; 64 58 int trx_part = -1; 65 59 int last_trx_part = -1; 66 - int max_bytes_to_read = 0x8004; 60 + int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, }; 67 61 68 62 if (blocksize <= 0x10000) 69 63 blocksize = 0x10000; 70 - if (blocksize == 0x20000) 71 - max_bytes_to_read = 0x18004; 72 64 73 65 /* Alloc */ 74 66 parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS, 75 67 GFP_KERNEL); 76 - buf = kzalloc(max_bytes_to_read, GFP_KERNEL); 68 + buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL); 77 69 78 70 /* Parse block by block looking for magics */ 79 71 for (offset = 0; offset <= master->size - blocksize; ··· 86 82 } 87 83 88 84 /* Read beginning of the block */ 89 - if (mtd_read(master, offset, max_bytes_to_read, 85 + if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ, 90 86 &bytes_read, (uint8_t *)buf) < 0) { 91 87 pr_err("mtd_read error while parsing (offset: 0x%X)!\n", 92 88 offset); ··· 97 93 if (buf[0x400 / 4] == NVRAM_HEADER) { 98 94 bcm47xxpart_add_part(&parts[curr_part++], "boot", 99 95 offset, MTD_WRITEABLE); 100 - continue; 101 - } 102 - 103 - /* Standard NVRAM */ 104 - if (buf[0x000 / 4] == NVRAM_HEADER || 105 - buf[0x1000 / 4] == NVRAM_HEADER || 106 - buf[0x8000 / 4] == NVRAM_HEADER || 107 - (blocksize == 0x20000 && ( 108 - buf[0x10000 / 4] == NVRAM_HEADER || 109 - buf[0x11000 / 4] == NVRAM_HEADER || 110 - buf[0x18000 / 4] == NVRAM_HEADER))) { 111 - bcm47xxpart_add_part(&parts[curr_part++], "nvram", 112 - offset, 0); 113 - offset = rounddown(offset, 
blocksize); 114 96 continue; 115 97 } 116 98 ··· 168 178 continue; 169 179 } 170 180 } 181 + 182 + /* Look for NVRAM at the end of the last block. */ 183 + for (i = 0; i < ARRAY_SIZE(possible_nvram_sizes); i++) { 184 + if (curr_part > BCM47XXPART_MAX_PARTS) { 185 + pr_warn("Reached maximum number of partitions, scanning stopped!\n"); 186 + break; 187 + } 188 + 189 + offset = master->size - possible_nvram_sizes[i]; 190 + if (mtd_read(master, offset, 0x4, &bytes_read, 191 + (uint8_t *)buf) < 0) { 192 + pr_err("mtd_read error while reading at offset 0x%X!\n", 193 + offset); 194 + continue; 195 + } 196 + 197 + /* Standard NVRAM */ 198 + if (buf[0] == NVRAM_HEADER) { 199 + bcm47xxpart_add_part(&parts[curr_part++], "nvram", 200 + master->size - blocksize, 0); 201 + break; 202 + } 203 + } 204 + 171 205 kfree(buf); 172 206 173 207 /*
+16
drivers/mtd/nand/nand_base.c
··· 1523 1523 oobreadlen -= toread; 1524 1524 } 1525 1525 } 1526 + 1527 + if (chip->options & NAND_NEED_READRDY) { 1528 + /* Apply delay or wait for ready/busy pin */ 1529 + if (!chip->dev_ready) 1530 + udelay(chip->chip_delay); 1531 + else 1532 + nand_wait_ready(mtd); 1533 + } 1526 1534 } else { 1527 1535 memcpy(buf, chip->buffers->databuf + col, bytes); 1528 1536 buf += bytes; ··· 1794 1786 1795 1787 len = min(len, readlen); 1796 1788 buf = nand_transfer_oob(chip, buf, ops, len); 1789 + 1790 + if (chip->options & NAND_NEED_READRDY) { 1791 + /* Apply delay or wait for ready/busy pin */ 1792 + if (!chip->dev_ready) 1793 + udelay(chip->chip_delay); 1794 + else 1795 + nand_wait_ready(mtd); 1796 + } 1797 1797 1798 1798 readlen -= len; 1799 1799 if (!readlen)
+36 -34
drivers/mtd/nand/nand_ids.c
··· 22 22 * 512 512 Byte page size 23 23 */ 24 24 struct nand_flash_dev nand_flash_ids[] = { 25 + #define SP_OPTIONS NAND_NEED_READRDY 26 + #define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16) 25 27 26 28 #ifdef CONFIG_MTD_NAND_MUSEUM_IDS 27 - {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0}, 28 - {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0}, 29 - {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0}, 30 - {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0}, 31 - {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0}, 32 - {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0}, 33 - {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0}, 34 - {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0}, 35 - {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0}, 36 - {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0}, 29 + {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, SP_OPTIONS}, 30 + {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, SP_OPTIONS}, 31 + {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, SP_OPTIONS}, 32 + {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, SP_OPTIONS}, 33 + {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, SP_OPTIONS}, 34 + {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, SP_OPTIONS}, 35 + {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, SP_OPTIONS}, 36 + {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, SP_OPTIONS}, 37 + {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, SP_OPTIONS}, 38 + {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, SP_OPTIONS}, 37 39 38 - {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0}, 39 - {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0}, 40 - {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16}, 41 - {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16}, 40 + {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, SP_OPTIONS}, 41 + {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, SP_OPTIONS}, 42 + {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, SP_OPTIONS16}, 43 + {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, SP_OPTIONS16}, 42 44 #endif 43 45 44 - {"NAND 
16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0}, 45 - {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0}, 46 - {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16}, 47 - {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16}, 46 + {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, SP_OPTIONS}, 47 + {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, SP_OPTIONS}, 48 + {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, SP_OPTIONS16}, 49 + {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, SP_OPTIONS16}, 48 50 49 - {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0}, 50 - {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0}, 51 - {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16}, 52 - {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16}, 51 + {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, SP_OPTIONS}, 52 + {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, SP_OPTIONS}, 53 + {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, SP_OPTIONS16}, 54 + {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, SP_OPTIONS16}, 53 55 54 - {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0}, 55 - {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0}, 56 - {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16}, 57 - {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16}, 56 + {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, SP_OPTIONS}, 57 + {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, SP_OPTIONS}, 58 + {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, SP_OPTIONS16}, 59 + {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, SP_OPTIONS16}, 58 60 59 - {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0}, 60 - {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, 0}, 61 - {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0}, 62 - {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16}, 63 - {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16}, 64 - {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 
128, 0x4000, NAND_BUSWIDTH_16}, 65 - {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16}, 61 + {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, SP_OPTIONS}, 62 + {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, SP_OPTIONS}, 63 + {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, SP_OPTIONS}, 64 + {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, SP_OPTIONS16}, 65 + {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, SP_OPTIONS16}, 66 + {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, SP_OPTIONS16}, 67 + {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, SP_OPTIONS16}, 66 68 67 - {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0}, 69 + {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, SP_OPTIONS}, 68 70 69 71 /* 70 72 * These are the new chips with large page size. The pagesize and the
+2 -4
drivers/net/bonding/bond_main.c
··· 1746 1746 1747 1747 bond_compute_features(bond); 1748 1748 1749 + bond_update_speed_duplex(new_slave); 1750 + 1749 1751 read_lock(&bond->lock); 1750 1752 1751 1753 new_slave->last_arp_rx = jiffies - ··· 1799 1797 pr_debug("Initial state of slave_dev is BOND_LINK_%s\n", 1800 1798 new_slave->link == BOND_LINK_DOWN ? "DOWN" : 1801 1799 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK")); 1802 - 1803 - bond_update_speed_duplex(new_slave); 1804 1800 1805 1801 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { 1806 1802 /* if there is a primary slave, remember it */ ··· 2373 2373 /* prevent it from being the active one */ 2374 2374 bond_set_backup_slave(slave); 2375 2375 } 2376 - 2377 - bond_update_speed_duplex(slave); 2378 2376 2379 2377 pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", 2380 2378 bond->dev->name, slave->dev->name,
+1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 2761 2761 bp->port.pmf = 0; 2762 2762 load_error1: 2763 2763 bnx2x_napi_disable(bp); 2764 + bnx2x_del_all_napi(bp); 2764 2765 2765 2766 /* clear pf_load status, as it was already set */ 2766 2767 if (IS_PF(bp))
+2 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
··· 459 459 460 460 #define UPDATE_QSTAT(s, t) \ 461 461 do { \ 462 - qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \ 463 462 qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \ 463 + qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \ 464 + + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \ 464 465 } while (0) 465 466 466 467 #define UPDATE_QSTAT_OLD(f) \
+8
drivers/net/ethernet/broadcom/tg3.c
··· 4271 4271 tp->link_config.active_speed = tp->link_config.speed; 4272 4272 tp->link_config.active_duplex = tp->link_config.duplex; 4273 4273 4274 + if (tg3_asic_rev(tp) == ASIC_REV_5714) { 4275 + /* With autoneg disabled, 5715 only links up when the 4276 + * advertisement register has the configured speed 4277 + * enabled. 4278 + */ 4279 + tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); 4280 + } 4281 + 4274 4282 bmcr = 0; 4275 4283 switch (tp->link_config.speed) { 4276 4284 default:
+1
drivers/net/ethernet/dec/tulip/Kconfig
··· 108 108 config DE4X5 109 109 tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" 110 110 depends on (PCI || EISA) 111 + depends on VIRT_TO_BUS || ALPHA || PPC || SPARC 111 112 select CRC32 112 113 ---help--- 113 114 This is support for the DIGITAL series of PCI/EISA Ethernet cards.
+19 -14
drivers/net/ethernet/freescale/fec.c
··· 931 931 goto spin_unlock; 932 932 } 933 933 934 - /* Duplex link change */ 935 934 if (phy_dev->link) { 936 - if (fep->full_duplex != phy_dev->duplex) { 937 - fec_restart(ndev, phy_dev->duplex); 938 - /* prevent unnecessary second fec_restart() below */ 935 + if (!fep->link) { 939 936 fep->link = phy_dev->link; 940 937 status_change = 1; 941 938 } 942 - } 943 939 944 - /* Link on or off change */ 945 - if (phy_dev->link != fep->link) { 946 - fep->link = phy_dev->link; 947 - if (phy_dev->link) 940 + if (fep->full_duplex != phy_dev->duplex) 941 + status_change = 1; 942 + 943 + if (phy_dev->speed != fep->speed) { 944 + fep->speed = phy_dev->speed; 945 + status_change = 1; 946 + } 947 + 948 + /* if any of the above changed restart the FEC */ 949 + if (status_change) 948 950 fec_restart(ndev, phy_dev->duplex); 949 - else 951 + } else { 952 + if (fep->link) { 950 953 fec_stop(ndev); 951 - status_change = 1; 954 + status_change = 1; 955 + } 952 956 } 953 957 954 958 spin_unlock: ··· 1329 1325 static void fec_enet_free_buffers(struct net_device *ndev) 1330 1326 { 1331 1327 struct fec_enet_private *fep = netdev_priv(ndev); 1332 - int i; 1328 + unsigned int i; 1333 1329 struct sk_buff *skb; 1334 1330 struct bufdesc *bdp; 1335 1331 ··· 1353 1349 static int fec_enet_alloc_buffers(struct net_device *ndev) 1354 1350 { 1355 1351 struct fec_enet_private *fep = netdev_priv(ndev); 1356 - int i; 1352 + unsigned int i; 1357 1353 struct sk_buff *skb; 1358 1354 struct bufdesc *bdp; 1359 1355 ··· 1438 1434 struct fec_enet_private *fep = netdev_priv(ndev); 1439 1435 1440 1436 /* Don't know what to do yet. */ 1437 + napi_disable(&fep->napi); 1441 1438 fep->opened = 0; 1442 1439 netif_stop_queue(ndev); 1443 1440 fec_stop(ndev); ··· 1595 1590 struct fec_enet_private *fep = netdev_priv(ndev); 1596 1591 struct bufdesc *cbd_base; 1597 1592 struct bufdesc *bdp; 1598 - int i; 1593 + unsigned int i; 1599 1594 1600 1595 /* Allocate memory for buffer descriptors. 
*/ 1601 1596 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
+1
drivers/net/ethernet/freescale/fec.h
··· 240 240 phy_interface_t phy_interface; 241 241 int link; 242 242 int full_duplex; 243 + int speed; 243 244 struct completion mdio_done; 244 245 int irq[FEC_IRQ_NUM]; 245 246 int bufdesc_ex;
+2 -1
drivers/net/ethernet/nxp/lpc_eth.c
··· 1470 1470 } 1471 1471 platform_set_drvdata(pdev, ndev); 1472 1472 1473 - if (lpc_mii_init(pldat) != 0) 1473 + ret = lpc_mii_init(pldat); 1474 + if (ret) 1474 1475 goto err_out_unregister_netdev; 1475 1476 1476 1477 netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
+10
drivers/net/ethernet/renesas/sh_eth.c
··· 2215 2215 /* MDIO bus release function */ 2216 2216 static int sh_mdio_release(struct net_device *ndev) 2217 2217 { 2218 + struct sh_eth_private *mdp = netdev_priv(ndev); 2218 2219 struct mii_bus *bus = dev_get_drvdata(&ndev->dev); 2219 2220 2220 2221 /* unregister mdio bus */ ··· 2229 2228 2230 2229 /* free bitbang info */ 2231 2230 free_mdio_bitbang(bus); 2231 + 2232 + /* free bitbang memory */ 2233 + kfree(mdp->bitbang); 2232 2234 2233 2235 return 0; 2234 2236 } ··· 2261 2257 bitbang->ctrl.ops = &bb_ops; 2262 2258 2263 2259 /* MII controller setting */ 2260 + mdp->bitbang = bitbang; 2264 2261 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); 2265 2262 if (!mdp->mii_bus) { 2266 2263 ret = -ENOMEM; ··· 2441 2436 } 2442 2437 mdp->tsu_addr = ioremap(rtsu->start, 2443 2438 resource_size(rtsu)); 2439 + if (mdp->tsu_addr == NULL) { 2440 + ret = -ENOMEM; 2441 + dev_err(&pdev->dev, "TSU ioremap failed.\n"); 2442 + goto out_release; 2443 + } 2444 2444 mdp->port = devno % 2; 2445 2445 ndev->features = NETIF_F_HW_VLAN_FILTER; 2446 2446 }
+1
drivers/net/ethernet/renesas/sh_eth.h
··· 705 705 const u16 *reg_offset; 706 706 void __iomem *addr; 707 707 void __iomem *tsu_addr; 708 + struct bb_info *bitbang; 708 709 u32 num_rx_ring; 709 710 u32 num_tx_ring; 710 711 dma_addr_t rx_desc_dma;
+2 -1
drivers/net/ethernet/sfc/nic.c
··· 376 376 return false; 377 377 378 378 tx_queue->empty_read_count = 0; 379 - return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; 379 + return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 380 + && tx_queue->write_count - write_count == 1; 380 381 } 381 382 382 383 /* For each entry inserted into the software descriptor ring, create a
+1 -1
drivers/net/ethernet/ti/cpsw.c
··· 1006 1006 /* If there is no more tx desc left free then we need to 1007 1007 * tell the kernel to stop sending us tx frames. 1008 1008 */ 1009 - if (unlikely(cpdma_check_free_tx_desc(priv->txch))) 1009 + if (unlikely(!cpdma_check_free_tx_desc(priv->txch))) 1010 1010 netif_stop_queue(ndev); 1011 1011 1012 1012 return NETDEV_TX_OK;
+1 -1
drivers/net/ethernet/ti/davinci_emac.c
··· 1102 1102 /* If there is no more tx desc left free then we need to 1103 1103 * tell the kernel to stop sending us tx frames. 1104 1104 */ 1105 - if (unlikely(cpdma_check_free_tx_desc(priv->txchan))) 1105 + if (unlikely(!cpdma_check_free_tx_desc(priv->txchan))) 1106 1106 netif_stop_queue(ndev); 1107 1107 1108 1108 return NETDEV_TX_OK;
+9 -6
drivers/net/netconsole.c
··· 666 666 goto done; 667 667 668 668 spin_lock_irqsave(&target_list_lock, flags); 669 + restart: 669 670 list_for_each_entry(nt, &target_list, list) { 670 671 netconsole_target_get(nt); 671 672 if (nt->np.dev == dev) { ··· 679 678 case NETDEV_UNREGISTER: 680 679 /* 681 680 * rtnl_lock already held 681 + * we might sleep in __netpoll_cleanup() 682 682 */ 683 - if (nt->np.dev) { 684 - __netpoll_cleanup(&nt->np); 685 - dev_put(nt->np.dev); 686 - nt->np.dev = NULL; 687 - } 683 + spin_unlock_irqrestore(&target_list_lock, flags); 684 + __netpoll_cleanup(&nt->np); 685 + spin_lock_irqsave(&target_list_lock, flags); 686 + dev_put(nt->np.dev); 687 + nt->np.dev = NULL; 688 688 nt->enabled = 0; 689 689 stopped = true; 690 - break; 690 + netconsole_target_put(nt); 691 + goto restart; 691 692 } 692 693 } 693 694 netconsole_target_put(nt);
+1 -1
drivers/net/usb/Kconfig
··· 268 268 select CRC16 269 269 select CRC32 270 270 help 271 - This option adds support for SMSC LAN95XX based USB 2.0 271 + This option adds support for SMSC LAN75XX based USB 2.0 272 272 Gigabit Ethernet adapters. 273 273 274 274 config USB_NET_SMSC95XX
+1 -10
drivers/net/usb/cdc_mbim.c
··· 68 68 struct cdc_ncm_ctx *ctx; 69 69 struct usb_driver *subdriver = ERR_PTR(-ENODEV); 70 70 int ret = -ENODEV; 71 - u8 data_altsetting = CDC_NCM_DATA_ALTSETTING_NCM; 71 + u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf); 72 72 struct cdc_mbim_state *info = (void *)&dev->data; 73 - 74 - /* see if interface supports MBIM alternate setting */ 75 - if (intf->num_altsetting == 2) { 76 - if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 77 - usb_set_interface(dev->udev, 78 - intf->cur_altsetting->desc.bInterfaceNumber, 79 - CDC_NCM_COMM_ALTSETTING_MBIM); 80 - data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM; 81 - } 82 73 83 74 /* Probably NCM, defer for cdc_ncm_bind */ 84 75 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+32 -17
drivers/net/usb/cdc_ncm.c
··· 55 55 56 56 #define DRIVER_VERSION "14-Mar-2012" 57 57 58 + #if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) 59 + static bool prefer_mbim = true; 60 + #else 61 + static bool prefer_mbim; 62 + #endif 63 + module_param(prefer_mbim, bool, S_IRUGO | S_IWUSR); 64 + MODULE_PARM_DESC(prefer_mbim, "Prefer MBIM setting on dual NCM/MBIM functions"); 65 + 58 66 static void cdc_ncm_txpath_bh(unsigned long param); 59 67 static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); 60 68 static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); ··· 558 550 } 559 551 EXPORT_SYMBOL_GPL(cdc_ncm_unbind); 560 552 561 - static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) 553 + /* Select the MBIM altsetting iff it is preferred and available, 554 + * returning the number of the corresponding data interface altsetting 555 + */ 556 + u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf) 562 557 { 563 - int ret; 558 + struct usb_host_interface *alt; 564 559 565 560 /* The MBIM spec defines a NCM compatible default altsetting, 566 561 * which we may have matched: ··· 579 568 * endpoint descriptors, shall be constructed according to 580 569 * the rules given in section 6 (USB Device Model) of this 581 570 * specification." 
582 - * 583 - * Do not bind to such interfaces, allowing cdc_mbim to handle 584 - * them 585 571 */ 586 - #if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) 587 - if ((intf->num_altsetting == 2) && 588 - !usb_set_interface(dev->udev, 589 - intf->cur_altsetting->desc.bInterfaceNumber, 590 - CDC_NCM_COMM_ALTSETTING_MBIM)) { 591 - if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 592 - return -ENODEV; 593 - else 594 - usb_set_interface(dev->udev, 595 - intf->cur_altsetting->desc.bInterfaceNumber, 596 - CDC_NCM_COMM_ALTSETTING_NCM); 572 + if (prefer_mbim && intf->num_altsetting == 2) { 573 + alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM); 574 + if (alt && cdc_ncm_comm_intf_is_mbim(alt) && 575 + !usb_set_interface(dev->udev, 576 + intf->cur_altsetting->desc.bInterfaceNumber, 577 + CDC_NCM_COMM_ALTSETTING_MBIM)) 578 + return CDC_NCM_DATA_ALTSETTING_MBIM; 597 579 } 598 - #endif 580 + return CDC_NCM_DATA_ALTSETTING_NCM; 581 + } 582 + EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting); 583 + 584 + static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) 585 + { 586 + int ret; 587 + 588 + /* MBIM backwards compatible function? */ 589 + cdc_ncm_select_altsetting(dev, intf); 590 + if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 591 + return -ENODEV; 599 592 600 593 /* NCM data altsetting is always 1 */ 601 594 ret = cdc_ncm_bind_common(dev, intf, 1);
+16 -33
drivers/net/usb/qmi_wwan.c
··· 139 139 140 140 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); 141 141 142 - /* control and data is shared? */ 143 - if (intf->cur_altsetting->desc.bNumEndpoints == 3) { 144 - info->control = intf; 145 - info->data = intf; 146 - goto shared; 147 - } 148 - 149 - /* else require a single interrupt status endpoint on control intf */ 150 - if (intf->cur_altsetting->desc.bNumEndpoints != 1) 151 - goto err; 142 + /* set up initial state */ 143 + info->control = intf; 144 + info->data = intf; 152 145 153 146 /* and a number of CDC descriptors */ 154 147 while (len > 3) { ··· 200 207 buf += h->bLength; 201 208 } 202 209 203 - /* did we find all the required ones? */ 204 - if (!(found & (1 << USB_CDC_HEADER_TYPE)) || 205 - !(found & (1 << USB_CDC_UNION_TYPE))) { 206 - dev_err(&intf->dev, "CDC functional descriptors missing\n"); 207 - goto err; 208 - } 209 - 210 - /* verify CDC Union */ 211 - if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) { 212 - dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0); 213 - goto err; 214 - } 215 - 216 - /* need to save these for unbind */ 217 - info->control = intf; 218 - info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0); 219 - if (!info->data) { 220 - dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0); 221 - goto err; 210 + /* Use separate control and data interfaces if we found a CDC Union */ 211 + if (cdc_union) { 212 + info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0); 213 + if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) { 214 + dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n", 215 + cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0); 216 + goto err; 217 + } 222 218 } 223 219 224 220 /* errors aren't fatal - we can live with the dynamic address */ ··· 217 235 } 218 236 219 237 /* claim data interface and set it up */ 220 - status = 
usb_driver_claim_interface(driver, info->data, dev); 221 - if (status < 0) 222 - goto err; 238 + if (info->control != info->data) { 239 + status = usb_driver_claim_interface(driver, info->data, dev); 240 + if (status < 0) 241 + goto err; 242 + } 223 243 224 - shared: 225 244 status = qmi_wwan_register_subdriver(dev); 226 245 if (status < 0 && info->control != info->data) { 227 246 usb_set_intfdata(info->data, NULL);
+3 -4
drivers/net/wireless/mwifiex/join.c
··· 1117 1117 adhoc_join->bss_descriptor.bssid, 1118 1118 adhoc_join->bss_descriptor.ssid); 1119 1119 1120 - for (i = 0; bss_desc->supported_rates[i] && 1121 - i < MWIFIEX_SUPPORTED_RATES; 1122 - i++) 1123 - ; 1120 + for (i = 0; i < MWIFIEX_SUPPORTED_RATES && 1121 + bss_desc->supported_rates[i]; i++) 1122 + ; 1124 1123 rates_size = i; 1125 1124 1126 1125 /* Copy Data Rates from the Rates recorded in scan response */
+2 -2
drivers/net/wireless/rt2x00/Kconfig
··· 55 55 56 56 config RT2800PCI 57 57 tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" 58 - depends on PCI || RALINK_RT288X || RALINK_RT305X 58 + depends on PCI || SOC_RT288X || SOC_RT305X 59 59 select RT2800_LIB 60 60 select RT2X00_LIB_PCI if PCI 61 - select RT2X00_LIB_SOC if RALINK_RT288X || RALINK_RT305X 61 + select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X 62 62 select RT2X00_LIB_FIRMWARE 63 63 select RT2X00_LIB_CRYPTO 64 64 select CRC_CCITT
+7 -7
drivers/net/wireless/rt2x00/rt2800pci.c
··· 89 89 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 90 90 } 91 91 92 - #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 92 + #if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) 93 93 static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) 94 94 { 95 95 void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE); ··· 107 107 { 108 108 return -ENOMEM; 109 109 } 110 - #endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ 110 + #endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */ 111 111 112 112 #ifdef CONFIG_PCI 113 113 static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) ··· 1177 1177 #endif /* CONFIG_PCI */ 1178 1178 MODULE_LICENSE("GPL"); 1179 1179 1180 - #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 1180 + #if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) 1181 1181 static int rt2800soc_probe(struct platform_device *pdev) 1182 1182 { 1183 1183 return rt2x00soc_probe(pdev, &rt2800pci_ops); ··· 1194 1194 .suspend = rt2x00soc_suspend, 1195 1195 .resume = rt2x00soc_resume, 1196 1196 }; 1197 - #endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ 1197 + #endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */ 1198 1198 1199 1199 #ifdef CONFIG_PCI 1200 1200 static int rt2800pci_probe(struct pci_dev *pci_dev, ··· 1217 1217 { 1218 1218 int ret = 0; 1219 1219 1220 - #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 1220 + #if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) 1221 1221 ret = platform_driver_register(&rt2800soc_driver); 1222 1222 if (ret) 1223 1223 return ret; ··· 1225 1225 #ifdef CONFIG_PCI 1226 1226 ret = pci_register_driver(&rt2800pci_driver); 1227 1227 if (ret) { 1228 - #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 1228 + #if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) 1229 1229 platform_driver_unregister(&rt2800soc_driver); 1230 1230 #endif 1231 1231 return ret; ··· 1240 1240 #ifdef CONFIG_PCI 
1241 1241 pci_unregister_driver(&rt2800pci_driver); 1242 1242 #endif 1243 - #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 1243 + #if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) 1244 1244 platform_driver_unregister(&rt2800soc_driver); 1245 1245 #endif 1246 1246 }
+43 -62
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
··· 1377 1377 1378 1378 void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) 1379 1379 { 1380 - /* dummy routine needed for callback from rtl_op_configure_filter() */ 1380 + struct rtl_priv *rtlpriv = rtl_priv(hw); 1381 + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); 1382 + u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR); 1383 + 1384 + if (rtlpriv->psc.rfpwr_state != ERFON) 1385 + return; 1386 + 1387 + if (check_bssid) { 1388 + u8 tmp; 1389 + if (IS_NORMAL_CHIP(rtlhal->version)) { 1390 + reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); 1391 + tmp = BIT(4); 1392 + } else { 1393 + reg_rcr |= RCR_CBSSID; 1394 + tmp = BIT(4) | BIT(5); 1395 + } 1396 + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, 1397 + (u8 *) (&reg_rcr)); 1398 + _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp); 1399 + } else { 1400 + u8 tmp; 1401 + if (IS_NORMAL_CHIP(rtlhal->version)) { 1402 + reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); 1403 + tmp = BIT(4); 1404 + } else { 1405 + reg_rcr &= ~RCR_CBSSID; 1406 + tmp = BIT(4) | BIT(5); 1407 + } 1408 + reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); 1409 + rtlpriv->cfg->ops->set_hw_reg(hw, 1410 + HW_VAR_RCR, (u8 *) (&reg_rcr)); 1411 + _rtl92cu_set_bcn_ctrl_reg(hw, tmp, 0); 1412 + } 1381 1413 } 1382 1414 1383 1415 /*========================================================================== */ 1384 1416 1385 - static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw, 1386 - enum nl80211_iftype type) 1387 - { 1388 - struct rtl_priv *rtlpriv = rtl_priv(hw); 1389 - u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR); 1390 - struct rtl_hal *rtlhal = rtl_hal(rtlpriv); 1391 - struct rtl_phy *rtlphy = &(rtlpriv->phy); 1392 - u8 filterout_non_associated_bssid = false; 1393 - 1394 - switch (type) { 1395 - case NL80211_IFTYPE_ADHOC: 1396 - case NL80211_IFTYPE_STATION: 1397 - filterout_non_associated_bssid = true; 1398 - break; 1399 - case NL80211_IFTYPE_UNSPECIFIED: 1400 - case NL80211_IFTYPE_AP: 1401 - default: 1402 - break; 1403 - } 1404 - if 
(filterout_non_associated_bssid) { 1405 - if (IS_NORMAL_CHIP(rtlhal->version)) { 1406 - switch (rtlphy->current_io_type) { 1407 - case IO_CMD_RESUME_DM_BY_SCAN: 1408 - reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); 1409 - rtlpriv->cfg->ops->set_hw_reg(hw, 1410 - HW_VAR_RCR, (u8 *)(&reg_rcr)); 1411 - /* enable update TSF */ 1412 - _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4)); 1413 - break; 1414 - case IO_CMD_PAUSE_DM_BY_SCAN: 1415 - reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); 1416 - rtlpriv->cfg->ops->set_hw_reg(hw, 1417 - HW_VAR_RCR, (u8 *)(&reg_rcr)); 1418 - /* disable update TSF */ 1419 - _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); 1420 - break; 1421 - } 1422 - } else { 1423 - reg_rcr |= (RCR_CBSSID); 1424 - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, 1425 - (u8 *)(&reg_rcr)); 1426 - _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5))); 1427 - } 1428 - } else if (filterout_non_associated_bssid == false) { 1429 - if (IS_NORMAL_CHIP(rtlhal->version)) { 1430 - reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); 1431 - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, 1432 - (u8 *)(&reg_rcr)); 1433 - _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); 1434 - } else { 1435 - reg_rcr &= (~RCR_CBSSID); 1436 - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, 1437 - (u8 *)(&reg_rcr)); 1438 - _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0); 1439 - } 1440 - } 1441 - } 1442 - 1443 1417 int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) 1444 1418 { 1419 + struct rtl_priv *rtlpriv = rtl_priv(hw); 1420 + 1445 1421 if (_rtl92cu_set_media_status(hw, type)) 1446 1422 return -EOPNOTSUPP; 1447 - _rtl92cu_set_check_bssid(hw, type); 1423 + 1424 + if (rtlpriv->mac80211.link_state == MAC80211_LINKED) { 1425 + if (type != NL80211_IFTYPE_AP) 1426 + rtl92cu_set_check_bssid(hw, true); 1427 + } else { 1428 + rtl92cu_set_check_bssid(hw, false); 1429 + } 1430 + 1448 1431 return 0; 1449 1432 } 1450 1433 ··· 2041 2058 (shortgi_rate << 4) | (shortgi_rate); 2042 2059 } 2043 2060 
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); 2044 - RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n", 2045 - rtl_read_dword(rtlpriv, REG_ARFR0)); 2046 2061 } 2047 2062 2048 2063 void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
+35 -20
drivers/pci/rom.c
··· 100 100 return min((size_t)(image - rom), size); 101 101 } 102 102 103 + static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size) 104 + { 105 + struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; 106 + loff_t start; 107 + 108 + /* assign the ROM an address if it doesn't have one */ 109 + if (res->parent == NULL && pci_assign_resource(pdev, PCI_ROM_RESOURCE)) 110 + return 0; 111 + start = pci_resource_start(pdev, PCI_ROM_RESOURCE); 112 + *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); 113 + 114 + if (*size == 0) 115 + return 0; 116 + 117 + /* Enable ROM space decodes */ 118 + if (pci_enable_rom(pdev)) 119 + return 0; 120 + 121 + return start; 122 + } 123 + 103 124 /** 104 125 * pci_map_rom - map a PCI ROM to kernel space 105 126 * @pdev: pointer to pci device struct ··· 135 114 void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) 136 115 { 137 116 struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; 138 - loff_t start; 117 + loff_t start = 0; 139 118 void __iomem *rom; 140 119 141 - /* 142 - * Some devices may provide ROMs via a source other than the BAR 143 - */ 144 - if (pdev->rom && pdev->romlen) { 145 - *size = pdev->romlen; 146 - return phys_to_virt(pdev->rom); 147 120 /* 148 121 * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy 149 122 * memory map if the VGA enable bit of the Bridge Control register is 150 123 * set for embedded VGA. 
151 124 */ 152 - } else if (res->flags & IORESOURCE_ROM_SHADOW) { 125 + if (res->flags & IORESOURCE_ROM_SHADOW) { 153 126 /* primary video rom always starts here */ 154 127 start = (loff_t)0xC0000; 155 128 *size = 0x20000; /* cover C000:0 through E000:0 */ ··· 154 139 return (void __iomem *)(unsigned long) 155 140 pci_resource_start(pdev, PCI_ROM_RESOURCE); 156 141 } else { 157 - /* assign the ROM an address if it doesn't have one */ 158 - if (res->parent == NULL && 159 - pci_assign_resource(pdev,PCI_ROM_RESOURCE)) 160 - return NULL; 161 - start = pci_resource_start(pdev, PCI_ROM_RESOURCE); 162 - *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); 163 - if (*size == 0) 164 - return NULL; 165 - 166 - /* Enable ROM space decodes */ 167 - if (pci_enable_rom(pdev)) 168 - return NULL; 142 + start = pci_find_rom(pdev, size); 169 143 } 170 144 } 145 + 146 + /* 147 + * Some devices may provide ROMs via a source other than the BAR 148 + */ 149 + if (!start && pdev->rom && pdev->romlen) { 150 + *size = pdev->romlen; 151 + return phys_to_virt(pdev->rom); 152 + } 153 + 154 + if (!start) 155 + return NULL; 171 156 172 157 rom = ioremap(start, *size); 173 158 if (!rom) {
+60 -1
drivers/pinctrl/pinctrl-at91.c
··· 1277 1277 } 1278 1278 1279 1279 #ifdef CONFIG_PM 1280 + 1281 + static u32 wakeups[MAX_GPIO_BANKS]; 1282 + static u32 backups[MAX_GPIO_BANKS]; 1283 + 1280 1284 static int gpio_irq_set_wake(struct irq_data *d, unsigned state) 1281 1285 { 1282 1286 struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); 1283 1287 unsigned bank = at91_gpio->pioc_idx; 1288 + unsigned mask = 1 << d->hwirq; 1284 1289 1285 1290 if (unlikely(bank >= MAX_GPIO_BANKS)) 1286 1291 return -EINVAL; 1292 + 1293 + if (state) 1294 + wakeups[bank] |= mask; 1295 + else 1296 + wakeups[bank] &= ~mask; 1287 1297 1288 1298 irq_set_irq_wake(at91_gpio->pioc_virq, state); 1289 1299 1290 1300 return 0; 1291 1301 } 1302 + 1303 + void at91_pinctrl_gpio_suspend(void) 1304 + { 1305 + int i; 1306 + 1307 + for (i = 0; i < gpio_banks; i++) { 1308 + void __iomem *pio; 1309 + 1310 + if (!gpio_chips[i]) 1311 + continue; 1312 + 1313 + pio = gpio_chips[i]->regbase; 1314 + 1315 + backups[i] = __raw_readl(pio + PIO_IMR); 1316 + __raw_writel(backups[i], pio + PIO_IDR); 1317 + __raw_writel(wakeups[i], pio + PIO_IER); 1318 + 1319 + if (!wakeups[i]) { 1320 + clk_unprepare(gpio_chips[i]->clock); 1321 + clk_disable(gpio_chips[i]->clock); 1322 + } else { 1323 + printk(KERN_DEBUG "GPIO-%c may wake for %08x\n", 1324 + 'A'+i, wakeups[i]); 1325 + } 1326 + } 1327 + } 1328 + 1329 + void at91_pinctrl_gpio_resume(void) 1330 + { 1331 + int i; 1332 + 1333 + for (i = 0; i < gpio_banks; i++) { 1334 + void __iomem *pio; 1335 + 1336 + if (!gpio_chips[i]) 1337 + continue; 1338 + 1339 + pio = gpio_chips[i]->regbase; 1340 + 1341 + if (!wakeups[i]) { 1342 + if (clk_prepare(gpio_chips[i]->clock) == 0) 1343 + clk_enable(gpio_chips[i]->clock); 1344 + } 1345 + 1346 + __raw_writel(wakeups[i], pio + PIO_IDR); 1347 + __raw_writel(backups[i], pio + PIO_IER); 1348 + } 1349 + } 1350 + 1292 1351 #else 1293 1352 #define gpio_irq_set_wake NULL 1294 - #endif 1353 + #endif /* CONFIG_PM */ 1295 1354 1296 1355 static struct irq_chip gpio_irqchip = { 
1297 1356 .name = "GPIO",
+24 -4
drivers/rtc/rtc-mv.c
··· 14 14 #include <linux/platform_device.h> 15 15 #include <linux/of.h> 16 16 #include <linux/delay.h> 17 + #include <linux/clk.h> 17 18 #include <linux/gfp.h> 18 19 #include <linux/module.h> 19 20 ··· 42 41 struct rtc_device *rtc; 43 42 void __iomem *ioaddr; 44 43 int irq; 44 + struct clk *clk; 45 45 }; 46 46 47 47 static int mv_rtc_set_time(struct device *dev, struct rtc_time *tm) ··· 223 221 struct rtc_plat_data *pdata; 224 222 resource_size_t size; 225 223 u32 rtc_time; 224 + int ret = 0; 226 225 227 226 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 228 227 if (!res) ··· 242 239 if (!pdata->ioaddr) 243 240 return -ENOMEM; 244 241 242 + pdata->clk = devm_clk_get(&pdev->dev, NULL); 243 + /* Not all SoCs require a clock.*/ 244 + if (!IS_ERR(pdata->clk)) 245 + clk_prepare_enable(pdata->clk); 246 + 245 247 /* make sure the 24 hours mode is enabled */ 246 248 rtc_time = readl(pdata->ioaddr + RTC_TIME_REG_OFFS); 247 249 if (rtc_time & RTC_HOURS_12H_MODE) { 248 250 dev_err(&pdev->dev, "24 Hours mode not supported.\n"); 249 - return -EINVAL; 251 + ret = -EINVAL; 252 + goto out; 250 253 } 251 254 252 255 /* make sure it is actually functional */ ··· 261 252 rtc_time = readl(pdata->ioaddr + RTC_TIME_REG_OFFS); 262 253 if (rtc_time == 0x01000000) { 263 254 dev_err(&pdev->dev, "internal RTC not ticking\n"); 264 - return -ENODEV; 255 + ret = -ENODEV; 256 + goto out; 265 257 } 266 258 } 267 259 ··· 278 268 } else 279 269 pdata->rtc = rtc_device_register(pdev->name, &pdev->dev, 280 270 &mv_rtc_ops, THIS_MODULE); 281 - if (IS_ERR(pdata->rtc)) 282 - return PTR_ERR(pdata->rtc); 271 + if (IS_ERR(pdata->rtc)) { 272 + ret = PTR_ERR(pdata->rtc); 273 + goto out; 274 + } 283 275 284 276 if (pdata->irq >= 0) { 285 277 writel(0, pdata->ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); ··· 294 282 } 295 283 296 284 return 0; 285 + out: 286 + if (!IS_ERR(pdata->clk)) 287 + clk_disable_unprepare(pdata->clk); 288 + 289 + return ret; 297 290 } 298 291 299 292 static int __exit 
mv_rtc_remove(struct platform_device *pdev) ··· 309 292 device_init_wakeup(&pdev->dev, 0); 310 293 311 294 rtc_device_unregister(pdata->rtc); 295 + if (!IS_ERR(pdata->clk)) 296 + clk_disable_unprepare(pdata->clk); 297 + 312 298 return 0; 313 299 } 314 300
+62 -7
drivers/s390/block/scm_blk.c
··· 135 135 .release = scm_release, 136 136 }; 137 137 138 + static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) 139 + { 140 + return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; 141 + } 142 + 138 143 static void scm_request_prepare(struct scm_request *scmrq) 139 144 { 140 145 struct scm_blk_dev *bdev = scmrq->bdev; ··· 200 195 201 196 scm_release_cluster(scmrq); 202 197 blk_requeue_request(bdev->rq, scmrq->request); 198 + atomic_dec(&bdev->queued_reqs); 203 199 scm_request_done(scmrq); 204 200 scm_ensure_queue_restart(bdev); 205 201 } 206 202 207 203 void scm_request_finish(struct scm_request *scmrq) 208 204 { 205 + struct scm_blk_dev *bdev = scmrq->bdev; 206 + 209 207 scm_release_cluster(scmrq); 210 208 blk_end_request_all(scmrq->request, scmrq->error); 209 + atomic_dec(&bdev->queued_reqs); 211 210 scm_request_done(scmrq); 212 211 } 213 212 ··· 227 218 if (req->cmd_type != REQ_TYPE_FS) 228 219 continue; 229 220 221 + if (!scm_permit_request(bdev, req)) { 222 + scm_ensure_queue_restart(bdev); 223 + return; 224 + } 230 225 scmrq = scm_request_fetch(); 231 226 if (!scmrq) { 232 227 SCM_LOG(5, "no request"); ··· 244 231 return; 245 232 } 246 233 if (scm_need_cluster_request(scmrq)) { 234 + atomic_inc(&bdev->queued_reqs); 247 235 blk_start_request(req); 248 236 scm_initiate_cluster_request(scmrq); 249 237 return; 250 238 } 251 239 scm_request_prepare(scmrq); 240 + atomic_inc(&bdev->queued_reqs); 252 241 blk_start_request(req); 253 242 254 243 ret = scm_start_aob(scmrq->aob); ··· 259 244 scm_request_requeue(scmrq); 260 245 return; 261 246 } 262 - atomic_inc(&bdev->queued_reqs); 263 247 } 264 248 } 265 249 ··· 294 280 tasklet_hi_schedule(&bdev->tasklet); 295 281 } 296 282 283 + static void scm_blk_handle_error(struct scm_request *scmrq) 284 + { 285 + struct scm_blk_dev *bdev = scmrq->bdev; 286 + unsigned long flags; 287 + 288 + if (scmrq->error != -EIO) 289 + goto restart; 290 + 291 + /* For -EIO the response block is valid. 
*/ 292 + switch (scmrq->aob->response.eqc) { 293 + case EQC_WR_PROHIBIT: 294 + spin_lock_irqsave(&bdev->lock, flags); 295 + if (bdev->state != SCM_WR_PROHIBIT) 296 + pr_info("%lu: Write access to the SCM increment is suspended\n", 297 + (unsigned long) bdev->scmdev->address); 298 + bdev->state = SCM_WR_PROHIBIT; 299 + spin_unlock_irqrestore(&bdev->lock, flags); 300 + goto requeue; 301 + default: 302 + break; 303 + } 304 + 305 + restart: 306 + if (!scm_start_aob(scmrq->aob)) 307 + return; 308 + 309 + requeue: 310 + spin_lock_irqsave(&bdev->rq_lock, flags); 311 + scm_request_requeue(scmrq); 312 + spin_unlock_irqrestore(&bdev->rq_lock, flags); 313 + } 314 + 297 315 static void scm_blk_tasklet(struct scm_blk_dev *bdev) 298 316 { 299 317 struct scm_request *scmrq; ··· 339 293 spin_unlock_irqrestore(&bdev->lock, flags); 340 294 341 295 if (scmrq->error && scmrq->retries-- > 0) { 342 - if (scm_start_aob(scmrq->aob)) { 343 - spin_lock_irqsave(&bdev->rq_lock, flags); 344 - scm_request_requeue(scmrq); 345 - spin_unlock_irqrestore(&bdev->rq_lock, flags); 346 - } 296 + scm_blk_handle_error(scmrq); 297 + 347 298 /* Request restarted or requeued, handle next. 
*/ 348 299 spin_lock_irqsave(&bdev->lock, flags); 349 300 continue; ··· 353 310 } 354 311 355 312 scm_request_finish(scmrq); 356 - atomic_dec(&bdev->queued_reqs); 357 313 spin_lock_irqsave(&bdev->lock, flags); 358 314 } 359 315 spin_unlock_irqrestore(&bdev->lock, flags); ··· 374 332 } 375 333 376 334 bdev->scmdev = scmdev; 335 + bdev->state = SCM_OPER; 377 336 spin_lock_init(&bdev->rq_lock); 378 337 spin_lock_init(&bdev->lock); 379 338 INIT_LIST_HEAD(&bdev->finished_requests); ··· 437 394 del_gendisk(bdev->gendisk); 438 395 blk_cleanup_queue(bdev->gendisk->queue); 439 396 put_disk(bdev->gendisk); 397 + } 398 + 399 + void scm_blk_set_available(struct scm_blk_dev *bdev) 400 + { 401 + unsigned long flags; 402 + 403 + spin_lock_irqsave(&bdev->lock, flags); 404 + if (bdev->state == SCM_WR_PROHIBIT) 405 + pr_info("%lu: Write access to the SCM increment is restored\n", 406 + (unsigned long) bdev->scmdev->address); 407 + bdev->state = SCM_OPER; 408 + spin_unlock_irqrestore(&bdev->lock, flags); 440 409 } 441 410 442 411 static int __init scm_blk_init(void)
+2
drivers/s390/block/scm_blk.h
··· 21 21 spinlock_t rq_lock; /* guard the request queue */ 22 22 spinlock_t lock; /* guard the rest of the blockdev */ 23 23 atomic_t queued_reqs; 24 + enum {SCM_OPER, SCM_WR_PROHIBIT} state; 24 25 struct list_head finished_requests; 25 26 #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE 26 27 struct list_head cluster_list; ··· 49 48 50 49 int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *); 51 50 void scm_blk_dev_cleanup(struct scm_blk_dev *); 51 + void scm_blk_set_available(struct scm_blk_dev *); 52 52 void scm_blk_irq(struct scm_device *, void *, int); 53 53 54 54 void scm_request_finish(struct scm_request *);
+17 -6
drivers/s390/block/scm_drv.c
··· 13 13 #include <asm/eadm.h> 14 14 #include "scm_blk.h" 15 15 16 - static void notify(struct scm_device *scmdev) 16 + static void scm_notify(struct scm_device *scmdev, enum scm_event event) 17 17 { 18 - pr_info("%lu: The capabilities of the SCM increment changed\n", 19 - (unsigned long) scmdev->address); 20 - SCM_LOG(2, "State changed"); 21 - SCM_LOG_STATE(2, scmdev); 18 + struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); 19 + 20 + switch (event) { 21 + case SCM_CHANGE: 22 + pr_info("%lu: The capabilities of the SCM increment changed\n", 23 + (unsigned long) scmdev->address); 24 + SCM_LOG(2, "State changed"); 25 + SCM_LOG_STATE(2, scmdev); 26 + break; 27 + case SCM_AVAIL: 28 + SCM_LOG(2, "Increment available"); 29 + SCM_LOG_STATE(2, scmdev); 30 + scm_blk_set_available(bdev); 31 + break; 32 + } 22 33 } 23 34 24 35 static int scm_probe(struct scm_device *scmdev) ··· 75 64 .name = "scm_block", 76 65 .owner = THIS_MODULE, 77 66 }, 78 - .notify = notify, 67 + .notify = scm_notify, 79 68 .probe = scm_probe, 80 69 .remove = scm_remove, 81 70 .handler = scm_blk_irq,
+2
drivers/s390/char/sclp_cmd.c
··· 627 627 struct read_storage_sccb *sccb; 628 628 int i, id, assigned, rc; 629 629 630 + if (OLDMEM_BASE) /* No standby memory in kdump mode */ 631 + return 0; 630 632 if (!early_read_info_sccb_valid) 631 633 return 0; 632 634 if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
+17
drivers/s390/cio/chsc.c
··· 433 433 " failed (rc=%d).\n", ret); 434 434 } 435 435 436 + static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area) 437 + { 438 + int ret; 439 + 440 + CIO_CRW_EVENT(4, "chsc: scm available information\n"); 441 + if (sei_area->rs != 7) 442 + return; 443 + 444 + ret = scm_process_availability_information(); 445 + if (ret) 446 + CIO_CRW_EVENT(0, "chsc: process availability information" 447 + " failed (rc=%d).\n", ret); 448 + } 449 + 436 450 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) 437 451 { 438 452 switch (sei_area->cc) { ··· 481 467 break; 482 468 case 12: /* scm change notification */ 483 469 chsc_process_sei_scm_change(sei_area); 470 + break; 471 + case 14: /* scm available notification */ 472 + chsc_process_sei_scm_avail(sei_area); 484 473 break; 485 474 default: /* other stuff */ 486 475 CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
+2
drivers/s390/cio/chsc.h
··· 156 156 157 157 #ifdef CONFIG_SCM_BUS 158 158 int scm_update_information(void); 159 + int scm_process_availability_information(void); 159 160 #else /* CONFIG_SCM_BUS */ 160 161 static inline int scm_update_information(void) { return 0; } 162 + static inline int scm_process_availability_information(void) { return 0; } 161 163 #endif /* CONFIG_SCM_BUS */ 162 164 163 165
+17 -1
drivers/s390/cio/scm.c
··· 211 211 goto out; 212 212 scmdrv = to_scm_drv(scmdev->dev.driver); 213 213 if (changed && scmdrv->notify) 214 - scmdrv->notify(scmdev); 214 + scmdrv->notify(scmdev, SCM_CHANGE); 215 215 out: 216 216 device_unlock(&scmdev->dev); 217 217 if (changed) ··· 295 295 free_page((unsigned long)scm_info); 296 296 297 297 return ret; 298 + } 299 + 300 + static int scm_dev_avail(struct device *dev, void *unused) 301 + { 302 + struct scm_driver *scmdrv = to_scm_drv(dev->driver); 303 + struct scm_device *scmdev = to_scm_dev(dev); 304 + 305 + if (dev->driver && scmdrv->notify) 306 + scmdrv->notify(scmdev, SCM_AVAIL); 307 + 308 + return 0; 309 + } 310 + 311 + int scm_process_availability_information(void) 312 + { 313 + return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail); 298 314 } 299 315 300 316 static int __init scm_init(void)
+1
drivers/s390/net/qeth_core.h
··· 916 916 void *reply_param); 917 917 int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); 918 918 int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); 919 + int qeth_get_elements_for_frags(struct sk_buff *); 919 920 int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, 920 921 struct sk_buff *, struct qeth_hdr *, int, int, int); 921 922 int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
+38 -7
drivers/s390/net/qeth_core_main.c
··· 3679 3679 } 3680 3680 EXPORT_SYMBOL_GPL(qeth_get_priority_queue); 3681 3681 3682 + int qeth_get_elements_for_frags(struct sk_buff *skb) 3683 + { 3684 + int cnt, length, e, elements = 0; 3685 + struct skb_frag_struct *frag; 3686 + char *data; 3687 + 3688 + for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3689 + frag = &skb_shinfo(skb)->frags[cnt]; 3690 + data = (char *)page_to_phys(skb_frag_page(frag)) + 3691 + frag->page_offset; 3692 + length = frag->size; 3693 + e = PFN_UP((unsigned long)data + length - 1) - 3694 + PFN_DOWN((unsigned long)data); 3695 + elements += e; 3696 + } 3697 + return elements; 3698 + } 3699 + EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); 3700 + 3682 3701 int qeth_get_elements_no(struct qeth_card *card, void *hdr, 3683 3702 struct sk_buff *skb, int elems) 3684 3703 { ··· 3705 3686 int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - 3706 3687 PFN_DOWN((unsigned long)skb->data); 3707 3688 3708 - elements_needed += skb_shinfo(skb)->nr_frags; 3689 + elements_needed += qeth_get_elements_for_frags(skb); 3690 + 3709 3691 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { 3710 3692 QETH_DBF_MESSAGE(2, "Invalid size of IP packet " 3711 3693 "(Number=%d / Length=%d). 
Discarded.\n", ··· 3791 3771 3792 3772 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3793 3773 frag = &skb_shinfo(skb)->frags[cnt]; 3794 - buffer->element[element].addr = (char *) 3795 - page_to_phys(skb_frag_page(frag)) 3796 - + frag->page_offset; 3797 - buffer->element[element].length = frag->size; 3798 - buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG; 3799 - element++; 3774 + data = (char *)page_to_phys(skb_frag_page(frag)) + 3775 + frag->page_offset; 3776 + length = frag->size; 3777 + while (length > 0) { 3778 + length_here = PAGE_SIZE - 3779 + ((unsigned long) data % PAGE_SIZE); 3780 + if (length < length_here) 3781 + length_here = length; 3782 + 3783 + buffer->element[element].addr = data; 3784 + buffer->element[element].length = length_here; 3785 + buffer->element[element].eflags = 3786 + SBAL_EFLAGS_MIDDLE_FRAG; 3787 + length -= length_here; 3788 + data += length_here; 3789 + element++; 3790 + } 3800 3791 } 3801 3792 3802 3793 if (buffer->element[element - 1].eflags)
+15 -8
drivers/s390/net/qeth_l3_main.c
··· 623 623 return rc; 624 624 } 625 625 626 - static void qeth_l3_correct_routing_type(struct qeth_card *card, 626 + static int qeth_l3_correct_routing_type(struct qeth_card *card, 627 627 enum qeth_routing_types *type, enum qeth_prot_versions prot) 628 628 { 629 629 if (card->info.type == QETH_CARD_TYPE_IQD) { ··· 632 632 case PRIMARY_CONNECTOR: 633 633 case SECONDARY_CONNECTOR: 634 634 case MULTICAST_ROUTER: 635 - return; 635 + return 0; 636 636 default: 637 637 goto out_inval; 638 638 } ··· 641 641 case NO_ROUTER: 642 642 case PRIMARY_ROUTER: 643 643 case SECONDARY_ROUTER: 644 - return; 644 + return 0; 645 645 case MULTICAST_ROUTER: 646 646 if (qeth_is_ipafunc_supported(card, prot, 647 647 IPA_OSA_MC_ROUTER)) 648 - return; 648 + return 0; 649 649 default: 650 650 goto out_inval; 651 651 } 652 652 } 653 653 out_inval: 654 654 *type = NO_ROUTER; 655 + return -EINVAL; 655 656 } 656 657 657 658 int qeth_l3_setrouting_v4(struct qeth_card *card) ··· 661 660 662 661 QETH_CARD_TEXT(card, 3, "setrtg4"); 663 662 664 - qeth_l3_correct_routing_type(card, &card->options.route4.type, 663 + rc = qeth_l3_correct_routing_type(card, &card->options.route4.type, 665 664 QETH_PROT_IPV4); 665 + if (rc) 666 + return rc; 666 667 667 668 rc = qeth_l3_send_setrouting(card, card->options.route4.type, 668 669 QETH_PROT_IPV4); ··· 686 683 687 684 if (!qeth_is_supported(card, IPA_IPV6)) 688 685 return 0; 689 - qeth_l3_correct_routing_type(card, &card->options.route6.type, 686 + rc = qeth_l3_correct_routing_type(card, &card->options.route6.type, 690 687 QETH_PROT_IPV6); 688 + if (rc) 689 + return rc; 691 690 692 691 rc = qeth_l3_send_setrouting(card, card->options.route6.type, 693 692 QETH_PROT_IPV6); ··· 2903 2898 tcp_hdr(skb)->doff * 4; 2904 2899 int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); 2905 2900 int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); 2906 - elements += skb_shinfo(skb)->nr_frags; 2901 + 2902 + elements += qeth_get_elements_for_frags(skb); 2903 + 
2907 2904 return elements; 2908 2905 } 2909 2906 ··· 3355 3348 rc = -ENODEV; 3356 3349 goto out_remove; 3357 3350 } 3358 - qeth_trace_features(card); 3359 3351 3360 3352 if (!card->dev && qeth_l3_setup_netdev(card)) { 3361 3353 rc = -ENODEV; ··· 3431 3425 qeth_l3_set_multicast_list(card->dev); 3432 3426 rtnl_unlock(); 3433 3427 } 3428 + qeth_trace_features(card); 3434 3429 /* let user_space know that device is online */ 3435 3430 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 3436 3431 mutex_unlock(&card->conf_mutex);
+2
drivers/s390/net/qeth_l3_sys.c
··· 87 87 rc = qeth_l3_setrouting_v6(card); 88 88 } 89 89 out: 90 + if (rc) 91 + route->type = old_route_type; 90 92 mutex_unlock(&card->conf_mutex); 91 93 return rc ? rc : count; 92 94 }
+10 -6
drivers/staging/comedi/drivers/dt9812.c
··· 947 947 unsigned int *data) 948 948 { 949 949 struct comedi_dt9812 *devpriv = dev->private; 950 + unsigned int channel = CR_CHAN(insn->chanspec); 950 951 int n; 951 952 u8 bits = 0; 952 953 953 954 dt9812_digital_in(devpriv->slot, &bits); 954 955 for (n = 0; n < insn->n; n++) 955 - data[n] = ((1 << insn->chanspec) & bits) != 0; 956 + data[n] = ((1 << channel) & bits) != 0; 956 957 return n; 957 958 } 958 959 ··· 962 961 unsigned int *data) 963 962 { 964 963 struct comedi_dt9812 *devpriv = dev->private; 964 + unsigned int channel = CR_CHAN(insn->chanspec); 965 965 int n; 966 966 u8 bits = 0; 967 967 968 968 dt9812_digital_out_shadow(devpriv->slot, &bits); 969 969 for (n = 0; n < insn->n; n++) { 970 - u8 mask = 1 << insn->chanspec; 970 + u8 mask = 1 << channel; 971 971 972 972 bits &= ~mask; 973 973 if (data[n]) ··· 983 981 unsigned int *data) 984 982 { 985 983 struct comedi_dt9812 *devpriv = dev->private; 984 + unsigned int channel = CR_CHAN(insn->chanspec); 986 985 int n; 987 986 988 987 for (n = 0; n < insn->n; n++) { 989 988 u16 value = 0; 990 989 991 - dt9812_analog_in(devpriv->slot, insn->chanspec, &value, 992 - DT9812_GAIN_1); 990 + dt9812_analog_in(devpriv->slot, channel, &value, DT9812_GAIN_1); 993 991 data[n] = value; 994 992 } 995 993 return n; ··· 1000 998 unsigned int *data) 1001 999 { 1002 1000 struct comedi_dt9812 *devpriv = dev->private; 1001 + unsigned int channel = CR_CHAN(insn->chanspec); 1003 1002 int n; 1004 1003 u16 value; 1005 1004 1006 1005 for (n = 0; n < insn->n; n++) { 1007 1006 value = 0; 1008 - dt9812_analog_out_shadow(devpriv->slot, insn->chanspec, &value); 1007 + dt9812_analog_out_shadow(devpriv->slot, channel, &value); 1009 1008 data[n] = value; 1010 1009 } 1011 1010 return n; ··· 1017 1014 unsigned int *data) 1018 1015 { 1019 1016 struct comedi_dt9812 *devpriv = dev->private; 1017 + unsigned int channel = CR_CHAN(insn->chanspec); 1020 1018 int n; 1021 1019 1022 1020 for (n = 0; n < insn->n; n++) 1023 - dt9812_analog_out(devpriv->slot, insn->chanspec, data[n]); 1021 + dt9812_analog_out(devpriv->slot, channel, data[n]); 1024 1022 return n; 1025 1023 } 1026 1024
+19 -12
drivers/staging/comedi/drivers/usbdux.c
··· 730 730 static int usbduxsub_start(struct usbduxsub *usbduxsub) 731 731 { 732 732 int errcode = 0; 733 - uint8_t local_transfer_buffer[16]; 733 + uint8_t *local_transfer_buffer; 734 + 735 + local_transfer_buffer = kmalloc(1, GFP_KERNEL); 736 + if (!local_transfer_buffer) 737 + return -ENOMEM; 734 738 735 739 /* 7f92 to zero */ 736 - local_transfer_buffer[0] = 0; 740 + *local_transfer_buffer = 0; 737 741 errcode = usb_control_msg(usbduxsub->usbdev, 738 742 /* create a pipe for a control transfer */ 739 743 usb_sndctrlpipe(usbduxsub->usbdev, 0), ··· 755 751 1, 756 752 /* Timeout */ 757 753 BULK_TIMEOUT); 758 - if (errcode < 0) { 754 + if (errcode < 0) 759 755 dev_err(&usbduxsub->interface->dev, 760 756 "comedi_: control msg failed (start)\n"); 761 - return errcode; 762 - } 763 - return 0; 757 + 758 + kfree(local_transfer_buffer); 759 + return errcode; 764 760 } 765 761 766 762 static int usbduxsub_stop(struct usbduxsub *usbduxsub) 767 763 { 768 764 int errcode = 0; 765 + uint8_t *local_transfer_buffer; 769 766 770 - uint8_t local_transfer_buffer[16]; 767 + local_transfer_buffer = kmalloc(1, GFP_KERNEL); 768 + if (!local_transfer_buffer) 769 + return -ENOMEM; 771 770 772 771 /* 7f92 to one */ 773 - local_transfer_buffer[0] = 1; 772 + *local_transfer_buffer = 1; 774 773 errcode = usb_control_msg(usbduxsub->usbdev, 775 774 usb_sndctrlpipe(usbduxsub->usbdev, 0), 776 775 /* bRequest, "Firmware" */ ··· 788 781 1, 789 782 /* Timeout */ 790 783 BULK_TIMEOUT); 791 - if (errcode < 0) { 784 + if (errcode < 0) 792 785 dev_err(&usbduxsub->interface->dev, 793 786 "comedi_: control msg failed (stop)\n"); 794 - return errcode; 795 - } 796 - return 0; 787 + 788 + kfree(local_transfer_buffer); 789 + return errcode; 797 790 } 798 791 799 792 static int usbduxsub_upload(struct usbduxsub *usbduxsub,
+18 -12
drivers/staging/comedi/drivers/usbduxfast.c
··· 436 436 static int usbduxfastsub_start(struct usbduxfastsub_s *udfs) 437 437 { 438 438 int ret; 439 - unsigned char local_transfer_buffer[16]; 439 + unsigned char *local_transfer_buffer; 440 + 441 + local_transfer_buffer = kmalloc(1, GFP_KERNEL); 442 + if (!local_transfer_buffer) 443 + return -ENOMEM; 440 444 441 445 /* 7f92 to zero */ 442 - local_transfer_buffer[0] = 0; 446 + *local_transfer_buffer = 0; 443 447 /* bRequest, "Firmware" */ 444 448 ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0), 445 449 USBDUXFASTSUB_FIRMWARE, ··· 454 450 local_transfer_buffer, 455 451 1, /* Length */ 456 452 EZTIMEOUT); /* Timeout */ 457 - if (ret < 0) { 453 + if (ret < 0) 458 454 dev_err(&udfs->interface->dev, 459 455 "control msg failed (start)\n"); 460 - return ret; 461 - } 462 456 463 - return 0; 457 + kfree(local_transfer_buffer); 458 + return ret; 464 459 } 465 460 466 461 static int usbduxfastsub_stop(struct usbduxfastsub_s *udfs) 467 462 { 468 463 int ret; 469 - unsigned char local_transfer_buffer[16]; 464 + unsigned char *local_transfer_buffer; 465 + 466 + local_transfer_buffer = kmalloc(1, GFP_KERNEL); 467 + if (!local_transfer_buffer) 468 + return -ENOMEM; 470 469 471 470 /* 7f92 to one */ 472 - local_transfer_buffer[0] = 1; 471 + *local_transfer_buffer = 1; 473 472 /* bRequest, "Firmware" */ 474 473 ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0), 475 474 USBDUXFASTSUB_FIRMWARE, ··· 481 474 0x0000, /* Index */ 482 475 local_transfer_buffer, 1, /* Length */ 483 476 EZTIMEOUT); /* Timeout */ 484 - if (ret < 0) { 477 + if (ret < 0) 485 478 dev_err(&udfs->interface->dev, 486 479 "control msg failed (stop)\n"); 487 - return ret; 488 - } 489 480 490 - return 0; 481 + kfree(local_transfer_buffer); 482 + return ret; 491 483 } 492 484 493 485 static int usbduxfastsub_upload(struct usbduxfastsub_s *udfs,
+17 -10
drivers/staging/comedi/drivers/usbduxsigma.c
··· 681 681 static int usbduxsub_start(struct usbduxsub *usbduxsub) 682 682 { 683 683 int errcode = 0; 684 - uint8_t local_transfer_buffer[16]; 684 + uint8_t *local_transfer_buffer; 685 + 686 + local_transfer_buffer = kmalloc(16, GFP_KERNEL); 687 + if (!local_transfer_buffer) 688 + return -ENOMEM; 685 689 686 690 /* 7f92 to zero */ 687 691 local_transfer_buffer[0] = 0; ··· 706 702 1, 707 703 /* Timeout */ 708 704 BULK_TIMEOUT); 709 - if (errcode < 0) { 705 + if (errcode < 0) 710 706 dev_err(&usbduxsub->interface->dev, 711 707 "comedi_: control msg failed (start)\n"); 712 - return errcode; 713 - } 714 - return 0; 708 + 709 + kfree(local_transfer_buffer); 710 + return errcode; 715 711 } 716 712 717 713 static int usbduxsub_stop(struct usbduxsub *usbduxsub) 718 714 { 719 715 int errcode = 0; 716 + uint8_t *local_transfer_buffer; 720 717 721 - uint8_t local_transfer_buffer[16]; 718 + local_transfer_buffer = kmalloc(16, GFP_KERNEL); 719 + if (!local_transfer_buffer) 720 + return -ENOMEM; 722 721 723 722 /* 7f92 to one */ 724 723 local_transfer_buffer[0] = 1; ··· 739 732 1, 740 733 /* Timeout */ 741 734 BULK_TIMEOUT); 742 - if (errcode < 0) { 735 + if (errcode < 0) 743 736 dev_err(&usbduxsub->interface->dev, 744 737 "comedi_: control msg failed (stop)\n"); 745 - return errcode; 746 - } 747 - return 0; 738 + 739 + kfree(local_transfer_buffer); 740 + return errcode; 748 741 } 749 742 750 743 static int usbduxsub_upload(struct usbduxsub *usbduxsub,
+12 -11
drivers/staging/imx-drm/ipuv3-crtc.c
··· 483 483 goto err_out; 484 484 } 485 485 486 - ipu_crtc->irq = ipu_idmac_channel_irq(ipu, ipu_crtc->ipu_ch, 487 - IPU_IRQ_EOF); 488 - ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0, 489 - "imx_drm", ipu_crtc); 490 - if (ret < 0) { 491 - dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret); 492 - goto err_out; 493 - } 494 - 495 - disable_irq(ipu_crtc->irq); 496 - 497 486 return 0; 498 487 err_out: 499 488 ipu_put_resources(ipu_crtc); ··· 493 504 static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, 494 505 struct ipu_client_platformdata *pdata) 495 506 { 507 + struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); 496 508 int ret; 497 509 498 510 ret = ipu_get_resources(ipu_crtc, pdata); ··· 511 521 dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); 512 522 goto err_put_resources; 513 523 } 524 + 525 + ipu_crtc->irq = ipu_idmac_channel_irq(ipu, ipu_crtc->ipu_ch, 526 + IPU_IRQ_EOF); 527 + ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0, 528 + "imx_drm", ipu_crtc); 529 + if (ret < 0) { 530 + dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret); 531 + goto err_put_resources; 532 + } 533 + 534 + disable_irq(ipu_crtc->irq); 514 535 515 536 return 0; 516 537
+26 -44
drivers/staging/tidspbridge/rmgr/drv.c
··· 76 76 struct node_res_object **node_res_obj = 77 77 (struct node_res_object **)node_resource; 78 78 struct process_context *ctxt = (struct process_context *)process_ctxt; 79 - int status = 0; 80 79 int retval; 81 80 82 81 *node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL); 83 - if (!*node_res_obj) { 84 - status = -ENOMEM; 85 - goto func_end; 86 - } 82 + if (!*node_res_obj) 83 + return -ENOMEM; 87 84 88 85 (*node_res_obj)->node = hnode; 89 - retval = idr_get_new(ctxt->node_id, *node_res_obj, 90 - &(*node_res_obj)->id); 91 - if (retval == -EAGAIN) { 92 - if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) { 93 - pr_err("%s: OUT OF MEMORY\n", __func__); 94 - status = -ENOMEM; 95 - goto func_end; 96 - } 97 - 98 - retval = idr_get_new(ctxt->node_id, *node_res_obj, 99 - &(*node_res_obj)->id); 86 + retval = idr_alloc(ctxt->node_id, *node_res_obj, 0, 0, GFP_KERNEL); 87 + if (retval >= 0) { 88 + (*node_res_obj)->id = retval; 89 + return 0; 100 90 } 101 - 92 + kfree(*node_res_obj); 93 + 94 + if (retval == -ENOSPC) { 102 95 pr_err("%s: FAILED, IDR is FULL\n", __func__); 103 - status = -EFAULT; 96 + return -EFAULT; 97 + } else { 98 + pr_err("%s: OUT OF MEMORY\n", __func__); 99 + return -ENOMEM; 104 100 } 105 - func_end: 106 - if (status) 107 - kfree(*node_res_obj); 108 - 109 - return status; 110 101 } 111 102 112 103 /* Release all Node resources and its context ··· 192 201 struct strm_res_object **pstrm_res = 193 202 (struct strm_res_object **)strm_res; 194 203 struct process_context *ctxt = (struct process_context *)process_ctxt; 195 - int status = 0; 196 204 int retval; 197 205 198 206 *pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL); 199 - if (*pstrm_res == NULL) { 200 - status = -EFAULT; 201 - goto func_end; 202 - } 207 + if (*pstrm_res == NULL) 208 + return -EFAULT; 203 209 204 210 (*pstrm_res)->stream = stream_obj; 205 - retval = idr_get_new(ctxt->stream_id, *pstrm_res, 206 - &(*pstrm_res)->id); 207 - if (retval == -EAGAIN) { 208 - if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) { 209 - pr_err("%s: OUT OF MEMORY\n", __func__); 210 - status = -ENOMEM; 211 - goto func_end; 212 - } 213 - 214 - retval = idr_get_new(ctxt->stream_id, *pstrm_res, 215 - &(*pstrm_res)->id); 211 + retval = idr_alloc(ctxt->stream_id, *pstrm_res, 0, 0, GFP_KERNEL); 212 + if (retval >= 0) { 213 + (*pstrm_res)->id = retval; 214 + return 0; 216 215 } 217 - if (retval) { 216 + 217 + if (retval == -ENOSPC) { 218 218 pr_err("%s: FAILED, IDR is FULL\n", __func__); 219 - status = -EPERM; 219 + return -EPERM; 220 + } else { 221 + pr_err("%s: OUT OF MEMORY\n", __func__); 222 + return -ENOMEM; 220 223 } 221 - 222 - func_end: 223 - return status; 224 224 } 225 225 226 226 static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
+1 -1
drivers/staging/vt6656/card.c
··· 790 790 if ((~uLowNextTBTT) < uLowRemain) 791 791 qwTSF = ((qwTSF >> 32) + 1) << 32; 792 792 793 - qwTSF = (qwTSF & 0xffffffff00000000UL) | 793 + qwTSF = (qwTSF & 0xffffffff00000000ULL) | 794 794 (u64)(uLowNextTBTT + uLowRemain); 795 795 796 796 return (qwTSF);
-4
drivers/staging/vt6656/main_usb.c
··· 669 669 if (device->flags & DEVICE_FLAGS_OPENED) 670 670 device_close(device->dev); 671 671 672 - usb_put_dev(interface_to_usbdev(intf)); 673 - 674 672 return 0; 675 673 } 676 674 ··· 678 680 679 681 if (!device || !device->dev) 680 682 return -ENODEV; 681 - 682 - usb_get_dev(interface_to_usbdev(intf)); 683 683 684 684 if (!(device->flags & DEVICE_FLAGS_OPENED)) 685 685 device_open(device->dev);
+10 -15
drivers/staging/zcache/ramster/tcp.c
··· 300 300 301 301 static int r2net_prep_nsw(struct r2net_node *nn, struct r2net_status_wait *nsw) 302 302 { 303 - int ret = 0; 303 + int ret; 304 304 305 - do { 306 - if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) { 307 - ret = -EAGAIN; 308 - break; 309 - } 310 - spin_lock(&nn->nn_lock); 311 - ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id); 312 - if (ret == 0) 313 - list_add_tail(&nsw->ns_node_item, 314 - &nn->nn_status_list); 315 - spin_unlock(&nn->nn_lock); 316 - } while (ret == -EAGAIN); 305 + spin_lock(&nn->nn_lock); 306 + ret = idr_alloc(&nn->nn_status_idr, nsw, 0, 0, GFP_ATOMIC); 307 + if (ret >= 0) { 308 + nsw->ns_id = ret; 309 + list_add_tail(&nsw->ns_node_item, &nn->nn_status_list); 310 + } 311 + spin_unlock(&nn->nn_lock); 317 312 318 - if (ret == 0) { 313 + if (ret >= 0) { 319 314 init_waitqueue_head(&nsw->ns_wq); 320 315 nsw->ns_sys_status = R2NET_ERR_NONE; 321 316 nsw->ns_status = 0; 317 + return 0; 322 318 } 323 - 324 319 return ret; 325 320 } 326 321
+51 -1
drivers/tty/serial/8250/8250.c
··· 301 301 }, 302 302 [PORT_8250_CIR] = { 303 303 .name = "CIR port" 304 - } 304 + }, 305 + [PORT_ALTR_16550_F32] = { 306 + .name = "Altera 16550 FIFO32", 307 + .fifo_size = 32, 308 + .tx_loadsz = 32, 309 + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, 310 + .flags = UART_CAP_FIFO | UART_CAP_AFE, 311 + }, 312 + [PORT_ALTR_16550_F64] = { 313 + .name = "Altera 16550 FIFO64", 314 + .fifo_size = 64, 315 + .tx_loadsz = 64, 316 + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, 317 + .flags = UART_CAP_FIFO | UART_CAP_AFE, 318 + }, 319 + [PORT_ALTR_16550_F128] = { 320 + .name = "Altera 16550 FIFO128", 321 + .fifo_size = 128, 322 + .tx_loadsz = 128, 323 + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, 324 + .flags = UART_CAP_FIFO | UART_CAP_AFE, 325 + }, 305 326 }; 306 327 307 328 /* Uart divisor latch read */ ··· 3417 3396 MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA"); 3418 3397 #endif 3419 3398 MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR); 3399 + 3400 + #ifndef MODULE 3401 + /* This module was renamed to 8250_core in 3.7. Keep the old "8250" name 3402 + * working as well for the module options so we don't break people. We 3403 + * need to keep the names identical and the convenient macros will happily 3404 + * refuse to let us do that by failing the build with redefinition errors 3405 + * of global variables. So we stick them inside a dummy function to avoid 3406 + * those conflicts. The options still get parsed, and the redefined 3407 + * MODULE_PARAM_PREFIX lets us keep the "8250." syntax alive. 3408 + * 3409 + * This is hacky. I'm sorry. 3410 + */ 3411 + static void __used s8250_options(void) 3412 + { 3413 + #undef MODULE_PARAM_PREFIX 3414 + #define MODULE_PARAM_PREFIX "8250." 3415 + 3416 + module_param_cb(share_irqs, &param_ops_uint, &share_irqs, 0644); 3417 + module_param_cb(nr_uarts, &param_ops_uint, &nr_uarts, 0644); 3418 + module_param_cb(skip_txen_test, &param_ops_uint, &skip_txen_test, 0644); 3419 + #ifdef CONFIG_SERIAL_8250_RSA 3420 + __module_param_call(MODULE_PARAM_PREFIX, probe_rsa, 3421 + &param_array_ops, .arr = &__param_arr_probe_rsa, 3422 + 0444, -1); 3423 + #endif 3424 + } 3425 + #else 3426 + MODULE_ALIAS("8250"); 3427 + #endif
+11 -10
drivers/tty/serial/8250/8250_pci.c
··· 1571 1571 1572 1572 /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ 1573 1573 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 1574 + #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 1574 1575 1575 1576 /* 1576 1577 * Master list of serial port init/setup/exit quirks. ··· 1847 1846 .device = PCI_DEVICE_ID_PLX_9050, 1848 1847 .subvendor = PCI_SUBVENDOR_ID_KEYSPAN, 1849 1848 .subdevice = PCI_SUBDEVICE_ID_KEYSPAN_SX2, 1850 - .init = pci_plx9050_init, 1851 - .setup = pci_default_setup, 1852 - .exit = pci_plx9050_exit, 1853 - }, 1854 - { 1855 - .vendor = PCI_VENDOR_ID_PLX, 1856 - .device = PCI_DEVICE_ID_PLX_9050, 1857 - .subvendor = PCI_VENDOR_ID_PLX, 1858 - .subdevice = PCI_SUBDEVICE_ID_UNKNOWN_0x1584, 1859 1849 .init = pci_plx9050_init, 1860 1850 .setup = pci_default_setup, 1861 1851 .exit = pci_plx9050_exit, ··· 3725 3733 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, 3726 3734 PCI_VENDOR_ID_PLX, 3727 3735 PCI_SUBDEVICE_ID_UNKNOWN_0x1584, 0, 0, 3728 - pbn_b0_4_115200 }, 3736 + pbn_b2_4_115200 }, 3737 + /* Unknown card - subdevice 0x1588 */ 3738 + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, 3739 + PCI_VENDOR_ID_PLX, 3740 + PCI_SUBDEVICE_ID_UNKNOWN_0x1588, 0, 0, 3741 + pbn_b2_8_115200 }, 3729 3742 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, 3730 3743 PCI_SUBVENDOR_ID_KEYSPAN, 3731 3744 PCI_SUBDEVICE_ID_KEYSPAN_SX2, 0, 0, ··· 4786 4789 4787 4790 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835, 4788 4791 PCI_VENDOR_ID_IBM, 0x0299, 4792 + 0, 0, pbn_b0_bt_2_115200 }, 4793 + 4794 + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835, 4795 + 0x1000, 0x0012, 4789 4796 0, 0, pbn_b0_bt_2_115200 }, 4790 4797 4791 4798 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+7 -5
drivers/tty/serial/8250/8250_pnp.c
··· 429 429 { 430 430 struct uart_8250_port uart; 431 431 int ret, line, flags = dev_id->driver_data; 432 + struct resource *res = NULL; 432 433 433 434 if (flags & UNKNOWN_DEV) { 434 435 ret = serial_pnp_guess_board(dev); ··· 440 439 memset(&uart, 0, sizeof(uart)); 441 440 if (pnp_irq_valid(dev, 0)) 442 441 uart.port.irq = pnp_irq(dev, 0); 443 - if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) { 444 - uart.port.iobase = pnp_port_start(dev, 2); 445 - uart.port.iotype = UPIO_PORT; 446 - } else if (pnp_port_valid(dev, 0)) { 447 - uart.port.iobase = pnp_port_start(dev, 0); 442 + if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) 443 + res = pnp_get_resource(dev, IORESOURCE_IO, 2); 444 + else if (pnp_port_valid(dev, 0)) 445 + res = pnp_get_resource(dev, IORESOURCE_IO, 0); 446 + if (pnp_resource_enabled(res)) { 447 + uart.port.iobase = res->start; 448 448 uart.port.iotype = UPIO_PORT; 449 449 } else if (pnp_mem_valid(dev, 0)) { 450 450 uart.port.mapbase = pnp_mem_start(dev, 0);
+2 -2
drivers/tty/serial/Kconfig
··· 211 211 config SERIAL_SAMSUNG_UARTS_4 212 212 bool 213 213 depends on PLAT_SAMSUNG 214 - default y if !(CPU_S3C2410 || SERIAL_S3C2412 || CPU_S3C2440 || CPU_S3C2442) 214 + default y if !(CPU_S3C2410 || CPU_S3C2412 || CPU_S3C2440 || CPU_S3C2442) 215 215 help 216 216 Internal node for the common case of 4 Samsung compatible UARTs 217 217 218 218 config SERIAL_SAMSUNG_UARTS 219 219 int 220 220 depends on PLAT_SAMSUNG 221 - default 6 if ARCH_S5P6450 221 + default 6 if CPU_S5P6450 222 222 default 4 if SERIAL_SAMSUNG_UARTS_4 || CPU_S3C2416 223 223 default 3 224 224 help
+4 -4
drivers/tty/serial/bcm63xx_uart.c
··· 235 235 */ 236 236 static void bcm_uart_do_rx(struct uart_port *port) 237 237 { 238 - struct tty_port *port = &port->state->port; 238 + struct tty_port *tty_port = &port->state->port; 239 239 unsigned int max_count; 240 240 241 241 /* limit number of char read in interrupt, should not be ··· 260 260 bcm_uart_writel(port, val, UART_CTL_REG); 261 261 262 262 port->icount.overrun++; 263 - tty_insert_flip_char(port, 0, TTY_OVERRUN); 263 + tty_insert_flip_char(tty_port, 0, TTY_OVERRUN); 264 264 } 265 265 266 266 if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY))) ··· 299 299 300 300 301 301 if ((cstat & port->ignore_status_mask) == 0) 302 - tty_insert_flip_char(port, c, flag); 302 + tty_insert_flip_char(tty_port, c, flag); 303 303 304 304 } while (--max_count); 305 305 306 - tty_flip_buffer_push(port); 306 + tty_flip_buffer_push(tty_port); 307 307 } 308 308 309 309 /*
+1 -1
drivers/tty/serial/mpc52xx_uart.c
··· 550 550 return 0; 551 551 552 552 psc_num = (port->mapbase & 0xf00) >> 8; 553 - snprintf(clk_name, sizeof(clk_name), "psc%d_clk", psc_num); 553 + snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num); 554 554 psc_clk = clk_get(port->dev, clk_name); 555 555 if (IS_ERR(psc_clk)) { 556 556 dev_err(port->dev, "Failed to get PSC clock entry!\n");
+6
drivers/tty/serial/of_serial.c
··· 241 241 { .compatible = "ns16850", .data = (void *)PORT_16850, }, 242 242 { .compatible = "nvidia,tegra20-uart", .data = (void *)PORT_TEGRA, }, 243 243 { .compatible = "nxp,lpc3220-uart", .data = (void *)PORT_LPC3220, }, 244 + { .compatible = "altr,16550-FIFO32", 245 + .data = (void *)PORT_ALTR_16550_F32, }, 246 + { .compatible = "altr,16550-FIFO64", 247 + .data = (void *)PORT_ALTR_16550_F64, }, 248 + { .compatible = "altr,16550-FIFO128", 249 + .data = (void *)PORT_ALTR_16550_F128, }, 244 250 #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL 245 251 { .compatible = "ibm,qpace-nwp-serial", 246 252 .data = (void *)PORT_NWPSERIAL, },
+9 -12
drivers/tty/serial/sunsu.c
··· 968 968 #define UART_NR 4 969 969 970 970 static struct uart_sunsu_port sunsu_ports[UART_NR]; 971 + static int nr_inst; /* Number of already registered ports */ 971 972 972 973 #ifdef CONFIG_SERIO 973 974 ··· 1338 1337 printk("Console: ttyS%d (SU)\n", 1339 1338 (sunsu_reg.minor - 64) + co->index); 1340 1339 1341 - /* 1342 - * Check whether an invalid uart number has been specified, and 1343 - * if so, search for the first available port that does have 1344 - * console support. 1345 - */ 1346 - if (co->index >= UART_NR) 1347 - co->index = 0; 1340 + if (co->index > nr_inst) 1341 + return -ENODEV; 1348 1342 port = &sunsu_ports[co->index].port; 1349 1343 1350 1344 /* ··· 1404 1408 1405 1409 static int su_probe(struct platform_device *op) 1406 1410 { 1407 - static int inst; 1408 1411 struct device_node *dp = op->dev.of_node; 1409 1412 struct uart_sunsu_port *up; 1410 1413 struct resource *rp; ··· 1413 1418 1414 1419 type = su_get_type(dp); 1415 1420 if (type == SU_PORT_PORT) { 1416 - if (inst >= UART_NR) 1421 + if (nr_inst >= UART_NR) 1417 1422 return -EINVAL; 1418 - up = &sunsu_ports[inst]; 1423 + up = &sunsu_ports[nr_inst]; 1419 1424 } else { 1420 1425 up = kzalloc(sizeof(*up), GFP_KERNEL); 1421 1426 if (!up) 1422 1427 return -ENOMEM; 1423 1428 } 1424 1429 1425 - up->port.line = inst; 1430 + up->port.line = nr_inst; 1426 1431 1427 1432 spin_lock_init(&up->port.lock); 1428 1433 ··· 1456 1461 } 1457 1462 dev_set_drvdata(&op->dev, up); 1458 1463 1464 + nr_inst++; 1465 + 1459 1466 return 0; 1460 1467 } 1461 1468 ··· 1485 1488 1486 1489 dev_set_drvdata(&op->dev, up); 1487 1490 1488 - inst++; 1491 + nr_inst++; 1489 1492 1490 1493 return 0; 1491 1494
+1 -8
drivers/tty/serial/vt8500_serial.c
··· 611 611 vt8500_port->uart.dev = &pdev->dev; 612 612 vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; 613 613 614 - vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0); 615 - if (!IS_ERR(vt8500_port->clk)) { 616 - vt8500_port->uart.uartclk = clk_get_rate(vt8500_port->clk); 617 - } else { 618 - /* use the default of 24Mhz if not specified and warn */ 619 - pr_warn("%s: serial clock source not specified\n", __func__); 620 - vt8500_port->uart.uartclk = 24000000; 621 - } 614 + vt8500_port->uart.uartclk = clk_get_rate(vt8500_port->clk); 622 615 623 616 snprintf(vt8500_port->name, sizeof(vt8500_port->name), 624 617 "VT8500 UART%d", pdev->id);
+1 -1
drivers/tty/tty_buffer.c
··· 425 425 struct tty_ldisc *disc; 426 426 427 427 tty = port->itty; 428 - if (WARN_RATELIMIT(tty == NULL, "tty is NULL\n")) 428 + if (tty == NULL) 429 429 return; 430 430 431 431 disc = tty_ldisc_ref(tty);
+1 -1
drivers/usb/Makefile
··· 46 46 obj-$(CONFIG_USB_SERIAL) += serial/ 47 47 48 48 obj-$(CONFIG_USB) += misc/ 49 - obj-$(CONFIG_USB_COMMON) += phy/ 49 + obj-$(CONFIG_USB_OTG_UTILS) += phy/ 50 50 obj-$(CONFIG_EARLY_PRINTK_DBGP) += early/ 51 51 52 52 obj-$(CONFIG_USB_ATM) += atm/
+2 -2
drivers/usb/c67x00/c67x00-sched.c
··· 100 100 #define TD_PIDEP_OFFSET 0x04 101 101 #define TD_PIDEPMASK_PID 0xF0 102 102 #define TD_PIDEPMASK_EP 0x0F 103 - #define TD_PORTLENMASK_DL 0x02FF 103 + #define TD_PORTLENMASK_DL 0x03FF 104 104 #define TD_PORTLENMASK_PN 0xC000 105 105 106 106 #define TD_STATUS_OFFSET 0x07 ··· 590 590 { 591 591 struct c67x00_td *td; 592 592 struct c67x00_urb_priv *urbp = urb->hcpriv; 593 - const __u8 active_flag = 1, retry_cnt = 1; 593 + const __u8 active_flag = 1, retry_cnt = 3; 594 594 __u8 cmd = 0; 595 595 int tt = 0; 596 596
+3 -3
drivers/usb/chipidea/udc.c
··· 1767 1767 goto put_transceiver; 1768 1768 } 1769 1769 1770 - retval = dbg_create_files(&ci->gadget.dev); 1770 + retval = dbg_create_files(ci->dev); 1771 1771 if (retval) 1772 1772 goto unreg_device; 1773 1773 ··· 1796 1796 1797 1797 dev_err(dev, "error = %i\n", retval); 1798 1798 remove_dbg: 1799 - dbg_remove_files(&ci->gadget.dev); 1799 + dbg_remove_files(ci->dev); 1800 1800 unreg_device: 1801 1801 device_unregister(&ci->gadget.dev); 1802 1802 put_transceiver: ··· 1836 1836 if (ci->global_phy) 1837 1837 usb_put_phy(ci->transceiver); 1838 1838 } 1839 - dbg_remove_files(&ci->gadget.dev); 1839 + dbg_remove_files(ci->dev); 1840 1840 device_unregister(&ci->gadget.dev); 1841 1841 /* my kobject is dynamic, I swear! */ 1842 1842 memset(&ci->gadget, 0, sizeof(ci->gadget));
+20 -3
drivers/usb/class/cdc-wdm.c
··· 56 56 #define WDM_RESPONDING 7 57 57 #define WDM_SUSPENDING 8 58 58 #define WDM_RESETTING 9 59 + #define WDM_OVERFLOW 10 59 60 60 61 #define WDM_MAX 16 61 62 ··· 156 155 { 157 156 struct wdm_device *desc = urb->context; 158 157 int status = urb->status; 158 + int length = urb->actual_length; 159 159 160 160 spin_lock(&desc->iuspin); 161 161 clear_bit(WDM_RESPONDING, &desc->flags); ··· 187 185 } 188 186 189 187 desc->rerr = status; 190 - desc->reslength = urb->actual_length; 191 - memmove(desc->ubuf + desc->length, desc->inbuf, desc->reslength); 192 - desc->length += desc->reslength; 188 + if (length + desc->length > desc->wMaxCommand) { 189 + /* The buffer would overflow */ 190 + set_bit(WDM_OVERFLOW, &desc->flags); 191 + } else { 192 + /* we may already be in overflow */ 193 + if (!test_bit(WDM_OVERFLOW, &desc->flags)) { 194 + memmove(desc->ubuf + desc->length, desc->inbuf, length); 195 + desc->length += length; 196 + desc->reslength = length; 197 + } 198 + } 193 199 skip_error: 194 200 wake_up(&desc->wait); 195 201 ··· 445 435 rv = -ENODEV; 446 436 goto err; 447 437 } 438 + if (test_bit(WDM_OVERFLOW, &desc->flags)) { 439 + clear_bit(WDM_OVERFLOW, &desc->flags); 440 + rv = -ENOBUFS; 441 + goto err; 442 + } 448 443 i++; 449 444 if (file->f_flags & O_NONBLOCK) { 450 445 if (!test_bit(WDM_READ, &desc->flags)) { ··· 493 478 spin_unlock_irq(&desc->iuspin); 494 479 goto retry; 495 480 } 481 + 496 482 if (!desc->reslength) { /* zero length read */ 497 483 dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__); 498 484 clear_bit(WDM_READ, &desc->flags); ··· 1020 1004 struct wdm_device *desc = wdm_find_device(intf); 1021 1005 int rv; 1022 1006 1007 + clear_bit(WDM_OVERFLOW, &desc->flags); 1023 1008 clear_bit(WDM_RESETTING, &desc->flags); 1024 1009 rv = recover_from_urb_loss(desc); 1025 1010 mutex_unlock(&desc->wlock);
+1
drivers/usb/dwc3/core.c
··· 583 583 break; 584 584 } 585 585 586 + dwc3_free_event_buffers(dwc); 586 587 dwc3_core_exit(dwc); 587 588 588 589 return 0;
-2
drivers/usb/dwc3/dwc3-exynos.c
··· 23 23 #include <linux/usb/nop-usb-xceiv.h> 24 24 #include <linux/of.h> 25 25 26 - #include "core.h" 27 - 28 26 struct dwc3_exynos { 29 27 struct platform_device *dwc3; 30 28 struct platform_device *usb2_phy;
+3 -5
drivers/usb/dwc3/dwc3-omap.c
··· 54 54 #include <linux/usb/otg.h> 55 55 #include <linux/usb/nop-usb-xceiv.h> 56 56 57 - #include "core.h" 58 - 59 57 /* 60 58 * All these registers belong to OMAP's Wrapper around the 61 59 * DesignWare USB3 Core. ··· 463 465 return 0; 464 466 } 465 467 466 - static const struct of_device_id of_dwc3_matach[] = { 468 + static const struct of_device_id of_dwc3_match[] = { 467 469 { 468 470 "ti,dwc3", 469 471 }, 470 472 { }, 471 473 }; 472 - MODULE_DEVICE_TABLE(of, of_dwc3_matach); 474 + MODULE_DEVICE_TABLE(of, of_dwc3_match); 473 475 474 476 static struct platform_driver dwc3_omap_driver = { 475 477 .probe = dwc3_omap_probe, 476 478 .remove = dwc3_omap_remove, 477 479 .driver = { 478 480 .name = "omap-dwc3", 479 - .of_match_table = of_dwc3_matach, 481 + .of_match_table = of_dwc3_match, 480 482 }, 481 483 }; 482 484
-2
drivers/usb/dwc3/dwc3-pci.c
··· 45 45 #include <linux/usb/otg.h> 46 46 #include <linux/usb/nop-usb-xceiv.h> 47 47 48 - #include "core.h" 49 - 50 48 /* FIXME define these in <linux/pci_ids.h> */ 51 49 #define PCI_VENDOR_ID_SYNOPSYS 0x16c3 52 50 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
+4 -3
drivers/usb/dwc3/ep0.c
··· 891 891 DWC3_TRBCTL_CONTROL_DATA); 892 892 } else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) 893 893 && (dep->number == 0)) { 894 - u32 transfer_size; 894 + u32 transfer_size; 895 + u32 maxpacket; 895 896 896 897 ret = usb_gadget_map_request(&dwc->gadget, &req->request, 897 898 dep->number); ··· 903 902 904 903 WARN_ON(req->request.length > DWC3_EP0_BOUNCE_SIZE); 905 904 906 - transfer_size = roundup(req->request.length, 907 - (u32) dep->endpoint.maxpacket); 905 + maxpacket = dep->endpoint.maxpacket; 906 + transfer_size = roundup(req->request.length, maxpacket); 908 907 909 908 dwc->ep0_bounced = true; 910 909
-3
drivers/usb/dwc3/gadget.c
··· 2159 2159 2160 2160 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) 2161 2161 { 2162 - struct dwc3_gadget_ep_cmd_params params; 2163 2162 struct dwc3_ep *dep; 2164 2163 int ret; 2165 2164 u32 reg; 2166 2165 u8 speed; 2167 2166 2168 2167 dev_vdbg(dwc->dev, "%s\n", __func__); 2169 - 2170 - memset(&params, 0x00, sizeof(params)); 2171 2168 2172 2169 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 2173 2170 speed = reg & DWC3_DSTS_CONNECTSPD;
+6 -6
drivers/usb/gadget/Makefile
··· 35 35 obj-$(CONFIG_USB_FUSB300) += fusb300_udc.o 36 36 obj-$(CONFIG_USB_MV_U3D) += mv_u3d_core.o 37 37 38 + # USB Functions 39 + obj-$(CONFIG_USB_F_ACM) += f_acm.o 40 + f_ss_lb-y := f_loopback.o f_sourcesink.o 41 + obj-$(CONFIG_USB_F_SS_LB) += f_ss_lb.o 42 + obj-$(CONFIG_USB_U_SERIAL) += u_serial.o 43 + 38 44 # 39 45 # USB gadget drivers 40 46 # ··· 80 74 obj-$(CONFIG_USB_G_NCM) += g_ncm.o 81 75 obj-$(CONFIG_USB_G_ACM_MS) += g_acm_ms.o 82 76 obj-$(CONFIG_USB_GADGET_TARGET) += tcm_usb_gadget.o 83 - 84 - # USB Functions 85 - obj-$(CONFIG_USB_F_ACM) += f_acm.o 86 - f_ss_lb-y := f_loopback.o f_sourcesink.o 87 - obj-$(CONFIG_USB_F_SS_LB) += f_ss_lb.o 88 - obj-$(CONFIG_USB_U_SERIAL) += u_serial.o
+1 -4
drivers/usb/gadget/composite.c
··· 1757 1757 /** 1758 1758 * usb_composite_probe() - register a composite driver 1759 1759 * @driver: the driver to register 1760 - * @bind: the callback used to allocate resources that are shared across the 1761 - * whole device, such as string IDs, and add its configurations using 1762 - * @usb_add_config(). This may fail by returning a negative errno 1763 - * value; it should return zero on successful initialization. 1760 + * 1764 1761 * Context: single threaded during gadget setup 1765 1762 * 1766 1763 * This function is used to register drivers using the composite driver
+1
drivers/usb/gadget/f_uac1.c
··· 418 418 419 419 req->context = audio; 420 420 req->complete = f_audio_complete; 421 + len = min_t(size_t, sizeof(value), len); 421 422 memcpy(req->buf, &value, len); 422 423 423 424 return len;
+8 -12
drivers/usb/gadget/imx_udc.c
··· 1334 1334 struct usb_gadget_driver *driver) 1335 1335 { 1336 1336 struct imx_udc_struct *imx_usb; 1337 - int retval; 1338 1337 1339 1338 imx_usb = container_of(gadget, struct imx_udc_struct, gadget); 1340 1339 /* first hook up the driver ... */ 1341 1340 imx_usb->driver = driver; 1342 1341 imx_usb->gadget.dev.driver = &driver->driver; 1343 - 1344 - retval = device_add(&imx_usb->gadget.dev); 1345 - if (retval) 1346 - goto fail; 1347 1342 1348 1343 D_INI(imx_usb->dev, "<%s> registered gadget driver '%s'\n", 1349 1344 __func__, driver->driver.name); ··· 1346 1351 imx_udc_enable(imx_usb); 1347 1352 1348 1353 return 0; 1349 - fail: 1350 - imx_usb->driver = NULL; 1351 - imx_usb->gadget.dev.driver = NULL; 1352 - return retval; 1353 1354 } 1354 1355 1355 1356 static int imx_udc_stop(struct usb_gadget *gadget, ··· 1360 1369 1361 1370 imx_usb->gadget.dev.driver = NULL; 1362 1371 imx_usb->driver = NULL; 1363 - 1364 - device_del(&imx_usb->gadget.dev); 1365 1372 1366 1373 D_INI(imx_usb->dev, "<%s> unregistered gadget driver '%s'\n", 1367 1374 __func__, driver->driver.name); ··· 1466 1477 imx_usb->gadget.dev.parent = &pdev->dev; 1467 1478 imx_usb->gadget.dev.dma_mask = pdev->dev.dma_mask; 1468 1479 1480 + ret = device_add(&imx_usb->gadget.dev); 1481 + if (retval) 1482 + goto fail4; 1483 + 1469 1484 platform_set_drvdata(pdev, imx_usb); 1470 1485 1471 1486 usb_init_data(imx_usb); ··· 1481 1488 1482 1489 ret = usb_add_gadget_udc(&pdev->dev, &imx_usb->gadget); 1483 1490 if (ret) 1484 - goto fail4; 1491 + goto fail5; 1485 1492 1486 1493 return 0; 1494 + fail5: 1495 + device_unregister(&imx_usb->gadget.dev); 1487 1496 fail4: 1488 1497 for (i = 0; i < IMX_USB_NB_EP + 1; i++) 1489 1498 free_irq(imx_usb->usbd_int[i], imx_usb); ··· 1509 1514 int i; 1510 1515 1511 1516 usb_del_gadget_udc(&imx_usb->gadget); 1517 + device_unregister(&imx_usb->gadget.dev); 1512 1518 imx_udc_disable(imx_usb); 1513 1519 del_timer(&imx_usb->timer); 1514 1520
+2 -1
drivers/usb/gadget/omap_udc.c
··· 62 62 #define DRIVER_VERSION "4 October 2004" 63 63 64 64 #define OMAP_DMA_USB_W2FC_TX0 29 65 + #define OMAP_DMA_USB_W2FC_RX0 26 65 66 66 67 /* 67 68 * The OMAP UDC needs _very_ early endpoint setup: before enabling the ··· 1311 1310 } 1312 1311 1313 1312 static int omap_udc_start(struct usb_gadget *g, 1314 - struct usb_gadget_driver *driver) 1313 + struct usb_gadget_driver *driver); 1315 1314 static int omap_udc_stop(struct usb_gadget *g, 1316 1315 struct usb_gadget_driver *driver); 1317 1316
+15 -9
drivers/usb/gadget/pxa25x_udc.c
··· 1266 1266 dev->gadget.dev.driver = &driver->driver; 1267 1267 dev->pullup = 1; 1268 1268 1269 - retval = device_add (&dev->gadget.dev); 1270 - if (retval) { 1271 - dev->driver = NULL; 1272 - dev->gadget.dev.driver = NULL; 1273 - return retval; 1274 - } 1275 - 1276 1269 /* ... then enable host detection and ep0; and we're ready 1277 1270 * for set_configuration as well as eventual disconnect. 1278 1271 */ ··· 1303 1310 } 1304 1311 del_timer_sync(&dev->timer); 1305 1312 1313 + /* report disconnect; the driver is already quiesced */ 1314 + if (driver) 1315 + driver->disconnect(&dev->gadget); 1316 + 1306 1317 /* re-init driver-visible data structures */ 1307 1318 udc_reinit(dev); 1308 1319 } ··· 1328 1331 dev->gadget.dev.driver = NULL; 1329 1332 dev->driver = NULL; 1330 1333 1331 - device_del (&dev->gadget.dev); 1332 1334 dump_state(dev); 1333 1335 1334 1336 return 0; ··· 2142 2146 dev->gadget.dev.parent = &pdev->dev; 2143 2147 dev->gadget.dev.dma_mask = pdev->dev.dma_mask; 2144 2148 2149 + retval = device_add(&dev->gadget.dev); 2150 + if (retval) { 2151 + dev->driver = NULL; 2152 + dev->gadget.dev.driver = NULL; 2153 + goto err_device_add; 2154 + } 2155 + 2145 2156 the_controller = dev; 2146 2157 platform_set_drvdata(pdev, dev); 2147 2158 ··· 2199 2196 free_irq(irq, dev); 2200 2197 #endif 2201 2198 err_irq1: 2199 + device_unregister(&dev->gadget.dev); 2200 + err_device_add: 2202 2201 if (gpio_is_valid(dev->mach->gpio_pullup)) 2203 2202 gpio_free(dev->mach->gpio_pullup); 2204 2203 err_gpio_pullup: ··· 2222 2217 { 2223 2218 struct pxa25x_udc *dev = platform_get_drvdata(pdev); 2224 2219 2225 - usb_del_gadget_udc(&dev->gadget); 2226 2220 if (dev->driver) 2227 2221 return -EBUSY; 2228 2222 2223 + usb_del_gadget_udc(&dev->gadget); 2224 + device_unregister(&dev->gadget.dev); 2229 2225 dev->pullup = 0; 2230 2226 pullup(dev); 2231 2227
+12 -6
drivers/usb/gadget/pxa27x_udc.c
··· 1814 1814 udc->gadget.dev.driver = &driver->driver; 1815 1815 dplus_pullup(udc, 1); 1816 1816 1817 - retval = device_add(&udc->gadget.dev); 1818 - if (retval) { 1819 - dev_err(udc->dev, "device_add error %d\n", retval); 1820 - goto fail; 1821 - } 1822 1817 if (!IS_ERR_OR_NULL(udc->transceiver)) { 1823 1818 retval = otg_set_peripheral(udc->transceiver->otg, 1824 1819 &udc->gadget); ··· 1871 1876 1872 1877 udc->driver = NULL; 1873 1878 1874 - device_del(&udc->gadget.dev); 1875 1879 1876 1880 if (!IS_ERR_OR_NULL(udc->transceiver)) 1877 1881 return otg_set_peripheral(udc->transceiver->otg, NULL); ··· 2474 2480 driver_name, udc->irq, retval); 2475 2481 goto err_irq; 2476 2482 } 2483 + 2484 + retval = device_add(&udc->gadget.dev); 2485 + if (retval) { 2486 + dev_err(udc->dev, "device_add error %d\n", retval); 2487 + goto err_dev_add; 2488 + } 2489 + 2477 2490 retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget); 2478 2491 if (retval) 2479 2492 goto err_add_udc; 2480 2493 2481 2494 pxa_init_debugfs(udc); 2495 + 2482 2496 return 0; 2497 + 2483 2498 err_add_udc: 2499 + device_unregister(&udc->gadget.dev); 2500 + err_dev_add: 2484 2501 free_irq(udc->irq, udc); 2485 2502 err_irq: 2486 2503 iounmap(udc->regs); ··· 2512 2507 int gpio = udc->mach->gpio_pullup; 2513 2508 2514 2509 usb_del_gadget_udc(&udc->gadget); 2510 + device_del(&udc->gadget.dev); 2515 2511 usb_gadget_unregister_driver(udc->driver); 2516 2512 free_irq(udc->irq, udc); 2517 2513 pxa_cleanup_debugfs(udc);
+12 -16
drivers/usb/gadget/s3c2410_udc.c
··· 1668 1668 static int s3c2410_udc_start(struct usb_gadget *g, 1669 1669 struct usb_gadget_driver *driver) 1670 1670 { 1671 - struct s3c2410_udc *udc = to_s3c2410(g) 1672 - int retval; 1671 + struct s3c2410_udc *udc = to_s3c2410(g); 1673 1672 1674 1673 dprintk(DEBUG_NORMAL, "%s() '%s'\n", __func__, driver->driver.name); 1675 1674 ··· 1676 1677 udc->driver = driver; 1677 1678 udc->gadget.dev.driver = &driver->driver; 1678 1679 1679 - /* Bind the driver */ 1680 - retval = device_add(&udc->gadget.dev); 1681 - if (retval) { 1682 - dev_err(&udc->gadget.dev, "Error in device_add() : %d\n", retval); 1683 - goto register_error; 1684 - } 1685 - 1686 1680 /* Enable udc */ 1687 1681 s3c2410_udc_enable(udc); 1688 1682 1689 1683 return 0; 1690 - 1691 - register_error: 1692 - udc->driver = NULL; 1693 - udc->gadget.dev.driver = NULL; 1694 - return retval; 1695 1684 } 1696 1685 1697 1686 static int s3c2410_udc_stop(struct usb_gadget *g, ··· 1687 1700 { 1688 1701 struct s3c2410_udc *udc = to_s3c2410(g); 1689 1702 1690 - device_del(&udc->gadget.dev); 1691 1703 udc->driver = NULL; 1692 1704 1693 1705 /* Disable udc */ ··· 1828 1842 udc->gadget.dev.parent = &pdev->dev; 1829 1843 udc->gadget.dev.dma_mask = pdev->dev.dma_mask; 1830 1844 1845 + /* Bind the driver */ 1846 + retval = device_add(&udc->gadget.dev); 1847 + if (retval) { 1848 + dev_err(&udc->gadget.dev, "Error in device_add() : %d\n", retval); 1849 + goto err_device_add; 1850 + } 1851 + 1831 1852 the_controller = udc; 1832 1853 platform_set_drvdata(pdev, udc); 1833 1854 ··· 1923 1930 err_int: 1924 1931 free_irq(IRQ_USBD, udc); 1925 1932 err_map: 1933 + device_unregister(&udc->gadget.dev); 1934 + err_device_add: 1926 1935 iounmap(base_addr); 1927 1936 err_mem: 1928 1937 release_mem_region(rsrc_start, rsrc_len); ··· 1942 1947 1943 1948 dev_dbg(&pdev->dev, "%s()\n", __func__); 1944 1949 1945 - usb_del_gadget_udc(&udc->gadget); 1946 1950 if (udc->driver) 1947 1951 return -EBUSY; 1948 1952 1953 + usb_del_gadget_udc(&udc->gadget); 
1954 + device_unregister(&udc->gadget.dev); 1949 1955 debugfs_remove(udc->regs_info); 1950 1956 1951 1957 if (udc_info && !udc_info->udc_command &&
+3
drivers/usb/gadget/u_uac1.c
··· 240 240 snd = &card->playback; 241 241 snd->filp = filp_open(fn_play, O_WRONLY, 0); 242 242 if (IS_ERR(snd->filp)) { 243 + int ret = PTR_ERR(snd->filp); 244 + 243 245 ERROR(card, "No such PCM playback device: %s\n", fn_play); 244 246 snd->filp = NULL; 247 + return ret; 245 248 } 246 249 pcm_file = snd->filp->private_data; 247 250 snd->substream = pcm_file->substream;
+2 -4
drivers/usb/host/ehci-hcd.c
··· 748 748 /* guard against (alleged) silicon errata */ 749 749 if (cmd & CMD_IAAD) 750 750 ehci_dbg(ehci, "IAA with IAAD still set?\n"); 751 - if (ehci->async_iaa) { 751 + if (ehci->async_iaa) 752 752 COUNT(ehci->stats.iaa); 753 - end_unlink_async(ehci); 754 - } else 755 - ehci_dbg(ehci, "IAA with nothing unlinked?\n"); 753 + end_unlink_async(ehci); 756 754 } 757 755 758 756 /* remote wakeup [4.3.1] */
+27 -9
drivers/usb/host/ehci-q.c
··· 135 135 * qtd is updated in qh_completions(). Update the QH 136 136 * overlay here. 137 137 */ 138 - if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) { 138 + if (qh->hw->hw_token & ACTIVE_BIT(ehci)) { 139 139 qh->hw->hw_qtd_next = qtd->hw_next; 140 140 qtd = NULL; 141 141 } ··· 449 449 else if (last_status == -EINPROGRESS && !urb->unlinked) 450 450 continue; 451 451 452 - /* qh unlinked; token in overlay may be most current */ 453 - if (state == QH_STATE_IDLE 454 - && cpu_to_hc32(ehci, qtd->qtd_dma) 455 - == hw->hw_current) { 452 + /* 453 + * If this was the active qtd when the qh was unlinked 454 + * and the overlay's token is active, then the overlay 455 + * hasn't been written back to the qtd yet so use its 456 + * token instead of the qtd's. After the qtd is 457 + * processed and removed, the overlay won't be valid 458 + * any more. 459 + */ 460 + if (state == QH_STATE_IDLE && 461 + qh->qtd_list.next == &qtd->qtd_list && 462 + (hw->hw_token & ACTIVE_BIT(ehci))) { 456 463 token = hc32_to_cpu(ehci, hw->hw_token); 464 + hw->hw_token &= ~ACTIVE_BIT(ehci); 457 465 458 466 /* An unlink may leave an incomplete 459 467 * async transaction in the TT buffer. ··· 1178 1170 struct ehci_qh *prev; 1179 1171 1180 1172 /* Add to the end of the list of QHs waiting for the next IAAD */ 1181 - qh->qh_state = QH_STATE_UNLINK; 1173 + qh->qh_state = QH_STATE_UNLINK_WAIT; 1182 1174 if (ehci->async_unlink) 1183 1175 ehci->async_unlink_last->unlink_next = qh; 1184 1176 else ··· 1221 1213 1222 1214 /* Do only the first waiting QH (nVidia bug?) */ 1223 1215 qh = ehci->async_unlink; 1224 - ehci->async_iaa = qh; 1225 - ehci->async_unlink = qh->unlink_next; 1226 - qh->unlink_next = NULL; 1216 + 1217 + /* 1218 + * Intel (?) bug: The HC can write back the overlay region 1219 + * even after the IAA interrupt occurs. In self-defense, 1220 + * always go through two IAA cycles for each QH. 
1221 + */ 1222 + if (qh->qh_state == QH_STATE_UNLINK_WAIT) { 1223 + qh->qh_state = QH_STATE_UNLINK; 1224 + } else { 1225 + ehci->async_iaa = qh; 1226 + ehci->async_unlink = qh->unlink_next; 1227 + qh->unlink_next = NULL; 1228 + } 1227 1229 1228 1230 /* Make sure the unlinks are all visible to the hardware */ 1229 1231 wmb();
-5
drivers/usb/musb/Kconfig
··· 7 7 config USB_MUSB_HDRC 8 8 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' 9 9 depends on USB && USB_GADGET 10 - select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN) 11 - select NOP_USB_XCEIV if (SOC_TI81XX || SOC_AM33XX) 12 - select TWL4030_USB if MACH_OMAP_3430SDP 13 - select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA 14 - select OMAP_CONTROL_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA 15 10 select USB_OTG_UTILS 16 11 help 17 12 Say Y here if your system has a dual role high speed USB
-6
drivers/usb/musb/musb_core.c
··· 1624 1624 1625 1625 /*-------------------------------------------------------------------------*/ 1626 1626 1627 - #ifdef CONFIG_SYSFS 1628 - 1629 1627 static ssize_t 1630 1628 musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf) 1631 1629 { ··· 1739 1741 static const struct attribute_group musb_attr_group = { 1740 1742 .attrs = musb_attributes, 1741 1743 }; 1742 - 1743 - #endif /* sysfs */ 1744 1744 1745 1745 /* Only used to provide driver mode change events */ 1746 1746 static void musb_irq_work(struct work_struct *data) ··· 1964 1968 if (status < 0) 1965 1969 goto fail4; 1966 1970 1967 - #ifdef CONFIG_SYSFS 1968 1971 status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group); 1969 1972 if (status) 1970 1973 goto fail5; 1971 - #endif 1972 1974 1973 1975 pm_runtime_put(musb->controller); 1974 1976
+8 -4
drivers/usb/musb/omap2430.c
··· 51 51 }; 52 52 #define glue_to_musb(g) platform_get_drvdata(g->musb) 53 53 54 - struct omap2430_glue *_glue; 54 + static struct omap2430_glue *_glue; 55 55 56 56 static struct timer_list musb_idle_timer; 57 57 ··· 237 237 { 238 238 struct omap2430_glue *glue = _glue; 239 239 240 - if (glue && glue_to_musb(glue)) { 241 - glue->status = status; 242 - } else { 240 + if (!glue) { 241 + pr_err("%s: musb core is not yet initialized\n", __func__); 242 + return; 243 + } 244 + glue->status = status; 245 + 246 + if (!glue_to_musb(glue)) { 243 247 pr_err("%s: musb core is not yet ready\n", __func__); 244 248 return; 245 249 }
+7 -3
drivers/usb/otg/otg.c
··· 130 130 spin_lock_irqsave(&phy_lock, flags); 131 131 132 132 phy = __usb_find_phy(&phy_list, type); 133 - if (IS_ERR(phy)) { 133 + if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { 134 134 pr_err("unable to find transceiver of type %s\n", 135 135 usb_phy_type_string(type)); 136 136 goto err0; ··· 228 228 spin_lock_irqsave(&phy_lock, flags); 229 229 230 230 phy = __usb_find_phy_dev(dev, &phy_bind_list, index); 231 - if (IS_ERR(phy)) { 231 + if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { 232 232 pr_err("unable to find transceiver\n"); 233 233 goto err0; 234 234 } ··· 301 301 */ 302 302 void usb_put_phy(struct usb_phy *x) 303 303 { 304 - if (x) 304 + if (x) { 305 + struct module *owner = x->dev->driver->owner; 306 + 305 307 put_device(x->dev); 308 + module_put(owner); 309 + } 306 310 } 307 311 EXPORT_SYMBOL(usb_put_phy); 308 312
+9 -15
drivers/usb/phy/omap-control-usb.c
··· 219 219 220 220 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 221 221 "control_dev_conf"); 222 - control_usb->dev_conf = devm_request_and_ioremap(&pdev->dev, res); 223 - if (!control_usb->dev_conf) { 224 - dev_err(&pdev->dev, "Failed to obtain io memory\n"); 225 - return -EADDRNOTAVAIL; 226 - } 222 + control_usb->dev_conf = devm_ioremap_resource(&pdev->dev, res); 223 + if (IS_ERR(control_usb->dev_conf)) 224 + return PTR_ERR(control_usb->dev_conf); 227 225 228 226 if (control_usb->type == OMAP_CTRL_DEV_TYPE1) { 229 227 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 230 228 "otghs_control"); 231 - control_usb->otghs_control = devm_request_and_ioremap( 229 + control_usb->otghs_control = devm_ioremap_resource( 232 230 &pdev->dev, res); 233 - if (!control_usb->otghs_control) { 234 - dev_err(&pdev->dev, "Failed to obtain io memory\n"); 235 - return -EADDRNOTAVAIL; 236 - } 231 + if (IS_ERR(control_usb->otghs_control)) 232 + return PTR_ERR(control_usb->otghs_control); 237 233 } 238 234 239 235 if (control_usb->type == OMAP_CTRL_DEV_TYPE2) { 240 236 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 241 237 "phy_power_usb"); 242 - control_usb->phy_power = devm_request_and_ioremap( 238 + control_usb->phy_power = devm_ioremap_resource( 243 239 &pdev->dev, res); 244 - if (!control_usb->phy_power) { 245 - dev_dbg(&pdev->dev, "Failed to obtain io memory\n"); 246 - return -EADDRNOTAVAIL; 247 - } 240 + if (IS_ERR(control_usb->phy_power)) 241 + return PTR_ERR(control_usb->phy_power); 248 242 249 243 control_usb->sys_clk = devm_clk_get(control_usb->dev, 250 244 "sys_clkin");
+3 -5
drivers/usb/phy/omap-usb3.c
··· 212 212 } 213 213 214 214 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll_ctrl"); 215 - phy->pll_ctrl_base = devm_request_and_ioremap(&pdev->dev, res); 216 - if (!phy->pll_ctrl_base) { 217 - dev_err(&pdev->dev, "ioremap of pll_ctrl failed\n"); 218 - return -ENOMEM; 219 - } 215 + phy->pll_ctrl_base = devm_ioremap_resource(&pdev->dev, res); 216 + if (IS_ERR(phy->pll_ctrl_base)) 217 + return PTR_ERR(phy->pll_ctrl_base); 220 218 221 219 phy->dev = &pdev->dev; 222 220
+3 -5
drivers/usb/phy/samsung-usbphy.c
··· 787 787 return -ENODEV; 788 788 } 789 789 790 - phy_base = devm_request_and_ioremap(dev, phy_mem); 791 - if (!phy_base) { 792 - dev_err(dev, "%s: register mapping failed\n", __func__); 793 - return -ENXIO; 794 - } 790 + phy_base = devm_ioremap_resource(dev, phy_mem); 791 + if (IS_ERR(phy_base)) 792 + return PTR_ERR(phy_base); 795 793 796 794 sphy = devm_kzalloc(dev, sizeof(*sphy), GFP_KERNEL); 797 795 if (!sphy)
+20
drivers/usb/serial/cp210x.c
··· 85 85 { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ 86 86 { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ 87 87 { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ 88 + { USB_DEVICE(0x2405, 0x0003) }, /* West Mountain Radio RIGblaster Advantage */ 88 89 { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ 89 90 { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ 90 91 { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */ ··· 151 150 { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ 152 151 { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ 153 152 { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ 153 + { USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */ 154 + { USB_DEVICE(0x1FB9, 0x0200) }, /* Lake Shore Model 218A Temperature Monitor */ 155 + { USB_DEVICE(0x1FB9, 0x0201) }, /* Lake Shore Model 219 Temperature Monitor */ 156 + { USB_DEVICE(0x1FB9, 0x0202) }, /* Lake Shore Model 233 Temperature Transmitter */ 157 + { USB_DEVICE(0x1FB9, 0x0203) }, /* Lake Shore Model 235 Temperature Transmitter */ 158 + { USB_DEVICE(0x1FB9, 0x0300) }, /* Lake Shore Model 335 Temperature Controller */ 159 + { USB_DEVICE(0x1FB9, 0x0301) }, /* Lake Shore Model 336 Temperature Controller */ 160 + { USB_DEVICE(0x1FB9, 0x0302) }, /* Lake Shore Model 350 Temperature Controller */ 161 + { USB_DEVICE(0x1FB9, 0x0303) }, /* Lake Shore Model 371 AC Bridge */ 162 + { USB_DEVICE(0x1FB9, 0x0400) }, /* Lake Shore Model 411 Handheld Gaussmeter */ 163 + { USB_DEVICE(0x1FB9, 0x0401) }, /* Lake Shore Model 425 Gaussmeter */ 164 + { USB_DEVICE(0x1FB9, 0x0402) }, /* Lake Shore Model 455A Gaussmeter */ 165 + { USB_DEVICE(0x1FB9, 0x0403) }, /* Lake Shore Model 475A Gaussmeter */ 166 + { USB_DEVICE(0x1FB9, 0x0404) }, /* Lake Shore Model 465 Three Axis Gaussmeter */ 167 + { USB_DEVICE(0x1FB9, 0x0600) }, /* Lake Shore Model 625A Superconducting MPS */ 168 + { USB_DEVICE(0x1FB9, 0x0601) }, 
/* Lake Shore Model 642A Magnet Power Supply */ 169 + { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */ 170 + { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */ 171 + { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */ 154 172 { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ 155 173 { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ 156 174 { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+5
drivers/usb/serial/option.c
··· 341 341 #define CINTERION_PRODUCT_EU3_E 0x0051 342 342 #define CINTERION_PRODUCT_EU3_P 0x0052 343 343 #define CINTERION_PRODUCT_PH8 0x0053 344 + #define CINTERION_PRODUCT_AH6 0x0055 345 + #define CINTERION_PRODUCT_PLS8 0x0060 344 346 345 347 /* Olivetti products */ 346 348 #define OLIVETTI_VENDOR_ID 0x0b3c ··· 581 579 { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42), 582 580 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 583 581 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) }, 582 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) }, 584 583 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, 585 584 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), 586 585 .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, ··· 1263 1260 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, 1264 1261 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, 1265 1262 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, 1263 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) }, 1264 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) }, 1266 1265 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 1267 1266 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, 1268 1267 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+1
drivers/usb/serial/qcaux.c
··· 69 69 { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfd, 0xff) }, /* NMEA */ 70 70 { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfe, 0xff) }, /* WMC */ 71 71 { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xff, 0xff) }, /* DIAG */ 72 + { USB_DEVICE_AND_INTERFACE_INFO(0x1fac, 0x0151, 0xff, 0xff, 0xff) }, 72 73 { }, 73 74 }; 74 75 MODULE_DEVICE_TABLE(usb, id_table);
+5 -2
drivers/usb/serial/qcserial.c
··· 197 197 198 198 if (is_gobi1k) { 199 199 /* Gobi 1K USB layout: 200 - * 0: serial port (doesn't respond) 200 + * 0: DM/DIAG (use libqcdm from ModemManager for communication) 201 201 * 1: serial port (doesn't respond) 202 202 * 2: AT-capable modem port 203 203 * 3: QMI/net 204 204 */ 205 - if (ifnum == 2) 205 + if (ifnum == 0) { 206 + dev_dbg(dev, "Gobi 1K DM/DIAG interface found\n"); 207 + altsetting = 1; 208 + } else if (ifnum == 2) 206 209 dev_dbg(dev, "Modem port found\n"); 207 210 else 208 211 altsetting = -1;
+5 -2
drivers/usb/serial/quatech2.c
··· 661 661 __func__); 662 662 break; 663 663 } 664 - tty_flip_buffer_push(&port->port); 664 + 665 + if (port_priv->is_open) 666 + tty_flip_buffer_push(&port->port); 665 667 666 668 newport = *(ch + 3); 667 669 ··· 706 704 tty_insert_flip_string(&port->port, ch, 1); 707 705 } 708 706 709 - tty_flip_buffer_push(&port->port); 707 + if (port_priv->is_open) 708 + tty_flip_buffer_push(&port->port); 710 709 } 711 710 712 711 static void qt2_write_bulk_callback(struct urb *urb)
+2 -74
drivers/usb/storage/initializers.c
··· 92 92 return 0; 93 93 } 94 94 95 - /* This places the HUAWEI usb dongles in multi-port mode */ 96 - static int usb_stor_huawei_feature_init(struct us_data *us) 95 + /* This places the HUAWEI E220 devices in multi-port mode */ 96 + int usb_stor_huawei_e220_init(struct us_data *us) 97 97 { 98 98 int result; 99 99 ··· 103 103 0x01, 0x0, NULL, 0x0, 1000); 104 104 US_DEBUGP("Huawei mode set result is %d\n", result); 105 105 return 0; 106 - } 107 - 108 - /* 109 - * It will send a scsi switch command called rewind' to huawei dongle. 110 - * When the dongle receives this command at the first time, 111 - * it will reboot immediately. After rebooted, it will ignore this command. 112 - * So it is unnecessary to read its response. 113 - */ 114 - static int usb_stor_huawei_scsi_init(struct us_data *us) 115 - { 116 - int result = 0; 117 - int act_len = 0; 118 - struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf; 119 - char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00, 120 - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 121 - 122 - bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN); 123 - bcbw->Tag = 0; 124 - bcbw->DataTransferLength = 0; 125 - bcbw->Flags = bcbw->Lun = 0; 126 - bcbw->Length = sizeof(rewind_cmd); 127 - memset(bcbw->CDB, 0, sizeof(bcbw->CDB)); 128 - memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd)); 129 - 130 - result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw, 131 - US_BULK_CB_WRAP_LEN, &act_len); 132 - US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result); 133 - return result; 134 - } 135 - 136 - /* 137 - * It tries to find the supported Huawei USB dongles. 138 - * In Huawei, they assign the following product IDs 139 - * for all of their mobile broadband dongles, 140 - * including the new dongles in the future. 141 - * So if the product ID is not included in this list, 142 - * it means it is not Huawei's mobile broadband dongles. 
143 - */ 144 - static int usb_stor_huawei_dongles_pid(struct us_data *us) 145 - { 146 - struct usb_interface_descriptor *idesc; 147 - int idProduct; 148 - 149 - idesc = &us->pusb_intf->cur_altsetting->desc; 150 - idProduct = le16_to_cpu(us->pusb_dev->descriptor.idProduct); 151 - /* The first port is CDROM, 152 - * means the dongle in the single port mode, 153 - * and a switch command is required to be sent. */ 154 - if (idesc && idesc->bInterfaceNumber == 0) { 155 - if ((idProduct == 0x1001) 156 - || (idProduct == 0x1003) 157 - || (idProduct == 0x1004) 158 - || (idProduct >= 0x1401 && idProduct <= 0x1500) 159 - || (idProduct >= 0x1505 && idProduct <= 0x1600) 160 - || (idProduct >= 0x1c02 && idProduct <= 0x2202)) { 161 - return 1; 162 - } 163 - } 164 - return 0; 165 - } 166 - 167 - int usb_stor_huawei_init(struct us_data *us) 168 - { 169 - int result = 0; 170 - 171 - if (usb_stor_huawei_dongles_pid(us)) { 172 - if (le16_to_cpu(us->pusb_dev->descriptor.idProduct) >= 0x1446) 173 - result = usb_stor_huawei_scsi_init(us); 174 - else 175 - result = usb_stor_huawei_feature_init(us); 176 - } 177 - return result; 178 106 }
+2 -2
drivers/usb/storage/initializers.h
··· 46 46 * flash reader */ 47 47 int usb_stor_ucr61s2b_init(struct us_data *us); 48 48 49 - /* This places the HUAWEI usb dongles in multi-port mode */ 50 - int usb_stor_huawei_init(struct us_data *us); 49 + /* This places the HUAWEI E220 devices in multi-port mode */ 50 + int usb_stor_huawei_e220_init(struct us_data *us);
+335 -2
drivers/usb/storage/unusual_devs.h
··· 53 53 * as opposed to devices that do something strangely or wrongly. 54 54 */ 55 55 56 + /* In-kernel mode switching is deprecated. Do not add new devices to 57 + * this list for the sole purpose of switching them to a different 58 + * mode. Existing userspace solutions are superior. 59 + * 60 + * New mode switching devices should instead be added to the database 61 + * maintained at http://www.draisberghof.de/usb_modeswitch/ 62 + */ 63 + 56 64 #if !defined(CONFIG_USB_STORAGE_SDDR09) && \ 57 65 !defined(CONFIG_USB_STORAGE_SDDR09_MODULE) 58 66 #define NO_SDDR09 ··· 1535 1527 /* Reported by fangxiaozhi <huananhu@huawei.com> 1536 1528 * This brings the HUAWEI data card devices into multi-port mode 1537 1529 */ 1538 - UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50, 1530 + UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000, 1539 1531 "HUAWEI MOBILE", 1540 1532 "Mass Storage", 1541 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init, 1533 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1534 + 0), 1535 + UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000, 1536 + "HUAWEI MOBILE", 1537 + "Mass Storage", 1538 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1539 + 0), 1540 + UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000, 1541 + "HUAWEI MOBILE", 1542 + "Mass Storage", 1543 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1544 + 0), 1545 + UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000, 1546 + "HUAWEI MOBILE", 1547 + "Mass Storage", 1548 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1549 + 0), 1550 + UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000, 1551 + "HUAWEI MOBILE", 1552 + "Mass Storage", 1553 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1554 + 0), 1555 + UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000, 1556 + "HUAWEI MOBILE", 1557 + "Mass Storage", 1558 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1559 + 0), 1560 + UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000, 1561 + "HUAWEI MOBILE", 1562 + "Mass Storage", 1563 + 
USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1564 + 0), 1565 + UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000, 1566 + "HUAWEI MOBILE", 1567 + "Mass Storage", 1568 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1569 + 0), 1570 + UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000, 1571 + "HUAWEI MOBILE", 1572 + "Mass Storage", 1573 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1574 + 0), 1575 + UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000, 1576 + "HUAWEI MOBILE", 1577 + "Mass Storage", 1578 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1579 + 0), 1580 + UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000, 1581 + "HUAWEI MOBILE", 1582 + "Mass Storage", 1583 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1584 + 0), 1585 + UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000, 1586 + "HUAWEI MOBILE", 1587 + "Mass Storage", 1588 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1589 + 0), 1590 + UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000, 1591 + "HUAWEI MOBILE", 1592 + "Mass Storage", 1593 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1594 + 0), 1595 + UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000, 1596 + "HUAWEI MOBILE", 1597 + "Mass Storage", 1598 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1599 + 0), 1600 + UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000, 1601 + "HUAWEI MOBILE", 1602 + "Mass Storage", 1603 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1604 + 0), 1605 + UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000, 1606 + "HUAWEI MOBILE", 1607 + "Mass Storage", 1608 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1609 + 0), 1610 + UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000, 1611 + "HUAWEI MOBILE", 1612 + "Mass Storage", 1613 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1614 + 0), 1615 + UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000, 1616 + "HUAWEI MOBILE", 1617 + "Mass Storage", 1618 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1619 + 0), 1620 + 
UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000, 1621 + "HUAWEI MOBILE", 1622 + "Mass Storage", 1623 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1624 + 0), 1625 + UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000, 1626 + "HUAWEI MOBILE", 1627 + "Mass Storage", 1628 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1629 + 0), 1630 + UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000, 1631 + "HUAWEI MOBILE", 1632 + "Mass Storage", 1633 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1634 + 0), 1635 + UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000, 1636 + "HUAWEI MOBILE", 1637 + "Mass Storage", 1638 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1639 + 0), 1640 + UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000, 1641 + "HUAWEI MOBILE", 1642 + "Mass Storage", 1643 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1644 + 0), 1645 + UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000, 1646 + "HUAWEI MOBILE", 1647 + "Mass Storage", 1648 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1649 + 0), 1650 + UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000, 1651 + "HUAWEI MOBILE", 1652 + "Mass Storage", 1653 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1654 + 0), 1655 + UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000, 1656 + "HUAWEI MOBILE", 1657 + "Mass Storage", 1658 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1659 + 0), 1660 + UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000, 1661 + "HUAWEI MOBILE", 1662 + "Mass Storage", 1663 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1664 + 0), 1665 + UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000, 1666 + "HUAWEI MOBILE", 1667 + "Mass Storage", 1668 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1669 + 0), 1670 + UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000, 1671 + "HUAWEI MOBILE", 1672 + "Mass Storage", 1673 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1674 + 0), 1675 + UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000, 1676 + "HUAWEI MOBILE", 1677 + "Mass 
Storage", 1678 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1679 + 0), 1680 + UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000, 1681 + "HUAWEI MOBILE", 1682 + "Mass Storage", 1683 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1684 + 0), 1685 + UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000, 1686 + "HUAWEI MOBILE", 1687 + "Mass Storage", 1688 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1689 + 0), 1690 + UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000, 1691 + "HUAWEI MOBILE", 1692 + "Mass Storage", 1693 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1694 + 0), 1695 + UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000, 1696 + "HUAWEI MOBILE", 1697 + "Mass Storage", 1698 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1699 + 0), 1700 + UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000, 1701 + "HUAWEI MOBILE", 1702 + "Mass Storage", 1703 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1704 + 0), 1705 + UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000, 1706 + "HUAWEI MOBILE", 1707 + "Mass Storage", 1708 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1709 + 0), 1710 + UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000, 1711 + "HUAWEI MOBILE", 1712 + "Mass Storage", 1713 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1714 + 0), 1715 + UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000, 1716 + "HUAWEI MOBILE", 1717 + "Mass Storage", 1718 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1719 + 0), 1720 + UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000, 1721 + "HUAWEI MOBILE", 1722 + "Mass Storage", 1723 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1724 + 0), 1725 + UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000, 1726 + "HUAWEI MOBILE", 1727 + "Mass Storage", 1728 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1729 + 0), 1730 + UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000, 1731 + "HUAWEI MOBILE", 1732 + "Mass Storage", 1733 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1734 + 0), 
1735 + UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000, 1736 + "HUAWEI MOBILE", 1737 + "Mass Storage", 1738 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1739 + 0), 1740 + UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000, 1741 + "HUAWEI MOBILE", 1742 + "Mass Storage", 1743 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1744 + 0), 1745 + UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000, 1746 + "HUAWEI MOBILE", 1747 + "Mass Storage", 1748 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1749 + 0), 1750 + UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000, 1751 + "HUAWEI MOBILE", 1752 + "Mass Storage", 1753 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1754 + 0), 1755 + UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000, 1756 + "HUAWEI MOBILE", 1757 + "Mass Storage", 1758 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1759 + 0), 1760 + UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000, 1761 + "HUAWEI MOBILE", 1762 + "Mass Storage", 1763 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1764 + 0), 1765 + UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000, 1766 + "HUAWEI MOBILE", 1767 + "Mass Storage", 1768 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1769 + 0), 1770 + UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000, 1771 + "HUAWEI MOBILE", 1772 + "Mass Storage", 1773 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1774 + 0), 1775 + UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000, 1776 + "HUAWEI MOBILE", 1777 + "Mass Storage", 1778 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1779 + 0), 1780 + UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000, 1781 + "HUAWEI MOBILE", 1782 + "Mass Storage", 1783 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1784 + 0), 1785 + UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000, 1786 + "HUAWEI MOBILE", 1787 + "Mass Storage", 1788 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1789 + 0), 1790 + UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000, 1791 + "HUAWEI MOBILE", 1792 + 
"Mass Storage", 1793 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1794 + 0), 1795 + UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000, 1796 + "HUAWEI MOBILE", 1797 + "Mass Storage", 1798 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1799 + 0), 1800 + UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000, 1801 + "HUAWEI MOBILE", 1802 + "Mass Storage", 1803 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1804 + 0), 1805 + UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000, 1806 + "HUAWEI MOBILE", 1807 + "Mass Storage", 1808 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1809 + 0), 1810 + UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000, 1811 + "HUAWEI MOBILE", 1812 + "Mass Storage", 1813 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1814 + 0), 1815 + UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000, 1816 + "HUAWEI MOBILE", 1817 + "Mass Storage", 1818 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1819 + 0), 1820 + UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000, 1821 + "HUAWEI MOBILE", 1822 + "Mass Storage", 1823 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1824 + 0), 1825 + UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000, 1826 + "HUAWEI MOBILE", 1827 + "Mass Storage", 1828 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1829 + 0), 1830 + UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000, 1831 + "HUAWEI MOBILE", 1832 + "Mass Storage", 1833 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1834 + 0), 1835 + UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000, 1836 + "HUAWEI MOBILE", 1837 + "Mass Storage", 1838 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1839 + 0), 1840 + UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000, 1841 + "HUAWEI MOBILE", 1842 + "Mass Storage", 1843 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1844 + 0), 1845 + UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000, 1846 + "HUAWEI MOBILE", 1847 + "Mass Storage", 1848 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1849 
+ 0), 1850 + UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000, 1851 + "HUAWEI MOBILE", 1852 + "Mass Storage", 1853 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1854 + 0), 1855 + UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000, 1856 + "HUAWEI MOBILE", 1857 + "Mass Storage", 1858 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1542 1859 0), 1543 1860 1544 1861 /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
+2 -1
drivers/vhost/net.c
··· 339 339 msg.msg_controllen = 0; 340 340 ubufs = NULL; 341 341 } else { 342 - struct ubuf_info *ubuf = &vq->ubuf_info[head]; 342 + struct ubuf_info *ubuf; 343 + ubuf = vq->ubuf_info + vq->upend_idx; 343 344 344 345 vq->heads[vq->upend_idx].len = 345 346 VHOST_DMA_IN_PROGRESS;
+15 -7
drivers/video/atmel_lcdfb.c
··· 422 422 = var->bits_per_pixel; 423 423 break; 424 424 case 16: 425 + /* Older SOCs use IBGR:555 rather than BGR:565. */ 426 + if (sinfo->have_intensity_bit) 427 + var->green.length = 5; 428 + else 429 + var->green.length = 6; 430 + 425 431 if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) { 426 - /* RGB:565 mode */ 427 - var->red.offset = 11; 432 + /* RGB:5X5 mode */ 433 + var->red.offset = var->green.length + 5; 428 434 var->blue.offset = 0; 429 435 } else { 430 - /* BGR:565 mode */ 436 + /* BGR:5X5 mode */ 431 437 var->red.offset = 0; 432 - var->blue.offset = 11; 438 + var->blue.offset = var->green.length + 5; 433 439 } 434 440 var->green.offset = 5; 435 - var->green.length = 6; 436 441 var->red.length = var->blue.length = 5; 437 442 break; 438 443 case 32: ··· 684 679 685 680 case FB_VISUAL_PSEUDOCOLOR: 686 681 if (regno < 256) { 687 - if (cpu_is_at91sam9261() || cpu_is_at91sam9263() 688 - || cpu_is_at91sam9rl()) { 682 + if (sinfo->have_intensity_bit) { 689 683 /* old style I+BGR:555 */ 690 684 val = ((red >> 11) & 0x001f); 691 685 val |= ((green >> 6) & 0x03e0); ··· 874 870 } 875 871 sinfo->info = info; 876 872 sinfo->pdev = pdev; 873 + if (cpu_is_at91sam9261() || cpu_is_at91sam9263() || 874 + cpu_is_at91sam9rl()) { 875 + sinfo->have_intensity_bit = true; 876 + } 877 877 878 878 strcpy(info->fix.id, sinfo->pdev->name); 879 879 info->flags = ATMEL_LCDFB_FBINFO_DEFAULT;
+1
drivers/video/omap/lcd_ams_delta.c
··· 27 27 #include <linux/lcd.h> 28 28 #include <linux/gpio.h> 29 29 30 + #include <mach/hardware.h> 30 31 #include <mach/board-ams-delta.h> 31 32 32 33 #include "omapfb.h"
+3
drivers/video/omap/lcd_osk.c
··· 24 24 #include <linux/platform_device.h> 25 25 26 26 #include <asm/gpio.h> 27 + 28 + #include <mach/hardware.h> 27 29 #include <mach/mux.h> 30 + 28 31 #include "omapfb.h" 29 32 30 33 static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
+4 -2
drivers/w1/masters/w1-gpio.c
··· 47 47 return gpio_get_value(pdata->pin) ? 1 : 0; 48 48 } 49 49 50 + #if defined(CONFIG_OF) 50 51 static struct of_device_id w1_gpio_dt_ids[] = { 51 52 { .compatible = "w1-gpio" }, 52 53 {} 53 54 }; 54 55 MODULE_DEVICE_TABLE(of, w1_gpio_dt_ids); 56 + #endif 55 57 56 58 static int w1_gpio_probe_dt(struct platform_device *pdev) 57 59 { ··· 160 158 return err; 161 159 } 162 160 163 - static int __exit w1_gpio_remove(struct platform_device *pdev) 161 + static int w1_gpio_remove(struct platform_device *pdev) 164 162 { 165 163 struct w1_bus_master *master = platform_get_drvdata(pdev); 166 164 struct w1_gpio_platform_data *pdata = pdev->dev.platform_data; ··· 212 210 .of_match_table = of_match_ptr(w1_gpio_dt_ids), 213 211 }, 214 212 .probe = w1_gpio_probe, 215 - .remove = __exit_p(w1_gpio_remove), 213 + .remove = w1_gpio_remove, 216 214 .suspend = w1_gpio_suspend, 217 215 .resume = w1_gpio_resume, 218 216 };
+2 -1
drivers/w1/w1.c
··· 924 924 tmp64 = (triplet_ret >> 2); 925 925 rn |= (tmp64 << i); 926 926 927 - if (kthread_should_stop()) { 927 + /* ensure we're called from kthread and not by netlink callback */ 928 + if (!dev->priv && kthread_should_stop()) { 928 929 mutex_unlock(&dev->bus_mutex); 929 930 dev_dbg(&dev->dev, "Abort w1_search\n"); 930 931 return;
+4 -4
drivers/xen/xen-acpi-processor.c
··· 500 500 (void)acpi_processor_preregister_performance(acpi_perf_data); 501 501 502 502 for_each_possible_cpu(i) { 503 + struct acpi_processor *pr; 503 504 struct acpi_processor_performance *perf; 504 505 506 + pr = per_cpu(processors, i); 505 507 perf = per_cpu_ptr(acpi_perf_data, i); 506 - rc = acpi_processor_register_performance(perf, i); 508 + pr->performance = perf; 509 + rc = acpi_processor_get_performance_info(pr); 507 510 if (rc) 508 511 goto err_out; 509 512 } 510 - rc = acpi_processor_notify_smm(THIS_MODULE); 511 - if (rc) 512 - goto err_unregister; 513 513 514 514 for_each_possible_cpu(i) { 515 515 struct acpi_processor *_pr;
+2 -1
drivers/xen/xen-pciback/pciback_ops.c
··· 113 113 if (dev->msi_enabled) 114 114 pci_disable_msi(dev); 115 115 #endif 116 - pci_disable_device(dev); 116 + if (pci_is_enabled(dev)) 117 + pci_disable_device(dev); 117 118 118 119 pci_write_config_word(dev, PCI_COMMAND, 0); 119 120
-1
drivers/xen/xen-stub.c
··· 25 25 #include <linux/export.h> 26 26 #include <linux/types.h> 27 27 #include <linux/acpi.h> 28 - #include <acpi/acpi_drivers.h> 29 28 #include <xen/acpi.h> 30 29 31 30 #ifdef CONFIG_ACPI
+4 -1
fs/btrfs/extent-tree.c
··· 1467 1467 if (ret && !insert) { 1468 1468 err = -ENOENT; 1469 1469 goto out; 1470 + } else if (ret) { 1471 + err = -EIO; 1472 + WARN_ON(1); 1473 + goto out; 1470 1474 } 1471 - BUG_ON(ret); /* Corruption */ 1472 1475 1473 1476 leaf = path->nodes[0]; 1474 1477 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+1
fs/btrfs/file.c
··· 591 591 } 592 592 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); 593 593 clear_bit(EXTENT_FLAG_PINNED, &em->flags); 594 + clear_bit(EXTENT_FLAG_LOGGING, &flags); 594 595 remove_extent_mapping(em_tree, em); 595 596 if (no_splits) 596 597 goto next;
+3
fs/btrfs/inode.c
··· 2312 2312 key.type = BTRFS_EXTENT_DATA_KEY; 2313 2313 key.offset = start; 2314 2314 2315 + path->leave_spinning = 1; 2315 2316 if (merge) { 2316 2317 struct btrfs_file_extent_item *fi; 2317 2318 u64 extent_len; ··· 2369 2368 2370 2369 btrfs_mark_buffer_dirty(leaf); 2371 2370 inode_add_bytes(inode, len); 2371 + btrfs_release_path(path); 2372 2372 2373 2373 ret = btrfs_inc_extent_ref(trans, root, new->bytenr, 2374 2374 new->disk_len, 0, ··· 2383 2381 ret = 1; 2384 2382 out_free_path: 2385 2383 btrfs_release_path(path); 2384 + path->leave_spinning = 0; 2386 2385 btrfs_end_transaction(trans, root); 2387 2386 out_unlock: 2388 2387 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
-1
fs/btrfs/locking.h
··· 26 26 27 27 void btrfs_tree_lock(struct extent_buffer *eb); 28 28 void btrfs_tree_unlock(struct extent_buffer *eb); 29 - int btrfs_try_spin_lock(struct extent_buffer *eb); 30 29 31 30 void btrfs_tree_read_lock(struct extent_buffer *eb); 32 31 void btrfs_tree_read_unlock(struct extent_buffer *eb);
+6 -4
fs/btrfs/qgroup.c
··· 1525 1525 1526 1526 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && 1527 1527 qg->reserved + qg->rfer + num_bytes > 1528 - qg->max_rfer) 1528 + qg->max_rfer) { 1529 1529 ret = -EDQUOT; 1530 + goto out; 1531 + } 1530 1532 1531 1533 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) && 1532 1534 qg->reserved + qg->excl + num_bytes > 1533 - qg->max_excl) 1535 + qg->max_excl) { 1534 1536 ret = -EDQUOT; 1537 + goto out; 1538 + } 1535 1539 1536 1540 list_for_each_entry(glist, &qg->groups, next_group) { 1537 1541 ulist_add(ulist, glist->group->qgroupid, 1538 1542 (uintptr_t)glist->group, GFP_ATOMIC); 1539 1543 } 1540 1544 } 1541 - if (ret) 1542 - goto out; 1543 1545 1544 1546 /* 1545 1547 * no limits exceeded, now record the reservation into all qgroups
+5 -6
fs/btrfs/transaction.c
··· 625 625 626 626 btrfs_trans_release_metadata(trans, root); 627 627 trans->block_rsv = NULL; 628 - /* 629 - * the same root has to be passed to start_transaction and 630 - * end_transaction. Subvolume quota depends on this. 631 - */ 632 - WARN_ON(trans->root != root); 633 628 634 629 if (trans->qgroup_reserved) { 635 - btrfs_qgroup_free(root, trans->qgroup_reserved); 630 + /* 631 + * the same root has to be passed here between start_transaction 632 + * and end_transaction. Subvolume quota depends on this. 633 + */ 634 + btrfs_qgroup_free(trans->root, trans->qgroup_reserved); 636 635 trans->qgroup_reserved = 0; 637 636 } 638 637
+6
fs/btrfs/volumes.c
··· 684 684 __btrfs_close_devices(fs_devices); 685 685 free_fs_devices(fs_devices); 686 686 } 687 + /* 688 + * Wait for rcu kworkers under __btrfs_close_devices 689 + * to finish all blkdev_puts so device is really 690 + * free when umount is done. 691 + */ 692 + rcu_barrier(); 687 693 return ret; 688 694 } 689 695
+1
fs/cifs/cifsfs.c
··· 777 777 .kill_sb = cifs_kill_sb, 778 778 /* .fs_flags */ 779 779 }; 780 + MODULE_ALIAS_FS("cifs"); 780 781 const struct inode_operations cifs_dir_inode_ops = { 781 782 .create = cifs_create, 782 783 .atomic_open = cifs_atomic_open,
+7 -8
fs/compat.c
··· 558 558 } 559 559 *ret_pointer = iov; 560 560 561 + ret = -EFAULT; 562 + if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector))) 563 + goto out; 564 + 561 565 /* 562 566 * Single unix specification: 563 567 * We should -EINVAL if an element length is not >= 0 and fitting an ··· 1084 1080 if (!file->f_op) 1085 1081 goto out; 1086 1082 1087 - ret = -EFAULT; 1088 - if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector))) 1089 - goto out; 1090 - 1091 - tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs, 1083 + ret = compat_rw_copy_check_uvector(type, uvector, nr_segs, 1092 1084 UIO_FASTIOV, iovstack, &iov); 1093 - if (tot_len == 0) { 1094 - ret = 0; 1085 + if (ret <= 0) 1095 1086 goto out; 1096 - } 1097 1087 1088 + tot_len = ret; 1098 1089 ret = rw_verify_area(type, file, pos, tot_len); 1099 1090 if (ret < 0) 1100 1091 goto out;
-1
fs/ext2/ialloc.c
··· 118 118 * as writing the quota to disk may need the lock as well. 119 119 */ 120 120 /* Quota is already initialized in iput() */ 121 - ext2_xattr_delete_inode(inode); 122 121 dquot_free_inode(inode); 123 122 dquot_drop(inode); 124 123
+2
fs/ext2/inode.c
··· 34 34 #include "ext2.h" 35 35 #include "acl.h" 36 36 #include "xip.h" 37 + #include "xattr.h" 37 38 38 39 static int __ext2_write_inode(struct inode *inode, int do_sync); 39 40 ··· 89 88 inode->i_size = 0; 90 89 if (inode->i_blocks) 91 90 ext2_truncate_blocks(inode, 0); 91 + ext2_xattr_delete_inode(inode); 92 92 } 93 93 94 94 invalidate_inode_buffers(inode);
+2 -2
fs/ext3/super.c
··· 353 353 return bdev; 354 354 355 355 fail: 356 - ext3_msg(sb, "error: failed to open journal device %s: %ld", 356 + ext3_msg(sb, KERN_ERR, "error: failed to open journal device %s: %ld", 357 357 __bdevname(dev, b), PTR_ERR(bdev)); 358 358 359 359 return NULL; ··· 887 887 /*todo: use simple_strtoll with >32bit ext3 */ 888 888 sb_block = simple_strtoul(options, &options, 0); 889 889 if (*options && *options != ',') { 890 - ext3_msg(sb, "error: invalid sb specification: %s", 890 + ext3_msg(sb, KERN_ERR, "error: invalid sb specification: %s", 891 891 (char *) *data); 892 892 return 1; 893 893 }
+2
fs/ext4/super.c
··· 91 91 .fs_flags = FS_REQUIRES_DEV, 92 92 }; 93 93 MODULE_ALIAS_FS("ext2"); 94 + MODULE_ALIAS("ext2"); 94 95 #define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type) 95 96 #else 96 97 #define IS_EXT2_SB(sb) (0) ··· 107 106 .fs_flags = FS_REQUIRES_DEV, 108 107 }; 109 108 MODULE_ALIAS_FS("ext3"); 109 + MODULE_ALIAS("ext3"); 110 110 #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type) 111 111 #else 112 112 #define IS_EXT3_SB(sb) (0)
+1
fs/freevxfs/vxfs_super.c
··· 258 258 .fs_flags = FS_REQUIRES_DEV, 259 259 }; 260 260 MODULE_ALIAS_FS("vxfs"); /* makes mount -t vxfs autoload the module */ 261 + MODULE_ALIAS("vxfs"); 261 262 262 263 static int __init 263 264 vxfs_init(void)
+1
fs/hostfs/hostfs_kern.c
··· 986 986 .kill_sb = hostfs_kill_sb, 987 987 .fs_flags = 0, 988 988 }; 989 + MODULE_ALIAS_FS("hostfs"); 989 990 990 991 static int __init init_hostfs(void) 991 992 {
+1
fs/hpfs/super.c
··· 688 688 .kill_sb = kill_block_super, 689 689 .fs_flags = FS_REQUIRES_DEV, 690 690 }; 691 + MODULE_ALIAS_FS("hpfs"); 691 692 692 693 static int __init init_hpfs_fs(void) 693 694 {
+1
fs/isofs/inode.c
··· 1557 1557 .fs_flags = FS_REQUIRES_DEV, 1558 1558 }; 1559 1559 MODULE_ALIAS_FS("iso9660"); 1560 + MODULE_ALIAS("iso9660"); 1560 1561 1561 1562 static int __init init_iso9660_fs(void) 1562 1563 {
+1
fs/nfs/super.c
··· 335 335 .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA, 336 336 }; 337 337 MODULE_ALIAS_FS("nfs4"); 338 + MODULE_ALIAS("nfs4"); 338 339 EXPORT_SYMBOL_GPL(nfs4_fs_type); 339 340 340 341 static int __init register_nfs4_fs(void)
+2 -34
fs/nfsd/nfs4state.c
··· 230 230 __nfs4_file_put_access(fp, oflag); 231 231 } 232 232 233 - static inline int get_new_stid(struct nfs4_stid *stid) 234 - { 235 - static int min_stateid = 0; 236 - struct idr *stateids = &stid->sc_client->cl_stateids; 237 - int new_stid; 238 - int error; 239 - 240 - error = idr_get_new_above(stateids, stid, min_stateid, &new_stid); 241 - /* 242 - * Note: the necessary preallocation was done in 243 - * nfs4_alloc_stateid(). The idr code caps the number of 244 - * preallocations that can exist at a time, but the state lock 245 - * prevents anyone from using ours before we get here: 246 - */ 247 - WARN_ON_ONCE(error); 248 - /* 249 - * It shouldn't be a problem to reuse an opaque stateid value. 250 - * I don't think it is for 4.1. But with 4.0 I worry that, for 251 - * example, a stray write retransmission could be accepted by 252 - * the server when it should have been rejected. Therefore, 253 - * adopt a trick from the sctp code to attempt to maximize the 254 - * amount of time until an id is reused, by ensuring they always 255 - * "increase" (mod INT_MAX): 256 - */ 257 - 258 - min_stateid = new_stid+1; 259 - if (min_stateid == INT_MAX) 260 - min_stateid = 0; 261 - return new_stid; 262 - } 263 - 264 233 static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct 265 234 kmem_cache *slab) 266 235 { ··· 242 273 if (!stid) 243 274 return NULL; 244 275 245 - if (!idr_pre_get(stateids, GFP_KERNEL)) 246 - goto out_free; 247 - if (idr_get_new_above(stateids, stid, min_stateid, &new_id)) 276 + new_id = idr_alloc(stateids, stid, min_stateid, 0, GFP_KERNEL); 277 + if (new_id < 0) 248 278 goto out_free; 249 279 stid->sc_client = cl; 250 280 stid->sc_type = 0;
+3
fs/pipe.c
··· 863 863 { 864 864 int ret = -ENOENT; 865 865 866 + if (!(filp->f_mode & (FMODE_READ|FMODE_WRITE))) 867 + return -EINVAL; 868 + 866 869 mutex_lock(&inode->i_mutex); 867 870 868 871 if (inode->i_pipe) {
+4 -1
fs/quota/dquot.c
··· 1439 1439 * did a write before quota was turned on 1440 1440 */ 1441 1441 rsv = inode_get_rsv_space(inode); 1442 - if (unlikely(rsv)) 1442 + if (unlikely(rsv)) { 1443 + spin_lock(&dq_data_lock); 1443 1444 dquot_resv_space(inode->i_dquot[cnt], rsv); 1445 + spin_unlock(&dq_data_lock); 1446 + } 1444 1447 } 1445 1448 } 1446 1449 out_err:
+1 -3
fs/reiserfs/super.c
··· 1147 1147 "on filesystem root."); 1148 1148 return 0; 1149 1149 } 1150 - qf_names[qtype] = 1151 - kmalloc(strlen(arg) + 1, GFP_KERNEL); 1150 + qf_names[qtype] = kstrdup(arg, GFP_KERNEL); 1152 1151 if (!qf_names[qtype]) { 1153 1152 reiserfs_warning(s, "reiserfs-2502", 1154 1153 "not enough memory " ··· 1155 1156 "quotafile name."); 1156 1157 return 0; 1157 1158 } 1158 - strcpy(qf_names[qtype], arg); 1159 1159 if (qtype == USRQUOTA) 1160 1160 *mount_options |= 1 << REISERFS_USRQUOTA; 1161 1161 else
+1
fs/squashfs/super.c
··· 489 489 .kill_sb = kill_block_super, 490 490 .fs_flags = FS_REQUIRES_DEV 491 491 }; 492 + MODULE_ALIAS_FS("squashfs"); 492 493 493 494 static const struct super_operations squashfs_super_ops = { 494 495 .alloc_inode = squashfs_alloc_inode,
+1
fs/sysv/super.c
··· 555 555 .fs_flags = FS_REQUIRES_DEV, 556 556 }; 557 557 MODULE_ALIAS_FS("v7"); 558 + MODULE_ALIAS("v7"); 558 559 559 560 static int __init init_sysv_fs(void) 560 561 {
+1
fs/udf/super.c
··· 118 118 .kill_sb = kill_block_super, 119 119 .fs_flags = FS_REQUIRES_DEV, 120 120 }; 121 + MODULE_ALIAS_FS("udf"); 121 122 122 123 static struct kmem_cache *udf_inode_cachep; 123 124
+6
fs/xfs/xfs_buf.c
··· 1334 1334 int size; 1335 1335 int i; 1336 1336 1337 + /* 1338 + * Make sure we capture only current IO errors rather than stale errors 1339 + * left over from previous use of the buffer (e.g. failed readahead). 1340 + */ 1341 + bp->b_error = 0; 1342 + 1337 1343 if (bp->b_flags & XBF_WRITE) { 1338 1344 if (bp->b_flags & XBF_SYNCIO) 1339 1345 rw = WRITE_SYNC;
+2 -2
fs/xfs/xfs_iomap.c
··· 325 325 * rather than falling short due to things like stripe unit/width alignment of 326 326 * real extents. 327 327 */ 328 - STATIC int 328 + STATIC xfs_fsblock_t 329 329 xfs_iomap_eof_prealloc_initial_size( 330 330 struct xfs_mount *mp, 331 331 struct xfs_inode *ip, ··· 413 413 * have a large file on a small filesystem and the above 414 414 * lowspace thresholds are smaller than MAXEXTLEN. 415 415 */ 416 - while (alloc_blocks >= freesp) 416 + while (alloc_blocks && alloc_blocks >= freesp) 417 417 alloc_blocks >>= 4; 418 418 } 419 419
+3
include/acpi/processor.h
··· 235 235 if a _PPC object exists, rmmod is disallowed then */ 236 236 int acpi_processor_notify_smm(struct module *calling_module); 237 237 238 + /* parsing the _P* objects. */ 239 + extern int acpi_processor_get_performance_info(struct acpi_processor *pr); 240 + 238 241 /* for communication between multiple parts of the processor kernel module */ 239 242 DECLARE_PER_CPU(struct acpi_processor *, processors); 240 243 extern struct acpi_processor_errata errata;
-6
include/asm-generic/atomic.h
··· 136 136 #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) 137 137 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) 138 138 139 - #define cmpxchg_local(ptr, o, n) \ 140 - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ 141 - (unsigned long)(n), sizeof(*(ptr)))) 142 - 143 - #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) 144 - 145 139 static inline int __atomic_add_unless(atomic_t *v, int a, int u) 146 140 { 147 141 int c, old;
+10
include/asm-generic/cmpxchg.h
··· 92 92 */ 93 93 #include <asm-generic/cmpxchg-local.h> 94 94 95 + #ifndef cmpxchg_local 96 + #define cmpxchg_local(ptr, o, n) \ 97 + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ 98 + (unsigned long)(n), sizeof(*(ptr)))) 99 + #endif 100 + 101 + #ifndef cmpxchg64_local 102 + #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) 103 + #endif 104 + 95 105 #define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n)) 96 106 #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) 97 107
+51 -17
include/linux/idr.h
··· 73 73 */ 74 74 75 75 void *idr_find_slowpath(struct idr *idp, int id); 76 - int idr_pre_get(struct idr *idp, gfp_t gfp_mask); 77 - int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); 78 76 void idr_preload(gfp_t gfp_mask); 79 77 int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); 80 78 int idr_for_each(struct idr *idp, ··· 97 99 98 100 /** 99 101 * idr_find - return pointer for given id 100 - * @idp: idr handle 102 + * @idr: idr handle 101 103 * @id: lookup key 102 104 * 103 105 * Return the pointer given the id it has been registered with. A %NULL ··· 118 120 } 119 121 120 122 /** 121 - * idr_get_new - allocate new idr entry 122 - * @idp: idr handle 123 - * @ptr: pointer you want associated with the id 124 - * @id: pointer to the allocated handle 125 - * 126 - * Simple wrapper around idr_get_new_above() w/ @starting_id of zero. 127 - */ 128 - static inline int idr_get_new(struct idr *idp, void *ptr, int *id) 129 - { 130 - return idr_get_new_above(idp, ptr, 0, id); 131 - } 132 - 133 - /** 134 123 * idr_for_each_entry - iterate over an idr's elements of a given type 135 124 * @idp: idr handle 136 125 * @entry: the type * to use as cursor ··· 128 143 entry != NULL; \ 129 144 ++id, entry = (typeof(entry))idr_get_next((idp), &(id))) 130 145 131 - void __idr_remove_all(struct idr *idp); /* don't use */ 146 + /* 147 + * Don't use the following functions. These exist only to suppress 148 + * deprecated warnings on EXPORT_SYMBOL()s. 149 + */ 150 + int __idr_pre_get(struct idr *idp, gfp_t gfp_mask); 151 + int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); 152 + void __idr_remove_all(struct idr *idp); 153 + 154 + /** 155 + * idr_pre_get - reserve resources for idr allocation 156 + * @idp: idr handle 157 + * @gfp_mask: memory allocation flags 158 + * 159 + * Part of old alloc interface. This is going away. Use 160 + * idr_preload[_end]() and idr_alloc() instead. 
161 + */ 162 + static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask) 163 + { 164 + return __idr_pre_get(idp, gfp_mask); 165 + } 166 + 167 + /** 168 + * idr_get_new_above - allocate new idr entry above or equal to a start id 169 + * @idp: idr handle 170 + * @ptr: pointer you want associated with the id 171 + * @starting_id: id to start search at 172 + * @id: pointer to the allocated handle 173 + * 174 + * Part of old alloc interface. This is going away. Use 175 + * idr_preload[_end]() and idr_alloc() instead. 176 + */ 177 + static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr, 178 + int starting_id, int *id) 179 + { 180 + return __idr_get_new_above(idp, ptr, starting_id, id); 181 + } 182 + 183 + /** 184 + * idr_get_new - allocate new idr entry 185 + * @idp: idr handle 186 + * @ptr: pointer you want associated with the id 187 + * @id: pointer to the allocated handle 188 + * 189 + * Part of old alloc interface. This is going away. Use 190 + * idr_preload[_end]() and idr_alloc() instead. 191 + */ 192 + static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id) 193 + { 194 + return __idr_get_new_above(idp, ptr, 0, id); 195 + } 132 196 133 197 /** 134 198 * idr_remove_all - remove all ids from the given idr tree
+6 -3
include/linux/iio/common/st_sensors.h
··· 227 227 }; 228 228 229 229 #ifdef CONFIG_IIO_BUFFER 230 + irqreturn_t st_sensors_trigger_handler(int irq, void *p); 231 + 232 + int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf); 233 + #endif 234 + 235 + #ifdef CONFIG_IIO_TRIGGER 230 236 int st_sensors_allocate_trigger(struct iio_dev *indio_dev, 231 237 const struct iio_trigger_ops *trigger_ops); 232 238 233 239 void st_sensors_deallocate_trigger(struct iio_dev *indio_dev); 234 240 235 - irqreturn_t st_sensors_trigger_handler(int irq, void *p); 236 - 237 - int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf); 238 241 #else 239 242 static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev, 240 243 const struct iio_trigger_ops *trigger_ops)
+3 -1
include/linux/list.h
··· 667 667 pos = n) 668 668 669 669 #define hlist_entry_safe(ptr, type, member) \ 670 - (ptr) ? hlist_entry(ptr, type, member) : NULL 670 + ({ typeof(ptr) ____ptr = (ptr); \ 671 + ____ptr ? hlist_entry(____ptr, type, member) : NULL; \ 672 + }) 671 673 672 674 /** 673 675 * hlist_for_each_entry - iterate over list of given type
+1
include/linux/mfd/palmas.h
··· 221 221 }; 222 222 223 223 struct palmas_platform_data { 224 + int irq_flags; 224 225 int gpio_base; 225 226 226 227 /* bit value to be loaded to the POWER_CTRL register */
+1
include/linux/mfd/tps65912.h
··· 323 323 void tps65912_device_exit(struct tps65912 *tps65912); 324 324 int tps65912_irq_init(struct tps65912 *tps65912, int irq, 325 325 struct tps65912_platform_data *pdata); 326 + int tps65912_irq_exit(struct tps65912 *tps65912); 326 327 327 328 #endif /* __LINUX_MFD_TPS65912_H */
+2
include/linux/mfd/wm831x/auxadc.h
··· 15 15 #ifndef __MFD_WM831X_AUXADC_H__ 16 16 #define __MFD_WM831X_AUXADC_H__ 17 17 18 + struct wm831x; 19 + 18 20 /* 19 21 * R16429 (0x402D) - AuxADC Data 20 22 */
+1 -1
include/linux/mfd/wm831x/core.h
··· 20 20 #include <linux/irqdomain.h> 21 21 #include <linux/list.h> 22 22 #include <linux/regmap.h> 23 + #include <linux/mfd/wm831x/auxadc.h> 23 24 24 25 /* 25 26 * Register values. ··· 356 355 }; 357 356 358 357 struct wm831x; 359 - enum wm831x_auxadc; 360 358 361 359 typedef int (*wm831x_auxadc_read_fn)(struct wm831x *wm831x, 362 360 enum wm831x_auxadc input);
+7
include/linux/mtd/nand.h
··· 187 187 * This happens with the Renesas AG-AND chips, possibly others. 188 188 */ 189 189 #define BBT_AUTO_REFRESH 0x00000080 190 + /* 191 + * Chip requires ready check on read (for auto-incremented sequential read). 192 + * True only for small page devices; large page devices do not support 193 + * autoincrement. 194 + */ 195 + #define NAND_NEED_READRDY 0x00000100 196 + 190 197 /* Chip does not allow subpage writes */ 191 198 #define NAND_NO_SUBPAGE_WRITE 0x00000200 192 199
+6
include/linux/perf_event.h
··· 799 799 static inline void perf_event_task_tick(void) { } 800 800 #endif 801 801 802 + #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) 803 + extern void perf_restore_debug_store(void); 804 + #else 805 + static inline void perf_restore_debug_store(void) { } 806 + #endif 807 + 802 808 #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) 803 809 804 810 /*
+1
include/linux/res_counter.h
··· 14 14 */ 15 15 16 16 #include <linux/cgroup.h> 17 + #include <linux/errno.h> 17 18 18 19 /* 19 20 * The core object. the cgroup that wishes to account for some
+9 -4
include/linux/skbuff.h
··· 503 503 union { 504 504 __u32 mark; 505 505 __u32 dropcount; 506 - __u32 avail_size; 506 + __u32 reserved_tailroom; 507 507 }; 508 508 509 509 sk_buff_data_t inner_transport_header; ··· 1292 1292 * do not lose pfmemalloc information as the pages would not be 1293 1293 * allocated using __GFP_MEMALLOC. 1294 1294 */ 1295 - if (page->pfmemalloc && !page->mapping) 1296 - skb->pfmemalloc = true; 1297 1295 frag->page.p = page; 1298 1296 frag->page_offset = off; 1299 1297 skb_frag_size_set(frag, size); 1298 + 1299 + page = compound_head(page); 1300 + if (page->pfmemalloc && !page->mapping) 1301 + skb->pfmemalloc = true; 1300 1302 } 1301 1303 1302 1304 /** ··· 1453 1451 */ 1454 1452 static inline int skb_availroom(const struct sk_buff *skb) 1455 1453 { 1456 - return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len; 1454 + if (skb_is_nonlinear(skb)) 1455 + return 0; 1456 + 1457 + return skb->end - skb->tail - skb->reserved_tailroom; 1457 1458 } 1458 1459 1459 1460 /**
+1
include/linux/udp.h
··· 68 68 * For encapsulation sockets. 69 69 */ 70 70 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); 71 + void (*encap_destroy)(struct sock *sk); 71 72 }; 72 73 73 74 static inline struct udp_sock *udp_sk(const struct sock *sk)
+1
include/linux/usb/cdc_ncm.h
··· 127 127 u16 connected; 128 128 }; 129 129 130 + extern u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf); 130 131 extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); 131 132 extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); 132 133 extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign);
+2 -1
include/linux/usb/composite.h
··· 60 60 * @name: For diagnostics, identifies the function. 61 61 * @strings: tables of strings, keyed by identifiers assigned during bind() 62 62 * and by language IDs provided in control requests 63 - * @descriptors: Table of full (or low) speed descriptors, using interface and 63 + * @fs_descriptors: Table of full (or low) speed descriptors, using interface and 64 64 * string identifiers assigned during @bind(). If this pointer is null, 65 65 * the function will not be available at full speed (or at low speed). 66 66 * @hs_descriptors: Table of high speed descriptors, using interface and ··· 290 290 * after function notifications 291 291 * @resume: Notifies configuration when the host restarts USB traffic, 292 292 * before function notifications 293 + * @gadget_driver: Gadget driver controlling this driver 293 294 * 294 295 * Devices default to reporting self powered operation. Devices which rely 295 296 * on bus powered operation should report this in their @bind method.
+4 -2
include/net/dst.h
··· 413 413 414 414 static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) 415 415 { 416 - return dst->ops->neigh_lookup(dst, NULL, daddr); 416 + struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr); 417 + return IS_ERR(n) ? NULL : n; 417 418 } 418 419 419 420 static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, 420 421 struct sk_buff *skb) 421 422 { 422 - return dst->ops->neigh_lookup(dst, skb, NULL); 423 + struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL); 424 + return IS_ERR(n) ? NULL : n; 423 425 } 424 426 425 427 static inline void dst_link_failure(struct sk_buff *skb)
+1
include/net/flow_keys.h
··· 9 9 __be32 ports; 10 10 __be16 port16[2]; 11 11 }; 12 + u16 thoff; 12 13 u8 ip_proto; 13 14 }; 14 15
+9
include/net/inet_frag.h
··· 43 43 44 44 #define INETFRAGS_HASHSZ 64 45 45 46 + /* averaged: 47 + * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ / 48 + * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or 49 + * struct frag_queue)) 50 + */ 51 + #define INETFRAGS_MAXDEPTH 128 52 + 46 53 struct inet_frags { 47 54 struct hlist_head hash[INETFRAGS_HASHSZ]; 48 55 /* This rwlock is a global lock (seperate per IPv4, IPv6 and ··· 83 76 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, 84 77 struct inet_frags *f, void *key, unsigned int hash) 85 78 __releases(&f->lock); 79 + void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, 80 + const char *prefix); 86 81 87 82 static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) 88 83 {
+6 -8
include/net/ip_fib.h
··· 152 152 }; 153 153 154 154 #ifdef CONFIG_IP_ROUTE_MULTIPATH 155 - 156 155 #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel]) 157 - 158 - #define FIB_TABLE_HASHSZ 2 159 - 160 156 #else /* CONFIG_IP_ROUTE_MULTIPATH */ 161 - 162 157 #define FIB_RES_NH(res) ((res).fi->fib_nh[0]) 163 - 164 - #define FIB_TABLE_HASHSZ 256 165 - 166 158 #endif /* CONFIG_IP_ROUTE_MULTIPATH */ 159 + 160 + #ifdef CONFIG_IP_MULTIPLE_TABLES 161 + #define FIB_TABLE_HASHSZ 256 162 + #else 163 + #define FIB_TABLE_HASHSZ 2 164 + #endif 167 165 168 166 extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); 169 167
+12
include/net/ip_vs.h
··· 976 976 int sysctl_sync_retries; 977 977 int sysctl_nat_icmp_send; 978 978 int sysctl_pmtu_disc; 979 + int sysctl_backup_only; 979 980 980 981 /* ip_vs_lblc */ 981 982 int sysctl_lblc_expiration; ··· 1068 1067 return ipvs->sysctl_pmtu_disc; 1069 1068 } 1070 1069 1070 + static inline int sysctl_backup_only(struct netns_ipvs *ipvs) 1071 + { 1072 + return ipvs->sync_state & IP_VS_STATE_BACKUP && 1073 + ipvs->sysctl_backup_only; 1074 + } 1075 + 1071 1076 #else 1072 1077 1073 1078 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) ··· 1119 1112 static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs) 1120 1113 { 1121 1114 return 1; 1115 + } 1116 + 1117 + static inline int sysctl_backup_only(struct netns_ipvs *ipvs) 1118 + { 1119 + return 0; 1122 1120 } 1123 1121 1124 1122 #endif
+4 -2
include/uapi/linux/acct.h
··· 107 107 #define ACORE 0x08 /* ... dumped core */ 108 108 #define AXSIG 0x10 /* ... was killed by a signal */ 109 109 110 - #ifdef __BIG_ENDIAN 110 + #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) 111 111 #define ACCT_BYTEORDER 0x80 /* accounting file is big endian */ 112 - #else 112 + #elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) 113 113 #define ACCT_BYTEORDER 0x00 /* accounting file is little endian */ 114 + #else 115 + #error unspecified endianness 114 116 #endif 115 117 116 118 #ifndef __KERNEL__
+2 -2
include/uapi/linux/aio_abi.h
··· 62 62 __s64 res2; /* secondary result */ 63 63 }; 64 64 65 - #if defined(__LITTLE_ENDIAN) 65 + #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) 66 66 #define PADDED(x,y) x, y 67 - #elif defined(__BIG_ENDIAN) 67 + #elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) 68 68 #define PADDED(x,y) y, x 69 69 #else 70 70 #error edit for your odd byteorder.
+4 -2
include/uapi/linux/raid/md_p.h
··· 145 145 __u32 failed_disks; /* 4 Number of failed disks */ 146 146 __u32 spare_disks; /* 5 Number of spare disks */ 147 147 __u32 sb_csum; /* 6 checksum of the whole superblock */ 148 - #ifdef __BIG_ENDIAN 148 + #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN) 149 149 __u32 events_hi; /* 7 high-order of superblock update count */ 150 150 __u32 events_lo; /* 8 low-order of superblock update count */ 151 151 __u32 cp_events_hi; /* 9 high-order of checkpoint update count */ 152 152 __u32 cp_events_lo; /* 10 low-order of checkpoint update count */ 153 - #else 153 + #elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN) 154 154 __u32 events_lo; /* 7 low-order of superblock update count */ 155 155 __u32 events_hi; /* 8 high-order of superblock update count */ 156 156 __u32 cp_events_lo; /* 9 low-order of checkpoint update count */ 157 157 __u32 cp_events_hi; /* 10 high-order of checkpoint update count */ 158 + #else 159 + #error unspecified endianness 158 160 #endif 159 161 __u32 recovery_cp; /* 11 recovery checkpoint sector count */ 160 162 /* There are only valid for minor_version > 90 */
+4 -1
include/uapi/linux/serial_core.h
··· 51 51 #define PORT_8250_CIR 23 /* CIR infrared port, has its own driver */ 52 52 #define PORT_XR17V35X 24 /* Exar XR17V35x UARTs */ 53 53 #define PORT_BRCM_TRUMANAGE 25 54 - #define PORT_MAX_8250 25 /* max port ID */ 54 + #define PORT_ALTR_16550_F32 26 /* Altera 16550 UART with 32 FIFOs */ 55 + #define PORT_ALTR_16550_F64 27 /* Altera 16550 UART with 64 FIFOs */ 56 + #define PORT_ALTR_16550_F128 28 /* Altera 16550 UART with 128 FIFOs */ 57 + #define PORT_MAX_8250 28 /* max port ID */ 55 58 56 59 /* 57 60 * ARM specific type numbers. These are not currently guaranteed
+1 -1
include/video/atmel_lcdc.h
··· 30 30 */ 31 31 #define ATMEL_LCDC_WIRING_BGR 0 32 32 #define ATMEL_LCDC_WIRING_RGB 1 33 - #define ATMEL_LCDC_WIRING_RGB555 2 34 33 35 34 36 35 /* LCD Controller info data structure, stored in device platform_data */ ··· 61 62 void (*atmel_lcdfb_power_control)(int on); 62 63 struct fb_monspecs *default_monspecs; 63 64 u32 pseudo_palette[16]; 65 + bool have_intensity_bit; 64 66 }; 65 67 66 68 #define ATMEL_LCDC_DMABADDR1 0x00
-4
init/Kconfig
··· 28 28 29 29 menu "General setup" 30 30 31 - config EXPERIMENTAL 32 - bool 33 - default y 34 - 35 31 config BROKEN 36 32 bool 37 33
+4 -1
kernel/fork.c
··· 1141 1141 if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) 1142 1142 return ERR_PTR(-EINVAL); 1143 1143 1144 + if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) 1145 + return ERR_PTR(-EINVAL); 1146 + 1144 1147 /* 1145 1148 * Thread groups must share signals as well, and detached threads 1146 1149 * can only be started up within the thread group. ··· 1810 1807 * If unsharing a user namespace must also unshare the thread. 1811 1808 */ 1812 1809 if (unshare_flags & CLONE_NEWUSER) 1813 - unshare_flags |= CLONE_THREAD; 1810 + unshare_flags |= CLONE_THREAD | CLONE_FS; 1814 1811 /* 1815 1812 * If unsharing a pid namespace must also unshare the thread. 1816 1813 */
+23 -23
kernel/futex.c
··· 223 223 * @rw: mapping needs to be read/write (values: VERIFY_READ, 224 224 * VERIFY_WRITE) 225 225 * 226 - * Returns a negative error code or 0 226 + * Return: a negative error code or 0 227 + * 227 228 * The key words are stored in *key on success. 228 229 * 229 230 * For shared mappings, it's (page->index, file_inode(vma->vm_file), ··· 706 705 * be "current" except in the case of requeue pi. 707 706 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) 708 707 * 709 - * Returns: 710 - * 0 - ready to wait 711 - * 1 - acquired the lock 708 + * Return: 709 + * 0 - ready to wait; 710 + * 1 - acquired the lock; 712 711 * <0 - error 713 712 * 714 713 * The hb->lock and futex_key refs shall be held by the caller. ··· 1192 1191 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. 1193 1192 * hb1 and hb2 must be held by the caller. 1194 1193 * 1195 - * Returns: 1196 - * 0 - failed to acquire the lock atomicly 1197 - * 1 - acquired the lock 1194 + * Return: 1195 + * 0 - failed to acquire the lock atomically; 1196 + * 1 - acquired the lock; 1198 1197 * <0 - error 1199 1198 */ 1200 1199 static int futex_proxy_trylock_atomic(u32 __user *pifutex, ··· 1255 1254 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire 1256 1255 * uaddr2 atomically on behalf of the top waiter. 1257 1256 * 1258 - * Returns: 1259 - * >=0 - on success, the number of tasks requeued or woken 1257 + * Return: 1258 + * >=0 - on success, the number of tasks requeued or woken; 1260 1259 * <0 - on error 1261 1260 */ 1262 1261 static int futex_requeue(u32 __user *uaddr1, unsigned int flags, ··· 1537 1536 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must 1538 1537 * be paired with exactly one earlier call to queue_me(). 
1539 1538 * 1540 - * Returns: 1541 - * 1 - if the futex_q was still queued (and we removed unqueued it) 1539 + * Return: 1540 + * 1 - if the futex_q was still queued (and we removed unqueued it); 1542 1541 * 0 - if the futex_q was already removed by the waking thread 1543 1542 */ 1544 1543 static int unqueue_me(struct futex_q *q) ··· 1708 1707 * the pi_state owner as well as handle race conditions that may allow us to 1709 1708 * acquire the lock. Must be called with the hb lock held. 1710 1709 * 1711 - * Returns: 1712 - * 1 - success, lock taken 1713 - * 0 - success, lock not taken 1710 + * Return: 1711 + * 1 - success, lock taken; 1712 + * 0 - success, lock not taken; 1714 1713 * <0 - on error (-EFAULT) 1715 1714 */ 1716 1715 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) ··· 1825 1824 * Return with the hb lock held and a q.key reference on success, and unlocked 1826 1825 * with no q.key reference on failure. 1827 1826 * 1828 - * Returns: 1829 - * 0 - uaddr contains val and hb has been locked 1827 + * Return: 1828 + * 0 - uaddr contains val and hb has been locked; 1830 1829 * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked 1831 1830 */ 1832 1831 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, ··· 2204 2203 * the wakeup and return the appropriate error code to the caller. Must be 2205 2204 * called with the hb lock held. 
2206 2205 * 2207 - * Returns 2208 - * 0 - no early wakeup detected 2209 - * <0 - -ETIMEDOUT or -ERESTARTNOINTR 2206 + * Return: 2207 + * 0 = no early wakeup detected; 2208 + * <0 = -ETIMEDOUT or -ERESTARTNOINTR 2210 2209 */ 2211 2210 static inline 2212 2211 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, ··· 2248 2247 * @val: the expected value of uaddr 2249 2248 * @abs_time: absolute timeout 2250 2249 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all 2251 - * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0) 2252 2250 * @uaddr2: the pi futex we will take prior to returning to user-space 2253 2251 * 2254 2252 * The caller will wait on uaddr and will be requeued by futex_requeue() to ··· 2258 2258 * there was a need to. 2259 2259 * 2260 2260 * We call schedule in futex_wait_queue_me() when we enqueue and return there 2261 - * via the following: 2261 + * via the following-- 2262 2262 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue() 2263 2263 * 2) wakeup on uaddr2 after a requeue 2264 2264 * 3) signal ··· 2276 2276 * 2277 2277 * If 4 or 7, we cleanup and return with -ETIMEDOUT. 2278 2278 * 2279 - * Returns: 2280 - * 0 - On success 2279 + * Return: 2280 + * 0 - On success; 2281 2281 * <0 - On error 2282 2282 */ 2283 2283 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+4 -1
kernel/signal.c
··· 485 485 if (force_default || ka->sa.sa_handler != SIG_IGN) 486 486 ka->sa.sa_handler = SIG_DFL; 487 487 ka->sa.sa_flags = 0; 488 + #ifdef __ARCH_HAS_SA_RESTORER 489 + ka->sa.sa_restorer = NULL; 490 + #endif 488 491 sigemptyset(&ka->sa.sa_mask); 489 492 ka++; 490 493 } ··· 2685 2682 /** 2686 2683 * sys_rt_sigpending - examine a pending signal that has been raised 2687 2684 * while blocked 2688 - * @set: stores pending signals 2685 + * @uset: stores pending signals 2689 2686 * @sigsetsize: size of sigset_t type or larger 2690 2687 */ 2691 2688 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
+4
kernel/user_namespace.c
··· 21 21 #include <linux/uaccess.h> 22 22 #include <linux/ctype.h> 23 23 #include <linux/projid.h> 24 + #include <linux/fs_struct.h> 24 25 25 26 static struct kmem_cache *user_ns_cachep __read_mostly; 26 27 ··· 836 835 837 836 /* Threaded processes may not enter a different user namespace */ 838 837 if (atomic_read(&current->mm->mm_users) > 1) 838 + return -EINVAL; 839 + 840 + if (current->fs->users != 1) 839 841 return -EINVAL; 840 842 841 843 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
+29 -22
kernel/workqueue.c
··· 457 457 int ret; 458 458 459 459 mutex_lock(&worker_pool_idr_mutex); 460 - idr_pre_get(&worker_pool_idr, GFP_KERNEL); 461 - ret = idr_get_new(&worker_pool_idr, pool, &pool->id); 460 + ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL); 461 + if (ret >= 0) 462 + pool->id = ret; 462 463 mutex_unlock(&worker_pool_idr_mutex); 463 464 464 - return ret; 465 + return ret < 0 ? ret : 0; 465 466 } 466 467 467 468 /* ··· 3447 3446 3448 3447 spin_unlock_irq(&pool->lock); 3449 3448 mutex_unlock(&pool->assoc_mutex); 3450 - } 3451 3449 3452 - /* 3453 - * Call schedule() so that we cross rq->lock and thus can guarantee 3454 - * sched callbacks see the %WORKER_UNBOUND flag. This is necessary 3455 - * as scheduler callbacks may be invoked from other cpus. 3456 - */ 3457 - schedule(); 3450 + /* 3451 + * Call schedule() so that we cross rq->lock and thus can 3452 + * guarantee sched callbacks see the %WORKER_UNBOUND flag. 3453 + * This is necessary as scheduler callbacks may be invoked 3454 + * from other cpus. 3455 + */ 3456 + schedule(); 3458 3457 3459 - /* 3460 - * Sched callbacks are disabled now. Zap nr_running. After this, 3461 - * nr_running stays zero and need_more_worker() and keep_working() 3462 - * are always true as long as the worklist is not empty. Pools on 3463 - * @cpu now behave as unbound (in terms of concurrency management) 3464 - * pools which are served by workers tied to the CPU. 3465 - * 3466 - * On return from this function, the current worker would trigger 3467 - * unbound chain execution of pending work items if other workers 3468 - * didn't already. 3469 - */ 3470 - for_each_std_worker_pool(pool, cpu) 3458 + /* 3459 + * Sched callbacks are disabled now. Zap nr_running. 3460 + * After this, nr_running stays zero and need_more_worker() 3461 + * and keep_working() are always true as long as the 3462 + * worklist is not empty. 
This pool now behaves as an 3463 + * unbound (in terms of concurrency management) pool which 3464 + * are served by workers tied to the pool. 3465 + */ 3471 3466 atomic_set(&pool->nr_running, 0); 3467 + 3468 + /* 3469 + * With concurrency management just turned off, a busy 3470 + * worker blocking could lead to lengthy stalls. Kick off 3471 + * unbound chain execution of currently pending work items. 3472 + */ 3473 + spin_lock_irq(&pool->lock); 3474 + wake_up_worker(pool); 3475 + spin_unlock_irq(&pool->lock); 3476 + } 3472 3477 } 3473 3478 3474 3479 /*
+30 -50
lib/idr.c
··· 106 106 if (layer_idr) 107 107 return get_from_free_list(layer_idr); 108 108 109 - /* try to allocate directly from kmem_cache */ 110 - new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); 109 + /* 110 + * Try to allocate directly from kmem_cache. We want to try this 111 + * before preload buffer; otherwise, non-preloading idr_alloc() 112 + * users will end up taking advantage of preloading ones. As the 113 + * following is allowed to fail for preloaded cases, suppress 114 + * warning this time. 115 + */ 116 + new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN); 111 117 if (new) 112 118 return new; 113 119 ··· 121 115 * Try to fetch one from the per-cpu preload buffer if in process 122 116 * context. See idr_preload() for details. 123 117 */ 124 - if (in_interrupt()) 125 - return NULL; 126 - 127 - preempt_disable(); 128 - new = __this_cpu_read(idr_preload_head); 129 - if (new) { 130 - __this_cpu_write(idr_preload_head, new->ary[0]); 131 - __this_cpu_dec(idr_preload_cnt); 132 - new->ary[0] = NULL; 118 + if (!in_interrupt()) { 119 + preempt_disable(); 120 + new = __this_cpu_read(idr_preload_head); 121 + if (new) { 122 + __this_cpu_write(idr_preload_head, new->ary[0]); 123 + __this_cpu_dec(idr_preload_cnt); 124 + new->ary[0] = NULL; 125 + } 126 + preempt_enable(); 127 + if (new) 128 + return new; 133 129 } 134 - preempt_enable(); 135 - return new; 130 + 131 + /* 132 + * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so 133 + * that memory allocation failure warning is printed as intended. 134 + */ 135 + return kmem_cache_zalloc(idr_layer_cache, gfp_mask); 136 136 } 137 137 138 138 static void idr_layer_rcu_free(struct rcu_head *head) ··· 196 184 } 197 185 } 198 186 199 - /** 200 - * idr_pre_get - reserve resources for idr allocation 201 - * @idp: idr handle 202 - * @gfp_mask: memory allocation flags 203 - * 204 - * This function should be called prior to calling the idr_get_new* functions. 
205 - * It preallocates enough memory to satisfy the worst possible allocation. The 206 - * caller should pass in GFP_KERNEL if possible. This of course requires that 207 - * no spinning locks be held. 208 - * 209 - * If the system is REALLY out of memory this function returns %0, 210 - * otherwise %1. 211 - */ 212 - int idr_pre_get(struct idr *idp, gfp_t gfp_mask) 187 + int __idr_pre_get(struct idr *idp, gfp_t gfp_mask) 213 188 { 214 189 while (idp->id_free_cnt < MAX_IDR_FREE) { 215 190 struct idr_layer *new; ··· 207 208 } 208 209 return 1; 209 210 } 210 - EXPORT_SYMBOL(idr_pre_get); 211 + EXPORT_SYMBOL(__idr_pre_get); 211 212 212 213 /** 213 214 * sub_alloc - try to allocate an id without growing the tree depth 214 215 * @idp: idr handle 215 216 * @starting_id: id to start search at 216 - * @id: pointer to the allocated handle 217 217 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer 218 218 * @gfp_mask: allocation mask for idr_layer_alloc() 219 219 * @layer_idr: optional idr passed to idr_layer_alloc() ··· 374 376 idr_mark_full(pa, id); 375 377 } 376 378 377 - /** 378 - * idr_get_new_above - allocate new idr entry above or equal to a start id 379 - * @idp: idr handle 380 - * @ptr: pointer you want associated with the id 381 - * @starting_id: id to start search at 382 - * @id: pointer to the allocated handle 383 - * 384 - * This is the allocate id function. It should be called with any 385 - * required locks. 386 - * 387 - * If allocation from IDR's private freelist fails, idr_get_new_above() will 388 - * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill 389 - * IDR's preallocation and then retry the idr_get_new_above() call. 390 - * 391 - * If the idr is full idr_get_new_above() will return %-ENOSPC. 392 - * 393 - * @id returns a value in the range @starting_id ... 
%0x7fffffff 394 - */ 395 - int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) 379 + int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) 396 380 { 397 381 struct idr_layer *pa[MAX_IDR_LEVEL + 1]; 398 382 int rv; ··· 387 407 *id = rv; 388 408 return 0; 389 409 } 390 - EXPORT_SYMBOL(idr_get_new_above); 410 + EXPORT_SYMBOL(__idr_get_new_above); 391 411 392 412 /** 393 413 * idr_preload - preload for idr_alloc() ··· 888 908 int ida_pre_get(struct ida *ida, gfp_t gfp_mask) 889 909 { 890 910 /* allocate idr_layers */ 891 - if (!idr_pre_get(&ida->idr, gfp_mask)) 911 + if (!__idr_pre_get(&ida->idr, gfp_mask)) 892 912 return 0; 893 913 894 914 /* allocate free_bitmap */
+1 -1
lib/xz/Kconfig
··· 15 15 16 16 config XZ_DEC_POWERPC 17 17 bool "PowerPC BCJ filter decoder" 18 - default y if POWERPC 18 + default y if PPC 19 19 select XZ_DEC_BCJ 20 20 21 21 config XZ_DEC_IA64
+6 -2
mm/Kconfig
··· 286 286 default "1" 287 287 288 288 config VIRT_TO_BUS 289 - def_bool y 290 - depends on HAVE_VIRT_TO_BUS 289 + bool 290 + help 291 + An architecture should select this if it implements the 292 + deprecated interface virt_to_bus(). All new architectures 293 + should probably not select this. 294 + 291 295 292 296 config MMU_NOTIFIER 293 297 bool
+3 -2
mm/fremap.c
··· 129 129 struct vm_area_struct *vma; 130 130 int err = -EINVAL; 131 131 int has_write_lock = 0; 132 - vm_flags_t vm_flags; 132 + vm_flags_t vm_flags = 0; 133 133 134 134 if (prot) 135 135 return err; ··· 254 254 */ 255 255 256 256 out: 257 - vm_flags = vma->vm_flags; 257 + if (vma) 258 + vm_flags = vma->vm_flags; 258 259 if (likely(!has_write_lock)) 259 260 up_read(&mm->mmap_sem); 260 261 else
+1 -1
mm/memory_hotplug.c
··· 1801 1801 int retry = 1; 1802 1802 1803 1803 start_pfn = PFN_DOWN(start); 1804 - end_pfn = start_pfn + PFN_DOWN(size); 1804 + end_pfn = PFN_UP(start + size - 1); 1805 1805 1806 1806 /* 1807 1807 * When CONFIG_MEMCG is on, one memory block may be used by other
-8
mm/process_vm_access.c
··· 429 429 if (flags != 0) 430 430 return -EINVAL; 431 431 432 - if (!access_ok(VERIFY_READ, lvec, liovcnt * sizeof(*lvec))) 433 - goto out; 434 - 435 - if (!access_ok(VERIFY_READ, rvec, riovcnt * sizeof(*rvec))) 436 - goto out; 437 - 438 432 if (vm_write) 439 433 rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt, 440 434 UIO_FASTIOV, iovstack_l, ··· 453 459 kfree(iov_r); 454 460 if (iov_l != iovstack_l) 455 461 kfree(iov_l); 456 - 457 - out: 458 462 return rc; 459 463 } 460 464
+1
net/bridge/br_netlink.c
··· 327 327 br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); 328 328 br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); 329 329 br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); 330 + br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); 330 331 331 332 if (tb[IFLA_BRPORT_COST]) { 332 333 err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
+29 -13
net/ceph/osdmap.c
··· 654 654 return 0; 655 655 } 656 656 657 + static int __decode_pgid(void **p, void *end, struct ceph_pg *pg) 658 + { 659 + u8 v; 660 + 661 + ceph_decode_need(p, end, 1+8+4+4, bad); 662 + v = ceph_decode_8(p); 663 + if (v != 1) 664 + goto bad; 665 + pg->pool = ceph_decode_64(p); 666 + pg->seed = ceph_decode_32(p); 667 + *p += 4; /* skip preferred */ 668 + return 0; 669 + 670 + bad: 671 + dout("error decoding pgid\n"); 672 + return -EINVAL; 673 + } 674 + 657 675 /* 658 676 * decode a full map. 659 677 */ ··· 763 745 for (i = 0; i < len; i++) { 764 746 int n, j; 765 747 struct ceph_pg pgid; 766 - struct ceph_pg_v1 pgid_v1; 767 748 struct ceph_pg_mapping *pg; 768 749 769 - ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad); 770 - ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1)); 771 - pgid.pool = le32_to_cpu(pgid_v1.pool); 772 - pgid.seed = le16_to_cpu(pgid_v1.ps); 750 + err = __decode_pgid(p, end, &pgid); 751 + if (err) 752 + goto bad; 753 + ceph_decode_need(p, end, sizeof(u32), bad); 773 754 n = ceph_decode_32(p); 774 755 err = -EINVAL; 775 756 if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) ··· 835 818 u16 version; 836 819 837 820 ceph_decode_16_safe(p, end, version, bad); 838 - if (version > 6) { 839 - pr_warning("got unknown v %d > %d of inc osdmap\n", version, 6); 821 + if (version != 6) { 822 + pr_warning("got unknown v %d != 6 of inc osdmap\n", version); 840 823 goto bad; 841 824 } 842 825 ··· 980 963 while (len--) { 981 964 struct ceph_pg_mapping *pg; 982 965 int j; 983 - struct ceph_pg_v1 pgid_v1; 984 966 struct ceph_pg pgid; 985 967 u32 pglen; 986 - ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad); 987 - ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1)); 988 - pgid.pool = le32_to_cpu(pgid_v1.pool); 989 - pgid.seed = le16_to_cpu(pgid_v1.ps); 990 - pglen = ceph_decode_32(p); 991 968 969 + err = __decode_pgid(p, end, &pgid); 970 + if (err) 971 + goto bad; 972 + ceph_decode_need(p, end, sizeof(u32), bad); 973 + pglen = ceph_decode_32(p); 992 974 if 
(pglen) { 993 975 ceph_decode_need(p, end, pglen*sizeof(u32), bad); 994 976
+1 -1
net/core/dev.c
··· 2211 2211 __be16 skb_network_protocol(struct sk_buff *skb) 2212 2212 { 2213 2213 __be16 type = skb->protocol; 2214 + int vlan_depth = ETH_HLEN; 2214 2215 2215 2216 while (type == htons(ETH_P_8021Q)) { 2216 - int vlan_depth = ETH_HLEN; 2217 2217 struct vlan_hdr *vh; 2218 2218 2219 2219 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
+2
net/core/flow_dissector.c
··· 151 151 flow->ports = *ports; 152 152 } 153 153 154 + flow->thoff = (u16) nhoff; 155 + 154 156 return true; 155 157 } 156 158 EXPORT_SYMBOL(skb_flow_dissect);
+1 -1
net/core/rtnetlink.c
··· 2690 2690 struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); 2691 2691 2692 2692 while (RTA_OK(attr, attrlen)) { 2693 - unsigned int flavor = attr->rta_type; 2693 + unsigned int flavor = attr->rta_type & NLA_TYPE_MASK; 2694 2694 if (flavor) { 2695 2695 if (flavor > rta_max[sz_idx]) 2696 2696 return -EINVAL;
+19 -1
net/ipv4/inet_fragment.c
··· 21 21 #include <linux/rtnetlink.h> 22 22 #include <linux/slab.h> 23 23 24 + #include <net/sock.h> 24 25 #include <net/inet_frag.h> 25 26 26 27 static void inet_frag_secret_rebuild(unsigned long dummy) ··· 278 277 __releases(&f->lock) 279 278 { 280 279 struct inet_frag_queue *q; 280 + int depth = 0; 281 281 282 282 hlist_for_each_entry(q, &f->hash[hash], list) { 283 283 if (q->net == nf && f->match(q, key)) { ··· 286 284 read_unlock(&f->lock); 287 285 return q; 288 286 } 287 + depth++; 289 288 } 290 289 read_unlock(&f->lock); 291 290 292 - return inet_frag_create(nf, f, key); 291 + if (depth <= INETFRAGS_MAXDEPTH) 292 + return inet_frag_create(nf, f, key); 293 + else 294 + return ERR_PTR(-ENOBUFS); 293 295 } 294 296 EXPORT_SYMBOL(inet_frag_find); 297 + 298 + void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, 299 + const char *prefix) 300 + { 301 + static const char msg[] = "inet_frag_find: Fragment hash bucket" 302 + " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH) 303 + ". Dropping fragment.\n"; 304 + 305 + if (PTR_ERR(q) == -ENOBUFS) 306 + LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg); 307 + } 308 + EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
+4 -7
net/ipv4/ip_fragment.c
··· 292 292 hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); 293 293 294 294 q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash); 295 - if (q == NULL) 296 - goto out_nomem; 297 - 295 + if (IS_ERR_OR_NULL(q)) { 296 + inet_frag_maybe_warn_overflow(q, pr_fmt()); 297 + return NULL; 298 + } 298 299 return container_of(q, struct ipq, q); 299 - 300 - out_nomem: 301 - LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n")); 302 - return NULL; 303 300 } 304 301 305 302 /* Is the fragment too far ahead to be part of ipq? */
+1 -4
net/ipv4/ip_gre.c
··· 796 796 797 797 if (dev->header_ops && dev->type == ARPHRD_IPGRE) { 798 798 gre_hlen = 0; 799 - if (skb->protocol == htons(ETH_P_IP)) 800 - tiph = (const struct iphdr *)skb->data; 801 - else 802 - tiph = &tunnel->parms.iph; 799 + tiph = (const struct iphdr *)skb->data; 803 800 } else { 804 801 gre_hlen = tunnel->hlen; 805 802 tiph = &tunnel->parms.iph;
+2 -1
net/ipv4/ipconfig.c
··· 1522 1522 } 1523 1523 for (i++; i < CONF_NAMESERVERS_MAX; i++) 1524 1524 if (ic_nameservers[i] != NONE) 1525 - pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]); 1525 + pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]); 1526 + pr_cont("\n"); 1526 1527 #endif /* !SILENT */ 1527 1528 1528 1529 return 0;
-13
net/ipv4/netfilter/Kconfig
··· 36 36 37 37 If unsure, say Y. 38 38 39 - config IP_NF_QUEUE 40 - tristate "IP Userspace queueing via NETLINK (OBSOLETE)" 41 - depends on NETFILTER_ADVANCED 42 - help 43 - Netfilter has the ability to queue packets to user space: the 44 - netlink device can be used to access them using this driver. 45 - 46 - This option enables the old IPv4-only "ip_queue" implementation 47 - which has been obsoleted by the new "nfnetlink_queue" code (see 48 - CONFIG_NETFILTER_NETLINK_QUEUE). 49 - 50 - To compile it as a module, choose M here. If unsure, say N. 51 - 52 39 config IP_NF_IPTABLES 53 40 tristate "IP tables support (required for filtering/masq/NAT)" 54 41 default m if NETFILTER_ADVANCED=n
+1 -1
net/ipv4/tcp.c
··· 766 766 * Make sure that we have exactly size bytes 767 767 * available to the caller, no more, no less. 768 768 */ 769 - skb->avail_size = size; 769 + skb->reserved_tailroom = skb->end - skb->tail - size; 770 770 return skb; 771 771 } 772 772 __kfree_skb(skb);
+7 -7
net/ipv4/tcp_ipv4.c
··· 274 274 struct inet_sock *inet = inet_sk(sk); 275 275 u32 mtu = tcp_sk(sk)->mtu_info; 276 276 277 - /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs 278 - * send out by Linux are always <576bytes so they should go through 279 - * unfragmented). 280 - */ 281 - if (sk->sk_state == TCP_LISTEN) 282 - return; 283 - 284 277 dst = inet_csk_update_pmtu(sk, mtu); 285 278 if (!dst) 286 279 return; ··· 401 408 goto out; 402 409 403 410 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ 411 + /* We are not interested in TCP_LISTEN and open_requests 412 + * (SYN-ACKs send out by Linux are always <576bytes so 413 + * they should go through unfragmented). 414 + */ 415 + if (sk->sk_state == TCP_LISTEN) 416 + goto out; 417 + 404 418 tp->mtu_info = info; 405 419 if (!sock_owned_by_user(sk)) { 406 420 tcp_v4_mtu_reduced(sk);
-1
net/ipv4/tcp_output.c
··· 1136 1136 eat = min_t(int, len, skb_headlen(skb)); 1137 1137 if (eat) { 1138 1138 __skb_pull(skb, eat); 1139 - skb->avail_size -= eat; 1140 1139 len -= eat; 1141 1140 if (!len) 1142 1141 return;
+7
net/ipv4/udp.c
··· 1762 1762 1763 1763 void udp_destroy_sock(struct sock *sk) 1764 1764 { 1765 + struct udp_sock *up = udp_sk(sk); 1765 1766 bool slow = lock_sock_fast(sk); 1766 1767 udp_flush_pending_frames(sk); 1767 1768 unlock_sock_fast(sk, slow); 1769 + if (static_key_false(&udp_encap_needed) && up->encap_type) { 1770 + void (*encap_destroy)(struct sock *sk); 1771 + encap_destroy = ACCESS_ONCE(up->encap_destroy); 1772 + if (encap_destroy) 1773 + encap_destroy(sk); 1774 + } 1768 1775 } 1769 1776 1770 1777 /*
+2
net/ipv6/netfilter/ip6t_NPT.c
··· 114 114 static struct xt_target ip6t_npt_target_reg[] __read_mostly = { 115 115 { 116 116 .name = "SNPT", 117 + .table = "mangle", 117 118 .target = ip6t_snpt_tg, 118 119 .targetsize = sizeof(struct ip6t_npt_tginfo), 119 120 .checkentry = ip6t_npt_checkentry, ··· 125 124 }, 126 125 { 127 126 .name = "DNPT", 127 + .table = "mangle", 128 128 .target = ip6t_dnpt_tg, 129 129 .targetsize = sizeof(struct ip6t_npt_tginfo), 130 130 .checkentry = ip6t_npt_checkentry,
+6 -6
net/ipv6/netfilter/nf_conntrack_reasm.c
··· 14 14 * 2 of the License, or (at your option) any later version. 15 15 */ 16 16 17 + #define pr_fmt(fmt) "IPv6-nf: " fmt 18 + 17 19 #include <linux/errno.h> 18 20 #include <linux/types.h> 19 21 #include <linux/string.h> ··· 182 180 183 181 q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); 184 182 local_bh_enable(); 185 - if (q == NULL) 186 - goto oom; 187 - 183 + if (IS_ERR_OR_NULL(q)) { 184 + inet_frag_maybe_warn_overflow(q, pr_fmt()); 185 + return NULL; 186 + } 188 187 return container_of(q, struct frag_queue, q); 189 - 190 - oom: 191 - return NULL; 192 188 } 193 189 194 190
+6 -2
net/ipv6/reassembly.c
··· 26 26 * YOSHIFUJI,H. @USAGI Always remove fragment header to 27 27 * calculate ICV correctly. 28 28 */ 29 + 30 + #define pr_fmt(fmt) "IPv6: " fmt 31 + 29 32 #include <linux/errno.h> 30 33 #include <linux/types.h> 31 34 #include <linux/string.h> ··· 188 185 hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd); 189 186 190 187 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); 191 - if (q == NULL) 188 + if (IS_ERR_OR_NULL(q)) { 189 + inet_frag_maybe_warn_overflow(q, pr_fmt()); 192 190 return NULL; 193 - 191 + } 194 192 return container_of(q, struct frag_queue, q); 195 193 } 196 194
+7
net/ipv6/tcp_ipv6.c
··· 389 389 } 390 390 391 391 if (type == ICMPV6_PKT_TOOBIG) { 392 + /* We are not interested in TCP_LISTEN and open_requests 393 + * (SYN-ACKs send out by Linux are always <576bytes so 394 + * they should go through unfragmented). 395 + */ 396 + if (sk->sk_state == TCP_LISTEN) 397 + goto out; 398 + 392 399 tp->mtu_info = ntohl(info); 393 400 if (!sock_owned_by_user(sk)) 394 401 tcp_v6_mtu_reduced(sk);
+8
net/ipv6/udp.c
··· 1286 1286 1287 1287 void udpv6_destroy_sock(struct sock *sk) 1288 1288 { 1289 + struct udp_sock *up = udp_sk(sk); 1289 1290 lock_sock(sk); 1290 1291 udp_v6_flush_pending_frames(sk); 1291 1292 release_sock(sk); 1293 + 1294 + if (static_key_false(&udpv6_encap_needed) && up->encap_type) { 1295 + void (*encap_destroy)(struct sock *sk); 1296 + encap_destroy = ACCESS_ONCE(up->encap_destroy); 1297 + if (encap_destroy) 1298 + encap_destroy(sk); 1299 + } 1292 1300 1293 1301 inet6_destroy_sock(sk); 1294 1302 }
+4 -2
net/irda/af_irda.c
··· 2583 2583 NULL, NULL, NULL); 2584 2584 2585 2585 /* Check if the we got some results */ 2586 - if (!self->cachedaddr) 2587 - return -EAGAIN; /* Didn't find any devices */ 2586 + if (!self->cachedaddr) { 2587 + err = -EAGAIN; /* Didn't find any devices */ 2588 + goto out; 2589 + } 2588 2590 daddr = self->cachedaddr; 2589 2591 /* Cleanup */ 2590 2592 self->cachedaddr = 0;
+103 -107
net/l2tp/l2tp_core.c
··· 114 114 115 115 static void l2tp_session_set_header_len(struct l2tp_session *session, int version); 116 116 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); 117 - static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); 118 117 119 118 static inline struct l2tp_net *l2tp_pernet(struct net *net) 120 119 { ··· 191 192 } else { 192 193 /* Socket is owned by kernelspace */ 193 194 sk = tunnel->sock; 195 + sock_hold(sk); 194 196 } 195 197 196 198 out: ··· 210 210 } 211 211 sock_put(sk); 212 212 } 213 + sock_put(sk); 213 214 } 214 215 EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); 215 216 ··· 374 373 struct sk_buff *skbp; 375 374 struct sk_buff *tmp; 376 375 u32 ns = L2TP_SKB_CB(skb)->ns; 377 - struct l2tp_stats *sstats; 378 376 379 377 spin_lock_bh(&session->reorder_q.lock); 380 - sstats = &session->stats; 381 378 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { 382 379 if (L2TP_SKB_CB(skbp)->ns > ns) { 383 380 __skb_queue_before(&session->reorder_q, skbp, skb); ··· 383 384 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", 384 385 session->name, ns, L2TP_SKB_CB(skbp)->ns, 385 386 skb_queue_len(&session->reorder_q)); 386 - u64_stats_update_begin(&sstats->syncp); 387 - sstats->rx_oos_packets++; 388 - u64_stats_update_end(&sstats->syncp); 387 + atomic_long_inc(&session->stats.rx_oos_packets); 389 388 goto out; 390 389 } 391 390 } ··· 400 403 { 401 404 struct l2tp_tunnel *tunnel = session->tunnel; 402 405 int length = L2TP_SKB_CB(skb)->length; 403 - struct l2tp_stats *tstats, *sstats; 404 406 405 407 /* We're about to requeue the skb, so return resources 406 408 * to its current owner (a socket receive buffer). 
407 409 */ 408 410 skb_orphan(skb); 409 411 410 - tstats = &tunnel->stats; 411 - u64_stats_update_begin(&tstats->syncp); 412 - sstats = &session->stats; 413 - u64_stats_update_begin(&sstats->syncp); 414 - tstats->rx_packets++; 415 - tstats->rx_bytes += length; 416 - sstats->rx_packets++; 417 - sstats->rx_bytes += length; 418 - u64_stats_update_end(&tstats->syncp); 419 - u64_stats_update_end(&sstats->syncp); 412 + atomic_long_inc(&tunnel->stats.rx_packets); 413 + atomic_long_add(length, &tunnel->stats.rx_bytes); 414 + atomic_long_inc(&session->stats.rx_packets); 415 + atomic_long_add(length, &session->stats.rx_bytes); 420 416 421 417 if (L2TP_SKB_CB(skb)->has_seq) { 422 418 /* Bump our Nr */ ··· 440 450 { 441 451 struct sk_buff *skb; 442 452 struct sk_buff *tmp; 443 - struct l2tp_stats *sstats; 444 453 445 454 /* If the pkt at the head of the queue has the nr that we 446 455 * expect to send up next, dequeue it and any other ··· 447 458 */ 448 459 start: 449 460 spin_lock_bh(&session->reorder_q.lock); 450 - sstats = &session->stats; 451 461 skb_queue_walk_safe(&session->reorder_q, skb, tmp) { 452 462 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { 453 - u64_stats_update_begin(&sstats->syncp); 454 - sstats->rx_seq_discards++; 455 - sstats->rx_errors++; 456 - u64_stats_update_end(&sstats->syncp); 463 + atomic_long_inc(&session->stats.rx_seq_discards); 464 + atomic_long_inc(&session->stats.rx_errors); 457 465 l2tp_dbg(session, L2TP_MSG_SEQ, 458 466 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n", 459 467 session->name, L2TP_SKB_CB(skb)->ns, ··· 609 623 struct l2tp_tunnel *tunnel = session->tunnel; 610 624 int offset; 611 625 u32 ns, nr; 612 - struct l2tp_stats *sstats = &session->stats; 613 626 614 627 /* The ref count is increased since we now hold a pointer to 615 628 * the session. Take care to decrement the refcnt when exiting ··· 625 640 "%s: cookie mismatch (%u/%u). 
Discarding.\n", 626 641 tunnel->name, tunnel->tunnel_id, 627 642 session->session_id); 628 - u64_stats_update_begin(&sstats->syncp); 629 - sstats->rx_cookie_discards++; 630 - u64_stats_update_end(&sstats->syncp); 643 + atomic_long_inc(&session->stats.rx_cookie_discards); 631 644 goto discard; 632 645 } 633 646 ptr += session->peer_cookie_len; ··· 694 711 l2tp_warn(session, L2TP_MSG_SEQ, 695 712 "%s: recv data has no seq numbers when required. Discarding.\n", 696 713 session->name); 697 - u64_stats_update_begin(&sstats->syncp); 698 - sstats->rx_seq_discards++; 699 - u64_stats_update_end(&sstats->syncp); 714 + atomic_long_inc(&session->stats.rx_seq_discards); 700 715 goto discard; 701 716 } 702 717 ··· 713 732 l2tp_warn(session, L2TP_MSG_SEQ, 714 733 "%s: recv data has no seq numbers when required. Discarding.\n", 715 734 session->name); 716 - u64_stats_update_begin(&sstats->syncp); 717 - sstats->rx_seq_discards++; 718 - u64_stats_update_end(&sstats->syncp); 735 + atomic_long_inc(&session->stats.rx_seq_discards); 719 736 goto discard; 720 737 } 721 738 } ··· 767 788 * packets 768 789 */ 769 790 if (L2TP_SKB_CB(skb)->ns != session->nr) { 770 - u64_stats_update_begin(&sstats->syncp); 771 - sstats->rx_seq_discards++; 772 - u64_stats_update_end(&sstats->syncp); 791 + atomic_long_inc(&session->stats.rx_seq_discards); 773 792 l2tp_dbg(session, L2TP_MSG_SEQ, 774 793 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n", 775 794 session->name, L2TP_SKB_CB(skb)->ns, ··· 793 816 return; 794 817 795 818 discard: 796 - u64_stats_update_begin(&sstats->syncp); 797 - sstats->rx_errors++; 798 - u64_stats_update_end(&sstats->syncp); 819 + atomic_long_inc(&session->stats.rx_errors); 799 820 kfree_skb(skb); 800 821 801 822 if (session->deref) ··· 802 827 l2tp_session_dec_refcount(session); 803 828 } 804 829 EXPORT_SYMBOL(l2tp_recv_common); 830 + 831 + /* Drop skbs from the session's reorder_q 832 + */ 833 + int l2tp_session_queue_purge(struct l2tp_session *session) 834 
+ { 835 + struct sk_buff *skb = NULL; 836 + BUG_ON(!session); 837 + BUG_ON(session->magic != L2TP_SESSION_MAGIC); 838 + while ((skb = skb_dequeue(&session->reorder_q))) { 839 + atomic_long_inc(&session->stats.rx_errors); 840 + kfree_skb(skb); 841 + if (session->deref) 842 + (*session->deref)(session); 843 + } 844 + return 0; 845 + } 846 + EXPORT_SYMBOL_GPL(l2tp_session_queue_purge); 805 847 806 848 /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame 807 849 * here. The skb is not on a list when we get here. ··· 835 843 u32 tunnel_id, session_id; 836 844 u16 version; 837 845 int length; 838 - struct l2tp_stats *tstats; 839 846 840 847 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) 841 848 goto discard_bad_csum; ··· 923 932 discard_bad_csum: 924 933 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); 925 934 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); 926 - tstats = &tunnel->stats; 927 - u64_stats_update_begin(&tstats->syncp); 928 - tstats->rx_errors++; 929 - u64_stats_update_end(&tstats->syncp); 935 + atomic_long_inc(&tunnel->stats.rx_errors); 930 936 kfree_skb(skb); 931 937 932 938 return 0; ··· 1050 1062 struct l2tp_tunnel *tunnel = session->tunnel; 1051 1063 unsigned int len = skb->len; 1052 1064 int error; 1053 - struct l2tp_stats *tstats, *sstats; 1054 1065 1055 1066 /* Debug */ 1056 1067 if (session->send_seq) ··· 1078 1091 error = ip_queue_xmit(skb, fl); 1079 1092 1080 1093 /* Update stats */ 1081 - tstats = &tunnel->stats; 1082 - u64_stats_update_begin(&tstats->syncp); 1083 - sstats = &session->stats; 1084 - u64_stats_update_begin(&sstats->syncp); 1085 1094 if (error >= 0) { 1086 - tstats->tx_packets++; 1087 - tstats->tx_bytes += len; 1088 - sstats->tx_packets++; 1089 - sstats->tx_bytes += len; 1095 + atomic_long_inc(&tunnel->stats.tx_packets); 1096 + atomic_long_add(len, &tunnel->stats.tx_bytes); 1097 + atomic_long_inc(&session->stats.tx_packets); 1098 + atomic_long_add(len, 
&session->stats.tx_bytes); 1090 1099 } else { 1091 - tstats->tx_errors++; 1092 - sstats->tx_errors++; 1100 + atomic_long_inc(&tunnel->stats.tx_errors); 1101 + atomic_long_inc(&session->stats.tx_errors); 1093 1102 } 1094 - u64_stats_update_end(&tstats->syncp); 1095 - u64_stats_update_end(&sstats->syncp); 1096 1103 1097 1104 return 0; 1098 1105 } ··· 1263 1282 /* No longer an encapsulation socket. See net/ipv4/udp.c */ 1264 1283 (udp_sk(sk))->encap_type = 0; 1265 1284 (udp_sk(sk))->encap_rcv = NULL; 1285 + (udp_sk(sk))->encap_destroy = NULL; 1266 1286 break; 1267 1287 case L2TP_ENCAPTYPE_IP: 1268 1288 break; ··· 1293 1311 1294 1312 /* When the tunnel is closed, all the attached sessions need to go too. 1295 1313 */ 1296 - static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) 1314 + void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) 1297 1315 { 1298 1316 int hash; 1299 1317 struct hlist_node *walk; ··· 1316 1334 1317 1335 hlist_del_init(&session->hlist); 1318 1336 1319 - /* Since we should hold the sock lock while 1320 - * doing any unbinding, we need to release the 1321 - * lock we're holding before taking that lock. 1322 - * Hold a reference to the sock so it doesn't 1323 - * disappear as we're jumping between locks. 
1324 - */ 1325 1337 if (session->ref != NULL) 1326 1338 (*session->ref)(session); 1327 1339 1328 1340 write_unlock_bh(&tunnel->hlist_lock); 1329 1341 1330 - if (tunnel->version != L2TP_HDR_VER_2) { 1331 - struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); 1332 - 1333 - spin_lock_bh(&pn->l2tp_session_hlist_lock); 1334 - hlist_del_init_rcu(&session->global_hlist); 1335 - spin_unlock_bh(&pn->l2tp_session_hlist_lock); 1336 - synchronize_rcu(); 1337 - } 1342 + __l2tp_session_unhash(session); 1343 + l2tp_session_queue_purge(session); 1338 1344 1339 1345 if (session->session_close != NULL) 1340 1346 (*session->session_close)(session); 1341 1347 1342 1348 if (session->deref != NULL) 1343 1349 (*session->deref)(session); 1350 + 1351 + l2tp_session_dec_refcount(session); 1344 1352 1345 1353 write_lock_bh(&tunnel->hlist_lock); 1346 1354 ··· 1343 1371 } 1344 1372 } 1345 1373 write_unlock_bh(&tunnel->hlist_lock); 1374 + } 1375 + EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall); 1376 + 1377 + /* Tunnel socket destroy hook for UDP encapsulation */ 1378 + static void l2tp_udp_encap_destroy(struct sock *sk) 1379 + { 1380 + struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); 1381 + if (tunnel) { 1382 + l2tp_tunnel_closeall(tunnel); 1383 + sock_put(sk); 1384 + } 1346 1385 } 1347 1386 1348 1387 /* Really kill the tunnel. ··· 1380 1397 return; 1381 1398 1382 1399 sock = sk->sk_socket; 1383 - BUG_ON(!sock); 1384 1400 1385 - /* If the tunnel socket was created directly by the kernel, use the 1386 - * sk_* API to release the socket now. Otherwise go through the 1387 - * inet_* layer to shut the socket down, and let userspace close it. 1401 + /* If the tunnel socket was created by userspace, then go through the 1402 + * inet layer to shut the socket down, and let userspace close it. 1403 + * Otherwise, if we created the socket directly within the kernel, use 1404 + * the sk API to release it here. 
1388 1405 * In either case the tunnel resources are freed in the socket 1389 1406 * destructor when the tunnel socket goes away. 1390 1407 */ 1391 - if (sock->file == NULL) { 1392 - kernel_sock_shutdown(sock, SHUT_RDWR); 1393 - sk_release_kernel(sk); 1408 + if (tunnel->fd >= 0) { 1409 + if (sock) 1410 + inet_shutdown(sock, 2); 1394 1411 } else { 1395 - inet_shutdown(sock, 2); 1412 + if (sock) 1413 + kernel_sock_shutdown(sock, SHUT_RDWR); 1414 + sk_release_kernel(sk); 1396 1415 } 1397 1416 1398 1417 l2tp_tunnel_sock_put(sk); ··· 1653 1668 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ 1654 1669 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; 1655 1670 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; 1671 + udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy; 1656 1672 #if IS_ENABLED(CONFIG_IPV6) 1657 1673 if (sk->sk_family == PF_INET6) 1658 1674 udpv6_encap_enable(); ··· 1709 1723 */ 1710 1724 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) 1711 1725 { 1726 + l2tp_tunnel_closeall(tunnel); 1712 1727 return (false == queue_work(l2tp_wq, &tunnel->del_work)); 1713 1728 } 1714 1729 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); ··· 1718 1731 */ 1719 1732 void l2tp_session_free(struct l2tp_session *session) 1720 1733 { 1721 - struct l2tp_tunnel *tunnel; 1734 + struct l2tp_tunnel *tunnel = session->tunnel; 1722 1735 1723 1736 BUG_ON(atomic_read(&session->ref_count) != 0); 1724 1737 1725 - tunnel = session->tunnel; 1726 - if (tunnel != NULL) { 1738 + if (tunnel) { 1727 1739 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); 1728 - 1729 - /* Delete the session from the hash */ 1730 - write_lock_bh(&tunnel->hlist_lock); 1731 - hlist_del_init(&session->hlist); 1732 - write_unlock_bh(&tunnel->hlist_lock); 1733 - 1734 - /* Unlink from the global hash if not L2TPv2 */ 1735 - if (tunnel->version != L2TP_HDR_VER_2) { 1736 - struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); 1737 - 1738 - spin_lock_bh(&pn->l2tp_session_hlist_lock); 1739 - hlist_del_init_rcu(&session->global_hlist); 
1740 - spin_unlock_bh(&pn->l2tp_session_hlist_lock); 1741 - synchronize_rcu(); 1742 - } 1743 - 1744 1740 if (session->session_id != 0) 1745 1741 atomic_dec(&l2tp_session_count); 1746 - 1747 1742 sock_put(tunnel->sock); 1748 - 1749 - /* This will delete the tunnel context if this 1750 - * is the last session on the tunnel. 1751 - */ 1752 1743 session->tunnel = NULL; 1753 1744 l2tp_tunnel_dec_refcount(tunnel); 1754 1745 } ··· 1737 1772 } 1738 1773 EXPORT_SYMBOL_GPL(l2tp_session_free); 1739 1774 1775 + /* Remove an l2tp session from l2tp_core's hash lists. 1776 + * Provides a tidyup interface for pseudowire code which can't just route all 1777 + * shutdown via. l2tp_session_delete and a pseudowire-specific session_close 1778 + * callback. 1779 + */ 1780 + void __l2tp_session_unhash(struct l2tp_session *session) 1781 + { 1782 + struct l2tp_tunnel *tunnel = session->tunnel; 1783 + 1784 + /* Remove the session from core hashes */ 1785 + if (tunnel) { 1786 + /* Remove from the per-tunnel hash */ 1787 + write_lock_bh(&tunnel->hlist_lock); 1788 + hlist_del_init(&session->hlist); 1789 + write_unlock_bh(&tunnel->hlist_lock); 1790 + 1791 + /* For L2TPv3 we have a per-net hash: remove from there, too */ 1792 + if (tunnel->version != L2TP_HDR_VER_2) { 1793 + struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); 1794 + spin_lock_bh(&pn->l2tp_session_hlist_lock); 1795 + hlist_del_init_rcu(&session->global_hlist); 1796 + spin_unlock_bh(&pn->l2tp_session_hlist_lock); 1797 + synchronize_rcu(); 1798 + } 1799 + } 1800 + } 1801 + EXPORT_SYMBOL_GPL(__l2tp_session_unhash); 1802 + 1740 1803 /* This function is used by the netlink SESSION_DELETE command and by 1741 1804 pseudowire modules. 
1742 1805 */ 1743 1806 int l2tp_session_delete(struct l2tp_session *session) 1744 1807 { 1808 + if (session->ref) 1809 + (*session->ref)(session); 1810 + __l2tp_session_unhash(session); 1811 + l2tp_session_queue_purge(session); 1745 1812 if (session->session_close != NULL) 1746 1813 (*session->session_close)(session); 1747 - 1814 + if (session->deref) 1815 + (*session->ref)(session); 1748 1816 l2tp_session_dec_refcount(session); 1749 - 1750 1817 return 0; 1751 1818 } 1752 1819 EXPORT_SYMBOL_GPL(l2tp_session_delete); 1753 - 1754 1820 1755 1821 /* We come here whenever a session's send_seq, cookie_len or 1756 1822 * l2specific_len parameters are set.
+12 -10
net/l2tp/l2tp_core.h
··· 36 36 struct sk_buff; 37 37 38 38 struct l2tp_stats { 39 - u64 tx_packets; 40 - u64 tx_bytes; 41 - u64 tx_errors; 42 - u64 rx_packets; 43 - u64 rx_bytes; 44 - u64 rx_seq_discards; 45 - u64 rx_oos_packets; 46 - u64 rx_errors; 47 - u64 rx_cookie_discards; 48 - struct u64_stats_sync syncp; 39 + atomic_long_t tx_packets; 40 + atomic_long_t tx_bytes; 41 + atomic_long_t tx_errors; 42 + atomic_long_t rx_packets; 43 + atomic_long_t rx_bytes; 44 + atomic_long_t rx_seq_discards; 45 + atomic_long_t rx_oos_packets; 46 + atomic_long_t rx_errors; 47 + atomic_long_t rx_cookie_discards; 49 48 }; 50 49 51 50 struct l2tp_tunnel; ··· 239 240 extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); 240 241 241 242 extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp); 243 + extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); 242 244 extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); 243 245 extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); 246 + extern void __l2tp_session_unhash(struct l2tp_session *session); 244 247 extern int l2tp_session_delete(struct l2tp_session *session); 245 248 extern void l2tp_session_free(struct l2tp_session *session); 246 249 extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); 250 + extern int l2tp_session_queue_purge(struct l2tp_session *session); 247 251 extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); 248 252 249 253 extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
+14 -14
net/l2tp/l2tp_debugfs.c
··· 146 146 tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0, 147 147 atomic_read(&tunnel->ref_count)); 148 148 149 - seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n", 149 + seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n", 150 150 tunnel->debug, 151 - (unsigned long long)tunnel->stats.tx_packets, 152 - (unsigned long long)tunnel->stats.tx_bytes, 153 - (unsigned long long)tunnel->stats.tx_errors, 154 - (unsigned long long)tunnel->stats.rx_packets, 155 - (unsigned long long)tunnel->stats.rx_bytes, 156 - (unsigned long long)tunnel->stats.rx_errors); 151 + atomic_long_read(&tunnel->stats.tx_packets), 152 + atomic_long_read(&tunnel->stats.tx_bytes), 153 + atomic_long_read(&tunnel->stats.tx_errors), 154 + atomic_long_read(&tunnel->stats.rx_packets), 155 + atomic_long_read(&tunnel->stats.rx_bytes), 156 + atomic_long_read(&tunnel->stats.rx_errors)); 157 157 158 158 if (tunnel->show != NULL) 159 159 tunnel->show(m, tunnel); ··· 203 203 seq_printf(m, "\n"); 204 204 } 205 205 206 - seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n", 206 + seq_printf(m, " %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n", 207 207 session->nr, session->ns, 208 - (unsigned long long)session->stats.tx_packets, 209 - (unsigned long long)session->stats.tx_bytes, 210 - (unsigned long long)session->stats.tx_errors, 211 - (unsigned long long)session->stats.rx_packets, 212 - (unsigned long long)session->stats.rx_bytes, 213 - (unsigned long long)session->stats.rx_errors); 208 + atomic_long_read(&session->stats.tx_packets), 209 + atomic_long_read(&session->stats.tx_bytes), 210 + atomic_long_read(&session->stats.tx_errors), 211 + atomic_long_read(&session->stats.rx_packets), 212 + atomic_long_read(&session->stats.rx_bytes), 213 + atomic_long_read(&session->stats.rx_errors)); 214 214 215 215 if (session->show != NULL) 216 216 session->show(m, session);
+6
net/l2tp/l2tp_ip.c
··· 228 228 static void l2tp_ip_destroy_sock(struct sock *sk) 229 229 { 230 230 struct sk_buff *skb; 231 + struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); 231 232 232 233 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) 233 234 kfree_skb(skb); 235 + 236 + if (tunnel) { 237 + l2tp_tunnel_closeall(tunnel); 238 + sock_put(sk); 239 + } 234 240 235 241 sk_refcnt_debug_dec(sk); 236 242 }
+7
net/l2tp/l2tp_ip6.c
··· 241 241 242 242 static void l2tp_ip6_destroy_sock(struct sock *sk) 243 243 { 244 + struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); 245 + 244 246 lock_sock(sk); 245 247 ip6_flush_pending_frames(sk); 246 248 release_sock(sk); 249 + 250 + if (tunnel) { 251 + l2tp_tunnel_closeall(tunnel); 252 + sock_put(sk); 253 + } 247 254 248 255 inet6_destroy_sock(sk); 249 256 }
+28 -44
net/l2tp/l2tp_netlink.c
··· 246 246 #if IS_ENABLED(CONFIG_IPV6) 247 247 struct ipv6_pinfo *np = NULL; 248 248 #endif 249 - struct l2tp_stats stats; 250 - unsigned int start; 251 249 252 250 hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, 253 251 L2TP_CMD_TUNNEL_GET); ··· 263 265 if (nest == NULL) 264 266 goto nla_put_failure; 265 267 266 - do { 267 - start = u64_stats_fetch_begin(&tunnel->stats.syncp); 268 - stats.tx_packets = tunnel->stats.tx_packets; 269 - stats.tx_bytes = tunnel->stats.tx_bytes; 270 - stats.tx_errors = tunnel->stats.tx_errors; 271 - stats.rx_packets = tunnel->stats.rx_packets; 272 - stats.rx_bytes = tunnel->stats.rx_bytes; 273 - stats.rx_errors = tunnel->stats.rx_errors; 274 - stats.rx_seq_discards = tunnel->stats.rx_seq_discards; 275 - stats.rx_oos_packets = tunnel->stats.rx_oos_packets; 276 - } while (u64_stats_fetch_retry(&tunnel->stats.syncp, start)); 277 - 278 - if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || 279 - nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || 280 - nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || 281 - nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || 282 - nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || 268 + if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, 269 + atomic_long_read(&tunnel->stats.tx_packets)) || 270 + nla_put_u64(skb, L2TP_ATTR_TX_BYTES, 271 + atomic_long_read(&tunnel->stats.tx_bytes)) || 272 + nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, 273 + atomic_long_read(&tunnel->stats.tx_errors)) || 274 + nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, 275 + atomic_long_read(&tunnel->stats.rx_packets)) || 276 + nla_put_u64(skb, L2TP_ATTR_RX_BYTES, 277 + atomic_long_read(&tunnel->stats.rx_bytes)) || 283 278 nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, 284 - stats.rx_seq_discards) || 279 + atomic_long_read(&tunnel->stats.rx_seq_discards)) || 285 280 nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, 286 - stats.rx_oos_packets) || 287 - nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) 281 + 
atomic_long_read(&tunnel->stats.rx_oos_packets)) || 282 + nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, 283 + atomic_long_read(&tunnel->stats.rx_errors))) 288 284 goto nla_put_failure; 289 285 nla_nest_end(skb, nest); 290 286 ··· 604 612 struct nlattr *nest; 605 613 struct l2tp_tunnel *tunnel = session->tunnel; 606 614 struct sock *sk = NULL; 607 - struct l2tp_stats stats; 608 - unsigned int start; 609 615 610 616 sk = tunnel->sock; 611 617 ··· 646 656 if (nest == NULL) 647 657 goto nla_put_failure; 648 658 649 - do { 650 - start = u64_stats_fetch_begin(&session->stats.syncp); 651 - stats.tx_packets = session->stats.tx_packets; 652 - stats.tx_bytes = session->stats.tx_bytes; 653 - stats.tx_errors = session->stats.tx_errors; 654 - stats.rx_packets = session->stats.rx_packets; 655 - stats.rx_bytes = session->stats.rx_bytes; 656 - stats.rx_errors = session->stats.rx_errors; 657 - stats.rx_seq_discards = session->stats.rx_seq_discards; 658 - stats.rx_oos_packets = session->stats.rx_oos_packets; 659 - } while (u64_stats_fetch_retry(&session->stats.syncp, start)); 660 - 661 - if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || 662 - nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || 663 - nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || 664 - nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || 665 - nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || 659 + if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, 660 + atomic_long_read(&session->stats.tx_packets)) || 661 + nla_put_u64(skb, L2TP_ATTR_TX_BYTES, 662 + atomic_long_read(&session->stats.tx_bytes)) || 663 + nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, 664 + atomic_long_read(&session->stats.tx_errors)) || 665 + nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, 666 + atomic_long_read(&session->stats.rx_packets)) || 667 + nla_put_u64(skb, L2TP_ATTR_RX_BYTES, 668 + atomic_long_read(&session->stats.rx_bytes)) || 666 669 nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, 667 - stats.rx_seq_discards) || 670 + 
atomic_long_read(&session->stats.rx_seq_discards)) || 668 671 nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, 669 - stats.rx_oos_packets) || 670 - nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) 672 + atomic_long_read(&session->stats.rx_oos_packets)) || 673 + nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, 674 + atomic_long_read(&session->stats.rx_errors))) 671 675 goto nla_put_failure; 672 676 nla_nest_end(skb, nest); 673 677
+36 -75
net/l2tp/l2tp_ppp.c
··· 97 97 #include <net/ip.h> 98 98 #include <net/udp.h> 99 99 #include <net/xfrm.h> 100 + #include <net/inet_common.h> 100 101 101 102 #include <asm/byteorder.h> 102 103 #include <linux/atomic.h> ··· 260 259 session->name); 261 260 262 261 /* Not bound. Nothing we can do, so discard. */ 263 - session->stats.rx_errors++; 262 + atomic_long_inc(&session->stats.rx_errors); 264 263 kfree_skb(skb); 265 264 } 266 265 ··· 448 447 { 449 448 struct pppol2tp_session *ps = l2tp_session_priv(session); 450 449 struct sock *sk = ps->sock; 451 - struct sk_buff *skb; 450 + struct socket *sock = sk->sk_socket; 452 451 453 452 BUG_ON(session->magic != L2TP_SESSION_MAGIC); 454 453 455 - if (session->session_id == 0) 456 - goto out; 457 454 458 - if (sk != NULL) { 459 - lock_sock(sk); 460 - 461 - if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { 462 - pppox_unbind_sock(sk); 463 - sk->sk_state = PPPOX_DEAD; 464 - sk->sk_state_change(sk); 465 - } 466 - 467 - /* Purge any queued data */ 468 - skb_queue_purge(&sk->sk_receive_queue); 469 - skb_queue_purge(&sk->sk_write_queue); 470 - while ((skb = skb_dequeue(&session->reorder_q))) { 471 - kfree_skb(skb); 472 - sock_put(sk); 473 - } 474 - 475 - release_sock(sk); 455 + if (sock) { 456 + inet_shutdown(sock, 2); 457 + /* Don't let the session go away before our socket does */ 458 + l2tp_session_inc_refcount(session); 476 459 } 477 - 478 - out: 479 460 return; 480 461 } 481 462 ··· 466 483 */ 467 484 static void pppol2tp_session_destruct(struct sock *sk) 468 485 { 469 - struct l2tp_session *session; 470 - 471 - if (sk->sk_user_data != NULL) { 472 - session = sk->sk_user_data; 473 - if (session == NULL) 474 - goto out; 475 - 486 + struct l2tp_session *session = sk->sk_user_data; 487 + if (session) { 476 488 sk->sk_user_data = NULL; 477 489 BUG_ON(session->magic != L2TP_SESSION_MAGIC); 478 490 l2tp_session_dec_refcount(session); 479 491 } 480 - 481 - out: 482 492 return; 483 493 } 484 494 ··· 501 525 session = pppol2tp_sock_to_session(sk); 
502 526 503 527 /* Purge any queued data */ 504 - skb_queue_purge(&sk->sk_receive_queue); 505 - skb_queue_purge(&sk->sk_write_queue); 506 528 if (session != NULL) { 507 - struct sk_buff *skb; 508 - while ((skb = skb_dequeue(&session->reorder_q))) { 509 - kfree_skb(skb); 510 - sock_put(sk); 511 - } 529 + __l2tp_session_unhash(session); 530 + l2tp_session_queue_purge(session); 512 531 sock_put(sk); 513 532 } 533 + skb_queue_purge(&sk->sk_receive_queue); 534 + skb_queue_purge(&sk->sk_write_queue); 514 535 515 536 release_sock(sk); 516 537 ··· 853 880 return error; 854 881 } 855 882 856 - /* Called when deleting sessions via the netlink interface. 857 - */ 858 - static int pppol2tp_session_delete(struct l2tp_session *session) 859 - { 860 - struct pppol2tp_session *ps = l2tp_session_priv(session); 861 - 862 - if (ps->sock == NULL) 863 - l2tp_session_dec_refcount(session); 864 - 865 - return 0; 866 - } 867 - 868 883 #endif /* CONFIG_L2TP_V3 */ 869 884 870 885 /* getname() support. ··· 986 1025 static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, 987 1026 struct l2tp_stats *stats) 988 1027 { 989 - dest->tx_packets = stats->tx_packets; 990 - dest->tx_bytes = stats->tx_bytes; 991 - dest->tx_errors = stats->tx_errors; 992 - dest->rx_packets = stats->rx_packets; 993 - dest->rx_bytes = stats->rx_bytes; 994 - dest->rx_seq_discards = stats->rx_seq_discards; 995 - dest->rx_oos_packets = stats->rx_oos_packets; 996 - dest->rx_errors = stats->rx_errors; 1028 + dest->tx_packets = atomic_long_read(&stats->tx_packets); 1029 + dest->tx_bytes = atomic_long_read(&stats->tx_bytes); 1030 + dest->tx_errors = atomic_long_read(&stats->tx_errors); 1031 + dest->rx_packets = atomic_long_read(&stats->rx_packets); 1032 + dest->rx_bytes = atomic_long_read(&stats->rx_bytes); 1033 + dest->rx_seq_discards = atomic_long_read(&stats->rx_seq_discards); 1034 + dest->rx_oos_packets = atomic_long_read(&stats->rx_oos_packets); 1035 + dest->rx_errors = atomic_long_read(&stats->rx_errors); 997 1036 
} 998 1037 999 1038 /* Session ioctl helper. ··· 1627 1666 tunnel->name, 1628 1667 (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N', 1629 1668 atomic_read(&tunnel->ref_count) - 1); 1630 - seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n", 1669 + seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n", 1631 1670 tunnel->debug, 1632 - (unsigned long long)tunnel->stats.tx_packets, 1633 - (unsigned long long)tunnel->stats.tx_bytes, 1634 - (unsigned long long)tunnel->stats.tx_errors, 1635 - (unsigned long long)tunnel->stats.rx_packets, 1636 - (unsigned long long)tunnel->stats.rx_bytes, 1637 - (unsigned long long)tunnel->stats.rx_errors); 1671 + atomic_long_read(&tunnel->stats.tx_packets), 1672 + atomic_long_read(&tunnel->stats.tx_bytes), 1673 + atomic_long_read(&tunnel->stats.tx_errors), 1674 + atomic_long_read(&tunnel->stats.rx_packets), 1675 + atomic_long_read(&tunnel->stats.rx_bytes), 1676 + atomic_long_read(&tunnel->stats.rx_errors)); 1638 1677 } 1639 1678 1640 1679 static void pppol2tp_seq_session_show(struct seq_file *m, void *v) ··· 1669 1708 session->lns_mode ? 
"LNS" : "LAC", 1670 1709 session->debug, 1671 1710 jiffies_to_msecs(session->reorder_timeout)); 1672 - seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n", 1711 + seq_printf(m, " %hu/%hu %ld/%ld/%ld %ld/%ld/%ld\n", 1673 1712 session->nr, session->ns, 1674 - (unsigned long long)session->stats.tx_packets, 1675 - (unsigned long long)session->stats.tx_bytes, 1676 - (unsigned long long)session->stats.tx_errors, 1677 - (unsigned long long)session->stats.rx_packets, 1678 - (unsigned long long)session->stats.rx_bytes, 1679 - (unsigned long long)session->stats.rx_errors); 1713 + atomic_long_read(&session->stats.tx_packets), 1714 + atomic_long_read(&session->stats.tx_bytes), 1715 + atomic_long_read(&session->stats.tx_errors), 1716 + atomic_long_read(&session->stats.rx_packets), 1717 + atomic_long_read(&session->stats.rx_bytes), 1718 + atomic_long_read(&session->stats.rx_errors)); 1680 1719 1681 1720 if (po) 1682 1721 seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); ··· 1800 1839 1801 1840 static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = { 1802 1841 .session_create = pppol2tp_session_create, 1803 - .session_delete = pppol2tp_session_delete, 1842 + .session_delete = l2tp_session_delete, 1804 1843 }; 1805 1844 1806 1845 #endif /* CONFIG_L2TP_V3 */
+8 -6
net/netfilter/ipvs/ip_vs_core.c
··· 1394 1394 skb_reset_network_header(skb); 1395 1395 IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n", 1396 1396 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu); 1397 - rcu_read_lock(); 1398 1397 ipv4_update_pmtu(skb, dev_net(skb->dev), 1399 1398 mtu, 0, 0, 0, 0); 1400 - rcu_read_unlock(); 1401 1399 /* Client uses PMTUD? */ 1402 1400 if (!(cih->frag_off & htons(IP_DF))) 1403 1401 goto ignore_ipip; ··· 1575 1577 } 1576 1578 /* ipvs enabled in this netns ? */ 1577 1579 net = skb_net(skb); 1578 - if (!net_ipvs(net)->enable) 1580 + ipvs = net_ipvs(net); 1581 + if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) 1579 1582 return NF_ACCEPT; 1580 1583 1581 1584 ip_vs_fill_iph_skb(af, skb, &iph); ··· 1653 1654 } 1654 1655 1655 1656 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); 1656 - ipvs = net_ipvs(net); 1657 1657 /* Check the server status */ 1658 1658 if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { 1659 1659 /* the destination server is not available */ ··· 1813 1815 { 1814 1816 int r; 1815 1817 struct net *net; 1818 + struct netns_ipvs *ipvs; 1816 1819 1817 1820 if (ip_hdr(skb)->protocol != IPPROTO_ICMP) 1818 1821 return NF_ACCEPT; 1819 1822 1820 1823 /* ipvs enabled in this netns ? */ 1821 1824 net = skb_net(skb); 1822 - if (!net_ipvs(net)->enable) 1825 + ipvs = net_ipvs(net); 1826 + if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) 1823 1827 return NF_ACCEPT; 1824 1828 1825 1829 return ip_vs_in_icmp(skb, &r, hooknum); ··· 1835 1835 { 1836 1836 int r; 1837 1837 struct net *net; 1838 + struct netns_ipvs *ipvs; 1838 1839 struct ip_vs_iphdr iphdr; 1839 1840 1840 1841 ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr); ··· 1844 1843 1845 1844 /* ipvs enabled in this netns ? */ 1846 1845 net = skb_net(skb); 1847 - if (!net_ipvs(net)->enable) 1846 + ipvs = net_ipvs(net); 1847 + if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) 1848 1848 return NF_ACCEPT; 1849 1849 1850 1850 return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr);
+7
net/netfilter/ipvs/ip_vs_ctl.c
··· 1808 1808 .mode = 0644, 1809 1809 .proc_handler = proc_dointvec, 1810 1810 }, 1811 + { 1812 + .procname = "backup_only", 1813 + .maxlen = sizeof(int), 1814 + .mode = 0644, 1815 + .proc_handler = proc_dointvec, 1816 + }, 1811 1817 #ifdef CONFIG_IP_VS_DEBUG 1812 1818 { 1813 1819 .procname = "debug_level", ··· 3747 3741 tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; 3748 3742 ipvs->sysctl_pmtu_disc = 1; 3749 3743 tbl[idx++].data = &ipvs->sysctl_pmtu_disc; 3744 + tbl[idx++].data = &ipvs->sysctl_backup_only; 3750 3745 3751 3746 3752 3747 ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
+9 -7
net/netfilter/ipvs/ip_vs_proto_sctp.c
··· 906 906 sctp_chunkhdr_t _sctpch, *sch; 907 907 unsigned char chunk_type; 908 908 int event, next_state; 909 - int ihl; 909 + int ihl, cofs; 910 910 911 911 #ifdef CONFIG_IP_VS_IPV6 912 912 ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); ··· 914 914 ihl = ip_hdrlen(skb); 915 915 #endif 916 916 917 - sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t), 918 - sizeof(_sctpch), &_sctpch); 917 + cofs = ihl + sizeof(sctp_sctphdr_t); 918 + sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch); 919 919 if (sch == NULL) 920 920 return; 921 921 ··· 933 933 */ 934 934 if ((sch->type == SCTP_CID_COOKIE_ECHO) || 935 935 (sch->type == SCTP_CID_COOKIE_ACK)) { 936 - sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) + 937 - sch->length), sizeof(_sctpch), &_sctpch); 938 - if (sch) { 939 - if (sch->type == SCTP_CID_ABORT) 936 + int clen = ntohs(sch->length); 937 + 938 + if (clen >= sizeof(sctp_chunkhdr_t)) { 939 + sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4), 940 + sizeof(_sctpch), &_sctpch); 941 + if (sch && sch->type == SCTP_CID_ABORT) 940 942 chunk_type = sch->type; 941 943 } 942 944 }
+6 -6
net/netfilter/nf_conntrack_proto_dccp.c
··· 969 969 { 970 970 int ret; 971 971 972 + ret = register_pernet_subsys(&dccp_net_ops); 973 + if (ret < 0) 974 + goto out_pernet; 975 + 972 976 ret = nf_ct_l4proto_register(&dccp_proto4); 973 977 if (ret < 0) 974 978 goto out_dccp4; ··· 981 977 if (ret < 0) 982 978 goto out_dccp6; 983 979 984 - ret = register_pernet_subsys(&dccp_net_ops); 985 - if (ret < 0) 986 - goto out_pernet; 987 - 988 980 return 0; 989 - out_pernet: 990 - nf_ct_l4proto_unregister(&dccp_proto6); 991 981 out_dccp6: 992 982 nf_ct_l4proto_unregister(&dccp_proto4); 993 983 out_dccp4: 984 + unregister_pernet_subsys(&dccp_net_ops); 985 + out_pernet: 994 986 return ret; 995 987 } 996 988
+6 -6
net/netfilter/nf_conntrack_proto_gre.c
··· 420 420 { 421 421 int ret; 422 422 423 - ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4); 424 - if (ret < 0) 425 - goto out_gre4; 426 - 427 423 ret = register_pernet_subsys(&proto_gre_net_ops); 428 424 if (ret < 0) 429 425 goto out_pernet; 430 426 427 + ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4); 428 + if (ret < 0) 429 + goto out_gre4; 430 + 431 431 return 0; 432 - out_pernet: 433 - nf_ct_l4proto_unregister(&nf_conntrack_l4proto_gre4); 434 432 out_gre4: 433 + unregister_pernet_subsys(&proto_gre_net_ops); 434 + out_pernet: 435 435 return ret; 436 436 } 437 437
+6 -6
net/netfilter/nf_conntrack_proto_sctp.c
··· 888 888 { 889 889 int ret; 890 890 891 + ret = register_pernet_subsys(&sctp_net_ops); 892 + if (ret < 0) 893 + goto out_pernet; 894 + 891 895 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4); 892 896 if (ret < 0) 893 897 goto out_sctp4; ··· 900 896 if (ret < 0) 901 897 goto out_sctp6; 902 898 903 - ret = register_pernet_subsys(&sctp_net_ops); 904 - if (ret < 0) 905 - goto out_pernet; 906 - 907 899 return 0; 908 - out_pernet: 909 - nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp6); 910 900 out_sctp6: 911 901 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4); 912 902 out_sctp4: 903 + unregister_pernet_subsys(&sctp_net_ops); 904 + out_pernet: 913 905 return ret; 914 906 } 915 907
+6 -6
net/netfilter/nf_conntrack_proto_udplite.c
··· 371 371 { 372 372 int ret; 373 373 374 + ret = register_pernet_subsys(&udplite_net_ops); 375 + if (ret < 0) 376 + goto out_pernet; 377 + 374 378 ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4); 375 379 if (ret < 0) 376 380 goto out_udplite4; ··· 383 379 if (ret < 0) 384 380 goto out_udplite6; 385 381 386 - ret = register_pernet_subsys(&udplite_net_ops); 387 - if (ret < 0) 388 - goto out_pernet; 389 - 390 382 return 0; 391 - out_pernet: 392 - nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6); 393 383 out_udplite6: 394 384 nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4); 395 385 out_udplite4: 386 + unregister_pernet_subsys(&udplite_net_ops); 387 + out_pernet: 396 388 return ret; 397 389 } 398 390
+1
net/netlink/genetlink.c
··· 142 142 int err = 0; 143 143 144 144 BUG_ON(grp->name[0] == '\0'); 145 + BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL); 145 146 146 147 genl_lock(); 147 148
+53 -9
net/nfc/llcp/llcp.c
··· 68 68 } 69 69 } 70 70 71 - static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) 71 + static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen, 72 + int err) 72 73 { 73 74 struct sock *sk; 74 75 struct hlist_node *tmp; ··· 101 100 102 101 nfc_llcp_accept_unlink(accept_sk); 103 102 103 + if (err) 104 + accept_sk->sk_err = err; 104 105 accept_sk->sk_state = LLCP_CLOSED; 106 + accept_sk->sk_state_change(sk); 105 107 106 108 bh_unlock_sock(accept_sk); 107 109 ··· 127 123 continue; 128 124 } 129 125 126 + if (err) 127 + sk->sk_err = err; 130 128 sk->sk_state = LLCP_CLOSED; 129 + sk->sk_state_change(sk); 131 130 132 131 bh_unlock_sock(sk); 133 132 ··· 140 133 } 141 134 142 135 write_unlock(&local->sockets.lock); 136 + 137 + /* 138 + * If we want to keep the listening sockets alive, 139 + * we don't touch the RAW ones. 140 + */ 141 + if (listen == true) 142 + return; 143 + 144 + write_lock(&local->raw_sockets.lock); 145 + 146 + sk_for_each_safe(sk, tmp, &local->raw_sockets.head) { 147 + llcp_sock = nfc_llcp_sock(sk); 148 + 149 + bh_lock_sock(sk); 150 + 151 + nfc_llcp_socket_purge(llcp_sock); 152 + 153 + if (err) 154 + sk->sk_err = err; 155 + sk->sk_state = LLCP_CLOSED; 156 + sk->sk_state_change(sk); 157 + 158 + bh_unlock_sock(sk); 159 + 160 + sock_orphan(sk); 161 + 162 + sk_del_node_init(sk); 163 + } 164 + 165 + write_unlock(&local->raw_sockets.lock); 143 166 } 144 167 145 168 struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) ··· 179 142 return local; 180 143 } 181 144 145 + static void local_cleanup(struct nfc_llcp_local *local, bool listen) 146 + { 147 + nfc_llcp_socket_release(local, listen, ENXIO); 148 + del_timer_sync(&local->link_timer); 149 + skb_queue_purge(&local->tx_queue); 150 + cancel_work_sync(&local->tx_work); 151 + cancel_work_sync(&local->rx_work); 152 + cancel_work_sync(&local->timeout_work); 153 + kfree_skb(local->rx_pending); 154 + } 155 + 182 156 static void local_release(struct 
kref *ref) 183 157 { 184 158 struct nfc_llcp_local *local; ··· 197 149 local = container_of(ref, struct nfc_llcp_local, ref); 198 150 199 151 list_del(&local->list); 200 - nfc_llcp_socket_release(local, false); 201 - del_timer_sync(&local->link_timer); 202 - skb_queue_purge(&local->tx_queue); 203 - cancel_work_sync(&local->tx_work); 204 - cancel_work_sync(&local->rx_work); 205 - cancel_work_sync(&local->timeout_work); 206 - kfree_skb(local->rx_pending); 152 + local_cleanup(local, false); 207 153 kfree(local); 208 154 } 209 155 ··· 1390 1348 return; 1391 1349 1392 1350 /* Close and purge all existing sockets */ 1393 - nfc_llcp_socket_release(local, true); 1351 + nfc_llcp_socket_release(local, true, 0); 1394 1352 } 1395 1353 1396 1354 void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, ··· 1468 1426 pr_debug("No such device\n"); 1469 1427 return; 1470 1428 } 1429 + 1430 + local_cleanup(local, false); 1471 1431 1472 1432 nfc_llcp_local_put(local); 1473 1433 }
+2
net/nfc/llcp/sock.c
··· 278 278 279 279 pr_debug("Returning sk state %d\n", sk->sk_state); 280 280 281 + sk_acceptq_removed(parent); 282 + 281 283 return sk; 282 284 } 283 285
+2 -2
net/openvswitch/actions.c
··· 58 58 59 59 if (skb->ip_summed == CHECKSUM_COMPLETE) 60 60 skb->csum = csum_sub(skb->csum, csum_partial(skb->data 61 - + ETH_HLEN, VLAN_HLEN, 0)); 61 + + (2 * ETH_ALEN), VLAN_HLEN, 0)); 62 62 63 63 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); 64 64 *current_tci = vhdr->h_vlan_TCI; ··· 115 115 116 116 if (skb->ip_summed == CHECKSUM_COMPLETE) 117 117 skb->csum = csum_add(skb->csum, csum_partial(skb->data 118 - + ETH_HLEN, VLAN_HLEN, 0)); 118 + + (2 * ETH_ALEN), VLAN_HLEN, 0)); 119 119 120 120 } 121 121 __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
+3
net/openvswitch/datapath.c
··· 395 395 396 396 skb_copy_and_csum_dev(skb, nla_data(nla)); 397 397 398 + genlmsg_end(user_skb, upcall); 398 399 err = genlmsg_unicast(net, user_skb, upcall_info->portid); 399 400 400 401 out: ··· 1692 1691 if (IS_ERR(vport)) 1693 1692 goto exit_unlock; 1694 1693 1694 + err = 0; 1695 1695 reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, 1696 1696 OVS_VPORT_CMD_NEW); 1697 1697 if (IS_ERR(reply)) { ··· 1774 1772 if (IS_ERR(reply)) 1775 1773 goto exit_unlock; 1776 1774 1775 + err = 0; 1777 1776 ovs_dp_detach_port(vport); 1778 1777 1779 1778 genl_notify(reply, genl_info_net(info), info->snd_portid,
+5 -1
net/openvswitch/flow.c
··· 482 482 return htons(ETH_P_802_2); 483 483 484 484 __skb_pull(skb, sizeof(struct llc_snap_hdr)); 485 - return llc->ethertype; 485 + 486 + if (ntohs(llc->ethertype) >= 1536) 487 + return llc->ethertype; 488 + 489 + return htons(ETH_P_802_2); 486 490 } 487 491 488 492 static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
+1 -2
net/openvswitch/vport-netdev.c
··· 43 43 44 44 /* Make our own copy of the packet. Otherwise we will mangle the 45 45 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). 46 - * (No one comes after us, since we tell handle_bridge() that we took 47 - * the packet.) */ 46 + */ 48 47 skb = skb_share_check(skb, GFP_ATOMIC); 49 48 if (unlikely(!skb)) 50 49 return;
+1 -2
net/openvswitch/vport.c
··· 325 325 * @skb: skb that was received 326 326 * 327 327 * Must be called with rcu_read_lock. The packet cannot be shared and 328 - * skb->data should point to the Ethernet header. The caller must have already 329 - * called compute_ip_summed() to initialize the checksumming fields. 328 + * skb->data should point to the Ethernet header. 330 329 */ 331 330 void ovs_vport_receive(struct vport *vport, struct sk_buff *skb) 332 331 {
+1 -1
net/sctp/associola.c
··· 1079 1079 transports) { 1080 1080 1081 1081 if (transport == active) 1082 - break; 1082 + continue; 1083 1083 list_for_each_entry(chunk, &transport->transmitted, 1084 1084 transmitted_list) { 1085 1085 if (key == chunk->subh.data_hdr->tsn) {
+1 -1
net/sctp/sm_statefuns.c
··· 2082 2082 } 2083 2083 2084 2084 /* Delete the tempory new association. */ 2085 - sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); 2085 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc)); 2086 2086 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); 2087 2087 2088 2088 /* Restore association pointer to provide SCTP command interpeter
+8 -4
net/sunrpc/auth_gss/svcauth_gss.c
··· 447 447 else { 448 448 int N, i; 449 449 450 + /* 451 + * NOTE: we skip uid_valid()/gid_valid() checks here: 452 + * instead, * -1 id's are later mapped to the 453 + * (export-specific) anonymous id by nfsd_setuser. 454 + * 455 + * (But supplementary gid's get no such special 456 + * treatment so are checked for validity here.) 457 + */ 450 458 /* uid */ 451 459 rsci.cred.cr_uid = make_kuid(&init_user_ns, id); 452 - if (!uid_valid(rsci.cred.cr_uid)) 453 - goto out; 454 460 455 461 /* gid */ 456 462 if (get_int(&mesg, &id)) 457 463 goto out; 458 464 rsci.cred.cr_gid = make_kgid(&init_user_ns, id); 459 - if (!gid_valid(rsci.cred.cr_gid)) 460 - goto out; 461 465 462 466 /* number of additional gid's */ 463 467 if (get_int(&mesg, &N))
+1
net/sunrpc/rpc_pipe.c
··· 1175 1175 .kill_sb = rpc_kill_sb, 1176 1176 }; 1177 1177 MODULE_ALIAS_FS("rpc_pipefs"); 1178 + MODULE_ALIAS("rpc_pipefs"); 1178 1179 1179 1180 static void 1180 1181 init_once(void *foo)
+10 -5
net/sunrpc/xprtsock.c
··· 849 849 xs_tcp_shutdown(xprt); 850 850 } 851 851 852 + static void xs_local_destroy(struct rpc_xprt *xprt) 853 + { 854 + xs_close(xprt); 855 + xs_free_peer_addresses(xprt); 856 + xprt_free(xprt); 857 + module_put(THIS_MODULE); 858 + } 859 + 852 860 /** 853 861 * xs_destroy - prepare to shutdown a transport 854 862 * @xprt: doomed transport ··· 870 862 871 863 cancel_delayed_work_sync(&transport->connect_worker); 872 864 873 - xs_close(xprt); 874 - xs_free_peer_addresses(xprt); 875 - xprt_free(xprt); 876 - module_put(THIS_MODULE); 865 + xs_local_destroy(xprt); 877 866 } 878 867 879 868 static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) ··· 2487 2482 .send_request = xs_local_send_request, 2488 2483 .set_retrans_timeout = xprt_set_retrans_timeout_def, 2489 2484 .close = xs_close, 2490 - .destroy = xs_destroy, 2485 + .destroy = xs_local_destroy, 2491 2486 .print_stats = xs_local_print_stats, 2492 2487 }; 2493 2488
+6 -5
scripts/Makefile.headersinst
··· 14 14 include $(kbuild-file) 15 15 16 16 # called may set destination dir (when installing to asm/) 17 - _dst := $(or $(destination-y),$(dst),$(obj)) 17 + _dst := $(if $(destination-y),$(destination-y),$(if $(dst),$(dst),$(obj))) 18 18 19 19 old-kbuild-file := $(srctree)/$(subst uapi/,,$(obj))/Kbuild 20 20 ifneq ($(wildcard $(old-kbuild-file)),) ··· 48 48 output-files := $(addprefix $(installdir)/, $(all-files)) 49 49 50 50 input-files := $(foreach hdr, $(header-y), \ 51 - $(or \ 51 + $(if $(wildcard $(srcdir)/$(hdr)), \ 52 52 $(wildcard $(srcdir)/$(hdr)), \ 53 - $(wildcard $(oldsrcdir)/$(hdr)), \ 54 - $(error Missing UAPI file $(srcdir)/$(hdr)) \ 53 + $(if $(wildcard $(oldsrcdir)/$(hdr)), \ 54 + $(wildcard $(oldsrcdir)/$(hdr)), \ 55 + $(error Missing UAPI file $(srcdir)/$(hdr))) \ 55 56 )) \ 56 57 $(foreach hdr, $(genhdr-y), \ 57 - $(or \ 58 + $(if $(wildcard $(gendir)/$(hdr)), \ 58 59 $(wildcard $(gendir)/$(hdr)), \ 59 60 $(error Missing generated UAPI file $(gendir)/$(hdr)) \ 60 61 ))
+2 -2
security/keys/compat.c
··· 40 40 ARRAY_SIZE(iovstack), 41 41 iovstack, &iov); 42 42 if (ret < 0) 43 - return ret; 43 + goto err; 44 44 if (ret == 0) 45 45 goto no_payload_free; 46 46 47 47 ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid); 48 - 48 + err: 49 49 if (iov != iovstack) 50 50 kfree(iov); 51 51 return ret;
+1 -1
security/keys/process_keys.c
··· 57 57 58 58 kenter("%p{%u}", user, uid); 59 59 60 - if (user->uid_keyring) { 60 + if (user->uid_keyring && user->session_keyring) { 61 61 kleave(" = 0 [exist]"); 62 62 return 0; 63 63 }
+1 -1
security/selinux/xfrm.c
··· 310 310 311 311 if (old_ctx) { 312 312 new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len, 313 - GFP_KERNEL); 313 + GFP_ATOMIC); 314 314 if (!new_ctx) 315 315 return -ENOMEM; 316 316
+4 -4
sound/core/seq/seq_timer.c
··· 290 290 tid.device = SNDRV_TIMER_GLOBAL_SYSTEM; 291 291 err = snd_timer_open(&t, str, &tid, q->queue); 292 292 } 293 - if (err < 0) { 294 - snd_printk(KERN_ERR "seq fatal error: cannot create timer (%i)\n", err); 295 - return err; 296 - } 293 + } 294 + if (err < 0) { 295 + snd_printk(KERN_ERR "seq fatal error: cannot create timer (%i)\n", err); 296 + return err; 297 297 } 298 298 t->callback = snd_seq_timer_interrupt; 299 299 t->callback_data = q;
+6
sound/oss/sequencer.c
··· 545 545 case MIDI_PGM_CHANGE: 546 546 if (seq_mode == SEQ_2) 547 547 { 548 + if (chn > 15) 549 + break; 550 + 548 551 synth_devs[dev]->chn_info[chn].pgm_num = p1; 549 552 if ((int) dev >= num_synths) 550 553 synth_devs[dev]->set_instr(dev, chn, p1); ··· 599 596 case MIDI_PITCH_BEND: 600 597 if (seq_mode == SEQ_2) 601 598 { 599 + if (chn > 15) 600 + break; 601 + 602 602 synth_devs[dev]->chn_info[chn].bender_value = w14; 603 603 604 604 if ((int) dev < num_synths)
+2 -1
sound/pci/asihpi/asihpi.c
··· 2549 2549 2550 2550 static int snd_card_asihpi_mixer_new(struct snd_card_asihpi *asihpi) 2551 2551 { 2552 - struct snd_card *card = asihpi->card; 2552 + struct snd_card *card; 2553 2553 unsigned int idx = 0; 2554 2554 unsigned int subindex = 0; 2555 2555 int err; ··· 2557 2557 2558 2558 if (snd_BUG_ON(!asihpi)) 2559 2559 return -EINVAL; 2560 + card = asihpi->card; 2560 2561 strcpy(card->mixername, "Asihpi Mixer"); 2561 2562 2562 2563 err =
+15 -11
sound/pci/hda/hda_codec.c
··· 494 494 495 495 int snd_hda_get_num_raw_conns(struct hda_codec *codec, hda_nid_t nid) 496 496 { 497 - return get_num_conns(codec, nid) & AC_CLIST_LENGTH; 497 + return snd_hda_get_raw_connections(codec, nid, NULL, 0); 498 498 } 499 499 500 500 /** ··· 516 516 unsigned int shift, num_elems, mask; 517 517 hda_nid_t prev_nid; 518 518 int null_count = 0; 519 - 520 - if (snd_BUG_ON(!conn_list || max_conns <= 0)) 521 - return -EINVAL; 522 519 523 520 parm = get_num_conns(codec, nid); 524 521 if (!parm) ··· 542 545 AC_VERB_GET_CONNECT_LIST, 0); 543 546 if (parm == -1 && codec->bus->rirb_error) 544 547 return -EIO; 545 - conn_list[0] = parm & mask; 548 + if (conn_list) 549 + conn_list[0] = parm & mask; 546 550 return 1; 547 551 } 548 552 ··· 578 580 continue; 579 581 } 580 582 for (n = prev_nid + 1; n <= val; n++) { 581 - if (conns >= max_conns) 582 - return -ENOSPC; 583 - conn_list[conns++] = n; 583 + if (conn_list) { 584 + if (conns >= max_conns) 585 + return -ENOSPC; 586 + conn_list[conns] = n; 587 + } 588 + conns++; 584 589 } 585 590 } else { 586 - if (conns >= max_conns) 587 - return -ENOSPC; 588 - conn_list[conns++] = val; 591 + if (conn_list) { 592 + if (conns >= max_conns) 593 + return -ENOSPC; 594 + conn_list[conns] = val; 595 + } 596 + conns++; 589 597 } 590 598 prev_nid = val; 591 599 }
+15 -13
sound/pci/hda/patch_ca0132.c
··· 3239 3239 struct ca0132_spec *spec = codec->spec; 3240 3240 unsigned int tmp; 3241 3241 3242 - if (!dspload_is_loaded(codec)) 3242 + if (spec->dsp_state != DSP_DOWNLOADED) 3243 3243 return 0; 3244 3244 3245 3245 /* if CrystalVoice if off, vipsource should be 0 */ ··· 4267 4267 */ 4268 4268 static void ca0132_setup_defaults(struct hda_codec *codec) 4269 4269 { 4270 + struct ca0132_spec *spec = codec->spec; 4270 4271 unsigned int tmp; 4271 4272 int num_fx; 4272 4273 int idx, i; 4273 4274 4274 - if (!dspload_is_loaded(codec)) 4275 + if (spec->dsp_state != DSP_DOWNLOADED) 4275 4276 return; 4276 4277 4277 4278 /* out, in effects + voicefx */ ··· 4352 4351 return false; 4353 4352 4354 4353 dsp_os_image = (struct dsp_image_seg *)(fw_entry->data); 4355 - dspload_image(codec, dsp_os_image, 0, 0, true, 0); 4354 + if (dspload_image(codec, dsp_os_image, 0, 0, true, 0)) { 4355 + pr_err("ca0132 dspload_image failed.\n"); 4356 + goto exit_download; 4357 + } 4358 + 4356 4359 dsp_loaded = dspload_wait_loaded(codec); 4357 4360 4361 + exit_download: 4358 4362 release_firmware(fw_entry); 4359 - 4360 4363 4361 4364 return dsp_loaded; 4362 4365 } ··· 4372 4367 #ifndef CONFIG_SND_HDA_CODEC_CA0132_DSP 4373 4368 return; /* NOP */ 4374 4369 #endif 4375 - spec->dsp_state = DSP_DOWNLOAD_INIT; 4376 4370 4377 - if (spec->dsp_state == DSP_DOWNLOAD_INIT) { 4378 - chipio_enable_clocks(codec); 4379 - spec->dsp_state = DSP_DOWNLOADING; 4380 - if (!ca0132_download_dsp_images(codec)) 4381 - spec->dsp_state = DSP_DOWNLOAD_FAILED; 4382 - else 4383 - spec->dsp_state = DSP_DOWNLOADED; 4384 - } 4371 + chipio_enable_clocks(codec); 4372 + spec->dsp_state = DSP_DOWNLOADING; 4373 + if (!ca0132_download_dsp_images(codec)) 4374 + spec->dsp_state = DSP_DOWNLOAD_FAILED; 4375 + else 4376 + spec->dsp_state = DSP_DOWNLOADED; 4385 4377 4386 4378 if (spec->dsp_state == DSP_DOWNLOADED) 4387 4379 ca0132_set_dsp_msr(codec, true);
+4
sound/pci/hda/patch_cirrus.c
··· 506 506 if (!spec) 507 507 return -ENOMEM; 508 508 509 + spec->gen.automute_hook = cs_automute; 510 + 509 511 snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl, 510 512 cs420x_fixups); 511 513 snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE); ··· 894 892 spec = cs_alloc_spec(codec, CS4210_VENDOR_NID); 895 893 if (!spec) 896 894 return -ENOMEM; 895 + 896 + spec->gen.automute_hook = cs_automute; 897 897 898 898 snd_hda_pick_fixup(codec, cs421x_models, cs421x_fixup_tbl, 899 899 cs421x_fixups);
+29
sound/pci/hda/patch_sigmatel.c
··· 815 815 return 0; 816 816 } 817 817 818 + /* check whether a built-in speaker is included in parsed pins */ 819 + static bool has_builtin_speaker(struct hda_codec *codec) 820 + { 821 + struct sigmatel_spec *spec = codec->spec; 822 + hda_nid_t *nid_pin; 823 + int nids, i; 824 + 825 + if (spec->gen.autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT) { 826 + nid_pin = spec->gen.autocfg.line_out_pins; 827 + nids = spec->gen.autocfg.line_outs; 828 + } else { 829 + nid_pin = spec->gen.autocfg.speaker_pins; 830 + nids = spec->gen.autocfg.speaker_outs; 831 + } 832 + 833 + for (i = 0; i < nids; i++) { 834 + unsigned int def_conf = snd_hda_codec_get_pincfg(codec, nid_pin[i]); 835 + if (snd_hda_get_input_pin_attr(def_conf) == INPUT_PIN_ATTR_INT) 836 + return true; 837 + } 838 + return false; 839 + } 840 + 818 841 /* 819 842 * PC beep controls 820 843 */ ··· 3912 3889 stac_free(codec); 3913 3890 return err; 3914 3891 } 3892 + 3893 + /* Don't GPIO-mute speakers if there are no internal speakers, because 3894 + * the GPIO might be necessary for Headphone 3895 + */ 3896 + if (spec->eapd_switch && !has_builtin_speaker(codec)) 3897 + spec->eapd_switch = 0; 3915 3898 3916 3899 codec->proc_widget_hook = stac92hd7x_proc_hook; 3917 3900
+15
sound/usb/card.c
··· 244 244 usb_ifnum_to_if(dev, ctrlif)->intf_assoc; 245 245 246 246 if (!assoc) { 247 + /* 248 + * Firmware writers cannot count to three. So to find 249 + * the IAD on the NuForce UDH-100, also check the next 250 + * interface. 251 + */ 252 + struct usb_interface *iface = 253 + usb_ifnum_to_if(dev, ctrlif + 1); 254 + if (iface && 255 + iface->intf_assoc && 256 + iface->intf_assoc->bFunctionClass == USB_CLASS_AUDIO && 257 + iface->intf_assoc->bFunctionProtocol == UAC_VERSION_2) 258 + assoc = iface->intf_assoc; 259 + } 260 + 261 + if (!assoc) { 247 262 snd_printk(KERN_ERR "Audio class v2 interfaces need an interface association\n"); 248 263 return -EINVAL; 249 264 }
+1 -1
tools/usb/ffs-test.c
··· 38 38 #include <unistd.h> 39 39 #include <tools/le_byteshift.h> 40 40 41 - #include "../../include/linux/usb/functionfs.h" 41 + #include "../../include/uapi/linux/usb/functionfs.h" 42 42 43 43 44 44 /******************** Little Endian Handling ********************************/