Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'v3.8-rc7' into x86/asm

Merge in the updates to head_32.S from the previous urgent branch, as
upcoming patches will make further changes.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

+4453 -2603
+1
Documentation/device-mapper/dm-raid.txt
··· 141 141 1.2.0 Handle creation of arrays that contain failed devices. 142 142 1.3.0 Added support for RAID 10 143 143 1.3.1 Allow device replacement/rebuild for RAID 10 144 + 1.3.2 Fix/improve redundancy checking for RAID10
+3 -2
Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
··· 81 81 Required properties for pin configuration node: 82 82 - atmel,pins: 4 integers array, represents a group of pins mux and config 83 83 setting. The format is atmel,pins = <PIN_BANK PIN_BANK_NUM PERIPH CONFIG>. 84 - The PERIPH 0 means gpio. 84 + The PERIPH 0 means gpio, PERIPH 1 is periph A, PERIPH 2 is periph B... 85 + PIN_BANK 0 is pioA, PIN_BANK 1 is pioB... 85 86 86 87 Bits used for CONFIG: 87 88 PULL_UP (1 << 0): indicate this pin need a pull up. ··· 127 126 pinctrl_dbgu: dbgu-0 { 128 127 atmel,pins = 129 128 <1 14 0x1 0x0 /* PB14 periph A */ 130 - 1 15 0x1 0x1>; /* PB15 periph with pullup */ 129 + 1 15 0x1 0x1>; /* PB15 periph A with pullup */ 131 130 }; 132 131 }; 133 132 };
Documentation/hid/hid-sensor.txt
+1 -1
Documentation/kernel-parameters.txt
··· 2438 2438 real-time workloads. It can also improve energy 2439 2439 efficiency for asymmetric multiprocessors. 2440 2440 2441 - rcu_nocbs_poll [KNL,BOOT] 2441 + rcu_nocb_poll [KNL,BOOT] 2442 2442 Rather than requiring that offloaded CPUs 2443 2443 (specified by rcu_nocbs= above) explicitly 2444 2444 awaken the corresponding "rcuoN" kthreads,
+26 -1
Documentation/x86/boot.txt
··· 57 57 Protocol 2.11: (Kernel 3.6) Added a field for offset of EFI handover 58 58 protocol entry point. 59 59 60 + Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields 61 + to struct boot_params for for loading bzImage and ramdisk 62 + above 4G in 64bit. 63 + 60 64 **** MEMORY LAYOUT 61 65 62 66 The traditional memory map for the kernel loader, used for Image or ··· 186 182 0230/4 2.05+ kernel_alignment Physical addr alignment required for kernel 187 183 0234/1 2.05+ relocatable_kernel Whether kernel is relocatable or not 188 184 0235/1 2.10+ min_alignment Minimum alignment, as a power of two 189 - 0236/2 N/A pad3 Unused 185 + 0236/2 2.12+ xloadflags Boot protocol option flags 190 186 0238/4 2.06+ cmdline_size Maximum size of the kernel command line 191 187 023C/4 2.07+ hardware_subarch Hardware subarchitecture 192 188 0240/8 2.07+ hardware_subarch_data Subarchitecture-specific data ··· 585 581 There may be a considerable performance cost with an excessively 586 582 misaligned kernel. Therefore, a loader should typically try each 587 583 power-of-two alignment from kernel_alignment down to this alignment. 584 + 585 + Field name: xloadflags 586 + Type: read 587 + Offset/size: 0x236/2 588 + Protocol: 2.12+ 589 + 590 + This field is a bitmask. 591 + 592 + Bit 0 (read): XLF_KERNEL_64 593 + - If 1, this kernel has the legacy 64-bit entry point at 0x200. 594 + 595 + Bit 1 (read): XLF_CAN_BE_LOADED_ABOVE_4G 596 + - If 1, kernel/boot_params/cmdline/ramdisk can be above 4G. 597 + 598 + Bit 2 (read): XLF_EFI_HANDOVER_32 599 + - If 1, the kernel supports the 32-bit EFI handoff entry point 600 + given at handover_offset. 601 + 602 + Bit 3 (read): XLF_EFI_HANDOVER_64 603 + - If 1, the kernel supports the 64-bit EFI handoff entry point 604 + given at handover_offset + 0x200. 588 605 589 606 Field name: cmdline_size 590 607 Type: read
+4
Documentation/x86/zero-page.txt
··· 19 19 090/010 ALL hd1_info hd1 disk parameter, OBSOLETE!! 20 20 0A0/010 ALL sys_desc_table System description table (struct sys_desc_table) 21 21 0B0/010 ALL olpc_ofw_header OLPC's OpenFirmware CIF and friends 22 + 0C0/004 ALL ext_ramdisk_image ramdisk_image high 32bits 23 + 0C4/004 ALL ext_ramdisk_size ramdisk_size high 32bits 24 + 0C8/004 ALL ext_cmd_line_ptr cmd_line_ptr high 32bits 22 25 140/080 ALL edid_info Video mode setup (struct edid_info) 23 26 1C0/020 ALL efi_info EFI 32 information (struct efi_info) 24 27 1E0/004 ALL alk_mem_k Alternative mem check, in KB ··· 30 27 1E9/001 ALL eddbuf_entries Number of entries in eddbuf (below) 31 28 1EA/001 ALL edd_mbr_sig_buf_entries Number of entries in edd_mbr_sig_buffer 32 29 (below) 30 + 1EF/001 ALL sentinel Used to detect broken bootloaders 33 31 290/040 ALL edd_mbr_sig_buffer EDD MBR signatures 34 32 2D0/A00 ALL e820_map E820 memory map table 35 33 (array of struct e820entry)
+5 -5
MAINTAINERS
··· 1489 1489 M: Haavard Skinnemoen <hskinnemoen@gmail.com> 1490 1490 M: Hans-Christian Egtvedt <egtvedt@samfundet.no> 1491 1491 W: http://www.atmel.com/products/AVR32/ 1492 - W: http://avr32linux.org/ 1492 + W: http://mirror.egtvedt.no/avr32linux.org/ 1493 1493 W: http://avrfreaks.net/ 1494 1494 S: Maintained 1495 1495 F: arch/avr32/ ··· 2966 2966 F: drivers/net/ethernet/i825xx/eexpress.* 2967 2967 2968 2968 ETHERNET BRIDGE 2969 - M: Stephen Hemminger <shemminger@vyatta.com> 2969 + M: Stephen Hemminger <stephen@networkplumber.org> 2970 2970 L: bridge@lists.linux-foundation.org 2971 2971 L: netdev@vger.kernel.org 2972 2972 W: http://www.linuxfoundation.org/en/Net:Bridge ··· 4905 4905 4906 4906 MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2) 4907 4907 M: Mirko Lindner <mlindner@marvell.com> 4908 - M: Stephen Hemminger <shemminger@vyatta.com> 4908 + M: Stephen Hemminger <stephen@networkplumber.org> 4909 4909 L: netdev@vger.kernel.org 4910 4910 S: Maintained 4911 4911 F: drivers/net/ethernet/marvell/sk* ··· 5180 5180 F: drivers/infiniband/hw/nes/ 5181 5181 5182 5182 NETEM NETWORK EMULATOR 5183 - M: Stephen Hemminger <shemminger@vyatta.com> 5183 + M: Stephen Hemminger <stephen@networkplumber.org> 5184 5184 L: netem@lists.linux-foundation.org 5185 5185 S: Maintained 5186 5186 F: net/sched/sch_netem.c ··· 7088 7088 F: sound/ 7089 7089 7090 7090 SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) 7091 - M: Liam Girdwood <lrg@ti.com> 7091 + M: Liam Girdwood <lgirdwood@gmail.com> 7092 7092 M: Mark Brown <broonie@opensource.wolfsonmicro.com> 7093 7093 T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git 7094 7094 L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+2 -2
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 8 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc4 5 - NAME = Terrified Chipmunk 4 + EXTRAVERSION = -rc7 5 + NAME = Unicycling Gorilla 6 6 7 7 # *DOCUMENTATION* 8 8 # To see a list of typical targets execute "make help"
+1 -1
arch/arm/boot/dts/armada-370-db.dts
··· 26 26 27 27 memory { 28 28 device_type = "memory"; 29 - reg = <0x00000000 0x20000000>; /* 512 MB */ 29 + reg = <0x00000000 0x40000000>; /* 1 GB */ 30 30 }; 31 31 32 32 soc {
+6 -8
arch/arm/boot/dts/armada-xp-mv78230.dtsi
··· 50 50 }; 51 51 52 52 gpio0: gpio@d0018100 { 53 - compatible = "marvell,armadaxp-gpio"; 54 - reg = <0xd0018100 0x40>, 55 - <0xd0018800 0x30>; 53 + compatible = "marvell,orion-gpio"; 54 + reg = <0xd0018100 0x40>; 56 55 ngpios = <32>; 57 56 gpio-controller; 58 57 #gpio-cells = <2>; 59 58 interrupt-controller; 60 59 #interrupts-cells = <2>; 61 - interrupts = <16>, <17>, <18>, <19>; 60 + interrupts = <82>, <83>, <84>, <85>; 62 61 }; 63 62 64 63 gpio1: gpio@d0018140 { 65 - compatible = "marvell,armadaxp-gpio"; 66 - reg = <0xd0018140 0x40>, 67 - <0xd0018840 0x30>; 64 + compatible = "marvell,orion-gpio"; 65 + reg = <0xd0018140 0x40>; 68 66 ngpios = <17>; 69 67 gpio-controller; 70 68 #gpio-cells = <2>; 71 69 interrupt-controller; 72 70 #interrupts-cells = <2>; 73 - interrupts = <20>, <21>, <22>; 71 + interrupts = <87>, <88>, <89>; 74 72 }; 75 73 }; 76 74 };
+9 -12
arch/arm/boot/dts/armada-xp-mv78260.dtsi
··· 51 51 }; 52 52 53 53 gpio0: gpio@d0018100 { 54 - compatible = "marvell,armadaxp-gpio"; 55 - reg = <0xd0018100 0x40>, 56 - <0xd0018800 0x30>; 54 + compatible = "marvell,orion-gpio"; 55 + reg = <0xd0018100 0x40>; 57 56 ngpios = <32>; 58 57 gpio-controller; 59 58 #gpio-cells = <2>; 60 59 interrupt-controller; 61 60 #interrupts-cells = <2>; 62 - interrupts = <16>, <17>, <18>, <19>; 61 + interrupts = <82>, <83>, <84>, <85>; 63 62 }; 64 63 65 64 gpio1: gpio@d0018140 { 66 - compatible = "marvell,armadaxp-gpio"; 67 - reg = <0xd0018140 0x40>, 68 - <0xd0018840 0x30>; 65 + compatible = "marvell,orion-gpio"; 66 + reg = <0xd0018140 0x40>; 69 67 ngpios = <32>; 70 68 gpio-controller; 71 69 #gpio-cells = <2>; 72 70 interrupt-controller; 73 71 #interrupts-cells = <2>; 74 - interrupts = <20>, <21>, <22>, <23>; 72 + interrupts = <87>, <88>, <89>, <90>; 75 73 }; 76 74 77 75 gpio2: gpio@d0018180 { 78 - compatible = "marvell,armadaxp-gpio"; 79 - reg = <0xd0018180 0x40>, 80 - <0xd0018870 0x30>; 76 + compatible = "marvell,orion-gpio"; 77 + reg = <0xd0018180 0x40>; 81 78 ngpios = <3>; 82 79 gpio-controller; 83 80 #gpio-cells = <2>; 84 81 interrupt-controller; 85 82 #interrupts-cells = <2>; 86 - interrupts = <24>; 83 + interrupts = <91>; 87 84 }; 88 85 89 86 ethernet@d0034000 {
+9 -12
arch/arm/boot/dts/armada-xp-mv78460.dtsi
··· 66 66 }; 67 67 68 68 gpio0: gpio@d0018100 { 69 - compatible = "marvell,armadaxp-gpio"; 70 - reg = <0xd0018100 0x40>, 71 - <0xd0018800 0x30>; 69 + compatible = "marvell,orion-gpio"; 70 + reg = <0xd0018100 0x40>; 72 71 ngpios = <32>; 73 72 gpio-controller; 74 73 #gpio-cells = <2>; 75 74 interrupt-controller; 76 75 #interrupts-cells = <2>; 77 - interrupts = <16>, <17>, <18>, <19>; 76 + interrupts = <82>, <83>, <84>, <85>; 78 77 }; 79 78 80 79 gpio1: gpio@d0018140 { 81 - compatible = "marvell,armadaxp-gpio"; 82 - reg = <0xd0018140 0x40>, 83 - <0xd0018840 0x30>; 80 + compatible = "marvell,orion-gpio"; 81 + reg = <0xd0018140 0x40>; 84 82 ngpios = <32>; 85 83 gpio-controller; 86 84 #gpio-cells = <2>; 87 85 interrupt-controller; 88 86 #interrupts-cells = <2>; 89 - interrupts = <20>, <21>, <22>, <23>; 87 + interrupts = <87>, <88>, <89>, <90>; 90 88 }; 91 89 92 90 gpio2: gpio@d0018180 { 93 - compatible = "marvell,armadaxp-gpio"; 94 - reg = <0xd0018180 0x40>, 95 - <0xd0018870 0x30>; 91 + compatible = "marvell,orion-gpio"; 92 + reg = <0xd0018180 0x40>; 96 93 ngpios = <3>; 97 94 gpio-controller; 98 95 #gpio-cells = <2>; 99 96 interrupt-controller; 100 97 #interrupts-cells = <2>; 101 - interrupts = <24>; 98 + interrupts = <91>; 102 99 }; 103 100 104 101 ethernet@d0034000 {
+2 -2
arch/arm/boot/dts/at91rm9200.dtsi
··· 336 336 337 337 i2c@0 { 338 338 compatible = "i2c-gpio"; 339 - gpios = <&pioA 23 0 /* sda */ 340 - &pioA 24 0 /* scl */ 339 + gpios = <&pioA 25 0 /* sda */ 340 + &pioA 26 0 /* scl */ 341 341 >; 342 342 i2c-gpio,sda-open-drain; 343 343 i2c-gpio,scl-open-drain;
+40 -20
arch/arm/boot/dts/at91sam9x5.dtsi
··· 143 143 atmel,pins = 144 144 <0 3 0x1 0x0>; /* PA3 periph A */ 145 145 }; 146 + 147 + pinctrl_usart0_sck: usart0_sck-0 { 148 + atmel,pins = 149 + <0 4 0x1 0x0>; /* PA4 periph A */ 150 + }; 146 151 }; 147 152 148 153 usart1 { ··· 159 154 160 155 pinctrl_usart1_rts: usart1_rts-0 { 161 156 atmel,pins = 162 - <3 27 0x3 0x0>; /* PC27 periph C */ 157 + <2 27 0x3 0x0>; /* PC27 periph C */ 163 158 }; 164 159 165 160 pinctrl_usart1_cts: usart1_cts-0 { 166 161 atmel,pins = 167 - <3 28 0x3 0x0>; /* PC28 periph C */ 162 + <2 28 0x3 0x0>; /* PC28 periph C */ 163 + }; 164 + 165 + pinctrl_usart1_sck: usart1_sck-0 { 166 + atmel,pins = 167 + <2 28 0x3 0x0>; /* PC29 periph C */ 168 168 }; 169 169 }; 170 170 ··· 182 172 183 173 pinctrl_uart2_rts: uart2_rts-0 { 184 174 atmel,pins = 185 - <0 0 0x2 0x0>; /* PB0 periph B */ 175 + <1 0 0x2 0x0>; /* PB0 periph B */ 186 176 }; 187 177 188 178 pinctrl_uart2_cts: uart2_cts-0 { 189 179 atmel,pins = 190 - <0 1 0x2 0x0>; /* PB1 periph B */ 180 + <1 1 0x2 0x0>; /* PB1 periph B */ 181 + }; 182 + 183 + pinctrl_usart2_sck: usart2_sck-0 { 184 + atmel,pins = 185 + <1 2 0x2 0x0>; /* PB2 periph B */ 191 186 }; 192 187 }; 193 188 194 189 usart3 { 195 190 pinctrl_uart3: usart3-0 { 196 191 atmel,pins = 197 - <3 23 0x2 0x1 /* PC22 periph B with pullup */ 198 - 3 23 0x2 0x0>; /* PC23 periph B */ 192 + <2 23 0x2 0x1 /* PC22 periph B with pullup */ 193 + 2 23 0x2 0x0>; /* PC23 periph B */ 199 194 }; 200 195 201 196 pinctrl_usart3_rts: usart3_rts-0 { 202 197 atmel,pins = 203 - <3 24 0x2 0x0>; /* PC24 periph B */ 198 + <2 24 0x2 0x0>; /* PC24 periph B */ 204 199 }; 205 200 206 201 pinctrl_usart3_cts: usart3_cts-0 { 207 202 atmel,pins = 208 - <3 25 0x2 0x0>; /* PC25 periph B */ 203 + <2 25 0x2 0x0>; /* PC25 periph B */ 204 + }; 205 + 206 + pinctrl_usart3_sck: usart3_sck-0 { 207 + atmel,pins = 208 + <2 26 0x2 0x0>; /* PC26 periph B */ 209 209 }; 210 210 }; 211 211 212 212 uart0 { 213 213 pinctrl_uart0: uart0-0 { 214 214 atmel,pins = 215 - <3 8 0x3 0x0 /* PC8 
periph C */ 216 - 3 9 0x3 0x1>; /* PC9 periph C with pullup */ 215 + <2 8 0x3 0x0 /* PC8 periph C */ 216 + 2 9 0x3 0x1>; /* PC9 periph C with pullup */ 217 217 }; 218 218 }; 219 219 220 220 uart1 { 221 221 pinctrl_uart1: uart1-0 { 222 222 atmel,pins = 223 - <3 16 0x3 0x0 /* PC16 periph C */ 224 - 3 17 0x3 0x1>; /* PC17 periph C with pullup */ 223 + <2 16 0x3 0x0 /* PC16 periph C */ 224 + 2 17 0x3 0x1>; /* PC17 periph C with pullup */ 225 225 }; 226 226 }; 227 227 ··· 260 240 261 241 pinctrl_macb0_rmii_mii: macb0_rmii_mii-0 { 262 242 atmel,pins = 263 - <1 8 0x1 0x0 /* PA8 periph A */ 264 - 1 11 0x1 0x0 /* PA11 periph A */ 265 - 1 12 0x1 0x0 /* PA12 periph A */ 266 - 1 13 0x1 0x0 /* PA13 periph A */ 267 - 1 14 0x1 0x0 /* PA14 periph A */ 268 - 1 15 0x1 0x0 /* PA15 periph A */ 269 - 1 16 0x1 0x0 /* PA16 periph A */ 270 - 1 17 0x1 0x0>; /* PA17 periph A */ 243 + <1 8 0x1 0x0 /* PB8 periph A */ 244 + 1 11 0x1 0x0 /* PB11 periph A */ 245 + 1 12 0x1 0x0 /* PB12 periph A */ 246 + 1 13 0x1 0x0 /* PB13 periph A */ 247 + 1 14 0x1 0x0 /* PB14 periph A */ 248 + 1 15 0x1 0x0 /* PB15 periph A */ 249 + 1 16 0x1 0x0 /* PB16 periph A */ 250 + 1 17 0x1 0x0>; /* PB17 periph A */ 271 251 }; 272 252 }; 273 253
+6 -6
arch/arm/boot/dts/cros5250-common.dtsi
··· 96 96 fifo-depth = <0x80>; 97 97 card-detect-delay = <200>; 98 98 samsung,dw-mshc-ciu-div = <3>; 99 - samsung,dw-mshc-sdr-timing = <2 3 3>; 100 - samsung,dw-mshc-ddr-timing = <1 2 3>; 99 + samsung,dw-mshc-sdr-timing = <2 3>; 100 + samsung,dw-mshc-ddr-timing = <1 2>; 101 101 102 102 slot@0 { 103 103 reg = <0>; ··· 120 120 fifo-depth = <0x80>; 121 121 card-detect-delay = <200>; 122 122 samsung,dw-mshc-ciu-div = <3>; 123 - samsung,dw-mshc-sdr-timing = <2 3 3>; 124 - samsung,dw-mshc-ddr-timing = <1 2 3>; 123 + samsung,dw-mshc-sdr-timing = <2 3>; 124 + samsung,dw-mshc-ddr-timing = <1 2>; 125 125 126 126 slot@0 { 127 127 reg = <0>; ··· 141 141 fifo-depth = <0x80>; 142 142 card-detect-delay = <200>; 143 143 samsung,dw-mshc-ciu-div = <3>; 144 - samsung,dw-mshc-sdr-timing = <2 3 3>; 145 - samsung,dw-mshc-ddr-timing = <1 2 3>; 144 + samsung,dw-mshc-sdr-timing = <2 3>; 145 + samsung,dw-mshc-ddr-timing = <1 2>; 146 146 147 147 slot@0 { 148 148 reg = <0>;
+12 -2
arch/arm/boot/dts/dove-cubox.dts
··· 26 26 }; 27 27 28 28 &uart0 { status = "okay"; }; 29 - &sdio0 { status = "okay"; }; 30 29 &sata0 { status = "okay"; }; 31 30 &i2c0 { status = "okay"; }; 31 + 32 + &sdio0 { 33 + status = "okay"; 34 + /* sdio0 card detect is connected to wrong pin on CuBox */ 35 + cd-gpios = <&gpio0 12 1>; 36 + }; 32 37 33 38 &spi0 { 34 39 status = "okay"; ··· 47 42 }; 48 43 49 44 &pinctrl { 50 - pinctrl-0 = <&pmx_gpio_18>; 45 + pinctrl-0 = <&pmx_gpio_12 &pmx_gpio_18>; 51 46 pinctrl-names = "default"; 47 + 48 + pmx_gpio_12: pmx-gpio-12 { 49 + marvell,pins = "mpp12"; 50 + marvell,function = "gpio"; 51 + }; 52 52 53 53 pmx_gpio_18: pmx-gpio-18 { 54 54 marvell,pins = "mpp18";
+4 -4
arch/arm/boot/dts/exynos5250-smdk5250.dts
··· 115 115 fifo-depth = <0x80>; 116 116 card-detect-delay = <200>; 117 117 samsung,dw-mshc-ciu-div = <3>; 118 - samsung,dw-mshc-sdr-timing = <2 3 3>; 119 - samsung,dw-mshc-ddr-timing = <1 2 3>; 118 + samsung,dw-mshc-sdr-timing = <2 3>; 119 + samsung,dw-mshc-ddr-timing = <1 2>; 120 120 121 121 slot@0 { 122 122 reg = <0>; ··· 139 139 fifo-depth = <0x80>; 140 140 card-detect-delay = <200>; 141 141 samsung,dw-mshc-ciu-div = <3>; 142 - samsung,dw-mshc-sdr-timing = <2 3 3>; 143 - samsung,dw-mshc-ddr-timing = <1 2 3>; 142 + samsung,dw-mshc-sdr-timing = <2 3>; 143 + samsung,dw-mshc-ddr-timing = <1 2>; 144 144 145 145 slot@0 { 146 146 reg = <0>;
+16
arch/arm/boot/dts/kirkwood-ns2-common.dtsi
··· 1 1 /include/ "kirkwood.dtsi" 2 + /include/ "kirkwood-6281.dtsi" 2 3 3 4 / { 4 5 chosen { ··· 7 6 }; 8 7 9 8 ocp@f1000000 { 9 + pinctrl: pinctrl@10000 { 10 + pinctrl-0 = < &pmx_spi &pmx_twsi0 &pmx_uart0 11 + &pmx_ns2_sata0 &pmx_ns2_sata1>; 12 + pinctrl-names = "default"; 13 + 14 + pmx_ns2_sata0: pmx-ns2-sata0 { 15 + marvell,pins = "mpp21"; 16 + marvell,function = "sata0"; 17 + }; 18 + pmx_ns2_sata1: pmx-ns2-sata1 { 19 + marvell,pins = "mpp20"; 20 + marvell,function = "sata1"; 21 + }; 22 + }; 23 + 10 24 serial@12000 { 11 25 clock-frequency = <166666667>; 12 26 status = "okay";
+2
arch/arm/boot/dts/kirkwood.dtsi
··· 36 36 reg = <0x10100 0x40>; 37 37 ngpios = <32>; 38 38 interrupt-controller; 39 + #interrupt-cells = <2>; 39 40 interrupts = <35>, <36>, <37>, <38>; 40 41 }; 41 42 ··· 47 46 reg = <0x10140 0x40>; 48 47 ngpios = <18>; 49 48 interrupt-controller; 49 + #interrupt-cells = <2>; 50 50 interrupts = <39>, <40>, <41>; 51 51 }; 52 52
+2
arch/arm/boot/dts/kizbox.dts
··· 48 48 49 49 macb0: ethernet@fffc4000 { 50 50 phy-mode = "mii"; 51 + pinctrl-0 = <&pinctrl_macb_rmii 52 + &pinctrl_macb_rmii_mii_alt>; 51 53 status = "okay"; 52 54 }; 53 55
+4 -2
arch/arm/boot/dts/sunxi.dtsi
··· 60 60 }; 61 61 62 62 uart0: uart@01c28000 { 63 - compatible = "ns8250"; 63 + compatible = "snps,dw-apb-uart"; 64 64 reg = <0x01c28000 0x400>; 65 65 interrupts = <1>; 66 66 reg-shift = <2>; 67 + reg-io-width = <4>; 67 68 clock-frequency = <24000000>; 68 69 status = "disabled"; 69 70 }; 70 71 71 72 uart1: uart@01c28400 { 72 - compatible = "ns8250"; 73 + compatible = "snps,dw-apb-uart"; 73 74 reg = <0x01c28400 0x400>; 74 75 interrupts = <2>; 75 76 reg-shift = <2>; 77 + reg-io-width = <4>; 76 78 clock-frequency = <24000000>; 77 79 status = "disabled"; 78 80 };
-2
arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
··· 45 45 reg = <1>; 46 46 }; 47 47 48 - /* A7s disabled till big.LITTLE patches are available... 49 48 cpu2: cpu@2 { 50 49 device_type = "cpu"; 51 50 compatible = "arm,cortex-a7"; ··· 62 63 compatible = "arm,cortex-a7"; 63 64 reg = <0x102>; 64 65 }; 65 - */ 66 66 }; 67 67 68 68 memory@80000000 {
+23 -2
arch/arm/common/gic.c
··· 351 351 irq_set_chained_handler(irq, gic_handle_cascade_irq); 352 352 } 353 353 354 + static u8 gic_get_cpumask(struct gic_chip_data *gic) 355 + { 356 + void __iomem *base = gic_data_dist_base(gic); 357 + u32 mask, i; 358 + 359 + for (i = mask = 0; i < 32; i += 4) { 360 + mask = readl_relaxed(base + GIC_DIST_TARGET + i); 361 + mask |= mask >> 16; 362 + mask |= mask >> 8; 363 + if (mask) 364 + break; 365 + } 366 + 367 + if (!mask) 368 + pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); 369 + 370 + return mask; 371 + } 372 + 354 373 static void __init gic_dist_init(struct gic_chip_data *gic) 355 374 { 356 375 unsigned int i; ··· 388 369 /* 389 370 * Set all global interrupts to this CPU only. 390 371 */ 391 - cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0); 372 + cpumask = gic_get_cpumask(gic); 373 + cpumask |= cpumask << 8; 374 + cpumask |= cpumask << 16; 392 375 for (i = 32; i < gic_irqs; i += 4) 393 376 writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); 394 377 ··· 421 400 * Get what the GIC says our CPU mask is. 422 401 */ 423 402 BUG_ON(cpu >= NR_GIC_CPU_IF); 424 - cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0); 403 + cpu_mask = gic_get_cpumask(gic); 425 404 gic_cpu_map[cpu] = cpu_mask; 426 405 427 406 /*
+2 -1
arch/arm/configs/at91_dt_defconfig
··· 19 19 CONFIG_SOC_AT91SAM9263=y 20 20 CONFIG_SOC_AT91SAM9G45=y 21 21 CONFIG_SOC_AT91SAM9X5=y 22 + CONFIG_SOC_AT91SAM9N12=y 22 23 CONFIG_MACH_AT91SAM_DT=y 23 24 CONFIG_AT91_PROGRAMMABLE_CLOCKS=y 24 25 CONFIG_AT91_TIMER_HZ=128 ··· 32 31 CONFIG_ZBOOT_ROM_BSS=0x0 33 32 CONFIG_ARM_APPENDED_DTB=y 34 33 CONFIG_ARM_ATAG_DTB_COMPAT=y 35 - CONFIG_CMDLINE="mem=128M console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw" 34 + CONFIG_CMDLINE="console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw" 36 35 CONFIG_KEXEC=y 37 36 CONFIG_AUTO_ZRELADDR=y 38 37 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+1 -1
arch/arm/include/asm/memory.h
··· 37 37 */ 38 38 #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) 39 39 #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) 40 - #define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3) 40 + #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) 41 41 42 42 /* 43 43 * The maximum size of a 26-bit user space task.
+2
arch/arm/kernel/debug.S
··· 100 100 b 1b 101 101 ENDPROC(printch) 102 102 103 + #ifdef CONFIG_MMU 103 104 ENTRY(debug_ll_addr) 104 105 addruart r2, r3, ip 105 106 str r2, [r0] 106 107 str r3, [r1] 107 108 mov pc, lr 108 109 ENDPROC(debug_ll_addr) 110 + #endif 109 111 110 112 #else 111 113
+4 -1
arch/arm/kernel/head.S
··· 246 246 247 247 /* 248 248 * Then map boot params address in r2 if specified. 249 + * We map 2 sections in case the ATAGs/DTB crosses a section boundary. 249 250 */ 250 251 mov r0, r2, lsr #SECTION_SHIFT 251 252 movs r0, r0, lsl #SECTION_SHIFT ··· 254 253 addne r3, r3, #PAGE_OFFSET 255 254 addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER) 256 255 orrne r6, r7, r0 256 + strne r6, [r3], #1 << PMD_ORDER 257 + addne r6, r6, #1 << SECTION_SHIFT 257 258 strne r6, [r3] 258 259 259 260 #ifdef CONFIG_DEBUG_LL ··· 334 331 * as it has already been validated by the primary processor. 335 332 */ 336 333 #ifdef CONFIG_ARM_VIRT_EXT 337 - bl __hyp_stub_install 334 + bl __hyp_stub_install_secondary 338 335 #endif 339 336 safe_svcmode_maskall r9 340 337
+6 -12
arch/arm/kernel/hyp-stub.S
··· 99 99 * immediately. 100 100 */ 101 101 compare_cpu_mode_with_primary r4, r5, r6, r7 102 - bxne lr 102 + movne pc, lr 103 103 104 104 /* 105 105 * Once we have given up on one CPU, we do not try to install the ··· 111 111 */ 112 112 113 113 cmp r4, #HYP_MODE 114 - bxne lr @ give up if the CPU is not in HYP mode 114 + movne pc, lr @ give up if the CPU is not in HYP mode 115 115 116 116 /* 117 117 * Configure HSCTLR to set correct exception endianness/instruction set ··· 120 120 * Eventually, CPU-specific code might be needed -- assume not for now 121 121 * 122 122 * This code relies on the "eret" instruction to synchronize the 123 - * various coprocessor accesses. 123 + * various coprocessor accesses. This is done when we switch to SVC 124 + * (see safe_svcmode_maskall). 124 125 */ 125 126 @ Now install the hypervisor stub: 126 127 adr r7, __hyp_stub_vectors ··· 156 155 1: 157 156 #endif 158 157 159 - bic r7, r4, #MODE_MASK 160 - orr r7, r7, #SVC_MODE 161 - THUMB( orr r7, r7, #PSR_T_BIT ) 162 - msr spsr_cxsf, r7 @ This is SPSR_hyp. 163 - 164 - __MSR_ELR_HYP(14) @ msr elr_hyp, lr 165 - __ERET @ return, switching to SVC mode 166 - @ The boot CPU mode is left in r4. 158 + bx lr @ The boot CPU mode is left in r4. 167 159 ENDPROC(__hyp_stub_install_secondary) 168 160 169 161 __hyp_stub_do_trap: ··· 194 200 @ fall through 195 201 ENTRY(__hyp_set_vectors) 196 202 __HVC(0) 197 - bx lr 203 + mov pc, lr 198 204 ENDPROC(__hyp_set_vectors) 199 205 200 206 #ifndef ZIMAGE
+2
arch/arm/mach-at91/setup.c
··· 105 105 switch (socid) { 106 106 case ARCH_ID_AT91RM9200: 107 107 at91_soc_initdata.type = AT91_SOC_RM9200; 108 + if (at91_soc_initdata.subtype == AT91_SOC_SUBTYPE_NONE) 109 + at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA; 108 110 at91_boot_soc = at91rm9200_soc; 109 111 break; 110 112
+1 -1
arch/arm/mach-exynos/Kconfig
··· 414 414 select CPU_EXYNOS4210 415 415 select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD 416 416 select PINCTRL 417 - select PINCTRL_EXYNOS4 417 + select PINCTRL_EXYNOS 418 418 select USE_OF 419 419 help 420 420 Machine support for Samsung Exynos4 machine with device tree enabled.
+1
arch/arm/mach-imx/Kconfig
··· 851 851 select HAVE_CAN_FLEXCAN if CAN 852 852 select HAVE_IMX_GPC 853 853 select HAVE_IMX_MMDC 854 + select HAVE_IMX_SRC 854 855 select HAVE_SMP 855 856 select MFD_SYSCON 856 857 select PINCTRL
+3
arch/arm/mach-imx/clk-imx6q.c
··· 436 436 for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) 437 437 clk_prepare_enable(clk[clks_init_on[i]]); 438 438 439 + /* Set initial power mode */ 440 + imx6q_set_lpm(WAIT_CLOCKED); 441 + 439 442 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); 440 443 base = of_iomap(np, 0); 441 444 WARN_ON(!base);
+1
arch/arm/mach-imx/common.h
··· 142 142 extern void imx6q_clock_map_io(void); 143 143 144 144 extern void imx_cpu_die(unsigned int cpu); 145 + extern int imx_cpu_kill(unsigned int cpu); 145 146 146 147 #ifdef CONFIG_PM 147 148 extern void imx6q_pm_init(void);
+1 -1
arch/arm/mach-imx/devices/platform-imx-fb.c
··· 54 54 .flags = IORESOURCE_IRQ, 55 55 }, 56 56 }; 57 - return imx_add_platform_device_dmamask("imx-fb", 0, 57 + return imx_add_platform_device_dmamask(data->devid, 0, 58 58 res, ARRAY_SIZE(res), 59 59 pdata, sizeof(*pdata), DMA_BIT_MASK(32)); 60 60 }
+6 -4
arch/arm/mach-imx/hotplug.c
··· 46 46 void imx_cpu_die(unsigned int cpu) 47 47 { 48 48 cpu_enter_lowpower(); 49 - imx_enable_cpu(cpu, false); 49 + cpu_do_idle(); 50 + } 50 51 51 - /* spin here until hardware takes it down */ 52 - while (1) 53 - ; 52 + int imx_cpu_kill(unsigned int cpu) 53 + { 54 + imx_enable_cpu(cpu, false); 55 + return 1; 54 56 }
arch/arm/mach-imx/iram.h include/linux/platform_data/imx-iram.h
+1 -2
arch/arm/mach-imx/iram_alloc.c
··· 22 22 #include <linux/module.h> 23 23 #include <linux/spinlock.h> 24 24 #include <linux/genalloc.h> 25 - 26 - #include "iram.h" 25 + #include "linux/platform_data/imx-iram.h" 27 26 28 27 static unsigned long iram_phys_base; 29 28 static void __iomem *iram_virt_base;
+1
arch/arm/mach-imx/platsmp.c
··· 92 92 .smp_boot_secondary = imx_boot_secondary, 93 93 #ifdef CONFIG_HOTPLUG_CPU 94 94 .cpu_die = imx_cpu_die, 95 + .cpu_kill = imx_cpu_kill, 95 96 #endif 96 97 };
+1
arch/arm/mach-imx/pm-imx6q.c
··· 41 41 cpu_suspend(0, imx6q_suspend_finish); 42 42 imx_smp_prepare(); 43 43 imx_gpc_post_resume(); 44 + imx6q_set_lpm(WAIT_CLOCKED); 44 45 break; 45 46 default: 46 47 return -EINVAL;
+10 -4
arch/arm/mach-integrator/pci_v3.c
··· 475 475 { 476 476 int ret = 0; 477 477 478 + if (!ap_syscon_base) 479 + return -EINVAL; 480 + 478 481 if (nr == 0) { 479 482 sys->mem_offset = PHYS_PCI_MEM_BASE; 480 483 ret = pci_v3_setup_resources(sys); 481 - /* Remap the Integrator system controller */ 482 - ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100); 483 - if (!ap_syscon_base) 484 - return -EINVAL; 485 484 } 486 485 487 486 return ret; ··· 495 496 unsigned long flags; 496 497 unsigned int temp; 497 498 int ret; 499 + 500 + /* Remap the Integrator system controller */ 501 + ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100); 502 + if (!ap_syscon_base) { 503 + pr_err("unable to remap the AP syscon for PCIv3\n"); 504 + return; 505 + } 498 506 499 507 pcibios_min_mem = 0x00100000; 500 508
-38
arch/arm/mach-kirkwood/board-ns2.c
··· 18 18 #include <linux/gpio.h> 19 19 #include <linux/of.h> 20 20 #include "common.h" 21 - #include "mpp.h" 22 21 23 22 static struct mv643xx_eth_platform_data ns2_ge00_data = { 24 23 .phy_addr = MV643XX_ETH_PHY_ADDR(8), 25 - }; 26 - 27 - static unsigned int ns2_mpp_config[] __initdata = { 28 - MPP0_SPI_SCn, 29 - MPP1_SPI_MOSI, 30 - MPP2_SPI_SCK, 31 - MPP3_SPI_MISO, 32 - MPP4_NF_IO6, 33 - MPP5_NF_IO7, 34 - MPP6_SYSRST_OUTn, 35 - MPP7_GPO, /* Fan speed (bit 1) */ 36 - MPP8_TW0_SDA, 37 - MPP9_TW0_SCK, 38 - MPP10_UART0_TXD, 39 - MPP11_UART0_RXD, 40 - MPP12_GPO, /* Red led */ 41 - MPP14_GPIO, /* USB fuse */ 42 - MPP16_GPIO, /* SATA 0 power */ 43 - MPP17_GPIO, /* SATA 1 power */ 44 - MPP18_NF_IO0, 45 - MPP19_NF_IO1, 46 - MPP20_SATA1_ACTn, 47 - MPP21_SATA0_ACTn, 48 - MPP22_GPIO, /* Fan speed (bit 0) */ 49 - MPP23_GPIO, /* Fan power */ 50 - MPP24_GPIO, /* USB mode select */ 51 - MPP25_GPIO, /* Fan rotation fail */ 52 - MPP26_GPIO, /* USB device vbus */ 53 - MPP28_GPIO, /* USB enable host vbus */ 54 - MPP29_GPIO, /* Blue led (slow register) */ 55 - MPP30_GPIO, /* Blue led (command register) */ 56 - MPP31_GPIO, /* Board power off */ 57 - MPP32_GPIO, /* Power button (0 = Released, 1 = Pushed) */ 58 - MPP33_GPO, /* Fan speed (bit 2) */ 59 - 0 60 24 }; 61 25 62 26 #define NS2_GPIO_POWER_OFF 31 ··· 35 71 /* 36 72 * Basic setup. Needs to be called early. 37 73 */ 38 - kirkwood_mpp_conf(ns2_mpp_config); 39 - 40 74 if (of_machine_is_compatible("lacie,netspace_lite_v2") || 41 75 of_machine_is_compatible("lacie,netspace_mini_v2")) 42 76 ns2_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0);
+2
arch/arm/mach-mvebu/Makefile
··· 1 1 ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ 2 2 -I$(srctree)/arch/arm/plat-orion/include 3 3 4 + AFLAGS_coherency_ll.o := -Wa,-march=armv7-a 5 + 4 6 obj-y += system-controller.o 5 7 obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o irq-armada-370-xp.o addr-map.o coherency.o coherency_ll.o pmsu.o 6 8 obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+6
arch/arm/mach-omap2/board-omap4panda.c
··· 397 397 OMAP_PULL_ENA), 398 398 OMAP4_MUX(ABE_MCBSP1_FSX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), 399 399 400 + /* UART2 - BT/FM/GPS shared transport */ 401 + OMAP4_MUX(UART2_CTS, OMAP_PIN_INPUT | OMAP_MUX_MODE0), 402 + OMAP4_MUX(UART2_RTS, OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 403 + OMAP4_MUX(UART2_RX, OMAP_PIN_INPUT | OMAP_MUX_MODE0), 404 + OMAP4_MUX(UART2_TX, OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 405 + 400 406 { .reg_offset = OMAP_MUX_TERMINATOR }, 401 407 }; 402 408
+2
arch/arm/mach-omap2/cclock2420_data.c
··· 1935 1935 omap2_init_clk_hw_omap_clocks(c->lk.clk); 1936 1936 } 1937 1937 1938 + omap2xxx_clkt_vps_late_init(); 1939 + 1938 1940 omap2_clk_disable_autoidle_all(); 1939 1941 1940 1942 omap2_clk_enable_init_clocks(enable_init_clks,
+2
arch/arm/mach-omap2/cclock2430_data.c
··· 2050 2050 omap2_init_clk_hw_omap_clocks(c->lk.clk); 2051 2051 } 2052 2052 2053 + omap2xxx_clkt_vps_late_init(); 2054 + 2053 2055 omap2_clk_disable_autoidle_all(); 2054 2056 2055 2057 omap2_clk_enable_init_clocks(enable_init_clks,
+6 -7
arch/arm/mach-omap2/cclock44xx_data.c
··· 2026 2026 * On OMAP4460 the ABE DPLL fails to turn on if in idle low-power 2027 2027 * state when turning the ABE clock domain. Workaround this by 2028 2028 * locking the ABE DPLL on boot. 2029 + * Lock the ABE DPLL in any case to avoid issues with audio. 2029 2030 */ 2030 - if (cpu_is_omap446x()) { 2031 - rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck); 2032 - if (!rc) 2033 - rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ); 2034 - if (rc) 2035 - pr_err("%s: failed to configure ABE DPLL!\n", __func__); 2036 - } 2031 + rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck); 2032 + if (!rc) 2033 + rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ); 2034 + if (rc) 2035 + pr_err("%s: failed to configure ABE DPLL!\n", __func__); 2037 2036 2038 2037 return 0; 2039 2038 }
+1 -1
arch/arm/mach-omap2/devices.c
··· 639 639 return cnt; 640 640 } 641 641 642 - static void omap_init_ocp2scp(void) 642 + static void __init omap_init_ocp2scp(void) 643 643 { 644 644 struct omap_hwmod *oh; 645 645 struct platform_device *pdev;
+2 -1
arch/arm/mach-omap2/drm.c
··· 25 25 #include <linux/dma-mapping.h> 26 26 #include <linux/platform_data/omap_drm.h> 27 27 28 + #include "soc.h" 28 29 #include "omap_device.h" 29 30 #include "omap_hwmod.h" 30 31 ··· 57 56 oh->name); 58 57 } 59 58 60 - platform_data.omaprev = GET_OMAP_REVISION(); 59 + platform_data.omaprev = GET_OMAP_TYPE; 61 60 62 61 return platform_device_register(&omap_drm_device); 63 62
+5 -1
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
··· 2132 2132 * currently reset very early during boot, before I2C is 2133 2133 * available, so it doesn't seem that we have any choice in 2134 2134 * the kernel other than to avoid resetting it. 2135 + * 2136 + * Also, McPDM needs to be configured to NO_IDLE mode when it 2137 + * is in used otherwise vital clocks will be gated which 2138 + * results 'slow motion' audio playback. 2135 2139 */ 2136 - .flags = HWMOD_EXT_OPT_MAIN_CLK, 2140 + .flags = HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE, 2137 2141 .mpu_irqs = omap44xx_mcpdm_irqs, 2138 2142 .sdma_reqs = omap44xx_mcpdm_sdma_reqs, 2139 2143 .main_clk = "mcpdm_fck",
+2 -6
arch/arm/mach-omap2/timer.c
··· 165 165 struct device_node *np; 166 166 167 167 for_each_matching_node(np, match) { 168 - if (!of_device_is_available(np)) { 169 - of_node_put(np); 168 + if (!of_device_is_available(np)) 170 169 continue; 171 - } 172 170 173 - if (property && !of_get_property(np, property, NULL)) { 174 - of_node_put(np); 171 + if (property && !of_get_property(np, property, NULL)) 175 172 continue; 176 - } 177 173 178 174 of_add_property(np, &device_disabled); 179 175 return np;
+1 -1
arch/arm/mach-realview/include/mach/irqs-eb.h
··· 115 115 /* 116 116 * Only define NR_IRQS if less than NR_IRQS_EB 117 117 */ 118 - #define NR_IRQS_EB (IRQ_EB_GIC_START + 96) 118 + #define NR_IRQS_EB (IRQ_EB_GIC_START + 128) 119 119 120 120 #if defined(CONFIG_MACH_REALVIEW_EB) \ 121 121 && (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB))
+1 -1
arch/arm/mach-s3c64xx/mach-crag6410-module.c
··· 47 47 .bus_num = 0, 48 48 .chip_select = 0, 49 49 .mode = SPI_MODE_0, 50 - .irq = S3C_EINT(5), 50 + .irq = S3C_EINT(4), 51 51 .controller_data = &wm0010_spi_csinfo, 52 52 .platform_data = &wm0010_pdata, 53 53 },
+2
arch/arm/mach-s3c64xx/pm.c
··· 338 338 for (i = 0; i < ARRAY_SIZE(s3c64xx_pm_domains); i++) 339 339 pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false); 340 340 341 + #ifdef CONFIG_S3C_DEV_FB 341 342 if (dev_get_platdata(&s3c_device_fb.dev)) 342 343 pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev); 344 + #endif 343 345 344 346 return 0; 345 347 }
+11 -9
arch/arm/mm/dma-mapping.c
··· 640 640 641 641 if (is_coherent || nommu()) 642 642 addr = __alloc_simple_buffer(dev, size, gfp, &page); 643 - else if (gfp & GFP_ATOMIC) 643 + else if (!(gfp & __GFP_WAIT)) 644 644 addr = __alloc_from_pool(size, &page); 645 645 else if (!IS_ENABLED(CONFIG_CMA)) 646 646 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); ··· 774 774 size_t size, enum dma_data_direction dir, 775 775 void (*op)(const void *, size_t, int)) 776 776 { 777 + unsigned long pfn; 778 + size_t left = size; 779 + 780 + pfn = page_to_pfn(page) + offset / PAGE_SIZE; 781 + offset %= PAGE_SIZE; 782 + 777 783 /* 778 784 * A single sg entry may refer to multiple physically contiguous 779 785 * pages. But we still need to process highmem pages individually. 780 786 * If highmem is not configured then the bulk of this loop gets 781 787 * optimized out. 782 788 */ 783 - size_t left = size; 784 789 do { 785 790 size_t len = left; 786 791 void *vaddr; 787 792 793 + page = pfn_to_page(pfn); 794 + 788 795 if (PageHighMem(page)) { 789 - if (len + offset > PAGE_SIZE) { 790 - if (offset >= PAGE_SIZE) { 791 - page += offset / PAGE_SIZE; 792 - offset %= PAGE_SIZE; 793 - } 796 + if (len + offset > PAGE_SIZE) 794 797 len = PAGE_SIZE - offset; 795 - } 796 798 vaddr = kmap_high_get(page); 797 799 if (vaddr) { 798 800 vaddr += offset; ··· 811 809 op(vaddr, len, dir); 812 810 } 813 811 offset = 0; 814 - page++; 812 + pfn++; 815 813 left -= len; 816 814 } while (left); 817 815 }
+1 -1
arch/arm/mm/mmu.c
··· 283 283 }, 284 284 [MT_MEMORY_SO] = { 285 285 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 286 - L_PTE_MT_UNCACHED, 286 + L_PTE_MT_UNCACHED | L_PTE_XN, 287 287 .prot_l1 = PMD_TYPE_TABLE, 288 288 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S | 289 289 PMD_SECT_UNCACHED | PMD_SECT_XN,
+1 -1
arch/arm/plat-versatile/headsmp.S
··· 20 20 */ 21 21 ENTRY(versatile_secondary_startup) 22 22 mrc p15, 0, r0, c0, c0, 5 23 - and r0, r0, #15 23 + bic r0, #0xff000000 24 24 adr r4, 1f 25 25 ldmia r4, {r5, r6} 26 26 sub r4, r4, r5
+3 -3
arch/arm/vfp/entry.S
··· 22 22 @ IRQs disabled. 23 23 @ 24 24 ENTRY(do_vfp) 25 - #ifdef CONFIG_PREEMPT 25 + #ifdef CONFIG_PREEMPT_COUNT 26 26 ldr r4, [r10, #TI_PREEMPT] @ get preempt count 27 27 add r11, r4, #1 @ increment it 28 28 str r11, [r10, #TI_PREEMPT] ··· 35 35 ENDPROC(do_vfp) 36 36 37 37 ENTRY(vfp_null_entry) 38 - #ifdef CONFIG_PREEMPT 38 + #ifdef CONFIG_PREEMPT_COUNT 39 39 get_thread_info r10 40 40 ldr r4, [r10, #TI_PREEMPT] @ get preempt count 41 41 sub r11, r4, #1 @ decrement it ··· 53 53 54 54 __INIT 55 55 ENTRY(vfp_testing_entry) 56 - #ifdef CONFIG_PREEMPT 56 + #ifdef CONFIG_PREEMPT_COUNT 57 57 get_thread_info r10 58 58 ldr r4, [r10, #TI_PREEMPT] @ get preempt count 59 59 sub r11, r4, #1 @ decrement it
+2 -2
arch/arm/vfp/vfphw.S
··· 168 168 @ else it's one 32-bit instruction, so 169 169 @ always subtract 4 from the following 170 170 @ instruction address. 171 - #ifdef CONFIG_PREEMPT 171 + #ifdef CONFIG_PREEMPT_COUNT 172 172 get_thread_info r10 173 173 ldr r4, [r10, #TI_PREEMPT] @ get preempt count 174 174 sub r11, r4, #1 @ decrement it ··· 192 192 @ not recognised by VFP 193 193 194 194 DBGSTR "not VFP" 195 - #ifdef CONFIG_PREEMPT 195 + #ifdef CONFIG_PREEMPT_COUNT 196 196 get_thread_info r10 197 197 ldr r4, [r10, #TI_PREEMPT] @ get preempt count 198 198 sub r11, r4, #1 @ decrement it
+10
arch/avr32/include/asm/dma-mapping.h
··· 336 336 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 337 337 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 338 338 339 + /* drivers/base/dma-mapping.c */ 340 + extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 341 + void *cpu_addr, dma_addr_t dma_addr, size_t size); 342 + extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, 343 + void *cpu_addr, dma_addr_t dma_addr, 344 + size_t size); 345 + 346 + #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) 347 + #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) 348 + 339 349 #endif /* __ASM_AVR32_DMA_MAPPING_H */
+10
arch/blackfin/include/asm/dma-mapping.h
··· 154 154 _dma_sync((dma_addr_t)vaddr, size, dir); 155 155 } 156 156 157 + /* drivers/base/dma-mapping.c */ 158 + extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 159 + void *cpu_addr, dma_addr_t dma_addr, size_t size); 160 + extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, 161 + void *cpu_addr, dma_addr_t dma_addr, 162 + size_t size); 163 + 164 + #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) 165 + #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) 166 + 157 167 #endif /* _BLACKFIN_DMA_MAPPING_H */
+15
arch/c6x/include/asm/dma-mapping.h
··· 89 89 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f)) 90 90 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h)) 91 91 92 + /* Not supported for now */ 93 + static inline int dma_mmap_coherent(struct device *dev, 94 + struct vm_area_struct *vma, void *cpu_addr, 95 + dma_addr_t dma_addr, size_t size) 96 + { 97 + return -EINVAL; 98 + } 99 + 100 + static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, 101 + void *cpu_addr, dma_addr_t dma_addr, 102 + size_t size) 103 + { 104 + return -EINVAL; 105 + } 106 + 92 107 #endif /* _ASM_C6X_DMA_MAPPING_H */
+10
arch/cris/include/asm/dma-mapping.h
··· 158 158 { 159 159 } 160 160 161 + /* drivers/base/dma-mapping.c */ 162 + extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 163 + void *cpu_addr, dma_addr_t dma_addr, size_t size); 164 + extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, 165 + void *cpu_addr, dma_addr_t dma_addr, 166 + size_t size); 167 + 168 + #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) 169 + #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) 170 + 161 171 162 172 #endif
+15
arch/frv/include/asm/dma-mapping.h
··· 132 132 flush_write_buffers(); 133 133 } 134 134 135 + /* Not supported for now */ 136 + static inline int dma_mmap_coherent(struct device *dev, 137 + struct vm_area_struct *vma, void *cpu_addr, 138 + dma_addr_t dma_addr, size_t size) 139 + { 140 + return -EINVAL; 141 + } 142 + 143 + static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, 144 + void *cpu_addr, dma_addr_t dma_addr, 145 + size_t size) 146 + { 147 + return -EINVAL; 148 + } 149 + 135 150 #endif /* _ASM_DMA_MAPPING_H */
+10
arch/m68k/include/asm/dma-mapping.h
··· 115 115 #include <asm-generic/dma-mapping-broken.h> 116 116 #endif 117 117 118 + /* drivers/base/dma-mapping.c */ 119 + extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 120 + void *cpu_addr, dma_addr_t dma_addr, size_t size); 121 + extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, 122 + void *cpu_addr, dma_addr_t dma_addr, 123 + size_t size); 124 + 125 + #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) 126 + #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) 127 + 118 128 #endif /* _M68K_DMA_MAPPING_H */
+3
arch/mips/bcm47xx/Kconfig
··· 8 8 select SSB_DRIVER_EXTIF 9 9 select SSB_EMBEDDED 10 10 select SSB_B43_PCI_BRIDGE if PCI 11 + select SSB_DRIVER_PCICORE if PCI 11 12 select SSB_PCICORE_HOSTMODE if PCI 12 13 select SSB_DRIVER_GPIO 14 + select GPIOLIB 13 15 default y 14 16 help 15 17 Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support. ··· 27 25 select BCMA_HOST_PCI if PCI 28 26 select BCMA_DRIVER_PCI_HOSTMODE if PCI 29 27 select BCMA_DRIVER_GPIO 28 + select GPIOLIB 30 29 default y 31 30 help 32 31 Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
+5 -4
arch/mips/cavium-octeon/executive/cvmx-l2c.c
··· 30 30 * measurement, and debugging facilities. 31 31 */ 32 32 33 + #include <linux/compiler.h> 33 34 #include <linux/irqflags.h> 34 35 #include <asm/octeon/cvmx.h> 35 36 #include <asm/octeon/cvmx-l2c.h> ··· 286 285 */ 287 286 static void fault_in(uint64_t addr, int len) 288 287 { 289 - volatile char *ptr; 290 - volatile char dummy; 288 + char *ptr; 289 + 291 290 /* 292 291 * Adjust addr and length so we get all cache lines even for 293 292 * small ranges spanning two cache lines. 294 293 */ 295 294 len += addr & CVMX_CACHE_LINE_MASK; 296 295 addr &= ~CVMX_CACHE_LINE_MASK; 297 - ptr = (volatile char *)cvmx_phys_to_ptr(addr); 296 + ptr = cvmx_phys_to_ptr(addr); 298 297 /* 299 298 * Invalidate L1 cache to make sure all loads result in data 300 299 * being in L2. 301 300 */ 302 301 CVMX_DCACHE_INVALIDATE; 303 302 while (len > 0) { 304 - dummy += *ptr; 303 + ACCESS_ONCE(*ptr); 305 304 len -= CVMX_CACHE_LINE_SIZE; 306 305 ptr += CVMX_CACHE_LINE_SIZE; 307 306 }
arch/mips/include/asm/break.h arch/mips/include/uapi/asm/break.h
+1 -1
arch/mips/include/asm/dsp.h
··· 16 16 #include <asm/mipsregs.h> 17 17 18 18 #define DSP_DEFAULT 0x00000000 19 - #define DSP_MASK 0x3ff 19 + #define DSP_MASK 0x3f 20 20 21 21 #define __enable_dsp_hazard() \ 22 22 do { \
+1
arch/mips/include/asm/inst.h
··· 353 353 struct u_format u_format; 354 354 struct c_format c_format; 355 355 struct r_format r_format; 356 + struct p_format p_format; 356 357 struct f_format f_format; 357 358 struct ma_format ma_format; 358 359 struct b_format b_format;
+1 -1
arch/mips/include/asm/mach-pnx833x/war.h
··· 21 21 #define R10000_LLSC_WAR 0 22 22 #define MIPS34K_MISSED_ITLB_WAR 0 23 23 24 - #endif /* __ASM_MIPS_MACH_PNX8550_WAR_H */ 24 + #endif /* __ASM_MIPS_MACH_PNX833X_WAR_H */
+1
arch/mips/include/asm/pgtable-64.h
··· 230 230 #else 231 231 #define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT)) 232 232 #define pfn_pte(pfn, prot) __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot)) 233 + #define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot)) 233 234 #endif 234 235 235 236 #define __pgd_offset(address) pgd_index(address)
+1
arch/mips/include/uapi/asm/Kbuild
··· 3 3 4 4 header-y += auxvec.h 5 5 header-y += bitsperlong.h 6 + header-y += break.h 6 7 header-y += byteorder.h 7 8 header-y += cachectl.h 8 9 header-y += errno.h
+35 -1
arch/mips/kernel/ftrace.c
··· 25 25 #define MCOUNT_OFFSET_INSNS 4 26 26 #endif 27 27 28 + /* Arch override because MIPS doesn't need to run this from stop_machine() */ 29 + void arch_ftrace_update_code(int command) 30 + { 31 + ftrace_modify_all_code(command); 32 + } 33 + 28 34 /* 29 35 * Check if the address is in kernel space 30 36 * ··· 95 89 return 0; 96 90 } 97 91 92 + #ifndef CONFIG_64BIT 93 + static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, 94 + unsigned int new_code2) 95 + { 96 + int faulted; 97 + 98 + safe_store_code(new_code1, ip, faulted); 99 + if (unlikely(faulted)) 100 + return -EFAULT; 101 + ip += 4; 102 + safe_store_code(new_code2, ip, faulted); 103 + if (unlikely(faulted)) 104 + return -EFAULT; 105 + flush_icache_range(ip, ip + 8); /* original ip + 12 */ 106 + return 0; 107 + } 108 + #endif 109 + 98 110 /* 99 111 * The details about the calling site of mcount on MIPS 100 112 * ··· 155 131 * needed. 156 132 */ 157 133 new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; 158 - 134 + #ifdef CONFIG_64BIT 159 135 return ftrace_modify_code(ip, new); 136 + #else 137 + /* 138 + * On 32 bit MIPS platforms, gcc adds a stack adjust 139 + * instruction in the delay slot after the branch to 140 + * mcount and expects mcount to restore the sp on return. 141 + * This is based on a legacy API and does nothing but 142 + * waste instructions so it's being removed at runtime. 143 + */ 144 + return ftrace_modify_code_2(ip, new, INSN_NOP); 145 + #endif 160 146 } 161 147 162 148 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+4 -3
arch/mips/kernel/mcount.S
··· 46 46 PTR_L a5, PT_R9(sp) 47 47 PTR_L a6, PT_R10(sp) 48 48 PTR_L a7, PT_R11(sp) 49 - PTR_ADDIU sp, PT_SIZE 50 49 #else 51 - PTR_ADDIU sp, (PT_SIZE + 8) 50 + PTR_ADDIU sp, PT_SIZE 52 51 #endif 53 52 .endm 54 53 ··· 68 69 .globl _mcount 69 70 _mcount: 70 71 b ftrace_stub 71 - nop 72 + addiu sp,sp,8 73 + 74 + /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ 72 75 lw t1, function_trace_stop 73 76 bnez t1, ftrace_stub 74 77 nop
+1 -1
arch/mips/kernel/vpe.c
··· 705 705 706 706 printk(KERN_WARNING 707 707 "VPE loader: TC %d is already in use.\n", 708 - t->index); 708 + v->tc->index); 709 709 return -ENOEXEC; 710 710 } 711 711 } else {
+1 -1
arch/mips/lantiq/irq.c
··· 408 408 #endif 409 409 410 410 /* tell oprofile which irq to use */ 411 - cp0_perfcount_irq = LTQ_PERF_IRQ; 411 + cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); 412 412 413 413 /* 414 414 * if the timer irq is not one of the mips irqs we need to
+1 -1
arch/mips/lib/delay.c
··· 21 21 " .set noreorder \n" 22 22 " .align 3 \n" 23 23 "1: bnez %0, 1b \n" 24 - #if __SIZEOF_LONG__ == 4 24 + #if BITS_PER_LONG == 32 25 25 " subu %0, 1 \n" 26 26 #else 27 27 " dsubu %0, 1 \n"
-6
arch/mips/mm/ioremap.c
··· 190 190 191 191 EXPORT_SYMBOL(__ioremap); 192 192 EXPORT_SYMBOL(__iounmap); 193 - 194 - int __virt_addr_valid(const volatile void *kaddr) 195 - { 196 - return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); 197 - } 198 - EXPORT_SYMBOL_GPL(__virt_addr_valid);
+6
arch/mips/mm/mmap.c
··· 192 192 193 193 return ret; 194 194 } 195 + 196 + int __virt_addr_valid(const volatile void *kaddr) 197 + { 198 + return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); 199 + } 200 + EXPORT_SYMBOL_GPL(__virt_addr_valid);
+4 -1
arch/mips/netlogic/xlr/setup.c
··· 193 193 194 194 void __init prom_init(void) 195 195 { 196 - int i, *argv, *envp; /* passed as 32 bit ptrs */ 196 + int *argv, *envp; /* passed as 32 bit ptrs */ 197 197 struct psb_info *prom_infop; 198 + #ifdef CONFIG_SMP 199 + int i; 200 + #endif 198 201 199 202 /* truncate to 32 bit and sign extend all args */ 200 203 argv = (int *)(long)(int)fw_arg1;
+1 -1
arch/mips/pci/pci-ar71xx.c
··· 24 24 #include <asm/mach-ath79/pci.h> 25 25 26 26 #define AR71XX_PCI_MEM_BASE 0x10000000 27 - #define AR71XX_PCI_MEM_SIZE 0x08000000 27 + #define AR71XX_PCI_MEM_SIZE 0x07000000 28 28 29 29 #define AR71XX_PCI_WIN0_OFFS 0x10000000 30 30 #define AR71XX_PCI_WIN1_OFFS 0x11000000
+1 -1
arch/mips/pci/pci-ar724x.c
··· 21 21 #define AR724X_PCI_CTRL_SIZE 0x100 22 22 23 23 #define AR724X_PCI_MEM_BASE 0x10000000 24 - #define AR724X_PCI_MEM_SIZE 0x08000000 24 + #define AR724X_PCI_MEM_SIZE 0x04000000 25 25 26 26 #define AR724X_PCI_REG_RESET 0x18 27 27 #define AR724X_PCI_REG_INT_STATUS 0x4c
+15
arch/mn10300/include/asm/dma-mapping.h
··· 168 168 mn10300_dcache_flush_inv(); 169 169 } 170 170 171 + /* Not supported for now */ 172 + static inline int dma_mmap_coherent(struct device *dev, 173 + struct vm_area_struct *vma, void *cpu_addr, 174 + dma_addr_t dma_addr, size_t size) 175 + { 176 + return -EINVAL; 177 + } 178 + 179 + static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, 180 + void *cpu_addr, dma_addr_t dma_addr, 181 + size_t size) 182 + { 183 + return -EINVAL; 184 + } 185 + 171 186 #endif
+15
arch/parisc/include/asm/dma-mapping.h
··· 238 238 /* At the moment, we panic on error for IOMMU resource exaustion */ 239 239 #define dma_mapping_error(dev, x) 0 240 240 241 + /* This API cannot be supported on PA-RISC */ 242 + static inline int dma_mmap_coherent(struct device *dev, 243 + struct vm_area_struct *vma, void *cpu_addr, 244 + dma_addr_t dma_addr, size_t size) 245 + { 246 + return -EINVAL; 247 + } 248 + 249 + static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, 250 + void *cpu_addr, dma_addr_t dma_addr, 251 + size_t size) 252 + { 253 + return -EINVAL; 254 + } 255 + 241 256 #endif
+2
arch/powerpc/kernel/entry_32.S
··· 439 439 ret_from_kernel_thread: 440 440 REST_NVGPRS(r1) 441 441 bl schedule_tail 442 + li r3,0 443 + stw r3,0(r1) 442 444 mtlr r14 443 445 mr r3,r15 444 446 PPC440EP_ERR42
+13
arch/powerpc/kernel/entry_64.S
··· 664 664 ld r4,TI_FLAGS(r9) 665 665 andi. r0,r4,_TIF_NEED_RESCHED 666 666 bne 1b 667 + 668 + /* 669 + * arch_local_irq_restore() from preempt_schedule_irq above may 670 + * enable hard interrupt but we really should disable interrupts 671 + * when we return from the interrupt, and so that we don't get 672 + * interrupted after loading SRR0/1. 673 + */ 674 + #ifdef CONFIG_PPC_BOOK3E 675 + wrteei 0 676 + #else 677 + ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */ 678 + mtmsrd r10,1 /* Update machine state */ 679 + #endif /* CONFIG_PPC_BOOK3E */ 667 680 #endif /* CONFIG_PREEMPT */ 668 681 669 682 .globl fast_exc_return_irq
+3 -2
arch/powerpc/kernel/kgdb.c
··· 154 154 static int kgdb_singlestep(struct pt_regs *regs) 155 155 { 156 156 struct thread_info *thread_info, *exception_thread_info; 157 - struct thread_info *backup_current_thread_info = \ 158 - (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL); 157 + struct thread_info *backup_current_thread_info; 159 158 160 159 if (user_mode(regs)) 161 160 return 0; 162 161 162 + backup_current_thread_info = (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL); 163 163 /* 164 164 * On Book E and perhaps other processors, singlestep is handled on 165 165 * the critical exception stack. This causes current_thread_info() ··· 185 185 /* Restore current_thread_info lastly. */ 186 186 memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info); 187 187 188 + kfree(backup_current_thread_info); 188 189 return 1; 189 190 } 190 191
+7 -2
arch/powerpc/kernel/time.c
··· 494 494 set_dec(DECREMENTER_MAX); 495 495 496 496 /* Some implementations of hotplug will get timer interrupts while 497 - * offline, just ignore these 497 + * offline, just ignore these and we also need to set 498 + * decrementers_next_tb as MAX to make sure __check_irq_replay 499 + * don't replay timer interrupt when return, otherwise we'll trap 500 + * here infinitely :( 498 501 */ 499 - if (!cpu_online(smp_processor_id())) 502 + if (!cpu_online(smp_processor_id())) { 503 + *next_tb = ~(u64)0; 500 504 return; 505 + } 501 506 502 507 /* Conditionally hard-enable interrupts now that the DEC has been 503 508 * bumped to its maximum value
+2
arch/powerpc/kvm/emulate.c
··· 39 39 #define OP_31_XOP_TRAP 4 40 40 #define OP_31_XOP_LWZX 23 41 41 #define OP_31_XOP_TRAP_64 68 42 + #define OP_31_XOP_DCBF 86 42 43 #define OP_31_XOP_LBZX 87 43 44 #define OP_31_XOP_STWX 151 44 45 #define OP_31_XOP_STBX 215 ··· 375 374 emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs); 376 375 break; 377 376 377 + case OP_31_XOP_DCBF: 378 378 case OP_31_XOP_DCBI: 379 379 /* Do nothing. The guest is performing dcbi because 380 380 * hardware DMA is not snooped by the dcache, but
+35 -27
arch/powerpc/mm/hash_low_64.S
··· 115 115 sldi r29,r5,SID_SHIFT - VPN_SHIFT 116 116 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) 117 117 or r29,r28,r29 118 - 119 - /* Calculate hash value for primary slot and store it in r28 */ 120 - rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 121 - rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */ 122 - xor r28,r5,r0 118 + /* 119 + * Calculate hash value for primary slot and store it in r28 120 + * r3 = va, r5 = vsid 121 + * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) 122 + */ 123 + rldicl r0,r3,64-12,48 124 + xor r28,r5,r0 /* hash */ 123 125 b 4f 124 126 125 127 3: /* Calc vpn and put it in r29 */ ··· 132 130 /* 133 131 * calculate hash value for primary slot and 134 132 * store it in r28 for 1T segment 133 + * r3 = va, r5 = vsid 135 134 */ 136 - rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 137 - clrldi r5,r5,40 /* vsid & 0xffffff */ 138 - rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 139 - xor r28,r28,r5 135 + sldi r28,r5,25 /* vsid << 25 */ 136 + /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ 137 + rldicl r0,r3,64-12,36 138 + xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ 140 139 xor r28,r28,r0 /* hash */ 141 140 142 141 /* Convert linux PTE bits into HW equivalents */ ··· 410 407 */ 411 408 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) 412 409 or r29,r28,r29 413 - 414 - /* Calculate hash value for primary slot and store it in r28 */ 415 - rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 416 - rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */ 417 - xor r28,r5,r0 410 + /* 411 + * Calculate hash value for primary slot and store it in r28 412 + * r3 = va, r5 = vsid 413 + * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) 414 + */ 415 + rldicl r0,r3,64-12,48 416 + xor r28,r5,r0 /* hash */ 418 417 b 4f 419 418 420 419 3: /* Calc vpn and put it in r29 */ ··· 431 426 /* 432 427 * Calculate hash value for primary slot and 433 428 * store it in r28 for 1T segment 429 + * r3 = va, r5 = vsid 434 430 */ 435 - rldic r28,r5,25,25 /* (vsid << 
25) & 0x7fffffffff */ 436 - clrldi r5,r5,40 /* vsid & 0xffffff */ 437 - rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 438 - xor r28,r28,r5 431 + sldi r28,r5,25 /* vsid << 25 */ 432 + /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ 433 + rldicl r0,r3,64-12,36 434 + xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ 439 435 xor r28,r28,r0 /* hash */ 440 436 441 437 /* Convert linux PTE bits into HW equivalents */ ··· 758 752 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) 759 753 or r29,r28,r29 760 754 761 - /* Calculate hash value for primary slot and store it in r28 */ 762 - rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 763 - rldicl r0,r3,64-16,52 /* (ea >> 16) & 0xfff */ 764 - xor r28,r5,r0 755 + /* Calculate hash value for primary slot and store it in r28 756 + * r3 = va, r5 = vsid 757 + * r0 = (va >> 16) & ((1ul << (28 - 16)) -1) 758 + */ 759 + rldicl r0,r3,64-16,52 760 + xor r28,r5,r0 /* hash */ 765 761 b 4f 766 762 767 763 3: /* Calc vpn and put it in r29 */ 768 764 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT 769 765 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) 770 766 or r29,r28,r29 771 - 772 767 /* 773 768 * calculate hash value for primary slot and 774 769 * store it in r28 for 1T segment 770 + * r3 = va, r5 = vsid 775 771 */ 776 - rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 777 - clrldi r5,r5,40 /* vsid & 0xffffff */ 778 - rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */ 779 - xor r28,r28,r5 772 + sldi r28,r5,25 /* vsid << 25 */ 773 + /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */ 774 + rldicl r0,r3,64-16,40 775 + xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ 780 776 xor r28,r28,r0 /* hash */ 781 777 782 778 /* Convert linux PTE bits into HW equivalents */
+1 -1
arch/powerpc/oprofile/op_model_power4.c
··· 52 52 for (pmc = 0; pmc < 4; pmc++) { 53 53 psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK 54 54 << (OPROFILE_MAX_PMC_NUM - pmc) 55 - * OPROFILE_MAX_PMC_NUM); 55 + * OPROFILE_PMSEL_FIELD_WIDTH); 56 56 psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc) 57 57 * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL; 58 58 unit = mmcr1 & (OPROFILE_PM_UNIT_MSK
+7
arch/powerpc/platforms/pasemi/cpufreq.c
··· 236 236 237 237 static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) 238 238 { 239 + /* 240 + * We don't support CPU hotplug. Don't unmap after the system 241 + * has already made it to a running state. 242 + */ 243 + if (system_state != SYSTEM_BOOTING) 244 + return 0; 245 + 239 246 if (sdcasr_mapbase) 240 247 iounmap(sdcasr_mapbase); 241 248 if (sdcpwr_mapbase)
+12
arch/s390/include/asm/pgtable.h
··· 1365 1365 __pmd_idte(address, pmdp); 1366 1366 } 1367 1367 1368 + #define __HAVE_ARCH_PMDP_SET_WRPROTECT 1369 + static inline void pmdp_set_wrprotect(struct mm_struct *mm, 1370 + unsigned long address, pmd_t *pmdp) 1371 + { 1372 + pmd_t pmd = *pmdp; 1373 + 1374 + if (pmd_write(pmd)) { 1375 + __pmd_idte(address, pmdp); 1376 + set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd)); 1377 + } 1378 + } 1379 + 1368 1380 static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) 1369 1381 { 1370 1382 pmd_t __pmd;
+1
arch/x86/Kconfig
··· 2139 2139 config OLPC_XO1_SCI 2140 2140 bool "OLPC XO-1 SCI extras" 2141 2141 depends on OLPC && OLPC_XO1_PM 2142 + depends on INPUT=y 2142 2143 select POWER_SUPPLY 2143 2144 select GPIO_CS5535 2144 2145 select MFD_CORE
+2 -2
arch/x86/boot/Makefile
··· 71 71 $(obj)/bzImage: asflags-y := $(SVGA_MODE) 72 72 73 73 quiet_cmd_image = BUILD $@ 74 - cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin > $@ 74 + cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/zoffset.h > $@ 75 75 76 76 $(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE 77 77 $(call if_changed,image) ··· 92 92 $(obj)/voffset.h: vmlinux FORCE 93 93 $(call if_changed,voffset) 94 94 95 - sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p' 95 + sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi_pe_entry\|efi_stub_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p' 96 96 97 97 quiet_cmd_zoffset = ZOFFSET $@ 98 98 cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
+11 -10
arch/x86/boot/compressed/eboot.c
··· 256 256 int i; 257 257 struct setup_data *data; 258 258 259 - data = (struct setup_data *)params->hdr.setup_data; 259 + data = (struct setup_data *)(unsigned long)params->hdr.setup_data; 260 260 261 261 while (data && data->next) 262 - data = (struct setup_data *)data->next; 262 + data = (struct setup_data *)(unsigned long)data->next; 263 263 264 264 status = efi_call_phys5(sys_table->boottime->locate_handle, 265 265 EFI_LOCATE_BY_PROTOCOL, &pci_proto, ··· 295 295 if (!pci) 296 296 continue; 297 297 298 + #ifdef CONFIG_X86_64 298 299 status = efi_call_phys4(pci->attributes, pci, 299 300 EfiPciIoAttributeOperationGet, 0, 300 301 &attributes); 301 - 302 + #else 303 + status = efi_call_phys5(pci->attributes, pci, 304 + EfiPciIoAttributeOperationGet, 0, 0, 305 + &attributes); 306 + #endif 302 307 if (status != EFI_SUCCESS) 303 - continue; 304 - 305 - if (!(attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM)) 306 308 continue; 307 309 308 310 if (!pci->romimage || !pci->romsize) ··· 347 345 memcpy(rom->romdata, pci->romimage, pci->romsize); 348 346 349 347 if (data) 350 - data->next = (uint64_t)rom; 348 + data->next = (unsigned long)rom; 351 349 else 352 - params->hdr.setup_data = (uint64_t)rom; 350 + params->hdr.setup_data = (unsigned long)rom; 353 351 354 352 data = (struct setup_data *)rom; 355 353 ··· 434 432 * Once we've found a GOP supporting ConOut, 435 433 * don't bother looking any further. 436 434 */ 435 + first_gop = gop; 437 436 if (conout_found) 438 437 break; 439 - 440 - first_gop = gop; 441 438 } 442 439 } 443 440
+5 -3
arch/x86/boot/compressed/head_32.S
··· 35 35 #ifdef CONFIG_EFI_STUB 36 36 jmp preferred_addr 37 37 38 - .balign 0x10 39 38 /* 40 39 * We don't need the return address, so set up the stack so 41 - * efi_main() can find its arugments. 40 + * efi_main() can find its arguments. 42 41 */ 42 + ENTRY(efi_pe_entry) 43 43 add $0x4, %esp 44 44 45 45 call make_boot_params ··· 50 50 pushl %eax 51 51 pushl %esi 52 52 pushl %ecx 53 + sub $0x4, %esp 53 54 54 - .org 0x30,0x90 55 + ENTRY(efi_stub_entry) 56 + add $0x4, %esp 55 57 call efi_main 56 58 cmpl $0, %eax 57 59 movl %eax, %esi
+4 -4
arch/x86/boot/compressed/head_64.S
··· 201 201 */ 202 202 #ifdef CONFIG_EFI_STUB 203 203 /* 204 - * The entry point for the PE/COFF executable is 0x210, so only 205 - * legacy boot loaders will execute this jmp. 204 + * The entry point for the PE/COFF executable is efi_pe_entry, so 205 + * only legacy boot loaders will execute this jmp. 206 206 */ 207 207 jmp preferred_addr 208 208 209 - .org 0x210 209 + ENTRY(efi_pe_entry) 210 210 mov %rcx, %rdi 211 211 mov %rdx, %rsi 212 212 pushq %rdi ··· 218 218 popq %rsi 219 219 popq %rdi 220 220 221 - .org 0x230,0x90 221 + ENTRY(efi_stub_entry) 222 222 call efi_main 223 223 movq %rax,%rsi 224 224 cmpq $0,%rax
+29 -10
arch/x86/boot/header.S
··· 21 21 #include <asm/e820.h> 22 22 #include <asm/page_types.h> 23 23 #include <asm/setup.h> 24 + #include <asm/bootparam.h> 24 25 #include "boot.h" 25 26 #include "voffset.h" 26 27 #include "zoffset.h" ··· 256 255 # header, from the old boot sector. 257 256 258 257 .section ".header", "a" 258 + .globl sentinel 259 + sentinel: .byte 0xff, 0xff /* Used to detect broken loaders */ 260 + 259 261 .globl hdr 260 262 hdr: 261 263 setup_sects: .byte 0 /* Filled in by build.c */ ··· 283 279 # Part 2 of the header, from the old setup.S 284 280 285 281 .ascii "HdrS" # header signature 286 - .word 0x020b # header version number (>= 0x0105) 282 + .word 0x020c # header version number (>= 0x0105) 287 283 # or else old loadlin-1.5 will fail) 288 284 .globl realmode_swtch 289 285 realmode_swtch: .word 0, 0 # default_switch, SETUPSEG ··· 301 297 302 298 # flags, unused bits must be zero (RFU) bit within loadflags 303 299 loadflags: 304 - LOADED_HIGH = 1 # If set, the kernel is loaded high 305 - CAN_USE_HEAP = 0x80 # If set, the loader also has set 306 - # heap_end_ptr to tell how much 307 - # space behind setup.S can be used for 308 - # heap purposes. 309 - # Only the loader knows what is free 310 - .byte LOADED_HIGH 300 + .byte LOADED_HIGH # The kernel is to be loaded high 311 301 312 302 setup_move_size: .word 0x8000 # size to move, when setup is not 313 303 # loaded at 0x90000. 
We will move setup ··· 367 369 relocatable_kernel: .byte 0 368 370 #endif 369 371 min_alignment: .byte MIN_KERNEL_ALIGN_LG2 # minimum alignment 370 - pad3: .word 0 372 + 373 + xloadflags: 374 + #ifdef CONFIG_X86_64 375 + # define XLF0 XLF_KERNEL_64 /* 64-bit kernel */ 376 + #else 377 + # define XLF0 0 378 + #endif 379 + #ifdef CONFIG_EFI_STUB 380 + # ifdef CONFIG_X86_64 381 + # define XLF23 XLF_EFI_HANDOVER_64 /* 64-bit EFI handover ok */ 382 + # else 383 + # define XLF23 XLF_EFI_HANDOVER_32 /* 32-bit EFI handover ok */ 384 + # endif 385 + #else 386 + # define XLF23 0 387 + #endif 388 + .word XLF0 | XLF23 371 389 372 390 cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line, 373 391 #added with boot protocol ··· 411 397 #define INIT_SIZE VO_INIT_SIZE 412 398 #endif 413 399 init_size: .long INIT_SIZE # kernel initialization size 414 - handover_offset: .long 0x30 # offset to the handover 400 + handover_offset: 401 + #ifdef CONFIG_EFI_STUB 402 + .long 0x30 # offset to the handover 415 403 # protocol entry point 404 + #else 405 + .long 0 406 + #endif 416 407 417 408 # End of setup header ##################################################### 418 409
+1 -1
arch/x86/boot/setup.ld
··· 13 13 .bstext : { *(.bstext) } 14 14 .bsdata : { *(.bsdata) } 15 15 16 - . = 497; 16 + . = 495; 17 17 .header : { *(.header) } 18 18 .entrytext : { *(.entrytext) } 19 19 .inittext : { *(.inittext) }
+63 -18
arch/x86/boot/tools/build.c
··· 52 52 53 53 #define PECOFF_RELOC_RESERVE 0x20 54 54 55 + unsigned long efi_stub_entry; 56 + unsigned long efi_pe_entry; 57 + unsigned long startup_64; 58 + 55 59 /*----------------------------------------------------------------------*/ 56 60 57 61 static const u32 crctab32[] = { ··· 136 132 137 133 static void usage(void) 138 134 { 139 - die("Usage: build setup system [> image]"); 135 + die("Usage: build setup system [zoffset.h] [> image]"); 140 136 } 141 137 142 138 #ifdef CONFIG_EFI_STUB ··· 210 206 */ 211 207 put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]); 212 208 213 - #ifdef CONFIG_X86_32 214 209 /* 215 - * Address of entry point. 216 - * 217 - * The EFI stub entry point is +16 bytes from the start of 218 - * the .text section. 210 + * Address of entry point for PE/COFF executable 219 211 */ 220 - put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]); 221 - #else 222 - /* 223 - * Address of entry point. startup_32 is at the beginning and 224 - * the 64-bit entry point (startup_64) is always 512 bytes 225 - * after. The EFI stub entry point is 16 bytes after that, as 226 - * the first instruction allows legacy loaders to jump over 227 - * the EFI stub initialisation 228 - */ 229 - put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]); 230 - #endif /* CONFIG_X86_32 */ 212 + put_unaligned_le32(text_start + efi_pe_entry, &buf[pe_header + 0x28]); 231 213 232 214 update_pecoff_section_header(".text", text_start, text_sz); 233 215 } 234 216 235 217 #endif /* CONFIG_EFI_STUB */ 218 + 219 + 220 + /* 221 + * Parse zoffset.h and find the entry points. We could just #include zoffset.h 222 + * but that would mean tools/build would have to be rebuilt every time. It's 223 + * not as if parsing it is hard... 
224 + */ 225 + #define PARSE_ZOFS(p, sym) do { \ 226 + if (!strncmp(p, "#define ZO_" #sym " ", 11+sizeof(#sym))) \ 227 + sym = strtoul(p + 11 + sizeof(#sym), NULL, 16); \ 228 + } while (0) 229 + 230 + static void parse_zoffset(char *fname) 231 + { 232 + FILE *file; 233 + char *p; 234 + int c; 235 + 236 + file = fopen(fname, "r"); 237 + if (!file) 238 + die("Unable to open `%s': %m", fname); 239 + c = fread(buf, 1, sizeof(buf) - 1, file); 240 + if (ferror(file)) 241 + die("read-error on `zoffset.h'"); 242 + buf[c] = 0; 243 + 244 + p = (char *)buf; 245 + 246 + while (p && *p) { 247 + PARSE_ZOFS(p, efi_stub_entry); 248 + PARSE_ZOFS(p, efi_pe_entry); 249 + PARSE_ZOFS(p, startup_64); 250 + 251 + p = strchr(p, '\n'); 252 + while (p && (*p == '\r' || *p == '\n')) 253 + p++; 254 + } 255 + } 236 256 237 257 int main(int argc, char ** argv) 238 258 { ··· 269 241 void *kernel; 270 242 u32 crc = 0xffffffffUL; 271 243 272 - if (argc != 3) 244 + /* Defaults for old kernel */ 245 + #ifdef CONFIG_X86_32 246 + efi_pe_entry = 0x10; 247 + efi_stub_entry = 0x30; 248 + #else 249 + efi_pe_entry = 0x210; 250 + efi_stub_entry = 0x230; 251 + startup_64 = 0x200; 252 + #endif 253 + 254 + if (argc == 4) 255 + parse_zoffset(argv[3]); 256 + else if (argc != 3) 273 257 usage(); 274 258 275 259 /* Copy the setup code */ ··· 339 299 340 300 #ifdef CONFIG_EFI_STUB 341 301 update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); 302 + 303 + #ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */ 304 + efi_stub_entry -= 0x200; 305 + #endif 306 + put_unaligned_le32(efi_stub_entry, &buf[0x264]); 342 307 #endif 343 308 344 309 crc = partial_crc32(buf, i, crc);
+2 -2
arch/x86/ia32/ia32entry.S
··· 207 207 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) 208 208 jnz ia32_ret_from_sys_call 209 209 TRACE_IRQS_ON 210 - sti 210 + ENABLE_INTERRUPTS(CLBR_NONE) 211 211 movl %eax,%esi /* second arg, syscall return value */ 212 212 cmpl $-MAX_ERRNO,%eax /* is it an error ? */ 213 213 jbe 1f ··· 217 217 call __audit_syscall_exit 218 218 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */ 219 219 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi 220 - cli 220 + DISABLE_INTERRUPTS(CLBR_NONE) 221 221 TRACE_IRQS_OFF 222 222 testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) 223 223 jz \exit
+1
arch/x86/include/asm/efi.h
··· 94 94 #endif /* CONFIG_X86_32 */ 95 95 96 96 extern int add_efi_memmap; 97 + extern unsigned long x86_efi_facility; 97 98 extern void efi_set_executable(efi_memory_desc_t *md, bool executable); 98 99 extern int efi_memblock_x86_reserve_range(void); 99 100 extern void efi_call_phys_prelog(void);
+1 -1
arch/x86/include/asm/uv/uv.h
··· 16 16 extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, 17 17 struct mm_struct *mm, 18 18 unsigned long start, 19 - unsigned end, 19 + unsigned long end, 20 20 unsigned int cpu); 21 21 22 22 #else /* X86_UV */
+46 -17
arch/x86/include/uapi/asm/bootparam.h
··· 1 1 #ifndef _ASM_X86_BOOTPARAM_H 2 2 #define _ASM_X86_BOOTPARAM_H 3 3 4 + /* setup_data types */ 5 + #define SETUP_NONE 0 6 + #define SETUP_E820_EXT 1 7 + #define SETUP_DTB 2 8 + #define SETUP_PCI 3 9 + 10 + /* ram_size flags */ 11 + #define RAMDISK_IMAGE_START_MASK 0x07FF 12 + #define RAMDISK_PROMPT_FLAG 0x8000 13 + #define RAMDISK_LOAD_FLAG 0x4000 14 + 15 + /* loadflags */ 16 + #define LOADED_HIGH (1<<0) 17 + #define QUIET_FLAG (1<<5) 18 + #define KEEP_SEGMENTS (1<<6) 19 + #define CAN_USE_HEAP (1<<7) 20 + 21 + /* xloadflags */ 22 + #define XLF_KERNEL_64 (1<<0) 23 + #define XLF_CAN_BE_LOADED_ABOVE_4G (1<<1) 24 + #define XLF_EFI_HANDOVER_32 (1<<2) 25 + #define XLF_EFI_HANDOVER_64 (1<<3) 26 + 27 + #ifndef __ASSEMBLY__ 28 + 4 29 #include <linux/types.h> 5 30 #include <linux/screen_info.h> 6 31 #include <linux/apm_bios.h> ··· 33 8 #include <asm/e820.h> 34 9 #include <asm/ist.h> 35 10 #include <video/edid.h> 36 - 37 - /* setup data types */ 38 - #define SETUP_NONE 0 39 - #define SETUP_E820_EXT 1 40 - #define SETUP_DTB 2 41 - #define SETUP_PCI 3 42 11 43 12 /* extensible setup data list node */ 44 13 struct setup_data { ··· 47 28 __u16 root_flags; 48 29 __u32 syssize; 49 30 __u16 ram_size; 50 - #define RAMDISK_IMAGE_START_MASK 0x07FF 51 - #define RAMDISK_PROMPT_FLAG 0x8000 52 - #define RAMDISK_LOAD_FLAG 0x4000 53 31 __u16 vid_mode; 54 32 __u16 root_dev; 55 33 __u16 boot_flag; ··· 58 42 __u16 kernel_version; 59 43 __u8 type_of_loader; 60 44 __u8 loadflags; 61 - #define LOADED_HIGH (1<<0) 62 - #define QUIET_FLAG (1<<5) 63 - #define KEEP_SEGMENTS (1<<6) 64 - #define CAN_USE_HEAP (1<<7) 65 45 __u16 setup_move_size; 66 46 __u32 code32_start; 67 47 __u32 ramdisk_image; ··· 70 58 __u32 initrd_addr_max; 71 59 __u32 kernel_alignment; 72 60 __u8 relocatable_kernel; 73 - __u8 _pad2[3]; 61 + __u8 min_alignment; 62 + __u16 xloadflags; 74 63 __u32 cmdline_size; 75 64 __u32 hardware_subarch; 76 65 __u64 hardware_subarch_data; ··· 119 106 __u8 hd1_info[16]; /* obsolete! 
*/ /* 0x090 */ 120 107 struct sys_desc_table sys_desc_table; /* 0x0a0 */ 121 108 struct olpc_ofw_header olpc_ofw_header; /* 0x0b0 */ 122 - __u8 _pad4[128]; /* 0x0c0 */ 109 + __u32 ext_ramdisk_image; /* 0x0c0 */ 110 + __u32 ext_ramdisk_size; /* 0x0c4 */ 111 + __u32 ext_cmd_line_ptr; /* 0x0c8 */ 112 + __u8 _pad4[116]; /* 0x0cc */ 123 113 struct edid_info edid_info; /* 0x140 */ 124 114 struct efi_info efi_info; /* 0x1c0 */ 125 115 __u32 alt_mem_k; /* 0x1e0 */ ··· 131 115 __u8 eddbuf_entries; /* 0x1e9 */ 132 116 __u8 edd_mbr_sig_buf_entries; /* 0x1ea */ 133 117 __u8 kbd_status; /* 0x1eb */ 134 - __u8 _pad6[5]; /* 0x1ec */ 118 + __u8 _pad5[3]; /* 0x1ec */ 119 + /* 120 + * The sentinel is set to a nonzero value (0xff) in header.S. 121 + * 122 + * A bootloader is supposed to only take setup_header and put 123 + * it into a clean boot_params buffer. If it turns out that 124 + * it is clumsy or too generous with the buffer, it most 125 + * probably will pick up the sentinel variable too. The fact 126 + * that this variable then is still 0xff will let kernel 127 + * know that some variables in boot_params are invalid and 128 + * kernel should zero out certain portions of boot_params. 129 + */ 130 + __u8 sentinel; /* 0x1ef */ 131 + __u8 _pad6[1]; /* 0x1f0 */ 135 132 struct setup_header hdr; /* setup header */ /* 0x1f1 */ 136 133 __u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)]; 137 134 __u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */ ··· 163 134 X86_NR_SUBARCHS, 164 135 }; 165 136 166 - 137 + #endif /* __ASSEMBLY__ */ 167 138 168 139 #endif /* _ASM_X86_BOOTPARAM_H */
+3 -4
arch/x86/kernel/cpu/intel_cacheinfo.c
··· 298 298 unsigned int); 299 299 }; 300 300 301 - #ifdef CONFIG_AMD_NB 302 - 301 + #if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS) 303 302 /* 304 303 * L3 cache descriptors 305 304 */ ··· 523 524 static struct _cache_attr subcaches = 524 525 __ATTR(subcaches, 0644, show_subcaches, store_subcaches); 525 526 526 - #else /* CONFIG_AMD_NB */ 527 + #else 527 528 #define amd_init_l3_cache(x, y) 528 - #endif /* CONFIG_AMD_NB */ 529 + #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */ 529 530 530 531 static int 531 532 __cpuinit cpuid4_cache_lookup_regs(int index,
+5 -1
arch/x86/kernel/cpu/perf_event_intel.c
··· 2019 2019 break; 2020 2020 2021 2021 case 28: /* Atom */ 2022 - case 54: /* Cedariew */ 2022 + case 38: /* Lincroft */ 2023 + case 39: /* Penwell */ 2024 + case 53: /* Cloverview */ 2025 + case 54: /* Cedarview */ 2023 2026 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, 2024 2027 sizeof(hw_cache_event_ids)); 2025 2028 ··· 2087 2084 pr_cont("SandyBridge events, "); 2088 2085 break; 2089 2086 case 58: /* IvyBridge */ 2087 + case 62: /* IvyBridge EP */ 2090 2088 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 2091 2089 sizeof(hw_cache_event_ids)); 2092 2090 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
+1 -1
arch/x86/kernel/cpu/perf_event_p6.c
··· 19 19 20 20 }; 21 21 22 - static __initconst u64 p6_hw_cache_event_ids 22 + static u64 p6_hw_cache_event_ids 23 23 [PERF_COUNT_HW_CACHE_MAX] 24 24 [PERF_COUNT_HW_CACHE_OP_MAX] 25 25 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+3 -4
arch/x86/kernel/entry_64.S
··· 1781 1781 * Leave room for the "copied" frame 1782 1782 */ 1783 1783 subq $(5*8), %rsp 1784 + CFI_ADJUST_CFA_OFFSET 5*8 1784 1785 1785 1786 /* Copy the stack frame to the Saved frame */ 1786 1787 .rept 5 ··· 1864 1863 nmi_swapgs: 1865 1864 SWAPGS_UNSAFE_STACK 1866 1865 nmi_restore: 1867 - RESTORE_ALL 8 1868 - 1869 - /* Pop the extra iret frame */ 1870 - addq $(5*8), %rsp 1866 + /* Pop the extra iret frame at once */ 1867 + RESTORE_ALL 6*8 1871 1868 1872 1869 /* Clear the NMI executing stack variable */ 1873 1870 movq $0, 5*8(%rsp)
+7 -2
arch/x86/kernel/head_32.S
··· 300 300 leal -__PAGE_OFFSET(%ecx),%esp 301 301 302 302 default_entry: 303 + #define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ 304 + X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ 305 + X86_CR0_PG) 306 + movl $(CR0_STATE & ~X86_CR0_PG),%eax 307 + movl %eax,%cr0 308 + 303 309 /* 304 310 * New page tables may be in 4Mbyte page mode and may 305 311 * be using the global pages. ··· 370 364 */ 371 365 movl $pa(initial_page_table), %eax 372 366 movl %eax,%cr3 /* set the page table pointer.. */ 373 - movl %cr0,%eax 374 - orl $X86_CR0_PG,%eax 367 + movl $CR0_STATE,%eax 375 368 movl %eax,%cr0 /* ..and set paging (PG) bit */ 376 369 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */ 377 370 1:
+3
arch/x86/kernel/msr.c
··· 174 174 unsigned int cpu; 175 175 struct cpuinfo_x86 *c; 176 176 177 + if (!capable(CAP_SYS_RAWIO)) 178 + return -EPERM; 179 + 177 180 cpu = iminor(file->f_path.dentry->d_inode); 178 181 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) 179 182 return -ENXIO; /* No such CPU */
+1 -1
arch/x86/kernel/pci-dma.c
··· 56 56 EXPORT_SYMBOL(x86_dma_fallback_dev); 57 57 58 58 /* Number of entries preallocated for DMA-API debugging */ 59 - #define PREALLOC_DMA_DEBUG_ENTRIES 32768 59 + #define PREALLOC_DMA_DEBUG_ENTRIES 65536 60 60 61 61 int dma_set_mask(struct device *dev, u64 mask) 62 62 {
+1 -1
arch/x86/kernel/reboot.c
··· 584 584 break; 585 585 586 586 case BOOT_EFI: 587 - if (efi_enabled) 587 + if (efi_enabled(EFI_RUNTIME_SERVICES)) 588 588 efi.reset_system(reboot_mode ? 589 589 EFI_RESET_WARM : 590 590 EFI_RESET_COLD,
+14 -14
arch/x86/kernel/setup.c
··· 807 807 #ifdef CONFIG_EFI 808 808 if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, 809 809 "EL32", 4)) { 810 - efi_enabled = 1; 811 - efi_64bit = false; 810 + set_bit(EFI_BOOT, &x86_efi_facility); 812 811 } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, 813 812 "EL64", 4)) { 814 - efi_enabled = 1; 815 - efi_64bit = true; 813 + set_bit(EFI_BOOT, &x86_efi_facility); 814 + set_bit(EFI_64BIT, &x86_efi_facility); 816 815 } 817 - if (efi_enabled && efi_memblock_x86_reserve_range()) 818 - efi_enabled = 0; 816 + 817 + if (efi_enabled(EFI_BOOT)) 818 + efi_memblock_x86_reserve_range(); 819 819 #endif 820 820 821 821 x86_init.oem.arch_setup(); ··· 888 888 889 889 finish_e820_parsing(); 890 890 891 - if (efi_enabled) 891 + if (efi_enabled(EFI_BOOT)) 892 892 efi_init(); 893 893 894 894 dmi_scan_machine(); ··· 971 971 * The EFI specification says that boot service code won't be called 972 972 * after ExitBootServices(). This is, in fact, a lie. 973 973 */ 974 - if (efi_enabled) 974 + if (efi_enabled(EFI_MEMMAP)) 975 975 efi_reserve_boot_services(); 976 976 977 977 /* preallocate 4k for mptable mpc */ ··· 1114 1114 1115 1115 #ifdef CONFIG_VT 1116 1116 #if defined(CONFIG_VGA_CONSOLE) 1117 - if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY)) 1117 + if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY)) 1118 1118 conswitchp = &vga_con; 1119 1119 #elif defined(CONFIG_DUMMY_CONSOLE) 1120 1120 conswitchp = &dummy_con; ··· 1131 1131 register_refined_jiffies(CLOCK_TICK_RATE); 1132 1132 1133 1133 #ifdef CONFIG_EFI 1134 - /* Once setup is done above, disable efi_enabled on mismatched 1135 - * firmware/kernel archtectures since there is no support for 1136 - * runtime services. 1134 + /* Once setup is done above, unmap the EFI memory map on 1135 + * mismatched firmware/kernel archtectures since there is no 1136 + * support for runtime services. 
1137 1137 */ 1138 - if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) { 1138 + if (efi_enabled(EFI_BOOT) && 1139 + IS_ENABLED(CONFIG_X86_64) != efi_enabled(EFI_64BIT)) { 1139 1140 pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); 1140 1141 efi_unmap_memmap(); 1141 - efi_enabled = 0; 1142 1142 } 1143 1143 #endif 1144 1144 }
+35 -24
arch/x86/platform/efi/efi.c
··· 51 51 52 52 #define EFI_DEBUG 1 53 53 54 - int efi_enabled; 55 - EXPORT_SYMBOL(efi_enabled); 56 - 57 54 struct efi __read_mostly efi = { 58 55 .mps = EFI_INVALID_TABLE_ADDR, 59 56 .acpi = EFI_INVALID_TABLE_ADDR, ··· 66 69 67 70 struct efi_memory_map memmap; 68 71 69 - bool efi_64bit; 70 - 71 72 static struct efi efi_phys __initdata; 72 73 static efi_system_table_t efi_systab __initdata; 73 74 74 75 static inline bool efi_is_native(void) 75 76 { 76 - return IS_ENABLED(CONFIG_X86_64) == efi_64bit; 77 + return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT); 77 78 } 79 + 80 + unsigned long x86_efi_facility; 81 + 82 + /* 83 + * Returns 1 if 'facility' is enabled, 0 otherwise. 84 + */ 85 + int efi_enabled(int facility) 86 + { 87 + return test_bit(facility, &x86_efi_facility) != 0; 88 + } 89 + EXPORT_SYMBOL(efi_enabled); 78 90 79 91 static int __init setup_noefi(char *arg) 80 92 { 81 - efi_enabled = 0; 93 + clear_bit(EFI_BOOT, &x86_efi_facility); 82 94 return 0; 83 95 } 84 96 early_param("noefi", setup_noefi); ··· 432 426 433 427 void __init efi_unmap_memmap(void) 434 428 { 429 + clear_bit(EFI_MEMMAP, &x86_efi_facility); 435 430 if (memmap.map) { 436 431 early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); 437 432 memmap.map = NULL; ··· 467 460 468 461 static int __init efi_systab_init(void *phys) 469 462 { 470 - if (efi_64bit) { 463 + if (efi_enabled(EFI_64BIT)) { 471 464 efi_system_table_64_t *systab64; 472 465 u64 tmp = 0; 473 466 ··· 559 552 void *config_tables, *tablep; 560 553 int i, sz; 561 554 562 - if (efi_64bit) 555 + if (efi_enabled(EFI_64BIT)) 563 556 sz = sizeof(efi_config_table_64_t); 564 557 else 565 558 sz = sizeof(efi_config_table_32_t); ··· 579 572 efi_guid_t guid; 580 573 unsigned long table; 581 574 582 - if (efi_64bit) { 575 + if (efi_enabled(EFI_64BIT)) { 583 576 u64 table64; 584 577 guid = ((efi_config_table_64_t *)tablep)->guid; 585 578 table64 = ((efi_config_table_64_t *)tablep)->table; ··· 691 684 if 
(boot_params.efi_info.efi_systab_hi || 692 685 boot_params.efi_info.efi_memmap_hi) { 693 686 pr_info("Table located above 4GB, disabling EFI.\n"); 694 - efi_enabled = 0; 695 687 return; 696 688 } 697 689 efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab; ··· 700 694 ((__u64)boot_params.efi_info.efi_systab_hi<<32)); 701 695 #endif 702 696 703 - if (efi_systab_init(efi_phys.systab)) { 704 - efi_enabled = 0; 697 + if (efi_systab_init(efi_phys.systab)) 705 698 return; 706 - } 699 + 700 + set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); 707 701 708 702 /* 709 703 * Show what we know for posterity ··· 721 715 efi.systab->hdr.revision >> 16, 722 716 efi.systab->hdr.revision & 0xffff, vendor); 723 717 724 - if (efi_config_init(efi.systab->tables, efi.systab->nr_tables)) { 725 - efi_enabled = 0; 718 + if (efi_config_init(efi.systab->tables, efi.systab->nr_tables)) 726 719 return; 727 - } 720 + 721 + set_bit(EFI_CONFIG_TABLES, &x86_efi_facility); 728 722 729 723 /* 730 724 * Note: We currently don't support runtime services on an EFI ··· 733 727 734 728 if (!efi_is_native()) 735 729 pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); 736 - else if (efi_runtime_init()) { 737 - efi_enabled = 0; 738 - return; 730 + else { 731 + if (efi_runtime_init()) 732 + return; 733 + set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility); 739 734 } 740 735 741 - if (efi_memmap_init()) { 742 - efi_enabled = 0; 736 + if (efi_memmap_init()) 743 737 return; 744 - } 738 + 739 + set_bit(EFI_MEMMAP, &x86_efi_facility); 740 + 745 741 #ifdef CONFIG_X86_32 746 742 if (efi_is_native()) { 747 743 x86_platform.get_wallclock = efi_get_time; ··· 949 941 * 950 942 * Call EFI services through wrapper functions. 
951 943 */ 952 - efi.runtime_version = efi_systab.fw_revision; 944 + efi.runtime_version = efi_systab.hdr.revision; 953 945 efi.get_time = virt_efi_get_time; 954 946 efi.set_time = virt_efi_set_time; 955 947 efi.get_wakeup_time = virt_efi_get_wakeup_time; ··· 976 968 { 977 969 efi_memory_desc_t *md; 978 970 void *p; 971 + 972 + if (!efi_enabled(EFI_MEMMAP)) 973 + return 0; 979 974 980 975 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { 981 976 md = p;
+17 -5
arch/x86/platform/efi/efi_64.c
··· 38 38 #include <asm/cacheflush.h> 39 39 #include <asm/fixmap.h> 40 40 41 - static pgd_t save_pgd __initdata; 41 + static pgd_t *save_pgd __initdata; 42 42 static unsigned long efi_flags __initdata; 43 43 44 44 static void __init early_code_mapping_set_exec(int executable) ··· 61 61 void __init efi_call_phys_prelog(void) 62 62 { 63 63 unsigned long vaddress; 64 + int pgd; 65 + int n_pgds; 64 66 65 67 early_code_mapping_set_exec(1); 66 68 local_irq_save(efi_flags); 67 - vaddress = (unsigned long)__va(0x0UL); 68 - save_pgd = *pgd_offset_k(0x0UL); 69 - set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress)); 69 + 70 + n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); 71 + save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL); 72 + 73 + for (pgd = 0; pgd < n_pgds; pgd++) { 74 + save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE); 75 + vaddress = (unsigned long)__va(pgd * PGDIR_SIZE); 76 + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress)); 77 + } 70 78 __flush_tlb_all(); 71 79 } 72 80 ··· 83 75 /* 84 76 * After the lock is released, the original page table is restored. 85 77 */ 86 - set_pgd(pgd_offset_k(0x0UL), save_pgd); 78 + int pgd; 79 + int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); 80 + for (pgd = 0; pgd < n_pgds; pgd++) 81 + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); 82 + kfree(save_pgd); 87 83 __flush_tlb_all(); 88 84 local_irq_restore(efi_flags); 89 85 early_code_mapping_set_exec(0);
+7 -3
arch/x86/platform/uv/tlb_uv.c
··· 1034 1034 * globally purge translation cache of a virtual address or all TLB's 1035 1035 * @cpumask: mask of all cpu's in which the address is to be removed 1036 1036 * @mm: mm_struct containing virtual address range 1037 - * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu) 1037 + * @start: start virtual address to be removed from TLB 1038 + * @end: end virtual address to be remove from TLB 1038 1039 * @cpu: the current cpu 1039 1040 * 1040 1041 * This is the entry point for initiating any UV global TLB shootdown. ··· 1057 1056 */ 1058 1057 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, 1059 1058 struct mm_struct *mm, unsigned long start, 1060 - unsigned end, unsigned int cpu) 1059 + unsigned long end, unsigned int cpu) 1061 1060 { 1062 1061 int locals = 0; 1063 1062 int remotes = 0; ··· 1114 1113 1115 1114 record_send_statistics(stat, locals, hubs, remotes, bau_desc); 1116 1115 1117 - bau_desc->payload.address = start; 1116 + if (!end || (end - start) <= PAGE_SIZE) 1117 + bau_desc->payload.address = start; 1118 + else 1119 + bau_desc->payload.address = TLB_FLUSH_ALL; 1118 1120 bau_desc->payload.sending_cpu = cpu; 1119 1121 /* 1120 1122 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
+8 -2
arch/x86/tools/insn_sanity.c
··· 55 55 static void usage(const char *err) 56 56 { 57 57 if (err) 58 - fprintf(stderr, "Error: %s\n\n", err); 58 + fprintf(stderr, "%s: Error: %s\n\n", prog, err); 59 59 fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog); 60 60 fprintf(stderr, "\t-y 64bit mode\n"); 61 61 fprintf(stderr, "\t-n 32bit mode\n"); ··· 269 269 insns++; 270 270 } 271 271 272 - fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed); 272 + fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", 273 + prog, 274 + (errors) ? "Failure" : "Success", 275 + insns, 276 + (input_file) ? "given" : "random", 277 + errors, 278 + seed); 273 279 274 280 return errors ? 1 : 0; 275 281 }
+4 -2
arch/x86/tools/relocs.c
··· 814 814 read_relocs(fp); 815 815 if (show_absolute_syms) { 816 816 print_absolute_symbols(); 817 - return 0; 817 + goto out; 818 818 } 819 819 if (show_absolute_relocs) { 820 820 print_absolute_relocs(); 821 - return 0; 821 + goto out; 822 822 } 823 823 emit_relocs(as_text, use_real_mode); 824 + out: 825 + fclose(fp); 824 826 return 0; 825 827 }
+15
arch/xtensa/include/asm/dma-mapping.h
··· 170 170 consistent_sync(vaddr, size, direction); 171 171 } 172 172 173 + /* Not supported for now */ 174 + static inline int dma_mmap_coherent(struct device *dev, 175 + struct vm_area_struct *vma, void *cpu_addr, 176 + dma_addr_t dma_addr, size_t size) 177 + { 178 + return -EINVAL; 179 + } 180 + 181 + static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, 182 + void *cpu_addr, dma_addr_t dma_addr, 183 + size_t size) 184 + { 185 + return -EINVAL; 186 + } 187 + 173 188 #endif /* _XTENSA_DMA_MAPPING_H */
+32 -10
block/genhd.c
··· 35 35 36 36 static struct device_type disk_type; 37 37 38 + static void disk_check_events(struct disk_events *ev, 39 + unsigned int *clearing_ptr); 38 40 static void disk_alloc_events(struct gendisk *disk); 39 41 static void disk_add_events(struct gendisk *disk); 40 42 static void disk_del_events(struct gendisk *disk); ··· 1551 1549 const struct block_device_operations *bdops = disk->fops; 1552 1550 struct disk_events *ev = disk->ev; 1553 1551 unsigned int pending; 1552 + unsigned int clearing = mask; 1554 1553 1555 1554 if (!ev) { 1556 1555 /* for drivers still using the old ->media_changed method */ ··· 1561 1558 return 0; 1562 1559 } 1563 1560 1564 - /* tell the workfn about the events being cleared */ 1561 + disk_block_events(disk); 1562 + 1563 + /* 1564 + * store the union of mask and ev->clearing on the stack so that the 1565 + * race with disk_flush_events does not cause ambiguity (ev->clearing 1566 + * can still be modified even if events are blocked). 1567 + */ 1565 1568 spin_lock_irq(&ev->lock); 1566 - ev->clearing |= mask; 1569 + clearing |= ev->clearing; 1570 + ev->clearing = 0; 1567 1571 spin_unlock_irq(&ev->lock); 1568 1572 1569 - /* uncondtionally schedule event check and wait for it to finish */ 1570 - disk_block_events(disk); 1571 - queue_delayed_work(system_freezable_wq, &ev->dwork, 0); 1572 - flush_delayed_work(&ev->dwork); 1573 - __disk_unblock_events(disk, false); 1573 + disk_check_events(ev, &clearing); 1574 + /* 1575 + * if ev->clearing is not 0, the disk_flush_events got called in the 1576 + * middle of this function, so we want to run the workfn without delay. 1577 + */ 1578 + __disk_unblock_events(disk, ev->clearing ? 
true : false); 1574 1579 1575 1580 /* then, fetch and clear pending events */ 1576 1581 spin_lock_irq(&ev->lock); 1577 - WARN_ON_ONCE(ev->clearing & mask); /* cleared by workfn */ 1578 1582 pending = ev->pending & mask; 1579 1583 ev->pending &= ~mask; 1580 1584 spin_unlock_irq(&ev->lock); 1585 + WARN_ON_ONCE(clearing & mask); 1581 1586 1582 1587 return pending; 1583 1588 } 1584 1589 1590 + /* 1591 + * Separate this part out so that a different pointer for clearing_ptr can be 1592 + * passed in for disk_clear_events. 1593 + */ 1585 1594 static void disk_events_workfn(struct work_struct *work) 1586 1595 { 1587 1596 struct delayed_work *dwork = to_delayed_work(work); 1588 1597 struct disk_events *ev = container_of(dwork, struct disk_events, dwork); 1598 + 1599 + disk_check_events(ev, &ev->clearing); 1600 + } 1601 + 1602 + static void disk_check_events(struct disk_events *ev, 1603 + unsigned int *clearing_ptr) 1604 + { 1589 1605 struct gendisk *disk = ev->disk; 1590 1606 char *envp[ARRAY_SIZE(disk_uevents) + 1] = { }; 1591 - unsigned int clearing = ev->clearing; 1607 + unsigned int clearing = *clearing_ptr; 1592 1608 unsigned int events; 1593 1609 unsigned long intv; 1594 1610 int nr_events = 0, i; ··· 1620 1598 1621 1599 events &= ~ev->pending; 1622 1600 ev->pending |= events; 1623 - ev->clearing &= ~clearing; 1601 + *clearing_ptr &= ~clearing; 1624 1602 1625 1603 intv = disk_events_poll_jiffies(disk); 1626 1604 if (!ev->block && intv)
+3
drivers/acpi/apei/apei-base.c
··· 590 590 if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 && 591 591 *access_bit_width < 32) 592 592 *access_bit_width = 32; 593 + else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 && 594 + *access_bit_width < 64) 595 + *access_bit_width = 64; 593 596 594 597 if ((bit_width + bit_offset) > *access_bit_width) { 595 598 pr_warning(FW_BUG APEI_PFX
+1 -1
drivers/acpi/osl.c
··· 250 250 return acpi_rsdp; 251 251 #endif 252 252 253 - if (efi_enabled) { 253 + if (efi_enabled(EFI_CONFIG_TABLES)) { 254 254 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) 255 255 return efi.acpi20; 256 256 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+4
drivers/acpi/processor_idle.c
··· 958 958 return -EINVAL; 959 959 } 960 960 961 + if (!dev) 962 + return -EINVAL; 963 + 961 964 dev->cpu = pr->id; 962 965 963 966 if (max_cstate == 0) ··· 1152 1149 } 1153 1150 1154 1151 /* Populate Updated C-state information */ 1152 + acpi_processor_get_power_info(pr); 1155 1153 acpi_processor_setup_cpuidle_states(pr); 1156 1154 1157 1155 /* Enable all cpuidle devices */
+7
drivers/acpi/processor_perflib.c
··· 340 340 if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) 341 341 || boot_cpu_data.x86 == 0x11) { 342 342 rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi); 343 + /* 344 + * MSR C001_0064+: 345 + * Bit 63: PstateEn. Read-write. If set, the P-state is valid. 346 + */ 347 + if (!(hi & BIT(31))) 348 + return; 349 + 343 350 fid = lo & 0x3f; 344 351 did = (lo >> 6) & 7; 345 352 if (boot_cpu_data.x86 == 0x10)
+73 -73
drivers/atm/iphase.h
··· 636 636 #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
637 637 #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE
638 638
639 - typedef volatile u_int freg_t;
639 + typedef volatile u_int ffreg_t;
640 640 typedef u_int rreg_t;
641 641
642 642 typedef struct _ffredn_t {
643 - freg_t idlehead_high; /* Idle cell header (high) */
644 - freg_t idlehead_low; /* Idle cell header (low) */
645 - freg_t maxrate; /* Maximum rate */
646 - freg_t stparms; /* Traffic Management Parameters */
647 - freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
648 - freg_t rm_type; /* */
649 - u_int filler5[0x17 - 0x06];
650 - freg_t cmd_reg; /* Command register */
651 - u_int filler18[0x20 - 0x18];
652 - freg_t cbr_base; /* CBR Pointer Base */
653 - freg_t vbr_base; /* VBR Pointer Base */
654 - freg_t abr_base; /* ABR Pointer Base */
655 - freg_t ubr_base; /* UBR Pointer Base */
656 - u_int filler24;
657 - freg_t vbrwq_base; /* VBR Wait Queue Base */
658 - freg_t abrwq_base; /* ABR Wait Queue Base */
659 - freg_t ubrwq_base; /* UBR Wait Queue Base */
660 - freg_t vct_base; /* Main VC Table Base */
661 - freg_t vcte_base; /* Extended Main VC Table Base */
662 - u_int filler2a[0x2C - 0x2A];
663 - freg_t cbr_tab_beg; /* CBR Table Begin */
664 - freg_t cbr_tab_end; /* CBR Table End */
665 - freg_t cbr_pointer; /* CBR Pointer */
666 - u_int filler2f[0x30 - 0x2F];
667 - freg_t prq_st_adr; /* Packet Ready Queue Start Address */
668 - freg_t prq_ed_adr; /* Packet Ready Queue End Address */
669 - freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
670 - freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
671 - freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
672 - freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
673 - freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
674 - freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
675 - u_int filler38[0x40 - 0x38];
676 - freg_t queue_base; /* Base address for PRQ and TCQ */
677 - freg_t desc_base; /* Base address of descriptor table */
678 - u_int filler42[0x45 - 0x42];
679 - freg_t mode_reg_0; /* Mode register 0 */
680 - freg_t mode_reg_1; /* Mode register 1 */
681 - freg_t intr_status_reg;/* Interrupt Status register */
682 - freg_t mask_reg; /* Mask Register */
683 - freg_t cell_ctr_high1; /* Total cell transfer count (high) */
684 - freg_t cell_ctr_lo1; /* Total cell transfer count (low) */
685 - freg_t state_reg; /* Status register */
686 - u_int filler4c[0x58 - 0x4c];
687 - freg_t curr_desc_num; /* Contains the current descriptor num */
688 - freg_t next_desc; /* Next descriptor */
689 - freg_t next_vc; /* Next VC */
690 - u_int filler5b[0x5d - 0x5b];
691 - freg_t present_slot_cnt;/* Present slot count */
692 - u_int filler5e[0x6a - 0x5e];
693 - freg_t new_desc_num; /* New descriptor number */
694 - freg_t new_vc; /* New VC */
695 - freg_t sched_tbl_ptr; /* Schedule table pointer */
696 - freg_t vbrwq_wptr; /* VBR wait queue write pointer */
697 - freg_t vbrwq_rptr; /* VBR wait queue read pointer */
698 - freg_t abrwq_wptr; /* ABR wait queue write pointer */
699 - freg_t abrwq_rptr; /* ABR wait queue read pointer */
700 - freg_t ubrwq_wptr; /* UBR wait queue write pointer */
701 - freg_t ubrwq_rptr; /* UBR wait queue read pointer */
702 - freg_t cbr_vc; /* CBR VC */
703 - freg_t vbr_sb_vc; /* VBR SB VC */
704 - freg_t abr_sb_vc; /* ABR SB VC */
705 - freg_t ubr_sb_vc; /* UBR SB VC */
706 - freg_t vbr_next_link; /* VBR next link */
707 - freg_t abr_next_link; /* ABR next link */
708 - freg_t ubr_next_link; /* UBR next link */
709 - u_int filler7a[0x7c-0x7a];
710 - freg_t out_rate_head; /* Out of rate head */
711 - u_int filler7d[0xca-0x7d]; /* pad out to full address space */
712 - freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
713 - freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
714 - u_int fillercc[0x100-0xcc]; /* pad out to full address space */
643 + ffreg_t idlehead_high; /* Idle cell header (high) */
644 + ffreg_t idlehead_low; /* Idle cell header (low) */
645 + ffreg_t maxrate; /* Maximum rate */
646 + ffreg_t stparms; /* Traffic Management Parameters */
647 + ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
648 + ffreg_t rm_type; /* */
649 + u_int filler5[0x17 - 0x06];
650 + ffreg_t cmd_reg; /* Command register */
651 + u_int filler18[0x20 - 0x18];
652 + ffreg_t cbr_base; /* CBR Pointer Base */
653 + ffreg_t vbr_base; /* VBR Pointer Base */
654 + ffreg_t abr_base; /* ABR Pointer Base */
655 + ffreg_t ubr_base; /* UBR Pointer Base */
656 + u_int filler24;
657 + ffreg_t vbrwq_base; /* VBR Wait Queue Base */
658 + ffreg_t abrwq_base; /* ABR Wait Queue Base */
659 + ffreg_t ubrwq_base; /* UBR Wait Queue Base */
660 + ffreg_t vct_base; /* Main VC Table Base */
661 + ffreg_t vcte_base; /* Extended Main VC Table Base */
662 + u_int filler2a[0x2C - 0x2A];
663 + ffreg_t cbr_tab_beg; /* CBR Table Begin */
664 + ffreg_t cbr_tab_end; /* CBR Table End */
665 + ffreg_t cbr_pointer; /* CBR Pointer */
666 + u_int filler2f[0x30 - 0x2F];
667 + ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */
668 + ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */
669 + ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
670 + ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
671 + ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
672 + ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
673 + ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
674 + ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
675 + u_int filler38[0x40 - 0x38];
676 + ffreg_t queue_base; /* Base address for PRQ and TCQ */
677 + ffreg_t desc_base; /* Base address of descriptor table */
678 + u_int filler42[0x45 - 0x42];
679 + ffreg_t mode_reg_0; /* Mode register 0 */
680 + ffreg_t mode_reg_1; /* Mode register 1 */
681 + ffreg_t intr_status_reg;/* Interrupt Status register */
682 + ffreg_t mask_reg; /* Mask Register */
683 + ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */
684 + ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */
685 + ffreg_t state_reg; /* Status register */
686 + u_int filler4c[0x58 - 0x4c];
687 + ffreg_t curr_desc_num; /* Contains the current descriptor num */
688 + ffreg_t next_desc; /* Next descriptor */
689 + ffreg_t next_vc; /* Next VC */
690 + u_int filler5b[0x5d - 0x5b];
691 + ffreg_t present_slot_cnt;/* Present slot count */
692 + u_int filler5e[0x6a - 0x5e];
693 + ffreg_t new_desc_num; /* New descriptor number */
694 + ffreg_t new_vc; /* New VC */
695 + ffreg_t sched_tbl_ptr; /* Schedule table pointer */
696 + ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */
697 + ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */
698 + ffreg_t abrwq_wptr; /* ABR wait queue write pointer */
699 + ffreg_t abrwq_rptr; /* ABR wait queue read pointer */
700 + ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */
701 + ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */
702 + ffreg_t cbr_vc; /* CBR VC */
703 + ffreg_t vbr_sb_vc; /* VBR SB VC */
704 + ffreg_t abr_sb_vc; /* ABR SB VC */
705 + ffreg_t ubr_sb_vc; /* UBR SB VC */
706 + ffreg_t vbr_next_link; /* VBR next link */
707 + ffreg_t abr_next_link; /* ABR next link */
708 + ffreg_t ubr_next_link; /* UBR next link */
709 + u_int filler7a[0x7c-0x7a];
710 + ffreg_t out_rate_head; /* Out of rate head */
711 + u_int filler7d[0xca-0x7d]; /* pad out to full address space */
712 + ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
713 + ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
714 + u_int fillercc[0x100-0xcc]; /* pad out to full address space */
715 715 } ffredn_t;
716 716
717 717 typedef struct _rfredn_t {
-2
drivers/base/regmap/regmap-debugfs.c
··· 121 121 c->max = p - 1; 122 122 list_add_tail(&c->list, 123 123 &map->debugfs_off_cache); 124 - } else { 125 - return base; 126 124 } 127 125 128 126 /*
+1 -1
drivers/base/regmap/regmap.c
··· 1106 1106 * @val_count: Number of registers to write 1107 1107 * 1108 1108 * This function is intended to be used for writing a large block of 1109 - * data to be device either in single transfer or multiple transfer. 1109 + * data to the device either in single transfer or multiple transfer. 1110 1110 * 1111 1111 * A value of zero will be returned on success, a negative errno will 1112 1112 * be returned in error cases.
+5
drivers/bcma/bcma_private.h
··· 94 94 #ifdef CONFIG_BCMA_DRIVER_GPIO 95 95 /* driver_gpio.c */ 96 96 int bcma_gpio_init(struct bcma_drv_cc *cc); 97 + int bcma_gpio_unregister(struct bcma_drv_cc *cc); 97 98 #else 98 99 static inline int bcma_gpio_init(struct bcma_drv_cc *cc) 99 100 { 100 101 return -ENOTSUPP; 102 + } 103 + static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc) 104 + { 105 + return 0; 101 106 } 102 107 #endif /* CONFIG_BCMA_DRIVER_GPIO */ 103 108
+1 -1
drivers/bcma/driver_chipcommon_nflash.c
··· 21 21 struct bcma_bus *bus = cc->core->bus; 22 22 23 23 if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 && 24 - cc->core->id.rev != 0x38) { 24 + cc->core->id.rev != 38) { 25 25 bcma_err(bus, "NAND flash on unsupported board!\n"); 26 26 return -ENOTSUPP; 27 27 }
+5
drivers/bcma/driver_gpio.c
··· 96 96 97 97 return gpiochip_add(chip); 98 98 } 99 + 100 + int bcma_gpio_unregister(struct bcma_drv_cc *cc) 101 + { 102 + return gpiochip_remove(&cc->gpio); 103 + }
+7
drivers/bcma/main.c
··· 268 268 void bcma_bus_unregister(struct bcma_bus *bus) 269 269 { 270 270 struct bcma_device *cores[3]; 271 + int err; 272 + 273 + err = bcma_gpio_unregister(&bus->drv_cc); 274 + if (err == -EBUSY) 275 + bcma_err(bus, "Some GPIOs are still in use.\n"); 276 + else if (err) 277 + bcma_err(bus, "Can not unregister GPIO driver: %i\n", err); 271 278 272 279 cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K); 273 280 cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
+1 -1
drivers/block/drbd/drbd_req.c
··· 168 168 } 169 169 170 170 /* must hold resource->req_lock */ 171 - static void start_new_tl_epoch(struct drbd_tconn *tconn) 171 + void start_new_tl_epoch(struct drbd_tconn *tconn) 172 172 { 173 173 /* no point closing an epoch, if it is empty, anyways. */ 174 174 if (tconn->current_tle_writes == 0)
+1
drivers/block/drbd/drbd_req.h
··· 267 267 int error; 268 268 }; 269 269 270 + extern void start_new_tl_epoch(struct drbd_tconn *tconn); 270 271 extern void drbd_req_destroy(struct kref *kref); 271 272 extern void _req_may_be_done(struct drbd_request *req, 272 273 struct bio_and_error *m);
+7
drivers/block/drbd/drbd_state.c
··· 931 931 enum drbd_state_rv rv = SS_SUCCESS; 932 932 enum sanitize_state_warnings ssw; 933 933 struct after_state_chg_work *ascw; 934 + bool did_remote, should_do_remote; 934 935 935 936 os = drbd_read_state(mdev); 936 937 ··· 982 981 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS)) 983 982 atomic_inc(&mdev->local_cnt); 984 983 984 + did_remote = drbd_should_do_remote(mdev->state); 985 985 mdev->state.i = ns.i; 986 + should_do_remote = drbd_should_do_remote(mdev->state); 986 987 mdev->tconn->susp = ns.susp; 987 988 mdev->tconn->susp_nod = ns.susp_nod; 988 989 mdev->tconn->susp_fen = ns.susp_fen; 990 + 991 + /* put replicated vs not-replicated requests in seperate epochs */ 992 + if (did_remote != should_do_remote) 993 + start_new_tl_epoch(mdev->tconn); 989 994 990 995 if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) 991 996 drbd_print_uuids(mdev, "attached to UUIDs");
+18 -6
drivers/block/mtip32xx/mtip32xx.c
··· 626 626 } 627 627 } 628 628 629 - if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { 629 + if (cmdto_cnt) { 630 630 print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); 631 - 632 - mtip_restart_port(port); 631 + if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { 632 + mtip_restart_port(port); 633 + wake_up_interruptible(&port->svc_wait); 634 + } 633 635 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); 634 - wake_up_interruptible(&port->svc_wait); 635 636 } 636 637 637 638 if (port->ic_pause_timer) { ··· 3888 3887 * Delete our gendisk structure. This also removes the device 3889 3888 * from /dev 3890 3889 */ 3891 - del_gendisk(dd->disk); 3890 + if (dd->disk) { 3891 + if (dd->disk->queue) 3892 + del_gendisk(dd->disk); 3893 + else 3894 + put_disk(dd->disk); 3895 + } 3892 3896 3893 3897 spin_lock(&rssd_index_lock); 3894 3898 ida_remove(&rssd_index_ida, dd->index); ··· 3927 3921 "Shutting down %s ...\n", dd->disk->disk_name); 3928 3922 3929 3923 /* Delete our gendisk structure, and cleanup the blk queue. */ 3930 - del_gendisk(dd->disk); 3924 + if (dd->disk) { 3925 + if (dd->disk->queue) 3926 + del_gendisk(dd->disk); 3927 + else 3928 + put_disk(dd->disk); 3929 + } 3930 + 3931 3931 3932 3932 spin_lock(&rssd_index_lock); 3933 3933 ida_remove(&rssd_index_ida, dd->index);
+11 -7
drivers/block/xen-blkback/blkback.c
··· 161 161 static void make_response(struct xen_blkif *blkif, u64 id, 162 162 unsigned short op, int st); 163 163 164 - #define foreach_grant(pos, rbtree, node) \ 165 - for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node); \ 164 + #define foreach_grant_safe(pos, n, rbtree, node) \ 165 + for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ 166 + (n) = rb_next(&(pos)->node); \ 166 167 &(pos)->node != NULL; \ 167 - (pos) = container_of(rb_next(&(pos)->node), typeof(*(pos)), node)) 168 + (pos) = container_of(n, typeof(*(pos)), node), \ 169 + (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) 168 170 169 171 170 172 static void add_persistent_gnt(struct rb_root *root, ··· 219 217 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 220 218 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 221 219 struct persistent_gnt *persistent_gnt; 220 + struct rb_node *n; 222 221 int ret = 0; 223 222 int segs_to_unmap = 0; 224 223 225 - foreach_grant(persistent_gnt, root, node) { 224 + foreach_grant_safe(persistent_gnt, n, root, node) { 226 225 BUG_ON(persistent_gnt->handle == 227 226 BLKBACK_INVALID_HANDLE); 228 227 gnttab_set_unmap_op(&unmap[segs_to_unmap], ··· 233 230 persistent_gnt->handle); 234 231 235 232 pages[segs_to_unmap] = persistent_gnt->page; 236 - rb_erase(&persistent_gnt->node, root); 237 - kfree(persistent_gnt); 238 - num--; 239 233 240 234 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || 241 235 !rb_next(&persistent_gnt->node)) { ··· 241 241 BUG_ON(ret); 242 242 segs_to_unmap = 0; 243 243 } 244 + 245 + rb_erase(&persistent_gnt->node, root); 246 + kfree(persistent_gnt); 247 + num--; 244 248 } 245 249 BUG_ON(num != 0); 246 250 }
+6 -4
drivers/block/xen-blkfront.c
··· 792 792 { 793 793 struct llist_node *all_gnts; 794 794 struct grant *persistent_gnt; 795 + struct llist_node *n; 795 796 796 797 /* Prevent new requests being issued until we fix things up. */ 797 798 spin_lock_irq(&info->io_lock); ··· 805 804 /* Remove all persistent grants */ 806 805 if (info->persistent_gnts_c) { 807 806 all_gnts = llist_del_all(&info->persistent_gnts); 808 - llist_for_each_entry(persistent_gnt, all_gnts, node) { 807 + llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) { 809 808 gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); 810 809 __free_page(pfn_to_page(persistent_gnt->pfn)); 811 810 kfree(persistent_gnt); ··· 836 835 static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, 837 836 struct blkif_response *bret) 838 837 { 839 - int i; 838 + int i = 0; 840 839 struct bio_vec *bvec; 841 840 struct req_iterator iter; 842 841 unsigned long flags; ··· 853 852 */ 854 853 rq_for_each_segment(bvec, s->request, iter) { 855 854 BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE); 856 - i = offset >> PAGE_SHIFT; 855 + if (bvec->bv_offset < offset) 856 + i++; 857 857 BUG_ON(i >= s->req.u.rw.nr_segments); 858 858 shared_data = kmap_atomic( 859 859 pfn_to_page(s->grants_used[i]->pfn)); ··· 863 861 bvec->bv_len); 864 862 bvec_kunmap_irq(bvec_data, &flags); 865 863 kunmap_atomic(shared_data); 866 - offset += bvec->bv_len; 864 + offset = bvec->bv_offset + bvec->bv_len; 867 865 } 868 866 } 869 867 /* Add the persistent grant into the list of free grants */
+10
drivers/bluetooth/ath3k.c
··· 77 77 { USB_DEVICE(0x0CF3, 0x311D) }, 78 78 { USB_DEVICE(0x13d3, 0x3375) }, 79 79 { USB_DEVICE(0x04CA, 0x3005) }, 80 + { USB_DEVICE(0x04CA, 0x3006) }, 81 + { USB_DEVICE(0x04CA, 0x3008) }, 80 82 { USB_DEVICE(0x13d3, 0x3362) }, 81 83 { USB_DEVICE(0x0CF3, 0xE004) }, 82 84 { USB_DEVICE(0x0930, 0x0219) }, 83 85 { USB_DEVICE(0x0489, 0xe057) }, 86 + { USB_DEVICE(0x13d3, 0x3393) }, 87 + { USB_DEVICE(0x0489, 0xe04e) }, 88 + { USB_DEVICE(0x0489, 0xe056) }, 84 89 85 90 /* Atheros AR5BBU12 with sflash firmware */ 86 91 { USB_DEVICE(0x0489, 0xE02C) }, ··· 109 104 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, 110 105 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 111 106 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 107 + { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, 108 + { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 112 109 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 113 110 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 114 111 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 115 112 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 113 + { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 114 + { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, 115 + { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, 116 116 117 117 /* Atheros AR5BBU22 with sflash firmware */ 118 118 { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
+5
drivers/bluetooth/btusb.c
··· 135 135 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, 136 136 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 137 137 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 138 + { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, 139 + { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 138 140 { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, 139 141 { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, 140 142 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 141 143 { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, 144 + { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, 145 + { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, 146 + { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, 142 147 143 148 /* Atheros AR5BBU12 with sflash firmware */ 144 149 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+2 -1
drivers/char/virtio_console.c
··· 2062 2062 /* Disable interrupts for vqs */ 2063 2063 vdev->config->reset(vdev); 2064 2064 /* Finish up work that's lined up */ 2065 - cancel_work_sync(&portdev->control_work); 2065 + if (use_multiport(portdev)) 2066 + cancel_work_sync(&portdev->control_work); 2066 2067 2067 2068 list_for_each_entry_safe(port, port2, &portdev->ports, list) 2068 2069 unplug_port(port);
+6 -3
drivers/clk/mvebu/clk-cpu.c
··· 124 124 125 125 clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL); 126 126 if (WARN_ON(!clks)) 127 - return; 127 + goto clks_out; 128 128 129 129 for_each_node_by_type(dn, "cpu") { 130 130 struct clk_init_data init; ··· 134 134 int cpu, err; 135 135 136 136 if (WARN_ON(!clk_name)) 137 - return; 137 + goto bail_out; 138 138 139 139 err = of_property_read_u32(dn, "reg", &cpu); 140 140 if (WARN_ON(err)) 141 - return; 141 + goto bail_out; 142 142 143 143 sprintf(clk_name, "cpu%d", cpu); 144 144 parent_clk = of_clk_get(node, 0); ··· 167 167 return; 168 168 bail_out: 169 169 kfree(clks); 170 + while(ncpus--) 171 + kfree(cpuclk[ncpus].clk_name); 172 + clks_out: 170 173 kfree(cpuclk); 171 174 } 172 175
+1 -1
drivers/cpufreq/Kconfig.x86
··· 106 106 config X86_POWERNOW_K8 107 107 tristate "AMD Opteron/Athlon64 PowerNow!" 108 108 select CPU_FREQ_TABLE 109 - depends on ACPI && ACPI_PROCESSOR 109 + depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ 110 110 help 111 111 This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. 112 112 Support for K10 and newer processors is now in acpi-cpufreq.
+7
drivers/cpufreq/acpi-cpufreq.c
··· 1030 1030 late_initcall(acpi_cpufreq_init); 1031 1031 module_exit(acpi_cpufreq_exit); 1032 1032 1033 + static const struct x86_cpu_id acpi_cpufreq_ids[] = { 1034 + X86_FEATURE_MATCH(X86_FEATURE_ACPI), 1035 + X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE), 1036 + {} 1037 + }; 1038 + MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids); 1039 + 1033 1040 MODULE_ALIAS("acpi");
+5
drivers/cpufreq/cpufreq-cpu0.c
··· 71 71 } 72 72 73 73 if (cpu_reg) { 74 + rcu_read_lock(); 74 75 opp = opp_find_freq_ceil(cpu_dev, &freq_Hz); 75 76 if (IS_ERR(opp)) { 77 + rcu_read_unlock(); 76 78 pr_err("failed to find OPP for %ld\n", freq_Hz); 77 79 return PTR_ERR(opp); 78 80 } 79 81 volt = opp_get_voltage(opp); 82 + rcu_read_unlock(); 80 83 tol = volt * voltage_tolerance / 100; 81 84 volt_old = regulator_get_voltage(cpu_reg); 82 85 } ··· 239 236 */ 240 237 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) 241 238 ; 239 + rcu_read_lock(); 242 240 opp = opp_find_freq_exact(cpu_dev, 243 241 freq_table[0].frequency * 1000, true); 244 242 min_uV = opp_get_voltage(opp); 245 243 opp = opp_find_freq_exact(cpu_dev, 246 244 freq_table[i-1].frequency * 1000, true); 247 245 max_uV = opp_get_voltage(opp); 246 + rcu_read_unlock(); 248 247 ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); 249 248 if (ret > 0) 250 249 transition_latency += ret * 1000;
+3
drivers/cpufreq/omap-cpufreq.c
··· 110 110 freq = ret; 111 111 112 112 if (mpu_reg) { 113 + rcu_read_lock(); 113 114 opp = opp_find_freq_ceil(mpu_dev, &freq); 114 115 if (IS_ERR(opp)) { 116 + rcu_read_unlock(); 115 117 dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n", 116 118 __func__, freqs.new); 117 119 return -EINVAL; 118 120 } 119 121 volt = opp_get_voltage(opp); 122 + rcu_read_unlock(); 120 123 tol = volt * OPP_TOLERANCE / 100; 121 124 volt_old = regulator_get_voltage(mpu_reg); 122 125 }
+5
drivers/devfreq/devfreq.c
··· 994 994 * @freq: The frequency given to target function 995 995 * @flags: Flags handed from devfreq framework. 996 996 * 997 + * Locking: This function must be called under rcu_read_lock(). opp is a rcu 998 + * protected pointer. The reason for the same is that the opp pointer which is 999 + * returned will remain valid for use with opp_get_{voltage, freq} only while 1000 + * under the locked area. The pointer returned must be used prior to unlocking 1001 + * with rcu_read_unlock() to maintain the integrity of the pointer. 997 1002 */ 998 1003 struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, 999 1004 u32 flags)
+67 -27
drivers/devfreq/exynos4_bus.c
··· 73 73 #define EX4210_LV_NUM (LV_2 + 1)
74 74 #define EX4x12_LV_NUM (LV_4 + 1)
75 75
76 + /**
77 + * struct busfreq_opp_info - opp information for bus
78 + * @rate: Frequency in hertz
79 + * @volt: Voltage in microvolts corresponding to this OPP
80 + */
81 + struct busfreq_opp_info {
82 + unsigned long rate;
83 + unsigned long volt;
84 + };
85 +
76 86 struct busfreq_data {
77 87 enum exynos4_busf_type type;
78 88 struct device *dev;
··· 90 80 bool disabled;
91 81 struct regulator *vdd_int;
92 82 struct regulator *vdd_mif; /* Exynos4412/4212 only */
93 - struct opp *curr_opp;
83 + struct busfreq_opp_info curr_oppinfo;
94 84 struct exynos4_ppmu dmc[2];
95 85
96 86 struct notifier_block pm_notifier;
··· 306 296 };
307 297
308 298
309 - static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
299 + static int exynos4210_set_busclk(struct busfreq_data *data,
300 + struct busfreq_opp_info *oppi)
310 301 {
311 302 unsigned int index;
312 303 unsigned int tmp;
313 304
314 305 for (index = LV_0; index < EX4210_LV_NUM; index++)
315 - if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
306 + if (oppi->rate == exynos4210_busclk_table[index].clk)
316 307 break;
317 308
318 309 if (index == EX4210_LV_NUM)
··· 372 361 return 0;
373 362 }
374 363
375 - static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
364 + static int exynos4x12_set_busclk(struct busfreq_data *data,
365 + struct busfreq_opp_info *oppi)
376 366 {
377 367 unsigned int index;
378 368 unsigned int tmp;
379 369
380 370 for (index = LV_0; index < EX4x12_LV_NUM; index++)
381 - if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
371 + if (oppi->rate == exynos4x12_mifclk_table[index].clk)
382 372 break;
383 373
384 374 if (index == EX4x12_LV_NUM)
··· 588 576 return -EINVAL;
589 577 }
590 578
591 - static int exynos4_bus_setvolt(struct busfreq_data *data,
580 + struct busfreq_opp_info *oppi,
581 + struct busfreq_opp_info *oldoppi)
593 582 {
594 583 int err = 0, tmp;
595 - unsigned long volt = opp_get_voltage(opp);
584 + unsigned long volt = oppi->volt;
596 585
597 586 switch (data->type) {
598 587 case TYPE_BUSF_EXYNOS4210:
··· 608 595 if (err)
609 596 break;
610 597
611 - tmp = exynos4x12_get_intspec(opp_get_freq(opp));
598 + tmp = exynos4x12_get_intspec(oppi->rate);
612 599 if (tmp < 0) {
613 600 err = tmp;
614 601 regulator_set_voltage(data->vdd_mif,
615 - opp_get_voltage(oldopp),
602 + oldoppi->volt,
616 603 MAX_SAFEVOLT);
617 604 break;
618 605 }
··· 622 609 /* Try to recover */
623 610 if (err)
624 611 regulator_set_voltage(data->vdd_mif,
625 - opp_get_voltage(oldopp),
612 + oldoppi->volt,
626 613 MAX_SAFEVOLT);
627 614 break;
628 615 default:
··· 639 626 struct platform_device *pdev = container_of(dev, struct platform_device,
640 627 dev);
641 628 struct busfreq_data *data = platform_get_drvdata(pdev);
642 - struct opp *opp = devfreq_recommended_opp(dev, _freq, flags);
643 - unsigned long freq = opp_get_freq(opp);
644 - unsigned long old_freq = opp_get_freq(data->curr_opp);
629 + struct opp *opp;
630 + unsigned long freq;
631 + unsigned long old_freq = data->curr_oppinfo.rate;
632 + struct busfreq_opp_info new_oppinfo;
645 633
646 - if (IS_ERR(opp))
634 + rcu_read_lock();
635 + opp = devfreq_recommended_opp(dev, _freq, flags);
636 + if (IS_ERR(opp)) {
637 + rcu_read_unlock();
647 638 return PTR_ERR(opp);
639 + }
640 + new_oppinfo.rate = opp_get_freq(opp);
641 + new_oppinfo.volt = opp_get_voltage(opp);
642 + rcu_read_unlock();
643 + freq = new_oppinfo.rate;
648 644
649 645 if (old_freq == freq)
650 646 return 0;
651 647
652 - dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp));
648 + dev_dbg(dev, "targetting %lukHz %luuV\n", freq, new_oppinfo.volt);
653 649
654 650 mutex_lock(&data->lock);
655 651
··· 666 644 goto out;
667 645
668 646 if (old_freq < freq)
669 - err = exynos4_bus_setvolt(data, opp, data->curr_opp);
647 + err = exynos4_bus_setvolt(data, &new_oppinfo,
648 + &data->curr_oppinfo);
670 649 if (err)
671 650 goto out;
672 651
673 652 if (old_freq != freq) {
674 653 switch (data->type) {
675 654 case TYPE_BUSF_EXYNOS4210:
676 - err = exynos4210_set_busclk(data, opp);
655 + err = exynos4210_set_busclk(data, &new_oppinfo);
677 656 break;
678 657 case TYPE_BUSF_EXYNOS4x12:
679 - err = exynos4x12_set_busclk(data, opp);
658 + err = exynos4x12_set_busclk(data, &new_oppinfo);
680 659 break;
681 660 default:
682 661 err = -EINVAL;
··· 687 664 goto out;
688 665
689 666 if (old_freq > freq)
690 - err = exynos4_bus_setvolt(data, opp, data->curr_opp);
667 + err = exynos4_bus_setvolt(data, &new_oppinfo,
668 + &data->curr_oppinfo);
691 669 if (err)
692 670 goto out;
693 671
694 - data->curr_opp = opp;
672 + data->curr_oppinfo = new_oppinfo;
695 673 out:
696 674 mutex_unlock(&data->lock);
697 675 return err;
··· 726 702
727 703 exynos4_read_ppmu(data);
728 704 busier_dmc = exynos4_get_busier_dmc(data);
729 - stat->current_frequency = opp_get_freq(data->curr_opp);
705 + stat->current_frequency = data->curr_oppinfo.rate;
730 706
731 707 if (busier_dmc)
732 708 addr = S5P_VA_DMC1;
··· 957 933 struct busfreq_data *data = container_of(this, struct busfreq_data,
958 934 pm_notifier);
959 935 struct opp *opp;
936 + struct busfreq_opp_info new_oppinfo;
960 937 unsigned long maxfreq = ULONG_MAX;
961 938 int err = 0;
962 939
··· 968 943
969 944 data->disabled = true;
970 945
946 + rcu_read_lock();
971 947 opp = opp_find_freq_floor(data->dev, &maxfreq);
948 + if (IS_ERR(opp)) {
949 + rcu_read_unlock();
950 + dev_err(data->dev, "%s: unable to find a min freq\n",
951 + __func__);
952 + return PTR_ERR(opp);
953 + }
954 + new_oppinfo.rate = opp_get_freq(opp);
955 + new_oppinfo.volt = opp_get_voltage(opp);
956 + rcu_read_unlock();
972 957
973 - err = exynos4_bus_setvolt(data, opp, data->curr_opp);
958 + err = exynos4_bus_setvolt(data, &new_oppinfo,
959 + &data->curr_oppinfo);
974 960 if (err)
975 961 goto unlock;
976 962
977 963 switch (data->type) {
978 964 case TYPE_BUSF_EXYNOS4210:
979 965 err = exynos4210_set_busclk(data, &new_oppinfo);
980 966 break;
981 967 case TYPE_BUSF_EXYNOS4x12:
982 968 err = exynos4x12_set_busclk(data, &new_oppinfo);
983 969 break;
984 970 default:
985 971 err = -EINVAL;
··· 998 962 if (err)
999 963 goto unlock;
1000 964
1001 - data->curr_opp = opp;
965 + data->curr_oppinfo = new_oppinfo;
1002 966 unlock:
1003 967 mutex_unlock(&data->lock);
1004 968 if (err)
··· 1063 1027 }
1064 1028 }
1065 1029
1030 + rcu_read_lock();
1066 1031 opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
1067 1032 if (IS_ERR(opp)) {
1033 + rcu_read_unlock();
1068 1034 dev_err(dev, "Invalid initial frequency %lu kHz.\n",
1069 1035 exynos4_devfreq_profile.initial_freq);
1070 1036 return PTR_ERR(opp);
1071 1037 }
1072 - data->curr_opp = opp;
1038 + data->curr_oppinfo.rate = opp_get_freq(opp);
1039 + data->curr_oppinfo.volt = opp_get_voltage(opp);
1040 + rcu_read_unlock();
1073 1041
1074 1042 platform_set_drvdata(pdev, data);
1075 1043
+2 -3
drivers/dma/imx-dma.c
··· 684 684 break; 685 685 } 686 686 687 - imxdmac->hw_chaining = 1; 688 - if (!imxdma_hw_chain(imxdmac)) 689 - return -EINVAL; 687 + imxdmac->hw_chaining = 0; 688 + 690 689 imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) | 691 690 ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | 692 691 CCR_REN;
+1 -1
drivers/dma/ioat/dma_v3.c
··· 951 951 goto free_resources; 952 952 } 953 953 } 954 - dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE); 954 + dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); 955 955 956 956 /* skip validate if the capability is not present */ 957 957 if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
+6 -2
drivers/dma/tegra20-apb-dma.c
··· 266 266 if (async_tx_test_ack(&dma_desc->txd)) { 267 267 list_del(&dma_desc->node); 268 268 spin_unlock_irqrestore(&tdc->lock, flags); 269 + dma_desc->txd.flags = 0; 269 270 return dma_desc; 270 271 } 271 272 } ··· 1051 1050 TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; 1052 1051 ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; 1053 1052 1054 - csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC; 1053 + csr |= TEGRA_APBDMA_CSR_FLOW; 1054 + if (flags & DMA_PREP_INTERRUPT) 1055 + csr |= TEGRA_APBDMA_CSR_IE_EOC; 1055 1056 csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; 1056 1057 1057 1058 apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; ··· 1098 1095 mem += len; 1099 1096 } 1100 1097 sg_req->last_sg = true; 1101 - dma_desc->txd.flags = 0; 1098 + if (flags & DMA_CTRL_ACK) 1099 + dma_desc->txd.flags = DMA_CTRL_ACK; 1102 1100 1103 1101 /* 1104 1102 * Make sure that mode should not be conflicting with currently
+3 -3
drivers/edac/edac_mc.c
··· 340 340 /* 341 341 * Alocate and fill the csrow/channels structs 342 342 */ 343 - mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL); 343 + mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL); 344 344 if (!mci->csrows) 345 345 goto error; 346 346 for (row = 0; row < tot_csrows; row++) { ··· 351 351 csr->csrow_idx = row; 352 352 csr->mci = mci; 353 353 csr->nr_channels = tot_channels; 354 - csr->channels = kcalloc(sizeof(*csr->channels), tot_channels, 354 + csr->channels = kcalloc(tot_channels, sizeof(*csr->channels), 355 355 GFP_KERNEL); 356 356 if (!csr->channels) 357 357 goto error; ··· 369 369 /* 370 370 * Allocate and fill the dimm structs 371 371 */ 372 - mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL); 372 + mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL); 373 373 if (!mci->dimms) 374 374 goto error; 375 375
+1 -1
drivers/edac/edac_pci_sysfs.c
··· 256 256 struct edac_pci_dev_attribute *edac_pci_dev; 257 257 edac_pci_dev = (struct edac_pci_dev_attribute *)attr; 258 258 259 - if (edac_pci_dev->show) 259 + if (edac_pci_dev->store) 260 260 return edac_pci_dev->store(edac_pci_dev->value, buffer, count); 261 261 return -EIO; 262 262 }
+1 -1
drivers/firmware/dmi_scan.c
··· 471 471 char __iomem *p, *q; 472 472 int rc; 473 473 474 - if (efi_enabled) { 474 + if (efi_enabled(EFI_CONFIG_TABLES)) { 475 475 if (efi.smbios == EFI_INVALID_TABLE_ADDR) 476 476 goto error; 477 477
+5 -4
drivers/firmware/efivars.c
··· 674 674 err = -EACCES; 675 675 break; 676 676 case EFI_NOT_FOUND: 677 - err = -ENOENT; 677 + err = -EIO; 678 678 break; 679 679 default: 680 680 err = -EINVAL; ··· 793 793 spin_unlock(&efivars->lock); 794 794 efivar_unregister(var); 795 795 drop_nlink(inode); 796 + d_delete(file->f_dentry); 796 797 dput(file->f_dentry); 797 798 798 799 } else { ··· 995 994 list_del(&var->list); 996 995 spin_unlock(&efivars->lock); 997 996 efivar_unregister(var); 998 - drop_nlink(dir); 997 + drop_nlink(dentry->d_inode); 999 998 dput(dentry); 1000 999 return 0; 1001 1000 } ··· 1783 1782 printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION, 1784 1783 EFIVARS_DATE); 1785 1784 1786 - if (!efi_enabled) 1785 + if (!efi_enabled(EFI_RUNTIME_SERVICES)) 1787 1786 return 0; 1788 1787 1789 1788 /* For now we'll register the efi directory at /sys/firmware/efi */ ··· 1823 1822 static void __exit 1824 1823 efivars_exit(void) 1825 1824 { 1826 - if (efi_enabled) { 1825 + if (efi_enabled(EFI_RUNTIME_SERVICES)) { 1827 1826 unregister_efivars(&__efivars); 1828 1827 kobject_put(efi_kobj); 1829 1828 }
+1 -1
drivers/firmware/iscsi_ibft_find.c
··· 99 99 /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will 100 100 * only use ACPI for this */ 101 101 102 - if (!efi_enabled) 102 + if (!efi_enabled(EFI_BOOT)) 103 103 find_ibft_in_mem(); 104 104 105 105 if (ibft_addr) {
+2 -2
drivers/gpu/drm/exynos/Kconfig
··· 24 24 25 25 config DRM_EXYNOS_FIMD 26 26 bool "Exynos DRM FIMD" 27 - depends on DRM_EXYNOS && !FB_S3C 27 + depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM 28 28 help 29 29 Choose this option if you want to use Exynos FIMD for DRM. 30 30 ··· 48 48 49 49 config DRM_EXYNOS_IPP 50 50 bool "Exynos DRM IPP" 51 - depends on DRM_EXYNOS 51 + depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM 52 52 help 53 53 Choose this option if you want to use IPP feature for DRM. 54 54
+15 -18
drivers/gpu/drm/exynos/exynos_drm_connector.c
··· 18 18 #include "exynos_drm_drv.h" 19 19 #include "exynos_drm_encoder.h" 20 20 21 - #define MAX_EDID 256 22 21 #define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\ 23 22 drm_connector) 24 23 ··· 95 96 to_exynos_connector(connector); 96 97 struct exynos_drm_manager *manager = exynos_connector->manager; 97 98 struct exynos_drm_display_ops *display_ops = manager->display_ops; 98 - unsigned int count; 99 + struct edid *edid = NULL; 100 + unsigned int count = 0; 101 + int ret; 99 102 100 103 DRM_DEBUG_KMS("%s\n", __FILE__); 101 104 ··· 115 114 * because lcd panel has only one mode. 116 115 */ 117 116 if (display_ops->get_edid) { 118 - int ret; 119 - void *edid; 120 - 121 - edid = kzalloc(MAX_EDID, GFP_KERNEL); 122 - if (!edid) { 123 - DRM_ERROR("failed to allocate edid\n"); 124 - return 0; 117 + edid = display_ops->get_edid(manager->dev, connector); 118 + if (IS_ERR_OR_NULL(edid)) { 119 + ret = PTR_ERR(edid); 120 + edid = NULL; 121 + DRM_ERROR("Panel operation get_edid failed %d\n", ret); 122 + goto out; 125 123 } 126 124 127 - ret = display_ops->get_edid(manager->dev, connector, 128 - edid, MAX_EDID); 129 - if (ret < 0) { 130 - DRM_ERROR("failed to get edid data.\n"); 131 - kfree(edid); 132 - edid = NULL; 133 - return 0; 125 + count = drm_add_edid_modes(connector, edid); 126 + if (count < 0) { 127 + DRM_ERROR("Add edid modes failed %d\n", count); 128 + goto out; 134 129 } 135 130 136 131 drm_mode_connector_update_edid_property(connector, edid); 137 - count = drm_add_edid_modes(connector, edid); 138 - kfree(edid); 139 132 } else { 140 133 struct exynos_drm_panel_info *panel; 141 134 struct drm_display_mode *mode = drm_mode_create(connector->dev); ··· 156 161 count = 1; 157 162 } 158 163 164 + out: 165 + kfree(edid); 159 166 return count; 160 167 } 161 168
+11 -13
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
··· 19 19 struct exynos_drm_dmabuf_attachment { 20 20 struct sg_table sgt; 21 21 enum dma_data_direction dir; 22 + bool is_mapped; 22 23 }; 23 24 24 25 static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf, ··· 73 72 74 73 DRM_DEBUG_PRIME("%s\n", __FILE__); 75 74 76 - if (WARN_ON(dir == DMA_NONE)) 77 - return ERR_PTR(-EINVAL); 78 - 79 75 /* just return current sgt if already requested. */ 80 - if (exynos_attach->dir == dir) 76 + if (exynos_attach->dir == dir && exynos_attach->is_mapped) 81 77 return &exynos_attach->sgt; 82 - 83 - /* reattaching is not allowed. */ 84 - if (WARN_ON(exynos_attach->dir != DMA_NONE)) 85 - return ERR_PTR(-EBUSY); 86 78 87 79 buf = gem_obj->buffer; 88 80 if (!buf) { ··· 101 107 wr = sg_next(wr); 102 108 } 103 109 104 - nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); 105 - if (!nents) { 106 - DRM_ERROR("failed to map sgl with iommu.\n"); 107 - sgt = ERR_PTR(-EIO); 108 - goto err_unlock; 110 + if (dir != DMA_NONE) { 111 + nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); 112 + if (!nents) { 113 + DRM_ERROR("failed to map sgl with iommu.\n"); 114 + sg_free_table(sgt); 115 + sgt = ERR_PTR(-EIO); 116 + goto err_unlock; 117 + } 109 118 } 110 119 120 + exynos_attach->is_mapped = true; 111 121 exynos_attach->dir = dir; 112 122 attach->priv = exynos_attach; 113 123
+2 -2
drivers/gpu/drm/exynos/exynos_drm_drv.h
··· 148 148 struct exynos_drm_display_ops { 149 149 enum exynos_drm_output_type type; 150 150 bool (*is_connected)(struct device *dev); 151 - int (*get_edid)(struct device *dev, struct drm_connector *connector, 152 - u8 *edid, int len); 151 + struct edid *(*get_edid)(struct device *dev, 152 + struct drm_connector *connector); 153 153 void *(*get_panel)(struct device *dev); 154 154 int (*check_timing)(struct device *dev, void *timing); 155 155 int (*power_on)(struct device *dev, int mode);
+1 -1
drivers/gpu/drm/exynos/exynos_drm_g2d.c
··· 324 324 g2d_userptr = NULL; 325 325 } 326 326 327 - dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, 327 + static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, 328 328 unsigned long userptr, 329 329 unsigned long size, 330 330 struct drm_file *filp,
+4 -5
drivers/gpu/drm/exynos/exynos_drm_hdmi.c
··· 108 108 return false; 109 109 } 110 110 111 - static int drm_hdmi_get_edid(struct device *dev, 112 - struct drm_connector *connector, u8 *edid, int len) 111 + static struct edid *drm_hdmi_get_edid(struct device *dev, 112 + struct drm_connector *connector) 113 113 { 114 114 struct drm_hdmi_context *ctx = to_context(dev); 115 115 116 116 DRM_DEBUG_KMS("%s\n", __FILE__); 117 117 118 118 if (hdmi_ops && hdmi_ops->get_edid) 119 - return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector, edid, 120 - len); 119 + return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector); 121 120 122 - return 0; 121 + return NULL; 123 122 } 124 123 125 124 static int drm_hdmi_check_timing(struct device *dev, void *timing)
+2 -2
drivers/gpu/drm/exynos/exynos_drm_hdmi.h
··· 30 30 struct exynos_hdmi_ops { 31 31 /* display */ 32 32 bool (*is_connected)(void *ctx); 33 - int (*get_edid)(void *ctx, struct drm_connector *connector, 34 - u8 *edid, int len); 33 + struct edid *(*get_edid)(void *ctx, 34 + struct drm_connector *connector); 35 35 int (*check_timing)(void *ctx, void *timing); 36 36 int (*power_on)(void *ctx, int mode); 37 37
+1 -1
drivers/gpu/drm/exynos/exynos_drm_ipp.c
··· 869 869 } 870 870 } 871 871 872 - void ipp_handle_cmd_work(struct device *dev, 872 + static void ipp_handle_cmd_work(struct device *dev, 873 873 struct exynos_drm_ippdrv *ippdrv, 874 874 struct drm_exynos_ipp_cmd_work *cmd_work, 875 875 struct drm_exynos_ipp_cmd_node *c_node)
+2 -2
drivers/gpu/drm/exynos/exynos_drm_rotator.c
··· 734 734 return 0; 735 735 } 736 736 737 - struct rot_limit_table rot_limit_tbl = { 737 + static struct rot_limit_table rot_limit_tbl = { 738 738 .ycbcr420_2p = { 739 739 .min_w = 32, 740 740 .min_h = 32, ··· 751 751 }, 752 752 }; 753 753 754 - struct platform_device_id rotator_driver_ids[] = { 754 + static struct platform_device_id rotator_driver_ids[] = { 755 755 { 756 756 .name = "exynos-rot", 757 757 .driver_data = (unsigned long)&rot_limit_tbl,
+16 -10
drivers/gpu/drm/exynos/exynos_drm_vidi.c
··· 98 98 return ctx->connected ? true : false; 99 99 } 100 100 101 - static int vidi_get_edid(struct device *dev, struct drm_connector *connector, 102 - u8 *edid, int len) 101 + static struct edid *vidi_get_edid(struct device *dev, 102 + struct drm_connector *connector) 103 103 { 104 104 struct vidi_context *ctx = get_vidi_context(dev); 105 + struct edid *edid; 106 + int edid_len; 105 107 106 108 DRM_DEBUG_KMS("%s\n", __FILE__); 107 109 ··· 113 111 */ 114 112 if (!ctx->raw_edid) { 115 113 DRM_DEBUG_KMS("raw_edid is null.\n"); 116 - return -EFAULT; 114 + return ERR_PTR(-EFAULT); 117 115 } 118 116 119 - memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions) 120 - * EDID_LENGTH, len)); 117 + edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; 118 + edid = kzalloc(edid_len, GFP_KERNEL); 119 + if (!edid) { 120 + DRM_DEBUG_KMS("failed to allocate edid\n"); 121 + return ERR_PTR(-ENOMEM); 122 + } 121 123 122 - return 0; 124 + memcpy(edid, ctx->raw_edid, edid_len); 125 + return edid; 123 126 } 124 127 125 128 static void *vidi_get_panel(struct device *dev) ··· 521 514 struct exynos_drm_manager *manager; 522 515 struct exynos_drm_display_ops *display_ops; 523 516 struct drm_exynos_vidi_connection *vidi = data; 524 - struct edid *raw_edid; 525 517 int edid_len; 526 518 527 519 DRM_DEBUG_KMS("%s\n", __FILE__); ··· 557 551 } 558 552 559 553 if (vidi->connection) { 560 - if (!vidi->edid) { 561 - DRM_DEBUG_KMS("edid data is null.\n"); 554 + struct edid *raw_edid = (struct edid *)(uint32_t)vidi->edid; 555 + if (!drm_edid_is_valid(raw_edid)) { 556 + DRM_DEBUG_KMS("edid data is invalid.\n"); 562 557 return -EINVAL; 563 558 } 564 - raw_edid = (struct edid *)(uint32_t)vidi->edid; 565 559 edid_len = (1 + raw_edid->extensions) * EDID_LENGTH; 566 560 ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL); 567 561 if (!ctx->raw_edid) {
+38 -83
drivers/gpu/drm/exynos/exynos_hdmi.c
··· 34 34 #include <linux/regulator/consumer.h> 35 35 #include <linux/io.h> 36 36 #include <linux/of_gpio.h> 37 - #include <plat/gpio-cfg.h> 38 37 39 38 #include <drm/exynos_drm.h> 40 39 ··· 97 98 98 99 void __iomem *regs; 99 100 void *parent_ctx; 100 - int external_irq; 101 - int internal_irq; 101 + int irq; 102 102 103 103 struct i2c_client *ddc_port; 104 104 struct i2c_client *hdmiphy_port; ··· 1389 1391 return hdata->hpd; 1390 1392 } 1391 1393 1392 - static int hdmi_get_edid(void *ctx, struct drm_connector *connector, 1393 - u8 *edid, int len) 1394 + static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector) 1394 1395 { 1395 1396 struct edid *raw_edid; 1396 1397 struct hdmi_context *hdata = ctx; ··· 1397 1400 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 1398 1401 1399 1402 if (!hdata->ddc_port) 1400 - return -ENODEV; 1403 + return ERR_PTR(-ENODEV); 1401 1404 1402 1405 raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter); 1403 - if (raw_edid) { 1404 - hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid); 1405 - memcpy(edid, raw_edid, min((1 + raw_edid->extensions) 1406 - * EDID_LENGTH, len)); 1407 - DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n", 1408 - (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"), 1409 - raw_edid->width_cm, raw_edid->height_cm); 1410 - kfree(raw_edid); 1411 - } else { 1412 - return -ENODEV; 1413 - } 1406 + if (!raw_edid) 1407 + return ERR_PTR(-ENODEV); 1414 1408 1415 - return 0; 1409 + hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid); 1410 + DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n", 1411 + (hdata->dvi_mode ? 
"dvi monitor" : "hdmi monitor"), 1412 + raw_edid->width_cm, raw_edid->height_cm); 1413 + 1414 + return raw_edid; 1416 1415 } 1417 1416 1418 1417 static int hdmi_v13_check_timing(struct fb_videomode *check_timing) ··· 1645 1652 1646 1653 /* resetting HDMI core */ 1647 1654 hdmi_reg_writemask(hdata, reg, 0, HDMI_CORE_SW_RSTOUT); 1648 - mdelay(10); 1655 + usleep_range(10000, 12000); 1649 1656 hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT); 1650 - mdelay(10); 1657 + usleep_range(10000, 12000); 1651 1658 } 1652 1659 1653 1660 static void hdmi_conf_init(struct hdmi_context *hdata) 1654 1661 { 1655 1662 struct hdmi_infoframe infoframe; 1656 1663 1657 - /* disable HPD interrupts */ 1664 + /* disable HPD interrupts from HDMI IP block, use GPIO instead */ 1658 1665 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | 1659 1666 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); 1660 1667 ··· 1772 1779 u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS); 1773 1780 if (val & HDMI_PHY_STATUS_READY) 1774 1781 break; 1775 - mdelay(1); 1782 + usleep_range(1000, 2000); 1776 1783 } 1777 1784 /* steady state not achieved */ 1778 1785 if (tries == 0) { ··· 1939 1946 u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0); 1940 1947 if (val & HDMI_PHY_STATUS_READY) 1941 1948 break; 1942 - mdelay(1); 1949 + usleep_range(1000, 2000); 1943 1950 } 1944 1951 /* steady state not achieved */ 1945 1952 if (tries == 0) { ··· 1991 1998 1992 1999 /* reset hdmiphy */ 1993 2000 hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT); 1994 - mdelay(10); 2001 + usleep_range(10000, 12000); 1995 2002 hdmi_reg_writemask(hdata, reg, 0, HDMI_PHY_SW_RSTOUT); 1996 - mdelay(10); 2003 + usleep_range(10000, 12000); 1997 2004 } 1998 2005 1999 2006 static void hdmiphy_poweron(struct hdmi_context *hdata) ··· 2041 2048 return; 2042 2049 } 2043 2050 2044 - mdelay(10); 2051 + usleep_range(10000, 12000); 2045 2052 2046 2053 /* operation mode */ 2047 2054 operation[0] = 0x1f; ··· 2163 2170 2164 2171 
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 2165 2172 2173 + mutex_lock(&hdata->hdmi_mutex); 2174 + if (!hdata->powered) { 2175 + mutex_unlock(&hdata->hdmi_mutex); 2176 + return; 2177 + } 2178 + mutex_unlock(&hdata->hdmi_mutex); 2179 + 2166 2180 hdmi_conf_apply(hdata); 2167 2181 } 2168 2182 ··· 2265 2265 .dpms = hdmi_dpms, 2266 2266 }; 2267 2267 2268 - static irqreturn_t hdmi_external_irq_thread(int irq, void *arg) 2268 + static irqreturn_t hdmi_irq_thread(int irq, void *arg) 2269 2269 { 2270 2270 struct exynos_drm_hdmi_context *ctx = arg; 2271 2271 struct hdmi_context *hdata = ctx->ctx; ··· 2273 2273 mutex_lock(&hdata->hdmi_mutex); 2274 2274 hdata->hpd = gpio_get_value(hdata->hpd_gpio); 2275 2275 mutex_unlock(&hdata->hdmi_mutex); 2276 - 2277 - if (ctx->drm_dev) 2278 - drm_helper_hpd_irq_event(ctx->drm_dev); 2279 - 2280 - return IRQ_HANDLED; 2281 - } 2282 - 2283 - static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg) 2284 - { 2285 - struct exynos_drm_hdmi_context *ctx = arg; 2286 - struct hdmi_context *hdata = ctx->ctx; 2287 - u32 intc_flag; 2288 - 2289 - intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG); 2290 - /* clearing flags for HPD plug/unplug */ 2291 - if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) { 2292 - DRM_DEBUG_KMS("unplugged\n"); 2293 - hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0, 2294 - HDMI_INTC_FLAG_HPD_UNPLUG); 2295 - } 2296 - if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) { 2297 - DRM_DEBUG_KMS("plugged\n"); 2298 - hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0, 2299 - HDMI_INTC_FLAG_HPD_PLUG); 2300 - } 2301 2276 2302 2277 if (ctx->drm_dev) 2303 2278 drm_helper_hpd_irq_event(ctx->drm_dev); ··· 2530 2555 2531 2556 hdata->hdmiphy_port = hdmi_hdmiphy; 2532 2557 2533 - hdata->external_irq = gpio_to_irq(hdata->hpd_gpio); 2534 - if (hdata->external_irq < 0) { 2535 - DRM_ERROR("failed to get GPIO external irq\n"); 2536 - ret = hdata->external_irq; 2537 - goto err_hdmiphy; 2538 - } 2539 - 2540 - hdata->internal_irq = platform_get_irq(pdev, 0); 2541 - 
if (hdata->internal_irq < 0) { 2542 - DRM_ERROR("failed to get platform internal irq\n"); 2543 - ret = hdata->internal_irq; 2558 + hdata->irq = gpio_to_irq(hdata->hpd_gpio); 2559 + if (hdata->irq < 0) { 2560 + DRM_ERROR("failed to get GPIO irq\n"); 2561 + ret = hdata->irq; 2544 2562 goto err_hdmiphy; 2545 2563 } 2546 2564 2547 2565 hdata->hpd = gpio_get_value(hdata->hpd_gpio); 2548 2566 2549 - ret = request_threaded_irq(hdata->external_irq, NULL, 2550 - hdmi_external_irq_thread, IRQF_TRIGGER_RISING | 2567 + ret = request_threaded_irq(hdata->irq, NULL, 2568 + hdmi_irq_thread, IRQF_TRIGGER_RISING | 2551 2569 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 2552 - "hdmi_external", drm_hdmi_ctx); 2570 + "hdmi", drm_hdmi_ctx); 2553 2571 if (ret) { 2554 - DRM_ERROR("failed to register hdmi external interrupt\n"); 2572 + DRM_ERROR("failed to register hdmi interrupt\n"); 2555 2573 goto err_hdmiphy; 2556 - } 2557 - 2558 - ret = request_threaded_irq(hdata->internal_irq, NULL, 2559 - hdmi_internal_irq_thread, IRQF_ONESHOT, 2560 - "hdmi_internal", drm_hdmi_ctx); 2561 - if (ret) { 2562 - DRM_ERROR("failed to register hdmi internal interrupt\n"); 2563 - goto err_free_irq; 2564 2574 } 2565 2575 2566 2576 /* Attach HDMI Driver to common hdmi. 
*/ ··· 2558 2598 2559 2599 return 0; 2560 2600 2561 - err_free_irq: 2562 - free_irq(hdata->external_irq, drm_hdmi_ctx); 2563 2601 err_hdmiphy: 2564 2602 i2c_del_driver(&hdmiphy_driver); 2565 2603 err_ddc: ··· 2575 2617 2576 2618 pm_runtime_disable(dev); 2577 2619 2578 - free_irq(hdata->internal_irq, hdata); 2579 - free_irq(hdata->external_irq, hdata); 2620 + free_irq(hdata->irq, hdata); 2580 2621 2581 2622 2582 2623 /* hdmiphy i2c driver */ ··· 2594 2637 2595 2638 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 2596 2639 2597 - disable_irq(hdata->internal_irq); 2598 - disable_irq(hdata->external_irq); 2640 + disable_irq(hdata->irq); 2599 2641 2600 2642 hdata->hpd = false; 2601 2643 if (ctx->drm_dev) ··· 2619 2663 2620 2664 hdata->hpd = gpio_get_value(hdata->hpd_gpio); 2621 2665 2622 - enable_irq(hdata->external_irq); 2623 - enable_irq(hdata->internal_irq); 2666 + enable_irq(hdata->irq); 2624 2667 2625 2668 if (!pm_runtime_suspended(dev)) { 2626 2669 DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+8 -1
drivers/gpu/drm/exynos/exynos_mixer.c
··· 600 600 /* waiting until VP_SRESET_PROCESSING is 0 */ 601 601 if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING) 602 602 break; 603 - mdelay(10); 603 + usleep_range(10000, 12000); 604 604 } 605 605 WARN(tries == 0, "failed to reset Video Processor\n"); 606 606 } ··· 775 775 struct mixer_context *mixer_ctx = ctx; 776 776 777 777 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); 778 + 779 + mutex_lock(&mixer_ctx->mixer_mutex); 780 + if (!mixer_ctx->powered) { 781 + mutex_unlock(&mixer_ctx->mixer_mutex); 782 + return; 783 + } 784 + mutex_unlock(&mixer_ctx->mixer_mutex); 778 785 779 786 if (win > 1 && mixer_ctx->vp_enabled) 780 787 vp_video_buffer(mixer_ctx, win);
+2
drivers/gpu/drm/i915/i915_debugfs.c
··· 30 30 #include <linux/debugfs.h> 31 31 #include <linux/slab.h> 32 32 #include <linux/export.h> 33 + #include <generated/utsrelease.h> 33 34 #include <drm/drmP.h> 34 35 #include "intel_drv.h" 35 36 #include "intel_ringbuffer.h" ··· 691 690 692 691 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 693 692 error->time.tv_usec); 693 + seq_printf(m, "Kernel: " UTS_RELEASE); 694 694 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 695 695 seq_printf(m, "EIR: 0x%08x\n", error->eir); 696 696 seq_printf(m, "IER: 0x%08x\n", error->ier);
+1
drivers/gpu/drm/i915/i915_reg.h
··· 533 533 #define MI_MODE 0x0209c 534 534 # define VS_TIMER_DISPATCH (1 << 6) 535 535 # define MI_FLUSH_ENABLE (1 << 12) 536 + # define ASYNC_FLIP_PERF_DISABLE (1 << 14) 536 537 537 538 #define GEN6_GT_MODE 0x20d0 538 539 #define GEN6_GT_MODE_HI (1 << 9)
+18 -6
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 505 505 struct drm_i915_private *dev_priv = dev->dev_private; 506 506 int ret = init_ring_common(ring); 507 507 508 - if (INTEL_INFO(dev)->gen > 3) { 508 + if (INTEL_INFO(dev)->gen > 3) 509 509 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); 510 - if (IS_GEN7(dev)) 511 - I915_WRITE(GFX_MODE_GEN7, 512 - _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | 513 - _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 514 - } 510 + 511 + /* We need to disable the AsyncFlip performance optimisations in order 512 + * to use MI_WAIT_FOR_EVENT within the CS. It should already be 513 + * programmed to '1' on all products. 514 + */ 515 + if (INTEL_INFO(dev)->gen >= 6) 516 + I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 517 + 518 + /* Required for the hardware to program scanline values for waiting */ 519 + if (INTEL_INFO(dev)->gen == 6) 520 + I915_WRITE(GFX_MODE, 521 + _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS)); 522 + 523 + if (IS_GEN7(dev)) 524 + I915_WRITE(GFX_MODE_GEN7, 525 + _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | 526 + _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 515 527 516 528 if (INTEL_INFO(dev)->gen >= 5) { 517 529 ret = init_pipe_control(ring);
+24 -3
drivers/gpu/drm/radeon/evergreen.c
··· 1313 1313 if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) { 1314 1314 radeon_wait_for_vblank(rdev, i); 1315 1315 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; 1316 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 1316 1317 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 1318 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 1317 1319 } 1318 1320 } else { 1319 1321 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 1320 1322 if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) { 1321 1323 radeon_wait_for_vblank(rdev, i); 1322 1324 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; 1325 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 1323 1326 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); 1327 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 1324 1328 } 1325 1329 } 1326 1330 /* wait for the next frame */ ··· 1349 1345 blackout &= ~BLACKOUT_MODE_MASK; 1350 1346 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); 1351 1347 } 1348 + /* wait for the MC to settle */ 1349 + udelay(100); 1352 1350 } 1353 1351 1354 1352 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) ··· 1384 1378 if (ASIC_IS_DCE6(rdev)) { 1385 1379 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); 1386 1380 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; 1381 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 1387 1382 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 1383 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 1388 1384 } else { 1389 1385 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 1390 1386 tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; 1387 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 1391 1388 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); 1389 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); 1392 1390 } 1393 1391 /* wait for the next frame */ 1394 1392 frame_count = radeon_get_vblank_counter(rdev, i); ··· 2046 2036 
WREG32(HDP_ADDR_CONFIG, gb_addr_config); 2047 2037 WREG32(DMA_TILING_CONFIG, gb_addr_config); 2048 2038 2049 - tmp = gb_addr_config & NUM_PIPES_MASK; 2050 - tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, 2051 - EVERGREEN_MAX_BACKENDS, disabled_rb_mask); 2039 + if ((rdev->config.evergreen.max_backends == 1) && 2040 + (rdev->flags & RADEON_IS_IGP)) { 2041 + if ((disabled_rb_mask & 3) == 1) { 2042 + /* RB0 disabled, RB1 enabled */ 2043 + tmp = 0x11111111; 2044 + } else { 2045 + /* RB1 disabled, RB0 enabled */ 2046 + tmp = 0x00000000; 2047 + } 2048 + } else { 2049 + tmp = gb_addr_config & NUM_PIPES_MASK; 2050 + tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, 2051 + EVERGREEN_MAX_BACKENDS, disabled_rb_mask); 2052 + } 2052 2053 WREG32(GB_BACKEND_MAP, tmp); 2053 2054 2054 2055 WREG32(CGTS_SYS_TCC_DISABLE, 0);
+6 -2
drivers/gpu/drm/radeon/ni.c
··· 1216 1216 int cayman_dma_resume(struct radeon_device *rdev) 1217 1217 { 1218 1218 struct radeon_ring *ring; 1219 - u32 rb_cntl, dma_cntl; 1219 + u32 rb_cntl, dma_cntl, ib_cntl; 1220 1220 u32 rb_bufsz; 1221 1221 u32 reg_offset, wb_offset; 1222 1222 int i, r; ··· 1265 1265 WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8); 1266 1266 1267 1267 /* enable DMA IBs */ 1268 - WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE); 1268 + ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE; 1269 + #ifdef __BIG_ENDIAN 1270 + ib_cntl |= DMA_IB_SWAP_ENABLE; 1271 + #endif 1272 + WREG32(DMA_IB_CNTL + reg_offset, ib_cntl); 1269 1273 1270 1274 dma_cntl = RREG32(DMA_CNTL + reg_offset); 1271 1275 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+11 -4
drivers/gpu/drm/radeon/r600.c
··· 1462 1462 u32 disabled_rb_mask) 1463 1463 { 1464 1464 u32 rendering_pipe_num, rb_num_width, req_rb_num; 1465 - u32 pipe_rb_ratio, pipe_rb_remain; 1465 + u32 pipe_rb_ratio, pipe_rb_remain, tmp; 1466 1466 u32 data = 0, mask = 1 << (max_rb_num - 1); 1467 1467 unsigned i, j; 1468 1468 1469 1469 /* mask out the RBs that don't exist on that asic */ 1470 - disabled_rb_mask |= (0xff << max_rb_num) & 0xff; 1470 + tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff); 1471 + /* make sure at least one RB is available */ 1472 + if ((tmp & 0xff) != 0xff) 1473 + disabled_rb_mask = tmp; 1471 1474 1472 1475 rendering_pipe_num = 1 << tiling_pipe_num; 1473 1476 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask); ··· 2316 2313 int r600_dma_resume(struct radeon_device *rdev) 2317 2314 { 2318 2315 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 2319 - u32 rb_cntl, dma_cntl; 2316 + u32 rb_cntl, dma_cntl, ib_cntl; 2320 2317 u32 rb_bufsz; 2321 2318 int r; 2322 2319 ··· 2356 2353 WREG32(DMA_RB_BASE, ring->gpu_addr >> 8); 2357 2354 2358 2355 /* enable DMA IBs */ 2359 - WREG32(DMA_IB_CNTL, DMA_IB_ENABLE); 2356 + ib_cntl = DMA_IB_ENABLE; 2357 + #ifdef __BIG_ENDIAN 2358 + ib_cntl |= DMA_IB_SWAP_ENABLE; 2359 + #endif 2360 + WREG32(DMA_IB_CNTL, ib_cntl); 2360 2361 2361 2362 dma_cntl = RREG32(DMA_CNTL); 2362 2363 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+3 -3
drivers/gpu/drm/radeon/radeon_asic.c
··· 1445 1445 .vm = { 1446 1446 .init = &cayman_vm_init, 1447 1447 .fini = &cayman_vm_fini, 1448 - .pt_ring_index = R600_RING_TYPE_DMA_INDEX, 1448 + .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1449 1449 .set_page = &cayman_vm_set_page, 1450 1450 }, 1451 1451 .ring = { ··· 1572 1572 .vm = { 1573 1573 .init = &cayman_vm_init, 1574 1574 .fini = &cayman_vm_fini, 1575 - .pt_ring_index = R600_RING_TYPE_DMA_INDEX, 1575 + .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1576 1576 .set_page = &cayman_vm_set_page, 1577 1577 }, 1578 1578 .ring = { ··· 1699 1699 .vm = { 1700 1700 .init = &si_vm_init, 1701 1701 .fini = &si_vm_fini, 1702 - .pt_ring_index = R600_RING_TYPE_DMA_INDEX, 1702 + .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1703 1703 .set_page = &si_vm_set_page, 1704 1704 }, 1705 1705 .ring = {
+8
drivers/gpu/drm/radeon/radeon_combios.c
··· 2470 2470 1), 2471 2471 ATOM_DEVICE_CRT1_SUPPORT); 2472 2472 } 2473 + /* RV100 board with external TDMS bit mis-set. 2474 + * Actually uses internal TMDS, clear the bit. 2475 + */ 2476 + if (dev->pdev->device == 0x5159 && 2477 + dev->pdev->subsystem_vendor == 0x1014 && 2478 + dev->pdev->subsystem_device == 0x029A) { 2479 + tmp &= ~(1 << 4); 2480 + } 2473 2481 if ((tmp >> 4) & 0x1) { 2474 2482 devices |= ATOM_DEVICE_DFP2_SUPPORT; 2475 2483 radeon_add_legacy_encoder(dev,
+2
drivers/gpu/drm/radeon/radeon_cs.c
··· 286 286 p->chunks[p->chunk_ib_idx].kpage[1] == NULL) { 287 287 kfree(p->chunks[p->chunk_ib_idx].kpage[0]); 288 288 kfree(p->chunks[p->chunk_ib_idx].kpage[1]); 289 + p->chunks[p->chunk_ib_idx].kpage[0] = NULL; 290 + p->chunks[p->chunk_ib_idx].kpage[1] = NULL; 289 291 return -ENOMEM; 290 292 } 291 293 }
+2 -1
drivers/gpu/drm/radeon/radeon_cursor.c
··· 241 241 y = 0; 242 242 } 243 243 244 - if (ASIC_IS_AVIVO(rdev)) { 244 + /* fixed on DCE6 and newer */ 245 + if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) { 245 246 int i = 0; 246 247 struct drm_crtc *crtc_p; 247 248
+2 -1
drivers/gpu/drm/radeon/radeon_device.c
··· 429 429 { 430 430 uint32_t reg; 431 431 432 - if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) 432 + if (efi_enabled(EFI_BOOT) && 433 + rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) 433 434 return false; 434 435 435 436 /* first check CRTCs */
+4 -2
drivers/gpu/drm/radeon/radeon_display.c
··· 1115 1115 } 1116 1116 1117 1117 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); 1118 - if (radeon_fb == NULL) 1118 + if (radeon_fb == NULL) { 1119 + drm_gem_object_unreference_unlocked(obj); 1119 1120 return ERR_PTR(-ENOMEM); 1121 + } 1120 1122 1121 1123 ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); 1122 1124 if (ret) { 1123 1125 kfree(radeon_fb); 1124 1126 drm_gem_object_unreference_unlocked(obj); 1125 - return NULL; 1127 + return ERR_PTR(ret); 1126 1128 } 1127 1129 1128 1130 return &radeon_fb->base;
+3
drivers/gpu/drm/radeon/radeon_ring.c
··· 377 377 { 378 378 int r; 379 379 380 + /* make sure we aren't trying to allocate more space than there is on the ring */ 381 + if (ndw > (ring->ring_size / 4)) 382 + return -ENOMEM; 380 383 /* Align requested size with padding so unlock_commit can 381 384 * pad safely */ 382 385 ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+1
drivers/gpu/drm/radeon/reg_srcs/cayman
··· 1 1 cayman 0x9400 2 2 0x0000802C GRBM_GFX_INDEX 3 + 0x00008040 WAIT_UNTIL 3 4 0x000084FC CP_STRMOUT_CNTL 4 5 0x000085F0 CP_COHER_CNTL 5 6 0x000085F4 CP_COHER_SIZE
+2
drivers/gpu/drm/radeon/rv515.c
··· 336 336 WREG32(R600_CITF_CNTL, blackout); 337 337 } 338 338 } 339 + /* wait for the MC to settle */ 340 + udelay(100); 339 341 } 340 342 341 343 void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
+8 -5
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 429 429 struct ttm_bo_device *bdev = bo->bdev; 430 430 struct ttm_bo_driver *driver = bdev->driver; 431 431 432 - fbo = kzalloc(sizeof(*fbo), GFP_KERNEL); 432 + fbo = kmalloc(sizeof(*fbo), GFP_KERNEL); 433 433 if (!fbo) 434 434 return -ENOMEM; 435 435 ··· 448 448 fbo->vm_node = NULL; 449 449 atomic_set(&fbo->cpu_writers, 0); 450 450 451 - fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); 451 + spin_lock(&bdev->fence_lock); 452 + if (bo->sync_obj) 453 + fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); 454 + else 455 + fbo->sync_obj = NULL; 456 + spin_unlock(&bdev->fence_lock); 452 457 kref_init(&fbo->list_kref); 453 458 kref_init(&fbo->kref); 454 459 fbo->destroy = &ttm_transfered_destroy; ··· 666 661 */ 667 662 668 663 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); 669 - 670 - /* ttm_buffer_object_transfer accesses bo->sync_obj */ 671 - ret = ttm_buffer_object_transfer(bo, &ghost_obj); 672 664 spin_unlock(&bdev->fence_lock); 673 665 if (tmp_obj) 674 666 driver->sync_obj_unref(&tmp_obj); 675 667 668 + ret = ttm_buffer_object_transfer(bo, &ghost_obj); 676 669 if (ret) 677 670 return ret; 678 671
+3
drivers/hid/hid-ids.h
··· 306 306 #define USB_VENDOR_ID_EZKEY 0x0518 307 307 #define USB_DEVICE_ID_BTC_8193 0x0002 308 308 309 + #define USB_VENDOR_ID_FORMOSA 0x147a 310 + #define USB_DEVICE_ID_FORMOSA_IR_RECEIVER 0xe03e 311 + 309 312 #define USB_VENDOR_ID_FREESCALE 0x15A2 310 313 #define USB_DEVICE_ID_FREESCALE_MX28 0x004F 311 314
+12 -1
drivers/hid/i2c-hid/i2c-hid.c
··· 540 540 { 541 541 struct i2c_client *client = hid->driver_data; 542 542 int report_id = buf[0]; 543 + int ret; 543 544 544 545 if (report_type == HID_INPUT_REPORT) 545 546 return -EINVAL; 546 547 547 - return i2c_hid_set_report(client, 548 + if (report_id) { 549 + buf++; 550 + count--; 551 + } 552 + 553 + ret = i2c_hid_set_report(client, 548 554 report_type == HID_FEATURE_REPORT ? 0x03 : 0x02, 549 555 report_id, buf, count); 556 + 557 + if (report_id && ret >= 0) 558 + ret++; /* add report_id to the number of transfered bytes */ 559 + 560 + return ret; 550 561 } 551 562 552 563 static int i2c_hid_parse(struct hid_device *hid)
+1
drivers/hid/usbhid/hid-quirks.c
··· 70 70 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, 71 71 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 72 72 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 73 + { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, 73 74 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 74 75 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 75 76 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
+4
drivers/i2c/busses/i2c-designware-core.c
··· 34 34 #include <linux/io.h> 35 35 #include <linux/pm_runtime.h> 36 36 #include <linux/delay.h> 37 + #include <linux/module.h> 37 38 #include "i2c-designware-core.h" 38 39 39 40 /* ··· 726 725 return dw_readl(dev, DW_IC_COMP_PARAM_1); 727 726 } 728 727 EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param); 728 + 729 + MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core"); 730 + MODULE_LICENSE("GPL");
+4 -2
drivers/i2c/busses/i2c-mxs.c
··· 127 127 struct device *dev; 128 128 void __iomem *regs; 129 129 struct completion cmd_complete; 130 - u32 cmd_err; 130 + int cmd_err; 131 131 struct i2c_adapter adapter; 132 132 const struct mxs_i2c_speed_config *speed; 133 133 ··· 316 316 if (msg->len == 0) 317 317 return -EINVAL; 318 318 319 - init_completion(&i2c->cmd_complete); 319 + INIT_COMPLETION(i2c->cmd_complete); 320 320 i2c->cmd_err = 0; 321 321 322 322 ret = mxs_i2c_dma_setup_xfer(adap, msg, flags); ··· 472 472 473 473 i2c->dev = dev; 474 474 i2c->speed = &mxs_i2c_95kHz_config; 475 + 476 + init_completion(&i2c->cmd_complete); 475 477 476 478 if (dev->of_node) { 477 479 err = mxs_i2c_get_ofdata(i2c);
+3 -3
drivers/i2c/busses/i2c-omap.c
··· 803 803 if (stat & OMAP_I2C_STAT_AL) { 804 804 dev_err(dev->dev, "Arbitration lost\n"); 805 805 dev->cmd_err |= OMAP_I2C_STAT_AL; 806 - omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK); 806 + omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL); 807 807 } 808 808 809 809 return -EIO; ··· 963 963 i2c_omap_errata_i207(dev, stat); 964 964 965 965 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR); 966 - break; 966 + continue; 967 967 } 968 968 969 969 if (stat & OMAP_I2C_STAT_RRDY) { ··· 989 989 break; 990 990 991 991 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XDR); 992 - break; 992 + continue; 993 993 } 994 994 995 995 if (stat & OMAP_I2C_STAT_XRDY) {
+4
drivers/i2c/busses/i2c-sirf.c
··· 12 12 #include <linux/slab.h> 13 13 #include <linux/platform_device.h> 14 14 #include <linux/i2c.h> 15 + #include <linux/of_i2c.h> 15 16 #include <linux/clk.h> 16 17 #include <linux/err.h> 17 18 #include <linux/io.h> ··· 329 328 adap->algo = &i2c_sirfsoc_algo; 330 329 adap->algo_data = siic; 331 330 331 + adap->dev.of_node = pdev->dev.of_node; 332 332 adap->dev.parent = &pdev->dev; 333 333 adap->nr = pdev->id; 334 334 ··· 372 370 } 373 371 374 372 clk_disable(clk); 373 + 374 + of_i2c_register_devices(adap); 375 375 376 376 dev_info(&pdev->dev, " I2C adapter ready to operate\n"); 377 377
+1 -1
drivers/i2c/muxes/i2c-mux-pinctrl.c
··· 167 167 } 168 168 169 169 mux->busses = devm_kzalloc(&pdev->dev, 170 - sizeof(mux->busses) * mux->pdata->bus_count, 170 + sizeof(*mux->busses) * mux->pdata->bus_count, 171 171 GFP_KERNEL); 172 172 if (!mux->busses) { 173 173 dev_err(&pdev->dev, "Cannot allocate busses\n");
+1 -2
drivers/idle/intel_idle.c
··· 448 448 else 449 449 on_each_cpu(__setup_broadcast_timer, (void *)true, 1); 450 450 451 - register_cpu_notifier(&cpu_hotplug_notifier); 452 - 453 451 pr_debug(PREFIX "v" INTEL_IDLE_VERSION 454 452 " model 0x%X\n", boot_cpu_data.x86_model); 455 453 ··· 610 612 return retval; 611 613 } 612 614 } 615 + register_cpu_notifier(&cpu_hotplug_notifier); 613 616 614 617 return 0; 615 618 }
+3 -8
drivers/infiniband/hw/qib/qib_qp.c
··· 263 263 struct qib_qp __rcu **qpp; 264 264 265 265 qpp = &dev->qp_table[n]; 266 - q = rcu_dereference_protected(*qpp, 267 - lockdep_is_held(&dev->qpt_lock)); 268 - for (; q; qpp = &q->next) { 266 + for (; (q = rcu_dereference_protected(*qpp, 267 + lockdep_is_held(&dev->qpt_lock))) != NULL; 268 + qpp = &q->next) 269 269 if (q == qp) { 270 270 atomic_dec(&qp->refcount); 271 271 *qpp = qp->next; 272 272 rcu_assign_pointer(qp->next, NULL); 273 - q = rcu_dereference_protected(*qpp, 274 - lockdep_is_held(&dev->qpt_lock)); 275 273 break; 276 274 } 277 - q = rcu_dereference_protected(*qpp, 278 - lockdep_is_held(&dev->qpt_lock)); 279 - } 280 275 } 281 276 282 277 spin_unlock_irqrestore(&dev->qpt_lock, flags);
+3 -3
drivers/infiniband/ulp/ipoib/ipoib_cm.c
··· 741 741 742 742 tx_req->mapping = addr; 743 743 744 + skb_orphan(skb); 745 + skb_dst_drop(skb); 746 + 744 747 rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), 745 748 addr, skb->len); 746 749 if (unlikely(rc)) { ··· 754 751 } else { 755 752 dev->trans_start = jiffies; 756 753 ++tx->tx_head; 757 - 758 - skb_orphan(skb); 759 - skb_dst_drop(skb); 760 754 761 755 if (++priv->tx_outstanding == ipoib_sendq_size) { 762 756 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
+3 -3
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 600 600 netif_stop_queue(dev); 601 601 } 602 602 603 + skb_orphan(skb); 604 + skb_dst_drop(skb); 605 + 603 606 rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), 604 607 address->ah, qpn, tx_req, phead, hlen); 605 608 if (unlikely(rc)) { ··· 618 615 619 616 address->last_send = priv->tx_head; 620 617 ++priv->tx_head; 621 - 622 - skb_orphan(skb); 623 - skb_dst_drop(skb); 624 618 } 625 619 626 620 if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
+34
drivers/iommu/amd_iommu_init.c
··· 975 975 } 976 976 977 977 /* 978 + * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations) 979 + * Workaround: 980 + * BIOS should disable L2B miscellaneous clock gating by setting 981 + * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b 982 + */ 983 + static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) 984 + { 985 + u32 value; 986 + 987 + if ((boot_cpu_data.x86 != 0x15) || 988 + (boot_cpu_data.x86_model < 0x10) || 989 + (boot_cpu_data.x86_model > 0x1f)) 990 + return; 991 + 992 + pci_write_config_dword(iommu->dev, 0xf0, 0x90); 993 + pci_read_config_dword(iommu->dev, 0xf4, &value); 994 + 995 + if (value & BIT(2)) 996 + return; 997 + 998 + /* Select NB indirect register 0x90 and enable writing */ 999 + pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); 1000 + 1001 + pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); 1002 + pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n", 1003 + dev_name(&iommu->dev->dev)); 1004 + 1005 + /* Clear the enable writing bit */ 1006 + pci_write_config_dword(iommu->dev, 0xf0, 0x90); 1007 + } 1008 + 1009 + /* 978 1010 * This function clues the initialization function for one IOMMU 979 1011 * together and also allocates the command buffer and programs the 980 1012 * hardware. It does NOT enable the IOMMU. This is done afterwards. ··· 1203 1171 for (i = 0; i < 0x83; i++) 1204 1172 iommu->stored_l2[i] = iommu_read_l2(iommu, i); 1205 1173 } 1174 + 1175 + amd_iommu_erratum_746_workaround(iommu); 1206 1176 1207 1177 return pci_enable_device(iommu->dev); 1208 1178 }
+15 -6
drivers/iommu/intel-iommu.c
··· 4234 4234 .pgsize_bitmap = INTEL_IOMMU_PGSIZES, 4235 4235 }; 4236 4236 4237 + static void quirk_iommu_g4x_gfx(struct pci_dev *dev) 4238 + { 4239 + /* G4x/GM45 integrated gfx dmar support is totally busted. */ 4240 + printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n"); 4241 + dmar_map_gfx = 0; 4242 + } 4243 + 4244 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx); 4245 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx); 4246 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx); 4247 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx); 4248 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx); 4249 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx); 4250 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx); 4251 + 4237 4252 static void quirk_iommu_rwbf(struct pci_dev *dev) 4238 4253 { 4239 4254 /* ··· 4257 4242 */ 4258 4243 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n"); 4259 4244 rwbf_quirk = 1; 4260 - 4261 - /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */ 4262 - if (dev->revision == 0x07) { 4263 - printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n"); 4264 - dmar_map_gfx = 0; 4265 - } 4266 4245 } 4267 4246 4268 4247 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
+2
drivers/isdn/gigaset/capi.c
··· 248 248 CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l, 249 249 CAPIMSG_CONTROL(data)); 250 250 l -= 12; 251 + if (l <= 0) 252 + return; 251 253 dbgline = kmalloc(3 * l, GFP_ATOMIC); 252 254 if (!dbgline) 253 255 return;
+37 -64
drivers/md/dm-raid.c
··· 340 340 } 341 341 342 342 /* 343 - * validate_rebuild_devices 343 + * validate_raid_redundancy 344 344 * @rs 345 345 * 346 - * Determine if the devices specified for rebuild can result in a valid 347 - * usable array that is capable of rebuilding the given devices. 346 + * Determine if there are enough devices in the array that haven't 347 + * failed (or are being rebuilt) to form a usable array. 348 348 * 349 349 * Returns: 0 on success, -EINVAL on failure. 350 350 */ 351 - static int validate_rebuild_devices(struct raid_set *rs) 351 + static int validate_raid_redundancy(struct raid_set *rs) 352 352 { 353 353 unsigned i, rebuild_cnt = 0; 354 354 unsigned rebuilds_per_group, copies, d; 355 355 356 - if (!(rs->print_flags & DMPF_REBUILD)) 357 - return 0; 358 - 359 356 for (i = 0; i < rs->md.raid_disks; i++) 360 - if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) 357 + if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || 358 + !rs->dev[i].rdev.sb_page) 361 359 rebuild_cnt++; 362 360 363 361 switch (rs->raid_type->level) { ··· 391 393 * A A B B C 392 394 * C D D E E 393 395 */ 394 - rebuilds_per_group = 0; 395 396 for (i = 0; i < rs->md.raid_disks * copies; i++) { 397 + if (!(i % copies)) 398 + rebuilds_per_group = 0; 396 399 d = i % rs->md.raid_disks; 397 - if (!test_bit(In_sync, &rs->dev[d].rdev.flags) && 400 + if ((!rs->dev[d].rdev.sb_page || 401 + !test_bit(In_sync, &rs->dev[d].rdev.flags)) && 398 402 (++rebuilds_per_group >= copies)) 399 403 goto too_many; 400 - if (!((i + 1) % copies)) 401 - rebuilds_per_group = 0; 402 404 } 403 405 break; 404 406 default: 405 - DMERR("The rebuild parameter is not supported for %s", 406 - rs->raid_type->name); 407 - rs->ti->error = "Rebuild not supported for this RAID type"; 408 - return -EINVAL; 407 + if (rebuild_cnt) 408 + return -EINVAL; 409 409 } 410 410 411 411 return 0; 412 412 413 413 too_many: 414 - rs->ti->error = "Too many rebuild devices specified"; 415 414 return -EINVAL; 416 415 } 417 416 ··· 658 663 return -EINVAL; 
659 664 } 660 665 rs->md.dev_sectors = sectors_per_dev; 661 - 662 - if (validate_rebuild_devices(rs)) 663 - return -EINVAL; 664 666 665 667 /* Assume there are no metadata devices until the drives are parsed */ 666 668 rs->md.persistent = 0; ··· 987 995 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) 988 996 { 989 997 int ret; 990 - unsigned redundancy = 0; 991 998 struct raid_dev *dev; 992 999 struct md_rdev *rdev, *tmp, *freshest; 993 1000 struct mddev *mddev = &rs->md; 994 - 995 - switch (rs->raid_type->level) { 996 - case 1: 997 - redundancy = rs->md.raid_disks - 1; 998 - break; 999 - case 4: 1000 - case 5: 1001 - case 6: 1002 - redundancy = rs->raid_type->parity_devs; 1003 - break; 1004 - case 10: 1005 - redundancy = raid10_md_layout_to_copies(mddev->layout) - 1; 1006 - break; 1007 - default: 1008 - ti->error = "Unknown RAID type"; 1009 - return -EINVAL; 1010 - } 1011 1001 1012 1002 freshest = NULL; 1013 1003 rdev_for_each_safe(rdev, tmp, mddev) { ··· 1019 1045 break; 1020 1046 default: 1021 1047 dev = container_of(rdev, struct raid_dev, rdev); 1022 - if (redundancy--) { 1023 - if (dev->meta_dev) 1024 - dm_put_device(ti, dev->meta_dev); 1048 + if (dev->meta_dev) 1049 + dm_put_device(ti, dev->meta_dev); 1025 1050 1026 - dev->meta_dev = NULL; 1027 - rdev->meta_bdev = NULL; 1051 + dev->meta_dev = NULL; 1052 + rdev->meta_bdev = NULL; 1028 1053 1029 - if (rdev->sb_page) 1030 - put_page(rdev->sb_page); 1054 + if (rdev->sb_page) 1055 + put_page(rdev->sb_page); 1031 1056 1032 - rdev->sb_page = NULL; 1057 + rdev->sb_page = NULL; 1033 1058 1034 - rdev->sb_loaded = 0; 1059 + rdev->sb_loaded = 0; 1035 1060 1036 - /* 1037 - * We might be able to salvage the data device 1038 - * even though the meta device has failed. For 1039 - * now, we behave as though '- -' had been 1040 - * set for this device in the table. 
1041 - */ 1042 - if (dev->data_dev) 1043 - dm_put_device(ti, dev->data_dev); 1061 + /* 1062 + * We might be able to salvage the data device 1063 + * even though the meta device has failed. For 1064 + * now, we behave as though '- -' had been 1065 + * set for this device in the table. 1066 + */ 1067 + if (dev->data_dev) 1068 + dm_put_device(ti, dev->data_dev); 1044 1069 1045 - dev->data_dev = NULL; 1046 - rdev->bdev = NULL; 1070 + dev->data_dev = NULL; 1071 + rdev->bdev = NULL; 1047 1072 1048 - list_del(&rdev->same_set); 1049 - 1050 - continue; 1051 - } 1052 - ti->error = "Failed to load superblock"; 1053 - return ret; 1073 + list_del(&rdev->same_set); 1054 1074 } 1055 1075 } 1056 1076 1057 1077 if (!freshest) 1058 1078 return 0; 1079 + 1080 + if (validate_raid_redundancy(rs)) { 1081 + rs->ti->error = "Insufficient redundancy to activate array"; 1082 + return -EINVAL; 1083 + } 1059 1084 1060 1085 /* 1061 1086 * Validation of the freshest device provides the source of ··· 1405 1432 1406 1433 static struct target_type raid_target = { 1407 1434 .name = "raid", 1408 - .version = {1, 4, 0}, 1435 + .version = {1, 4, 1}, 1409 1436 .module = THIS_MODULE, 1410 1437 .ctr = raid_ctr, 1411 1438 .dtr = raid_dtr,
+1 -12
drivers/md/dm-thin.c
··· 2746 2746 return 0; 2747 2747 } 2748 2748 2749 - /* 2750 - * A thin device always inherits its queue limits from its pool. 2751 - */ 2752 - static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) 2753 - { 2754 - struct thin_c *tc = ti->private; 2755 - 2756 - *limits = bdev_get_queue(tc->pool_dev->bdev)->limits; 2757 - } 2758 - 2759 2749 static struct target_type thin_target = { 2760 2750 .name = "thin", 2761 - .version = {1, 6, 0}, 2751 + .version = {1, 7, 0}, 2762 2752 .module = THIS_MODULE, 2763 2753 .ctr = thin_ctr, 2764 2754 .dtr = thin_dtr, ··· 2757 2767 .postsuspend = thin_postsuspend, 2758 2768 .status = thin_status, 2759 2769 .iterate_devices = thin_iterate_devices, 2760 - .io_hints = thin_io_hints, 2761 2770 }; 2762 2771 2763 2772 /*----------------------------------------------------------------*/
+4 -2
drivers/md/dm.c
··· 1188 1188 { 1189 1189 struct dm_target *ti; 1190 1190 sector_t len; 1191 + unsigned num_requests; 1191 1192 1192 1193 do { 1193 1194 ti = dm_table_find_target(ci->map, ci->sector); ··· 1201 1200 * reconfiguration might also have changed that since the 1202 1201 * check was performed. 1203 1202 */ 1204 - if (!get_num_requests || !get_num_requests(ti)) 1203 + num_requests = get_num_requests ? get_num_requests(ti) : 0; 1204 + if (!num_requests) 1205 1205 return -EOPNOTSUPP; 1206 1206 1207 1207 if (is_split_required && !is_split_required(ti)) ··· 1210 1208 else 1211 1209 len = min(ci->sector_count, max_io_len(ci->sector, ti)); 1212 1210 1213 - __issue_target_requests(ci, ti, ti->num_discard_requests, len); 1211 + __issue_target_requests(ci, ti, num_requests, len); 1214 1212 1215 1213 ci->sector += len; 1216 1214 } while (ci->sector_count -= len);
+1 -1
drivers/media/platform/coda.c
··· 23 23 #include <linux/slab.h> 24 24 #include <linux/videodev2.h> 25 25 #include <linux/of.h> 26 + #include <linux/platform_data/imx-iram.h> 26 27 27 - #include <mach/iram.h> 28 28 #include <media/v4l2-ctrls.h> 29 29 #include <media/v4l2-device.h> 30 30 #include <media/v4l2-ioctl.h>
+1
drivers/media/radio/radio-keene.c
··· 374 374 radio->vdev.ioctl_ops = &usb_keene_ioctl_ops; 375 375 radio->vdev.lock = &radio->lock; 376 376 radio->vdev.release = video_device_release_empty; 377 + radio->vdev.vfl_dir = VFL_DIR_TX; 377 378 378 379 radio->usbdev = interface_to_usbdev(intf); 379 380 radio->intf = intf;
+1
drivers/media/radio/radio-si4713.c
··· 250 250 .name = "radio-si4713", 251 251 .release = video_device_release, 252 252 .ioctl_ops = &radio_si4713_ioctl_ops, 253 + .vfl_dir = VFL_DIR_TX, 253 254 }; 254 255 255 256 /* Platform driver interface */
+1
drivers/media/radio/radio-wl1273.c
··· 1971 1971 .ioctl_ops = &wl1273_ioctl_ops, 1972 1972 .name = WL1273_FM_DRIVER_NAME, 1973 1973 .release = wl1273_vdev_release, 1974 + .vfl_dir = VFL_DIR_TX, 1974 1975 }; 1975 1976 1976 1977 static int wl1273_fm_radio_remove(struct platform_device *pdev)
+10
drivers/media/radio/wl128x/fmdrv_v4l2.c
··· 518 518 .ioctl_ops = &fm_drv_ioctl_ops, 519 519 .name = FM_DRV_NAME, 520 520 .release = video_device_release, 521 + /* 522 + * To ensure both the tuner and modulator ioctls are accessible we 523 + * set the vfl_dir to M2M to indicate this. 524 + * 525 + * It is not really a mem2mem device of course, but it can both receive 526 + * and transmit using the same radio device. It's the only radio driver 527 + * that does this and it should really be split in two radio devices, 528 + * but that would affect applications using this driver. 529 + */ 530 + .vfl_dir = VFL_DIR_M2M, 521 531 }; 522 532 523 533 int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
+1
drivers/mfd/Kconfig
··· 237 237 depends on I2C=y && GPIOLIB 238 238 select MFD_CORE 239 239 select REGMAP_I2C 240 + select REGMAP_IRQ 240 241 select IRQ_DOMAIN 241 242 help 242 243 if you say yes here you get support for the TPS65910 series of
+1
drivers/mfd/ab8500-core.c
··· 19 19 #include <linux/mfd/core.h> 20 20 #include <linux/mfd/abx500.h> 21 21 #include <linux/mfd/abx500/ab8500.h> 22 + #include <linux/mfd/abx500/ab8500-bm.h> 22 23 #include <linux/mfd/dbx500-prcmu.h> 23 24 #include <linux/regulator/ab8500.h> 24 25 #include <linux/of.h>
+6 -1
drivers/mfd/arizona-core.c
··· 239 239 return ret; 240 240 } 241 241 242 - regcache_sync(arizona->regmap); 242 + ret = regcache_sync(arizona->regmap); 243 + if (ret != 0) { 244 + dev_err(arizona->dev, "Failed to restore register cache\n"); 245 + regulator_disable(arizona->dcvdd); 246 + return ret; 247 + } 243 248 244 249 return 0; 245 250 }
+2 -16
drivers/mfd/arizona-irq.c
··· 176 176 aod = &wm5102_aod; 177 177 irq = &wm5102_irq; 178 178 179 - switch (arizona->rev) { 180 - case 0: 181 - case 1: 182 - ctrlif_error = false; 183 - break; 184 - default: 185 - break; 186 - } 179 + ctrlif_error = false; 187 180 break; 188 181 #endif 189 182 #ifdef CONFIG_MFD_WM5110 ··· 184 191 aod = &wm5110_aod; 185 192 irq = &wm5110_irq; 186 193 187 - switch (arizona->rev) { 188 - case 0: 189 - case 1: 190 - ctrlif_error = false; 191 - break; 192 - default: 193 - break; 194 - } 194 + ctrlif_error = false; 195 195 break; 196 196 #endif 197 197 default:
+61
drivers/mfd/da9052-i2c.c
··· 27 27 #include <linux/of_device.h> 28 28 #endif 29 29 30 + /* I2C safe register check */ 31 + static inline bool i2c_safe_reg(unsigned char reg) 32 + { 33 + switch (reg) { 34 + case DA9052_STATUS_A_REG: 35 + case DA9052_STATUS_B_REG: 36 + case DA9052_STATUS_C_REG: 37 + case DA9052_STATUS_D_REG: 38 + case DA9052_ADC_RES_L_REG: 39 + case DA9052_ADC_RES_H_REG: 40 + case DA9052_VDD_RES_REG: 41 + case DA9052_ICHG_AV_REG: 42 + case DA9052_TBAT_RES_REG: 43 + case DA9052_ADCIN4_RES_REG: 44 + case DA9052_ADCIN5_RES_REG: 45 + case DA9052_ADCIN6_RES_REG: 46 + case DA9052_TJUNC_RES_REG: 47 + case DA9052_TSI_X_MSB_REG: 48 + case DA9052_TSI_Y_MSB_REG: 49 + case DA9052_TSI_LSB_REG: 50 + case DA9052_TSI_Z_MSB_REG: 51 + return true; 52 + default: 53 + return false; 54 + } 55 + } 56 + 57 + /* 58 + * There is an issue with DA9052 and DA9053_AA/BA/BB PMIC where the PMIC 59 + * gets locked up or fails to respond following a system reset. 60 + * This fix is to follow any read or write with a dummy read to a safe 61 + * register. 62 + */ 63 + int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg) 64 + { 65 + int val; 66 + 67 + switch (da9052->chip_id) { 68 + case DA9052: 69 + case DA9053_AA: 70 + case DA9053_BA: 71 + case DA9053_BB: 72 + /* A dummy read to a safe register address. */ 73 + if (!i2c_safe_reg(reg)) 74 + return regmap_read(da9052->regmap, 75 + DA9052_PARK_REGISTER, 76 + &val); 77 + break; 78 + default: 79 + /* 80 + * For other chips parking of I2C register 81 + * to a safe place is not required. 82 + */ 83 + break; 84 + } 85 + 86 + return 0; 87 + } 88 + EXPORT_SYMBOL(da9052_i2c_fix); 89 + 30 90 static int da9052_i2c_enable_multiwrite(struct da9052 *da9052) 31 91 { 32 92 int reg_val, ret; ··· 143 83 144 84 da9052->dev = &client->dev; 145 85 da9052->chip_irq = client->irq; 86 + da9052->fix_io = da9052_i2c_fix; 146 87 147 88 i2c_set_clientdata(client, da9052); 148 89
+9 -4
drivers/mfd/db8500-prcmu.c
··· 2524 2524 2525 2525 for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) { 2526 2526 if (ev & prcmu_irq_bit[n]) 2527 - generic_handle_irq(IRQ_PRCMU_BASE + n); 2527 + generic_handle_irq(irq_find_mapping(db8500_irq_domain, n)); 2528 2528 } 2529 2529 r = true; 2530 2530 break; ··· 2737 2737 } 2738 2738 2739 2739 static struct irq_domain_ops db8500_irq_ops = { 2740 - .map = db8500_irq_map, 2741 - .xlate = irq_domain_xlate_twocell, 2740 + .map = db8500_irq_map, 2741 + .xlate = irq_domain_xlate_twocell, 2742 2742 }; 2743 2743 2744 2744 static int db8500_irq_init(struct device_node *np) 2745 2745 { 2746 - int irq_base = -1; 2746 + int irq_base = 0; 2747 + int i; 2747 2748 2748 2749 /* In the device tree case, just take some IRQs */ 2749 2750 if (!np) ··· 2758 2757 pr_err("Failed to create irqdomain\n"); 2759 2758 return -ENOSYS; 2760 2759 } 2760 + 2761 + /* All wakeups will be used, so create mappings for all */ 2762 + for (i = 0; i < NUM_PRCMU_WAKEUPS; i++) 2763 + irq_create_mapping(db8500_irq_domain, i); 2761 2764 2762 2765 return 0; 2763 2766 }
+9 -9
drivers/mfd/max77686.c
··· 93 93 if (max77686 == NULL) 94 94 return -ENOMEM; 95 95 96 - max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config); 97 - if (IS_ERR(max77686->regmap)) { 98 - ret = PTR_ERR(max77686->regmap); 99 - dev_err(max77686->dev, "Failed to allocate register map: %d\n", 100 - ret); 101 - kfree(max77686); 102 - return ret; 103 - } 104 - 105 96 i2c_set_clientdata(i2c, max77686); 106 97 max77686->dev = &i2c->dev; 107 98 max77686->i2c = i2c; ··· 101 110 max77686->wakeup = pdata->wakeup; 102 111 max77686->irq_gpio = pdata->irq_gpio; 103 112 max77686->irq = i2c->irq; 113 + 114 + max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config); 115 + if (IS_ERR(max77686->regmap)) { 116 + ret = PTR_ERR(max77686->regmap); 117 + dev_err(max77686->dev, "Failed to allocate register map: %d\n", 118 + ret); 119 + kfree(max77686); 120 + return ret; 121 + } 104 122 105 123 if (regmap_read(max77686->regmap, 106 124 MAX77686_REG_DEVICE_ID, &data) < 0) {
+18 -16
drivers/mfd/max77693.c
··· 114 114 u8 reg_data; 115 115 int ret = 0; 116 116 117 + if (!pdata) { 118 + dev_err(&i2c->dev, "No platform data found.\n"); 119 + return -EINVAL; 120 + } 121 + 117 122 max77693 = devm_kzalloc(&i2c->dev, 118 123 sizeof(struct max77693_dev), GFP_KERNEL); 119 124 if (max77693 == NULL) 120 125 return -ENOMEM; 121 - 122 - max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config); 123 - if (IS_ERR(max77693->regmap)) { 124 - ret = PTR_ERR(max77693->regmap); 125 - dev_err(max77693->dev,"failed to allocate register map: %d\n", 126 - ret); 127 - goto err_regmap; 128 - } 129 126 130 127 i2c_set_clientdata(i2c, max77693); 131 128 max77693->dev = &i2c->dev; ··· 130 133 max77693->irq = i2c->irq; 131 134 max77693->type = id->driver_data; 132 135 133 - if (!pdata) 134 - goto err_regmap; 136 + max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config); 137 + if (IS_ERR(max77693->regmap)) { 138 + ret = PTR_ERR(max77693->regmap); 139 + dev_err(max77693->dev, "failed to allocate register map: %d\n", 140 + ret); 141 + return ret; 142 + } 135 143 136 144 max77693->wakeup = pdata->wakeup; 137 145 138 - if (max77693_read_reg(max77693->regmap, 139 - MAX77693_PMIC_REG_PMIC_ID2, &reg_data) < 0) { 146 + ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2, 147 + &reg_data); 148 + if (ret < 0) { 140 149 dev_err(max77693->dev, "device not found on this channel\n"); 141 - ret = -ENODEV; 142 - goto err_regmap; 150 + return ret; 143 151 } else 144 152 dev_info(max77693->dev, "device ID: 0x%x\n", reg_data); 145 153 ··· 165 163 ret = PTR_ERR(max77693->regmap_muic); 166 164 dev_err(max77693->dev, 167 165 "failed to allocate register map: %d\n", ret); 168 - goto err_regmap; 166 + goto err_regmap_muic; 169 167 } 170 168 171 169 ret = max77693_irq_init(max77693); ··· 186 184 err_mfd: 187 185 max77693_irq_exit(max77693); 188 186 err_irq: 187 + err_regmap_muic: 189 188 i2c_unregister_device(max77693->muic); 190 189 i2c_unregister_device(max77693->haptic); 191 - 
err_regmap: 192 190 return ret; 193 191 } 194 192
+2 -3
drivers/mfd/pcf50633-core.c
··· 208 208 if (!pcf) 209 209 return -ENOMEM; 210 210 211 + i2c_set_clientdata(client, pcf); 212 + pcf->dev = &client->dev; 211 213 pcf->pdata = pdata; 212 214 213 215 mutex_init(&pcf->lock); ··· 220 218 dev_err(pcf->dev, "Failed to allocate register map: %d\n", ret); 221 219 return ret; 222 220 } 223 - 224 - i2c_set_clientdata(client, pcf); 225 - pcf->dev = &client->dev; 226 221 227 222 version = pcf50633_reg_read(pcf, 0); 228 223 variant = pcf50633_reg_read(pcf, 1);
+29
drivers/mfd/rtl8411.c
··· 112 112 BPP_LDO_POWB, BPP_LDO_SUSPEND); 113 113 } 114 114 115 + static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) 116 + { 117 + u8 mask, val; 118 + 119 + mask = (BPP_REG_TUNED18 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_MASK; 120 + if (voltage == OUTPUT_3V3) 121 + val = (BPP_ASIC_3V3 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_3V3; 122 + else if (voltage == OUTPUT_1V8) 123 + val = (BPP_ASIC_1V8 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_1V8; 124 + else 125 + return -EINVAL; 126 + 127 + return rtsx_pci_write_register(pcr, LDO_CTL, mask, val); 128 + } 129 + 115 130 static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr) 116 131 { 117 132 unsigned int card_exist; ··· 178 163 return card_exist; 179 164 } 180 165 166 + static int rtl8411_conv_clk_and_div_n(int input, int dir) 167 + { 168 + int output; 169 + 170 + if (dir == CLK_TO_DIV_N) 171 + output = input * 4 / 5 - 2; 172 + else 173 + output = (input + 2) * 5 / 4; 174 + 175 + return output; 176 + } 177 + 181 178 static const struct pcr_ops rtl8411_pcr_ops = { 182 179 .extra_init_hw = rtl8411_extra_init_hw, 183 180 .optimize_phy = NULL, ··· 199 172 .disable_auto_blink = rtl8411_disable_auto_blink, 200 173 .card_power_on = rtl8411_card_power_on, 201 174 .card_power_off = rtl8411_card_power_off, 175 + .switch_output_voltage = rtl8411_switch_output_voltage, 202 176 .cd_deglitch = rtl8411_cd_deglitch, 177 + .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n, 203 178 }; 204 179 205 180 /* SD Pull Control Enable:
+21
drivers/mfd/rts5209.c
··· 144 144 return rtsx_pci_send_cmd(pcr, 100); 145 145 } 146 146 147 + static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) 148 + { 149 + int err; 150 + 151 + if (voltage == OUTPUT_3V3) { 152 + err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24); 153 + if (err < 0) 154 + return err; 155 + } else if (voltage == OUTPUT_1V8) { 156 + err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24); 157 + if (err < 0) 158 + return err; 159 + } else { 160 + return -EINVAL; 161 + } 162 + 163 + return 0; 164 + } 165 + 147 166 static const struct pcr_ops rts5209_pcr_ops = { 148 167 .extra_init_hw = rts5209_extra_init_hw, 149 168 .optimize_phy = rts5209_optimize_phy, ··· 172 153 .disable_auto_blink = rts5209_disable_auto_blink, 173 154 .card_power_on = rts5209_card_power_on, 174 155 .card_power_off = rts5209_card_power_off, 156 + .switch_output_voltage = rts5209_switch_output_voltage, 175 157 .cd_deglitch = NULL, 158 + .conv_clk_and_div_n = NULL, 176 159 }; 177 160 178 161 /* SD Pull Control Enable:
+21
drivers/mfd/rts5229.c
··· 114 114 return rtsx_pci_send_cmd(pcr, 100); 115 115 } 116 116 117 + static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) 118 + { 119 + int err; 120 + 121 + if (voltage == OUTPUT_3V3) { 122 + err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24); 123 + if (err < 0) 124 + return err; 125 + } else if (voltage == OUTPUT_1V8) { 126 + err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24); 127 + if (err < 0) 128 + return err; 129 + } else { 130 + return -EINVAL; 131 + } 132 + 133 + return 0; 134 + } 135 + 117 136 static const struct pcr_ops rts5229_pcr_ops = { 118 137 .extra_init_hw = rts5229_extra_init_hw, 119 138 .optimize_phy = rts5229_optimize_phy, ··· 142 123 .disable_auto_blink = rts5229_disable_auto_blink, 143 124 .card_power_on = rts5229_card_power_on, 144 125 .card_power_off = rts5229_card_power_off, 126 + .switch_output_voltage = rts5229_switch_output_voltage, 145 127 .cd_deglitch = NULL, 128 + .conv_clk_and_div_n = NULL, 146 129 }; 147 130 148 131 /* SD Pull Control Enable:
+23 -4
drivers/mfd/rtsx_pcr.c
··· 630 630 if (clk == pcr->cur_clock) 631 631 return 0; 632 632 633 - N = (u8)(clk - 2); 633 + if (pcr->ops->conv_clk_and_div_n) 634 + N = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N); 635 + else 636 + N = (u8)(clk - 2); 634 637 if ((clk <= 2) || (N > max_N)) 635 638 return -EINVAL; 636 639 ··· 644 641 /* Make sure that the SSC clock div_n is equal or greater than min_N */ 645 642 div = CLK_DIV_1; 646 643 while ((N < min_N) && (div < max_div)) { 647 - N = (N + 2) * 2 - 2; 644 + if (pcr->ops->conv_clk_and_div_n) { 645 + int dbl_clk = pcr->ops->conv_clk_and_div_n(N, 646 + DIV_N_TO_CLK) * 2; 647 + N = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk, 648 + CLK_TO_DIV_N); 649 + } else { 650 + N = (N + 2) * 2 - 2; 651 + } 648 652 div++; 649 653 } 650 654 dev_dbg(&(pcr->pci->dev), "N = %d, div = %d\n", N, div); ··· 712 702 return 0; 713 703 } 714 704 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off); 705 + 706 + int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) 707 + { 708 + if (pcr->ops->switch_output_voltage) 709 + return pcr->ops->switch_output_voltage(pcr, voltage); 710 + 711 + return 0; 712 + } 713 + EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage); 715 714 716 715 unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr) 717 716 { ··· 786 767 787 768 spin_unlock_irqrestore(&pcr->lock, flags); 788 769 789 - if (card_detect & SD_EXIST) 770 + if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event) 790 771 pcr->slots[RTSX_SD_CARD].card_event( 791 772 pcr->slots[RTSX_SD_CARD].p_dev); 792 - if (card_detect & MS_EXIST) 773 + if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event) 793 774 pcr->slots[RTSX_MS_CARD].card_event( 794 775 pcr->slots[RTSX_MS_CARD].p_dev); 795 776 }
+5 -12
drivers/mfd/tc3589x.c
··· 219 219 } 220 220 221 221 static struct irq_domain_ops tc3589x_irq_ops = { 222 - .map = tc3589x_irq_map, 222 + .map = tc3589x_irq_map, 223 223 .unmap = tc3589x_irq_unmap, 224 - .xlate = irq_domain_xlate_twocell, 224 + .xlate = irq_domain_xlate_twocell, 225 225 }; 226 226 227 227 static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np) 228 228 { 229 229 int base = tc3589x->irq_base; 230 230 231 - if (base) { 232 - tc3589x->domain = irq_domain_add_legacy( 233 - NULL, TC3589x_NR_INTERNAL_IRQS, base, 234 - 0, &tc3589x_irq_ops, tc3589x); 235 - } 236 - else { 237 - tc3589x->domain = irq_domain_add_linear( 238 - np, TC3589x_NR_INTERNAL_IRQS, 239 - &tc3589x_irq_ops, tc3589x); 240 - } 231 + tc3589x->domain = irq_domain_add_simple( 232 + np, TC3589x_NR_INTERNAL_IRQS, base, 233 + &tc3589x_irq_ops, tc3589x); 241 234 242 235 if (!tc3589x->domain) { 243 236 dev_err(tc3589x->dev, "Failed to create irqdomain\n");
+1 -1
drivers/mfd/twl4030-power.c
··· 159 159 static int twl4030_write_script(u8 address, struct twl4030_ins *script, 160 160 int len) 161 161 { 162 - int err; 162 + int err = -EINVAL; 163 163 164 164 for (; len; len--, address++, script++) { 165 165 if (len == 1) {
+6 -2
drivers/mfd/vexpress-config.c
··· 67 67 68 68 return bridge; 69 69 } 70 + EXPORT_SYMBOL(vexpress_config_bridge_register); 70 71 71 72 void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge) 72 73 { ··· 84 83 while (!list_empty(&__bridge.transactions)) 85 84 cpu_relax(); 86 85 } 86 + EXPORT_SYMBOL(vexpress_config_bridge_unregister); 87 87 88 88 89 89 struct vexpress_config_func { ··· 144 142 145 143 return func; 146 144 } 145 + EXPORT_SYMBOL(__vexpress_config_func_get); 147 146 148 147 void vexpress_config_func_put(struct vexpress_config_func *func) 149 148 { ··· 152 149 of_node_put(func->bridge->node); 153 150 kfree(func); 154 151 } 155 - 152 + EXPORT_SYMBOL(vexpress_config_func_put); 156 153 157 154 struct vexpress_config_trans { 158 155 struct vexpress_config_func *func; ··· 232 229 233 230 complete(&trans->completion); 234 231 } 232 + EXPORT_SYMBOL(vexpress_config_complete); 235 233 236 234 int vexpress_config_wait(struct vexpress_config_trans *trans) 237 235 { ··· 240 236 241 237 return trans->status; 242 238 } 243 - 239 + EXPORT_SYMBOL(vexpress_config_wait); 244 240 245 241 int vexpress_config_read(struct vexpress_config_func *func, int offset, 246 242 u32 *data)
+20 -12
drivers/mfd/vexpress-sysreg.c
··· 313 313 } 314 314 315 315 316 - void __init vexpress_sysreg_early_init(void __iomem *base) 316 + void __init vexpress_sysreg_setup(struct device_node *node) 317 317 { 318 - struct device_node *node = of_find_compatible_node(NULL, NULL, 319 - "arm,vexpress-sysreg"); 320 - 321 - if (node) 322 - base = of_iomap(node, 0); 323 - 324 - if (WARN_ON(!base)) 318 + if (WARN_ON(!vexpress_sysreg_base)) 325 319 return; 326 - 327 - vexpress_sysreg_base = base; 328 320 329 321 if (readl(vexpress_sysreg_base + SYS_MISC) & SYS_MISC_MASTERSITE) 330 322 vexpress_master_site = VEXPRESS_SITE_DB2; ··· 328 336 WARN_ON(!vexpress_sysreg_config_bridge); 329 337 } 330 338 339 + void __init vexpress_sysreg_early_init(void __iomem *base) 340 + { 341 + vexpress_sysreg_base = base; 342 + vexpress_sysreg_setup(NULL); 343 + } 344 + 331 345 void __init vexpress_sysreg_of_early_init(void) 332 346 { 333 - vexpress_sysreg_early_init(NULL); 347 + struct device_node *node = of_find_compatible_node(NULL, NULL, 348 + "arm,vexpress-sysreg"); 349 + 350 + if (node) { 351 + vexpress_sysreg_base = of_iomap(node, 0); 352 + vexpress_sysreg_setup(node); 353 + } else { 354 + pr_info("vexpress-sysreg: No Device Tree node found."); 355 + } 334 356 } 335 357 336 358 ··· 432 426 return -EBUSY; 433 427 } 434 428 435 - if (!vexpress_sysreg_base) 429 + if (!vexpress_sysreg_base) { 436 430 vexpress_sysreg_base = devm_ioremap(&pdev->dev, res->start, 437 431 resource_size(res)); 432 + vexpress_sysreg_setup(pdev->dev.of_node); 433 + } 438 434 439 435 if (!vexpress_sysreg_base) { 440 436 dev_err(&pdev->dev, "Failed to obtain base address!\n");
+1 -1
drivers/mfd/wm5102-tables.c
··· 1882 1882 } 1883 1883 } 1884 1884 1885 - #define WM5102_MAX_REGISTER 0x1a8fff 1885 + #define WM5102_MAX_REGISTER 0x1a9800 1886 1886 1887 1887 const struct regmap_config wm5102_spi_regmap = { 1888 1888 .reg_bits = 32,
+30 -62
drivers/mmc/host/mvsdio.c
··· 50 50 struct timer_list timer; 51 51 struct mmc_host *mmc; 52 52 struct device *dev; 53 - struct resource *res; 54 - int irq; 55 53 struct clk *clk; 56 54 int gpio_card_detect; 57 55 int gpio_write_protect; ··· 716 718 if (!r || irq < 0 || !mvsd_data) 717 719 return -ENXIO; 718 720 719 - r = request_mem_region(r->start, SZ_1K, DRIVER_NAME); 720 - if (!r) 721 - return -EBUSY; 722 - 723 721 mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); 724 722 if (!mmc) { 725 723 ret = -ENOMEM; ··· 725 731 host = mmc_priv(mmc); 726 732 host->mmc = mmc; 727 733 host->dev = &pdev->dev; 728 - host->res = r; 729 734 host->base_clock = mvsd_data->clock / 2; 735 + host->clk = ERR_PTR(-EINVAL); 730 736 731 737 mmc->ops = &mvsd_ops; 732 738 ··· 746 752 747 753 spin_lock_init(&host->lock); 748 754 749 - host->base = ioremap(r->start, SZ_4K); 755 + host->base = devm_request_and_ioremap(&pdev->dev, r); 750 756 if (!host->base) { 751 757 ret = -ENOMEM; 752 758 goto out; ··· 759 765 760 766 mvsd_power_down(host); 761 767 762 - ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host); 768 + ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host); 763 769 if (ret) { 764 770 pr_err("%s: cannot assign irq %d\n", DRIVER_NAME, irq); 765 771 goto out; 766 - } else 767 - host->irq = irq; 772 + } 768 773 769 774 /* Not all platforms can gate the clock, so it is not 770 775 an error if the clock does not exist. 
*/ 771 - host->clk = clk_get(&pdev->dev, NULL); 772 - if (!IS_ERR(host->clk)) { 776 + host->clk = devm_clk_get(&pdev->dev, NULL); 777 + if (!IS_ERR(host->clk)) 773 778 clk_prepare_enable(host->clk); 774 - } 775 779 776 780 if (mvsd_data->gpio_card_detect) { 777 - ret = gpio_request(mvsd_data->gpio_card_detect, 778 - DRIVER_NAME " cd"); 781 + ret = devm_gpio_request_one(&pdev->dev, 782 + mvsd_data->gpio_card_detect, 783 + GPIOF_IN, DRIVER_NAME " cd"); 779 784 if (ret == 0) { 780 - gpio_direction_input(mvsd_data->gpio_card_detect); 781 785 irq = gpio_to_irq(mvsd_data->gpio_card_detect); 782 - ret = request_irq(irq, mvsd_card_detect_irq, 783 - IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING, 784 - DRIVER_NAME " cd", host); 786 + ret = devm_request_irq(&pdev->dev, irq, 787 + mvsd_card_detect_irq, 788 + IRQ_TYPE_EDGE_RISING | 789 + IRQ_TYPE_EDGE_FALLING, 790 + DRIVER_NAME " cd", host); 785 791 if (ret == 0) 786 792 host->gpio_card_detect = 787 793 mvsd_data->gpio_card_detect; 788 794 else 789 - gpio_free(mvsd_data->gpio_card_detect); 795 + devm_gpio_free(&pdev->dev, 796 + mvsd_data->gpio_card_detect); 790 797 } 791 798 } 792 799 if (!host->gpio_card_detect) 793 800 mmc->caps |= MMC_CAP_NEEDS_POLL; 794 801 795 802 if (mvsd_data->gpio_write_protect) { 796 - ret = gpio_request(mvsd_data->gpio_write_protect, 797 - DRIVER_NAME " wp"); 803 + ret = devm_gpio_request_one(&pdev->dev, 804 + mvsd_data->gpio_write_protect, 805 + GPIOF_IN, DRIVER_NAME " wp"); 798 806 if (ret == 0) { 799 - gpio_direction_input(mvsd_data->gpio_write_protect); 800 807 host->gpio_write_protect = 801 808 mvsd_data->gpio_write_protect; 802 809 } ··· 819 824 return 0; 820 825 821 826 out: 822 - if (host) { 823 - if (host->irq) 824 - free_irq(host->irq, host); 825 - if (host->gpio_card_detect) { 826 - free_irq(gpio_to_irq(host->gpio_card_detect), host); 827 - gpio_free(host->gpio_card_detect); 828 - } 829 - if (host->gpio_write_protect) 830 - gpio_free(host->gpio_write_protect); 831 - if (host->base) 832 - 
iounmap(host->base); 833 - } 834 - if (r) 835 - release_resource(r); 836 - if (mmc) 837 - if (!IS_ERR_OR_NULL(host->clk)) { 827 + if (mmc) { 828 + if (!IS_ERR(host->clk)) 838 829 clk_disable_unprepare(host->clk); 839 - clk_put(host->clk); 840 - } 841 830 mmc_free_host(mmc); 831 + } 842 832 843 833 return ret; 844 834 } ··· 832 852 { 833 853 struct mmc_host *mmc = platform_get_drvdata(pdev); 834 854 835 - if (mmc) { 836 - struct mvsd_host *host = mmc_priv(mmc); 855 + struct mvsd_host *host = mmc_priv(mmc); 837 856 838 - if (host->gpio_card_detect) { 839 - free_irq(gpio_to_irq(host->gpio_card_detect), host); 840 - gpio_free(host->gpio_card_detect); 841 - } 842 - mmc_remove_host(mmc); 843 - free_irq(host->irq, host); 844 - if (host->gpio_write_protect) 845 - gpio_free(host->gpio_write_protect); 846 - del_timer_sync(&host->timer); 847 - mvsd_power_down(host); 848 - iounmap(host->base); 849 - release_resource(host->res); 857 + mmc_remove_host(mmc); 858 + del_timer_sync(&host->timer); 859 + mvsd_power_down(host); 850 860 851 - if (!IS_ERR(host->clk)) { 852 - clk_disable_unprepare(host->clk); 853 - clk_put(host->clk); 854 - } 855 - mmc_free_host(mmc); 856 - } 861 + if (!IS_ERR(host->clk)) 862 + clk_disable_unprepare(host->clk); 863 + mmc_free_host(mmc); 864 + 857 865 platform_set_drvdata(pdev, NULL); 858 866 return 0; 859 867 }
+5 -25
drivers/mmc/host/rtsx_pci_sdmmc.c
··· 1060 1060 return 0; 1061 1061 } 1062 1062 1063 - static int sd_change_bank_voltage(struct realtek_pci_sdmmc *host, u8 voltage) 1064 - { 1065 - struct rtsx_pcr *pcr = host->pcr; 1066 - int err; 1067 - 1068 - if (voltage == SD_IO_3V3) { 1069 - err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24); 1070 - if (err < 0) 1071 - return err; 1072 - } else if (voltage == SD_IO_1V8) { 1073 - err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24); 1074 - if (err < 0) 1075 - return err; 1076 - } else { 1077 - return -EINVAL; 1078 - } 1079 - 1080 - return 0; 1081 - } 1082 - 1083 1063 static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) 1084 1064 { 1085 1065 struct realtek_pci_sdmmc *host = mmc_priv(mmc); ··· 1078 1098 rtsx_pci_start_run(pcr); 1079 1099 1080 1100 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) 1081 - voltage = SD_IO_3V3; 1101 + voltage = OUTPUT_3V3; 1082 1102 else 1083 - voltage = SD_IO_1V8; 1103 + voltage = OUTPUT_1V8; 1084 1104 1085 - if (voltage == SD_IO_1V8) { 1105 + if (voltage == OUTPUT_1V8) { 1086 1106 err = rtsx_pci_write_register(pcr, 1087 1107 SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B); 1088 1108 if (err < 0) ··· 1093 1113 goto out; 1094 1114 } 1095 1115 1096 - err = sd_change_bank_voltage(host, voltage); 1116 + err = rtsx_pci_switch_output_voltage(pcr, voltage); 1097 1117 if (err < 0) 1098 1118 goto out; 1099 1119 1100 - if (voltage == SD_IO_1V8) { 1120 + if (voltage == OUTPUT_1V8) { 1101 1121 err = sd_wait_voltage_stable_2(host); 1102 1122 if (err < 0) 1103 1123 goto out;
+1
drivers/mtd/devices/Kconfig
··· 272 272 tristate "M-Systems Disk-On-Chip G3" 273 273 select BCH 274 274 select BCH_CONST_PARAMS 275 + select BITREVERSE 275 276 ---help--- 276 277 This provides an MTD device driver for the M-Systems DiskOnChip 277 278 G3 devices.
+1 -1
drivers/mtd/maps/physmap_of.c
··· 170 170 resource_size_t res_size; 171 171 struct mtd_part_parser_data ppdata; 172 172 bool map_indirect; 173 - const char *mtd_name; 173 + const char *mtd_name = NULL; 174 174 175 175 match = of_match_device(of_flash_match, &dev->dev); 176 176 if (!match)
+2 -2
drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
··· 17 17 #include "bcm47xxnflash.h" 18 18 19 19 /* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has 20 - * shown 164 retries as maxiumum. */ 21 - #define NFLASH_READY_RETRIES 1000 20 + * shown ~1000 retries as maxiumum. */ 21 + #define NFLASH_READY_RETRIES 10000 22 22 23 23 #define NFLASH_SECTOR_SIZE 512 24 24
+1 -1
drivers/mtd/nand/davinci_nand.c
··· 523 523 static const struct of_device_id davinci_nand_of_match[] = { 524 524 {.compatible = "ti,davinci-nand", }, 525 525 {}, 526 - } 526 + }; 527 527 MODULE_DEVICE_TABLE(of, davinci_nand_of_match); 528 528 529 529 static struct davinci_nand_pdata
+5 -2
drivers/mtd/nand/nand_base.c
··· 2857 2857 int i; 2858 2858 int val; 2859 2859 2860 - /* ONFI need to be probed in 8 bits mode */ 2861 - WARN_ON(chip->options & NAND_BUSWIDTH_16); 2860 + /* ONFI need to be probed in 8 bits mode, and 16 bits should be selected with NAND_BUSWIDTH_AUTO */ 2861 + if (chip->options & NAND_BUSWIDTH_16) { 2862 + pr_err("Trying ONFI probe in 16 bits mode, aborting !\n"); 2863 + return 0; 2864 + } 2862 2865 /* Try ONFI for unknown chip or LP */ 2863 2866 chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1); 2864 2867 if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
+1
drivers/net/bonding/bond_sysfs.c
··· 1053 1053 pr_info("%s: Setting primary slave to None.\n", 1054 1054 bond->dev->name); 1055 1055 bond->primary_slave = NULL; 1056 + memset(bond->params.primary, 0, sizeof(bond->params.primary)); 1056 1057 bond_select_active_slave(bond); 1057 1058 goto out; 1058 1059 }
+7 -3
drivers/net/can/c_can/c_can.c
··· 488 488 489 489 priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), 490 490 IFX_WRITE_LOW_16BIT(mask)); 491 + 492 + /* According to C_CAN documentation, the reserved bit 493 + * in IFx_MASK2 register is fixed 1 494 + */ 491 495 priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), 492 - IFX_WRITE_HIGH_16BIT(mask)); 496 + IFX_WRITE_HIGH_16BIT(mask) | BIT(13)); 493 497 494 498 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 495 499 IFX_WRITE_LOW_16BIT(id)); ··· 964 960 break; 965 961 case LEC_ACK_ERROR: 966 962 netdev_dbg(dev, "ack error\n"); 967 - cf->data[2] |= (CAN_ERR_PROT_LOC_ACK | 963 + cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | 968 964 CAN_ERR_PROT_LOC_ACK_DEL); 969 965 break; 970 966 case LEC_BIT1_ERROR: ··· 977 973 break; 978 974 case LEC_CRC_ERROR: 979 975 netdev_dbg(dev, "CRC error\n"); 980 - cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ | 976 + cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | 981 977 CAN_ERR_PROT_LOC_CRC_DEL); 982 978 break; 983 979 default:
+1 -1
drivers/net/can/pch_can.c
··· 560 560 stats->rx_errors++; 561 561 break; 562 562 case PCH_CRC_ERR: 563 - cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | 563 + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | 564 564 CAN_ERR_PROT_LOC_CRC_DEL; 565 565 priv->can.can_stats.bus_error++; 566 566 stats->rx_errors++;
+2 -2
drivers/net/can/ti_hecc.c
··· 746 746 } 747 747 if (err_status & HECC_CANES_CRCE) { 748 748 hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); 749 - cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | 749 + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | 750 750 CAN_ERR_PROT_LOC_CRC_DEL; 751 751 } 752 752 if (err_status & HECC_CANES_ACKE) { 753 753 hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); 754 - cf->data[2] |= CAN_ERR_PROT_LOC_ACK | 754 + cf->data[3] |= CAN_ERR_PROT_LOC_ACK | 755 755 CAN_ERR_PROT_LOC_ACK_DEL; 756 756 } 757 757 }
+1 -1
drivers/net/ethernet/3com/3c574_cs.c
··· 432 432 netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n", 433 433 cardname, dev->base_addr, dev->irq, dev->dev_addr); 434 434 netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n", 435 - 8 << config & Ram_size, 435 + 8 << (config & Ram_size), 436 436 ram_split[(config & Ram_split) >> Ram_split_shift], 437 437 config & Autoselect ? "autoselect " : ""); 438 438
+39 -23
drivers/net/ethernet/broadcom/tg3.c
··· 1283 1283 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg); 1284 1284 } 1285 1285 1286 - #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \ 1287 - tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ 1288 - MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \ 1289 - MII_TG3_AUXCTL_ACTL_TX_6DB) 1286 + static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable) 1287 + { 1288 + u32 val; 1289 + int err; 1290 1290 1291 - #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \ 1292 - tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ 1293 - MII_TG3_AUXCTL_ACTL_TX_6DB); 1291 + err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); 1292 + 1293 + if (err) 1294 + return err; 1295 + if (enable) 1296 + 1297 + val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA; 1298 + else 1299 + val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA; 1300 + 1301 + err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 1302 + val | MII_TG3_AUXCTL_ACTL_TX_6DB); 1303 + 1304 + return err; 1305 + } 1294 1306 1295 1307 static int tg3_bmcr_reset(struct tg3 *tp) 1296 1308 { ··· 2235 2223 2236 2224 otp = tp->phy_otp; 2237 2225 2238 - if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) 2226 + if (tg3_phy_toggle_auxctl_smdsp(tp, true)) 2239 2227 return; 2240 2228 2241 2229 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); ··· 2260 2248 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); 2261 2249 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); 2262 2250 2263 - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 2251 + tg3_phy_toggle_auxctl_smdsp(tp, false); 2264 2252 } 2265 2253 2266 2254 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) ··· 2296 2284 2297 2285 if (!tp->setlpicnt) { 2298 2286 if (current_link_up == 1 && 2299 - !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { 2287 + !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2300 2288 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); 2301 - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 2289 + tg3_phy_toggle_auxctl_smdsp(tp, false); 2302 2290 } 2303 2291 2304 2292 val = tr32(TG3_CPMU_EEE_MODE); ··· 2314 
2302 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 2315 2303 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || 2316 2304 tg3_flag(tp, 57765_CLASS)) && 2317 - !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { 2305 + !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2318 2306 val = MII_TG3_DSP_TAP26_ALNOKO | 2319 2307 MII_TG3_DSP_TAP26_RMRXSTO; 2320 2308 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); 2321 - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 2309 + tg3_phy_toggle_auxctl_smdsp(tp, false); 2322 2310 } 2323 2311 2324 2312 val = tr32(TG3_CPMU_EEE_MODE); ··· 2462 2450 tg3_writephy(tp, MII_CTRL1000, 2463 2451 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); 2464 2452 2465 - err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); 2453 + err = tg3_phy_toggle_auxctl_smdsp(tp, true); 2466 2454 if (err) 2467 2455 return err; 2468 2456 ··· 2483 2471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); 2484 2472 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); 2485 2473 2486 - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 2474 + tg3_phy_toggle_auxctl_smdsp(tp, false); 2487 2475 2488 2476 tg3_writephy(tp, MII_CTRL1000, phy9_orig); 2489 2477 ··· 2584 2572 2585 2573 out: 2586 2574 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && 2587 - !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { 2575 + !tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2588 2576 tg3_phydsp_write(tp, 0x201f, 0x2aaa); 2589 2577 tg3_phydsp_write(tp, 0x000a, 0x0323); 2590 - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 2578 + tg3_phy_toggle_auxctl_smdsp(tp, false); 2591 2579 } 2592 2580 2593 2581 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { ··· 2596 2584 } 2597 2585 2598 2586 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { 2599 - if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { 2587 + if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2600 2588 tg3_phydsp_write(tp, 0x000a, 0x310b); 2601 2589 tg3_phydsp_write(tp, 0x201f, 0x9506); 2602 2590 tg3_phydsp_write(tp, 0x401f, 0x14e2); 2603 - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 2591 + tg3_phy_toggle_auxctl_smdsp(tp, false); 2604 2592 } 2605 2593 } else if (tp->phy_flags & 
TG3_PHYFLG_JITTER_BUG) { 2606 - if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { 2594 + if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { 2607 2595 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 2608 2596 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { 2609 2597 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); ··· 2612 2600 } else 2613 2601 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); 2614 2602 2615 - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 2603 + tg3_phy_toggle_auxctl_smdsp(tp, false); 2616 2604 } 2617 2605 } 2618 2606 ··· 4021 4009 tw32(TG3_CPMU_EEE_MODE, 4022 4010 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); 4023 4011 4024 - err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); 4012 + err = tg3_phy_toggle_auxctl_smdsp(tp, true); 4025 4013 if (!err) { 4026 4014 u32 err2; 4027 4015 ··· 4054 4042 MII_TG3_DSP_CH34TP2_HIBW01); 4055 4043 } 4056 4044 4057 - err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); 4045 + err2 = tg3_phy_toggle_auxctl_smdsp(tp, false); 4058 4046 if (!err) 4059 4047 err = err2; 4060 4048 } ··· 6961 6949 { 6962 6950 int i; 6963 6951 struct tg3 *tp = netdev_priv(dev); 6952 + 6953 + if (tg3_irq_sync(tp)) 6954 + return; 6964 6955 6965 6956 for (i = 0; i < tp->irq_cnt; i++) 6966 6957 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); ··· 16382 16367 tp->pm_cap = pm_cap; 16383 16368 tp->rx_mode = TG3_DEF_RX_MODE; 16384 16369 tp->tx_mode = TG3_DEF_TX_MODE; 16370 + tp->irq_sync = 1; 16385 16371 16386 16372 if (tg3_debug > 0) 16387 16373 tp->msg_enable = tg3_debug;
+4
drivers/net/ethernet/calxeda/xgmac.c
··· 548 548 return -1; 549 549 } 550 550 551 + /* All frames should fit into a single buffer */ 552 + if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG)) 553 + return -1; 554 + 551 555 /* Check if packet has checksum already */ 552 556 if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) && 553 557 !(ext_status & RXDESC_IP_PAYLOAD_MASK))
+13 -2
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 1994 1994 { 1995 1995 const struct port_info *pi = netdev_priv(dev); 1996 1996 struct adapter *adap = pi->adapter; 1997 + struct sge_rspq *q; 1998 + int i; 1999 + int r = 0; 1997 2000 1998 - return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq, 1999 - c->rx_coalesce_usecs, c->rx_max_coalesced_frames); 2001 + for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) { 2002 + q = &adap->sge.ethrxq[i].rspq; 2003 + r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs, 2004 + c->rx_max_coalesced_frames); 2005 + if (r) { 2006 + dev_err(&dev->dev, "failed to set coalesce %d\n", r); 2007 + break; 2008 + } 2009 + } 2010 + return r; 2000 2011 } 2001 2012 2002 2013 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+4 -4
drivers/net/ethernet/emulex/benet/be.h
··· 36 36 37 37 #define DRV_VER "4.4.161.0u" 38 38 #define DRV_NAME "be2net" 39 - #define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 40 - #define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 41 - #define OC_NAME "Emulex OneConnect 10Gbps NIC" 39 + #define BE_NAME "Emulex BladeEngine2" 40 + #define BE3_NAME "Emulex BladeEngine3" 41 + #define OC_NAME "Emulex OneConnect" 42 42 #define OC_NAME_BE OC_NAME "(be3)" 43 43 #define OC_NAME_LANCER OC_NAME "(Lancer)" 44 44 #define OC_NAME_SH OC_NAME "(Skyhawk)" 45 - #define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver" 45 + #define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver" 46 46 47 47 #define BE_VENDOR_ID 0x19a2 48 48 #define EMULEX_VENDOR_ID 0x10df
+1 -1
drivers/net/ethernet/emulex/benet/be_main.c
··· 25 25 MODULE_VERSION(DRV_VER); 26 26 MODULE_DEVICE_TABLE(pci, be_dev_ids); 27 27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); 28 - MODULE_AUTHOR("ServerEngines Corporation"); 28 + MODULE_AUTHOR("Emulex Corporation"); 29 29 MODULE_LICENSE("GPL"); 30 30 31 31 static unsigned int num_vfs;
+9
drivers/net/ethernet/intel/e1000e/defines.h
··· 232 232 #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ 233 233 #define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ 234 234 #define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ 235 + #define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ 235 236 #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 236 237 #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 237 238 #define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ ··· 390 389 391 390 #define E1000_PBS_16K E1000_PBA_16K 392 391 392 + /* Uncorrectable/correctable ECC Error counts and enable bits */ 393 + #define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF 394 + #define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 395 + #define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 396 + #define E1000_PBECCSTS_ECC_ENABLE 0x00010000 397 + 393 398 #define IFS_MAX 80 394 399 #define IFS_MIN 40 395 400 #define IFS_RATIO 4 ··· 415 408 #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ 416 409 #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ 417 410 #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ 411 + #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ 418 412 #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ 419 413 #define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ 420 414 #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ ··· 451 443 #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ 452 444 #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */ 453 445 #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ 446 + #define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ 454 447 #define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ 455 448 #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ 456 449 #define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
+2
drivers/net/ethernet/intel/e1000e/e1000.h
··· 309 309 310 310 struct napi_struct napi; 311 311 312 + unsigned int uncorr_errors; /* uncorrectable ECC errors */ 313 + unsigned int corr_errors; /* correctable ECC errors */ 312 314 unsigned int restart_queue; 313 315 u32 txd_cmd; 314 316
+2
drivers/net/ethernet/intel/e1000e/ethtool.c
··· 108 108 E1000_STAT("dropped_smbus", stats.mgpdc), 109 109 E1000_STAT("rx_dma_failed", rx_dma_failed), 110 110 E1000_STAT("tx_dma_failed", tx_dma_failed), 111 + E1000_STAT("uncorr_ecc_errors", uncorr_errors), 112 + E1000_STAT("corr_ecc_errors", corr_errors), 111 113 }; 112 114 113 115 #define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+1
drivers/net/ethernet/intel/e1000e/hw.h
··· 77 77 #define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ 78 78 E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ 79 79 E1000_PBS = 0x01008, /* Packet Buffer Size */ 80 + E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */ 80 81 E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ 81 82 E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */ 82 83 E1000_FLOP = 0x0103C, /* FLASH Opcode Register */
+11
drivers/net/ethernet/intel/e1000e/ich8lan.c
··· 3624 3624 if (hw->mac.type == e1000_ich8lan) 3625 3625 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); 3626 3626 ew32(RFCTL, reg); 3627 + 3628 + /* Enable ECC on Lynxpoint */ 3629 + if (hw->mac.type == e1000_pch_lpt) { 3630 + reg = er32(PBECCSTS); 3631 + reg |= E1000_PBECCSTS_ECC_ENABLE; 3632 + ew32(PBECCSTS, reg); 3633 + 3634 + reg = er32(CTRL); 3635 + reg |= E1000_CTRL_MEHE; 3636 + ew32(CTRL, reg); 3637 + } 3627 3638 } 3628 3639 3629 3640 /**
+46
drivers/net/ethernet/intel/e1000e/netdev.c
··· 1678 1678 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1679 1679 } 1680 1680 1681 + /* Reset on uncorrectable ECC error */ 1682 + if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { 1683 + u32 pbeccsts = er32(PBECCSTS); 1684 + 1685 + adapter->corr_errors += 1686 + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; 1687 + adapter->uncorr_errors += 1688 + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> 1689 + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; 1690 + 1691 + /* Do the reset outside of interrupt context */ 1692 + schedule_work(&adapter->reset_task); 1693 + 1694 + /* return immediately since reset is imminent */ 1695 + return IRQ_HANDLED; 1696 + } 1697 + 1681 1698 if (napi_schedule_prep(&adapter->napi)) { 1682 1699 adapter->total_tx_bytes = 0; 1683 1700 adapter->total_tx_packets = 0; ··· 1756 1739 /* guard against interrupt when we're going down */ 1757 1740 if (!test_bit(__E1000_DOWN, &adapter->state)) 1758 1741 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1742 + } 1743 + 1744 + /* Reset on uncorrectable ECC error */ 1745 + if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { 1746 + u32 pbeccsts = er32(PBECCSTS); 1747 + 1748 + adapter->corr_errors += 1749 + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; 1750 + adapter->uncorr_errors += 1751 + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> 1752 + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; 1753 + 1754 + /* Do the reset outside of interrupt context */ 1755 + schedule_work(&adapter->reset_task); 1756 + 1757 + /* return immediately since reset is imminent */ 1758 + return IRQ_HANDLED; 1759 1759 } 1760 1760 1761 1761 if (napi_schedule_prep(&adapter->napi)) { ··· 2138 2104 if (adapter->msix_entries) { 2139 2105 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); 2140 2106 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); 2107 + } else if (hw->mac.type == e1000_pch_lpt) { 2108 + ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); 2141 2109 } else { 2142 2110 ew32(IMS, 
IMS_ENABLE_MASK); 2143 2111 } ··· 4287 4251 adapter->stats.mgptc += er32(MGTPTC); 4288 4252 adapter->stats.mgprc += er32(MGTPRC); 4289 4253 adapter->stats.mgpdc += er32(MGTPDC); 4254 + 4255 + /* Correctable ECC Errors */ 4256 + if (hw->mac.type == e1000_pch_lpt) { 4257 + u32 pbeccsts = er32(PBECCSTS); 4258 + adapter->corr_errors += 4259 + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; 4260 + adapter->uncorr_errors += 4261 + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> 4262 + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; 4263 + } 4290 4264 } 4291 4265 4292 4266 /**
+2 -1
drivers/net/ethernet/intel/ixgbe/Makefile
··· 32 32 33 33 obj-$(CONFIG_IXGBE) += ixgbe.o 34 34 35 - ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\ 35 + ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 36 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ 37 37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o 38 38 ··· 40 40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o 41 41 42 42 ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o 43 + ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o 43 44 ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
-5
drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
··· 24 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 25 26 26 *******************************************************************************/ 27 - 28 - #ifdef CONFIG_DEBUG_FS 29 - 30 27 #include <linux/debugfs.h> 31 28 #include <linux/module.h> 32 29 ··· 274 277 { 275 278 debugfs_remove_recursive(ixgbe_dbg_root); 276 279 } 277 - 278 - #endif /* CONFIG_DEBUG_FS */
+2 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
··· 660 660 break; 661 661 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 662 662 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; 663 - tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG; 663 + tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; 664 664 break; 665 665 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 666 666 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; 667 - tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG; 667 + tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; 668 668 break; 669 669 case HWTSTAMP_FILTER_PTP_V2_EVENT: 670 670 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+9 -4
drivers/net/ethernet/mellanox/mlx4/en_tx.c
··· 630 630 ring->tx_csum++; 631 631 } 632 632 633 - /* Copy dst mac address to wqe */ 634 - ethh = (struct ethhdr *)skb->data; 635 - tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest); 636 - tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2)); 633 + if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) { 634 + /* Copy dst mac address to wqe. This allows loopback in eSwitch, 635 + * so that VFs and PF can communicate with each other 636 + */ 637 + ethh = (struct ethhdr *)skb->data; 638 + tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest); 639 + tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2)); 640 + } 641 + 637 642 /* Handle LSO (TSO) packets */ 638 643 if (lso_header_size) { 639 644 /* Mark opcode as LSO */
+3 -10
drivers/net/ethernet/mellanox/mlx4/main.c
··· 380 380 } 381 381 } 382 382 383 - if ((dev_cap->flags & 383 + if ((dev->caps.flags & 384 384 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && 385 385 mlx4_is_master(dev)) 386 386 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; ··· 1790 1790 int i; 1791 1791 1792 1792 if (msi_x) { 1793 - /* In multifunction mode each function gets 2 msi-X vectors 1794 - * one for data path completions anf the other for asynch events 1795 - * or command completions */ 1796 - if (mlx4_is_mfunc(dev)) { 1797 - nreq = 2; 1798 - } else { 1799 - nreq = min_t(int, dev->caps.num_eqs - 1800 - dev->caps.reserved_eqs, nreq); 1801 - } 1793 + nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 1794 + nreq); 1802 1795 1803 1796 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 1804 1797 if (!entries)
+1 -1
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
··· 144 144 buffrag->length, PCI_DMA_TODEVICE); 145 145 buffrag->dma = 0ULL; 146 146 } 147 - for (j = 0; j < cmd_buf->frag_count; j++) { 147 + for (j = 1; j < cmd_buf->frag_count; j++) { 148 148 buffrag++; 149 149 if (buffrag->dma) { 150 150 pci_unmap_page(adapter->pdev, buffrag->dma,
+2
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
··· 1963 1963 while (--i >= 0) { 1964 1964 nf = &pbuf->frag_array[i+1]; 1965 1965 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); 1966 + nf->dma = 0ULL; 1966 1967 } 1967 1968 1968 1969 nf = &pbuf->frag_array[0]; 1969 1970 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); 1971 + nf->dma = 0ULL; 1970 1972 1971 1973 out_err: 1972 1974 return -ENOMEM;
+6 -15
drivers/net/ethernet/realtek/r8169.c
··· 1826 1826 1827 1827 if (opts2 & RxVlanTag) 1828 1828 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); 1829 - 1830 - desc->opts2 = 0; 1831 1829 } 1832 1830 1833 1831 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) ··· 6062 6064 !(status & (RxRWT | RxFOVF)) && 6063 6065 (dev->features & NETIF_F_RXALL)) 6064 6066 goto process_pkt; 6065 - 6066 - rtl8169_mark_to_asic(desc, rx_buf_sz); 6067 6067 } else { 6068 6068 struct sk_buff *skb; 6069 6069 dma_addr_t addr; ··· 6082 6086 if (unlikely(rtl8169_fragmented_frame(status))) { 6083 6087 dev->stats.rx_dropped++; 6084 6088 dev->stats.rx_length_errors++; 6085 - rtl8169_mark_to_asic(desc, rx_buf_sz); 6086 - continue; 6089 + goto release_descriptor; 6087 6090 } 6088 6091 6089 6092 skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry], 6090 6093 tp, pkt_size, addr); 6091 - rtl8169_mark_to_asic(desc, rx_buf_sz); 6092 6094 if (!skb) { 6093 6095 dev->stats.rx_dropped++; 6094 - continue; 6096 + goto release_descriptor; 6095 6097 } 6096 6098 6097 6099 rtl8169_rx_csum(skb, status); ··· 6105 6111 tp->rx_stats.bytes += pkt_size; 6106 6112 u64_stats_update_end(&tp->rx_stats.syncp); 6107 6113 } 6108 - 6109 - /* Work around for AMD plateform. */ 6110 - if ((desc->opts2 & cpu_to_le32(0xfffe000)) && 6111 - (tp->mac_version == RTL_GIGA_MAC_VER_05)) { 6112 - desc->opts2 = 0; 6113 - cur_rx++; 6114 - } 6114 + release_descriptor: 6115 + desc->opts2 = 0; 6116 + wmb(); 6117 + rtl8169_mark_to_asic(desc, rx_buf_sz); 6115 6118 } 6116 6119 6117 6120 count = cur_rx - tp->cur_rx;
+2 -6
drivers/net/ethernet/via/via-rhine.c
··· 1801 1801 rp->tx_skbuff[entry]->len, 1802 1802 PCI_DMA_TODEVICE); 1803 1803 } 1804 - dev_kfree_skb_irq(rp->tx_skbuff[entry]); 1804 + dev_kfree_skb(rp->tx_skbuff[entry]); 1805 1805 rp->tx_skbuff[entry] = NULL; 1806 1806 entry = (++rp->dirty_tx) % TX_RING_SIZE; 1807 1807 } ··· 2010 2010 if (intr_status & IntrPCIErr) 2011 2011 netif_warn(rp, hw, dev, "PCI error\n"); 2012 2012 2013 - napi_disable(&rp->napi); 2014 - rhine_irq_disable(rp); 2015 - /* Slow and safe. Consider __napi_schedule as a replacement ? */ 2016 - napi_enable(&rp->napi); 2017 - napi_schedule(&rp->napi); 2013 + iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); 2018 2014 2019 2015 out_unlock: 2020 2016 mutex_unlock(&rp->task_lock);
+1 -1
drivers/net/hyperv/hyperv_net.h
··· 84 84 }; 85 85 86 86 struct netvsc_device_info { 87 - unsigned char mac_adr[6]; 87 + unsigned char mac_adr[ETH_ALEN]; 88 88 bool link_state; /* 0 - link up, 1 - link down */ 89 89 int ring_size; 90 90 };
+1 -1
drivers/net/hyperv/netvsc_drv.c
··· 349 349 struct net_device_context *ndevctx = netdev_priv(ndev); 350 350 struct hv_device *hdev = ndevctx->device_ctx; 351 351 struct sockaddr *addr = p; 352 - char save_adr[14]; 352 + char save_adr[ETH_ALEN]; 353 353 unsigned char save_aatype; 354 354 int err; 355 355
+5
drivers/net/loopback.c
··· 77 77 78 78 skb_orphan(skb); 79 79 80 + /* Before queueing this packet to netif_rx(), 81 + * make sure dst is refcounted. 82 + */ 83 + skb_dst_force(skb); 84 + 80 85 skb->protocol = eth_type_trans(skb, dev); 81 86 82 87 /* it's OK to use per_cpu_ptr() because BHs are off */
+4 -1
drivers/net/macvlan.c
··· 822 822 823 823 static size_t macvlan_get_size(const struct net_device *dev) 824 824 { 825 - return nla_total_size(4); 825 + return (0 826 + + nla_total_size(4) /* IFLA_MACVLAN_MODE */ 827 + + nla_total_size(2) /* IFLA_MACVLAN_FLAGS */ 828 + ); 826 829 } 827 830 828 831 static int macvlan_fill_info(struct sk_buff *skb,
+20 -9
drivers/net/phy/icplus.c
··· 36 36 37 37 /* IP101A/G - IP1001 */ 38 38 #define IP10XX_SPEC_CTRL_STATUS 16 /* Spec. Control Register */ 39 + #define IP1001_RXPHASE_SEL (1<<0) /* Add delay on RX_CLK */ 40 + #define IP1001_TXPHASE_SEL (1<<1) /* Add delay on TX_CLK */ 39 41 #define IP1001_SPEC_CTRL_STATUS_2 20 /* IP1001 Spec. Control Reg 2 */ 40 - #define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */ 41 42 #define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ 42 43 #define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ 43 44 #define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */ ··· 139 138 if (c < 0) 140 139 return c; 141 140 142 - /* INTR pin used: speed/link/duplex will cause an interrupt */ 143 - c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT); 144 - if (c < 0) 145 - return c; 141 + if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) || 142 + (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) || 143 + (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) || 144 + (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) { 146 145 147 - if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { 148 - /* Additional delay (2ns) used to adjust RX clock phase 149 - * at RGMII interface */ 150 146 c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS); 151 147 if (c < 0) 152 148 return c; 153 149 154 - c |= IP1001_PHASE_SEL_MASK; 150 + c &= ~(IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL); 151 + 152 + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) 153 + c |= (IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL); 154 + else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) 155 + c |= IP1001_RXPHASE_SEL; 156 + else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) 157 + c |= IP1001_TXPHASE_SEL; 158 + 155 159 c = phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c); 156 160 if (c < 0) 157 161 return c; ··· 170 164 int c; 171 165 172 166 c = ip1xx_reset(phydev); 167 + if (c < 0) 168 + return c; 169 + 170 + /* INTR pin used: speed/link/duplex will cause an interrupt */ 171 + c = 
phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT); 173 172 if (c < 0) 174 173 return c; 175 174
-9
drivers/net/phy/marvell.c
··· 353 353 int err; 354 354 int temp; 355 355 356 - /* Enable Fiber/Copper auto selection */ 357 - temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); 358 - temp &= ~MII_M1111_HWCFG_FIBER_COPPER_AUTO; 359 - phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); 360 - 361 - temp = phy_read(phydev, MII_BMCR); 362 - temp |= BMCR_RESET; 363 - phy_write(phydev, MII_BMCR, temp); 364 - 365 356 if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) || 366 357 (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) || 367 358 (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+56 -27
drivers/net/tun.c
··· 109 109 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; 110 110 }; 111 111 112 - /* 1024 is probably a high enough limit: modern hypervisors seem to support on 113 - * the order of 100-200 CPUs so this leaves us some breathing space if we want 114 - * to match a queue per guest CPU. 115 - */ 116 - #define MAX_TAP_QUEUES 1024 112 + /* DEFAULT_MAX_NUM_RSS_QUEUES were choosed to let the rx/tx queues allocated for 113 + * the netdevice to be fit in one page. So we can make sure the success of 114 + * memory allocation. TODO: increase the limit. */ 115 + #define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES 116 + #define MAX_TAP_FLOWS 4096 117 117 118 118 #define TUN_FLOW_EXPIRE (3 * HZ) 119 119 ··· 185 185 unsigned long ageing_time; 186 186 unsigned int numdisabled; 187 187 struct list_head disabled; 188 + void *security; 189 + u32 flow_count; 188 190 }; 189 191 190 192 static inline u32 tun_hashfn(u32 rxhash) ··· 220 218 e->queue_index = queue_index; 221 219 e->tun = tun; 222 220 hlist_add_head_rcu(&e->hash_link, head); 221 + ++tun->flow_count; 223 222 } 224 223 return e; 225 224 } ··· 231 228 e->rxhash, e->queue_index); 232 229 hlist_del_rcu(&e->hash_link); 233 230 kfree_rcu(e, rcu); 231 + --tun->flow_count; 234 232 } 235 233 236 234 static void tun_flow_flush(struct tun_struct *tun) ··· 298 294 } 299 295 300 296 static void tun_flow_update(struct tun_struct *tun, u32 rxhash, 301 - u16 queue_index) 297 + struct tun_file *tfile) 302 298 { 303 299 struct hlist_head *head; 304 300 struct tun_flow_entry *e; 305 301 unsigned long delay = tun->ageing_time; 302 + u16 queue_index = tfile->queue_index; 306 303 307 304 if (!rxhash) 308 305 return; ··· 312 307 313 308 rcu_read_lock(); 314 309 315 - if (tun->numqueues == 1) 310 + /* We may get a very small possibility of OOO during switching, not 311 + * worth to optimize.*/ 312 + if (tun->numqueues == 1 || tfile->detached) 316 313 goto unlock; 317 314 318 315 e = tun_flow_find(head, rxhash); ··· 324 317 e->updated = jiffies; 325 
318 } else { 326 319 spin_lock_bh(&tun->lock); 327 - if (!tun_flow_find(head, rxhash)) 320 + if (!tun_flow_find(head, rxhash) && 321 + tun->flow_count < MAX_TAP_FLOWS) 328 322 tun_flow_create(tun, head, rxhash, queue_index); 329 323 330 324 if (!timer_pending(&tun->flow_gc_timer)) ··· 414 406 415 407 tun = rtnl_dereference(tfile->tun); 416 408 417 - if (tun) { 409 + if (tun && !tfile->detached) { 418 410 u16 index = tfile->queue_index; 419 411 BUG_ON(index >= tun->numqueues); 420 412 dev = tun->dev; 421 413 422 414 rcu_assign_pointer(tun->tfiles[index], 423 415 tun->tfiles[tun->numqueues - 1]); 424 - rcu_assign_pointer(tfile->tun, NULL); 425 416 ntfile = rtnl_dereference(tun->tfiles[index]); 426 417 ntfile->queue_index = index; 427 418 428 419 --tun->numqueues; 429 - if (clean) 420 + if (clean) { 421 + rcu_assign_pointer(tfile->tun, NULL); 430 422 sock_put(&tfile->sk); 431 - else 423 + } else 432 424 tun_disable_queue(tun, tfile); 433 425 434 426 synchronize_net(); ··· 442 434 } 443 435 444 436 if (clean) { 445 - if (tun && tun->numqueues == 0 && tun->numdisabled == 0 && 446 - !(tun->flags & TUN_PERSIST)) 447 - if (tun->dev->reg_state == NETREG_REGISTERED) 437 + if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { 438 + netif_carrier_off(tun->dev); 439 + 440 + if (!(tun->flags & TUN_PERSIST) && 441 + tun->dev->reg_state == NETREG_REGISTERED) 448 442 unregister_netdevice(tun->dev); 443 + } 449 444 450 445 BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, 451 446 &tfile->socket.flags)); ··· 476 465 rcu_assign_pointer(tfile->tun, NULL); 477 466 --tun->numqueues; 478 467 } 468 + list_for_each_entry(tfile, &tun->disabled, next) { 469 + wake_up_all(&tfile->wq.wait); 470 + rcu_assign_pointer(tfile->tun, NULL); 471 + } 479 472 BUG_ON(tun->numqueues != 0); 480 473 481 474 synchronize_net(); ··· 505 490 struct tun_file *tfile = file->private_data; 506 491 int err; 507 492 493 + err = security_tun_dev_attach(tfile->socket.sk, tun->security); 494 + if (err < 0) 495 + goto out; 
496 + 508 497 err = -EINVAL; 509 - if (rtnl_dereference(tfile->tun)) 498 + if (rtnl_dereference(tfile->tun) && !tfile->detached) 510 499 goto out; 511 500 512 501 err = -EBUSY; ··· 1209 1190 tun->dev->stats.rx_packets++; 1210 1191 tun->dev->stats.rx_bytes += len; 1211 1192 1212 - tun_flow_update(tun, rxhash, tfile->queue_index); 1193 + tun_flow_update(tun, rxhash, tfile); 1213 1194 return total_len; 1214 1195 } 1215 1196 ··· 1392 1373 1393 1374 BUG_ON(!(list_empty(&tun->disabled))); 1394 1375 tun_flow_uninit(tun); 1376 + security_tun_dev_free_security(tun->security); 1395 1377 free_netdev(dev); 1396 1378 } 1397 1379 ··· 1582 1562 1583 1563 if (tun_not_capable(tun)) 1584 1564 return -EPERM; 1585 - err = security_tun_dev_attach(tfile->socket.sk); 1565 + err = security_tun_dev_open(tun->security); 1586 1566 if (err < 0) 1587 1567 return err; 1588 1568 ··· 1597 1577 else { 1598 1578 char *name; 1599 1579 unsigned long flags = 0; 1580 + int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? 1581 + MAX_TAP_QUEUES : 1; 1600 1582 1601 1583 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 1602 1584 return -EPERM; ··· 1622 1600 name = ifr->ifr_name; 1623 1601 1624 1602 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, 1625 - tun_setup, 1626 - MAX_TAP_QUEUES, MAX_TAP_QUEUES); 1603 + tun_setup, queues, queues); 1604 + 1627 1605 if (!dev) 1628 1606 return -ENOMEM; 1629 1607 ··· 1641 1619 1642 1620 spin_lock_init(&tun->lock); 1643 1621 1644 - security_tun_dev_post_create(&tfile->sk); 1622 + err = security_tun_dev_alloc_security(&tun->security); 1623 + if (err < 0) 1624 + goto err_free_dev; 1645 1625 1646 1626 tun_net_init(dev); 1647 1627 ··· 1668 1644 device_create_file(&tun->dev->dev, &dev_attr_owner) || 1669 1645 device_create_file(&tun->dev->dev, &dev_attr_group)) 1670 1646 pr_err("Failed to create tun sysfs files\n"); 1671 - 1672 - netif_carrier_on(tun->dev); 1673 1647 } 1648 + 1649 + netif_carrier_on(tun->dev); 1674 1650 1675 1651 tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 
1676 1652 ··· 1813 1789 1814 1790 if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 1815 1791 tun = tfile->detached; 1816 - if (!tun) 1792 + if (!tun) { 1817 1793 ret = -EINVAL; 1818 - else 1819 - ret = tun_attach(tun, file); 1794 + goto unlock; 1795 + } 1796 + ret = security_tun_dev_attach_queue(tun->security); 1797 + if (ret < 0) 1798 + goto unlock; 1799 + ret = tun_attach(tun, file); 1820 1800 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 1821 1801 tun = rtnl_dereference(tfile->tun); 1822 - if (!tun || !(tun->flags & TUN_TAP_MQ)) 1802 + if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached) 1823 1803 ret = -EINVAL; 1824 1804 else 1825 1805 __tun_detach(tfile, false); 1826 1806 } else 1827 1807 ret = -EINVAL; 1828 1808 1809 + unlock: 1829 1810 rtnl_unlock(); 1830 1811 return ret; 1831 1812 }
+19
drivers/net/usb/cdc_mbim.c
··· 374 374 .tx_fixup = cdc_mbim_tx_fixup, 375 375 }; 376 376 377 + /* MBIM and NCM devices should not need a ZLP after NTBs with 378 + * dwNtbOutMaxSize length. This driver_info is for the exceptional 379 + * devices requiring it anyway, allowing them to be supported without 380 + * forcing the performance penalty on all the sane devices. 381 + */ 382 + static const struct driver_info cdc_mbim_info_zlp = { 383 + .description = "CDC MBIM", 384 + .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP, 385 + .bind = cdc_mbim_bind, 386 + .unbind = cdc_mbim_unbind, 387 + .manage_power = cdc_mbim_manage_power, 388 + .rx_fixup = cdc_mbim_rx_fixup, 389 + .tx_fixup = cdc_mbim_tx_fixup, 390 + }; 391 + 377 392 static const struct usb_device_id mbim_devs[] = { 378 393 /* This duplicate NCM entry is intentional. MBIM devices can 379 394 * be disguised as NCM by default, and this is necessary to ··· 399 384 */ 400 385 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), 401 386 .driver_info = (unsigned long)&cdc_mbim_info, 387 + }, 388 + /* Sierra Wireless MC7710 need ZLPs */ 389 + { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 390 + .driver_info = (unsigned long)&cdc_mbim_info_zlp, 402 391 }, 403 392 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 404 393 .driver_info = (unsigned long)&cdc_mbim_info,
+33 -1
drivers/net/usb/cdc_ncm.c
··· 435 435 len -= temp; 436 436 } 437 437 438 + /* some buggy devices have an IAD but no CDC Union */ 439 + if (!ctx->union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) { 440 + ctx->control = intf; 441 + ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1); 442 + dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n"); 443 + } 444 + 438 445 /* check if we got everything */ 439 446 if ((ctx->control == NULL) || (ctx->data == NULL) || 440 447 ((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf)))) ··· 504 497 error2: 505 498 usb_set_intfdata(ctx->control, NULL); 506 499 usb_set_intfdata(ctx->data, NULL); 507 - usb_driver_release_interface(driver, ctx->data); 500 + if (ctx->data != ctx->control) 501 + usb_driver_release_interface(driver, ctx->data); 508 502 error: 509 503 cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]); 510 504 dev->data[0] = 0; ··· 1163 1155 .tx_fixup = cdc_ncm_tx_fixup, 1164 1156 }; 1165 1157 1158 + /* Same as wwan_info, but with FLAG_NOARP */ 1159 + static const struct driver_info wwan_noarp_info = { 1160 + .description = "Mobile Broadband Network Device (NO ARP)", 1161 + .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET 1162 + | FLAG_WWAN | FLAG_NOARP, 1163 + .bind = cdc_ncm_bind, 1164 + .unbind = cdc_ncm_unbind, 1165 + .check_connect = cdc_ncm_check_connect, 1166 + .manage_power = usbnet_manage_power, 1167 + .status = cdc_ncm_status, 1168 + .rx_fixup = cdc_ncm_rx_fixup, 1169 + .tx_fixup = cdc_ncm_tx_fixup, 1170 + }; 1171 + 1166 1172 static const struct usb_device_id cdc_devs[] = { 1167 1173 /* Ericsson MBM devices like F5521gw */ 1168 1174 { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO ··· 1214 1192 }, 1215 1193 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46), 1216 1194 .driver_info = (unsigned long)&wwan_info, 1195 + }, 1196 + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76), 1197 + .driver_info = (unsigned long)&wwan_info, 
1198 + }, 1199 + 1200 + /* Infineon(now Intel) HSPA Modem platform */ 1201 + { USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443, 1202 + USB_CLASS_COMM, 1203 + USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), 1204 + .driver_info = (unsigned long)&wwan_noarp_info, 1217 1205 }, 1218 1206 1219 1207 /* Generic CDC-NCM devices */
+35 -17
drivers/net/usb/dm9601.c
··· 45 45 #define DM_MCAST_ADDR 0x16 /* 8 bytes */ 46 46 #define DM_GPR_CTRL 0x1e 47 47 #define DM_GPR_DATA 0x1f 48 + #define DM_CHIP_ID 0x2c 49 + #define DM_MODE_CTRL 0x91 /* only on dm9620 */ 50 + 51 + /* chip id values */ 52 + #define ID_DM9601 0 53 + #define ID_DM9620 1 48 54 49 55 #define DM_MAX_MCAST 64 50 56 #define DM_MCAST_SIZE 8 ··· 58 52 #define DM_TX_OVERHEAD 2 /* 2 byte header */ 59 53 #define DM_RX_OVERHEAD 7 /* 3 byte header + 4 byte crc tail */ 60 54 #define DM_TIMEOUT 1000 61 - 62 55 63 56 static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data) 64 57 { ··· 89 84 90 85 static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value) 91 86 { 92 - return usbnet_write_cmd(dev, DM_WRITE_REGS, 87 + return usbnet_write_cmd(dev, DM_WRITE_REG, 93 88 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 94 89 value, reg, NULL, 0); 95 90 } 96 91 97 - static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value, 98 - u16 length, void *data) 92 + static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data) 99 93 { 100 94 usbnet_write_cmd_async(dev, DM_WRITE_REGS, 101 95 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 102 - value, reg, data, length); 103 - } 104 - 105 - static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data) 106 - { 107 - netdev_dbg(dev->net, "dm_write_async() reg=0x%02x length=%d\n", reg, length); 108 - 109 - dm_write_async_helper(dev, reg, 0, length, data); 96 + 0, reg, data, length); 110 97 } 111 98 112 99 static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value) 113 100 { 114 - netdev_dbg(dev->net, "dm_write_reg_async() reg=0x%02x value=0x%02x\n", 115 - reg, value); 116 - 117 - dm_write_async_helper(dev, reg, value, 0, NULL); 101 + usbnet_write_cmd_async(dev, DM_WRITE_REG, 102 + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 103 + value, reg, NULL, 0); 118 104 } 119 105 120 106 static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *value) ··· 354 
358 static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf) 355 359 { 356 360 int ret; 357 - u8 mac[ETH_ALEN]; 361 + u8 mac[ETH_ALEN], id; 358 362 359 363 ret = usbnet_get_endpoints(dev, intf); 360 364 if (ret) ··· 393 397 "dm9601: No valid MAC address in EEPROM, using %pM\n", 394 398 dev->net->dev_addr); 395 399 __dm9601_set_mac_address(dev); 400 + } 401 + 402 + if (dm_read_reg(dev, DM_CHIP_ID, &id) < 0) { 403 + netdev_err(dev->net, "Error reading chip ID\n"); 404 + ret = -ENODEV; 405 + goto out; 406 + } 407 + 408 + /* put dm9620 devices in dm9601 mode */ 409 + if (id == ID_DM9620) { 410 + u8 mode; 411 + 412 + if (dm_read_reg(dev, DM_MODE_CTRL, &mode) < 0) { 413 + netdev_err(dev->net, "Error reading MODE_CTRL\n"); 414 + ret = -ENODEV; 415 + goto out; 416 + } 417 + dm_write_reg(dev, DM_MODE_CTRL, mode & 0x7f); 396 418 } 397 419 398 420 /* power up phy */ ··· 593 579 }, 594 580 { 595 581 USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ 582 + .driver_info = (unsigned long)&dm9601_info, 583 + }, 584 + { 585 + USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */ 596 586 .driver_info = (unsigned long)&dm9601_info, 597 587 }, 598 588 {}, // END
+15
drivers/net/usb/qmi_wwan.c
··· 351 351 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57), 352 352 .driver_info = (unsigned long)&qmi_wwan_info, 353 353 }, 354 + { /* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */ 355 + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69), 356 + .driver_info = (unsigned long)&qmi_wwan_info, 357 + }, 354 358 355 359 /* 2. Combined interface devices matching on class+protocol */ 356 360 { /* Huawei E367 and possibly others in "Windows mode" */ ··· 363 359 }, 364 360 { /* Huawei E392, E398 and possibly others in "Windows mode" */ 365 361 USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17), 362 + .driver_info = (unsigned long)&qmi_wwan_info, 363 + }, 364 + { /* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */ 365 + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37), 366 + .driver_info = (unsigned long)&qmi_wwan_info, 367 + }, 368 + { /* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */ 369 + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67), 366 370 .driver_info = (unsigned long)&qmi_wwan_info, 367 371 }, 368 372 { /* Pantech UML290, P4200 and more */ ··· 445 433 {QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */ 446 434 {QMI_FIXED_INTF(0x19d2, 0x0200, 1)}, 447 435 {QMI_FIXED_INTF(0x19d2, 0x0257, 3)}, /* ZTE MF821 */ 436 + {QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */ 448 437 {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */ 449 438 {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */ 450 439 {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */ ··· 472 459 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ 473 460 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 474 461 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 462 + {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 463 + 
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 475 464 476 465 /* 4. Gobi 1000 devices */ 477 466 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+33 -6
drivers/net/usb/usbnet.c
··· 380 380 unsigned long lockflags; 381 381 size_t size = dev->rx_urb_size; 382 382 383 + /* prevent rx skb allocation when error ratio is high */ 384 + if (test_bit(EVENT_RX_KILL, &dev->flags)) { 385 + usb_free_urb(urb); 386 + return -ENOLINK; 387 + } 388 + 383 389 skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); 384 390 if (!skb) { 385 391 netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); ··· 543 537 dev->net->stats.rx_errors++; 544 538 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); 545 539 break; 540 + } 541 + 542 + /* stop rx if packet error rate is high */ 543 + if (++dev->pkt_cnt > 30) { 544 + dev->pkt_cnt = 0; 545 + dev->pkt_err = 0; 546 + } else { 547 + if (state == rx_cleanup) 548 + dev->pkt_err++; 549 + if (dev->pkt_err > 20) 550 + set_bit(EVENT_RX_KILL, &dev->flags); 546 551 } 547 552 548 553 state = defer_bh(dev, skb, &dev->rxq, state); ··· 807 790 (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" : 808 791 (dev->driver_info->flags & FLAG_FRAMING_AX) ? 
"ASIX" : 809 792 "simple"); 793 + 794 + /* reset rx error state */ 795 + dev->pkt_cnt = 0; 796 + dev->pkt_err = 0; 797 + clear_bit(EVENT_RX_KILL, &dev->flags); 810 798 811 799 // delay posting reads until we're fully open 812 800 tasklet_schedule (&dev->bh); ··· 1125 1103 if (info->tx_fixup) { 1126 1104 skb = info->tx_fixup (dev, skb, GFP_ATOMIC); 1127 1105 if (!skb) { 1128 - if (netif_msg_tx_err(dev)) { 1129 - netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); 1130 - goto drop; 1131 - } else { 1132 - /* cdc_ncm collected packet; waits for more */ 1106 + /* packet collected; minidriver waiting for more */ 1107 + if (info->flags & FLAG_MULTI_PACKET) 1133 1108 goto not_drop; 1134 - } 1109 + netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); 1110 + goto drop; 1135 1111 } 1136 1112 } 1137 1113 length = skb->len; ··· 1273 1253 netdev_dbg(dev->net, "bogus skb state %d\n", entry->state); 1274 1254 } 1275 1255 } 1256 + 1257 + /* restart RX again after disabling due to high error rate */ 1258 + clear_bit(EVENT_RX_KILL, &dev->flags); 1276 1259 1277 1260 // waiting for all pending urbs to complete? 1278 1261 if (dev->wait) { ··· 1470 1447 /* WWAN devices should always be named "wwan%d" */ 1471 1448 if ((dev->driver_info->flags & FLAG_WWAN) != 0) 1472 1449 strcpy(net->name, "wwan%d"); 1450 + 1451 + /* devices that cannot do ARP */ 1452 + if ((dev->driver_info->flags & FLAG_NOARP) != 0) 1453 + net->flags |= IFF_NOARP; 1473 1454 1474 1455 /* maybe the remote can't receive an Ethernet MTU */ 1475 1456 if (net->mtu > (dev->hard_mtu - net->hard_header_len))
+98 -20
drivers/net/virtio_net.c
··· 26 26 #include <linux/scatterlist.h> 27 27 #include <linux/if_vlan.h> 28 28 #include <linux/slab.h> 29 + #include <linux/cpu.h> 29 30 30 31 static int napi_weight = 128; 31 32 module_param(napi_weight, int, 0444); ··· 124 123 125 124 /* Does the affinity hint is set for virtqueues? */ 126 125 bool affinity_hint_set; 126 + 127 + /* Per-cpu variable to show the mapping from CPU to virtqueue */ 128 + int __percpu *vq_index; 129 + 130 + /* CPU hot plug notifier */ 131 + struct notifier_block nb; 127 132 }; 128 133 129 134 struct skb_vnet_hdr { ··· 1020 1013 return 0; 1021 1014 } 1022 1015 1023 - static void virtnet_set_affinity(struct virtnet_info *vi, bool set) 1016 + static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) 1024 1017 { 1025 1018 int i; 1019 + int cpu; 1020 + 1021 + if (vi->affinity_hint_set) { 1022 + for (i = 0; i < vi->max_queue_pairs; i++) { 1023 + virtqueue_set_affinity(vi->rq[i].vq, -1); 1024 + virtqueue_set_affinity(vi->sq[i].vq, -1); 1025 + } 1026 + 1027 + vi->affinity_hint_set = false; 1028 + } 1029 + 1030 + i = 0; 1031 + for_each_online_cpu(cpu) { 1032 + if (cpu == hcpu) { 1033 + *per_cpu_ptr(vi->vq_index, cpu) = -1; 1034 + } else { 1035 + *per_cpu_ptr(vi->vq_index, cpu) = 1036 + ++i % vi->curr_queue_pairs; 1037 + } 1038 + } 1039 + } 1040 + 1041 + static void virtnet_set_affinity(struct virtnet_info *vi) 1042 + { 1043 + int i; 1044 + int cpu; 1026 1045 1027 1046 /* In multiqueue mode, when the number of cpu is equal to the number of 1028 1047 * queue pairs, we let the queue pairs to be private to one cpu by 1029 1048 * setting the affinity hint to eliminate the contention. 
1030 1049 */ 1031 - if ((vi->curr_queue_pairs == 1 || 1032 - vi->max_queue_pairs != num_online_cpus()) && set) { 1033 - if (vi->affinity_hint_set) 1034 - set = false; 1035 - else 1036 - return; 1050 + if (vi->curr_queue_pairs == 1 || 1051 + vi->max_queue_pairs != num_online_cpus()) { 1052 + virtnet_clean_affinity(vi, -1); 1053 + return; 1037 1054 } 1038 1055 1039 - for (i = 0; i < vi->max_queue_pairs; i++) { 1040 - int cpu = set ? i : -1; 1056 + i = 0; 1057 + for_each_online_cpu(cpu) { 1041 1058 virtqueue_set_affinity(vi->rq[i].vq, cpu); 1042 1059 virtqueue_set_affinity(vi->sq[i].vq, cpu); 1060 + *per_cpu_ptr(vi->vq_index, cpu) = i; 1061 + i++; 1043 1062 } 1044 1063 1045 - if (set) 1046 - vi->affinity_hint_set = true; 1047 - else 1048 - vi->affinity_hint_set = false; 1064 + vi->affinity_hint_set = true; 1065 + } 1066 + 1067 + static int virtnet_cpu_callback(struct notifier_block *nfb, 1068 + unsigned long action, void *hcpu) 1069 + { 1070 + struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); 1071 + 1072 + switch(action & ~CPU_TASKS_FROZEN) { 1073 + case CPU_ONLINE: 1074 + case CPU_DOWN_FAILED: 1075 + case CPU_DEAD: 1076 + virtnet_set_affinity(vi); 1077 + break; 1078 + case CPU_DOWN_PREPARE: 1079 + virtnet_clean_affinity(vi, (long)hcpu); 1080 + break; 1081 + default: 1082 + break; 1083 + } 1084 + return NOTIFY_OK; 1049 1085 } 1050 1086 1051 1087 static void virtnet_get_ringparam(struct net_device *dev, ··· 1132 1082 if (queue_pairs > vi->max_queue_pairs) 1133 1083 return -EINVAL; 1134 1084 1085 + get_online_cpus(); 1135 1086 err = virtnet_set_queues(vi, queue_pairs); 1136 1087 if (!err) { 1137 1088 netif_set_real_num_tx_queues(dev, queue_pairs); 1138 1089 netif_set_real_num_rx_queues(dev, queue_pairs); 1139 1090 1140 - virtnet_set_affinity(vi, true); 1091 + virtnet_set_affinity(vi); 1141 1092 } 1093 + put_online_cpus(); 1142 1094 1143 1095 return err; 1144 1096 } ··· 1179 1127 1180 1128 /* To avoid contending a lock hold by a vcpu who would exit to 
host, select the 1181 1129 * txq based on the processor id. 1182 - * TODO: handle cpu hotplug. 1183 1130 */ 1184 1131 static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb) 1185 1132 { 1186 - int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 1187 - smp_processor_id(); 1133 + int txq; 1134 + struct virtnet_info *vi = netdev_priv(dev); 1135 + 1136 + if (skb_rx_queue_recorded(skb)) { 1137 + txq = skb_get_rx_queue(skb); 1138 + } else { 1139 + txq = *__this_cpu_ptr(vi->vq_index); 1140 + if (txq == -1) 1141 + txq = 0; 1142 + } 1188 1143 1189 1144 while (unlikely(txq >= dev->real_num_tx_queues)) 1190 1145 txq -= dev->real_num_tx_queues; ··· 1307 1248 { 1308 1249 struct virtio_device *vdev = vi->vdev; 1309 1250 1310 - virtnet_set_affinity(vi, false); 1251 + virtnet_clean_affinity(vi, -1); 1311 1252 1312 1253 vdev->config->del_vqs(vdev); 1313 1254 ··· 1430 1371 if (ret) 1431 1372 goto err_free; 1432 1373 1433 - virtnet_set_affinity(vi, true); 1374 + get_online_cpus(); 1375 + virtnet_set_affinity(vi); 1376 + put_online_cpus(); 1377 + 1434 1378 return 0; 1435 1379 1436 1380 err_free: ··· 1515 1453 if (vi->stats == NULL) 1516 1454 goto free; 1517 1455 1456 + vi->vq_index = alloc_percpu(int); 1457 + if (vi->vq_index == NULL) 1458 + goto free_stats; 1459 + 1518 1460 mutex_init(&vi->config_lock); 1519 1461 vi->config_enable = true; 1520 1462 INIT_WORK(&vi->config_work, virtnet_config_changed_work); ··· 1542 1476 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ 1543 1477 err = init_vqs(vi); 1544 1478 if (err) 1545 - goto free_stats; 1479 + goto free_index; 1546 1480 1547 1481 netif_set_real_num_tx_queues(dev, 1); 1548 1482 netif_set_real_num_rx_queues(dev, 1); ··· 1563 1497 err = -ENOMEM; 1564 1498 goto free_recv_bufs; 1565 1499 } 1500 + } 1501 + 1502 + vi->nb.notifier_call = &virtnet_cpu_callback; 1503 + err = register_hotcpu_notifier(&vi->nb); 1504 + if (err) { 1505 + pr_debug("virtio_net: registering cpu notifier failed\n"); 
1506 + goto free_recv_bufs; 1566 1507 } 1567 1508 1568 1509 /* Assume link up if device can't report link status, ··· 1593 1520 free_vqs: 1594 1521 cancel_delayed_work_sync(&vi->refill); 1595 1522 virtnet_del_vqs(vi); 1523 + free_index: 1524 + free_percpu(vi->vq_index); 1596 1525 free_stats: 1597 1526 free_percpu(vi->stats); 1598 1527 free: ··· 1618 1543 { 1619 1544 struct virtnet_info *vi = vdev->priv; 1620 1545 1546 + unregister_hotcpu_notifier(&vi->nb); 1547 + 1621 1548 /* Prevent config work handler from accessing the device. */ 1622 1549 mutex_lock(&vi->config_lock); 1623 1550 vi->config_enable = false; ··· 1631 1554 1632 1555 flush_work(&vi->config_work); 1633 1556 1557 + free_percpu(vi->vq_index); 1634 1558 free_percpu(vi->stats); 1635 1559 free_netdev(vi->dev); 1636 1560 }
+3 -4
drivers/net/vmxnet3/vmxnet3_drv.c
··· 154 154 if (ret & 1) { /* Link is up. */ 155 155 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", 156 156 adapter->netdev->name, adapter->link_speed); 157 - if (!netif_carrier_ok(adapter->netdev)) 158 - netif_carrier_on(adapter->netdev); 157 + netif_carrier_on(adapter->netdev); 159 158 160 159 if (affectTxQueue) { 161 160 for (i = 0; i < adapter->num_tx_queues; i++) ··· 164 165 } else { 165 166 printk(KERN_INFO "%s: NIC Link is Down\n", 166 167 adapter->netdev->name); 167 - if (netif_carrier_ok(adapter->netdev)) 168 - netif_carrier_off(adapter->netdev); 168 + netif_carrier_off(adapter->netdev); 169 169 170 170 if (affectTxQueue) { 171 171 for (i = 0; i < adapter->num_tx_queues; i++) ··· 3059 3061 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); 3060 3062 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); 3061 3063 3064 + netif_carrier_off(netdev); 3062 3065 err = register_netdev(netdev); 3063 3066 3064 3067 if (err) {
+2
drivers/net/wireless/ath/ath9k/ar9003_calib.c
··· 976 976 AR_PHY_CL_TAB_1, 977 977 AR_PHY_CL_TAB_2 }; 978 978 979 + ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask); 980 + 979 981 if (rtt) { 980 982 if (!ar9003_hw_rtt_restore(ah, chan)) 981 983 run_rtt_cal = true;
+7 -20
drivers/net/wireless/ath/ath9k/ar9003_phy.c
··· 586 586 ath9k_hw_synth_delay(ah, chan, synthDelay); 587 587 } 588 588 589 - static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx) 589 + void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx) 590 590 { 591 - switch (rx) { 592 - case 0x5: 591 + if (ah->caps.tx_chainmask == 5 || ah->caps.rx_chainmask == 5) 593 592 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, 594 593 AR_PHY_SWAP_ALT_CHAIN); 595 - case 0x3: 596 - case 0x1: 597 - case 0x2: 598 - case 0x7: 599 - REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx); 600 - REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx); 601 - break; 602 - default: 603 - break; 604 - } 594 + 595 + REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx); 596 + REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx); 605 597 606 598 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7)) 607 - REG_WRITE(ah, AR_SELFGEN_MASK, 0x3); 608 - else 609 - REG_WRITE(ah, AR_SELFGEN_MASK, tx); 599 + tx = 3; 610 600 611 - if (tx == 0x5) { 612 - REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, 613 - AR_PHY_SWAP_ALT_CHAIN); 614 - } 601 + REG_WRITE(ah, AR_SELFGEN_MASK, tx); 615 602 } 616 603 617 604 /*
-3
drivers/net/wireless/ath/ath9k/ath9k.h
··· 317 317 u32 *rxlink; 318 318 u32 num_pkts; 319 319 unsigned int rxfilter; 320 - spinlock_t rxbuflock; 321 320 struct list_head rxbuf; 322 321 struct ath_descdma rxdma; 323 322 struct ath_buf *rx_bufptr; ··· 327 328 328 329 int ath_startrecv(struct ath_softc *sc); 329 330 bool ath_stoprecv(struct ath_softc *sc); 330 - void ath_flushrecv(struct ath_softc *sc); 331 331 u32 ath_calcrxfilter(struct ath_softc *sc); 332 332 int ath_rx_init(struct ath_softc *sc, int nbufs); 333 333 void ath_rx_cleanup(struct ath_softc *sc); ··· 644 646 enum sc_op_flags { 645 647 SC_OP_INVALID, 646 648 SC_OP_BEACONS, 647 - SC_OP_RXFLUSH, 648 649 SC_OP_ANI_RUN, 649 650 SC_OP_PRIM_STA_VIF, 650 651 SC_OP_HW_RESET,
+1 -1
drivers/net/wireless/ath/ath9k/beacon.c
··· 147 147 skb->len, DMA_TO_DEVICE); 148 148 dev_kfree_skb_any(skb); 149 149 bf->bf_buf_addr = 0; 150 + bf->bf_mpdu = NULL; 150 151 } 151 152 152 153 skb = ieee80211_beacon_get(hw, vif); ··· 360 359 return; 361 360 362 361 bf = ath9k_beacon_generate(sc->hw, vif); 363 - WARN_ON(!bf); 364 362 365 363 if (sc->beacon.bmisscnt != 0) { 366 364 ath_dbg(common, BSTUCK, "resume beacon xmit after %u misses\n",
-1
drivers/net/wireless/ath/ath9k/debug.c
··· 861 861 RXS_ERR("RX-LENGTH-ERR", rx_len_err); 862 862 RXS_ERR("RX-OOM-ERR", rx_oom_err); 863 863 RXS_ERR("RX-RATE-ERR", rx_rate_err); 864 - RXS_ERR("RX-DROP-RXFLUSH", rx_drop_rxflush); 865 864 RXS_ERR("RX-TOO-MANY-FRAGS", rx_too_many_frags_err); 866 865 867 866 PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
-2
drivers/net/wireless/ath/ath9k/debug.h
··· 216 216 * @rx_oom_err: No. of frames dropped due to OOM issues. 217 217 * @rx_rate_err: No. of frames dropped due to rate errors. 218 218 * @rx_too_many_frags_err: Frames dropped due to too-many-frags received. 219 - * @rx_drop_rxflush: No. of frames dropped due to RX-FLUSH. 220 219 * @rx_beacons: No. of beacons received. 221 220 * @rx_frags: No. of rx-fragements received. 222 221 */ ··· 234 235 u32 rx_oom_err; 235 236 u32 rx_rate_err; 236 237 u32 rx_too_many_frags_err; 237 - u32 rx_drop_rxflush; 238 238 u32 rx_beacons; 239 239 u32 rx_frags; 240 240 };
+2
drivers/net/wireless/ath/ath9k/htc_hst.c
··· 344 344 endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv, 345 345 skb, htc_hdr->endpoint_id, 346 346 txok); 347 + } else { 348 + kfree_skb(skb); 347 349 } 348 350 } 349 351
+1
drivers/net/wireless/ath/ath9k/hw.h
··· 1066 1066 int ar9003_paprd_init_table(struct ath_hw *ah); 1067 1067 bool ar9003_paprd_is_done(struct ath_hw *ah); 1068 1068 bool ar9003_is_paprd_enabled(struct ath_hw *ah); 1069 + void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx); 1069 1070 1070 1071 /* Hardware family op attach helpers */ 1071 1072 void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
+9 -13
drivers/net/wireless/ath/ath9k/main.c
··· 182 182 ath_start_ani(sc); 183 183 } 184 184 185 - static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush) 185 + static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx) 186 186 { 187 187 struct ath_hw *ah = sc->sc_ah; 188 188 bool ret = true; ··· 201 201 202 202 if (!ath_drain_all_txq(sc, retry_tx)) 203 203 ret = false; 204 - 205 - if (!flush) { 206 - if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 207 - ath_rx_tasklet(sc, 1, true); 208 - ath_rx_tasklet(sc, 1, false); 209 - } else { 210 - ath_flushrecv(sc); 211 - } 212 204 213 205 return ret; 214 206 } ··· 254 262 struct ath_common *common = ath9k_hw_common(ah); 255 263 struct ath9k_hw_cal_data *caldata = NULL; 256 264 bool fastcc = true; 257 - bool flush = false; 258 265 int r; 259 266 260 267 __ath_cancel_work(sc); 261 268 269 + tasklet_disable(&sc->intr_tq); 262 270 spin_lock_bh(&sc->sc_pcu_lock); 263 271 264 272 if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) { ··· 268 276 269 277 if (!hchan) { 270 278 fastcc = false; 271 - flush = true; 272 279 hchan = ah->curchan; 273 280 } 274 281 275 - if (!ath_prepare_reset(sc, retry_tx, flush)) 282 + if (!ath_prepare_reset(sc, retry_tx)) 276 283 fastcc = false; 277 284 278 285 ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n", ··· 293 302 294 303 out: 295 304 spin_unlock_bh(&sc->sc_pcu_lock); 305 + tasklet_enable(&sc->intr_tq); 306 + 296 307 return r; 297 308 } 298 309 ··· 797 804 ath9k_hw_cfg_gpio_input(ah, ah->led_pin); 798 805 } 799 806 800 - ath_prepare_reset(sc, false, true); 807 + ath_prepare_reset(sc, false); 801 808 802 809 if (sc->rx.frag) { 803 810 dev_kfree_skb_any(sc->rx.frag); ··· 1826 1833 1827 1834 static bool validate_antenna_mask(struct ath_hw *ah, u32 val) 1828 1835 { 1836 + if (AR_SREV_9300_20_OR_LATER(ah)) 1837 + return true; 1838 + 1829 1839 switch (val & 0x7) { 1830 1840 case 0x1: 1831 1841 case 0x3:
+15 -39
drivers/net/wireless/ath/ath9k/recv.c
··· 254 254 255 255 static void ath_edma_start_recv(struct ath_softc *sc) 256 256 { 257 - spin_lock_bh(&sc->rx.rxbuflock); 258 - 259 257 ath9k_hw_rxena(sc->sc_ah); 260 258 261 259 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP, ··· 265 267 ath_opmode_init(sc); 266 268 267 269 ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)); 268 - 269 - spin_unlock_bh(&sc->rx.rxbuflock); 270 270 } 271 271 272 272 static void ath_edma_stop_recv(struct ath_softc *sc) ··· 281 285 int error = 0; 282 286 283 287 spin_lock_init(&sc->sc_pcu_lock); 284 - spin_lock_init(&sc->rx.rxbuflock); 285 - clear_bit(SC_OP_RXFLUSH, &sc->sc_flags); 286 288 287 289 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 + 288 290 sc->sc_ah->caps.rx_status_len; ··· 441 447 return 0; 442 448 } 443 449 444 - spin_lock_bh(&sc->rx.rxbuflock); 445 450 if (list_empty(&sc->rx.rxbuf)) 446 451 goto start_recv; 447 452 ··· 461 468 ath_opmode_init(sc); 462 469 ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)); 463 470 464 - spin_unlock_bh(&sc->rx.rxbuflock); 465 - 466 471 return 0; 472 + } 473 + 474 + static void ath_flushrecv(struct ath_softc *sc) 475 + { 476 + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 477 + ath_rx_tasklet(sc, 1, true); 478 + ath_rx_tasklet(sc, 1, false); 467 479 } 468 480 469 481 bool ath_stoprecv(struct ath_softc *sc) ··· 476 478 struct ath_hw *ah = sc->sc_ah; 477 479 bool stopped, reset = false; 478 480 479 - spin_lock_bh(&sc->rx.rxbuflock); 480 481 ath9k_hw_abortpcurecv(ah); 481 482 ath9k_hw_setrxfilter(ah, 0); 482 483 stopped = ath9k_hw_stopdmarecv(ah, &reset); 484 + 485 + ath_flushrecv(sc); 483 486 484 487 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 485 488 ath_edma_stop_recv(sc); 486 489 else 487 490 sc->rx.rxlink = NULL; 488 - spin_unlock_bh(&sc->rx.rxbuflock); 489 491 490 492 if (!(ah->ah_flags & AH_UNPLUGGED) && 491 493 unlikely(!stopped)) { ··· 495 497 ATH_DBG_WARN_ON_ONCE(!stopped); 496 498 } 497 499 return stopped 
&& !reset; 498 - } 499 - 500 - void ath_flushrecv(struct ath_softc *sc) 501 - { 502 - set_bit(SC_OP_RXFLUSH, &sc->sc_flags); 503 - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 504 - ath_rx_tasklet(sc, 1, true); 505 - ath_rx_tasklet(sc, 1, false); 506 - clear_bit(SC_OP_RXFLUSH, &sc->sc_flags); 507 500 } 508 501 509 502 static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) ··· 733 744 return NULL; 734 745 } 735 746 747 + list_del(&bf->list); 736 748 if (!bf->bf_mpdu) 737 749 return bf; 738 750 ··· 1049 1059 dma_type = DMA_FROM_DEVICE; 1050 1060 1051 1061 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; 1052 - spin_lock_bh(&sc->rx.rxbuflock); 1053 1062 1054 1063 tsf = ath9k_hw_gettsf64(ah); 1055 1064 tsf_lower = tsf & 0xffffffff; 1056 1065 1057 1066 do { 1058 1067 bool decrypt_error = false; 1059 - /* If handling rx interrupt and flush is in progress => exit */ 1060 - if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0)) 1061 - break; 1062 1068 1063 1069 memset(&rs, 0, sizeof(rs)); 1064 1070 if (edma) ··· 1096 1110 sc->rx.num_pkts++; 1097 1111 1098 1112 ath_debug_stat_rx(sc, &rs); 1099 - 1100 - /* 1101 - * If we're asked to flush receive queue, directly 1102 - * chain it back at the queue without processing it. 
1103 - */ 1104 - if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags)) { 1105 - RX_STAT_INC(rx_drop_rxflush); 1106 - goto requeue_drop_frag; 1107 - } 1108 1113 1109 1114 memset(rxs, 0, sizeof(struct ieee80211_rx_status)); 1110 1115 ··· 1231 1254 sc->rx.frag = NULL; 1232 1255 } 1233 1256 requeue: 1257 + list_add_tail(&bf->list, &sc->rx.rxbuf); 1258 + if (flush) 1259 + continue; 1260 + 1234 1261 if (edma) { 1235 - list_add_tail(&bf->list, &sc->rx.rxbuf); 1236 1262 ath_rx_edma_buf_link(sc, qtype); 1237 1263 } else { 1238 - list_move_tail(&bf->list, &sc->rx.rxbuf); 1239 1264 ath_rx_buf_link(sc, bf); 1240 - if (!flush) 1241 - ath9k_hw_rxena(ah); 1265 + ath9k_hw_rxena(ah); 1242 1266 } 1243 1267 } while (1); 1244 - 1245 - spin_unlock_bh(&sc->rx.rxbuflock); 1246 1268 1247 1269 if (!(ah->imask & ATH9K_INT_RXEOL)) { 1248 1270 ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
+25 -17
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
··· 36 36 #include "debug.h" 37 37 38 38 #define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */ 39 + #define BRCMS_FLUSH_TIMEOUT 500 /* msec */ 39 40 40 41 /* Flags we support */ 41 42 #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \ ··· 709 708 wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked); 710 709 } 711 710 711 + static bool brcms_tx_flush_completed(struct brcms_info *wl) 712 + { 713 + bool result; 714 + 715 + spin_lock_bh(&wl->lock); 716 + result = brcms_c_tx_flush_completed(wl->wlc); 717 + spin_unlock_bh(&wl->lock); 718 + return result; 719 + } 720 + 712 721 static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop) 713 722 { 714 723 struct brcms_info *wl = hw->priv; 724 + int ret; 715 725 716 726 no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false"); 717 727 718 - /* wait for packet queue and dma fifos to run empty */ 719 - spin_lock_bh(&wl->lock); 720 - brcms_c_wait_for_tx_completion(wl->wlc, drop); 721 - spin_unlock_bh(&wl->lock); 728 + ret = wait_event_timeout(wl->tx_flush_wq, 729 + brcms_tx_flush_completed(wl), 730 + msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT)); 731 + 732 + brcms_dbg_mac80211(wl->wlc->hw->d11core, 733 + "ret=%d\n", jiffies_to_msecs(ret)); 722 734 } 723 735 724 736 static const struct ieee80211_ops brcms_ops = { ··· 786 772 787 773 done: 788 774 spin_unlock_bh(&wl->lock); 775 + wake_up(&wl->tx_flush_wq); 789 776 } 790 777 791 778 /* ··· 1034 1019 wl->wiphy = hw->wiphy; 1035 1020 1036 1021 atomic_set(&wl->callbacks, 0); 1022 + 1023 + init_waitqueue_head(&wl->tx_flush_wq); 1037 1024 1038 1025 /* setup the bottom half handler */ 1039 1026 tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl); ··· 1424 1407 #endif 1425 1408 t->ms = ms; 1426 1409 t->periodic = (bool) periodic; 1427 - t->set = true; 1428 - 1429 - atomic_inc(&t->wl->callbacks); 1410 + if (!t->set) { 1411 + t->set = true; 1412 + atomic_inc(&t->wl->callbacks); 1413 + } 1430 1414 1431 1415 ieee80211_queue_delayed_work(hw, &t->dly_wrk, 
msecs_to_jiffies(ms)); 1432 1416 } ··· 1625 1607 wiphy_rfkill_start_polling(wl->pub->ieee_hw->wiphy); 1626 1608 spin_lock_bh(&wl->lock); 1627 1609 return blocked; 1628 - } 1629 - 1630 - /* 1631 - * precondition: perimeter lock has been acquired 1632 - */ 1633 - void brcms_msleep(struct brcms_info *wl, uint ms) 1634 - { 1635 - spin_unlock_bh(&wl->lock); 1636 - msleep(ms); 1637 - spin_lock_bh(&wl->lock); 1638 1610 }
+2 -1
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
··· 68 68 spinlock_t lock; /* per-device perimeter lock */ 69 69 spinlock_t isr_lock; /* per-device ISR synchronization lock */ 70 70 71 + /* tx flush */ 72 + wait_queue_head_t tx_flush_wq; 71 73 72 74 /* timer related fields */ 73 75 atomic_t callbacks; /* # outstanding callback functions */ ··· 102 100 extern void brcms_free_timer(struct brcms_timer *timer); 103 101 extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic); 104 102 extern bool brcms_del_timer(struct brcms_timer *timer); 105 - extern void brcms_msleep(struct brcms_info *wl, uint ms); 106 103 extern void brcms_dpc(unsigned long data); 107 104 extern void brcms_timer(struct brcms_timer *t); 108 105 extern void brcms_fatal_error(struct brcms_info *wl);
+12 -28
drivers/net/wireless/brcm80211/brcmsmac/main.c
··· 1027 1027 static bool 1028 1028 brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) 1029 1029 { 1030 - bool morepending = false; 1031 1030 struct bcma_device *core; 1032 1031 struct tx_status txstatus, *txs; 1033 1032 u32 s1, s2; ··· 1040 1041 txs = &txstatus; 1041 1042 core = wlc_hw->d11core; 1042 1043 *fatal = false; 1043 - s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); 1044 - while (!(*fatal) 1045 - && (s1 & TXS_V)) { 1046 - /* !give others some time to run! */ 1047 - if (n >= max_tx_num) { 1048 - morepending = true; 1049 - break; 1050 - } 1051 1044 1045 + while (n < max_tx_num) { 1046 + s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); 1052 1047 if (s1 == 0xffffffff) { 1053 1048 brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit, 1054 1049 __func__); 1055 1050 *fatal = true; 1056 1051 return false; 1057 1052 } 1058 - s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); 1053 + /* only process when valid */ 1054 + if (!(s1 & TXS_V)) 1055 + break; 1059 1056 1057 + s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); 1060 1058 txs->status = s1 & TXS_STATUS_MASK; 1061 1059 txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT; 1062 1060 txs->sequence = s2 & TXS_SEQ_MASK; ··· 1061 1065 txs->lasttxtime = 0; 1062 1066 1063 1067 *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs); 1064 - 1065 - s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); 1068 + if (*fatal == true) 1069 + return false; 1066 1070 n++; 1067 1071 } 1068 1072 1069 - if (*fatal) 1070 - return false; 1071 - 1072 - return morepending; 1073 + return n >= max_tx_num; 1073 1074 } 1074 1075 1075 1076 static void brcms_c_tbtt(struct brcms_c_info *wlc) ··· 7511 7518 return wlc->band->bandunit; 7512 7519 } 7513 7520 7514 - void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop) 7521 + bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc) 7515 7522 { 7516 - int timeout = 20; 7517 7523 int i; 7518 7524 7519 7525 /* Kick DMA to send any pending AMPDU */ 7520 7526 for (i = 0; i < 
ARRAY_SIZE(wlc->hw->di); i++) 7521 7527 if (wlc->hw->di[i]) 7522 - dma_txflush(wlc->hw->di[i]); 7528 + dma_kick_tx(wlc->hw->di[i]); 7523 7529 7524 - /* wait for queue and DMA fifos to run dry */ 7525 - while (brcms_txpktpendtot(wlc) > 0) { 7526 - brcms_msleep(wlc->wl, 1); 7527 - 7528 - if (--timeout == 0) 7529 - break; 7530 - } 7531 - 7532 - WARN_ON_ONCE(timeout == 0); 7530 + return !brcms_txpktpendtot(wlc); 7533 7531 } 7534 7532 7535 7533 void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
+1 -2
drivers/net/wireless/brcm80211/brcmsmac/pub.h
··· 314 314 extern void brcms_c_scan_start(struct brcms_c_info *wlc); 315 315 extern void brcms_c_scan_stop(struct brcms_c_info *wlc); 316 316 extern int brcms_c_get_curband(struct brcms_c_info *wlc); 317 - extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, 318 - bool drop); 319 317 extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel); 320 318 extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl); 321 319 extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc, ··· 330 332 extern int brcms_c_get_tx_power(struct brcms_c_info *wlc); 331 333 extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc); 332 334 extern void brcms_c_mute(struct brcms_c_info *wlc, bool on); 335 + extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc); 333 336 334 337 #endif /* _BRCM_PUB_H_ */
+14 -21
drivers/net/wireless/iwlegacy/common.c
··· 3958 3958 3959 3959 memset(&il->staging, 0, sizeof(il->staging)); 3960 3960 3961 - if (!il->vif) { 3961 + switch (il->iw_mode) { 3962 + case NL80211_IFTYPE_UNSPECIFIED: 3962 3963 il->staging.dev_type = RXON_DEV_TYPE_ESS; 3963 - } else if (il->vif->type == NL80211_IFTYPE_STATION) { 3964 + break; 3965 + case NL80211_IFTYPE_STATION: 3964 3966 il->staging.dev_type = RXON_DEV_TYPE_ESS; 3965 3967 il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; 3966 - } else if (il->vif->type == NL80211_IFTYPE_ADHOC) { 3968 + break; 3969 + case NL80211_IFTYPE_ADHOC: 3967 3970 il->staging.dev_type = RXON_DEV_TYPE_IBSS; 3968 3971 il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; 3969 3972 il->staging.filter_flags = 3970 3973 RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK; 3971 - } else { 3974 + break; 3975 + default: 3972 3976 IL_ERR("Unsupported interface type %d\n", il->vif->type); 3973 3977 return; 3974 3978 } ··· 4554 4550 EXPORT_SYMBOL(il_mac_add_interface); 4555 4551 4556 4552 static void 4557 - il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif, 4558 - bool mode_change) 4553 + il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif) 4559 4554 { 4560 4555 lockdep_assert_held(&il->mutex); 4561 4556 ··· 4563 4560 il_force_scan_end(il); 4564 4561 } 4565 4562 4566 - if (!mode_change) 4567 - il_set_mode(il); 4568 - 4563 + il_set_mode(il); 4569 4564 } 4570 4565 4571 4566 void ··· 4576 4575 4577 4576 WARN_ON(il->vif != vif); 4578 4577 il->vif = NULL; 4579 - 4580 - il_teardown_interface(il, vif, false); 4578 + il->iw_mode = NL80211_IFTYPE_UNSPECIFIED; 4579 + il_teardown_interface(il, vif); 4581 4580 memset(il->bssid, 0, ETH_ALEN); 4582 4581 4583 4582 D_MAC80211("leave\n"); ··· 4686 4685 } 4687 4686 4688 4687 /* success */ 4689 - il_teardown_interface(il, vif, true); 4690 4688 vif->type = newtype; 4691 4689 vif->p2p = false; 4692 - err = il_set_mode(il); 4693 - WARN_ON(err); 4694 - /* 4695 - * We've switched internally, but submitting to the 4696 
- * device may have failed for some reason. Mask this 4697 - * error, because otherwise mac80211 will not switch 4698 - * (and set the interface type back) and we'll be 4699 - * out of sync with it. 4700 - */ 4690 + il->iw_mode = newtype; 4691 + il_teardown_interface(il, vif); 4701 4692 err = 0; 4702 4693 4703 4694 out:
+9 -17
drivers/net/wireless/iwlwifi/dvm/tx.c
··· 1079 1079 { 1080 1080 u16 status = le16_to_cpu(tx_resp->status.status); 1081 1081 1082 + info->flags &= ~IEEE80211_TX_CTL_AMPDU; 1083 + 1082 1084 info->status.rates[0].count = tx_resp->failure_frame + 1; 1083 1085 info->flags |= iwl_tx_status_to_mac80211(status); 1084 1086 iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags), ··· 1153 1151 next_reclaimed = ssn; 1154 1152 } 1155 1153 1154 + if (tid != IWL_TID_NON_QOS) { 1155 + priv->tid_data[sta_id][tid].next_reclaimed = 1156 + next_reclaimed; 1157 + IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", 1158 + next_reclaimed); 1159 + } 1160 + 1156 1161 iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); 1157 1162 1158 1163 iwlagn_check_ratid_empty(priv, sta_id, tid); ··· 1210 1201 if (!is_agg) 1211 1202 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); 1212 1203 1213 - /* 1214 - * W/A for FW bug - the seq_ctl isn't updated when the 1215 - * queues are flushed. Fetch it from the packet itself 1216 - */ 1217 - if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) { 1218 - next_reclaimed = le16_to_cpu(hdr->seq_ctrl); 1219 - next_reclaimed = 1220 - SEQ_TO_SN(next_reclaimed + 0x10); 1221 - } 1222 - 1223 1204 is_offchannel_skb = 1224 1205 (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN); 1225 1206 freed++; 1226 - } 1227 - 1228 - if (tid != IWL_TID_NON_QOS) { 1229 - priv->tid_data[sta_id][tid].next_reclaimed = 1230 - next_reclaimed; 1231 - IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", 1232 - next_reclaimed); 1233 1207 } 1234 1208 1235 1209 WARN_ON(!is_agg && freed != 1);
+2 -15
drivers/net/wireless/mwifiex/cfg80211.c
··· 1459 1459 struct cfg80211_ssid req_ssid; 1460 1460 int ret, auth_type = 0; 1461 1461 struct cfg80211_bss *bss = NULL; 1462 - u8 is_scanning_required = 0, config_bands = 0; 1462 + u8 is_scanning_required = 0; 1463 1463 1464 1464 memset(&req_ssid, 0, sizeof(struct cfg80211_ssid)); 1465 1465 ··· 1477 1477 1478 1478 /* disconnect before try to associate */ 1479 1479 mwifiex_deauthenticate(priv, NULL); 1480 - 1481 - if (channel) { 1482 - if (mode == NL80211_IFTYPE_STATION) { 1483 - if (channel->band == IEEE80211_BAND_2GHZ) 1484 - config_bands = BAND_B | BAND_G | BAND_GN; 1485 - else 1486 - config_bands = BAND_A | BAND_AN; 1487 - 1488 - if (!((config_bands | priv->adapter->fw_bands) & 1489 - ~priv->adapter->fw_bands)) 1490 - priv->adapter->config_bands = config_bands; 1491 - } 1492 - } 1493 1480 1494 1481 /* As this is new association, clear locally stored 1495 1482 * keys and security related flags */ ··· 1694 1707 1695 1708 if (cfg80211_get_chandef_type(&params->chandef) != 1696 1709 NL80211_CHAN_NO_HT) 1697 - config_bands |= BAND_GN; 1710 + config_bands |= BAND_G | BAND_GN; 1698 1711 } else { 1699 1712 if (cfg80211_get_chandef_type(&params->chandef) == 1700 1713 NL80211_CHAN_NO_HT)
+1 -1
drivers/net/wireless/mwifiex/pcie.c
··· 161 161 162 162 if (pdev) { 163 163 card = (struct pcie_service_card *) pci_get_drvdata(pdev); 164 - if (!card || card->adapter) { 164 + if (!card || !card->adapter) { 165 165 pr_err("Card or adapter structure is not valid\n"); 166 166 return 0; 167 167 }
+5 -4
drivers/net/wireless/mwifiex/scan.c
··· 1563 1563 dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n", 1564 1564 scan_rsp->number_of_sets); 1565 1565 ret = -1; 1566 - goto done; 1566 + goto check_next_scan; 1567 1567 } 1568 1568 1569 1569 bytes_left = le16_to_cpu(scan_rsp->bss_descript_size); ··· 1634 1634 if (!beacon_size || beacon_size > bytes_left) { 1635 1635 bss_info += bytes_left; 1636 1636 bytes_left = 0; 1637 - return -1; 1637 + ret = -1; 1638 + goto check_next_scan; 1638 1639 } 1639 1640 1640 1641 /* Initialize the current working beacon pointer for this BSS ··· 1691 1690 dev_err(priv->adapter->dev, 1692 1691 "%s: bytes left < IE length\n", 1693 1692 __func__); 1694 - goto done; 1693 + goto check_next_scan; 1695 1694 } 1696 1695 if (element_id == WLAN_EID_DS_PARAMS) { 1697 1696 channel = *(current_ptr + sizeof(struct ieee_types_header)); ··· 1754 1753 } 1755 1754 } 1756 1755 1756 + check_next_scan: 1757 1757 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); 1758 1758 if (list_empty(&adapter->scan_pending_q)) { 1759 1759 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); ··· 1815 1813 } 1816 1814 } 1817 1815 1818 - done: 1819 1816 return ret; 1820 1817 } 1821 1818
+14
drivers/net/wireless/mwifiex/sta_ioctl.c
··· 283 283 if (ret) 284 284 goto done; 285 285 286 + if (bss_desc) { 287 + u8 config_bands = 0; 288 + 289 + if (mwifiex_band_to_radio_type((u8) bss_desc->bss_band) 290 + == HostCmd_SCAN_RADIO_TYPE_BG) 291 + config_bands = BAND_B | BAND_G | BAND_GN; 292 + else 293 + config_bands = BAND_A | BAND_AN; 294 + 295 + if (!((config_bands | adapter->fw_bands) & 296 + ~adapter->fw_bands)) 297 + adapter->config_bands = config_bands; 298 + } 299 + 286 300 ret = mwifiex_check_network_compatibility(priv, bss_desc); 287 301 if (ret) 288 302 goto done;
+2 -2
drivers/net/wireless/rtlwifi/Kconfig
··· 57 57 58 58 config RTLWIFI 59 59 tristate 60 - depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE 60 + depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE || RTL8723AE 61 61 default m 62 62 63 63 config RTLWIFI_DEBUG 64 64 bool "Additional debugging output" 65 - depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE 65 + depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE || RTL8723AE 66 66 default y 67 67 68 68 config RTL8192C_COMMON
+4 -3
drivers/net/wireless/rtlwifi/base.c
··· 1004 1004 is_tx ? "Tx" : "Rx"); 1005 1005 1006 1006 if (is_tx) { 1007 - rtl_lps_leave(hw); 1007 + schedule_work(&rtlpriv-> 1008 + works.lps_leave_work); 1008 1009 ppsc->last_delaylps_stamp_jiffies = 1009 1010 jiffies; 1010 1011 } ··· 1015 1014 } 1016 1015 } else if (ETH_P_ARP == ether_type) { 1017 1016 if (is_tx) { 1018 - rtl_lps_leave(hw); 1017 + schedule_work(&rtlpriv->works.lps_leave_work); 1019 1018 ppsc->last_delaylps_stamp_jiffies = jiffies; 1020 1019 } 1021 1020 ··· 1025 1024 "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx"); 1026 1025 1027 1026 if (is_tx) { 1028 - rtl_lps_leave(hw); 1027 + schedule_work(&rtlpriv->works.lps_leave_work); 1029 1028 ppsc->last_delaylps_stamp_jiffies = jiffies; 1030 1029 } 1031 1030
+2 -2
drivers/net/wireless/rtlwifi/usb.c
··· 542 542 WARN_ON(skb_queue_empty(&rx_queue)); 543 543 while (!skb_queue_empty(&rx_queue)) { 544 544 _skb = skb_dequeue(&rx_queue); 545 - _rtl_usb_rx_process_agg(hw, skb); 546 - ieee80211_rx_irqsafe(hw, skb); 545 + _rtl_usb_rx_process_agg(hw, _skb); 546 + ieee80211_rx_irqsafe(hw, _skb); 547 547 } 548 548 } 549 549
+3
drivers/net/xen-netback/common.h
··· 151 151 /* Notify xenvif that ring now has space to send an skb to the frontend */ 152 152 void xenvif_notify_tx_completion(struct xenvif *vif); 153 153 154 + /* Prevent the device from generating any further traffic. */ 155 + void xenvif_carrier_off(struct xenvif *vif); 156 + 154 157 /* Returns number of ring slots required to send an skb to the frontend */ 155 158 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); 156 159
+14 -9
drivers/net/xen-netback/interface.c
··· 343 343 return err; 344 344 } 345 345 346 - void xenvif_disconnect(struct xenvif *vif) 346 + void xenvif_carrier_off(struct xenvif *vif) 347 347 { 348 348 struct net_device *dev = vif->dev; 349 - if (netif_carrier_ok(dev)) { 350 - rtnl_lock(); 351 - netif_carrier_off(dev); /* discard queued packets */ 352 - if (netif_running(dev)) 353 - xenvif_down(vif); 354 - rtnl_unlock(); 355 - xenvif_put(vif); 356 - } 349 + 350 + rtnl_lock(); 351 + netif_carrier_off(dev); /* discard queued packets */ 352 + if (netif_running(dev)) 353 + xenvif_down(vif); 354 + rtnl_unlock(); 355 + xenvif_put(vif); 356 + } 357 + 358 + void xenvif_disconnect(struct xenvif *vif) 359 + { 360 + if (netif_carrier_ok(vif->dev)) 361 + xenvif_carrier_off(vif); 357 362 358 363 atomic_dec(&vif->refcnt); 359 364 wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+71 -44
drivers/net/xen-netback/netback.c
··· 147 147 atomic_dec(&netbk->netfront_count); 148 148 } 149 149 150 - static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx); 150 + static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, 151 + u8 status); 151 152 static void make_tx_response(struct xenvif *vif, 152 153 struct xen_netif_tx_request *txp, 153 154 s8 st); ··· 880 879 881 880 do { 882 881 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); 883 - if (cons >= end) 882 + if (cons == end) 884 883 break; 885 884 txp = RING_GET_REQUEST(&vif->tx, cons++); 886 885 } while (1); 887 886 vif->tx.req_cons = cons; 888 887 xen_netbk_check_rx_xenvif(vif); 888 + xenvif_put(vif); 889 + } 890 + 891 + static void netbk_fatal_tx_err(struct xenvif *vif) 892 + { 893 + netdev_err(vif->dev, "fatal error; disabling device\n"); 894 + xenvif_carrier_off(vif); 889 895 xenvif_put(vif); 890 896 } 891 897 ··· 909 901 910 902 do { 911 903 if (frags >= work_to_do) { 912 - netdev_dbg(vif->dev, "Need more frags\n"); 904 + netdev_err(vif->dev, "Need more frags\n"); 905 + netbk_fatal_tx_err(vif); 913 906 return -frags; 914 907 } 915 908 916 909 if (unlikely(frags >= MAX_SKB_FRAGS)) { 917 - netdev_dbg(vif->dev, "Too many frags\n"); 910 + netdev_err(vif->dev, "Too many frags\n"); 911 + netbk_fatal_tx_err(vif); 918 912 return -frags; 919 913 } 920 914 921 915 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), 922 916 sizeof(*txp)); 923 917 if (txp->size > first->size) { 924 - netdev_dbg(vif->dev, "Frags galore\n"); 918 + netdev_err(vif->dev, "Frag is bigger than frame.\n"); 919 + netbk_fatal_tx_err(vif); 925 920 return -frags; 926 921 } 927 922 ··· 932 921 frags++; 933 922 934 923 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { 935 - netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n", 924 + netdev_err(vif->dev, "txp->offset: %x, size: %u\n", 936 925 txp->offset, txp->size); 926 + netbk_fatal_tx_err(vif); 937 927 return -frags; 938 928 } 939 929 } while ((txp++)->flags & XEN_NETTXF_more_data); 
··· 978 966 pending_idx = netbk->pending_ring[index]; 979 967 page = xen_netbk_alloc_page(netbk, skb, pending_idx); 980 968 if (!page) 981 - return NULL; 969 + goto err; 982 970 983 971 gop->source.u.ref = txp->gref; 984 972 gop->source.domid = vif->domid; ··· 1000 988 } 1001 989 1002 990 return gop; 991 + err: 992 + /* Unwind, freeing all pages and sending error responses. */ 993 + while (i-- > start) { 994 + xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]), 995 + XEN_NETIF_RSP_ERROR); 996 + } 997 + /* The head too, if necessary. */ 998 + if (start) 999 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); 1000 + 1001 + return NULL; 1003 1002 } 1004 1003 1005 1004 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, ··· 1019 996 { 1020 997 struct gnttab_copy *gop = *gopp; 1021 998 u16 pending_idx = *((u16 *)skb->data); 1022 - struct pending_tx_info *pending_tx_info = netbk->pending_tx_info; 1023 - struct xenvif *vif = pending_tx_info[pending_idx].vif; 1024 - struct xen_netif_tx_request *txp; 1025 999 struct skb_shared_info *shinfo = skb_shinfo(skb); 1026 1000 int nr_frags = shinfo->nr_frags; 1027 1001 int i, err, start; 1028 1002 1029 1003 /* Check status of header. */ 1030 1004 err = gop->status; 1031 - if (unlikely(err)) { 1032 - pending_ring_idx_t index; 1033 - index = pending_index(netbk->pending_prod++); 1034 - txp = &pending_tx_info[pending_idx].req; 1035 - make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); 1036 - netbk->pending_ring[index] = pending_idx; 1037 - xenvif_put(vif); 1038 - } 1005 + if (unlikely(err)) 1006 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); 1039 1007 1040 1008 /* Skip first skb fragment if it is on same page as header fragment. 
*/ 1041 1009 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); 1042 1010 1043 1011 for (i = start; i < nr_frags; i++) { 1044 1012 int j, newerr; 1045 - pending_ring_idx_t index; 1046 1013 1047 1014 pending_idx = frag_get_pending_idx(&shinfo->frags[i]); 1048 1015 ··· 1041 1028 if (likely(!newerr)) { 1042 1029 /* Had a previous error? Invalidate this fragment. */ 1043 1030 if (unlikely(err)) 1044 - xen_netbk_idx_release(netbk, pending_idx); 1031 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); 1045 1032 continue; 1046 1033 } 1047 1034 1048 1035 /* Error on this fragment: respond to client with an error. */ 1049 - txp = &netbk->pending_tx_info[pending_idx].req; 1050 - make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); 1051 - index = pending_index(netbk->pending_prod++); 1052 - netbk->pending_ring[index] = pending_idx; 1053 - xenvif_put(vif); 1036 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); 1054 1037 1055 1038 /* Not the first error? Preceding frags already invalidated. */ 1056 1039 if (err) ··· 1054 1045 1055 1046 /* First error: invalidate header and preceding fragments. */ 1056 1047 pending_idx = *((u16 *)skb->data); 1057 - xen_netbk_idx_release(netbk, pending_idx); 1048 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); 1058 1049 for (j = start; j < i; j++) { 1059 1050 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 1060 - xen_netbk_idx_release(netbk, pending_idx); 1051 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); 1061 1052 } 1062 1053 1063 1054 /* Remember the error: invalidate all subsequent fragments. 
*/ ··· 1091 1082 1092 1083 /* Take an extra reference to offset xen_netbk_idx_release */ 1093 1084 get_page(netbk->mmap_pages[pending_idx]); 1094 - xen_netbk_idx_release(netbk, pending_idx); 1085 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); 1095 1086 } 1096 1087 } 1097 1088 ··· 1104 1095 1105 1096 do { 1106 1097 if (unlikely(work_to_do-- <= 0)) { 1107 - netdev_dbg(vif->dev, "Missing extra info\n"); 1098 + netdev_err(vif->dev, "Missing extra info\n"); 1099 + netbk_fatal_tx_err(vif); 1108 1100 return -EBADR; 1109 1101 } 1110 1102 ··· 1114 1104 if (unlikely(!extra.type || 1115 1105 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 1116 1106 vif->tx.req_cons = ++cons; 1117 - netdev_dbg(vif->dev, 1107 + netdev_err(vif->dev, 1118 1108 "Invalid extra type: %d\n", extra.type); 1109 + netbk_fatal_tx_err(vif); 1119 1110 return -EINVAL; 1120 1111 } 1121 1112 ··· 1132 1121 struct xen_netif_extra_info *gso) 1133 1122 { 1134 1123 if (!gso->u.gso.size) { 1135 - netdev_dbg(vif->dev, "GSO size must not be zero.\n"); 1124 + netdev_err(vif->dev, "GSO size must not be zero.\n"); 1125 + netbk_fatal_tx_err(vif); 1136 1126 return -EINVAL; 1137 1127 } 1138 1128 1139 1129 /* Currently only TCPv4 S.O. is supported. */ 1140 1130 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { 1141 - netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); 1131 + netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); 1132 + netbk_fatal_tx_err(vif); 1142 1133 return -EINVAL; 1143 1134 } 1144 1135 ··· 1277 1264 1278 1265 /* Get a netif from the list with work to do. */ 1279 1266 vif = poll_net_schedule_list(netbk); 1267 + /* This can sometimes happen because the test of 1268 + * list_empty(net_schedule_list) at the top of the 1269 + * loop is unlocked. Just go back and have another 1270 + * look. 
1271 + */ 1280 1272 if (!vif) 1281 1273 continue; 1274 + 1275 + if (vif->tx.sring->req_prod - vif->tx.req_cons > 1276 + XEN_NETIF_TX_RING_SIZE) { 1277 + netdev_err(vif->dev, 1278 + "Impossible number of requests. " 1279 + "req_prod %d, req_cons %d, size %ld\n", 1280 + vif->tx.sring->req_prod, vif->tx.req_cons, 1281 + XEN_NETIF_TX_RING_SIZE); 1282 + netbk_fatal_tx_err(vif); 1283 + continue; 1284 + } 1282 1285 1283 1286 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); 1284 1287 if (!work_to_do) { ··· 1323 1294 work_to_do = xen_netbk_get_extras(vif, extras, 1324 1295 work_to_do); 1325 1296 idx = vif->tx.req_cons; 1326 - if (unlikely(work_to_do < 0)) { 1327 - netbk_tx_err(vif, &txreq, idx); 1297 + if (unlikely(work_to_do < 0)) 1328 1298 continue; 1329 - } 1330 1299 } 1331 1300 1332 1301 ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); 1333 - if (unlikely(ret < 0)) { 1334 - netbk_tx_err(vif, &txreq, idx - ret); 1302 + if (unlikely(ret < 0)) 1335 1303 continue; 1336 - } 1304 + 1337 1305 idx += ret; 1338 1306 1339 1307 if (unlikely(txreq.size < ETH_HLEN)) { ··· 1342 1316 1343 1317 /* No crossing a page as the payload mustn't fragment. */ 1344 1318 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { 1345 - netdev_dbg(vif->dev, 1319 + netdev_err(vif->dev, 1346 1320 "txreq.offset: %x, size: %u, end: %lu\n", 1347 1321 txreq.offset, txreq.size, 1348 1322 (txreq.offset&~PAGE_MASK) + txreq.size); 1349 - netbk_tx_err(vif, &txreq, idx); 1323 + netbk_fatal_tx_err(vif); 1350 1324 continue; 1351 1325 } 1352 1326 ··· 1374 1348 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 1375 1349 1376 1350 if (netbk_set_skb_gso(vif, skb, gso)) { 1351 + /* Failure in netbk_set_skb_gso is fatal. */ 1377 1352 kfree_skb(skb); 1378 - netbk_tx_err(vif, &txreq, idx); 1379 1353 continue; 1380 1354 } 1381 1355 } ··· 1474 1448 txp->size -= data_len; 1475 1449 } else { 1476 1450 /* Schedule a response immediately. 
*/ 1477 - xen_netbk_idx_release(netbk, pending_idx); 1451 + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); 1478 1452 } 1479 1453 1480 1454 if (txp->flags & XEN_NETTXF_csum_blank) ··· 1526 1500 xen_netbk_tx_submit(netbk); 1527 1501 } 1528 1502 1529 - static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) 1503 + static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, 1504 + u8 status) 1530 1505 { 1531 1506 struct xenvif *vif; 1532 1507 struct pending_tx_info *pending_tx_info; ··· 1541 1514 1542 1515 vif = pending_tx_info->vif; 1543 1516 1544 - make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY); 1517 + make_tx_response(vif, &pending_tx_info->req, status); 1545 1518 1546 1519 index = pending_index(netbk->pending_prod++); 1547 1520 netbk->pending_ring[index] = pending_idx;
+2 -3
drivers/pinctrl/Kconfig
··· 181 181 182 182 config PINCTRL_SAMSUNG 183 183 bool 184 - depends on OF && GPIOLIB 185 184 select PINMUX 186 185 select PINCONF 187 186 188 - config PINCTRL_EXYNOS4 189 - bool "Pinctrl driver data for Exynos4 SoC" 187 + config PINCTRL_EXYNOS 188 + bool "Pinctrl driver data for Samsung EXYNOS SoCs" 190 189 depends on OF && GPIOLIB 191 190 select PINCTRL_SAMSUNG 192 191
+1 -1
drivers/pinctrl/Makefile
··· 36 36 obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o 37 37 obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o 38 38 obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o 39 - obj-$(CONFIG_PINCTRL_EXYNOS4) += pinctrl-exynos.o 39 + obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o 40 40 obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o 41 41 obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o 42 42 obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
+1 -1
drivers/pinctrl/mvebu/pinctrl-dove.c
··· 588 588 { 589 589 const struct of_device_id *match = 590 590 of_match_device(dove_pinctrl_of_match, &pdev->dev); 591 - pdev->dev.platform_data = match->data; 591 + pdev->dev.platform_data = (void *)match->data; 592 592 593 593 /* 594 594 * General MPP Configuration Register is part of pdma registers.
+4 -4
drivers/pinctrl/mvebu/pinctrl-kirkwood.c
··· 66 66 MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1, 0)), 67 67 MPP_VAR_FUNCTION(0xb, "lcd", "vsync", V(0, 0, 0, 0, 1, 0))), 68 68 MPP_MODE(6, 69 - MPP_VAR_FUNCTION(0x0, "sysrst", "out", V(1, 1, 1, 1, 1, 1)), 70 - MPP_VAR_FUNCTION(0x1, "spi", "mosi", V(1, 1, 1, 1, 1, 1)), 71 - MPP_VAR_FUNCTION(0x2, "ptp", "trig", V(1, 1, 1, 1, 0, 0))), 69 + MPP_VAR_FUNCTION(0x1, "sysrst", "out", V(1, 1, 1, 1, 1, 1)), 70 + MPP_VAR_FUNCTION(0x2, "spi", "mosi", V(1, 1, 1, 1, 1, 1)), 71 + MPP_VAR_FUNCTION(0x3, "ptp", "trig", V(1, 1, 1, 1, 0, 0))), 72 72 MPP_MODE(7, 73 73 MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1, 1)), 74 74 MPP_VAR_FUNCTION(0x1, "pex", "rsto", V(1, 1, 1, 1, 0, 1)), ··· 458 458 { 459 459 const struct of_device_id *match = 460 460 of_match_device(kirkwood_pinctrl_of_match, &pdev->dev); 461 - pdev->dev.platform_data = match->data; 461 + pdev->dev.platform_data = (void *)match->data; 462 462 return mvebu_pinctrl_probe(pdev); 463 463 } 464 464
+5 -5
drivers/pinctrl/pinctrl-exynos5440.c
··· 599 599 } 600 600 601 601 /* parse the pin numbers listed in the 'samsung,exynos5440-pins' property */ 602 - static int __init exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev, 602 + static int exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev, 603 603 struct device_node *cfg_np, unsigned int **pin_list, 604 604 unsigned int *npins) 605 605 { ··· 630 630 * Parse the information about all the available pin groups and pin functions 631 631 * from device node of the pin-controller. 632 632 */ 633 - static int __init exynos5440_pinctrl_parse_dt(struct platform_device *pdev, 633 + static int exynos5440_pinctrl_parse_dt(struct platform_device *pdev, 634 634 struct exynos5440_pinctrl_priv_data *priv) 635 635 { 636 636 struct device *dev = &pdev->dev; ··· 723 723 } 724 724 725 725 /* register the pinctrl interface with the pinctrl subsystem */ 726 - static int __init exynos5440_pinctrl_register(struct platform_device *pdev, 726 + static int exynos5440_pinctrl_register(struct platform_device *pdev, 727 727 struct exynos5440_pinctrl_priv_data *priv) 728 728 { 729 729 struct device *dev = &pdev->dev; ··· 798 798 } 799 799 800 800 /* register the gpiolib interface with the gpiolib subsystem */ 801 - static int __init exynos5440_gpiolib_register(struct platform_device *pdev, 801 + static int exynos5440_gpiolib_register(struct platform_device *pdev, 802 802 struct exynos5440_pinctrl_priv_data *priv) 803 803 { 804 804 struct gpio_chip *gc; ··· 831 831 } 832 832 833 833 /* unregister the gpiolib interface with the gpiolib subsystem */ 834 - static int __init exynos5440_gpiolib_unregister(struct platform_device *pdev, 834 + static int exynos5440_gpiolib_unregister(struct platform_device *pdev, 835 835 struct exynos5440_pinctrl_priv_data *priv) 836 836 { 837 837 int ret = gpiochip_remove(priv->gc);
+4 -5
drivers/pinctrl/pinctrl-mxs.c
··· 146 146 static void mxs_dt_free_map(struct pinctrl_dev *pctldev, 147 147 struct pinctrl_map *map, unsigned num_maps) 148 148 { 149 - int i; 149 + u32 i; 150 150 151 151 for (i = 0; i < num_maps; i++) { 152 152 if (map[i].type == PIN_MAP_TYPE_MUX_GROUP) ··· 203 203 void __iomem *reg; 204 204 u8 bank, shift; 205 205 u16 pin; 206 - int i; 206 + u32 i; 207 207 208 208 for (i = 0; i < g->npins; i++) { 209 209 bank = PINID_TO_BANK(g->pins[i]); ··· 256 256 void __iomem *reg; 257 257 u8 ma, vol, pull, bank, shift; 258 258 u16 pin; 259 - int i; 259 + u32 i; 260 260 261 261 ma = CONFIG_TO_MA(config); 262 262 vol = CONFIG_TO_VOL(config); ··· 345 345 const char *propname = "fsl,pinmux-ids"; 346 346 char *group; 347 347 int length = strlen(np->name) + SUFFIX_LEN; 348 - int i; 349 - u32 val; 348 + u32 val, i; 350 349 351 350 group = devm_kzalloc(&pdev->dev, length, GFP_KERNEL); 352 351 if (!group)
+1 -1
drivers/pinctrl/pinctrl-nomadik.c
··· 676 676 } 677 677 EXPORT_SYMBOL(nmk_gpio_set_mode); 678 678 679 - static int nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio) 679 + static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio) 680 680 { 681 681 int i; 682 682 u16 reg;
+2 -77
drivers/pinctrl/pinctrl-single.c
··· 30 30 #define PCS_MUX_BITS_NAME "pinctrl-single,bits" 31 31 #define PCS_REG_NAME_LEN ((sizeof(unsigned long) * 2) + 1) 32 32 #define PCS_OFF_DISABLED ~0U 33 - #define PCS_MAX_GPIO_VALUES 2 34 33 35 34 /** 36 35 * struct pcs_pingroup - pingroups for a function ··· 74 75 const char **pgnames; 75 76 int npgnames; 76 77 struct list_head node; 77 - }; 78 - 79 - /** 80 - * struct pcs_gpio_range - pinctrl gpio range 81 - * @range: subrange of the GPIO number space 82 - * @gpio_func: gpio function value in the pinmux register 83 - */ 84 - struct pcs_gpio_range { 85 - struct pinctrl_gpio_range range; 86 - int gpio_func; 87 78 }; 88 79 89 80 /** ··· 403 414 } 404 415 405 416 static int pcs_request_gpio(struct pinctrl_dev *pctldev, 406 - struct pinctrl_gpio_range *range, unsigned pin) 417 + struct pinctrl_gpio_range *range, unsigned offset) 407 418 { 408 - struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev); 409 - struct pcs_gpio_range *gpio = NULL; 410 - int end, mux_bytes; 411 - unsigned data; 412 - 413 - gpio = container_of(range, struct pcs_gpio_range, range); 414 - end = range->pin_base + range->npins - 1; 415 - if (pin < range->pin_base || pin > end) { 416 - dev_err(pctldev->dev, 417 - "pin %d isn't in the range of %d to %d\n", 418 - pin, range->pin_base, end); 419 - return -EINVAL; 420 - } 421 - mux_bytes = pcs->width / BITS_PER_BYTE; 422 - data = pcs->read(pcs->base + pin * mux_bytes) & ~pcs->fmask; 423 - data |= gpio->gpio_func; 424 - pcs->write(data, pcs->base + pin * mux_bytes); 425 - return 0; 419 + return -ENOTSUPP; 426 420 } 427 421 428 422 static struct pinmux_ops pcs_pinmux_ops = { ··· 879 907 880 908 static struct of_device_id pcs_of_match[]; 881 909 882 - static int pcs_add_gpio_range(struct device_node *node, struct pcs_device *pcs) 883 - { 884 - struct pcs_gpio_range *gpio; 885 - struct device_node *child; 886 - struct resource r; 887 - const char name[] = "pinctrl-single"; 888 - u32 gpiores[PCS_MAX_GPIO_VALUES]; 889 - int ret, i = 0, mux_bytes = 
0; 890 - 891 - for_each_child_of_node(node, child) { 892 - ret = of_address_to_resource(child, 0, &r); 893 - if (ret < 0) 894 - continue; 895 - memset(gpiores, 0, sizeof(u32) * PCS_MAX_GPIO_VALUES); 896 - ret = of_property_read_u32_array(child, "pinctrl-single,gpio", 897 - gpiores, PCS_MAX_GPIO_VALUES); 898 - if (ret < 0) 899 - continue; 900 - gpio = devm_kzalloc(pcs->dev, sizeof(*gpio), GFP_KERNEL); 901 - if (!gpio) { 902 - dev_err(pcs->dev, "failed to allocate pcs gpio\n"); 903 - return -ENOMEM; 904 - } 905 - gpio->range.name = devm_kzalloc(pcs->dev, sizeof(name), 906 - GFP_KERNEL); 907 - if (!gpio->range.name) { 908 - dev_err(pcs->dev, "failed to allocate range name\n"); 909 - return -ENOMEM; 910 - } 911 - memcpy((char *)gpio->range.name, name, sizeof(name)); 912 - 913 - gpio->range.id = i++; 914 - gpio->range.base = gpiores[0]; 915 - gpio->gpio_func = gpiores[1]; 916 - mux_bytes = pcs->width / BITS_PER_BYTE; 917 - gpio->range.pin_base = (r.start - pcs->res->start) / mux_bytes; 918 - gpio->range.npins = (r.end - r.start) / mux_bytes + 1; 919 - 920 - pinctrl_add_gpio_range(pcs->pctl, &gpio->range); 921 - } 922 - return 0; 923 - } 924 - 925 910 static int pcs_probe(struct platform_device *pdev) 926 911 { 927 912 struct device_node *np = pdev->dev.of_node; ··· 974 1045 ret = -EINVAL; 975 1046 goto free; 976 1047 } 977 - 978 - ret = pcs_add_gpio_range(np, pcs); 979 - if (ret < 0) 980 - goto free; 981 1048 982 1049 dev_info(pcs->dev, "%i pins at pa %p size %u\n", 983 1050 pcs->desc.npins, pcs->base, pcs->size);
+18
drivers/pinctrl/pinctrl-sirf.c
··· 1246 1246 return of_iomap(np, 0); 1247 1247 } 1248 1248 1249 + static int sirfsoc_gpio_of_xlate(struct gpio_chip *gc, 1250 + const struct of_phandle_args *gpiospec, 1251 + u32 *flags) 1252 + { 1253 + if (gpiospec->args[0] > SIRFSOC_GPIO_NO_OF_BANKS * SIRFSOC_GPIO_BANK_SIZE) 1254 + return -EINVAL; 1255 + 1256 + if (gc != &sgpio_bank[gpiospec->args[0] / SIRFSOC_GPIO_BANK_SIZE].chip.gc) 1257 + return -EINVAL; 1258 + 1259 + if (flags) 1260 + *flags = gpiospec->args[1]; 1261 + 1262 + return gpiospec->args[0] % SIRFSOC_GPIO_BANK_SIZE; 1263 + } 1264 + 1249 1265 static int sirfsoc_pinmux_probe(struct platform_device *pdev) 1250 1266 { 1251 1267 int ret; ··· 1752 1736 bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE; 1753 1737 bank->chip.gc.label = kstrdup(np->full_name, GFP_KERNEL); 1754 1738 bank->chip.gc.of_node = np; 1739 + bank->chip.gc.of_xlate = sirfsoc_gpio_of_xlate; 1740 + bank->chip.gc.of_gpio_n_cells = 2; 1755 1741 bank->chip.regs = regs; 1756 1742 bank->id = i; 1757 1743 bank->is_marco = is_marco;
+1 -1
drivers/platform/x86/ibm_rtl.c
··· 244 244 if (force) 245 245 pr_warn("module loaded by force\n"); 246 246 /* first ensure that we are running on IBM HW */ 247 - else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table)) 247 + else if (efi_enabled(EFI_BOOT) || !dmi_check_system(ibm_rtl_dmi_table)) 248 248 return -ENODEV; 249 249 250 250 /* Get the address for the Extended BIOS Data Area */
+4
drivers/platform/x86/samsung-laptop.c
··· 26 26 #include <linux/seq_file.h> 27 27 #include <linux/debugfs.h> 28 28 #include <linux/ctype.h> 29 + #include <linux/efi.h> 29 30 #include <acpi/video.h> 30 31 31 32 /* ··· 1544 1543 { 1545 1544 struct samsung_laptop *samsung; 1546 1545 int ret; 1546 + 1547 + if (efi_enabled(EFI_BOOT)) 1548 + return -ENODEV; 1547 1549 1548 1550 quirks = &samsung_unknown; 1549 1551 if (!force && !dmi_check_system(samsung_dmi_table))
+1
drivers/regulator/dbx500-prcmu.c
··· 14 14 #include <linux/debugfs.h> 15 15 #include <linux/seq_file.h> 16 16 #include <linux/slab.h> 17 + #include <linux/module.h> 17 18 18 19 #include "dbx500-prcmu.h" 19 20
+8 -7
drivers/regulator/max77686.c
··· 379 379 }; 380 380 381 381 #ifdef CONFIG_OF 382 - static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, 382 + static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev, 383 383 struct max77686_platform_data *pdata) 384 384 { 385 + struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent); 385 386 struct device_node *pmic_np, *regulators_np; 386 387 struct max77686_regulator_data *rdata; 387 388 struct of_regulator_match rmatch; ··· 391 390 pmic_np = iodev->dev->of_node; 392 391 regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators"); 393 392 if (!regulators_np) { 394 - dev_err(iodev->dev, "could not find regulators sub-node\n"); 393 + dev_err(&pdev->dev, "could not find regulators sub-node\n"); 395 394 return -EINVAL; 396 395 } 397 396 398 397 pdata->num_regulators = ARRAY_SIZE(regulators); 399 - rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * 398 + rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) * 400 399 pdata->num_regulators, GFP_KERNEL); 401 400 if (!rdata) { 402 - dev_err(iodev->dev, 401 + dev_err(&pdev->dev, 403 402 "could not allocate memory for regulator data\n"); 404 403 return -ENOMEM; 405 404 } ··· 408 407 rmatch.name = regulators[i].name; 409 408 rmatch.init_data = NULL; 410 409 rmatch.of_node = NULL; 411 - of_regulator_match(iodev->dev, regulators_np, &rmatch, 1); 410 + of_regulator_match(&pdev->dev, regulators_np, &rmatch, 1); 412 411 rdata[i].initdata = rmatch.init_data; 413 412 rdata[i].of_node = rmatch.of_node; 414 413 } ··· 418 417 return 0; 419 418 } 420 419 #else 421 - static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, 420 + static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev, 422 421 struct max77686_platform_data *pdata) 423 422 { 424 423 return 0; ··· 441 440 } 442 441 443 442 if (iodev->dev->of_node) { 444 - ret = max77686_pmic_dt_parse_pdata(iodev, pdata); 443 + ret = max77686_pmic_dt_parse_pdata(pdev, pdata); 445 444 if (ret) 446 445 return ret; 447 446 }
+1 -2
drivers/regulator/max8907-regulator.c
··· 237 237 return -EINVAL; 238 238 } 239 239 240 - ret = of_regulator_match(pdev->dev.parent, regulators, 241 - max8907_matches, 240 + ret = of_regulator_match(&pdev->dev, regulators, max8907_matches, 242 241 ARRAY_SIZE(max8907_matches)); 243 242 if (ret < 0) { 244 243 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
+19 -20
drivers/regulator/max8997.c
··· 934 934 }; 935 935 936 936 #ifdef CONFIG_OF 937 - static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev, 937 + static int max8997_pmic_dt_parse_dvs_gpio(struct platform_device *pdev, 938 938 struct max8997_platform_data *pdata, 939 939 struct device_node *pmic_np) 940 940 { ··· 944 944 gpio = of_get_named_gpio(pmic_np, 945 945 "max8997,pmic-buck125-dvs-gpios", i); 946 946 if (!gpio_is_valid(gpio)) { 947 - dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio); 947 + dev_err(&pdev->dev, "invalid gpio[%d]: %d\n", i, gpio); 948 948 return -EINVAL; 949 949 } 950 950 pdata->buck125_gpios[i] = gpio; ··· 952 952 return 0; 953 953 } 954 954 955 - static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, 955 + static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, 956 956 struct max8997_platform_data *pdata) 957 957 { 958 + struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent); 958 959 struct device_node *pmic_np, *regulators_np, *reg_np; 959 960 struct max8997_regulator_data *rdata; 960 961 unsigned int i, dvs_voltage_nr = 1, ret; 961 962 962 963 pmic_np = iodev->dev->of_node; 963 964 if (!pmic_np) { 964 - dev_err(iodev->dev, "could not find pmic sub-node\n"); 965 + dev_err(&pdev->dev, "could not find pmic sub-node\n"); 965 966 return -ENODEV; 966 967 } 967 968 968 969 regulators_np = of_find_node_by_name(pmic_np, "regulators"); 969 970 if (!regulators_np) { 970 - dev_err(iodev->dev, "could not find regulators sub-node\n"); 971 + dev_err(&pdev->dev, "could not find regulators sub-node\n"); 971 972 return -EINVAL; 972 973 } 973 974 ··· 977 976 for_each_child_of_node(regulators_np, reg_np) 978 977 pdata->num_regulators++; 979 978 980 - rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) * 979 + rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) * 981 980 pdata->num_regulators, GFP_KERNEL); 982 981 if (!rdata) { 983 - dev_err(iodev->dev, "could not allocate memory for " 984 - "regulator data\n"); 982 + dev_err(&pdev->dev, "could not 
allocate memory for regulator data\n"); 985 983 return -ENOMEM; 986 984 } 987 985 ··· 991 991 break; 992 992 993 993 if (i == ARRAY_SIZE(regulators)) { 994 - dev_warn(iodev->dev, "don't know how to configure " 995 - "regulator %s\n", reg_np->name); 994 + dev_warn(&pdev->dev, "don't know how to configure regulator %s\n", 995 + reg_np->name); 996 996 continue; 997 997 } 998 998 999 999 rdata->id = i; 1000 - rdata->initdata = of_get_regulator_init_data( 1001 - iodev->dev, reg_np); 1000 + rdata->initdata = of_get_regulator_init_data(&pdev->dev, 1001 + reg_np); 1002 1002 rdata->reg_node = reg_np; 1003 1003 rdata++; 1004 1004 } ··· 1014 1014 1015 1015 if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs || 1016 1016 pdata->buck5_gpiodvs) { 1017 - ret = max8997_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np); 1017 + ret = max8997_pmic_dt_parse_dvs_gpio(pdev, pdata, pmic_np); 1018 1018 if (ret) 1019 1019 return -EINVAL; 1020 1020 ··· 1025 1025 } else { 1026 1026 if (pdata->buck125_default_idx >= 8) { 1027 1027 pdata->buck125_default_idx = 0; 1028 - dev_info(iodev->dev, "invalid value for " 1029 - "default dvs index, using 0 instead\n"); 1028 + dev_info(&pdev->dev, "invalid value for default dvs index, using 0 instead\n"); 1030 1029 } 1031 1030 } 1032 1031 ··· 1039 1040 if (of_property_read_u32_array(pmic_np, 1040 1041 "max8997,pmic-buck1-dvs-voltage", 1041 1042 pdata->buck1_voltage, dvs_voltage_nr)) { 1042 - dev_err(iodev->dev, "buck1 voltages not specified\n"); 1043 + dev_err(&pdev->dev, "buck1 voltages not specified\n"); 1043 1044 return -EINVAL; 1044 1045 } 1045 1046 1046 1047 if (of_property_read_u32_array(pmic_np, 1047 1048 "max8997,pmic-buck2-dvs-voltage", 1048 1049 pdata->buck2_voltage, dvs_voltage_nr)) { 1049 - dev_err(iodev->dev, "buck2 voltages not specified\n"); 1050 + dev_err(&pdev->dev, "buck2 voltages not specified\n"); 1050 1051 return -EINVAL; 1051 1052 } 1052 1053 1053 1054 if (of_property_read_u32_array(pmic_np, 1054 1055 "max8997,pmic-buck5-dvs-voltage", 1055 
1056 pdata->buck5_voltage, dvs_voltage_nr)) { 1056 - dev_err(iodev->dev, "buck5 voltages not specified\n"); 1057 + dev_err(&pdev->dev, "buck5 voltages not specified\n"); 1057 1058 return -EINVAL; 1058 1059 } 1059 1060 1060 1061 return 0; 1061 1062 } 1062 1063 #else 1063 - static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev, 1064 + static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, 1064 1065 struct max8997_platform_data *pdata) 1065 1066 { 1066 1067 return 0; ··· 1084 1085 } 1085 1086 1086 1087 if (iodev->dev->of_node) { 1087 - ret = max8997_pmic_dt_parse_pdata(iodev, pdata); 1088 + ret = max8997_pmic_dt_parse_pdata(pdev, pdata); 1088 1089 if (ret) 1089 1090 return ret; 1090 1091 }
+1 -1
drivers/regulator/max8998.c
··· 65 65 .min = 2800000, .step = 100000, .max = 3100000, 66 66 }; 67 67 static const struct voltage_map_desc ldo10_voltage_map_desc = { 68 - .min = 95000, .step = 50000, .max = 1300000, 68 + .min = 950000, .step = 50000, .max = 1300000, 69 69 }; 70 70 static const struct voltage_map_desc ldo1213_voltage_map_desc = { 71 71 .min = 800000, .step = 100000, .max = 3300000,
+6
drivers/regulator/of_regulator.c
··· 120 120 if (!dev || !node) 121 121 return -EINVAL; 122 122 123 + for (i = 0; i < num_matches; i++) { 124 + struct of_regulator_match *match = &matches[i]; 125 + match->init_data = NULL; 126 + match->of_node = NULL; 127 + } 128 + 123 129 for_each_child_of_node(node, child) { 124 130 name = of_get_property(child, 125 131 "regulator-compatible", NULL);
+2 -2
drivers/regulator/s2mps11.c
··· 174 174 .min_uV = S2MPS11_BUCK_MIN2, \ 175 175 .uV_step = S2MPS11_BUCK_STEP2, \ 176 176 .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ 177 - .vsel_reg = S2MPS11_REG_B9CTRL2, \ 177 + .vsel_reg = S2MPS11_REG_B10CTRL2, \ 178 178 .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \ 179 - .enable_reg = S2MPS11_REG_B9CTRL1, \ 179 + .enable_reg = S2MPS11_REG_B10CTRL1, \ 180 180 .enable_mask = S2MPS11_ENABLE_MASK \ 181 181 } 182 182
+2 -2
drivers/regulator/tps65217-regulator.c
··· 305 305 if (!regs) 306 306 return NULL; 307 307 308 - count = of_regulator_match(pdev->dev.parent, regs, 309 - reg_matches, TPS65217_NUM_REGULATOR); 308 + count = of_regulator_match(&pdev->dev, regs, reg_matches, 309 + TPS65217_NUM_REGULATOR); 310 310 of_node_put(regs); 311 311 if ((count < 0) || (count > TPS65217_NUM_REGULATOR)) 312 312 return NULL;
+1 -1
drivers/regulator/tps65910-regulator.c
··· 998 998 return NULL; 999 999 } 1000 1000 1001 - ret = of_regulator_match(pdev->dev.parent, regulators, matches, count); 1001 + ret = of_regulator_match(&pdev->dev, regulators, matches, count); 1002 1002 if (ret < 0) { 1003 1003 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", 1004 1004 ret);
+1 -1
drivers/regulator/tps80031-regulator.c
··· 728 728 } 729 729 } 730 730 rdev = regulator_register(&ri->rinfo->desc, &config); 731 - if (IS_ERR_OR_NULL(rdev)) { 731 + if (IS_ERR(rdev)) { 732 732 dev_err(&pdev->dev, 733 733 "register regulator failed %s\n", 734 734 ri->rinfo->desc.name);
+3
drivers/rtc/rtc-isl1208.c
··· 506 506 { 507 507 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 508 508 struct i2c_client *client = data; 509 + struct rtc_device *rtc = i2c_get_clientdata(client); 509 510 int handled = 0, sr, err; 510 511 511 512 /* ··· 528 527 529 528 if (sr & ISL1208_REG_SR_ALM) { 530 529 dev_dbg(&client->dev, "alarm!\n"); 530 + 531 + rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF); 531 532 532 533 /* Clear the alarm */ 533 534 sr &= ~ISL1208_REG_SR_ALM;
+5 -3
drivers/rtc/rtc-pl031.c
··· 44 44 #define RTC_YMR 0x34 /* Year match register */ 45 45 #define RTC_YLR 0x38 /* Year data load register */ 46 46 47 + #define RTC_CR_EN (1 << 0) /* counter enable bit */ 47 48 #define RTC_CR_CWEN (1 << 26) /* Clockwatch enable bit */ 48 49 49 50 #define RTC_TCR_EN (1 << 1) /* Periodic timer enable bit */ ··· 321 320 struct pl031_local *ldata; 322 321 struct pl031_vendor_data *vendor = id->data; 323 322 struct rtc_class_ops *ops = &vendor->ops; 324 - unsigned long time; 323 + unsigned long time, data; 325 324 326 325 ret = amba_request_regions(adev, NULL); 327 326 if (ret) ··· 346 345 dev_dbg(&adev->dev, "designer ID = 0x%02x\n", amba_manf(adev)); 347 346 dev_dbg(&adev->dev, "revision = 0x%01x\n", amba_rev(adev)); 348 347 348 + data = readl(ldata->base + RTC_CR); 349 349 /* Enable the clockwatch on ST Variants */ 350 350 if (vendor->clockwatch) 351 - writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN, 352 - ldata->base + RTC_CR); 351 + data |= RTC_CR_CWEN; 352 + writel(data | RTC_CR_EN, ldata->base + RTC_CR); 353 353 354 354 /* 355 355 * On ST PL031 variants, the RTC reset value does not provide correct
+1 -1
drivers/rtc/rtc-vt8500.c
··· 137 137 return -EINVAL; 138 138 } 139 139 140 - writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S) 140 + writel((bin2bcd(tm->tm_year % 100) << DATE_YEAR_S) 141 141 | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S) 142 142 | (bin2bcd(tm->tm_mday)) 143 143 | ((tm->tm_year >= 200) << DATE_CENTURY_S),
+1 -1
drivers/scsi/isci/init.c
··· 633 633 return -ENOMEM; 634 634 pci_set_drvdata(pdev, pci_info); 635 635 636 - if (efi_enabled) 636 + if (efi_enabled(EFI_RUNTIME_SERVICES)) 637 637 orom = isci_get_efi_var(pdev); 638 638 639 639 if (!orom)
+12
drivers/ssb/driver_gpio.c
··· 174 174 175 175 return -1; 176 176 } 177 + 178 + int ssb_gpio_unregister(struct ssb_bus *bus) 179 + { 180 + if (ssb_chipco_available(&bus->chipco) || 181 + ssb_extif_available(&bus->extif)) { 182 + return gpiochip_remove(&bus->gpio); 183 + } else { 184 + SSB_WARN_ON(1); 185 + } 186 + 187 + return -1; 188 + }
+9
drivers/ssb/main.c
··· 443 443 444 444 void ssb_bus_unregister(struct ssb_bus *bus) 445 445 { 446 + int err; 447 + 448 + err = ssb_gpio_unregister(bus); 449 + if (err == -EBUSY) 450 + ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n"); 451 + else if (err) 452 + ssb_dprintk(KERN_ERR PFX 453 + "Can not unregister GPIO driver: %i\n", err); 454 + 446 455 ssb_buses_lock(); 447 456 ssb_devices_unregister(bus); 448 457 list_del(&bus->list);
+5
drivers/ssb/ssb_private.h
··· 252 252 253 253 #ifdef CONFIG_SSB_DRIVER_GPIO 254 254 extern int ssb_gpio_init(struct ssb_bus *bus); 255 + extern int ssb_gpio_unregister(struct ssb_bus *bus); 255 256 #else /* CONFIG_SSB_DRIVER_GPIO */ 256 257 static inline int ssb_gpio_init(struct ssb_bus *bus) 257 258 { 258 259 return -ENOTSUPP; 260 + } 261 + static inline int ssb_gpio_unregister(struct ssb_bus *bus) 262 + { 263 + return 0; 259 264 } 260 265 #endif /* CONFIG_SSB_DRIVER_GPIO */ 261 266
+7 -1
drivers/target/target_core_device.c
··· 941 941 942 942 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) 943 943 { 944 + int block_size = dev->dev_attrib.block_size; 945 + 944 946 if (dev->export_count) { 945 947 pr_err("dev[%p]: Unable to change SE Device" 946 948 " fabric_max_sectors while export_count is %d\n", ··· 980 978 /* 981 979 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 982 980 */ 981 + if (!block_size) { 982 + block_size = 512; 983 + pr_warn("Defaulting to 512 for zero block_size\n"); 984 + } 983 985 fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, 984 - dev->dev_attrib.block_size); 986 + block_size); 985 987 986 988 dev->dev_attrib.fabric_max_sectors = fabric_max_sectors; 987 989 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
+5
drivers/target/target_core_fabric_configfs.c
··· 754 754 return -EFAULT; 755 755 } 756 756 757 + if (!(dev->dev_flags & DF_CONFIGURED)) { 758 + pr_err("se_device not configured yet, cannot port link\n"); 759 + return -ENODEV; 760 + } 761 + 757 762 tpg_ci = &lun_ci->ci_parent->ci_group->cg_item; 758 763 se_tpg = container_of(to_config_group(tpg_ci), 759 764 struct se_portal_group, tpg_group);
+8 -10
drivers/target/target_core_sbc.c
··· 58 58 buf[7] = dev->dev_attrib.block_size & 0xff; 59 59 60 60 rbuf = transport_kmap_data_sg(cmd); 61 - if (!rbuf) 62 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 63 - 64 - memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 65 - transport_kunmap_data_sg(cmd); 61 + if (rbuf) { 62 + memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 63 + transport_kunmap_data_sg(cmd); 64 + } 66 65 67 66 target_complete_cmd(cmd, GOOD); 68 67 return 0; ··· 96 97 buf[14] = 0x80; 97 98 98 99 rbuf = transport_kmap_data_sg(cmd); 99 - if (!rbuf) 100 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 101 - 102 - memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 103 - transport_kunmap_data_sg(cmd); 100 + if (rbuf) { 101 + memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 102 + transport_kunmap_data_sg(cmd); 103 + } 104 104 105 105 target_complete_cmd(cmd, GOOD); 106 106 return 0;
+11 -33
drivers/target/target_core_spc.c
··· 641 641 642 642 out: 643 643 rbuf = transport_kmap_data_sg(cmd); 644 - if (!rbuf) 645 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 646 - 647 - memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 648 - transport_kunmap_data_sg(cmd); 644 + if (rbuf) { 645 + memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 646 + transport_kunmap_data_sg(cmd); 647 + } 649 648 650 649 if (!ret) 651 650 target_complete_cmd(cmd, GOOD); ··· 850 851 { 851 852 struct se_device *dev = cmd->se_dev; 852 853 char *cdb = cmd->t_task_cdb; 853 - unsigned char *buf, *map_buf; 854 + unsigned char buf[SE_MODE_PAGE_BUF], *rbuf; 854 855 int type = dev->transport->get_device_type(dev); 855 856 int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10); 856 857 bool dbd = !!(cdb[1] & 0x08); ··· 862 863 int ret; 863 864 int i; 864 865 865 - map_buf = transport_kmap_data_sg(cmd); 866 - if (!map_buf) 867 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 868 - /* 869 - * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we 870 - * know we actually allocated a full page. Otherwise, if the 871 - * data buffer is too small, allocate a temporary buffer so we 872 - * don't have to worry about overruns in all our INQUIRY 873 - * emulation handling. 874 - */ 875 - if (cmd->data_length < SE_MODE_PAGE_BUF && 876 - (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { 877 - buf = kzalloc(SE_MODE_PAGE_BUF, GFP_KERNEL); 878 - if (!buf) { 879 - transport_kunmap_data_sg(cmd); 880 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 881 - } 882 - } else { 883 - buf = map_buf; 884 - } 866 + memset(buf, 0, SE_MODE_PAGE_BUF); 867 + 885 868 /* 886 869 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for 887 870 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6). 
··· 915 934 if (page == 0x3f) { 916 935 if (subpage != 0x00 && subpage != 0xff) { 917 936 pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage); 918 - kfree(buf); 919 - transport_kunmap_data_sg(cmd); 920 937 return TCM_INVALID_CDB_FIELD; 921 938 } 922 939 ··· 951 972 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", 952 973 page, subpage); 953 974 954 - transport_kunmap_data_sg(cmd); 955 975 return TCM_UNKNOWN_MODE_PAGE; 956 976 957 977 set_length: ··· 959 981 else 960 982 buf[0] = length - 1; 961 983 962 - if (buf != map_buf) { 963 - memcpy(map_buf, buf, cmd->data_length); 964 - kfree(buf); 984 + rbuf = transport_kmap_data_sg(cmd); 985 + if (rbuf) { 986 + memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length)); 987 + transport_kunmap_data_sg(cmd); 965 988 } 966 989 967 - transport_kunmap_data_sg(cmd); 968 990 target_complete_cmd(cmd, GOOD); 969 991 return 0; 970 992 }
+44
drivers/usb/core/hcd.c
··· 39 39 #include <asm/unaligned.h> 40 40 #include <linux/platform_device.h> 41 41 #include <linux/workqueue.h> 42 + #include <linux/pm_runtime.h> 42 43 43 44 #include <linux/usb.h> 44 45 #include <linux/usb/hcd.h> ··· 1026 1025 return retval; 1027 1026 } 1028 1027 1028 + /* 1029 + * usb_hcd_start_port_resume - a root-hub port is sending a resume signal 1030 + * @bus: the bus which the root hub belongs to 1031 + * @portnum: the port which is being resumed 1032 + * 1033 + * HCDs should call this function when they know that a resume signal is 1034 + * being sent to a root-hub port. The root hub will be prevented from 1035 + * going into autosuspend until usb_hcd_end_port_resume() is called. 1036 + * 1037 + * The bus's private lock must be held by the caller. 1038 + */ 1039 + void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum) 1040 + { 1041 + unsigned bit = 1 << portnum; 1042 + 1043 + if (!(bus->resuming_ports & bit)) { 1044 + bus->resuming_ports |= bit; 1045 + pm_runtime_get_noresume(&bus->root_hub->dev); 1046 + } 1047 + } 1048 + EXPORT_SYMBOL_GPL(usb_hcd_start_port_resume); 1049 + 1050 + /* 1051 + * usb_hcd_end_port_resume - a root-hub port has stopped sending a resume signal 1052 + * @bus: the bus which the root hub belongs to 1053 + * @portnum: the port which is being resumed 1054 + * 1055 + * HCDs should call this function when they know that a resume signal has 1056 + * stopped being sent to a root-hub port. The root hub will be allowed to 1057 + * autosuspend again. 1058 + * 1059 + * The bus's private lock must be held by the caller. 
1060 + */ 1061 + void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum) 1062 + { 1063 + unsigned bit = 1 << portnum; 1064 + 1065 + if (bus->resuming_ports & bit) { 1066 + bus->resuming_ports &= ~bit; 1067 + pm_runtime_put_noidle(&bus->root_hub->dev); 1068 + } 1069 + } 1070 + EXPORT_SYMBOL_GPL(usb_hcd_end_port_resume); 1029 1071 1030 1072 /*-------------------------------------------------------------------------*/ 1031 1073
+52 -18
drivers/usb/core/hub.c
··· 2838 2838 EXPORT_SYMBOL_GPL(usb_enable_ltm); 2839 2839 2840 2840 #ifdef CONFIG_USB_SUSPEND 2841 + /* 2842 + * usb_disable_function_remotewakeup - disable usb3.0 2843 + * device's function remote wakeup 2844 + * @udev: target device 2845 + * 2846 + * Assume there's only one function on the USB 3.0 2847 + * device and disable remote wake for the first 2848 + * interface. FIXME if the interface association 2849 + * descriptor shows there's more than one function. 2850 + */ 2851 + static int usb_disable_function_remotewakeup(struct usb_device *udev) 2852 + { 2853 + return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 2854 + USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE, 2855 + USB_INTRF_FUNC_SUSPEND, 0, NULL, 0, 2856 + USB_CTRL_SET_TIMEOUT); 2857 + } 2841 2858 2842 2859 /* 2843 2860 * usb_port_suspend - suspend a usb device's upstream port ··· 2972 2955 dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", 2973 2956 port1, status); 2974 2957 /* paranoia: "should not happen" */ 2975 - if (udev->do_remote_wakeup) 2976 - (void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 2977 - USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, 2978 - USB_DEVICE_REMOTE_WAKEUP, 0, 2979 - NULL, 0, 2980 - USB_CTRL_SET_TIMEOUT); 2958 + if (udev->do_remote_wakeup) { 2959 + if (!hub_is_superspeed(hub->hdev)) { 2960 + (void) usb_control_msg(udev, 2961 + usb_sndctrlpipe(udev, 0), 2962 + USB_REQ_CLEAR_FEATURE, 2963 + USB_RECIP_DEVICE, 2964 + USB_DEVICE_REMOTE_WAKEUP, 0, 2965 + NULL, 0, 2966 + USB_CTRL_SET_TIMEOUT); 2967 + } else 2968 + (void) usb_disable_function_remotewakeup(udev); 2969 + 2970 + } 2981 2971 2982 2972 /* Try to enable USB2 hardware LPM again */ 2983 2973 if (udev->usb2_hw_lpm_capable == 1) ··· 3076 3052 * udev->reset_resume 3077 3053 */ 3078 3054 } else if (udev->actconfig && !udev->reset_resume) { 3079 - le16_to_cpus(&devstatus); 3080 - if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) { 3081 - status = usb_control_msg(udev, 3082 - usb_sndctrlpipe(udev, 0), 3083 - 
USB_REQ_CLEAR_FEATURE, 3055 + if (!hub_is_superspeed(udev->parent)) { 3056 + le16_to_cpus(&devstatus); 3057 + if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) 3058 + status = usb_control_msg(udev, 3059 + usb_sndctrlpipe(udev, 0), 3060 + USB_REQ_CLEAR_FEATURE, 3084 3061 USB_RECIP_DEVICE, 3085 - USB_DEVICE_REMOTE_WAKEUP, 0, 3086 - NULL, 0, 3087 - USB_CTRL_SET_TIMEOUT); 3088 - if (status) 3089 - dev_dbg(&udev->dev, 3090 - "disable remote wakeup, status %d\n", 3091 - status); 3062 + USB_DEVICE_REMOTE_WAKEUP, 0, 3063 + NULL, 0, 3064 + USB_CTRL_SET_TIMEOUT); 3065 + } else { 3066 + status = usb_get_status(udev, USB_RECIP_INTERFACE, 0, 3067 + &devstatus); 3068 + le16_to_cpus(&devstatus); 3069 + if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP 3070 + | USB_INTRF_STAT_FUNC_RW)) 3071 + status = 3072 + usb_disable_function_remotewakeup(udev); 3092 3073 } 3074 + 3075 + if (status) 3076 + dev_dbg(&udev->dev, 3077 + "disable remote wakeup, status %d\n", 3078 + status); 3093 3079 status = 0; 3094 3080 } 3095 3081 return status;
+1
drivers/usb/host/ehci-hcd.c
··· 797 797 ehci->reset_done[i] = jiffies + msecs_to_jiffies(25); 798 798 set_bit(i, &ehci->resuming_ports); 799 799 ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); 800 + usb_hcd_start_port_resume(&hcd->self, i); 800 801 mod_timer(&hcd->rh_timer, ehci->reset_done[i]); 801 802 } 802 803 }
+8 -1
drivers/usb/host/ehci-hub.c
··· 649 649 status = STS_PCD; 650 650 } 651 651 } 652 - /* FIXME autosuspend idle root hubs */ 652 + 653 + /* If a resume is in progress, make sure it can finish */ 654 + if (ehci->resuming_ports) 655 + mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25)); 656 + 653 657 spin_unlock_irqrestore (&ehci->lock, flags); 654 658 return status ? retval : 0; 655 659 } ··· 855 851 /* resume signaling for 20 msec */ 856 852 ehci->reset_done[wIndex] = jiffies 857 853 + msecs_to_jiffies(20); 854 + usb_hcd_start_port_resume(&hcd->self, wIndex); 858 855 /* check the port again */ 859 856 mod_timer(&ehci_to_hcd(ehci)->rh_timer, 860 857 ehci->reset_done[wIndex]); ··· 867 862 clear_bit(wIndex, &ehci->suspended_ports); 868 863 set_bit(wIndex, &ehci->port_c_suspend); 869 864 ehci->reset_done[wIndex] = 0; 865 + usb_hcd_end_port_resume(&hcd->self, wIndex); 870 866 871 867 /* stop resume signaling */ 872 868 temp = ehci_readl(ehci, status_reg); ··· 956 950 ehci->reset_done[wIndex] = 0; 957 951 if (temp & PORT_PE) 958 952 set_bit(wIndex, &ehci->port_c_suspend); 953 + usb_hcd_end_port_resume(&hcd->self, wIndex); 959 954 } 960 955 961 956 if (temp & PORT_OC)
+30 -20
drivers/usb/host/ehci-q.c
··· 1197 1197 if (ehci->async_iaa || ehci->async_unlinking) 1198 1198 return; 1199 1199 1200 - /* Do all the waiting QHs at once */ 1201 - ehci->async_iaa = ehci->async_unlink; 1202 - ehci->async_unlink = NULL; 1203 - 1204 1200 /* If the controller isn't running, we don't have to wait for it */ 1205 1201 if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) { 1202 + 1203 + /* Do all the waiting QHs */ 1204 + ehci->async_iaa = ehci->async_unlink; 1205 + ehci->async_unlink = NULL; 1206 + 1206 1207 if (!nested) /* Avoid recursion */ 1207 1208 end_unlink_async(ehci); 1208 1209 1209 1210 /* Otherwise start a new IAA cycle */ 1210 1211 } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) { 1212 + struct ehci_qh *qh; 1213 + 1214 + /* Do only the first waiting QH (nVidia bug?) */ 1215 + qh = ehci->async_unlink; 1216 + ehci->async_iaa = qh; 1217 + ehci->async_unlink = qh->unlink_next; 1218 + qh->unlink_next = NULL; 1219 + 1211 1220 /* Make sure the unlinks are all visible to the hardware */ 1212 1221 wmb(); 1213 1222 ··· 1264 1255 } 1265 1256 } 1266 1257 1258 + static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh); 1259 + 1267 1260 static void unlink_empty_async(struct ehci_hcd *ehci) 1268 1261 { 1269 - struct ehci_qh *qh, *next; 1270 - bool stopped = (ehci->rh_state < EHCI_RH_RUNNING); 1262 + struct ehci_qh *qh; 1263 + struct ehci_qh *qh_to_unlink = NULL; 1271 1264 bool check_unlinks_later = false; 1265 + int count = 0; 1272 1266 1273 - /* Unlink all the async QHs that have been empty for a timer cycle */ 1274 - next = ehci->async->qh_next.qh; 1275 - while (next) { 1276 - qh = next; 1277 - next = qh->qh_next.qh; 1278 - 1267 + /* Find the last async QH which has been empty for a timer cycle */ 1268 + for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) { 1279 1269 if (list_empty(&qh->qtd_list) && 1280 1270 qh->qh_state == QH_STATE_LINKED) { 1281 - if (!stopped && qh->unlink_cycle == 1282 - ehci->async_unlink_cycle) 1271 + ++count; 1272 + if 
(qh->unlink_cycle == ehci->async_unlink_cycle) 1283 1273 check_unlinks_later = true; 1284 1274 else 1285 - single_unlink_async(ehci, qh); 1275 + qh_to_unlink = qh; 1286 1276 } 1287 1277 } 1288 1278 1289 - /* Start a new IAA cycle if any QHs are waiting for it */ 1290 - if (ehci->async_unlink) 1291 - start_iaa_cycle(ehci, false); 1279 + /* If nothing else is being unlinked, unlink the last empty QH */ 1280 + if (!ehci->async_iaa && !ehci->async_unlink && qh_to_unlink) { 1281 + start_unlink_async(ehci, qh_to_unlink); 1282 + --count; 1283 + } 1292 1284 1293 - /* QHs that haven't been empty for long enough will be handled later */ 1294 - if (check_unlinks_later) { 1285 + /* Other QHs will be handled later */ 1286 + if (count > 0) { 1295 1287 ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true); 1296 1288 ++ehci->async_unlink_cycle; 1297 1289 }
+6 -3
drivers/usb/host/ehci-sched.c
··· 213 213 } 214 214 215 215 static const unsigned char 216 - max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; 216 + max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 }; 217 217 218 218 /* carryover low/fullspeed bandwidth that crosses uframe boundries */ 219 219 static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) ··· 2212 2212 } 2213 2213 ehci->now_frame = now_frame; 2214 2214 2215 + frame = ehci->last_iso_frame; 2215 2216 for (;;) { 2216 2217 union ehci_shadow q, *q_p; 2217 2218 __hc32 type, *hw_p; 2218 2219 2219 - frame = ehci->last_iso_frame; 2220 2220 restart: 2221 2221 /* scan each element in frame's queue for completions */ 2222 2222 q_p = &ehci->pshadow [frame]; ··· 2321 2321 /* Stop when we have reached the current frame */ 2322 2322 if (frame == now_frame) 2323 2323 break; 2324 - ehci->last_iso_frame = (frame + 1) & fmask; 2324 + 2325 + /* The last frame may still have active siTDs */ 2326 + ehci->last_iso_frame = frame; 2327 + frame = (frame + 1) & fmask; 2325 2328 } 2326 2329 }
+15 -14
drivers/usb/host/ehci-timer.c
··· 113 113 114 114 if (want != actual) { 115 115 116 - /* Poll again later, but give up after about 20 ms */ 117 - if (ehci->ASS_poll_count++ < 20) { 118 - ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); 119 - return; 120 - } 121 - ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n", 122 - want, actual); 116 + /* Poll again later */ 117 + ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); 118 + ++ehci->ASS_poll_count; 119 + return; 123 120 } 121 + 122 + if (ehci->ASS_poll_count > 20) 123 + ehci_dbg(ehci, "ASS poll count reached %d\n", 124 + ehci->ASS_poll_count); 124 125 ehci->ASS_poll_count = 0; 125 126 126 127 /* The status is up-to-date; restart or stop the schedule as needed */ ··· 160 159 161 160 if (want != actual) { 162 161 163 - /* Poll again later, but give up after about 20 ms */ 164 - if (ehci->PSS_poll_count++ < 20) { 165 - ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); 166 - return; 167 - } 168 - ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n", 169 - want, actual); 162 + /* Poll again later */ 163 + ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); 164 + return; 170 165 } 166 + 167 + if (ehci->PSS_poll_count > 20) 168 + ehci_dbg(ehci, "PSS poll count reached %d\n", 169 + ehci->PSS_poll_count); 171 170 ehci->PSS_poll_count = 0; 172 171 173 172 /* The status is up-to-date; restart or stop the schedule as needed */
+1
drivers/usb/host/pci-quirks.c
··· 780 780 "defaulting to EHCI.\n"); 781 781 dev_warn(&xhci_pdev->dev, 782 782 "USB 3.0 devices will work at USB 2.0 speeds.\n"); 783 + usb_disable_xhci_ports(xhci_pdev); 783 784 return; 784 785 } 785 786
+3
drivers/usb/host/uhci-hub.c
··· 116 116 } 117 117 } 118 118 clear_bit(port, &uhci->resuming_ports); 119 + usb_hcd_end_port_resume(&uhci_to_hcd(uhci)->self, port); 119 120 } 120 121 121 122 /* Wait for the UHCI controller in HP's iLO2 server management chip. ··· 168 167 set_bit(port, &uhci->resuming_ports); 169 168 uhci->ports_timeout = jiffies + 170 169 msecs_to_jiffies(25); 170 + usb_hcd_start_port_resume( 171 + &uhci_to_hcd(uhci)->self, port); 171 172 172 173 /* Make sure we see the port again 173 174 * after the resuming period is over. */
+9 -4
drivers/usb/host/xhci-ring.c
··· 1698 1698 faked_port_index + 1); 1699 1699 if (slot_id && xhci->devs[slot_id]) 1700 1700 xhci_ring_device(xhci, slot_id); 1701 - if (bus_state->port_remote_wakeup && (1 << faked_port_index)) { 1701 + if (bus_state->port_remote_wakeup & (1 << faked_port_index)) { 1702 1702 bus_state->port_remote_wakeup &= 1703 1703 ~(1 << faked_port_index); 1704 1704 xhci_test_and_clear_bit(xhci, port_array, ··· 2589 2589 (trb_comp_code != COMP_STALL && 2590 2590 trb_comp_code != COMP_BABBLE)) 2591 2591 xhci_urb_free_priv(xhci, urb_priv); 2592 + else 2593 + kfree(urb_priv); 2592 2594 2593 2595 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); 2594 2596 if ((urb->actual_length != urb->transfer_buffer_length && ··· 3110 3108 * running_total. 3111 3109 */ 3112 3110 packets_transferred = (running_total + trb_buff_len) / 3113 - usb_endpoint_maxp(&urb->ep->desc); 3111 + GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); 3114 3112 3115 3113 if ((total_packet_count - packets_transferred) > 31) 3116 3114 return 31 << 17; ··· 3644 3642 td_len = urb->iso_frame_desc[i].length; 3645 3643 td_remain_len = td_len; 3646 3644 total_packet_count = DIV_ROUND_UP(td_len, 3647 - usb_endpoint_maxp(&urb->ep->desc)); 3645 + GET_MAX_PACKET( 3646 + usb_endpoint_maxp(&urb->ep->desc))); 3648 3647 /* A zero-length transfer still involves at least one packet. */ 3649 3648 if (total_packet_count == 0) 3650 3649 total_packet_count++; ··· 3667 3664 td = urb_priv->td[i]; 3668 3665 for (j = 0; j < trbs_per_td; j++) { 3669 3666 u32 remainder = 0; 3670 - field = TRB_TBC(burst_count) | TRB_TLBPC(residue); 3667 + field = 0; 3671 3668 3672 3669 if (first_trb) { 3670 + field = TRB_TBC(burst_count) | 3671 + TRB_TLBPC(residue); 3673 3672 /* Queue the isoc TRB */ 3674 3673 field |= TRB_TYPE(TRB_ISOC); 3675 3674 /* Assume URB_ISO_ASAP is set */
+1
drivers/usb/serial/cp210x.c
··· 60 60 { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ 61 61 { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ 62 62 { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ 63 + { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */ 63 64 { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ 64 65 { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ 65 66 { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
+2
drivers/usb/serial/ftdi_sio.c
··· 584 584 /* 585 585 * ELV devices: 586 586 */ 587 + { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) }, 587 588 { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, 588 589 { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, 589 590 { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, ··· 671 670 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, 672 671 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, 673 672 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, 673 + { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, 674 674 { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, 675 675 { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) }, 676 676 { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) },
+8 -1
drivers/usb/serial/ftdi_sio_ids.h
··· 147 147 #define XSENS_CONVERTER_6_PID 0xD38E 148 148 #define XSENS_CONVERTER_7_PID 0xD38F 149 149 150 + /** 151 + * Zolix (www.zolix.com.cb) product ids 152 + */ 153 + #define FTDI_OMNI1509 0xD491 /* Omni1509 embedded USB-serial */ 154 + 150 155 /* 151 156 * NDI (www.ndigital.com) product ids 152 157 */ ··· 209 204 210 205 /* 211 206 * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). 212 - * All of these devices use FTDI's vendor ID (0x0403). 207 + * Almost all of these devices use FTDI's vendor ID (0x0403). 213 208 * Further IDs taken from ELV Windows .inf file. 214 209 * 215 210 * The previously included PID for the UO 100 module was incorrect. ··· 217 212 * 218 213 * Armin Laeuger originally sent the PID for the UM 100 module. 219 214 */ 215 + #define FTDI_ELV_VID 0x1B1F /* ELV AG */ 216 + #define FTDI_ELV_WS300_PID 0xC006 /* eQ3 WS 300 PC II */ 220 217 #define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */ 221 218 #define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */ 222 219 #define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */
+13
drivers/usb/serial/option.c
··· 242 242 #define TELIT_PRODUCT_CC864_DUAL 0x1005 243 243 #define TELIT_PRODUCT_CC864_SINGLE 0x1006 244 244 #define TELIT_PRODUCT_DE910_DUAL 0x1010 245 + #define TELIT_PRODUCT_LE920 0x1200 245 246 246 247 /* ZTE PRODUCTS */ 247 248 #define ZTE_VENDOR_ID 0x19d2 ··· 454 453 #define TPLINK_VENDOR_ID 0x2357 455 454 #define TPLINK_PRODUCT_MA180 0x0201 456 455 456 + /* Changhong products */ 457 + #define CHANGHONG_VENDOR_ID 0x2077 458 + #define CHANGHONG_PRODUCT_CH690 0x7001 459 + 457 460 /* some devices interfaces need special handling due to a number of reasons */ 458 461 enum option_blacklist_reason { 459 462 OPTION_BLACKLIST_NONE = 0, ··· 537 532 538 533 static const struct option_blacklist_info zte_1255_blacklist = { 539 534 .reserved = BIT(3) | BIT(4), 535 + }; 536 + 537 + static const struct option_blacklist_info telit_le920_blacklist = { 538 + .sendsetup = BIT(0), 539 + .reserved = BIT(1) | BIT(5), 540 540 }; 541 541 542 542 static const struct usb_device_id option_ids[] = { ··· 794 784 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, 795 785 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, 796 786 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, 787 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), 788 + .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, 797 789 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ 798 790 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), 799 791 .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, ··· 1330 1318 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, 1331 1319 { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), 1332 1320 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1321 + { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, 1333 1322 { } /* Terminating entry */ 1334 1323 }; 1335 1324 MODULE_DEVICE_TABLE(usb, option_ids);
+1
drivers/usb/serial/qcserial.c
··· 53 53 {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ 54 54 {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ 55 55 {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ 56 + {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */ 56 57 57 58 /* Gobi 2000 devices */ 58 59 {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */
+74 -2
drivers/usb/storage/initializers.c
··· 92 92 return 0; 93 93 } 94 94 95 - /* This places the HUAWEI E220 devices in multi-port mode */ 96 - int usb_stor_huawei_e220_init(struct us_data *us) 95 + /* This places the HUAWEI usb dongles in multi-port mode */ 96 + static int usb_stor_huawei_feature_init(struct us_data *us) 97 97 { 98 98 int result; 99 99 ··· 103 103 0x01, 0x0, NULL, 0x0, 1000); 104 104 US_DEBUGP("Huawei mode set result is %d\n", result); 105 105 return 0; 106 + } 107 + 108 + /* 109 + * It will send a scsi switch command called rewind' to huawei dongle. 110 + * When the dongle receives this command at the first time, 111 + * it will reboot immediately. After rebooted, it will ignore this command. 112 + * So it is unnecessary to read its response. 113 + */ 114 + static int usb_stor_huawei_scsi_init(struct us_data *us) 115 + { 116 + int result = 0; 117 + int act_len = 0; 118 + struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf; 119 + char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00, 120 + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 121 + 122 + bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN); 123 + bcbw->Tag = 0; 124 + bcbw->DataTransferLength = 0; 125 + bcbw->Flags = bcbw->Lun = 0; 126 + bcbw->Length = sizeof(rewind_cmd); 127 + memset(bcbw->CDB, 0, sizeof(bcbw->CDB)); 128 + memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd)); 129 + 130 + result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw, 131 + US_BULK_CB_WRAP_LEN, &act_len); 132 + US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result); 133 + return result; 134 + } 135 + 136 + /* 137 + * It tries to find the supported Huawei USB dongles. 138 + * In Huawei, they assign the following product IDs 139 + * for all of their mobile broadband dongles, 140 + * including the new dongles in the future. 141 + * So if the product ID is not included in this list, 142 + * it means it is not Huawei's mobile broadband dongles. 
143 + */ 144 + static int usb_stor_huawei_dongles_pid(struct us_data *us) 145 + { 146 + struct usb_interface_descriptor *idesc; 147 + int idProduct; 148 + 149 + idesc = &us->pusb_intf->cur_altsetting->desc; 150 + idProduct = us->pusb_dev->descriptor.idProduct; 151 + /* The first port is CDROM, 152 + * means the dongle in the single port mode, 153 + * and a switch command is required to be sent. */ 154 + if (idesc && idesc->bInterfaceNumber == 0) { 155 + if ((idProduct == 0x1001) 156 + || (idProduct == 0x1003) 157 + || (idProduct == 0x1004) 158 + || (idProduct >= 0x1401 && idProduct <= 0x1500) 159 + || (idProduct >= 0x1505 && idProduct <= 0x1600) 160 + || (idProduct >= 0x1c02 && idProduct <= 0x2202)) { 161 + return 1; 162 + } 163 + } 164 + return 0; 165 + } 166 + 167 + int usb_stor_huawei_init(struct us_data *us) 168 + { 169 + int result = 0; 170 + 171 + if (usb_stor_huawei_dongles_pid(us)) { 172 + if (us->pusb_dev->descriptor.idProduct >= 0x1446) 173 + result = usb_stor_huawei_scsi_init(us); 174 + else 175 + result = usb_stor_huawei_feature_init(us); 176 + } 177 + return result; 106 178 }
+2 -2
drivers/usb/storage/initializers.h
··· 46 46 * flash reader */ 47 47 int usb_stor_ucr61s2b_init(struct us_data *us); 48 48 49 - /* This places the HUAWEI E220 devices in multi-port mode */ 50 - int usb_stor_huawei_e220_init(struct us_data *us); 49 + /* This places the HUAWEI usb dongles in multi-port mode */ 50 + int usb_stor_huawei_init(struct us_data *us);
+2 -327
drivers/usb/storage/unusual_devs.h
··· 1527 1527 /* Reported by fangxiaozhi <huananhu@huawei.com> 1528 1528 * This brings the HUAWEI data card devices into multi-port mode 1529 1529 */ 1530 - UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000, 1530 + UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50, 1531 1531 "HUAWEI MOBILE", 1532 1532 "Mass Storage", 1533 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1534 - 0), 1535 - UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000, 1536 - "HUAWEI MOBILE", 1537 - "Mass Storage", 1538 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1539 - 0), 1540 - UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000, 1541 - "HUAWEI MOBILE", 1542 - "Mass Storage", 1543 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1544 - 0), 1545 - UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000, 1546 - "HUAWEI MOBILE", 1547 - "Mass Storage", 1548 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1549 - 0), 1550 - UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000, 1551 - "HUAWEI MOBILE", 1552 - "Mass Storage", 1553 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1554 - 0), 1555 - UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000, 1556 - "HUAWEI MOBILE", 1557 - "Mass Storage", 1558 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1559 - 0), 1560 - UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000, 1561 - "HUAWEI MOBILE", 1562 - "Mass Storage", 1563 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1564 - 0), 1565 - UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000, 1566 - "HUAWEI MOBILE", 1567 - "Mass Storage", 1568 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1569 - 0), 1570 - UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000, 1571 - "HUAWEI MOBILE", 1572 - "Mass Storage", 1573 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1574 - 0), 1575 - UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000, 1576 - "HUAWEI MOBILE", 1577 - "Mass Storage", 1578 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1579 - 0), 1580 - UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 
0x0000, 1581 - "HUAWEI MOBILE", 1582 - "Mass Storage", 1583 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1584 - 0), 1585 - UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000, 1586 - "HUAWEI MOBILE", 1587 - "Mass Storage", 1588 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1589 - 0), 1590 - UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000, 1591 - "HUAWEI MOBILE", 1592 - "Mass Storage", 1593 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1594 - 0), 1595 - UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000, 1596 - "HUAWEI MOBILE", 1597 - "Mass Storage", 1598 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1599 - 0), 1600 - UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000, 1601 - "HUAWEI MOBILE", 1602 - "Mass Storage", 1603 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1604 - 0), 1605 - UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000, 1606 - "HUAWEI MOBILE", 1607 - "Mass Storage", 1608 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1609 - 0), 1610 - UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000, 1611 - "HUAWEI MOBILE", 1612 - "Mass Storage", 1613 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1614 - 0), 1615 - UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000, 1616 - "HUAWEI MOBILE", 1617 - "Mass Storage", 1618 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1619 - 0), 1620 - UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000, 1621 - "HUAWEI MOBILE", 1622 - "Mass Storage", 1623 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1624 - 0), 1625 - UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000, 1626 - "HUAWEI MOBILE", 1627 - "Mass Storage", 1628 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1629 - 0), 1630 - UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000, 1631 - "HUAWEI MOBILE", 1632 - "Mass Storage", 1633 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1634 - 0), 1635 - UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000, 1636 - "HUAWEI MOBILE", 1637 - "Mass Storage", 1638 - USB_SC_DEVICE, 
USB_PR_DEVICE, usb_stor_huawei_e220_init, 1639 - 0), 1640 - UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000, 1641 - "HUAWEI MOBILE", 1642 - "Mass Storage", 1643 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1644 - 0), 1645 - UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000, 1646 - "HUAWEI MOBILE", 1647 - "Mass Storage", 1648 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1649 - 0), 1650 - UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000, 1651 - "HUAWEI MOBILE", 1652 - "Mass Storage", 1653 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1654 - 0), 1655 - UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000, 1656 - "HUAWEI MOBILE", 1657 - "Mass Storage", 1658 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1659 - 0), 1660 - UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000, 1661 - "HUAWEI MOBILE", 1662 - "Mass Storage", 1663 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1664 - 0), 1665 - UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000, 1666 - "HUAWEI MOBILE", 1667 - "Mass Storage", 1668 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1669 - 0), 1670 - UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000, 1671 - "HUAWEI MOBILE", 1672 - "Mass Storage", 1673 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1674 - 0), 1675 - UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000, 1676 - "HUAWEI MOBILE", 1677 - "Mass Storage", 1678 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1679 - 0), 1680 - UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000, 1681 - "HUAWEI MOBILE", 1682 - "Mass Storage", 1683 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1684 - 0), 1685 - UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000, 1686 - "HUAWEI MOBILE", 1687 - "Mass Storage", 1688 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1689 - 0), 1690 - UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000, 1691 - "HUAWEI MOBILE", 1692 - "Mass Storage", 1693 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1694 - 0), 1695 - UNUSUAL_DEV( 0x12d1, 
0x141F, 0x0000, 0x0000, 1696 - "HUAWEI MOBILE", 1697 - "Mass Storage", 1698 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1699 - 0), 1700 - UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000, 1701 - "HUAWEI MOBILE", 1702 - "Mass Storage", 1703 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1704 - 0), 1705 - UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000, 1706 - "HUAWEI MOBILE", 1707 - "Mass Storage", 1708 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1709 - 0), 1710 - UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000, 1711 - "HUAWEI MOBILE", 1712 - "Mass Storage", 1713 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1714 - 0), 1715 - UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000, 1716 - "HUAWEI MOBILE", 1717 - "Mass Storage", 1718 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1719 - 0), 1720 - UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000, 1721 - "HUAWEI MOBILE", 1722 - "Mass Storage", 1723 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1724 - 0), 1725 - UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000, 1726 - "HUAWEI MOBILE", 1727 - "Mass Storage", 1728 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1729 - 0), 1730 - UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000, 1731 - "HUAWEI MOBILE", 1732 - "Mass Storage", 1733 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1734 - 0), 1735 - UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000, 1736 - "HUAWEI MOBILE", 1737 - "Mass Storage", 1738 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1739 - 0), 1740 - UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000, 1741 - "HUAWEI MOBILE", 1742 - "Mass Storage", 1743 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1744 - 0), 1745 - UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000, 1746 - "HUAWEI MOBILE", 1747 - "Mass Storage", 1748 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1749 - 0), 1750 - UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000, 1751 - "HUAWEI MOBILE", 1752 - "Mass Storage", 1753 - 
USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1754 - 0), 1755 - UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000, 1756 - "HUAWEI MOBILE", 1757 - "Mass Storage", 1758 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1759 - 0), 1760 - UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000, 1761 - "HUAWEI MOBILE", 1762 - "Mass Storage", 1763 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1764 - 0), 1765 - UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000, 1766 - "HUAWEI MOBILE", 1767 - "Mass Storage", 1768 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1769 - 0), 1770 - UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000, 1771 - "HUAWEI MOBILE", 1772 - "Mass Storage", 1773 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1774 - 0), 1775 - UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000, 1776 - "HUAWEI MOBILE", 1777 - "Mass Storage", 1778 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1779 - 0), 1780 - UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000, 1781 - "HUAWEI MOBILE", 1782 - "Mass Storage", 1783 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1784 - 0), 1785 - UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000, 1786 - "HUAWEI MOBILE", 1787 - "Mass Storage", 1788 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1789 - 0), 1790 - UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000, 1791 - "HUAWEI MOBILE", 1792 - "Mass Storage", 1793 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1794 - 0), 1795 - UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000, 1796 - "HUAWEI MOBILE", 1797 - "Mass Storage", 1798 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1799 - 0), 1800 - UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000, 1801 - "HUAWEI MOBILE", 1802 - "Mass Storage", 1803 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1804 - 0), 1805 - UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000, 1806 - "HUAWEI MOBILE", 1807 - "Mass Storage", 1808 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1809 - 0), 1810 - 
UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000, 1811 - "HUAWEI MOBILE", 1812 - "Mass Storage", 1813 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1814 - 0), 1815 - UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000, 1816 - "HUAWEI MOBILE", 1817 - "Mass Storage", 1818 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1819 - 0), 1820 - UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000, 1821 - "HUAWEI MOBILE", 1822 - "Mass Storage", 1823 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1824 - 0), 1825 - UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000, 1826 - "HUAWEI MOBILE", 1827 - "Mass Storage", 1828 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1829 - 0), 1830 - UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000, 1831 - "HUAWEI MOBILE", 1832 - "Mass Storage", 1833 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1834 - 0), 1835 - UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000, 1836 - "HUAWEI MOBILE", 1837 - "Mass Storage", 1838 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1839 - 0), 1840 - UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000, 1841 - "HUAWEI MOBILE", 1842 - "Mass Storage", 1843 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1844 - 0), 1845 - UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000, 1846 - "HUAWEI MOBILE", 1847 - "Mass Storage", 1848 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1849 - 0), 1850 - UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000, 1851 - "HUAWEI MOBILE", 1852 - "Mass Storage", 1853 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1854 - 0), 1855 - UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000, 1856 - "HUAWEI MOBILE", 1857 - "Mass Storage", 1858 - USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init, 1533 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init, 1859 1534 0), 1860 1535 1861 1536 /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
+12
drivers/usb/storage/usb.c
··· 120 120 .useTransport = use_transport, \ 121 121 } 122 122 123 + #define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \ 124 + vendor_name, product_name, use_protocol, use_transport, \ 125 + init_function, Flags) \ 126 + { \ 127 + .vendorName = vendor_name, \ 128 + .productName = product_name, \ 129 + .useProtocol = use_protocol, \ 130 + .useTransport = use_transport, \ 131 + .initFunction = init_function, \ 132 + } 133 + 123 134 static struct us_unusual_dev us_unusual_dev_list[] = { 124 135 # include "unusual_devs.h" 125 136 { } /* Terminating entry */ ··· 142 131 #undef UNUSUAL_DEV 143 132 #undef COMPLIANT_DEV 144 133 #undef USUAL_DEV 134 + #undef UNUSUAL_VENDOR_INTF 145 135 146 136 #ifdef CONFIG_LOCKDEP 147 137
+15
drivers/usb/storage/usual-tables.c
··· 41 41 #define USUAL_DEV(useProto, useTrans) \ 42 42 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) } 43 43 44 + /* Define the device is matched with Vendor ID and interface descriptors */ 45 + #define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \ 46 + vendorName, productName, useProtocol, useTransport, \ 47 + initFunction, flags) \ 48 + { \ 49 + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \ 50 + | USB_DEVICE_ID_MATCH_VENDOR, \ 51 + .idVendor = (id_vendor), \ 52 + .bInterfaceClass = (cl), \ 53 + .bInterfaceSubClass = (sc), \ 54 + .bInterfaceProtocol = (pr), \ 55 + .driver_info = (flags) \ 56 + } 57 + 44 58 struct usb_device_id usb_storage_usb_ids[] = { 45 59 # include "unusual_devs.h" 46 60 { } /* Terminating entry */ ··· 64 50 #undef UNUSUAL_DEV 65 51 #undef COMPLIANT_DEV 66 52 #undef USUAL_DEV 53 + #undef UNUSUAL_VENDOR_INTF 67 54 68 55 /* 69 56 * The table of devices to ignore
+28 -13
drivers/vhost/net.c
··· 165 165 } 166 166 167 167 /* Caller must have TX VQ lock */ 168 - static void tx_poll_start(struct vhost_net *net, struct socket *sock) 168 + static int tx_poll_start(struct vhost_net *net, struct socket *sock) 169 169 { 170 + int ret; 171 + 170 172 if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED)) 171 - return; 172 - vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); 173 - net->tx_poll_state = VHOST_NET_POLL_STARTED; 173 + return 0; 174 + ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file); 175 + if (!ret) 176 + net->tx_poll_state = VHOST_NET_POLL_STARTED; 177 + return ret; 174 178 } 175 179 176 180 /* In case of DMA done not in order in lower device driver for some reason. ··· 646 642 vhost_poll_stop(n->poll + VHOST_NET_VQ_RX); 647 643 } 648 644 649 - static void vhost_net_enable_vq(struct vhost_net *n, 645 + static int vhost_net_enable_vq(struct vhost_net *n, 650 646 struct vhost_virtqueue *vq) 651 647 { 652 648 struct socket *sock; 649 + int ret; 653 650 654 651 sock = rcu_dereference_protected(vq->private_data, 655 652 lockdep_is_held(&vq->mutex)); 656 653 if (!sock) 657 - return; 654 + return 0; 658 655 if (vq == n->vqs + VHOST_NET_VQ_TX) { 659 656 n->tx_poll_state = VHOST_NET_POLL_STOPPED; 660 - tx_poll_start(n, sock); 657 + ret = tx_poll_start(n, sock); 661 658 } else 662 - vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); 659 + ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file); 660 + 661 + return ret; 663 662 } 664 663 665 664 static struct socket *vhost_net_stop_vq(struct vhost_net *n, ··· 834 827 r = PTR_ERR(ubufs); 835 828 goto err_ubufs; 836 829 } 837 - oldubufs = vq->ubufs; 838 - vq->ubufs = ubufs; 830 + 839 831 vhost_net_disable_vq(n, vq); 840 832 rcu_assign_pointer(vq->private_data, sock); 841 - vhost_net_enable_vq(n, vq); 842 - 843 833 r = vhost_init_used(vq); 844 834 if (r) 845 - goto err_vq; 835 + goto err_used; 836 + r = vhost_net_enable_vq(n, vq); 837 + if (r) 838 + goto err_used; 839 + 840 + 
oldubufs = vq->ubufs; 841 + vq->ubufs = ubufs; 846 842 847 843 n->tx_packets = 0; 848 844 n->tx_zcopy_err = 0; ··· 869 859 mutex_unlock(&n->dev.mutex); 870 860 return 0; 871 861 862 + err_used: 863 + rcu_assign_pointer(vq->private_data, oldsock); 864 + vhost_net_enable_vq(n, vq); 865 + if (ubufs) 866 + vhost_ubuf_put_and_wait(ubufs); 872 867 err_ubufs: 873 868 fput(sock->file); 874 869 err_vq:
+1 -3
drivers/vhost/tcm_vhost.c
··· 575 575 576 576 /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */ 577 577 tv_tpg = vs->vs_tpg; 578 - if (unlikely(!tv_tpg)) { 579 - pr_err("%s endpoint not set\n", __func__); 578 + if (unlikely(!tv_tpg)) 580 579 return; 581 - } 582 580 583 581 mutex_lock(&vq->mutex); 584 582 vhost_disable_notify(&vs->dev, vq);
+15 -3
drivers/vhost/vhost.c
··· 77 77 init_poll_funcptr(&poll->table, vhost_poll_func); 78 78 poll->mask = mask; 79 79 poll->dev = dev; 80 + poll->wqh = NULL; 80 81 81 82 vhost_work_init(&poll->work, fn); 82 83 } 83 84 84 85 /* Start polling a file. We add ourselves to file's wait queue. The caller must 85 86 * keep a reference to a file until after vhost_poll_stop is called. */ 86 - void vhost_poll_start(struct vhost_poll *poll, struct file *file) 87 + int vhost_poll_start(struct vhost_poll *poll, struct file *file) 87 88 { 88 89 unsigned long mask; 90 + int ret = 0; 89 91 90 92 mask = file->f_op->poll(file, &poll->table); 91 93 if (mask) 92 94 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); 95 + if (mask & POLLERR) { 96 + if (poll->wqh) 97 + remove_wait_queue(poll->wqh, &poll->wait); 98 + ret = -EINVAL; 99 + } 100 + 101 + return ret; 93 102 } 94 103 95 104 /* Stop polling a file. After this function returns, it becomes safe to drop the 96 105 * file reference. You must also flush afterwards. */ 97 106 void vhost_poll_stop(struct vhost_poll *poll) 98 107 { 99 - remove_wait_queue(poll->wqh, &poll->wait); 108 + if (poll->wqh) { 109 + remove_wait_queue(poll->wqh, &poll->wait); 110 + poll->wqh = NULL; 111 + } 100 112 } 101 113 102 114 static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, ··· 804 792 fput(filep); 805 793 806 794 if (pollstart && vq->handle_kick) 807 - vhost_poll_start(&vq->poll, vq->kick); 795 + r = vhost_poll_start(&vq->poll, vq->kick); 808 796 809 797 mutex_unlock(&vq->mutex); 810 798
+1 -1
drivers/vhost/vhost.h
··· 42 42 43 43 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, 44 44 unsigned long mask, struct vhost_dev *dev); 45 - void vhost_poll_start(struct vhost_poll *poll, struct file *file); 45 + int vhost_poll_start(struct vhost_poll *poll, struct file *file); 46 46 void vhost_poll_stop(struct vhost_poll *poll); 47 47 void vhost_poll_flush(struct vhost_poll *poll); 48 48 void vhost_poll_queue(struct vhost_poll *poll);
+12 -1
drivers/video/imxfb.c
··· 139 139 struct clk *clk_ahb; 140 140 struct clk *clk_per; 141 141 enum imxfb_type devtype; 142 + bool enabled; 142 143 143 144 /* 144 145 * These are the addresses we mapped ··· 537 536 538 537 static void imxfb_enable_controller(struct imxfb_info *fbi) 539 538 { 539 + 540 + if (fbi->enabled) 541 + return; 542 + 540 543 pr_debug("Enabling LCD controller\n"); 541 544 542 545 writel(fbi->screen_dma, fbi->regs + LCDC_SSA); ··· 561 556 clk_prepare_enable(fbi->clk_ipg); 562 557 clk_prepare_enable(fbi->clk_ahb); 563 558 clk_prepare_enable(fbi->clk_per); 559 + fbi->enabled = true; 564 560 565 561 if (fbi->backlight_power) 566 562 fbi->backlight_power(1); ··· 571 565 572 566 static void imxfb_disable_controller(struct imxfb_info *fbi) 573 567 { 568 + if (!fbi->enabled) 569 + return; 570 + 574 571 pr_debug("Disabling LCD controller\n"); 575 572 576 573 if (fbi->backlight_power) ··· 584 575 clk_disable_unprepare(fbi->clk_per); 585 576 clk_disable_unprepare(fbi->clk_ipg); 586 577 clk_disable_unprepare(fbi->clk_ahb); 578 + fbi->enabled = false; 587 579 588 580 writel(0, fbi->regs + LCDC_RMCR); 589 581 } ··· 739 729 740 730 memset(fbi, 0, sizeof(struct imxfb_info)); 741 731 732 + fbi->devtype = pdev->id_entry->driver_data; 733 + 742 734 strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id)); 743 735 744 736 info->fix.type = FB_TYPE_PACKED_PIXELS; ··· 801 789 return -ENOMEM; 802 790 803 791 fbi = info->par; 804 - fbi->devtype = pdev->id_entry->driver_data; 805 792 806 793 if (!fb_mode) 807 794 fb_mode = pdata->mode[0].mode.name;
+2 -2
drivers/xen/events.c
··· 840 840 841 841 if (irq == -1) { 842 842 irq = xen_allocate_irq_dynamic(); 843 - if (irq == -1) 843 + if (irq < 0) 844 844 goto out; 845 845 846 846 irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, ··· 944 944 945 945 if (irq == -1) { 946 946 irq = xen_allocate_irq_dynamic(); 947 - if (irq == -1) 947 + if (irq < 0) 948 948 goto out; 949 949 950 950 irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
+7 -7
drivers/xen/xen-pciback/pciback_ops.c
··· 135 135 struct pci_dev *dev, struct xen_pci_op *op) 136 136 { 137 137 struct xen_pcibk_dev_data *dev_data; 138 - int otherend = pdev->xdev->otherend_id; 139 138 int status; 140 139 141 140 if (unlikely(verbose_request)) ··· 143 144 status = pci_enable_msi(dev); 144 145 145 146 if (status) { 146 - printk(KERN_ERR "error enable msi for guest %x status %x\n", 147 - otherend, status); 147 + pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n", 148 + pci_name(dev), pdev->xdev->otherend_id, 149 + status); 148 150 op->value = 0; 149 151 return XEN_PCI_ERR_op_failed; 150 152 } ··· 223 223 pci_name(dev), i, 224 224 op->msix_entries[i].vector); 225 225 } 226 - } else { 227 - printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n", 228 - pci_name(dev), result); 229 - } 226 + } else 227 + pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI-X for guest %u: err %d!\n", 228 + pci_name(dev), pdev->xdev->otherend_id, 229 + result); 230 230 kfree(entries); 231 231 232 232 op->value = result;
+14 -14
fs/btrfs/extent-tree.c
··· 3997 3997 * We make the other tasks wait for the flush only when we can flush 3998 3998 * all things. 3999 3999 */ 4000 - if (ret && flush == BTRFS_RESERVE_FLUSH_ALL) { 4000 + if (ret && flush != BTRFS_RESERVE_NO_FLUSH) { 4001 4001 flushing = true; 4002 4002 space_info->flush = 1; 4003 4003 } ··· 4534 4534 unsigned nr_extents = 0; 4535 4535 int extra_reserve = 0; 4536 4536 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL; 4537 - int ret; 4537 + int ret = 0; 4538 4538 bool delalloc_lock = true; 4539 4539 4540 4540 /* If we are a free space inode we need to not flush since we will be in ··· 4579 4579 csum_bytes = BTRFS_I(inode)->csum_bytes; 4580 4580 spin_unlock(&BTRFS_I(inode)->lock); 4581 4581 4582 - if (root->fs_info->quota_enabled) { 4582 + if (root->fs_info->quota_enabled) 4583 4583 ret = btrfs_qgroup_reserve(root, num_bytes + 4584 4584 nr_extents * root->leafsize); 4585 - if (ret) { 4586 - spin_lock(&BTRFS_I(inode)->lock); 4587 - calc_csum_metadata_size(inode, num_bytes, 0); 4588 - spin_unlock(&BTRFS_I(inode)->lock); 4589 - if (delalloc_lock) 4590 - mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); 4591 - return ret; 4592 - } 4593 - } 4594 4585 4595 - ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); 4586 + /* 4587 + * ret != 0 here means the qgroup reservation failed, we go straight to 4588 + * the shared error handling then. 4589 + */ 4590 + if (ret == 0) 4591 + ret = reserve_metadata_bytes(root, block_rsv, 4592 + to_reserve, flush); 4593 + 4596 4594 if (ret) { 4597 4595 u64 to_free = 0; 4598 4596 unsigned dropped; ··· 5558 5560 int empty_cluster = 2 * 1024 * 1024; 5559 5561 struct btrfs_space_info *space_info; 5560 5562 int loop = 0; 5561 - int index = 0; 5563 + int index = __get_raid_index(data); 5562 5564 int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ? 
5563 5565 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC; 5564 5566 bool found_uncached_bg = false; ··· 6786 6788 &wc->flags[level]); 6787 6789 if (ret < 0) { 6788 6790 btrfs_tree_unlock_rw(eb, path->locks[level]); 6791 + path->locks[level] = 0; 6789 6792 return ret; 6790 6793 } 6791 6794 BUG_ON(wc->refs[level] == 0); 6792 6795 if (wc->refs[level] == 1) { 6793 6796 btrfs_tree_unlock_rw(eb, path->locks[level]); 6797 + path->locks[level] = 0; 6794 6798 return 1; 6795 6799 } 6796 6800 }
+13 -1
fs/btrfs/extent_map.c
··· 171 171 if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags)) 172 172 return 0; 173 173 174 + if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) || 175 + test_bit(EXTENT_FLAG_LOGGING, &next->flags)) 176 + return 0; 177 + 174 178 if (extent_map_end(prev) == next->start && 175 179 prev->flags == next->flags && 176 180 prev->bdev == next->bdev && ··· 259 255 if (!em) 260 256 goto out; 261 257 262 - list_move(&em->list, &tree->modified_extents); 258 + if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags)) 259 + list_move(&em->list, &tree->modified_extents); 263 260 em->generation = gen; 264 261 clear_bit(EXTENT_FLAG_PINNED, &em->flags); 265 262 em->mod_start = em->start; ··· 283 278 write_unlock(&tree->lock); 284 279 return ret; 285 280 281 + } 282 + 283 + void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em) 284 + { 285 + clear_bit(EXTENT_FLAG_LOGGING, &em->flags); 286 + if (em->in_tree) 287 + try_merge_map(tree, em); 286 288 } 287 289 288 290 /**
+1
fs/btrfs/extent_map.h
··· 69 69 int __init extent_map_init(void); 70 70 void extent_map_exit(void); 71 71 int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen); 72 + void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em); 72 73 struct extent_map *search_extent_mapping(struct extent_map_tree *tree, 73 74 u64 start, u64 len); 74 75 #endif
+2 -2
fs/btrfs/file-item.c
··· 460 460 if (!contig) 461 461 offset = page_offset(bvec->bv_page) + bvec->bv_offset; 462 462 463 - if (!contig && (offset >= ordered->file_offset + ordered->len || 464 - offset < ordered->file_offset)) { 463 + if (offset >= ordered->file_offset + ordered->len || 464 + offset < ordered->file_offset) { 465 465 unsigned long bytes_left; 466 466 sums->len = this_sum_bytes; 467 467 this_sum_bytes = 0;
+27 -8
fs/btrfs/file.c
··· 293 293 struct btrfs_key key; 294 294 struct btrfs_ioctl_defrag_range_args range; 295 295 int num_defrag; 296 + int index; 297 + int ret; 296 298 297 299 /* get the inode */ 298 300 key.objectid = defrag->root; 299 301 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); 300 302 key.offset = (u64)-1; 303 + 304 + index = srcu_read_lock(&fs_info->subvol_srcu); 305 + 301 306 inode_root = btrfs_read_fs_root_no_name(fs_info, &key); 302 307 if (IS_ERR(inode_root)) { 303 - kmem_cache_free(btrfs_inode_defrag_cachep, defrag); 304 - return PTR_ERR(inode_root); 308 + ret = PTR_ERR(inode_root); 309 + goto cleanup; 310 + } 311 + if (btrfs_root_refs(&inode_root->root_item) == 0) { 312 + ret = -ENOENT; 313 + goto cleanup; 305 314 } 306 315 307 316 key.objectid = defrag->ino; ··· 318 309 key.offset = 0; 319 310 inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL); 320 311 if (IS_ERR(inode)) { 321 - kmem_cache_free(btrfs_inode_defrag_cachep, defrag); 322 - return PTR_ERR(inode); 312 + ret = PTR_ERR(inode); 313 + goto cleanup; 323 314 } 315 + srcu_read_unlock(&fs_info->subvol_srcu, index); 324 316 325 317 /* do a chunk of defrag */ 326 318 clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); ··· 356 346 357 347 iput(inode); 358 348 return 0; 349 + cleanup: 350 + srcu_read_unlock(&fs_info->subvol_srcu, index); 351 + kmem_cache_free(btrfs_inode_defrag_cachep, defrag); 352 + return ret; 359 353 } 360 354 361 355 /* ··· 1608 1594 if (err < 0 && num_written > 0) 1609 1595 num_written = err; 1610 1596 } 1611 - out: 1597 + 1612 1598 if (sync) 1613 1599 atomic_dec(&BTRFS_I(inode)->sync_writers); 1600 + out: 1614 1601 sb_end_write(inode->i_sb); 1615 1602 current->backing_dev_info = NULL; 1616 1603 return num_written ? 
num_written : err; ··· 2256 2241 if (lockend <= lockstart) 2257 2242 lockend = lockstart + root->sectorsize; 2258 2243 2244 + lockend--; 2259 2245 len = lockend - lockstart + 1; 2260 2246 2261 2247 len = max_t(u64, len, root->sectorsize); ··· 2323 2307 } 2324 2308 } 2325 2309 2326 - *offset = start; 2327 - free_extent_map(em); 2328 - break; 2310 + if (!test_bit(EXTENT_FLAG_PREALLOC, 2311 + &em->flags)) { 2312 + *offset = start; 2313 + free_extent_map(em); 2314 + break; 2315 + } 2329 2316 } 2330 2317 } 2331 2318
+12 -8
fs/btrfs/free-space-cache.c
··· 1862 1862 { 1863 1863 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 1864 1864 struct btrfs_free_space *info; 1865 - int ret = 0; 1865 + int ret; 1866 + bool re_search = false; 1866 1867 1867 1868 spin_lock(&ctl->tree_lock); 1868 1869 1869 1870 again: 1871 + ret = 0; 1870 1872 if (!bytes) 1871 1873 goto out_lock; 1872 1874 ··· 1881 1879 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1882 1880 1, 0); 1883 1881 if (!info) { 1884 - /* the tree logging code might be calling us before we 1885 - * have fully loaded the free space rbtree for this 1886 - * block group. So it is possible the entry won't 1887 - * be in the rbtree yet at all. The caching code 1888 - * will make sure not to put it in the rbtree if 1889 - * the logging code has pinned it. 1882 + /* 1883 + * If we found a partial bit of our free space in a 1884 + * bitmap but then couldn't find the other part this may 1885 + * be a problem, so WARN about it. 1890 1886 */ 1887 + WARN_ON(re_search); 1891 1888 goto out_lock; 1892 1889 } 1893 1890 } 1894 1891 1892 + re_search = false; 1895 1893 if (!info->bitmap) { 1896 1894 unlink_free_space(ctl, info); 1897 1895 if (offset == info->offset) { ··· 1937 1935 } 1938 1936 1939 1937 ret = remove_from_bitmap(ctl, info, &offset, &bytes); 1940 - if (ret == -EAGAIN) 1938 + if (ret == -EAGAIN) { 1939 + re_search = true; 1941 1940 goto again; 1941 + } 1942 1942 BUG_ON(ret); /* logic error */ 1943 1943 out_lock: 1944 1944 spin_unlock(&ctl->tree_lock);
+102 -35
fs/btrfs/inode.c
··· 88 88 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK, 89 89 }; 90 90 91 - static int btrfs_setsize(struct inode *inode, loff_t newsize); 91 + static int btrfs_setsize(struct inode *inode, struct iattr *attr); 92 92 static int btrfs_truncate(struct inode *inode); 93 93 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent); 94 94 static noinline int cow_file_range(struct inode *inode, ··· 2478 2478 continue; 2479 2479 } 2480 2480 nr_truncate++; 2481 + 2482 + /* 1 for the orphan item deletion. */ 2483 + trans = btrfs_start_transaction(root, 1); 2484 + if (IS_ERR(trans)) { 2485 + ret = PTR_ERR(trans); 2486 + goto out; 2487 + } 2488 + ret = btrfs_orphan_add(trans, inode); 2489 + btrfs_end_transaction(trans, root); 2490 + if (ret) 2491 + goto out; 2492 + 2481 2493 ret = btrfs_truncate(inode); 2482 2494 } else { 2483 2495 nr_unlink++; ··· 3677 3665 block_end - cur_offset, 0); 3678 3666 if (IS_ERR(em)) { 3679 3667 err = PTR_ERR(em); 3668 + em = NULL; 3680 3669 break; 3681 3670 } 3682 3671 last_byte = min(extent_map_end(em), block_end); ··· 3761 3748 return err; 3762 3749 } 3763 3750 3764 - static int btrfs_setsize(struct inode *inode, loff_t newsize) 3751 + static int btrfs_setsize(struct inode *inode, struct iattr *attr) 3765 3752 { 3766 3753 struct btrfs_root *root = BTRFS_I(inode)->root; 3767 3754 struct btrfs_trans_handle *trans; 3768 3755 loff_t oldsize = i_size_read(inode); 3756 + loff_t newsize = attr->ia_size; 3757 + int mask = attr->ia_valid; 3769 3758 int ret; 3770 3759 3771 3760 if (newsize == oldsize) 3772 3761 return 0; 3762 + 3763 + /* 3764 + * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 3765 + * special case where we need to update the times despite not having 3766 + * these flags set. For all other operations the VFS set these flags 3767 + * explicitly if it wants a timestamp update. 
3768 + */ 3769 + if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) 3770 + inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb); 3773 3771 3774 3772 if (newsize > oldsize) { 3775 3773 truncate_pagecache(inode, oldsize, newsize); ··· 3807 3783 set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, 3808 3784 &BTRFS_I(inode)->runtime_flags); 3809 3785 3786 + /* 3787 + * 1 for the orphan item we're going to add 3788 + * 1 for the orphan item deletion. 3789 + */ 3790 + trans = btrfs_start_transaction(root, 2); 3791 + if (IS_ERR(trans)) 3792 + return PTR_ERR(trans); 3793 + 3794 + /* 3795 + * We need to do this in case we fail at _any_ point during the 3796 + * actual truncate. Once we do the truncate_setsize we could 3797 + * invalidate pages which forces any outstanding ordered io to 3798 + * be instantly completed which will give us extents that need 3799 + * to be truncated. If we fail to get an orphan inode down we 3800 + * could have left over extents that were never meant to live, 3801 + * so we need to garuntee from this point on that everything 3802 + * will be consistent. 
3803 + */ 3804 + ret = btrfs_orphan_add(trans, inode); 3805 + btrfs_end_transaction(trans, root); 3806 + if (ret) 3807 + return ret; 3808 + 3810 3809 /* we don't support swapfiles, so vmtruncate shouldn't fail */ 3811 3810 truncate_setsize(inode, newsize); 3812 3811 ret = btrfs_truncate(inode); 3812 + if (ret && inode->i_nlink) 3813 + btrfs_orphan_del(NULL, inode); 3813 3814 } 3814 3815 3815 3816 return ret; ··· 3854 3805 return err; 3855 3806 3856 3807 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 3857 - err = btrfs_setsize(inode, attr->ia_size); 3808 + err = btrfs_setsize(inode, attr); 3858 3809 if (err) 3859 3810 return err; 3860 3811 } ··· 5621 5572 return em; 5622 5573 if (em) { 5623 5574 /* 5624 - * if our em maps to a hole, there might 5625 - * actually be delalloc bytes behind it 5575 + * if our em maps to 5576 + * - a hole or 5577 + * - a pre-alloc extent, 5578 + * there might actually be delalloc bytes behind it. 5626 5579 */ 5627 - if (em->block_start != EXTENT_MAP_HOLE) 5580 + if (em->block_start != EXTENT_MAP_HOLE && 5581 + !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 5628 5582 return em; 5629 5583 else 5630 5584 hole_em = em; ··· 5709 5657 */ 5710 5658 em->block_start = hole_em->block_start; 5711 5659 em->block_len = hole_len; 5660 + if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags)) 5661 + set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 5712 5662 } else { 5713 5663 em->start = range_start; 5714 5664 em->len = found; ··· 6969 6915 6970 6916 /* 6971 6917 * 1 for the truncate slack space 6972 - * 1 for the orphan item we're going to add 6973 - * 1 for the orphan item deletion 6974 6918 * 1 for updating the inode. 
6975 6919 */ 6976 - trans = btrfs_start_transaction(root, 4); 6920 + trans = btrfs_start_transaction(root, 2); 6977 6921 if (IS_ERR(trans)) { 6978 6922 err = PTR_ERR(trans); 6979 6923 goto out; ··· 6981 6929 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv, 6982 6930 min_size); 6983 6931 BUG_ON(ret); 6984 - 6985 - ret = btrfs_orphan_add(trans, inode); 6986 - if (ret) { 6987 - btrfs_end_transaction(trans, root); 6988 - goto out; 6989 - } 6990 6932 6991 6933 /* 6992 6934 * setattr is responsible for setting the ordered_data_close flag, ··· 7050 7004 ret = btrfs_orphan_del(trans, inode); 7051 7005 if (ret) 7052 7006 err = ret; 7053 - } else if (ret && inode->i_nlink > 0) { 7054 - /* 7055 - * Failed to do the truncate, remove us from the in memory 7056 - * orphan list. 7057 - */ 7058 - ret = btrfs_orphan_del(NULL, inode); 7059 7007 } 7060 7008 7061 7009 if (trans) { ··· 7571 7531 */ 7572 7532 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) 7573 7533 { 7574 - struct list_head *head = &root->fs_info->delalloc_inodes; 7575 7534 struct btrfs_inode *binode; 7576 7535 struct inode *inode; 7577 7536 struct btrfs_delalloc_work *work, *next; 7578 7537 struct list_head works; 7538 + struct list_head splice; 7579 7539 int ret = 0; 7580 7540 7581 7541 if (root->fs_info->sb->s_flags & MS_RDONLY) 7582 7542 return -EROFS; 7583 7543 7584 7544 INIT_LIST_HEAD(&works); 7585 - 7545 + INIT_LIST_HEAD(&splice); 7546 + again: 7586 7547 spin_lock(&root->fs_info->delalloc_lock); 7587 - while (!list_empty(head)) { 7588 - binode = list_entry(head->next, struct btrfs_inode, 7548 + list_splice_init(&root->fs_info->delalloc_inodes, &splice); 7549 + while (!list_empty(&splice)) { 7550 + binode = list_entry(splice.next, struct btrfs_inode, 7589 7551 delalloc_inodes); 7552 + 7553 + list_del_init(&binode->delalloc_inodes); 7554 + 7590 7555 inode = igrab(&binode->vfs_inode); 7591 7556 if (!inode) 7592 - list_del_init(&binode->delalloc_inodes); 7557 + continue; 
7558 + 7559 + list_add_tail(&binode->delalloc_inodes, 7560 + &root->fs_info->delalloc_inodes); 7593 7561 spin_unlock(&root->fs_info->delalloc_lock); 7594 - if (inode) { 7595 - work = btrfs_alloc_delalloc_work(inode, 0, delay_iput); 7596 - if (!work) { 7597 - ret = -ENOMEM; 7598 - goto out; 7599 - } 7600 - list_add_tail(&work->list, &works); 7601 - btrfs_queue_worker(&root->fs_info->flush_workers, 7602 - &work->work); 7562 + 7563 + work = btrfs_alloc_delalloc_work(inode, 0, delay_iput); 7564 + if (unlikely(!work)) { 7565 + ret = -ENOMEM; 7566 + goto out; 7603 7567 } 7568 + list_add_tail(&work->list, &works); 7569 + btrfs_queue_worker(&root->fs_info->flush_workers, 7570 + &work->work); 7571 + 7604 7572 cond_resched(); 7605 7573 spin_lock(&root->fs_info->delalloc_lock); 7574 + } 7575 + spin_unlock(&root->fs_info->delalloc_lock); 7576 + 7577 + list_for_each_entry_safe(work, next, &works, list) { 7578 + list_del_init(&work->list); 7579 + btrfs_wait_and_free_delalloc_work(work); 7580 + } 7581 + 7582 + spin_lock(&root->fs_info->delalloc_lock); 7583 + if (!list_empty(&root->fs_info->delalloc_inodes)) { 7584 + spin_unlock(&root->fs_info->delalloc_lock); 7585 + goto again; 7606 7586 } 7607 7587 spin_unlock(&root->fs_info->delalloc_lock); 7608 7588 ··· 7638 7578 atomic_read(&root->fs_info->async_delalloc_pages) == 0)); 7639 7579 } 7640 7580 atomic_dec(&root->fs_info->async_submit_draining); 7581 + return 0; 7641 7582 out: 7642 7583 list_for_each_entry_safe(work, next, &works, list) { 7643 7584 list_del_init(&work->list); 7644 7585 btrfs_wait_and_free_delalloc_work(work); 7586 + } 7587 + 7588 + if (!list_empty_careful(&splice)) { 7589 + spin_lock(&root->fs_info->delalloc_lock); 7590 + list_splice_tail(&splice, &root->fs_info->delalloc_inodes); 7591 + spin_unlock(&root->fs_info->delalloc_lock); 7645 7592 } 7646 7593 return ret; 7647 7594 }
+98 -36
fs/btrfs/ioctl.c
··· 515 515 516 516 BUG_ON(ret); 517 517 518 - d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); 519 518 fail: 520 519 if (async_transid) { 521 520 *async_transid = trans->transid; ··· 524 525 } 525 526 if (err && !ret) 526 527 ret = err; 528 + 529 + if (!ret) 530 + d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); 531 + 527 532 return ret; 528 533 } 529 534 ··· 1342 1339 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, 1343 1340 1)) { 1344 1341 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); 1345 - return -EINPROGRESS; 1342 + mnt_drop_write_file(file); 1343 + return -EINVAL; 1346 1344 } 1347 1345 1348 1346 mutex_lock(&root->fs_info->volume_mutex); ··· 1366 1362 printk(KERN_INFO "btrfs: resizing devid %llu\n", 1367 1363 (unsigned long long)devid); 1368 1364 } 1365 + 1369 1366 device = btrfs_find_device(root->fs_info, devid, NULL, NULL); 1370 1367 if (!device) { 1371 1368 printk(KERN_INFO "btrfs: resizer unable to find device %llu\n", ··· 1374 1369 ret = -EINVAL; 1375 1370 goto out_free; 1376 1371 } 1377 - if (device->fs_devices && device->fs_devices->seeding) { 1372 + 1373 + if (!device->writeable) { 1378 1374 printk(KERN_INFO "btrfs: resizer unable to apply on " 1379 - "seeding device %llu\n", 1375 + "readonly device %llu\n", 1380 1376 (unsigned long long)devid); 1381 1377 ret = -EINVAL; 1382 1378 goto out_free; ··· 1449 1443 kfree(vol_args); 1450 1444 out: 1451 1445 mutex_unlock(&root->fs_info->volume_mutex); 1452 - mnt_drop_write_file(file); 1453 1446 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); 1447 + mnt_drop_write_file(file); 1454 1448 return ret; 1455 1449 } 1456 1450 ··· 2101 2095 err = inode_permission(inode, MAY_WRITE | MAY_EXEC); 2102 2096 if (err) 2103 2097 goto out_dput; 2104 - 2105 - /* check if subvolume may be deleted by a non-root user */ 2106 - err = btrfs_may_delete(dir, dentry, 1); 2107 - if (err) 2108 - goto out_dput; 2109 2098 } 2099 + 2100 + /* check 
if subvolume may be deleted by a user */ 2101 + err = btrfs_may_delete(dir, dentry, 1); 2102 + if (err) 2103 + goto out_dput; 2110 2104 2111 2105 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) { 2112 2106 err = -EINVAL; ··· 2189 2183 struct btrfs_ioctl_defrag_range_args *range; 2190 2184 int ret; 2191 2185 2192 - if (btrfs_root_readonly(root)) 2193 - return -EROFS; 2186 + ret = mnt_want_write_file(file); 2187 + if (ret) 2188 + return ret; 2194 2189 2195 2190 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, 2196 2191 1)) { 2197 2192 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); 2198 - return -EINPROGRESS; 2193 + mnt_drop_write_file(file); 2194 + return -EINVAL; 2199 2195 } 2200 - ret = mnt_want_write_file(file); 2201 - if (ret) { 2202 - atomic_set(&root->fs_info->mutually_exclusive_operation_running, 2203 - 0); 2204 - return ret; 2196 + 2197 + if (btrfs_root_readonly(root)) { 2198 + ret = -EROFS; 2199 + goto out; 2205 2200 } 2206 2201 2207 2202 switch (inode->i_mode & S_IFMT) { ··· 2254 2247 ret = -EINVAL; 2255 2248 } 2256 2249 out: 2257 - mnt_drop_write_file(file); 2258 2250 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); 2251 + mnt_drop_write_file(file); 2259 2252 return ret; 2260 2253 } 2261 2254 ··· 2270 2263 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, 2271 2264 1)) { 2272 2265 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); 2273 - return -EINPROGRESS; 2266 + return -EINVAL; 2274 2267 } 2275 2268 2276 2269 mutex_lock(&root->fs_info->volume_mutex); ··· 2307 2300 1)) { 2308 2301 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); 2309 2302 mnt_drop_write_file(file); 2310 - return -EINPROGRESS; 2303 + return -EINVAL; 2311 2304 } 2312 2305 2313 2306 mutex_lock(&root->fs_info->volume_mutex); ··· 2323 2316 kfree(vol_args); 2324 2317 out: 2325 2318 mutex_unlock(&root->fs_info->volume_mutex); 2326 - 
mnt_drop_write_file(file); 2327 2319 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); 2320 + mnt_drop_write_file(file); 2328 2321 return ret; 2329 2322 } 2330 2323 ··· 3444 3437 struct btrfs_fs_info *fs_info = root->fs_info; 3445 3438 struct btrfs_ioctl_balance_args *bargs; 3446 3439 struct btrfs_balance_control *bctl; 3440 + bool need_unlock; /* for mut. excl. ops lock */ 3447 3441 int ret; 3448 - int need_to_clear_lock = 0; 3449 3442 3450 3443 if (!capable(CAP_SYS_ADMIN)) 3451 3444 return -EPERM; ··· 3454 3447 if (ret) 3455 3448 return ret; 3456 3449 3457 - mutex_lock(&fs_info->volume_mutex); 3450 + again: 3451 + if (!atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) { 3452 + mutex_lock(&fs_info->volume_mutex); 3453 + mutex_lock(&fs_info->balance_mutex); 3454 + need_unlock = true; 3455 + goto locked; 3456 + } 3457 + 3458 + /* 3459 + * mut. excl. ops lock is locked. Three possibilites: 3460 + * (1) some other op is running 3461 + * (2) balance is running 3462 + * (3) balance is paused -- special case (think resume) 3463 + */ 3458 3464 mutex_lock(&fs_info->balance_mutex); 3465 + if (fs_info->balance_ctl) { 3466 + /* this is either (2) or (3) */ 3467 + if (!atomic_read(&fs_info->balance_running)) { 3468 + mutex_unlock(&fs_info->balance_mutex); 3469 + if (!mutex_trylock(&fs_info->volume_mutex)) 3470 + goto again; 3471 + mutex_lock(&fs_info->balance_mutex); 3472 + 3473 + if (fs_info->balance_ctl && 3474 + !atomic_read(&fs_info->balance_running)) { 3475 + /* this is (3) */ 3476 + need_unlock = false; 3477 + goto locked; 3478 + } 3479 + 3480 + mutex_unlock(&fs_info->balance_mutex); 3481 + mutex_unlock(&fs_info->volume_mutex); 3482 + goto again; 3483 + } else { 3484 + /* this is (2) */ 3485 + mutex_unlock(&fs_info->balance_mutex); 3486 + ret = -EINPROGRESS; 3487 + goto out; 3488 + } 3489 + } else { 3490 + /* this is (1) */ 3491 + mutex_unlock(&fs_info->balance_mutex); 3492 + pr_info("btrfs: dev add/delete/balance/replace/resize operation 
in progress\n"); 3493 + ret = -EINVAL; 3494 + goto out; 3495 + } 3496 + 3497 + locked: 3498 + BUG_ON(!atomic_read(&fs_info->mutually_exclusive_operation_running)); 3459 3499 3460 3500 if (arg) { 3461 3501 bargs = memdup_user(arg, sizeof(*bargs)); 3462 3502 if (IS_ERR(bargs)) { 3463 3503 ret = PTR_ERR(bargs); 3464 - goto out; 3504 + goto out_unlock; 3465 3505 } 3466 3506 3467 3507 if (bargs->flags & BTRFS_BALANCE_RESUME) { ··· 3528 3474 bargs = NULL; 3529 3475 } 3530 3476 3531 - if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, 3532 - 1)) { 3533 - pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); 3477 + if (fs_info->balance_ctl) { 3534 3478 ret = -EINPROGRESS; 3535 3479 goto out_bargs; 3536 3480 } 3537 - need_to_clear_lock = 1; 3538 3481 3539 3482 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 3540 3483 if (!bctl) { ··· 3552 3501 } 3553 3502 3554 3503 do_balance: 3555 - ret = btrfs_balance(bctl, bargs); 3556 3504 /* 3557 - * bctl is freed in __cancel_balance or in free_fs_info if 3558 - * restriper was paused all the way until unmount 3505 + * Ownership of bctl and mutually_exclusive_operation_running 3506 + * goes to to btrfs_balance. bctl is freed in __cancel_balance, 3507 + * or, if restriper was paused all the way until unmount, in 3508 + * free_fs_info. mutually_exclusive_operation_running is 3509 + * cleared in __cancel_balance. 
3559 3510 */ 3511 + need_unlock = false; 3512 + 3513 + ret = btrfs_balance(bctl, bargs); 3514 + 3560 3515 if (arg) { 3561 3516 if (copy_to_user(arg, bargs, sizeof(*bargs))) 3562 3517 ret = -EFAULT; ··· 3570 3513 3571 3514 out_bargs: 3572 3515 kfree(bargs); 3573 - out: 3574 - if (need_to_clear_lock) 3575 - atomic_set(&root->fs_info->mutually_exclusive_operation_running, 3576 - 0); 3516 + out_unlock: 3577 3517 mutex_unlock(&fs_info->balance_mutex); 3578 3518 mutex_unlock(&fs_info->volume_mutex); 3519 + if (need_unlock) 3520 + atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 3521 + out: 3579 3522 mnt_drop_write_file(file); 3580 3523 return ret; 3581 3524 } ··· 3753 3696 if (IS_ERR(sa)) { 3754 3697 ret = PTR_ERR(sa); 3755 3698 goto drop_write; 3699 + } 3700 + 3701 + if (!sa->qgroupid) { 3702 + ret = -EINVAL; 3703 + goto out; 3756 3704 } 3757 3705 3758 3706 trans = btrfs_join_transaction(root);
+10 -3
fs/btrfs/ordered-data.c
··· 836 836 * if the disk i_size is already at the inode->i_size, or 837 837 * this ordered extent is inside the disk i_size, we're done 838 838 */ 839 - if (disk_i_size == i_size || offset <= disk_i_size) { 839 + if (disk_i_size == i_size) 840 840 goto out; 841 - } 841 + 842 + /* 843 + * We still need to update disk_i_size if outstanding_isize is greater 844 + * than disk_i_size. 845 + */ 846 + if (offset <= disk_i_size && 847 + (!ordered || ordered->outstanding_isize <= disk_i_size)) 848 + goto out; 842 849 843 850 /* 844 851 * walk backward from this ordered extent to disk_i_size. ··· 877 870 break; 878 871 if (test->file_offset >= i_size) 879 872 break; 880 - if (test->file_offset >= disk_i_size) { 873 + if (entry_end(test) > disk_i_size) { 881 874 /* 882 875 * we don't update disk_i_size now, so record this 883 876 * undealt i_size. Or we will not know the real
+19 -1
fs/btrfs/qgroup.c
··· 379 379 380 380 ret = add_relation_rb(fs_info, found_key.objectid, 381 381 found_key.offset); 382 + if (ret == -ENOENT) { 383 + printk(KERN_WARNING 384 + "btrfs: orphan qgroup relation 0x%llx->0x%llx\n", 385 + (unsigned long long)found_key.objectid, 386 + (unsigned long long)found_key.offset); 387 + ret = 0; /* ignore the error */ 388 + } 382 389 if (ret) 383 390 goto out; 384 391 next2: ··· 963 956 struct btrfs_fs_info *fs_info, u64 qgroupid) 964 957 { 965 958 struct btrfs_root *quota_root; 959 + struct btrfs_qgroup *qgroup; 966 960 int ret = 0; 967 961 968 962 quota_root = fs_info->quota_root; 969 963 if (!quota_root) 970 964 return -EINVAL; 971 965 966 + /* check if there are no relations to this qgroup */ 967 + spin_lock(&fs_info->qgroup_lock); 968 + qgroup = find_qgroup_rb(fs_info, qgroupid); 969 + if (qgroup) { 970 + if (!list_empty(&qgroup->groups) || !list_empty(&qgroup->members)) { 971 + spin_unlock(&fs_info->qgroup_lock); 972 + return -EBUSY; 973 + } 974 + } 975 + spin_unlock(&fs_info->qgroup_lock); 976 + 972 977 ret = del_qgroup_item(trans, quota_root, qgroupid); 973 978 974 979 spin_lock(&fs_info->qgroup_lock); 975 980 del_qgroup_rb(quota_root->fs_info, qgroupid); 976 - 977 981 spin_unlock(&fs_info->qgroup_lock); 978 982 979 983 return ret;
+20 -5
fs/btrfs/scrub.c
··· 580 580 int corrected = 0; 581 581 struct btrfs_key key; 582 582 struct inode *inode = NULL; 583 + struct btrfs_fs_info *fs_info; 583 584 u64 end = offset + PAGE_SIZE - 1; 584 585 struct btrfs_root *local_root; 586 + int srcu_index; 585 587 586 588 key.objectid = root; 587 589 key.type = BTRFS_ROOT_ITEM_KEY; 588 590 key.offset = (u64)-1; 589 - local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key); 590 - if (IS_ERR(local_root)) 591 + 592 + fs_info = fixup->root->fs_info; 593 + srcu_index = srcu_read_lock(&fs_info->subvol_srcu); 594 + 595 + local_root = btrfs_read_fs_root_no_name(fs_info, &key); 596 + if (IS_ERR(local_root)) { 597 + srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); 591 598 return PTR_ERR(local_root); 599 + } 592 600 593 601 key.type = BTRFS_INODE_ITEM_KEY; 594 602 key.objectid = inum; 595 603 key.offset = 0; 596 - inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL); 604 + inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); 605 + srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); 597 606 if (IS_ERR(inode)) 598 607 return PTR_ERR(inode); 599 608 ··· 615 606 } 616 607 617 608 if (PageUptodate(page)) { 618 - struct btrfs_fs_info *fs_info; 619 609 if (PageDirty(page)) { 620 610 /* 621 611 * we need to write the data to the defect sector. 
the ··· 3188 3180 u64 physical_for_dev_replace; 3189 3181 u64 len; 3190 3182 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info; 3183 + int srcu_index; 3191 3184 3192 3185 key.objectid = root; 3193 3186 key.type = BTRFS_ROOT_ITEM_KEY; 3194 3187 key.offset = (u64)-1; 3188 + 3189 + srcu_index = srcu_read_lock(&fs_info->subvol_srcu); 3190 + 3195 3191 local_root = btrfs_read_fs_root_no_name(fs_info, &key); 3196 - if (IS_ERR(local_root)) 3192 + if (IS_ERR(local_root)) { 3193 + srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); 3197 3194 return PTR_ERR(local_root); 3195 + } 3198 3196 3199 3197 key.type = BTRFS_INODE_ITEM_KEY; 3200 3198 key.objectid = inum; 3201 3199 key.offset = 0; 3202 3200 inode = btrfs_iget(fs_info->sb, &key, local_root, NULL); 3201 + srcu_read_unlock(&fs_info->subvol_srcu, srcu_index); 3203 3202 if (IS_ERR(inode)) 3204 3203 return PTR_ERR(inode); 3205 3204
+3 -1
fs/btrfs/send.c
··· 1814 1814 (unsigned long)nce->ino); 1815 1815 if (!nce_head) { 1816 1816 nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS); 1817 - if (!nce_head) 1817 + if (!nce_head) { 1818 + kfree(nce); 1818 1819 return -ENOMEM; 1820 + } 1819 1821 INIT_LIST_HEAD(nce_head); 1820 1822 1821 1823 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
+1 -1
fs/btrfs/super.c
··· 267 267 function, line, errstr); 268 268 return; 269 269 } 270 - trans->transaction->aborted = errno; 270 + ACCESS_ONCE(trans->transaction->aborted) = errno; 271 271 __btrfs_std_error(root->fs_info, function, line, errno, NULL); 272 272 } 273 273 /*
+37 -9
fs/btrfs/transaction.c
··· 333 333 &root->fs_info->trans_block_rsv, 334 334 num_bytes, flush); 335 335 if (ret) 336 - return ERR_PTR(ret); 336 + goto reserve_fail; 337 337 } 338 338 again: 339 339 h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); 340 - if (!h) 341 - return ERR_PTR(-ENOMEM); 340 + if (!h) { 341 + ret = -ENOMEM; 342 + goto alloc_fail; 343 + } 342 344 343 345 /* 344 346 * If we are JOIN_NOLOCK we're already committing a transaction and ··· 367 365 if (ret < 0) { 368 366 /* We must get the transaction if we are JOIN_NOLOCK. */ 369 367 BUG_ON(type == TRANS_JOIN_NOLOCK); 370 - 371 - if (type < TRANS_JOIN_NOLOCK) 372 - sb_end_intwrite(root->fs_info->sb); 373 - kmem_cache_free(btrfs_trans_handle_cachep, h); 374 - return ERR_PTR(ret); 368 + goto join_fail; 375 369 } 376 370 377 371 cur_trans = root->fs_info->running_transaction; ··· 408 410 if (!current->journal_info && type != TRANS_USERSPACE) 409 411 current->journal_info = h; 410 412 return h; 413 + 414 + join_fail: 415 + if (type < TRANS_JOIN_NOLOCK) 416 + sb_end_intwrite(root->fs_info->sb); 417 + kmem_cache_free(btrfs_trans_handle_cachep, h); 418 + alloc_fail: 419 + if (num_bytes) 420 + btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv, 421 + num_bytes); 422 + reserve_fail: 423 + if (qgroup_reserved) 424 + btrfs_qgroup_free(root, qgroup_reserved); 425 + return ERR_PTR(ret); 411 426 } 412 427 413 428 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, ··· 1479 1468 goto cleanup_transaction; 1480 1469 } 1481 1470 1482 - if (cur_trans->aborted) { 1471 + /* Stop the commit early if ->aborted is set */ 1472 + if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { 1483 1473 ret = cur_trans->aborted; 1484 1474 goto cleanup_transaction; 1485 1475 } ··· 1586 1574 wait_event(cur_trans->writer_wait, 1587 1575 atomic_read(&cur_trans->num_writers) == 1); 1588 1576 1577 + /* ->aborted might be set after the previous check, so check it */ 1578 + if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { 1579 + 
ret = cur_trans->aborted; 1580 + goto cleanup_transaction; 1581 + } 1589 1582 /* 1590 1583 * the reloc mutex makes sure that we stop 1591 1584 * the balancing code from coming in and moving ··· 1669 1652 1670 1653 ret = commit_cowonly_roots(trans, root); 1671 1654 if (ret) { 1655 + mutex_unlock(&root->fs_info->tree_log_mutex); 1656 + mutex_unlock(&root->fs_info->reloc_mutex); 1657 + goto cleanup_transaction; 1658 + } 1659 + 1660 + /* 1661 + * The tasks which save the space cache and inode cache may also 1662 + * update ->aborted, check it. 1663 + */ 1664 + if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { 1665 + ret = cur_trans->aborted; 1672 1666 mutex_unlock(&root->fs_info->tree_log_mutex); 1673 1667 mutex_unlock(&root->fs_info->reloc_mutex); 1674 1668 goto cleanup_transaction;
+8 -2
fs/btrfs/tree-log.c
··· 3357 3357 if (skip_csum) 3358 3358 return 0; 3359 3359 3360 + if (em->compress_type) { 3361 + csum_offset = 0; 3362 + csum_len = block_len; 3363 + } 3364 + 3360 3365 /* block start is already adjusted for the file extent offset. */ 3361 3366 ret = btrfs_lookup_csums_range(log->fs_info->csum_root, 3362 3367 em->block_start + csum_offset, ··· 3415 3410 em = list_entry(extents.next, struct extent_map, list); 3416 3411 3417 3412 list_del_init(&em->list); 3418 - clear_bit(EXTENT_FLAG_LOGGING, &em->flags); 3419 3413 3420 3414 /* 3421 3415 * If we had an error we just need to delete everybody from our 3422 3416 * private list. 3423 3417 */ 3424 3418 if (ret) { 3419 + clear_em_logging(tree, em); 3425 3420 free_extent_map(em); 3426 3421 continue; 3427 3422 } ··· 3429 3424 write_unlock(&tree->lock); 3430 3425 3431 3426 ret = log_one_extent(trans, inode, root, em, path); 3432 - free_extent_map(em); 3433 3427 write_lock(&tree->lock); 3428 + clear_em_logging(tree, em); 3429 + free_extent_map(em); 3434 3430 } 3435 3431 WARN_ON(!list_empty(&extents)); 3436 3432 write_unlock(&tree->lock);
+19 -7
fs/btrfs/volumes.c
··· 1431 1431 } 1432 1432 } else { 1433 1433 ret = btrfs_get_bdev_and_sb(device_path, 1434 - FMODE_READ | FMODE_EXCL, 1434 + FMODE_WRITE | FMODE_EXCL, 1435 1435 root->fs_info->bdev_holder, 0, 1436 1436 &bdev, &bh); 1437 1437 if (ret) ··· 1556 1556 ret = 0; 1557 1557 1558 1558 /* Notify udev that device has changed */ 1559 - btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 1559 + if (bdev) 1560 + btrfs_kobject_uevent(bdev, KOBJ_CHANGE); 1560 1561 1561 1562 error_brelse: 1562 1563 brelse(bh); ··· 2615 2614 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 2616 2615 chunk_used = btrfs_block_group_used(&cache->item); 2617 2616 2618 - user_thresh = div_factor_fine(cache->key.offset, bargs->usage); 2617 + if (bargs->usage == 0) 2618 + user_thresh = 0; 2619 + else if (bargs->usage > 100) 2620 + user_thresh = cache->key.offset; 2621 + else 2622 + user_thresh = div_factor_fine(cache->key.offset, 2623 + bargs->usage); 2624 + 2619 2625 if (chunk_used < user_thresh) 2620 2626 ret = 0; 2621 2627 ··· 2967 2959 unset_balance_control(fs_info); 2968 2960 ret = del_balance_item(fs_info->tree_root); 2969 2961 BUG_ON(ret); 2962 + 2963 + atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 2970 2964 } 2971 2965 2972 2966 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock, ··· 3148 3138 out: 3149 3139 if (bctl->flags & BTRFS_BALANCE_RESUME) 3150 3140 __cancel_balance(fs_info); 3151 - else 3141 + else { 3152 3142 kfree(bctl); 3143 + atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 3144 + } 3153 3145 return ret; 3154 3146 } 3155 3147 ··· 3168 3156 ret = btrfs_balance(fs_info->balance_ctl, NULL); 3169 3157 } 3170 3158 3171 - atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 3172 3159 mutex_unlock(&fs_info->balance_mutex); 3173 3160 mutex_unlock(&fs_info->volume_mutex); 3174 3161 ··· 3190 3179 return 0; 3191 3180 } 3192 3181 3193 - WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)); 3194 3182 tsk = 
kthread_run(balance_kthread, fs_info, "btrfs-balance"); 3195 3183 if (IS_ERR(tsk)) 3196 3184 return PTR_ERR(tsk); ··· 3242 3232 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 3243 3233 btrfs_balance_sys(leaf, item, &disk_bargs); 3244 3234 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 3235 + 3236 + WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)); 3245 3237 3246 3238 mutex_lock(&fs_info->volume_mutex); 3247 3239 mutex_lock(&fs_info->balance_mutex); ··· 3508 3496 { 1, 1, 2, 2, 2, 2 /* raid1 */ }, 3509 3497 { 1, 2, 1, 1, 1, 2 /* dup */ }, 3510 3498 { 1, 1, 0, 2, 1, 1 /* raid0 */ }, 3511 - { 1, 1, 0, 1, 1, 1 /* single */ }, 3499 + { 1, 1, 1, 1, 1, 1 /* single */ }, 3512 3500 }; 3513 3501 3514 3502 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+2
fs/cifs/cifs_dfs_ref.c
··· 226 226 compose_mount_options_err: 227 227 kfree(mountdata); 228 228 mountdata = ERR_PTR(rc); 229 + kfree(*devname); 230 + *devname = NULL; 229 231 goto compose_mount_options_out; 230 232 } 231 233
+1 -1
fs/cifs/connect.c
··· 1917 1917 } 1918 1918 case AF_INET6: { 1919 1919 struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr; 1920 - struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)&rhs; 1920 + struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs; 1921 1921 return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr); 1922 1922 } 1923 1923 default:
+4 -4
fs/dlm/user.c
··· 503 503 #endif 504 504 return -EINVAL; 505 505 506 - #ifdef CONFIG_COMPAT 507 - if (count > sizeof(struct dlm_write_request32) + DLM_RESNAME_MAXLEN) 508 - #else 506 + /* 507 + * can't compare against COMPAT/dlm_write_request32 because 508 + * we don't yet know if is64bit is zero 509 + */ 509 510 if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN) 510 - #endif 511 511 return -EINVAL; 512 512 513 513 kbuf = kzalloc(count + 1, GFP_NOFS);
+6 -1
fs/gfs2/lock_dlm.c
··· 281 281 { 282 282 struct gfs2_sbd *sdp = gl->gl_sbd; 283 283 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 284 + int lvb_needs_unlock = 0; 284 285 int error; 285 286 286 287 if (gl->gl_lksb.sb_lkid == 0) { ··· 295 294 gfs2_update_request_times(gl); 296 295 297 296 /* don't want to skip dlm_unlock writing the lvb when lock is ex */ 297 + 298 + if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE)) 299 + lvb_needs_unlock = 1; 300 + 298 301 if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) && 299 - gl->gl_lksb.sb_lvbptr && (gl->gl_state != LM_ST_EXCLUSIVE)) { 302 + !lvb_needs_unlock) { 300 303 gfs2_glock_free(gl); 301 304 return; 302 305 }
+20
fs/nfs/namespace.c
··· 177 177 return mnt; 178 178 } 179 179 180 + static int 181 + nfs_namespace_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) 182 + { 183 + if (NFS_FH(dentry->d_inode)->size != 0) 184 + return nfs_getattr(mnt, dentry, stat); 185 + generic_fillattr(dentry->d_inode, stat); 186 + return 0; 187 + } 188 + 189 + static int 190 + nfs_namespace_setattr(struct dentry *dentry, struct iattr *attr) 191 + { 192 + if (NFS_FH(dentry->d_inode)->size != 0) 193 + return nfs_setattr(dentry, attr); 194 + return -EACCES; 195 + } 196 + 180 197 const struct inode_operations nfs_mountpoint_inode_operations = { 181 198 .getattr = nfs_getattr, 199 + .setattr = nfs_setattr, 182 200 }; 183 201 184 202 const struct inode_operations nfs_referral_inode_operations = { 203 + .getattr = nfs_namespace_getattr, 204 + .setattr = nfs_namespace_setattr, 185 205 }; 186 206 187 207 static void nfs_expire_automounts(struct work_struct *work)
+26 -36
fs/nfs/nfs4client.c
··· 236 236 error = nfs4_discover_server_trunking(clp, &old); 237 237 if (error < 0) 238 238 goto error; 239 + nfs_put_client(clp); 239 240 if (clp != old) { 240 241 clp->cl_preserve_clid = true; 241 - nfs_put_client(clp); 242 242 clp = old; 243 - atomic_inc(&clp->cl_count); 244 243 } 245 244 246 245 return clp; ··· 305 306 .clientid = new->cl_clientid, 306 307 .confirm = new->cl_confirm, 307 308 }; 308 - int status; 309 + int status = -NFS4ERR_STALE_CLIENTID; 309 310 310 311 spin_lock(&nn->nfs_client_lock); 311 312 list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { ··· 331 332 332 333 if (prev) 333 334 nfs_put_client(prev); 335 + prev = pos; 334 336 335 337 status = nfs4_proc_setclientid_confirm(pos, &clid, cred); 336 - if (status == 0) { 338 + switch (status) { 339 + case -NFS4ERR_STALE_CLIENTID: 340 + break; 341 + case 0: 337 342 nfs4_swap_callback_idents(pos, new); 338 343 339 - nfs_put_client(pos); 344 + prev = NULL; 340 345 *result = pos; 341 346 dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", 342 347 __func__, pos, atomic_read(&pos->cl_count)); 343 - return 0; 344 - } 345 - if (status != -NFS4ERR_STALE_CLIENTID) { 346 - nfs_put_client(pos); 347 - dprintk("NFS: <-- %s status = %d, no result\n", 348 - __func__, status); 349 - return status; 348 + default: 349 + goto out; 350 350 } 351 351 352 352 spin_lock(&nn->nfs_client_lock); 353 - prev = pos; 354 353 } 354 + spin_unlock(&nn->nfs_client_lock); 355 355 356 - /* 357 - * No matching nfs_client found. This should be impossible, 358 - * because the new nfs_client has already been added to 359 - * nfs_client_list by nfs_get_client(). 360 - * 361 - * Don't BUG(), since the caller is holding a mutex. 362 - */ 356 + /* No match found. 
The server lost our clientid */ 357 + out: 363 358 if (prev) 364 359 nfs_put_client(prev); 365 - spin_unlock(&nn->nfs_client_lock); 366 - pr_err("NFS: %s Error: no matching nfs_client found\n", __func__); 367 - return -NFS4ERR_STALE_CLIENTID; 360 + dprintk("NFS: <-- %s status = %d\n", __func__, status); 361 + return status; 368 362 } 369 363 370 364 #ifdef CONFIG_NFS_V4_1 ··· 424 432 { 425 433 struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); 426 434 struct nfs_client *pos, *n, *prev = NULL; 427 - int error; 435 + int status = -NFS4ERR_STALE_CLIENTID; 428 436 429 437 spin_lock(&nn->nfs_client_lock); 430 438 list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { ··· 440 448 nfs_put_client(prev); 441 449 prev = pos; 442 450 443 - error = nfs_wait_client_init_complete(pos); 444 - if (error < 0) { 451 + nfs4_schedule_lease_recovery(pos); 452 + status = nfs_wait_client_init_complete(pos); 453 + if (status < 0) { 445 454 nfs_put_client(pos); 446 455 spin_lock(&nn->nfs_client_lock); 447 456 continue; 448 457 } 449 - 458 + status = pos->cl_cons_state; 450 459 spin_lock(&nn->nfs_client_lock); 460 + if (status < 0) 461 + continue; 451 462 } 452 463 453 464 if (pos->rpc_ops != new->rpc_ops) ··· 468 473 if (!nfs4_match_serverowners(pos, new)) 469 474 continue; 470 475 476 + atomic_inc(&pos->cl_count); 471 477 spin_unlock(&nn->nfs_client_lock); 472 478 dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", 473 479 __func__, pos, atomic_read(&pos->cl_count)); ··· 477 481 return 0; 478 482 } 479 483 480 - /* 481 - * No matching nfs_client found. This should be impossible, 482 - * because the new nfs_client has already been added to 483 - * nfs_client_list by nfs_get_client(). 484 - * 485 - * Don't BUG(), since the caller is holding a mutex. 486 - */ 484 + /* No matching nfs_client found. 
*/ 487 485 spin_unlock(&nn->nfs_client_lock); 488 - pr_err("NFS: %s Error: no matching nfs_client found\n", __func__); 489 - return -NFS4ERR_STALE_CLIENTID; 486 + dprintk("NFS: <-- %s status = %d\n", __func__, status); 487 + return status; 490 488 } 491 489 #endif /* CONFIG_NFS_V4_1 */ 492 490
+14 -8
fs/nfs/nfs4state.c
··· 136 136 clp->cl_confirm = clid.confirm; 137 137 138 138 status = nfs40_walk_client_list(clp, result, cred); 139 - switch (status) { 140 - case -NFS4ERR_STALE_CLIENTID: 141 - set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 142 - case 0: 139 + if (status == 0) { 143 140 /* Sustain the lease, even if it's empty. If the clientid4 144 141 * goes stale it's of no use for trunking discovery. */ 145 142 nfs4_schedule_state_renewal(*result); 146 - break; 147 143 } 148 - 149 144 out: 150 145 return status; 151 146 } ··· 1858 1863 case -ETIMEDOUT: 1859 1864 case -EAGAIN: 1860 1865 ssleep(1); 1866 + case -NFS4ERR_STALE_CLIENTID: 1861 1867 dprintk("NFS: %s after status %d, retrying\n", 1862 1868 __func__, status); 1863 1869 goto again; ··· 2018 2022 nfs4_begin_drain_session(clp); 2019 2023 cred = nfs4_get_exchange_id_cred(clp); 2020 2024 status = nfs4_proc_destroy_session(clp->cl_session, cred); 2021 - if (status && status != -NFS4ERR_BADSESSION && 2022 - status != -NFS4ERR_DEADSESSION) { 2025 + switch (status) { 2026 + case 0: 2027 + case -NFS4ERR_BADSESSION: 2028 + case -NFS4ERR_DEADSESSION: 2029 + break; 2030 + case -NFS4ERR_BACK_CHAN_BUSY: 2031 + case -NFS4ERR_DELAY: 2032 + set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); 2033 + status = 0; 2034 + ssleep(1); 2035 + goto out; 2036 + default: 2023 2037 status = nfs4_recovery_handle_error(clp, status); 2024 2038 goto out; 2025 2039 }
+9 -13
fs/nfs/super.c
··· 2589 2589 struct nfs_server *server; 2590 2590 struct dentry *mntroot = ERR_PTR(-ENOMEM); 2591 2591 struct nfs_subversion *nfs_mod = NFS_SB(data->sb)->nfs_client->cl_nfs_mod; 2592 - int error; 2593 2592 2594 - dprintk("--> nfs_xdev_mount_common()\n"); 2593 + dprintk("--> nfs_xdev_mount()\n"); 2595 2594 2596 2595 mount_info.mntfh = mount_info.cloned->fh; 2597 2596 2598 2597 /* create a new volume representation */ 2599 2598 server = nfs_mod->rpc_ops->clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor); 2600 - if (IS_ERR(server)) { 2601 - error = PTR_ERR(server); 2602 - goto out_err; 2603 - } 2604 2599 2605 - mntroot = nfs_fs_mount_common(server, flags, dev_name, &mount_info, nfs_mod); 2606 - dprintk("<-- nfs_xdev_mount_common() = 0\n"); 2607 - out: 2600 + if (IS_ERR(server)) 2601 + mntroot = ERR_CAST(server); 2602 + else 2603 + mntroot = nfs_fs_mount_common(server, flags, 2604 + dev_name, &mount_info, nfs_mod); 2605 + 2606 + dprintk("<-- nfs_xdev_mount() = %ld\n", 2607 + IS_ERR(mntroot) ? PTR_ERR(mntroot) : 0L); 2608 2608 return mntroot; 2609 - 2610 - out_err: 2611 - dprintk("<-- nfs_xdev_mount_common() = %d [error]\n", error); 2612 - goto out; 2613 2609 } 2614 2610 2615 2611 #if IS_ENABLED(CONFIG_NFS_V4)
+4 -1
fs/nilfs2/ioctl.c
··· 664 664 if (ret < 0) 665 665 printk(KERN_ERR "NILFS: GC failed during preparation: " 666 666 "cannot read source blocks: err=%d\n", ret); 667 - else 667 + else { 668 + if (nilfs_sb_need_update(nilfs)) 669 + set_nilfs_discontinued(nilfs); 668 670 ret = nilfs_clean_segments(inode->i_sb, argv, kbufs); 671 + } 669 672 670 673 nilfs_remove_all_gcinodes(nilfs); 671 674 clear_nilfs_gc_running(nilfs);
+1 -1
fs/xfs/xfs_aops.c
··· 86 86 } 87 87 88 88 if (ioend->io_iocb) { 89 + inode_dio_done(ioend->io_inode); 89 90 if (ioend->io_isasync) { 90 91 aio_complete(ioend->io_iocb, ioend->io_error ? 91 92 ioend->io_error : ioend->io_result, 0); 92 93 } 93 - inode_dio_done(ioend->io_inode); 94 94 } 95 95 96 96 mempool_free(ioend, xfs_ioend_pool);
+3 -3
fs/xfs/xfs_bmap.c
··· 4680 4680 return error; 4681 4681 } 4682 4682 4683 - if (bma->flags & XFS_BMAPI_STACK_SWITCH) 4684 - bma->stack_switch = 1; 4685 - 4686 4683 error = xfs_bmap_alloc(bma); 4687 4684 if (error) 4688 4685 return error; ··· 4952 4955 bma.userdata = 0; 4953 4956 bma.flist = flist; 4954 4957 bma.firstblock = firstblock; 4958 + 4959 + if (flags & XFS_BMAPI_STACK_SWITCH) 4960 + bma.stack_switch = 1; 4955 4961 4956 4962 while (bno < end && n < *nmap) { 4957 4963 inhole = eof || bma.got.br_startoff > bno;
+20
fs/xfs/xfs_buf.c
··· 487 487 struct rb_node *parent; 488 488 xfs_buf_t *bp; 489 489 xfs_daddr_t blkno = map[0].bm_bn; 490 + xfs_daddr_t eofs; 490 491 int numblks = 0; 491 492 int i; 492 493 ··· 498 497 /* Check for IOs smaller than the sector size / not sector aligned */ 499 498 ASSERT(!(numbytes < (1 << btp->bt_sshift))); 500 499 ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask)); 500 + 501 + /* 502 + * Corrupted block numbers can get through to here, unfortunately, so we 503 + * have to check that the buffer falls within the filesystem bounds. 504 + */ 505 + eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks); 506 + if (blkno >= eofs) { 507 + /* 508 + * XXX (dgc): we should really be returning EFSCORRUPTED here, 509 + * but none of the higher level infrastructure supports 510 + * returning a specific error on buffer lookup failures. 511 + */ 512 + xfs_alert(btp->bt_mount, 513 + "%s: Block out of range: block 0x%llx, EOFS 0x%llx ", 514 + __func__, blkno, eofs); 515 + return NULL; 516 + } 501 517 502 518 /* get tree root */ 503 519 pag = xfs_perag_get(btp->bt_mount, ··· 1505 1487 while (!list_empty(&btp->bt_lru)) { 1506 1488 bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru); 1507 1489 if (atomic_read(&bp->b_hold) > 1) { 1490 + trace_xfs_buf_wait_buftarg(bp, _RET_IP_); 1491 + list_move_tail(&bp->b_lru, &btp->bt_lru); 1508 1492 spin_unlock(&btp->bt_lru_lock); 1509 1493 delay(100); 1510 1494 goto restart;
+10 -2
fs/xfs/xfs_buf_item.c
··· 652 652 653 653 /* 654 654 * If the buf item isn't tracking any data, free it, otherwise drop the 655 - * reference we hold to it. 655 + * reference we hold to it. If we are aborting the transaction, this may 656 + * be the only reference to the buf item, so we free it anyway 657 + * regardless of whether it is dirty or not. A dirty abort implies a 658 + * shutdown, anyway. 656 659 */ 657 660 clean = 1; 658 661 for (i = 0; i < bip->bli_format_count; i++) { ··· 667 664 } 668 665 if (clean) 669 666 xfs_buf_item_relse(bp); 670 - else 667 + else if (aborted) { 668 + if (atomic_dec_and_test(&bip->bli_refcount)) { 669 + ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp)); 670 + xfs_buf_item_relse(bp); 671 + } 672 + } else 671 673 atomic_dec(&bip->bli_refcount); 672 674 673 675 if (!hold)
+2 -2
fs/xfs/xfs_dfrag.c
··· 246 246 goto out_unlock; 247 247 } 248 248 249 - error = -filemap_write_and_wait(VFS_I(ip)->i_mapping); 249 + error = -filemap_write_and_wait(VFS_I(tip)->i_mapping); 250 250 if (error) 251 251 goto out_unlock; 252 - truncate_pagecache_range(VFS_I(ip), 0, -1); 252 + truncate_pagecache_range(VFS_I(tip), 0, -1); 253 253 254 254 /* Verify O_DIRECT for ftmp */ 255 255 if (VN_CACHED(VFS_I(tip)) != 0) {
+9
fs/xfs/xfs_iomap.c
··· 351 351 } 352 352 if (shift) 353 353 alloc_blocks >>= shift; 354 + 355 + /* 356 + * If we are still trying to allocate more space than is 357 + * available, squash the prealloc hard. This can happen if we 358 + * have a large file on a small filesystem and the above 359 + * lowspace thresholds are smaller than MAXEXTLEN. 360 + */ 361 + while (alloc_blocks >= freesp) 362 + alloc_blocks >>= 4; 354 363 } 355 364 356 365 if (alloc_blocks < mp->m_writeio_blocks)
+1 -1
fs/xfs/xfs_mount.c
··· 658 658 return; 659 659 } 660 660 /* quietly fail */ 661 - xfs_buf_ioerror(bp, EFSCORRUPTED); 661 + xfs_buf_ioerror(bp, EWRONGFS); 662 662 } 663 663 664 664 static void
+1
fs/xfs/xfs_trace.h
··· 341 341 DEFINE_BUF_EVENT(xfs_buf_item_iodone); 342 342 DEFINE_BUF_EVENT(xfs_buf_item_iodone_async); 343 343 DEFINE_BUF_EVENT(xfs_buf_error_relse); 344 + DEFINE_BUF_EVENT(xfs_buf_wait_buftarg); 344 345 DEFINE_BUF_EVENT(xfs_trans_read_buf_io); 345 346 DEFINE_BUF_EVENT(xfs_trans_read_buf_shut); 346 347
+18 -6
include/linux/efi.h
··· 618 618 #endif 619 619 620 620 /* 621 - * We play games with efi_enabled so that the compiler will, if possible, remove 622 - * EFI-related code altogether. 621 + * We play games with efi_enabled so that the compiler will, if 622 + * possible, remove EFI-related code altogether. 623 623 */ 624 + #define EFI_BOOT 0 /* Were we booted from EFI? */ 625 + #define EFI_SYSTEM_TABLES 1 /* Can we use EFI system tables? */ 626 + #define EFI_CONFIG_TABLES 2 /* Can we use EFI config tables? */ 627 + #define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */ 628 + #define EFI_MEMMAP 4 /* Can we use EFI memory map? */ 629 + #define EFI_64BIT 5 /* Is the firmware 64-bit? */ 630 + 624 631 #ifdef CONFIG_EFI 625 632 # ifdef CONFIG_X86 626 - extern int efi_enabled; 627 - extern bool efi_64bit; 633 + extern int efi_enabled(int facility); 628 634 # else 629 - # define efi_enabled 1 635 + static inline int efi_enabled(int facility) 636 + { 637 + return 1; 638 + } 630 639 # endif 631 640 #else 632 - # define efi_enabled 0 641 + static inline int efi_enabled(int facility) 642 + { 643 + return 0; 644 + } 633 645 #endif 634 646 635 647 /*
+25
include/linux/llist.h
··· 125 125 (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member)) 126 126 127 127 /** 128 + * llist_for_each_entry_safe - iterate safely against remove over some entries 129 + * of lock-less list of given type. 130 + * @pos: the type * to use as a loop cursor. 131 + * @n: another type * to use as a temporary storage. 132 + * @node: the fist entry of deleted list entries. 133 + * @member: the name of the llist_node with the struct. 134 + * 135 + * In general, some entries of the lock-less list can be traversed 136 + * safely only after being removed from list, so start with an entry 137 + * instead of list head. This variant allows removal of entries 138 + * as we iterate. 139 + * 140 + * If being used on entries deleted from lock-less list directly, the 141 + * traverse order is from the newest to the oldest added entry. If 142 + * you want to traverse from the oldest to the newest, you must 143 + * reverse the order by yourself before traversing. 144 + */ 145 + #define llist_for_each_entry_safe(pos, n, node, member) \ 146 + for ((pos) = llist_entry((node), typeof(*(pos)), member), \ 147 + (n) = (pos)->member.next; \ 148 + &(pos)->member != NULL; \ 149 + (pos) = llist_entry(n, typeof(*(pos)), member), \ 150 + (n) = (&(pos)->member != NULL) ? (pos)->member.next : NULL) 151 + 152 + /** 128 153 * llist_empty - tests whether a lock-less list is empty 129 154 * @head: the list to test 130 155 *
+1 -1
include/linux/memcontrol.h
··· 429 429 * the slab_mutex must be held when looping through those caches 430 430 */ 431 431 #define for_each_memcg_cache_index(_idx) \ 432 - for ((_idx) = 0; i < memcg_limited_groups_array_size; (_idx)++) 432 + for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++) 433 433 434 434 static inline bool memcg_kmem_enabled(void) 435 435 {
-2
include/linux/mfd/abx500.h
··· 272 272 const struct abx500_fg_parameters *fg_params; 273 273 }; 274 274 275 - extern struct abx500_bm_data ab8500_bm_data; 276 - 277 275 enum { 278 276 NTC_EXTERNAL = 0, 279 277 NTC_INTERNAL,
+4 -25
include/linux/mfd/abx500/ab8500-bm.h
··· 422 422 struct ab8500_btemp; 423 423 struct ab8500_gpadc; 424 424 struct ab8500_fg; 425 + 425 426 #ifdef CONFIG_AB8500_BM 427 + extern struct abx500_bm_data ab8500_bm_data; 428 + 426 429 void ab8500_fg_reinit(void); 427 430 void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA); 428 431 struct ab8500_btemp *ab8500_btemp_get(void); ··· 437 434 int ab8500_fg_inst_curr_done(struct ab8500_fg *di); 438 435 439 436 #else 440 - int ab8500_fg_inst_curr_done(struct ab8500_fg *di) 441 - { 442 - } 443 - static void ab8500_fg_reinit(void) 444 - { 445 - } 446 - static void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA) 447 - { 448 - } 449 - static struct ab8500_btemp *ab8500_btemp_get(void) 450 - { 451 - return NULL; 452 - } 453 - static int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp) 454 - { 455 - return 0; 456 - } 457 - struct ab8500_fg *ab8500_fg_get(void) 458 - { 459 - return NULL; 460 - } 461 - static int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev) 462 - { 463 - return -ENODEV; 464 - } 437 + static struct abx500_bm_data ab8500_bm_data; 465 438 466 439 static inline int ab8500_fg_inst_curr_start(struct ab8500_fg *di) 467 440 {
+62 -4
include/linux/mfd/da9052/da9052.h
··· 99 99 u8 chip_id; 100 100 101 101 int chip_irq; 102 + 103 + /* SOC I/O transfer related fixes for DA9052/53 */ 104 + int (*fix_io) (struct da9052 *da9052, unsigned char reg); 102 105 }; 103 106 104 107 /* ADC API */ ··· 116 113 ret = regmap_read(da9052->regmap, reg, &val); 117 114 if (ret < 0) 118 115 return ret; 116 + 117 + if (da9052->fix_io) { 118 + ret = da9052->fix_io(da9052, reg); 119 + if (ret < 0) 120 + return ret; 121 + } 122 + 119 123 return val; 120 124 } 121 125 122 126 static inline int da9052_reg_write(struct da9052 *da9052, unsigned char reg, 123 127 unsigned char val) 124 128 { 125 - return regmap_write(da9052->regmap, reg, val); 129 + int ret; 130 + 131 + ret = regmap_write(da9052->regmap, reg, val); 132 + if (ret < 0) 133 + return ret; 134 + 135 + if (da9052->fix_io) { 136 + ret = da9052->fix_io(da9052, reg); 137 + if (ret < 0) 138 + return ret; 139 + } 140 + 141 + return ret; 126 142 } 127 143 128 144 static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg, 129 145 unsigned reg_cnt, unsigned char *val) 130 146 { 131 - return regmap_bulk_read(da9052->regmap, reg, val, reg_cnt); 147 + int ret; 148 + 149 + ret = regmap_bulk_read(da9052->regmap, reg, val, reg_cnt); 150 + if (ret < 0) 151 + return ret; 152 + 153 + if (da9052->fix_io) { 154 + ret = da9052->fix_io(da9052, reg); 155 + if (ret < 0) 156 + return ret; 157 + } 158 + 159 + return ret; 132 160 } 133 161 134 162 static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg, 135 163 unsigned reg_cnt, unsigned char *val) 136 164 { 137 - return regmap_raw_write(da9052->regmap, reg, val, reg_cnt); 165 + int ret; 166 + 167 + ret = regmap_raw_write(da9052->regmap, reg, val, reg_cnt); 168 + if (ret < 0) 169 + return ret; 170 + 171 + if (da9052->fix_io) { 172 + ret = da9052->fix_io(da9052, reg); 173 + if (ret < 0) 174 + return ret; 175 + } 176 + 177 + return ret; 138 178 } 139 179 140 180 static inline int da9052_reg_update(struct da9052 *da9052, unsigned char 
reg, 141 181 unsigned char bit_mask, 142 182 unsigned char reg_val) 143 183 { 144 - return regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val); 184 + int ret; 185 + 186 + ret = regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val); 187 + if (ret < 0) 188 + return ret; 189 + 190 + if (da9052->fix_io) { 191 + ret = da9052->fix_io(da9052, reg); 192 + if (ret < 0) 193 + return ret; 194 + } 195 + 196 + return ret; 145 197 } 146 198 147 199 int da9052_device_init(struct da9052 *da9052, u8 chip_id);
+3
include/linux/mfd/da9052/reg.h
··· 34 34 #define DA9052_STATUS_C_REG 3 35 35 #define DA9052_STATUS_D_REG 4 36 36 37 + /* PARK REGISTER */ 38 + #define DA9052_PARK_REGISTER DA9052_STATUS_D_REG 39 + 37 40 /* EVENT REGISTERS */ 38 41 #define DA9052_EVENT_A_REG 5 39 42 #define DA9052_EVENT_B_REG 6
+3
include/linux/mfd/rtsx_common.h
··· 38 38 #define RTSX_SD_CARD 0 39 39 #define RTSX_MS_CARD 1 40 40 41 + #define CLK_TO_DIV_N 0 42 + #define DIV_N_TO_CLK 1 43 + 41 44 struct platform_device; 42 45 43 46 struct rtsx_slot {
+21 -4
include/linux/mfd/rtsx_pci.h
··· 158 158 #define SG_TRANS_DATA (0x02 << 4) 159 159 #define SG_LINK_DESC (0x03 << 4) 160 160 161 - /* SD bank voltage */ 162 - #define SD_IO_3V3 0 163 - #define SD_IO_1V8 1 164 - 161 + /* Output voltage */ 162 + #define OUTPUT_3V3 0 163 + #define OUTPUT_1V8 1 165 164 166 165 /* Card Clock Enable Register */ 167 166 #define SD_CLK_EN 0x04 ··· 200 201 #define CHANGE_CLK 0x01 201 202 202 203 /* LDO_CTL */ 204 + #define BPP_ASIC_1V7 0x00 205 + #define BPP_ASIC_1V8 0x01 206 + #define BPP_ASIC_1V9 0x02 207 + #define BPP_ASIC_2V0 0x03 208 + #define BPP_ASIC_2V7 0x04 209 + #define BPP_ASIC_2V8 0x05 210 + #define BPP_ASIC_3V2 0x06 211 + #define BPP_ASIC_3V3 0x07 212 + #define BPP_REG_TUNED18 0x07 213 + #define BPP_TUNED18_SHIFT_8402 5 214 + #define BPP_TUNED18_SHIFT_8411 4 215 + #define BPP_PAD_MASK 0x04 216 + #define BPP_PAD_3V3 0x04 217 + #define BPP_PAD_1V8 0x00 203 218 #define BPP_LDO_POWB 0x03 204 219 #define BPP_LDO_ON 0x00 205 220 #define BPP_LDO_SUSPEND 0x02 ··· 701 688 int (*disable_auto_blink)(struct rtsx_pcr *pcr); 702 689 int (*card_power_on)(struct rtsx_pcr *pcr, int card); 703 690 int (*card_power_off)(struct rtsx_pcr *pcr, int card); 691 + int (*switch_output_voltage)(struct rtsx_pcr *pcr, 692 + u8 voltage); 704 693 unsigned int (*cd_deglitch)(struct rtsx_pcr *pcr); 694 + int (*conv_clk_and_div_n)(int clk, int dir); 705 695 }; 706 696 707 697 enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN}; ··· 799 783 u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); 800 784 int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card); 801 785 int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card); 786 + int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage); 802 787 unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr); 803 788 void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr); 804 789
+1 -1
include/linux/mmu_notifier.h
··· 151 151 * Therefore notifier chains can only be traversed when either 152 152 * 153 153 * 1. mmap_sem is held. 154 - * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex). 154 + * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem). 155 155 * 3. No other concurrent thread can access the list (release) 156 156 */ 157 157 struct mmu_notifier {
+46 -13
include/linux/security.h
··· 989 989 * tells the LSM to decrement the number of secmark labeling rules loaded 990 990 * @req_classify_flow: 991 991 * Sets the flow's sid to the openreq sid. 992 + * @tun_dev_alloc_security: 993 + * This hook allows a module to allocate a security structure for a TUN 994 + * device. 995 + * @security pointer to a security structure pointer. 996 + * Returns a zero on success, negative values on failure. 997 + * @tun_dev_free_security: 998 + * This hook allows a module to free the security structure for a TUN 999 + * device. 1000 + * @security pointer to the TUN device's security structure 992 1001 * @tun_dev_create: 993 1002 * Check permissions prior to creating a new TUN device. 994 - * @tun_dev_post_create: 995 - * This hook allows a module to update or allocate a per-socket security 996 - * structure. 997 - * @sk contains the newly created sock structure. 1003 + * @tun_dev_attach_queue: 1004 + * Check permissions prior to attaching to a TUN device queue. 1005 + * @security pointer to the TUN device's security structure. 998 1006 * @tun_dev_attach: 999 - * Check permissions prior to attaching to a persistent TUN device. This 1000 - * hook can also be used by the module to update any security state 1007 + * This hook can be used by the module to update any security state 1001 1008 * associated with the TUN device's sock structure. 1002 1009 * @sk contains the existing sock structure. 1010 + * @security pointer to the TUN device's security structure. 1011 + * @tun_dev_open: 1012 + * This hook can be used by the module to update any security state 1013 + * associated with the TUN device's security structure. 1014 + * @security pointer to the TUN devices's security structure. 1003 1015 * 1004 1016 * Security hooks for XFRM operations. 
1005 1017 * ··· 1632 1620 void (*secmark_refcount_inc) (void); 1633 1621 void (*secmark_refcount_dec) (void); 1634 1622 void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); 1635 - int (*tun_dev_create)(void); 1636 - void (*tun_dev_post_create)(struct sock *sk); 1637 - int (*tun_dev_attach)(struct sock *sk); 1623 + int (*tun_dev_alloc_security) (void **security); 1624 + void (*tun_dev_free_security) (void *security); 1625 + int (*tun_dev_create) (void); 1626 + int (*tun_dev_attach_queue) (void *security); 1627 + int (*tun_dev_attach) (struct sock *sk, void *security); 1628 + int (*tun_dev_open) (void *security); 1638 1629 #endif /* CONFIG_SECURITY_NETWORK */ 1639 1630 1640 1631 #ifdef CONFIG_SECURITY_NETWORK_XFRM ··· 2581 2566 int security_secmark_relabel_packet(u32 secid); 2582 2567 void security_secmark_refcount_inc(void); 2583 2568 void security_secmark_refcount_dec(void); 2569 + int security_tun_dev_alloc_security(void **security); 2570 + void security_tun_dev_free_security(void *security); 2584 2571 int security_tun_dev_create(void); 2585 - void security_tun_dev_post_create(struct sock *sk); 2586 - int security_tun_dev_attach(struct sock *sk); 2572 + int security_tun_dev_attach_queue(void *security); 2573 + int security_tun_dev_attach(struct sock *sk, void *security); 2574 + int security_tun_dev_open(void *security); 2587 2575 2588 2576 #else /* CONFIG_SECURITY_NETWORK */ 2589 2577 static inline int security_unix_stream_connect(struct sock *sock, ··· 2751 2733 { 2752 2734 } 2753 2735 2736 + static inline int security_tun_dev_alloc_security(void **security) 2737 + { 2738 + return 0; 2739 + } 2740 + 2741 + static inline void security_tun_dev_free_security(void *security) 2742 + { 2743 + } 2744 + 2754 2745 static inline int security_tun_dev_create(void) 2755 2746 { 2756 2747 return 0; 2757 2748 } 2758 2749 2759 - static inline void security_tun_dev_post_create(struct sock *sk) 2750 + static inline int security_tun_dev_attach_queue(void 
*security) 2760 2751 { 2752 + return 0; 2761 2753 } 2762 2754 2763 - static inline int security_tun_dev_attach(struct sock *sk) 2755 + static inline int security_tun_dev_attach(struct sock *sk, void *security) 2756 + { 2757 + return 0; 2758 + } 2759 + 2760 + static inline int security_tun_dev_open(void *security) 2764 2761 { 2765 2762 return 0; 2766 2763 }
+2
include/linux/usb.h
··· 357 357 int bandwidth_int_reqs; /* number of Interrupt requests */ 358 358 int bandwidth_isoc_reqs; /* number of Isoc. requests */ 359 359 360 + unsigned resuming_ports; /* bit array: resuming root-hub ports */ 361 + 360 362 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) 361 363 struct mon_bus *mon_bus; /* non-null when associated */ 362 364 int monitored; /* non-zero when monitored */
+3
include/linux/usb/hcd.h
··· 430 430 extern void usb_wakeup_notification(struct usb_device *hdev, 431 431 unsigned int portnum); 432 432 433 + extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum); 434 + extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum); 435 + 433 436 /* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */ 434 437 #define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1) 435 438 #define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep)))
+3
include/linux/usb/usbnet.h
··· 33 33 wait_queue_head_t *wait; 34 34 struct mutex phy_mutex; 35 35 unsigned char suspend_count; 36 + unsigned char pkt_cnt, pkt_err; 36 37 37 38 /* i/o info: pipes etc */ 38 39 unsigned in, out; ··· 71 70 # define EVENT_DEV_OPEN 7 72 71 # define EVENT_DEVICE_REPORT_IDLE 8 73 72 # define EVENT_NO_RUNTIME_PM 9 73 + # define EVENT_RX_KILL 10 74 74 }; 75 75 76 76 static inline struct usb_driver *driver_of(struct usb_interface *intf) ··· 109 107 */ 110 108 #define FLAG_MULTI_PACKET 0x2000 111 109 #define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */ 110 + #define FLAG_NOARP 0x8000 /* device can't do ARP */ 112 111 113 112 /* init device ... can sleep, or cause probe() failure */ 114 113 int (*bind)(struct usbnet *, struct usb_interface *);
+2
include/net/ip.h
··· 143 143 extern int ip4_datagram_connect(struct sock *sk, 144 144 struct sockaddr *uaddr, int addr_len); 145 145 146 + extern void ip4_datagram_release_cb(struct sock *sk); 147 + 146 148 struct ip_reply_arg { 147 149 struct kvec iov[1]; 148 150 int flags;
+2
include/net/netfilter/nf_conntrack_core.h
··· 31 31 extern int nf_conntrack_proto_init(struct net *net); 32 32 extern void nf_conntrack_proto_fini(struct net *net); 33 33 34 + extern void nf_conntrack_cleanup_end(void); 35 + 34 36 extern bool 35 37 nf_ct_get_tuple(const struct sk_buff *skb, 36 38 unsigned int nhoff,
+10 -10
include/net/transp_v6.h
··· 34 34 struct sockaddr *uaddr, 35 35 int addr_len); 36 36 37 - extern int datagram_recv_ctl(struct sock *sk, 38 - struct msghdr *msg, 39 - struct sk_buff *skb); 37 + extern int ip6_datagram_recv_ctl(struct sock *sk, 38 + struct msghdr *msg, 39 + struct sk_buff *skb); 40 40 41 - extern int datagram_send_ctl(struct net *net, 42 - struct sock *sk, 43 - struct msghdr *msg, 44 - struct flowi6 *fl6, 45 - struct ipv6_txoptions *opt, 46 - int *hlimit, int *tclass, 47 - int *dontfrag); 41 + extern int ip6_datagram_send_ctl(struct net *net, 42 + struct sock *sk, 43 + struct msghdr *msg, 44 + struct flowi6 *fl6, 45 + struct ipv6_txoptions *opt, 46 + int *hlimit, int *tclass, 47 + int *dontfrag); 48 48 49 49 #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) 50 50
+6
include/uapi/linux/usb/ch9.h
··· 152 152 #define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0)) 153 153 #define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1)) 154 154 155 + /* 156 + * Interface status, Figure 9-5 USB 3.0 spec 157 + */ 158 + #define USB_INTRF_STAT_FUNC_RW_CAP 1 159 + #define USB_INTRF_STAT_FUNC_RW 2 160 + 155 161 #define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ 156 162 157 163 /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */
+2 -2
init/main.c
··· 604 604 pidmap_init(); 605 605 anon_vma_init(); 606 606 #ifdef CONFIG_X86 607 - if (efi_enabled) 607 + if (efi_enabled(EFI_RUNTIME_SERVICES)) 608 608 efi_enter_virtual_mode(); 609 609 #endif 610 610 thread_info_cache_init(); ··· 632 632 acpi_early_init(); /* before LAPIC and SMP init */ 633 633 sfi_init_late(); 634 634 635 - if (efi_enabled) { 635 + if (efi_enabled(EFI_RUNTIME_SERVICES)) { 636 636 efi_late_init(); 637 637 efi_free_boot_services(); 638 638 }
+18 -2
kernel/events/core.c
··· 908 908 } 909 909 910 910 /* 911 + * Initialize event state based on the perf_event_attr::disabled. 912 + */ 913 + static inline void perf_event__state_init(struct perf_event *event) 914 + { 915 + event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : 916 + PERF_EVENT_STATE_INACTIVE; 917 + } 918 + 919 + /* 911 920 * Called at perf_event creation and when events are attached/detached from a 912 921 * group. 913 922 */ ··· 6188 6179 event->overflow_handler = overflow_handler; 6189 6180 event->overflow_handler_context = context; 6190 6181 6191 - if (attr->disabled) 6192 - event->state = PERF_EVENT_STATE_OFF; 6182 + perf_event__state_init(event); 6193 6183 6194 6184 pmu = NULL; 6195 6185 ··· 6617 6609 6618 6610 mutex_lock(&gctx->mutex); 6619 6611 perf_remove_from_context(group_leader); 6612 + 6613 + /* 6614 + * Removing from the context ends up with disabled 6615 + * event. What we want here is event in the initial 6616 + * startup state, ready to be add into new context. 6617 + */ 6618 + perf_event__state_init(group_leader); 6620 6619 list_for_each_entry(sibling, &group_leader->sibling_list, 6621 6620 group_entry) { 6622 6621 perf_remove_from_context(sibling); 6622 + perf_event__state_init(sibling); 6623 6623 put_ctx(gctx); 6624 6624 } 6625 6625 mutex_unlock(&gctx->mutex);
-9
kernel/printk.c
··· 87 87 struct console *console_drivers; 88 88 EXPORT_SYMBOL_GPL(console_drivers); 89 89 90 - #ifdef CONFIG_LOCKDEP 91 - static struct lockdep_map console_lock_dep_map = { 92 - .name = "console_lock" 93 - }; 94 - #endif 95 - 96 90 /* 97 91 * This is used for debugging the mess that is the VT code by 98 92 * keeping track if we have the console semaphore held. It's ··· 1918 1924 return; 1919 1925 console_locked = 1; 1920 1926 console_may_schedule = 1; 1921 - mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_); 1922 1927 } 1923 1928 EXPORT_SYMBOL(console_lock); 1924 1929 ··· 1939 1946 } 1940 1947 console_locked = 1; 1941 1948 console_may_schedule = 0; 1942 - mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_); 1943 1949 return 1; 1944 1950 } 1945 1951 EXPORT_SYMBOL(console_trylock); ··· 2099 2107 local_irq_restore(flags); 2100 2108 } 2101 2109 console_locked = 0; 2102 - mutex_release(&console_lock_dep_map, 1, _RET_IP_); 2103 2110 2104 2111 /* Release the exclusive_console once it is used */ 2105 2112 if (unlikely(exclusive_console))
+10 -3
kernel/rcutree_plugin.h
··· 40 40 #ifdef CONFIG_RCU_NOCB_CPU 41 41 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ 42 42 static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */ 43 - static bool rcu_nocb_poll; /* Offload kthread are to poll. */ 44 - module_param(rcu_nocb_poll, bool, 0444); 43 + static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */ 45 44 static char __initdata nocb_buf[NR_CPUS * 5]; 46 45 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ 47 46 ··· 2158 2159 } 2159 2160 __setup("rcu_nocbs=", rcu_nocb_setup); 2160 2161 2162 + static int __init parse_rcu_nocb_poll(char *arg) 2163 + { 2164 + rcu_nocb_poll = 1; 2165 + return 0; 2166 + } 2167 + early_param("rcu_nocb_poll", parse_rcu_nocb_poll); 2168 + 2161 2169 /* Is the specified CPU a no-CPUs CPU? */ 2162 2170 static bool is_nocb_cpu(int cpu) 2163 2171 { ··· 2372 2366 for (;;) { 2373 2367 /* If not polling, wait for next batch of callbacks. */ 2374 2368 if (!rcu_nocb_poll) 2375 - wait_event(rdp->nocb_wq, rdp->nocb_head); 2369 + wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head); 2376 2370 list = ACCESS_ONCE(rdp->nocb_head); 2377 2371 if (!list) { 2378 2372 schedule_timeout_interruptible(1); 2373 + flush_signals(current); 2379 2374 continue; 2380 2375 } 2381 2376
+2 -2
kernel/sched/debug.c
··· 222 222 cfs_rq->runnable_load_avg); 223 223 SEQ_printf(m, " .%-30s: %lld\n", "blocked_load_avg", 224 224 cfs_rq->blocked_load_avg); 225 - SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg", 226 - atomic64_read(&cfs_rq->tg->load_avg)); 225 + SEQ_printf(m, " .%-30s: %lld\n", "tg_load_avg", 226 + (unsigned long long)atomic64_read(&cfs_rq->tg->load_avg)); 227 227 SEQ_printf(m, " .%-30s: %lld\n", "tg_load_contrib", 228 228 cfs_rq->tg_load_contrib); 229 229 SEQ_printf(m, " .%-30s: %d\n", "tg_runnable_contrib",
+1 -1
kernel/sched/fair.c
··· 2663 2663 hrtimer_cancel(&cfs_b->slack_timer); 2664 2664 } 2665 2665 2666 - static void unthrottle_offline_cfs_rqs(struct rq *rq) 2666 + static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) 2667 2667 { 2668 2668 struct cfs_rq *cfs_rq; 2669 2669
+1 -1
kernel/sched/rt.c
··· 566 566 static int do_balance_runtime(struct rt_rq *rt_rq) 567 567 { 568 568 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 569 - struct root_domain *rd = cpu_rq(smp_processor_id())->rd; 569 + struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; 570 570 int i, weight, more = 0; 571 571 u64 rt_period; 572 572
+12 -1
kernel/smp.c
··· 33 33 struct call_single_data csd; 34 34 atomic_t refs; 35 35 cpumask_var_t cpumask; 36 + cpumask_var_t cpumask_ipi; 36 37 }; 37 38 38 39 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data); ··· 57 56 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, 58 57 cpu_to_node(cpu))) 59 58 return notifier_from_errno(-ENOMEM); 59 + if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL, 60 + cpu_to_node(cpu))) 61 + return notifier_from_errno(-ENOMEM); 60 62 break; 61 63 62 64 #ifdef CONFIG_HOTPLUG_CPU ··· 69 65 case CPU_DEAD: 70 66 case CPU_DEAD_FROZEN: 71 67 free_cpumask_var(cfd->cpumask); 68 + free_cpumask_var(cfd->cpumask_ipi); 72 69 break; 73 70 #endif 74 71 }; ··· 531 526 return; 532 527 } 533 528 529 + /* 530 + * After we put an entry into the list, data->cpumask 531 + * may be cleared again when another CPU sends another IPI for 532 + * a SMP function call, so data->cpumask will be zero. 533 + */ 534 + cpumask_copy(data->cpumask_ipi, data->cpumask); 534 535 raw_spin_lock_irqsave(&call_function.lock, flags); 535 536 /* 536 537 * Place entry at the _HEAD_ of the list, so that any cpu still ··· 560 549 smp_mb(); 561 550 562 551 /* Send a message to all CPUs in the map */ 563 - arch_send_call_function_ipi_mask(data->cpumask); 552 + arch_send_call_function_ipi_mask(data->cpumask_ipi); 564 553 565 554 /* Optionally wait for the CPUs to complete */ 566 555 if (wait)
+2
lib/digsig.c
··· 162 162 memset(out1, 0, head); 163 163 memcpy(out1 + head, p, l); 164 164 165 + kfree(p); 166 + 165 167 err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len); 166 168 if (err) 167 169 goto err;
+4
mm/huge_memory.c
··· 1257 1257 if (flags & FOLL_WRITE && !pmd_write(*pmd)) 1258 1258 goto out; 1259 1259 1260 + /* Avoid dumping huge zero page */ 1261 + if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) 1262 + return ERR_PTR(-EFAULT); 1263 + 1260 1264 page = pmd_page(*pmd); 1261 1265 VM_BUG_ON(!PageHead(page)); 1262 1266 if (flags & FOLL_TOUCH) {
+1
mm/hugetlb.c
··· 3033 3033 if (!huge_pte_none(huge_ptep_get(ptep))) { 3034 3034 pte = huge_ptep_get_and_clear(mm, address, ptep); 3035 3035 pte = pte_mkhuge(pte_modify(pte, newprot)); 3036 + pte = arch_make_huge_pte(pte, vma, NULL, 0); 3036 3037 set_huge_pte_at(mm, address, ptep, pte); 3037 3038 pages++; 3038 3039 }
+3 -1
mm/migrate.c
··· 160 160 if (is_write_migration_entry(entry)) 161 161 pte = pte_mkwrite(pte); 162 162 #ifdef CONFIG_HUGETLB_PAGE 163 - if (PageHuge(new)) 163 + if (PageHuge(new)) { 164 164 pte = pte_mkhuge(pte); 165 + pte = arch_make_huge_pte(pte, vma, new, 0); 166 + } 165 167 #endif 166 168 flush_cache_page(vma, addr, pte_pfn(pte)); 167 169 set_pte_at(mm, addr, ptep, pte);
+1 -1
mm/mmap.c
··· 2943 2943 * vma in this mm is backed by the same anon_vma or address_space. 2944 2944 * 2945 2945 * We can take all the locks in random order because the VM code 2946 - * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never 2946 + * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never 2947 2947 * takes more than one of them in a row. Secondly we're protected 2948 2948 * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex. 2949 2949 *
+18 -1
net/batman-adv/distributed-arp-table.c
··· 738 738 struct arphdr *arphdr; 739 739 struct ethhdr *ethhdr; 740 740 __be32 ip_src, ip_dst; 741 + uint8_t *hw_src, *hw_dst; 741 742 uint16_t type = 0; 742 743 743 744 /* pull the ethernet header */ ··· 778 777 ip_src = batadv_arp_ip_src(skb, hdr_size); 779 778 ip_dst = batadv_arp_ip_dst(skb, hdr_size); 780 779 if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) || 781 - ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst)) 780 + ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst) || 781 + ipv4_is_zeronet(ip_src) || ipv4_is_lbcast(ip_src) || 782 + ipv4_is_zeronet(ip_dst) || ipv4_is_lbcast(ip_dst)) 782 783 goto out; 784 + 785 + hw_src = batadv_arp_hw_src(skb, hdr_size); 786 + if (is_zero_ether_addr(hw_src) || is_multicast_ether_addr(hw_src)) 787 + goto out; 788 + 789 + /* we don't care about the destination MAC address in ARP requests */ 790 + if (arphdr->ar_op != htons(ARPOP_REQUEST)) { 791 + hw_dst = batadv_arp_hw_dst(skb, hdr_size); 792 + if (is_zero_ether_addr(hw_dst) || 793 + is_multicast_ether_addr(hw_dst)) 794 + goto out; 795 + } 783 796 784 797 type = ntohs(arphdr->ar_op); 785 798 out: ··· 1027 1012 */ 1028 1013 ret = !batadv_is_my_client(bat_priv, hw_dst); 1029 1014 out: 1015 + if (ret) 1016 + kfree_skb(skb); 1030 1017 /* if ret == false -> packet has to be delivered to the interface */ 1031 1018 return ret; 1032 1019 }
+3 -3
net/bluetooth/hci_conn.c
··· 249 249 __u8 reason = hci_proto_disconn_ind(conn); 250 250 251 251 switch (conn->type) { 252 - case ACL_LINK: 253 - hci_acl_disconn(conn, reason); 254 - break; 255 252 case AMP_LINK: 256 253 hci_amp_disconn(conn, reason); 254 + break; 255 + default: 256 + hci_acl_disconn(conn, reason); 257 257 break; 258 258 } 259 259 }
-8
net/bluetooth/hci_core.c
··· 2810 2810 if (conn) { 2811 2811 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); 2812 2812 2813 - hci_dev_lock(hdev); 2814 - if (test_bit(HCI_MGMT, &hdev->dev_flags) && 2815 - !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 2816 - mgmt_device_connected(hdev, &conn->dst, conn->type, 2817 - conn->dst_type, 0, NULL, 0, 2818 - conn->dev_class); 2819 - hci_dev_unlock(hdev); 2820 - 2821 2813 /* Send to upper protocol */ 2822 2814 l2cap_recv_acldata(conn, skb, flags); 2823 2815 return;
+1 -1
net/bluetooth/hci_event.c
··· 2688 2688 if (ev->opcode != HCI_OP_NOP) 2689 2689 del_timer(&hdev->cmd_timer); 2690 2690 2691 - if (ev->ncmd) { 2691 + if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { 2692 2692 atomic_set(&hdev->cmd_cnt, 1); 2693 2693 if (!skb_queue_empty(&hdev->cmd_q)) 2694 2694 queue_work(hdev->workqueue, &hdev->cmd_work);
+1 -1
net/bluetooth/hidp/core.c
··· 931 931 hid->version = req->version; 932 932 hid->country = req->country; 933 933 934 - strncpy(hid->name, req->name, 128); 934 + strncpy(hid->name, req->name, sizeof(req->name) - 1); 935 935 936 936 snprintf(hid->phys, sizeof(hid->phys), "%pMR", 937 937 &bt_sk(session->ctrl_sock->sk)->src);
+11
net/bluetooth/l2cap_core.c
··· 3727 3727 static int l2cap_connect_req(struct l2cap_conn *conn, 3728 3728 struct l2cap_cmd_hdr *cmd, u8 *data) 3729 3729 { 3730 + struct hci_dev *hdev = conn->hcon->hdev; 3731 + struct hci_conn *hcon = conn->hcon; 3732 + 3733 + hci_dev_lock(hdev); 3734 + if (test_bit(HCI_MGMT, &hdev->dev_flags) && 3735 + !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags)) 3736 + mgmt_device_connected(hdev, &hcon->dst, hcon->type, 3737 + hcon->dst_type, 0, NULL, 0, 3738 + hcon->dev_class); 3739 + hci_dev_unlock(hdev); 3740 + 3730 3741 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0); 3731 3742 return 0; 3732 3743 }
+1 -1
net/bluetooth/sco.c
··· 352 352 353 353 case BT_CONNECTED: 354 354 case BT_CONFIG: 355 - if (sco_pi(sk)->conn) { 355 + if (sco_pi(sk)->conn->hcon) { 356 356 sk->sk_state = BT_DISCONN; 357 357 sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT); 358 358 hci_conn_put(sco_pi(sk)->conn->hcon);
+13
net/bluetooth/smp.c
··· 859 859 860 860 skb_pull(skb, sizeof(code)); 861 861 862 + /* 863 + * The SMP context must be initialized for all other PDUs except 864 + * pairing and security requests. If we get any other PDU when 865 + * not initialized simply disconnect (done if this function 866 + * returns an error). 867 + */ 868 + if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ && 869 + !conn->smp_chan) { 870 + BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code); 871 + kfree_skb(skb); 872 + return -ENOTSUPP; 873 + } 874 + 862 875 switch (code) { 863 876 case SMP_CMD_PAIRING_REQ: 864 877 reason = smp_cmd_pairing_req(conn, skb);
+6 -3
net/core/pktgen.c
··· 1781 1781 return -EFAULT; 1782 1782 i += len; 1783 1783 mutex_lock(&pktgen_thread_lock); 1784 - pktgen_add_device(t, f); 1784 + ret = pktgen_add_device(t, f); 1785 1785 mutex_unlock(&pktgen_thread_lock); 1786 - ret = count; 1787 - sprintf(pg_result, "OK: add_device=%s", f); 1786 + if (!ret) { 1787 + ret = count; 1788 + sprintf(pg_result, "OK: add_device=%s", f); 1789 + } else 1790 + sprintf(pg_result, "ERROR: can not add device %s", f); 1788 1791 goto out; 1789 1792 } 1790 1793
-2
net/core/request_sock.c
··· 186 186 struct fastopen_queue *fastopenq = 187 187 inet_csk(lsk)->icsk_accept_queue.fastopenq; 188 188 189 - BUG_ON(!spin_is_locked(&sk->sk_lock.slock) && !sock_owned_by_user(sk)); 190 - 191 189 tcp_sk(sk)->fastopen_rsk = NULL; 192 190 spin_lock_bh(&fastopenq->lock); 193 191 fastopenq->qlen--;
+4 -1
net/core/scm.c
··· 35 35 #include <net/sock.h> 36 36 #include <net/compat.h> 37 37 #include <net/scm.h> 38 + #include <net/cls_cgroup.h> 38 39 39 40 40 41 /* ··· 303 302 } 304 303 /* Bump the usage count and install the file. */ 305 304 sock = sock_from_file(fp[i], &err); 306 - if (sock) 305 + if (sock) { 307 306 sock_update_netprioidx(sock->sk, current); 307 + sock_update_classid(sock->sk, current); 308 + } 308 309 fd_install(new_fd, get_file(fp[i])); 309 310 } 310 311
+14 -32
net/core/skbuff.c
··· 683 683 new->network_header = old->network_header; 684 684 new->mac_header = old->mac_header; 685 685 new->inner_transport_header = old->inner_transport_header; 686 - new->inner_network_header = old->inner_transport_header; 686 + new->inner_network_header = old->inner_network_header; 687 687 skb_dst_copy(new, old); 688 688 new->rxhash = old->rxhash; 689 689 new->ooo_okay = old->ooo_okay; ··· 1649 1649 1650 1650 static struct page *linear_to_page(struct page *page, unsigned int *len, 1651 1651 unsigned int *offset, 1652 - struct sk_buff *skb, struct sock *sk) 1652 + struct sock *sk) 1653 1653 { 1654 1654 struct page_frag *pfrag = sk_page_frag(sk); 1655 1655 ··· 1682 1682 static bool spd_fill_page(struct splice_pipe_desc *spd, 1683 1683 struct pipe_inode_info *pipe, struct page *page, 1684 1684 unsigned int *len, unsigned int offset, 1685 - struct sk_buff *skb, bool linear, 1685 + bool linear, 1686 1686 struct sock *sk) 1687 1687 { 1688 1688 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 1689 1689 return true; 1690 1690 1691 1691 if (linear) { 1692 - page = linear_to_page(page, len, &offset, skb, sk); 1692 + page = linear_to_page(page, len, &offset, sk); 1693 1693 if (!page) 1694 1694 return true; 1695 1695 } ··· 1706 1706 return false; 1707 1707 } 1708 1708 1709 - static inline void __segment_seek(struct page **page, unsigned int *poff, 1710 - unsigned int *plen, unsigned int off) 1711 - { 1712 - unsigned long n; 1713 - 1714 - *poff += off; 1715 - n = *poff / PAGE_SIZE; 1716 - if (n) 1717 - *page = nth_page(*page, n); 1718 - 1719 - *poff = *poff % PAGE_SIZE; 1720 - *plen -= off; 1721 - } 1722 - 1723 1709 static bool __splice_segment(struct page *page, unsigned int poff, 1724 1710 unsigned int plen, unsigned int *off, 1725 - unsigned int *len, struct sk_buff *skb, 1711 + unsigned int *len, 1726 1712 struct splice_pipe_desc *spd, bool linear, 1727 1713 struct sock *sk, 1728 1714 struct pipe_inode_info *pipe) ··· 1723 1737 } 1724 1738 1725 1739 /* ignore any bits we 
already processed */ 1726 - if (*off) { 1727 - __segment_seek(&page, &poff, &plen, *off); 1728 - *off = 0; 1729 - } 1740 + poff += *off; 1741 + plen -= *off; 1742 + *off = 0; 1730 1743 1731 1744 do { 1732 1745 unsigned int flen = min(*len, plen); 1733 1746 1734 - /* the linear region may spread across several pages */ 1735 - flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 1736 - 1737 - if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk)) 1747 + if (spd_fill_page(spd, pipe, page, &flen, poff, 1748 + linear, sk)) 1738 1749 return true; 1739 - 1740 - __segment_seek(&page, &poff, &plen, flen); 1750 + poff += flen; 1751 + plen -= flen; 1741 1752 *len -= flen; 1742 - 1743 1753 } while (*len && plen); 1744 1754 1745 1755 return false; ··· 1759 1777 if (__splice_segment(virt_to_page(skb->data), 1760 1778 (unsigned long) skb->data & (PAGE_SIZE - 1), 1761 1779 skb_headlen(skb), 1762 - offset, len, skb, spd, 1780 + offset, len, spd, 1763 1781 skb_head_is_locked(skb), 1764 1782 sk, pipe)) 1765 1783 return true; ··· 1772 1790 1773 1791 if (__splice_segment(skb_frag_page(f), 1774 1792 f->page_offset, skb_frag_size(f), 1775 - offset, len, skb, spd, false, sk, pipe)) 1793 + offset, len, spd, false, sk, pipe)) 1776 1794 return true; 1777 1795 } 1778 1796
+14 -4
net/ipv4/ah4.c
··· 269 269 skb->network_header += ah_hlen; 270 270 memcpy(skb_network_header(skb), work_iph, ihl); 271 271 __skb_pull(skb, ah_hlen + ihl); 272 - skb_set_transport_header(skb, -ihl); 272 + 273 + if (x->props.mode == XFRM_MODE_TUNNEL) 274 + skb_reset_transport_header(skb); 275 + else 276 + skb_set_transport_header(skb, -ihl); 273 277 out: 274 278 kfree(AH_SKB_CB(skb)->tmp); 275 279 xfrm_input_resume(skb, err); ··· 385 381 skb->network_header += ah_hlen; 386 382 memcpy(skb_network_header(skb), work_iph, ihl); 387 383 __skb_pull(skb, ah_hlen + ihl); 388 - skb_set_transport_header(skb, -ihl); 384 + if (x->props.mode == XFRM_MODE_TUNNEL) 385 + skb_reset_transport_header(skb); 386 + else 387 + skb_set_transport_header(skb, -ihl); 389 388 390 389 err = nexthdr; 391 390 ··· 420 413 if (!x) 421 414 return; 422 415 423 - if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) 416 + if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) { 417 + atomic_inc(&flow_cache_genid); 418 + rt_genid_bump(net); 419 + 424 420 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0); 425 - else 421 + } else 426 422 ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0); 427 423 xfrm_state_put(x); 428 424 }
+25
net/ipv4/datagram.c
··· 85 85 return err; 86 86 } 87 87 EXPORT_SYMBOL(ip4_datagram_connect); 88 + 89 + void ip4_datagram_release_cb(struct sock *sk) 90 + { 91 + const struct inet_sock *inet = inet_sk(sk); 92 + const struct ip_options_rcu *inet_opt; 93 + __be32 daddr = inet->inet_daddr; 94 + struct flowi4 fl4; 95 + struct rtable *rt; 96 + 97 + if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0)) 98 + return; 99 + 100 + rcu_read_lock(); 101 + inet_opt = rcu_dereference(inet->inet_opt); 102 + if (inet_opt && inet_opt->opt.srr) 103 + daddr = inet_opt->opt.faddr; 104 + rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr, 105 + inet->inet_saddr, inet->inet_dport, 106 + inet->inet_sport, sk->sk_protocol, 107 + RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); 108 + if (!IS_ERR(rt)) 109 + __sk_dst_set(sk, &rt->dst); 110 + rcu_read_unlock(); 111 + } 112 + EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
+9 -3
net/ipv4/esp4.c
··· 346 346 347 347 pskb_trim(skb, skb->len - alen - padlen - 2); 348 348 __skb_pull(skb, hlen); 349 - skb_set_transport_header(skb, -ihl); 349 + if (x->props.mode == XFRM_MODE_TUNNEL) 350 + skb_reset_transport_header(skb); 351 + else 352 + skb_set_transport_header(skb, -ihl); 350 353 351 354 err = nexthdr[1]; 352 355 ··· 502 499 if (!x) 503 500 return; 504 501 505 - if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) 502 + if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) { 503 + atomic_inc(&flow_cache_genid); 504 + rt_genid_bump(net); 505 + 506 506 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0); 507 - else 507 + } else 508 508 ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0); 509 509 xfrm_state_put(x); 510 510 }
+5 -1
net/ipv4/ip_gre.c
··· 963 963 ptr--; 964 964 } 965 965 if (tunnel->parms.o_flags&GRE_CSUM) { 966 + int offset = skb_transport_offset(skb); 967 + 966 968 *ptr = 0; 967 - *(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr)); 969 + *(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset, 970 + skb->len - offset, 971 + 0)); 968 972 } 969 973 } 970 974
+5 -2
net/ipv4/ipcomp.c
··· 47 47 if (!x) 48 48 return; 49 49 50 - if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) 50 + if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) { 51 + atomic_inc(&flow_cache_genid); 52 + rt_genid_bump(net); 53 + 51 54 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0); 52 - else 55 + } else 53 56 ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0); 54 57 xfrm_state_put(x); 55 58 }
+1
net/ipv4/ping.c
··· 738 738 .recvmsg = ping_recvmsg, 739 739 .bind = ping_bind, 740 740 .backlog_rcv = ping_queue_rcv_skb, 741 + .release_cb = ip4_datagram_release_cb, 741 742 .hash = ping_v4_hash, 742 743 .unhash = ping_v4_unhash, 743 744 .get_port = ping_v4_get_port,
+1
net/ipv4/raw.c
··· 894 894 .recvmsg = raw_recvmsg, 895 895 .bind = raw_bind, 896 896 .backlog_rcv = raw_rcv_skb, 897 + .release_cb = ip4_datagram_release_cb, 897 898 .hash = raw_hash_sk, 898 899 .unhash = raw_unhash_sk, 899 900 .obj_size = sizeof(struct raw_sock),
+52 -2
net/ipv4/route.c
··· 912 912 struct dst_entry *dst = &rt->dst; 913 913 struct fib_result res; 914 914 915 + if (dst_metric_locked(dst, RTAX_MTU)) 916 + return; 917 + 915 918 if (dst->dev->mtu < mtu) 916 919 return; 917 920 ··· 965 962 } 966 963 EXPORT_SYMBOL_GPL(ipv4_update_pmtu); 967 964 968 - void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) 965 + static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) 969 966 { 970 967 const struct iphdr *iph = (const struct iphdr *) skb->data; 971 968 struct flowi4 fl4; ··· 977 974 __ip_rt_update_pmtu(rt, &fl4, mtu); 978 975 ip_rt_put(rt); 979 976 } 977 + } 978 + 979 + void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) 980 + { 981 + const struct iphdr *iph = (const struct iphdr *) skb->data; 982 + struct flowi4 fl4; 983 + struct rtable *rt; 984 + struct dst_entry *dst; 985 + bool new = false; 986 + 987 + bh_lock_sock(sk); 988 + rt = (struct rtable *) __sk_dst_get(sk); 989 + 990 + if (sock_owned_by_user(sk) || !rt) { 991 + __ipv4_sk_update_pmtu(skb, sk, mtu); 992 + goto out; 993 + } 994 + 995 + __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); 996 + 997 + if (!__sk_dst_check(sk, 0)) { 998 + rt = ip_route_output_flow(sock_net(sk), &fl4, sk); 999 + if (IS_ERR(rt)) 1000 + goto out; 1001 + 1002 + new = true; 1003 + } 1004 + 1005 + __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu); 1006 + 1007 + dst = dst_check(&rt->dst, 0); 1008 + if (!dst) { 1009 + if (new) 1010 + dst_release(&rt->dst); 1011 + 1012 + rt = ip_route_output_flow(sock_net(sk), &fl4, sk); 1013 + if (IS_ERR(rt)) 1014 + goto out; 1015 + 1016 + new = true; 1017 + } 1018 + 1019 + if (new) 1020 + __sk_dst_set(sk, &rt->dst); 1021 + 1022 + out: 1023 + bh_unlock_sock(sk); 980 1024 } 981 1025 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu); 982 1026 ··· 1170 1120 if (!mtu || time_after_eq(jiffies, rt->dst.expires)) 1171 1121 mtu = dst_metric_raw(dst, RTAX_MTU); 1172 1122 1173 - if (mtu && rt_is_output_route(rt)) 1123 + if 
(mtu) 1174 1124 return mtu; 1175 1125 1176 1126 mtu = dst->dev->mtu;
+10 -4
net/ipv4/tcp_cong.c
··· 310 310 { 311 311 int cnt; /* increase in packets */ 312 312 unsigned int delta = 0; 313 + u32 snd_cwnd = tp->snd_cwnd; 314 + 315 + if (unlikely(!snd_cwnd)) { 316 + pr_err_once("snd_cwnd is nul, please report this bug.\n"); 317 + snd_cwnd = 1U; 318 + } 313 319 314 320 /* RFC3465: ABC Slow start 315 321 * Increase only after a full MSS of bytes is acked ··· 330 324 if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh) 331 325 cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */ 332 326 else 333 - cnt = tp->snd_cwnd; /* exponential increase */ 327 + cnt = snd_cwnd; /* exponential increase */ 334 328 335 329 /* RFC3465: ABC 336 330 * We MAY increase by 2 if discovered delayed ack ··· 340 334 tp->bytes_acked = 0; 341 335 342 336 tp->snd_cwnd_cnt += cnt; 343 - while (tp->snd_cwnd_cnt >= tp->snd_cwnd) { 344 - tp->snd_cwnd_cnt -= tp->snd_cwnd; 337 + while (tp->snd_cwnd_cnt >= snd_cwnd) { 338 + tp->snd_cwnd_cnt -= snd_cwnd; 345 339 delta++; 346 340 } 347 - tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp); 341 + tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp); 348 342 } 349 343 EXPORT_SYMBOL_GPL(tcp_slow_start); 350 344
+6 -2
net/ipv4/tcp_input.c
··· 3504 3504 } 3505 3505 } else { 3506 3506 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { 3507 + if (!tcp_packets_in_flight(tp)) { 3508 + tcp_enter_frto_loss(sk, 2, flag); 3509 + return true; 3510 + } 3511 + 3507 3512 /* Prevent sending of new data. */ 3508 3513 tp->snd_cwnd = min(tp->snd_cwnd, 3509 3514 tcp_packets_in_flight(tp)); ··· 5654 5649 * the remote receives only the retransmitted (regular) SYNs: either 5655 5650 * the original SYN-data or the corresponding SYN-ACK is lost. 5656 5651 */ 5657 - syn_drop = (cookie->len <= 0 && data && 5658 - inet_csk(sk)->icsk_retransmits); 5652 + syn_drop = (cookie->len <= 0 && data && tp->total_retrans); 5659 5653 5660 5654 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); 5661 5655
+9 -6
net/ipv4/tcp_ipv4.c
··· 369 369 * We do take care of PMTU discovery (RFC1191) special case : 370 370 * we can receive locally generated ICMP messages while socket is held. 371 371 */ 372 - if (sock_owned_by_user(sk) && 373 - type != ICMP_DEST_UNREACH && 374 - code != ICMP_FRAG_NEEDED) 375 - NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); 376 - 372 + if (sock_owned_by_user(sk)) { 373 + if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)) 374 + NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); 375 + } 377 376 if (sk->sk_state == TCP_CLOSE) 378 377 goto out; 379 378 ··· 496 497 * errors returned from accept(). 497 498 */ 498 499 inet_csk_reqsk_queue_drop(sk, req, prev); 500 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 499 501 goto out; 500 502 501 503 case TCP_SYN_SENT: ··· 1501 1501 * clogging syn queue with openreqs with exponentially increasing 1502 1502 * timeout. 1503 1503 */ 1504 - if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 1504 + if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { 1505 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); 1505 1506 goto drop; 1507 + } 1506 1508 1507 1509 req = inet_reqsk_alloc(&tcp_request_sock_ops); 1508 1510 if (!req) ··· 1669 1667 drop_and_free: 1670 1668 reqsk_free(req); 1671 1669 drop: 1670 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1672 1671 return 0; 1673 1672 } 1674 1673 EXPORT_SYMBOL(tcp_v4_conn_request);
+1
net/ipv4/udp.c
··· 1952 1952 .recvmsg = udp_recvmsg, 1953 1953 .sendpage = udp_sendpage, 1954 1954 .backlog_rcv = __udp_queue_rcv_skb, 1955 + .release_cb = ip4_datagram_release_cb, 1955 1956 .hash = udp_lib_hash, 1956 1957 .unhash = udp_lib_unhash, 1957 1958 .rehash = udp_v4_rehash,
+1
net/ipv6/addrconf.c
··· 1660 1660 if (dev->addr_len != IEEE802154_ADDR_LEN) 1661 1661 return -1; 1662 1662 memcpy(eui, dev->dev_addr, 8); 1663 + eui[0] ^= 2; 1663 1664 return 0; 1664 1665 } 1665 1666
+9 -2
net/ipv6/ah6.c
··· 472 472 skb->network_header += ah_hlen; 473 473 memcpy(skb_network_header(skb), work_iph, hdr_len); 474 474 __skb_pull(skb, ah_hlen + hdr_len); 475 - skb_set_transport_header(skb, -hdr_len); 475 + if (x->props.mode == XFRM_MODE_TUNNEL) 476 + skb_reset_transport_header(skb); 477 + else 478 + skb_set_transport_header(skb, -hdr_len); 476 479 out: 477 480 kfree(AH_SKB_CB(skb)->tmp); 478 481 xfrm_input_resume(skb, err); ··· 596 593 597 594 skb->network_header += ah_hlen; 598 595 memcpy(skb_network_header(skb), work_iph, hdr_len); 599 - skb->transport_header = skb->network_header; 600 596 __skb_pull(skb, ah_hlen + hdr_len); 597 + 598 + if (x->props.mode == XFRM_MODE_TUNNEL) 599 + skb_reset_transport_header(skb); 600 + else 601 + skb_set_transport_header(skb, -hdr_len); 601 602 602 603 err = nexthdr; 603 604
+9 -7
net/ipv6/datagram.c
··· 380 380 if (skb->protocol == htons(ETH_P_IPV6)) { 381 381 sin->sin6_addr = ipv6_hdr(skb)->saddr; 382 382 if (np->rxopt.all) 383 - datagram_recv_ctl(sk, msg, skb); 383 + ip6_datagram_recv_ctl(sk, msg, skb); 384 384 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) 385 385 sin->sin6_scope_id = IP6CB(skb)->iif; 386 386 } else { ··· 468 468 } 469 469 470 470 471 - int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) 471 + int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, 472 + struct sk_buff *skb) 472 473 { 473 474 struct ipv6_pinfo *np = inet6_sk(sk); 474 475 struct inet6_skb_parm *opt = IP6CB(skb); ··· 598 597 } 599 598 return 0; 600 599 } 600 + EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl); 601 601 602 - int datagram_send_ctl(struct net *net, struct sock *sk, 603 - struct msghdr *msg, struct flowi6 *fl6, 604 - struct ipv6_txoptions *opt, 605 - int *hlimit, int *tclass, int *dontfrag) 602 + int ip6_datagram_send_ctl(struct net *net, struct sock *sk, 603 + struct msghdr *msg, struct flowi6 *fl6, 604 + struct ipv6_txoptions *opt, 605 + int *hlimit, int *tclass, int *dontfrag) 606 606 { 607 607 struct in6_pktinfo *src_info; 608 608 struct cmsghdr *cmsg; ··· 873 871 exit_f: 874 872 return err; 875 873 } 876 - EXPORT_SYMBOL_GPL(datagram_send_ctl); 874 + EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
+4 -1
net/ipv6/esp6.c
··· 300 300 301 301 pskb_trim(skb, skb->len - alen - padlen - 2); 302 302 __skb_pull(skb, hlen); 303 - skb_set_transport_header(skb, -hdr_len); 303 + if (x->props.mode == XFRM_MODE_TUNNEL) 304 + skb_reset_transport_header(skb); 305 + else 306 + skb_set_transport_header(skb, -hdr_len); 304 307 305 308 err = nexthdr[1]; 306 309
+12
net/ipv6/icmp.c
··· 81 81 return net->ipv6.icmp_sk[smp_processor_id()]; 82 82 } 83 83 84 + static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 85 + u8 type, u8 code, int offset, __be32 info) 86 + { 87 + struct net *net = dev_net(skb->dev); 88 + 89 + if (type == ICMPV6_PKT_TOOBIG) 90 + ip6_update_pmtu(skb, net, info, 0, 0); 91 + else if (type == NDISC_REDIRECT) 92 + ip6_redirect(skb, net, 0, 0); 93 + } 94 + 84 95 static int icmpv6_rcv(struct sk_buff *skb); 85 96 86 97 static const struct inet6_protocol icmpv6_protocol = { 87 98 .handler = icmpv6_rcv, 99 + .err_handler = icmpv6_err, 88 100 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 89 101 }; 90 102
+2 -2
net/ipv6/ip6_flowlabel.c
··· 365 365 msg.msg_control = (void*)(fl->opt+1); 366 366 memset(&flowi6, 0, sizeof(flowi6)); 367 367 368 - err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, 369 - &junk, &junk); 368 + err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, 369 + &junk, &junk, &junk); 370 370 if (err) 371 371 goto done; 372 372 err = -EINVAL;
+1 -1
net/ipv6/ip6_gre.c
··· 960 960 int ret; 961 961 962 962 if (!ip6_tnl_xmit_ctl(t)) 963 - return -1; 963 + goto tx_err; 964 964 965 965 switch (skb->protocol) { 966 966 case htons(ETH_P_IP):
+2 -2
net/ipv6/ip6_output.c
··· 1213 1213 if (dst_allfrag(rt->dst.path)) 1214 1214 cork->flags |= IPCORK_ALLFRAG; 1215 1215 cork->length = 0; 1216 - exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len; 1216 + exthdrlen = (opt ? opt->opt_flen : 0); 1217 1217 length += exthdrlen; 1218 1218 transhdrlen += exthdrlen; 1219 - dst_exthdrlen = rt->dst.header_len; 1219 + dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len; 1220 1220 } else { 1221 1221 rt = (struct rt6_info *)cork->dst; 1222 1222 fl6 = &inet->cork.fl.u.ip6;
+3
net/ipv6/ip6mr.c
··· 1710 1710 return -EINVAL; 1711 1711 if (get_user(v, (u32 __user *)optval)) 1712 1712 return -EFAULT; 1713 + /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */ 1714 + if (v != RT_TABLE_DEFAULT && v >= 100000000) 1715 + return -EINVAL; 1713 1716 if (sk == mrt->mroute6_sk) 1714 1717 return -EBUSY; 1715 1718
+3 -3
net/ipv6/ipv6_sockglue.c
··· 476 476 msg.msg_controllen = optlen; 477 477 msg.msg_control = (void*)(opt+1); 478 478 479 - retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, 480 - &junk); 479 + retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, 480 + &junk, &junk); 481 481 if (retv) 482 482 goto done; 483 483 update: ··· 1002 1002 release_sock(sk); 1003 1003 1004 1004 if (skb) { 1005 - int err = datagram_recv_ctl(sk, &msg, skb); 1005 + int err = ip6_datagram_recv_ctl(sk, &msg, skb); 1006 1006 kfree_skb(skb); 1007 1007 if (err) 1008 1008 return err;
+3 -3
net/ipv6/raw.c
··· 507 507 sock_recv_ts_and_drops(msg, sk, skb); 508 508 509 509 if (np->rxopt.all) 510 - datagram_recv_ctl(sk, msg, skb); 510 + ip6_datagram_recv_ctl(sk, msg, skb); 511 511 512 512 err = copied; 513 513 if (flags & MSG_TRUNC) ··· 822 822 memset(opt, 0, sizeof(struct ipv6_txoptions)); 823 823 opt->tot_len = sizeof(struct ipv6_txoptions); 824 824 825 - err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 826 - &hlimit, &tclass, &dontfrag); 825 + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 826 + &hlimit, &tclass, &dontfrag); 827 827 if (err < 0) { 828 828 fl6_sock_release(flowlabel); 829 829 return err;
+1 -1
net/ipv6/route.c
··· 928 928 dst_hold(&rt->dst); 929 929 read_unlock_bh(&table->tb6_lock); 930 930 931 - if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP)) 931 + if (!rt->n && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL))) 932 932 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); 933 933 else if (!(rt->dst.flags & DST_HOST)) 934 934 nrt = rt6_alloc_clone(rt, &fl6->daddr);
+5 -1
net/ipv6/tcp_ipv6.c
··· 423 423 } 424 424 425 425 inet_csk_reqsk_queue_drop(sk, req, prev); 426 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 426 427 goto out; 427 428 428 429 case TCP_SYN_SENT: ··· 959 958 goto drop; 960 959 } 961 960 962 - if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 961 + if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { 962 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); 963 963 goto drop; 964 + } 964 965 965 966 req = inet6_reqsk_alloc(&tcp6_request_sock_ops); 966 967 if (req == NULL) ··· 1111 1108 drop_and_free: 1112 1109 reqsk_free(req); 1113 1110 drop: 1111 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1114 1112 return 0; /* don't send reset */ 1115 1113 } 1116 1114
+3 -3
net/ipv6/udp.c
··· 443 443 ip_cmsg_recv(msg, skb); 444 444 } else { 445 445 if (np->rxopt.all) 446 - datagram_recv_ctl(sk, msg, skb); 446 + ip6_datagram_recv_ctl(sk, msg, skb); 447 447 } 448 448 449 449 err = copied; ··· 1153 1153 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1154 1154 opt->tot_len = sizeof(*opt); 1155 1155 1156 - err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 1157 - &hlimit, &tclass, &dontfrag); 1156 + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 1157 + &hlimit, &tclass, &dontfrag); 1158 1158 if (err < 0) { 1159 1159 fl6_sock_release(flowlabel); 1160 1160 return err;
+65 -17
net/l2tp/l2tp_core.c
··· 168 168 169 169 } 170 170 171 + /* Lookup the tunnel socket, possibly involving the fs code if the socket is 172 + * owned by userspace. A struct sock returned from this function must be 173 + * released using l2tp_tunnel_sock_put once you're done with it. 174 + */ 175 + struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel) 176 + { 177 + int err = 0; 178 + struct socket *sock = NULL; 179 + struct sock *sk = NULL; 180 + 181 + if (!tunnel) 182 + goto out; 183 + 184 + if (tunnel->fd >= 0) { 185 + /* Socket is owned by userspace, who might be in the process 186 + * of closing it. Look the socket up using the fd to ensure 187 + * consistency. 188 + */ 189 + sock = sockfd_lookup(tunnel->fd, &err); 190 + if (sock) 191 + sk = sock->sk; 192 + } else { 193 + /* Socket is owned by kernelspace */ 194 + sk = tunnel->sock; 195 + } 196 + 197 + out: 198 + return sk; 199 + } 200 + EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup); 201 + 202 + /* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */ 203 + void l2tp_tunnel_sock_put(struct sock *sk) 204 + { 205 + struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); 206 + if (tunnel) { 207 + if (tunnel->fd >= 0) { 208 + /* Socket is owned by userspace */ 209 + sockfd_put(sk->sk_socket); 210 + } 211 + sock_put(sk); 212 + } 213 + } 214 + EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); 215 + 171 216 /* Lookup a session by id in the global session list 172 217 */ 173 218 static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) ··· 1168 1123 struct udphdr *uh; 1169 1124 struct inet_sock *inet; 1170 1125 __wsum csum; 1171 - int old_headroom; 1172 - int new_headroom; 1173 1126 int headroom; 1174 1127 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? 
sizeof(struct udphdr) : 0; 1175 1128 int udp_len; ··· 1179 1136 */ 1180 1137 headroom = NET_SKB_PAD + sizeof(struct iphdr) + 1181 1138 uhlen + hdr_len; 1182 - old_headroom = skb_headroom(skb); 1183 1139 if (skb_cow_head(skb, headroom)) { 1184 1140 kfree_skb(skb); 1185 1141 return NET_XMIT_DROP; 1186 1142 } 1187 1143 1188 - new_headroom = skb_headroom(skb); 1189 1144 skb_orphan(skb); 1190 - skb->truesize += new_headroom - old_headroom; 1191 - 1192 1145 /* Setup L2TP header */ 1193 1146 session->build_header(session, __skb_push(skb, hdr_len)); 1194 1147 ··· 1646 1607 tunnel->old_sk_destruct = sk->sk_destruct; 1647 1608 sk->sk_destruct = &l2tp_tunnel_destruct; 1648 1609 tunnel->sock = sk; 1610 + tunnel->fd = fd; 1649 1611 lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); 1650 1612 1651 1613 sk->sk_allocation = GFP_ATOMIC; ··· 1682 1642 */ 1683 1643 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) 1684 1644 { 1685 - int err = 0; 1686 - struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL; 1645 + int err = -EBADF; 1646 + struct socket *sock = NULL; 1647 + struct sock *sk = NULL; 1648 + 1649 + sk = l2tp_tunnel_sock_lookup(tunnel); 1650 + if (!sk) 1651 + goto out; 1652 + 1653 + sock = sk->sk_socket; 1654 + BUG_ON(!sock); 1687 1655 1688 1656 /* Force the tunnel socket to close. This will eventually 1689 1657 * cause the tunnel to be deleted via the normal socket close 1690 1658 * mechanisms when userspace closes the tunnel socket. 1691 1659 */ 1692 - if (sock != NULL) { 1693 - err = inet_shutdown(sock, 2); 1660 + err = inet_shutdown(sock, 2); 1694 1661 1695 - /* If the tunnel's socket was created by the kernel, 1696 - * close the socket here since the socket was not 1697 - * created by userspace. 1698 - */ 1699 - if (sock->file == NULL) 1700 - err = inet_release(sock); 1701 - } 1662 + /* If the tunnel's socket was created by the kernel, 1663 + * close the socket here since the socket was not 1664 + * created by userspace. 
1665 + */ 1666 + if (sock->file == NULL) 1667 + err = inet_release(sock); 1702 1668 1669 + l2tp_tunnel_sock_put(sk); 1670 + out: 1703 1671 return err; 1704 1672 } 1705 1673 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
+4 -1
net/l2tp/l2tp_core.h
··· 188 188 int (*recv_payload_hook)(struct sk_buff *skb); 189 189 void (*old_sk_destruct)(struct sock *); 190 190 struct sock *sock; /* Parent socket */ 191 - int fd; 191 + int fd; /* Parent fd, if tunnel socket 192 + * was created by userspace */ 192 193 193 194 uint8_t priv[0]; /* private data */ 194 195 }; ··· 229 228 return tunnel; 230 229 } 231 230 231 + extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel); 232 + extern void l2tp_tunnel_sock_put(struct sock *sk); 232 233 extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); 233 234 extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); 234 235 extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+5 -5
net/l2tp/l2tp_ip6.c
··· 554 554 memset(opt, 0, sizeof(struct ipv6_txoptions)); 555 555 opt->tot_len = sizeof(struct ipv6_txoptions); 556 556 557 - err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 558 - &hlimit, &tclass, &dontfrag); 557 + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 558 + &hlimit, &tclass, &dontfrag); 559 559 if (err < 0) { 560 560 fl6_sock_release(flowlabel); 561 561 return err; ··· 646 646 struct msghdr *msg, size_t len, int noblock, 647 647 int flags, int *addr_len) 648 648 { 649 - struct inet_sock *inet = inet_sk(sk); 649 + struct ipv6_pinfo *np = inet6_sk(sk); 650 650 struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name; 651 651 size_t copied = 0; 652 652 int err = -EOPNOTSUPP; ··· 688 688 lsa->l2tp_scope_id = IP6CB(skb)->iif; 689 689 } 690 690 691 - if (inet->cmsg_flags) 692 - ip_cmsg_recv(msg, skb); 691 + if (np->rxopt.all) 692 + ip6_datagram_recv_ctl(sk, msg, skb); 693 693 694 694 if (flags & MSG_TRUNC) 695 695 copied = skb->len;
-6
net/l2tp/l2tp_ppp.c
··· 388 388 struct l2tp_session *session; 389 389 struct l2tp_tunnel *tunnel; 390 390 struct pppol2tp_session *ps; 391 - int old_headroom; 392 - int new_headroom; 393 391 int uhlen, headroom; 394 392 395 393 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) ··· 406 408 if (tunnel == NULL) 407 409 goto abort_put_sess; 408 410 409 - old_headroom = skb_headroom(skb); 410 411 uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; 411 412 headroom = NET_SKB_PAD + 412 413 sizeof(struct iphdr) + /* IP header */ ··· 414 417 sizeof(ppph); /* PPP header */ 415 418 if (skb_cow_head(skb, headroom)) 416 419 goto abort_put_sess_tun; 417 - 418 - new_headroom = skb_headroom(skb); 419 - skb->truesize += new_headroom - old_headroom; 420 420 421 421 /* Setup PPP header */ 422 422 __skb_push(skb, sizeof(ppph));
+11 -1
net/mac80211/cfg.c
··· 164 164 sta = sta_info_get(sdata, mac_addr); 165 165 else 166 166 sta = sta_info_get_bss(sdata, mac_addr); 167 - if (!sta) { 167 + /* 168 + * The ASSOC test makes sure the driver is ready to 169 + * receive the key. When wpa_supplicant has roamed 170 + * using FT, it attempts to set the key before 171 + * association has completed, this rejects that attempt 172 + * so it will set the key again after assocation. 173 + * 174 + * TODO: accept the key if we have a station entry and 175 + * add it to the device after the station. 176 + */ 177 + if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) { 168 178 ieee80211_key_free(sdata->local, key); 169 179 err = -ENOENT; 170 180 goto out_unlock;
+2 -4
net/mac80211/ieee80211_i.h
··· 1358 1358 void ieee80211_sched_scan_stopped_work(struct work_struct *work); 1359 1359 1360 1360 /* off-channel helpers */ 1361 - void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, 1362 - bool offchannel_ps_enable); 1363 - void ieee80211_offchannel_return(struct ieee80211_local *local, 1364 - bool offchannel_ps_disable); 1361 + void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local); 1362 + void ieee80211_offchannel_return(struct ieee80211_local *local); 1365 1363 void ieee80211_roc_setup(struct ieee80211_local *local); 1366 1364 void ieee80211_start_next_roc(struct ieee80211_local *local); 1367 1365 void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
+4 -1
net/mac80211/mesh_hwmp.c
··· 215 215 skb->priority = 7; 216 216 217 217 info->control.vif = &sdata->vif; 218 + info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 218 219 ieee80211_set_qos_hdr(sdata, skb); 219 220 } 220 221 ··· 247 246 return -EAGAIN; 248 247 249 248 skb = dev_alloc_skb(local->tx_headroom + 249 + IEEE80211_ENCRYPT_HEADROOM + 250 + IEEE80211_ENCRYPT_TAILROOM + 250 251 hdr_len + 251 252 2 + 15 /* PERR IE */); 252 253 if (!skb) 253 254 return -1; 254 - skb_reserve(skb, local->tx_headroom); 255 + skb_reserve(skb, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM); 255 256 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); 256 257 memset(mgmt, 0, hdr_len); 257 258 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+7 -12
net/mac80211/offchannel.c
··· 102 102 ieee80211_sta_reset_conn_monitor(sdata); 103 103 } 104 104 105 - void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local, 106 - bool offchannel_ps_enable) 105 + void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local) 107 106 { 108 107 struct ieee80211_sub_if_data *sdata; 109 108 ··· 133 134 134 135 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { 135 136 netif_tx_stop_all_queues(sdata->dev); 136 - if (offchannel_ps_enable && 137 - (sdata->vif.type == NL80211_IFTYPE_STATION) && 137 + if (sdata->vif.type == NL80211_IFTYPE_STATION && 138 138 sdata->u.mgd.associated) 139 139 ieee80211_offchannel_ps_enable(sdata); 140 140 } ··· 141 143 mutex_unlock(&local->iflist_mtx); 142 144 } 143 145 144 - void ieee80211_offchannel_return(struct ieee80211_local *local, 145 - bool offchannel_ps_disable) 146 + void ieee80211_offchannel_return(struct ieee80211_local *local) 146 147 { 147 148 struct ieee80211_sub_if_data *sdata; 148 149 ··· 160 163 continue; 161 164 162 165 /* Tell AP we're back */ 163 - if (offchannel_ps_disable && 164 - sdata->vif.type == NL80211_IFTYPE_STATION) { 165 - if (sdata->u.mgd.associated) 166 - ieee80211_offchannel_ps_disable(sdata); 167 - } 166 + if (sdata->vif.type == NL80211_IFTYPE_STATION && 167 + sdata->u.mgd.associated) 168 + ieee80211_offchannel_ps_disable(sdata); 168 169 169 170 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) { 170 171 /* ··· 380 385 local->tmp_channel = NULL; 381 386 ieee80211_hw_config(local, 0); 382 387 383 - ieee80211_offchannel_return(local, true); 388 + ieee80211_offchannel_return(local); 384 389 } 385 390 386 391 ieee80211_recalc_idle(local);
+5 -10
net/mac80211/scan.c
··· 292 292 if (!was_hw_scan) { 293 293 ieee80211_configure_filter(local); 294 294 drv_sw_scan_complete(local); 295 - ieee80211_offchannel_return(local, true); 295 + ieee80211_offchannel_return(local); 296 296 } 297 297 298 298 ieee80211_recalc_idle(local); ··· 341 341 local->next_scan_state = SCAN_DECISION; 342 342 local->scan_channel_idx = 0; 343 343 344 - ieee80211_offchannel_stop_vifs(local, true); 344 + ieee80211_offchannel_stop_vifs(local); 345 345 346 346 ieee80211_configure_filter(local); 347 347 ··· 678 678 local->scan_channel = NULL; 679 679 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 680 680 681 - /* 682 - * Re-enable vifs and beaconing. Leave PS 683 - * in off-channel state..will put that back 684 - * on-channel at the end of scanning. 685 - */ 686 - ieee80211_offchannel_return(local, false); 681 + /* disable PS */ 682 + ieee80211_offchannel_return(local); 687 683 688 684 *next_delay = HZ / 5; 689 685 /* afterwards, resume scan & go to next channel */ ··· 689 693 static void ieee80211_scan_state_resume(struct ieee80211_local *local, 690 694 unsigned long *next_delay) 691 695 { 692 - /* PS already is in off-channel mode */ 693 - ieee80211_offchannel_stop_vifs(local, false); 696 + ieee80211_offchannel_stop_vifs(local); 694 697 695 698 if (local->ops->flush) { 696 699 drv_flush(local, false);
+6 -3
net/mac80211/tx.c
··· 1673 1673 chanctx_conf = 1674 1674 rcu_dereference(tmp_sdata->vif.chanctx_conf); 1675 1675 } 1676 - if (!chanctx_conf) 1677 - goto fail_rcu; 1678 1676 1679 - chan = chanctx_conf->def.chan; 1677 + if (chanctx_conf) 1678 + chan = chanctx_conf->def.chan; 1679 + else if (!local->use_chanctx) 1680 + chan = local->_oper_channel; 1681 + else 1682 + goto fail_rcu; 1680 1683 1681 1684 /* 1682 1685 * Frame injection is not allowed if beaconing is not allowed
+5 -4
net/netfilter/nf_conntrack_core.c
··· 1376 1376 synchronize_net(); 1377 1377 nf_conntrack_proto_fini(net); 1378 1378 nf_conntrack_cleanup_net(net); 1379 + } 1379 1380 1380 - if (net_eq(net, &init_net)) { 1381 - RCU_INIT_POINTER(nf_ct_destroy, NULL); 1382 - nf_conntrack_cleanup_init_net(); 1383 - } 1381 + void nf_conntrack_cleanup_end(void) 1382 + { 1383 + RCU_INIT_POINTER(nf_ct_destroy, NULL); 1384 + nf_conntrack_cleanup_init_net(); 1384 1385 } 1385 1386 1386 1387 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
+1
net/netfilter/nf_conntrack_standalone.c
··· 575 575 static void __exit nf_conntrack_standalone_fini(void) 576 576 { 577 577 unregister_pernet_subsys(&nf_conntrack_net_ops); 578 + nf_conntrack_cleanup_end(); 578 579 } 579 580 580 581 module_init(nf_conntrack_standalone_init);
+20 -8
net/netfilter/x_tables.c
··· 345 345 } 346 346 EXPORT_SYMBOL_GPL(xt_find_revision); 347 347 348 - static char *textify_hooks(char *buf, size_t size, unsigned int mask) 348 + static char * 349 + textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto) 349 350 { 350 - static const char *const names[] = { 351 + static const char *const inetbr_names[] = { 351 352 "PREROUTING", "INPUT", "FORWARD", 352 353 "OUTPUT", "POSTROUTING", "BROUTING", 353 354 }; 354 - unsigned int i; 355 + static const char *const arp_names[] = { 356 + "INPUT", "FORWARD", "OUTPUT", 357 + }; 358 + const char *const *names; 359 + unsigned int i, max; 355 360 char *p = buf; 356 361 bool np = false; 357 362 int res; 358 363 364 + names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names; 365 + max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) : 366 + ARRAY_SIZE(inetbr_names); 359 367 *p = '\0'; 360 - for (i = 0; i < ARRAY_SIZE(names); ++i) { 368 + for (i = 0; i < max; ++i) { 361 369 if (!(mask & (1 << i))) 362 370 continue; 363 371 res = snprintf(p, size, "%s%s", np ? 
"/" : "", names[i]); ··· 410 402 pr_err("%s_tables: %s match: used from hooks %s, but only " 411 403 "valid from %s\n", 412 404 xt_prefix[par->family], par->match->name, 413 - textify_hooks(used, sizeof(used), par->hook_mask), 414 - textify_hooks(allow, sizeof(allow), par->match->hooks)); 405 + textify_hooks(used, sizeof(used), par->hook_mask, 406 + par->family), 407 + textify_hooks(allow, sizeof(allow), par->match->hooks, 408 + par->family)); 415 409 return -EINVAL; 416 410 } 417 411 if (par->match->proto && (par->match->proto != proto || inv_proto)) { ··· 585 575 pr_err("%s_tables: %s target: used from hooks %s, but only " 586 576 "usable from %s\n", 587 577 xt_prefix[par->family], par->target->name, 588 - textify_hooks(used, sizeof(used), par->hook_mask), 589 - textify_hooks(allow, sizeof(allow), par->target->hooks)); 578 + textify_hooks(used, sizeof(used), par->hook_mask, 579 + par->family), 580 + textify_hooks(allow, sizeof(allow), par->target->hooks, 581 + par->family)); 590 582 return -EINVAL; 591 583 } 592 584 if (par->target->proto && (par->target->proto != proto || inv_proto)) {
+2 -2
net/netfilter/xt_CT.c
··· 109 109 struct xt_ct_target_info *info = par->targinfo; 110 110 struct nf_conntrack_tuple t; 111 111 struct nf_conn *ct; 112 - int ret; 112 + int ret = -EOPNOTSUPP; 113 113 114 114 if (info->flags & ~XT_CT_NOTRACK) 115 115 return -EINVAL; ··· 247 247 struct xt_ct_target_info_v1 *info = par->targinfo; 248 248 struct nf_conntrack_tuple t; 249 249 struct nf_conn *ct; 250 - int ret; 250 + int ret = -EOPNOTSUPP; 251 251 252 252 if (info->flags & ~XT_CT_NOTRACK) 253 253 return -EINVAL;
+9 -7
net/openvswitch/vport-netdev.c
··· 35 35 /* Must be called with rcu_read_lock. */ 36 36 static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) 37 37 { 38 - if (unlikely(!vport)) { 39 - kfree_skb(skb); 40 - return; 41 - } 38 + if (unlikely(!vport)) 39 + goto error; 40 + 41 + if (unlikely(skb_warn_if_lro(skb))) 42 + goto error; 42 43 43 44 /* Make our own copy of the packet. Otherwise we will mangle the 44 45 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). ··· 51 50 52 51 skb_push(skb, ETH_HLEN); 53 52 ovs_vport_receive(vport, skb); 53 + return; 54 + 55 + error: 56 + kfree_skb(skb); 54 57 } 55 58 56 59 /* Called with rcu_read_lock and bottom-halves disabled. */ ··· 173 168 packet_length(skb), mtu); 174 169 goto error; 175 170 } 176 - 177 - if (unlikely(skb_warn_if_lro(skb))) 178 - goto error; 179 171 180 172 skb->dev = netdev_vport->dev; 181 173 len = skb->len;
+6 -4
net/packet/af_packet.c
··· 2361 2361 2362 2362 packet_flush_mclist(sk); 2363 2363 2364 - memset(&req_u, 0, sizeof(req_u)); 2365 - 2366 - if (po->rx_ring.pg_vec) 2364 + if (po->rx_ring.pg_vec) { 2365 + memset(&req_u, 0, sizeof(req_u)); 2367 2366 packet_set_ring(sk, &req_u, 1, 0); 2367 + } 2368 2368 2369 - if (po->tx_ring.pg_vec) 2369 + if (po->tx_ring.pg_vec) { 2370 + memset(&req_u, 0, sizeof(req_u)); 2370 2371 packet_set_ring(sk, &req_u, 1, 1); 2372 + } 2371 2373 2372 2374 fanout_release(sk); 2373 2375
+6 -6
net/sched/sch_netem.c
··· 438 438 if (q->rate) { 439 439 struct sk_buff_head *list = &sch->q; 440 440 441 - delay += packet_len_2_sched_time(skb->len, q); 442 - 443 441 if (!skb_queue_empty(list)) { 444 442 /* 445 - * Last packet in queue is reference point (now). 446 - * First packet in queue is already in flight, 447 - * calculate this time bonus and substract 443 + * Last packet in queue is reference point (now), 444 + * calculate this time bonus and subtract 448 445 * from delay. 449 446 */ 450 - delay -= now - netem_skb_cb(skb_peek(list))->time_to_send; 447 + delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now; 448 + delay = max_t(psched_tdiff_t, 0, delay); 451 449 now = netem_skb_cb(skb_peek_tail(list))->time_to_send; 452 450 } 451 + 452 + delay += packet_len_2_sched_time(skb->len, q); 453 453 } 454 454 455 455 cb->time_to_send = now + delay;
+1 -1
net/sctp/auth.c
··· 71 71 return; 72 72 73 73 if (atomic_dec_and_test(&key->refcnt)) { 74 - kfree(key); 74 + kzfree(key); 75 75 SCTP_DBG_OBJCNT_DEC(keys); 76 76 } 77 77 }
+5
net/sctp/endpointola.c
··· 249 249 /* Final destructor for endpoint. */ 250 250 static void sctp_endpoint_destroy(struct sctp_endpoint *ep) 251 251 { 252 + int i; 253 + 252 254 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); 253 255 254 256 /* Free up the HMAC transform. */ ··· 272 270 /* Cleanup. */ 273 271 sctp_inq_free(&ep->base.inqueue); 274 272 sctp_bind_addr_free(&ep->base.bind_addr); 273 + 274 + for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i) 275 + memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE); 275 276 276 277 /* Remove and free the port */ 277 278 if (sctp_sk(ep->base.sk)->bind_hash)
+8 -4
net/sctp/outqueue.c
··· 224 224 225 225 /* Free the outqueue structure and any related pending chunks. 226 226 */ 227 - void sctp_outq_teardown(struct sctp_outq *q) 227 + static void __sctp_outq_teardown(struct sctp_outq *q) 228 228 { 229 229 struct sctp_transport *transport; 230 230 struct list_head *lchunk, *temp; ··· 277 277 sctp_chunk_free(chunk); 278 278 } 279 279 280 - q->error = 0; 281 - 282 280 /* Throw away any leftover control chunks. */ 283 281 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { 284 282 list_del_init(&chunk->list); ··· 284 286 } 285 287 } 286 288 289 + void sctp_outq_teardown(struct sctp_outq *q) 290 + { 291 + __sctp_outq_teardown(q); 292 + sctp_outq_init(q->asoc, q); 293 + } 294 + 287 295 /* Free the outqueue structure and any related pending chunks. */ 288 296 void sctp_outq_free(struct sctp_outq *q) 289 297 { 290 298 /* Throw away leftover chunks. */ 291 - sctp_outq_teardown(q); 299 + __sctp_outq_teardown(q); 292 300 293 301 /* If we were kmalloc()'d, free the memory. */ 294 302 if (q->malloced)
+3 -1
net/sctp/sm_statefuns.c
··· 1779 1779 1780 1780 /* Update the content of current association. */ 1781 1781 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); 1782 - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); 1783 1782 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); 1783 + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 1784 + SCTP_STATE(SCTP_STATE_ESTABLISHED)); 1785 + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); 1784 1786 return SCTP_DISPOSITION_CONSUME; 1785 1787 1786 1788 nomem_ev:
+1 -1
net/sctp/socket.c
··· 3390 3390 3391 3391 ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); 3392 3392 out: 3393 - kfree(authkey); 3393 + kzfree(authkey); 3394 3394 return ret; 3395 3395 } 3396 3396
+4
net/sctp/sysctl.c
··· 366 366 367 367 void sctp_sysctl_net_unregister(struct net *net) 368 368 { 369 + struct ctl_table *table; 370 + 371 + table = net->sctp.sysctl_header->ctl_table_arg; 369 372 unregister_net_sysctl_table(net->sctp.sysctl_header); 373 + kfree(table); 370 374 } 371 375 372 376 static struct ctl_table_header * sctp_sysctl_header;
+17 -1
net/sunrpc/sched.c
··· 98 98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); 99 99 } 100 100 101 + static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue) 102 + { 103 + struct list_head *q = &queue->tasks[queue->priority]; 104 + struct rpc_task *task; 105 + 106 + if (!list_empty(q)) { 107 + task = list_first_entry(q, struct rpc_task, u.tk_wait.list); 108 + if (task->tk_owner == queue->owner) 109 + list_move_tail(&task->u.tk_wait.list, q); 110 + } 111 + } 112 + 101 113 static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) 102 114 { 103 - queue->priority = priority; 115 + if (queue->priority != priority) { 116 + /* Fairness: rotate the list when changing priority */ 117 + rpc_rotate_queue_owner(queue); 118 + queue->priority = priority; 119 + } 104 120 } 105 121 106 122 static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
+1 -1
net/sunrpc/svcsock.c
··· 465 465 } 466 466 467 467 /* 468 - * See net/ipv6/datagram.c : datagram_recv_ctl 468 + * See net/ipv6/datagram.c : ip6_datagram_recv_ctl 469 469 */ 470 470 static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, 471 471 struct cmsghdr *cmh)
+1 -1
net/wireless/scan.c
··· 1358 1358 &iwe, IW_EV_UINT_LEN); 1359 1359 } 1360 1360 1361 - buf = kmalloc(30, GFP_ATOMIC); 1361 + buf = kmalloc(31, GFP_ATOMIC); 1362 1362 if (buf) { 1363 1363 memset(&iwe, 0, sizeof(iwe)); 1364 1364 iwe.cmd = IWEVCUSTOM;
+1 -1
net/xfrm/xfrm_policy.c
··· 2656 2656 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir])); 2657 2657 2658 2658 htab = &net->xfrm.policy_bydst[dir]; 2659 - sz = (htab->hmask + 1); 2659 + sz = (htab->hmask + 1) * sizeof(struct hlist_head); 2660 2660 WARN_ON(!hlist_empty(htab->table)); 2661 2661 xfrm_hash_free(htab->table, sz); 2662 2662 }
+3 -1
net/xfrm/xfrm_replay.c
··· 242 242 u32 diff; 243 243 struct xfrm_replay_state_esn *replay_esn = x->replay_esn; 244 244 u32 seq = ntohl(net_seq); 245 - u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window; 245 + u32 pos; 246 246 247 247 if (!replay_esn->replay_window) 248 248 return; 249 + 250 + pos = (replay_esn->seq - 1) % replay_esn->replay_window; 249 251 250 252 if (seq > replay_esn->seq) { 251 253 diff = seq - replay_esn->seq;
+2
samples/seccomp/Makefile
··· 19 19 20 20 # Try to match the kernel target. 21 21 ifndef CONFIG_64BIT 22 + ifndef CROSS_COMPILE 22 23 23 24 # s390 has -m31 flag to build 31 bit binaries 24 25 ifndef CONFIG_S390 ··· 35 34 HOSTLOADLIBES_bpf-direct += $(MFLAG) 36 35 HOSTLOADLIBES_bpf-fancy += $(MFLAG) 37 36 HOSTLOADLIBES_dropper += $(MFLAG) 37 + endif 38 38 endif 39 39 40 40 # Tell kbuild to always build the programs
+5 -5
scripts/checkpatch.pl
··· 230 230 our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]}; 231 231 our $Lval = qr{$Ident(?:$Member)*}; 232 232 233 - our $Float_hex = qr{(?i:0x[0-9a-f]+p-?[0-9]+[fl]?)}; 234 - our $Float_dec = qr{(?i:((?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?))}; 235 - our $Float_int = qr{(?i:[0-9]+e-?[0-9]+[fl]?)}; 233 + our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?}; 234 + our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?}; 235 + our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?}; 236 236 our $Float = qr{$Float_hex|$Float_dec|$Float_int}; 237 - our $Constant = qr{(?:$Float|(?i:(?:0x[0-9a-f]+|[0-9]+)[ul]*))}; 238 - our $Assignment = qr{(?:\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=)}; 237 + our $Constant = qr{$Float|(?i)(?:0x[0-9a-f]+|[0-9]+)[ul]*}; 238 + our $Assignment = qr{\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=}; 239 239 our $Compare = qr{<=|>=|==|!=|<|>}; 240 240 our $Operators = qr{ 241 241 <=|>=|==|!=|
+21 -3
security/capability.c
··· 709 709 { 710 710 } 711 711 712 + static int cap_tun_dev_alloc_security(void **security) 713 + { 714 + return 0; 715 + } 716 + 717 + static void cap_tun_dev_free_security(void *security) 718 + { 719 + } 720 + 712 721 static int cap_tun_dev_create(void) 713 722 { 714 723 return 0; 715 724 } 716 725 717 - static void cap_tun_dev_post_create(struct sock *sk) 726 + static int cap_tun_dev_attach_queue(void *security) 718 727 { 728 + return 0; 719 729 } 720 730 721 - static int cap_tun_dev_attach(struct sock *sk) 731 + static int cap_tun_dev_attach(struct sock *sk, void *security) 732 + { 733 + return 0; 734 + } 735 + 736 + static int cap_tun_dev_open(void *security) 722 737 { 723 738 return 0; 724 739 } ··· 1065 1050 set_to_cap_if_null(ops, secmark_refcount_inc); 1066 1051 set_to_cap_if_null(ops, secmark_refcount_dec); 1067 1052 set_to_cap_if_null(ops, req_classify_flow); 1053 + set_to_cap_if_null(ops, tun_dev_alloc_security); 1054 + set_to_cap_if_null(ops, tun_dev_free_security); 1068 1055 set_to_cap_if_null(ops, tun_dev_create); 1069 - set_to_cap_if_null(ops, tun_dev_post_create); 1056 + set_to_cap_if_null(ops, tun_dev_open); 1057 + set_to_cap_if_null(ops, tun_dev_attach_queue); 1070 1058 set_to_cap_if_null(ops, tun_dev_attach); 1071 1059 #endif /* CONFIG_SECURITY_NETWORK */ 1072 1060 #ifdef CONFIG_SECURITY_NETWORK_XFRM
+23 -5
security/security.c
··· 1254 1254 } 1255 1255 EXPORT_SYMBOL(security_secmark_refcount_dec); 1256 1256 1257 + int security_tun_dev_alloc_security(void **security) 1258 + { 1259 + return security_ops->tun_dev_alloc_security(security); 1260 + } 1261 + EXPORT_SYMBOL(security_tun_dev_alloc_security); 1262 + 1263 + void security_tun_dev_free_security(void *security) 1264 + { 1265 + security_ops->tun_dev_free_security(security); 1266 + } 1267 + EXPORT_SYMBOL(security_tun_dev_free_security); 1268 + 1257 1269 int security_tun_dev_create(void) 1258 1270 { 1259 1271 return security_ops->tun_dev_create(); 1260 1272 } 1261 1273 EXPORT_SYMBOL(security_tun_dev_create); 1262 1274 1263 - void security_tun_dev_post_create(struct sock *sk) 1275 + int security_tun_dev_attach_queue(void *security) 1264 1276 { 1265 - return security_ops->tun_dev_post_create(sk); 1277 + return security_ops->tun_dev_attach_queue(security); 1266 1278 } 1267 - EXPORT_SYMBOL(security_tun_dev_post_create); 1279 + EXPORT_SYMBOL(security_tun_dev_attach_queue); 1268 1280 1269 - int security_tun_dev_attach(struct sock *sk) 1281 + int security_tun_dev_attach(struct sock *sk, void *security) 1270 1282 { 1271 - return security_ops->tun_dev_attach(sk); 1283 + return security_ops->tun_dev_attach(sk, security); 1272 1284 } 1273 1285 EXPORT_SYMBOL(security_tun_dev_attach); 1286 + 1287 + int security_tun_dev_open(void *security) 1288 + { 1289 + return security_ops->tun_dev_open(security); 1290 + } 1291 + EXPORT_SYMBOL(security_tun_dev_open); 1274 1292 1275 1293 #endif /* CONFIG_SECURITY_NETWORK */ 1276 1294
+39 -11
security/selinux/hooks.c
··· 4399 4399 fl->flowi_secid = req->secid; 4400 4400 } 4401 4401 4402 + static int selinux_tun_dev_alloc_security(void **security) 4403 + { 4404 + struct tun_security_struct *tunsec; 4405 + 4406 + tunsec = kzalloc(sizeof(*tunsec), GFP_KERNEL); 4407 + if (!tunsec) 4408 + return -ENOMEM; 4409 + tunsec->sid = current_sid(); 4410 + 4411 + *security = tunsec; 4412 + return 0; 4413 + } 4414 + 4415 + static void selinux_tun_dev_free_security(void *security) 4416 + { 4417 + kfree(security); 4418 + } 4419 + 4402 4420 static int selinux_tun_dev_create(void) 4403 4421 { 4404 4422 u32 sid = current_sid(); ··· 4432 4414 NULL); 4433 4415 } 4434 4416 4435 - static void selinux_tun_dev_post_create(struct sock *sk) 4417 + static int selinux_tun_dev_attach_queue(void *security) 4436 4418 { 4419 + struct tun_security_struct *tunsec = security; 4420 + 4421 + return avc_has_perm(current_sid(), tunsec->sid, SECCLASS_TUN_SOCKET, 4422 + TUN_SOCKET__ATTACH_QUEUE, NULL); 4423 + } 4424 + 4425 + static int selinux_tun_dev_attach(struct sock *sk, void *security) 4426 + { 4427 + struct tun_security_struct *tunsec = security; 4437 4428 struct sk_security_struct *sksec = sk->sk_security; 4438 4429 4439 4430 /* we don't currently perform any NetLabel based labeling here and it ··· 4452 4425 * cause confusion to the TUN user that had no idea network labeling 4453 4426 * protocols were being used */ 4454 4427 4455 - /* see the comments in selinux_tun_dev_create() about why we don't use 4456 - * the sockcreate SID here */ 4457 - 4458 - sksec->sid = current_sid(); 4428 + sksec->sid = tunsec->sid; 4459 4429 sksec->sclass = SECCLASS_TUN_SOCKET; 4430 + 4431 + return 0; 4460 4432 } 4461 4433 4462 - static int selinux_tun_dev_attach(struct sock *sk) 4434 + static int selinux_tun_dev_open(void *security) 4463 4435 { 4464 - struct sk_security_struct *sksec = sk->sk_security; 4436 + struct tun_security_struct *tunsec = security; 4465 4437 u32 sid = current_sid(); 4466 4438 int err; 4467 4439 4468 - err = 
avc_has_perm(sid, sksec->sid, SECCLASS_TUN_SOCKET, 4440 + err = avc_has_perm(sid, tunsec->sid, SECCLASS_TUN_SOCKET, 4469 4441 TUN_SOCKET__RELABELFROM, NULL); 4470 4442 if (err) 4471 4443 return err; ··· 4472 4446 TUN_SOCKET__RELABELTO, NULL); 4473 4447 if (err) 4474 4448 return err; 4475 - 4476 - sksec->sid = sid; 4449 + tunsec->sid = sid; 4477 4450 4478 4451 return 0; 4479 4452 } ··· 5667 5642 .secmark_refcount_inc = selinux_secmark_refcount_inc, 5668 5643 .secmark_refcount_dec = selinux_secmark_refcount_dec, 5669 5644 .req_classify_flow = selinux_req_classify_flow, 5645 + .tun_dev_alloc_security = selinux_tun_dev_alloc_security, 5646 + .tun_dev_free_security = selinux_tun_dev_free_security, 5670 5647 .tun_dev_create = selinux_tun_dev_create, 5671 - .tun_dev_post_create = selinux_tun_dev_post_create, 5648 + .tun_dev_attach_queue = selinux_tun_dev_attach_queue, 5672 5649 .tun_dev_attach = selinux_tun_dev_attach, 5650 + .tun_dev_open = selinux_tun_dev_open, 5673 5651 5674 5652 #ifdef CONFIG_SECURITY_NETWORK_XFRM 5675 5653 .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc,
+1 -1
security/selinux/include/classmap.h
··· 150 150 NULL } }, 151 151 { "kernel_service", { "use_as_override", "create_files_as", NULL } }, 152 152 { "tun_socket", 153 - { COMMON_SOCK_PERMS, NULL } }, 153 + { COMMON_SOCK_PERMS, "attach_queue", NULL } }, 154 154 { NULL } 155 155 };
+4
security/selinux/include/objsec.h
··· 110 110 u16 sclass; /* sock security class */ 111 111 }; 112 112 113 + struct tun_security_struct { 114 + u32 sid; /* SID for the tun device sockets */ 115 + }; 116 + 113 117 struct key_security_struct { 114 118 u32 sid; /* SID of key */ 115 119 };
+30 -19
sound/pci/hda/hda_intel.c
··· 656 656 #define get_azx_dev(substream) (substream->runtime->private_data) 657 657 658 658 #ifdef CONFIG_X86 659 - static void __mark_pages_wc(struct azx *chip, void *addr, size_t size, bool on) 659 + static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on) 660 660 { 661 + int pages; 662 + 661 663 if (azx_snoop(chip)) 662 664 return; 663 - if (addr && size) { 664 - int pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 665 + if (!dmab || !dmab->area || !dmab->bytes) 666 + return; 667 + 668 + #ifdef CONFIG_SND_DMA_SGBUF 669 + if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) { 670 + struct snd_sg_buf *sgbuf = dmab->private_data; 665 671 if (on) 666 - set_memory_wc((unsigned long)addr, pages); 672 + set_pages_array_wc(sgbuf->page_table, sgbuf->pages); 667 673 else 668 - set_memory_wb((unsigned long)addr, pages); 674 + set_pages_array_wb(sgbuf->page_table, sgbuf->pages); 675 + return; 669 676 } 677 + #endif 678 + 679 + pages = (dmab->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT; 680 + if (on) 681 + set_memory_wc((unsigned long)dmab->area, pages); 682 + else 683 + set_memory_wb((unsigned long)dmab->area, pages); 670 684 } 671 685 672 686 static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf, 673 687 bool on) 674 688 { 675 - __mark_pages_wc(chip, buf->area, buf->bytes, on); 689 + __mark_pages_wc(chip, buf, on); 676 690 } 677 691 static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev, 678 - struct snd_pcm_runtime *runtime, bool on) 692 + struct snd_pcm_substream *substream, bool on) 679 693 { 680 694 if (azx_dev->wc_marked != on) { 681 - __mark_pages_wc(chip, runtime->dma_area, runtime->dma_bytes, on); 695 + __mark_pages_wc(chip, snd_pcm_get_dma_buf(substream), on); 682 696 azx_dev->wc_marked = on; 683 697 } 684 698 } ··· 703 689 { 704 690 } 705 691 static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev, 706 - struct snd_pcm_runtime *runtime, bool on) 692 + struct snd_pcm_substream 
*substream, bool on) 707 693 { 708 694 } 709 695 #endif ··· 1982 1968 { 1983 1969 struct azx_pcm *apcm = snd_pcm_substream_chip(substream); 1984 1970 struct azx *chip = apcm->chip; 1985 - struct snd_pcm_runtime *runtime = substream->runtime; 1986 1971 struct azx_dev *azx_dev = get_azx_dev(substream); 1987 1972 int ret; 1988 1973 1989 - mark_runtime_wc(chip, azx_dev, runtime, false); 1974 + mark_runtime_wc(chip, azx_dev, substream, false); 1990 1975 azx_dev->bufsize = 0; 1991 1976 azx_dev->period_bytes = 0; 1992 1977 azx_dev->format_val = 0; ··· 1993 1980 params_buffer_bytes(hw_params)); 1994 1981 if (ret < 0) 1995 1982 return ret; 1996 - mark_runtime_wc(chip, azx_dev, runtime, true); 1983 + mark_runtime_wc(chip, azx_dev, substream, true); 1997 1984 return ret; 1998 1985 } 1999 1986 ··· 2002 1989 struct azx_pcm *apcm = snd_pcm_substream_chip(substream); 2003 1990 struct azx_dev *azx_dev = get_azx_dev(substream); 2004 1991 struct azx *chip = apcm->chip; 2005 - struct snd_pcm_runtime *runtime = substream->runtime; 2006 1992 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream]; 2007 1993 2008 1994 /* reset BDL address */ ··· 2014 2002 2015 2003 snd_hda_codec_cleanup(apcm->codec, hinfo, substream); 2016 2004 2017 - mark_runtime_wc(chip, azx_dev, runtime, false); 2005 + mark_runtime_wc(chip, azx_dev, substream, false); 2018 2006 return snd_pcm_lib_free_pages(substream); 2019 2007 } 2020 2008 ··· 3625 3613 /* 5 Series/3400 */ 3626 3614 { PCI_DEVICE(0x8086, 0x3b56), 3627 3615 .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH }, 3628 - /* SCH */ 3616 + /* Poulsbo */ 3629 3617 { PCI_DEVICE(0x8086, 0x811b), 3630 - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | 3631 - AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Poulsbo */ 3618 + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM }, 3619 + /* Oaktrail */ 3632 3620 { PCI_DEVICE(0x8086, 0x080a), 3633 - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | 3634 - AZX_DCAPS_BUFSIZE | 
AZX_DCAPS_POSFIX_LPIB }, /* Oaktrail */ 3621 + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM }, 3635 3622 /* ICH */ 3636 3623 { PCI_DEVICE(0x8086, 0x2668), 3637 3624 .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
+2
sound/pci/hda/patch_realtek.c
··· 4694 4694 SND_PCI_QUIRK(0x1584, 0x9077, "Uniwill P53", ALC880_FIXUP_VOL_KNOB), 4695 4695 SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810), 4696 4696 SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM), 4697 + SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST), 4697 4698 SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734), 4698 4699 SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU), 4699 4700 SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734), ··· 5709 5708 }; 5710 5709 5711 5710 static const struct snd_pci_quirk alc268_fixup_tbl[] = { 5711 + SND_PCI_QUIRK(0x1025, 0x015b, "Acer AOA 150 (ZG5)", ALC268_FIXUP_INV_DMIC), 5712 5712 /* below is codec SSID since multiple Toshiba laptops have the 5713 5713 * same PCI SSID 1179:ff00 5714 5714 */
+4 -1
sound/soc/codecs/arizona.c
··· 685 685 } 686 686 sr_val = i; 687 687 688 - lrclk = snd_soc_params_to_bclk(params) / params_rate(params); 688 + lrclk = rates[bclk] / params_rate(params); 689 689 690 690 arizona_aif_dbg(dai, "BCLK %dHz LRCLK %dHz\n", 691 691 rates[bclk], rates[bclk] / lrclk); ··· 1081 1081 dev_err(arizona->dev, "Failed to get FLL%d clock OK IRQ: %d\n", 1082 1082 id, ret); 1083 1083 } 1084 + 1085 + regmap_update_bits(arizona->regmap, fll->base + 1, 1086 + ARIZONA_FLL1_FREERUN, 0); 1084 1087 1085 1088 return 0; 1086 1089 }
-3
sound/soc/codecs/wm2200.c
··· 1019 1019 "EQR", 1020 1020 "LHPF1", 1021 1021 "LHPF2", 1022 - "LHPF3", 1023 - "LHPF4", 1024 1022 "DSP1.1", 1025 1023 "DSP1.2", 1026 1024 "DSP1.3", ··· 1051 1053 0x25, 1052 1054 0x50, /* EQ */ 1053 1055 0x51, 1054 - 0x52, 1055 1056 0x60, /* LHPF1 */ 1056 1057 0x61, /* LHPF2 */ 1057 1058 0x68, /* DSP1 */
+1 -2
sound/soc/codecs/wm5102.c
··· 896 896 897 897 static const struct soc_enum wm5102_aec_loopback = 898 898 SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1, 899 - ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 900 - ARIZONA_AEC_LOOPBACK_SRC_MASK, 899 + ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 0xf, 901 900 ARRAY_SIZE(wm5102_aec_loopback_texts), 902 901 wm5102_aec_loopback_texts, 903 902 wm5102_aec_loopback_values);
+1 -2
sound/soc/codecs/wm5110.c
··· 344 344 345 345 static const struct soc_enum wm5110_aec_loopback = 346 346 SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1, 347 - ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 348 - ARIZONA_AEC_LOOPBACK_SRC_MASK, 347 + ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 0xf, 349 348 ARRAY_SIZE(wm5110_aec_loopback_texts), 350 349 wm5110_aec_loopback_texts, 351 350 wm5110_aec_loopback_values);
+3 -3
sound/soc/codecs/wm_adsp.c
··· 324 324 325 325 if (reg) { 326 326 buf = kmemdup(region->data, le32_to_cpu(region->len), 327 - GFP_KERNEL); 327 + GFP_KERNEL | GFP_DMA); 328 328 if (!buf) { 329 329 adsp_err(dsp, "Out of memory\n"); 330 330 return -ENOMEM; ··· 396 396 hdr = (void*)&firmware->data[0]; 397 397 if (memcmp(hdr->magic, "WMDR", 4) != 0) { 398 398 adsp_err(dsp, "%s: invalid magic\n", file); 399 - return -EINVAL; 399 + goto out_fw; 400 400 } 401 401 402 402 adsp_dbg(dsp, "%s: v%d.%d.%d\n", file, ··· 439 439 440 440 if (reg) { 441 441 buf = kmemdup(blk->data, le32_to_cpu(blk->len), 442 - GFP_KERNEL); 442 + GFP_KERNEL | GFP_DMA); 443 443 if (!buf) { 444 444 adsp_err(dsp, "Out of memory\n"); 445 445 return -ENOMEM;
+1 -20
sound/soc/fsl/imx-pcm-dma.c
··· 154 154 .pcm_free = imx_pcm_free, 155 155 }; 156 156 157 - static int imx_soc_platform_probe(struct platform_device *pdev) 157 + int imx_pcm_dma_init(struct platform_device *pdev) 158 158 { 159 159 return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2); 160 160 } 161 - 162 - static int imx_soc_platform_remove(struct platform_device *pdev) 163 - { 164 - snd_soc_unregister_platform(&pdev->dev); 165 - return 0; 166 - } 167 - 168 - static struct platform_driver imx_pcm_driver = { 169 - .driver = { 170 - .name = "imx-pcm-audio", 171 - .owner = THIS_MODULE, 172 - }, 173 - .probe = imx_soc_platform_probe, 174 - .remove = imx_soc_platform_remove, 175 - }; 176 - 177 - module_platform_driver(imx_pcm_driver); 178 - MODULE_LICENSE("GPL"); 179 - MODULE_ALIAS("platform:imx-pcm-audio");
+1 -21
sound/soc/fsl/imx-pcm-fiq.c
··· 281 281 .pcm_free = imx_pcm_fiq_free, 282 282 }; 283 283 284 - static int imx_soc_platform_probe(struct platform_device *pdev) 284 + int imx_pcm_fiq_init(struct platform_device *pdev) 285 285 { 286 286 struct imx_ssi *ssi = platform_get_drvdata(pdev); 287 287 int ret; ··· 314 314 315 315 return ret; 316 316 } 317 - 318 - static int imx_soc_platform_remove(struct platform_device *pdev) 319 - { 320 - snd_soc_unregister_platform(&pdev->dev); 321 - return 0; 322 - } 323 - 324 - static struct platform_driver imx_pcm_driver = { 325 - .driver = { 326 - .name = "imx-fiq-pcm-audio", 327 - .owner = THIS_MODULE, 328 - }, 329 - 330 - .probe = imx_soc_platform_probe, 331 - .remove = imx_soc_platform_remove, 332 - }; 333 - 334 - module_platform_driver(imx_pcm_driver); 335 - 336 - MODULE_LICENSE("GPL");
+32
sound/soc/fsl/imx-pcm.c
··· 104 104 } 105 105 EXPORT_SYMBOL_GPL(imx_pcm_free); 106 106 107 + static int imx_pcm_probe(struct platform_device *pdev) 108 + { 109 + if (strcmp(pdev->id_entry->name, "imx-fiq-pcm-audio") == 0) 110 + return imx_pcm_fiq_init(pdev); 111 + 112 + return imx_pcm_dma_init(pdev); 113 + } 114 + 115 + static int imx_pcm_remove(struct platform_device *pdev) 116 + { 117 + snd_soc_unregister_platform(&pdev->dev); 118 + return 0; 119 + } 120 + 121 + static struct platform_device_id imx_pcm_devtype[] = { 122 + { .name = "imx-pcm-audio", }, 123 + { .name = "imx-fiq-pcm-audio", }, 124 + { /* sentinel */ } 125 + }; 126 + MODULE_DEVICE_TABLE(platform, imx_pcm_devtype); 127 + 128 + static struct platform_driver imx_pcm_driver = { 129 + .driver = { 130 + .name = "imx-pcm", 131 + .owner = THIS_MODULE, 132 + }, 133 + .id_table = imx_pcm_devtype, 134 + .probe = imx_pcm_probe, 135 + .remove = imx_pcm_remove, 136 + }; 137 + module_platform_driver(imx_pcm_driver); 138 + 107 139 MODULE_DESCRIPTION("Freescale i.MX PCM driver"); 108 140 MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); 109 141 MODULE_LICENSE("GPL");
+18
sound/soc/fsl/imx-pcm.h
··· 30 30 int imx_pcm_new(struct snd_soc_pcm_runtime *rtd); 31 31 void imx_pcm_free(struct snd_pcm *pcm); 32 32 33 + #ifdef CONFIG_SND_SOC_IMX_PCM_DMA 34 + int imx_pcm_dma_init(struct platform_device *pdev); 35 + #else 36 + static inline int imx_pcm_dma_init(struct platform_device *pdev) 37 + { 38 + return -ENODEV; 39 + } 40 + #endif 41 + 42 + #ifdef CONFIG_SND_SOC_IMX_PCM_FIQ 43 + int imx_pcm_fiq_init(struct platform_device *pdev); 44 + #else 45 + static inline int imx_pcm_fiq_init(struct platform_device *pdev) 46 + { 47 + return -ENODEV; 48 + } 49 + #endif 50 + 33 51 #endif /* _IMX_PCM_H */
+10 -2
sound/soc/soc-dapm.c
··· 1023 1023 1024 1024 if (SND_SOC_DAPM_EVENT_ON(event)) { 1025 1025 if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) { 1026 - ret = regulator_allow_bypass(w->regulator, true); 1026 + ret = regulator_allow_bypass(w->regulator, false); 1027 1027 if (ret != 0) 1028 1028 dev_warn(w->dapm->dev, 1029 1029 "ASoC: Failed to bypass %s: %d\n", ··· 1033 1033 return regulator_enable(w->regulator); 1034 1034 } else { 1035 1035 if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) { 1036 - ret = regulator_allow_bypass(w->regulator, false); 1036 + ret = regulator_allow_bypass(w->regulator, true); 1037 1037 if (ret != 0) 1038 1038 dev_warn(w->dapm->dev, 1039 1039 "ASoC: Failed to unbypass %s: %d\n", ··· 3038 3038 dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n", 3039 3039 w->name, ret); 3040 3040 return NULL; 3041 + } 3042 + 3043 + if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) { 3044 + ret = regulator_allow_bypass(w->regulator, true); 3045 + if (ret != 0) 3046 + dev_warn(w->dapm->dev, 3047 + "ASoC: Failed to unbypass %s: %d\n", 3048 + w->name, ret); 3041 3049 } 3042 3050 break; 3043 3051 case snd_soc_dapm_clock_supply:
+12 -5
sound/usb/mixer.c
··· 1331 1331 } 1332 1332 channels = (hdr->bLength - 7) / csize - 1; 1333 1333 bmaControls = hdr->bmaControls; 1334 + if (hdr->bLength < 7 + csize) { 1335 + snd_printk(KERN_ERR "usbaudio: unit %u: " 1336 + "invalid UAC_FEATURE_UNIT descriptor\n", 1337 + unitid); 1338 + return -EINVAL; 1339 + } 1334 1340 } else { 1335 1341 struct uac2_feature_unit_descriptor *ftr = _ftr; 1336 1342 csize = 4; 1337 1343 channels = (hdr->bLength - 6) / 4 - 1; 1338 1344 bmaControls = ftr->bmaControls; 1339 - } 1340 - 1341 - if (hdr->bLength < 7 || !csize || hdr->bLength < 7 + csize) { 1342 - snd_printk(KERN_ERR "usbaudio: unit %u: invalid UAC_FEATURE_UNIT descriptor\n", unitid); 1343 - return -EINVAL; 1345 + if (hdr->bLength < 6 + csize) { 1346 + snd_printk(KERN_ERR "usbaudio: unit %u: " 1347 + "invalid UAC_FEATURE_UNIT descriptor\n", 1348 + unitid); 1349 + return -EINVAL; 1350 + } 1344 1351 } 1345 1352 1346 1353 /* parse the source unit */
+2
tools/vm/.gitignore
··· 1 + slabinfo 2 + page-types