Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge 3.8-rc5 into usb-next

This fixes up a conflict with drivers/usb/serial/io_ti.c that came up in
linux-next.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+2060 -1229
+3 -2
Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
··· 81 81 Required properties for pin configuration node: 82 82 - atmel,pins: 4 integers array, represents a group of pins mux and config 83 83 setting. The format is atmel,pins = <PIN_BANK PIN_BANK_NUM PERIPH CONFIG>. 84 - The PERIPH 0 means gpio. 84 + The PERIPH 0 means gpio, PERIPH 1 is periph A, PERIPH 2 is periph B... 85 + PIN_BANK 0 is pioA, PIN_BANK 1 is pioB... 85 86 86 87 Bits used for CONFIG: 87 88 PULL_UP (1 << 0): indicate this pin need a pull up. ··· 127 126 pinctrl_dbgu: dbgu-0 { 128 127 atmel,pins = 129 128 <1 14 0x1 0x0 /* PB14 periph A */ 130 - 1 15 0x1 0x1>; /* PB15 periph with pullup */ 129 + 1 15 0x1 0x1>; /* PB15 periph A with pullup */ 131 130 }; 132 131 }; 133 132 };
+9 -9
Documentation/filesystems/f2fs.txt
··· 175 175 align with the zone size <-| 176 176 |-> align with the segment size 177 177 _________________________________________________________________________ 178 - | | | Node | Segment | Segment | | 179 - | Superblock | Checkpoint | Address | Info. | Summary | Main | 180 - | (SB) | (CP) | Table (NAT) | Table (SIT) | Area (SSA) | | 178 + | | | Segment | Node | Segment | | 179 + | Superblock | Checkpoint | Info. | Address | Summary | Main | 180 + | (SB) | (CP) | Table (SIT) | Table (NAT) | Area (SSA) | | 181 181 |____________|_____2______|______N______|______N______|______N_____|__N___| 182 182 . . 183 183 . . ··· 200 200 : It contains file system information, bitmaps for valid NAT/SIT sets, orphan 201 201 inode lists, and summary entries of current active segments. 202 202 203 - - Node Address Table (NAT) 204 - : It is composed of a block address table for all the node blocks stored in 205 - Main area. 206 - 207 203 - Segment Information Table (SIT) 208 204 : It contains segment information such as valid block count and bitmap for the 209 205 validity of all the blocks. 206 + 207 + - Node Address Table (NAT) 208 + : It is composed of a block address table for all the node blocks stored in 209 + Main area. 210 210 211 211 - Segment Summary Area (SSA) 212 212 : It contains summary entries which contains the owner information of all the ··· 236 236 valid, as shown as below. 237 237 238 238 +--------+----------+---------+ 239 - | CP | NAT | SIT | 239 + | CP | SIT | NAT | 240 240 +--------+----------+---------+ 241 241 . . . . 242 242 . . . . 243 243 . . . . 244 244 +-------+-------+--------+--------+--------+--------+ 245 - | CP #0 | CP #1 | NAT #0 | NAT #1 | SIT #0 | SIT #1 | 245 + | CP #0 | CP #1 | SIT #0 | SIT #1 | NAT #0 | NAT #1 | 246 246 +-------+-------+--------+--------+--------+--------+ 247 247 | ^ ^ 248 248 | | |
+1 -1
MAINTAINERS
··· 6585 6585 F: include/media/s3c_camif.h 6586 6586 6587 6587 SERIAL DRIVERS 6588 - M: Alan Cox <alan@linux.intel.com> 6588 + M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 6589 6589 L: linux-serial@vger.kernel.org 6590 6590 S: Maintained 6591 6591 F: drivers/tty/serial
+2 -2
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 8 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc4 4 + EXTRAVERSION = -rc5 5 5 NAME = Terrified Chipmunk 6 6 7 7 # *DOCUMENTATION* ··· 169 169 -e s/arm.*/arm/ -e s/sa110/arm/ \ 170 170 -e s/s390x/s390/ -e s/parisc64/parisc/ \ 171 171 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ 172 - -e s/sh[234].*/sh/ ) 172 + -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ ) 173 173 174 174 # Cross compiling and selecting different set of gcc/bin-utils 175 175 # ---------------------------------------------------------------------------
+1 -1
arch/arm/boot/dts/armada-370-db.dts
··· 26 26 27 27 memory { 28 28 device_type = "memory"; 29 - reg = <0x00000000 0x20000000>; /* 512 MB */ 29 + reg = <0x00000000 0x40000000>; /* 1 GB */ 30 30 }; 31 31 32 32 soc {
+6 -8
arch/arm/boot/dts/armada-xp-mv78230.dtsi
··· 50 50 }; 51 51 52 52 gpio0: gpio@d0018100 { 53 - compatible = "marvell,armadaxp-gpio"; 54 - reg = <0xd0018100 0x40>, 55 - <0xd0018800 0x30>; 53 + compatible = "marvell,orion-gpio"; 54 + reg = <0xd0018100 0x40>; 56 55 ngpios = <32>; 57 56 gpio-controller; 58 57 #gpio-cells = <2>; 59 58 interrupt-controller; 60 59 #interrupts-cells = <2>; 61 - interrupts = <16>, <17>, <18>, <19>; 60 + interrupts = <82>, <83>, <84>, <85>; 62 61 }; 63 62 64 63 gpio1: gpio@d0018140 { 65 - compatible = "marvell,armadaxp-gpio"; 66 - reg = <0xd0018140 0x40>, 67 - <0xd0018840 0x30>; 64 + compatible = "marvell,orion-gpio"; 65 + reg = <0xd0018140 0x40>; 68 66 ngpios = <17>; 69 67 gpio-controller; 70 68 #gpio-cells = <2>; 71 69 interrupt-controller; 72 70 #interrupts-cells = <2>; 73 - interrupts = <20>, <21>, <22>; 71 + interrupts = <87>, <88>, <89>; 74 72 }; 75 73 }; 76 74 };
+9 -12
arch/arm/boot/dts/armada-xp-mv78260.dtsi
··· 51 51 }; 52 52 53 53 gpio0: gpio@d0018100 { 54 - compatible = "marvell,armadaxp-gpio"; 55 - reg = <0xd0018100 0x40>, 56 - <0xd0018800 0x30>; 54 + compatible = "marvell,orion-gpio"; 55 + reg = <0xd0018100 0x40>; 57 56 ngpios = <32>; 58 57 gpio-controller; 59 58 #gpio-cells = <2>; 60 59 interrupt-controller; 61 60 #interrupts-cells = <2>; 62 - interrupts = <16>, <17>, <18>, <19>; 61 + interrupts = <82>, <83>, <84>, <85>; 63 62 }; 64 63 65 64 gpio1: gpio@d0018140 { 66 - compatible = "marvell,armadaxp-gpio"; 67 - reg = <0xd0018140 0x40>, 68 - <0xd0018840 0x30>; 65 + compatible = "marvell,orion-gpio"; 66 + reg = <0xd0018140 0x40>; 69 67 ngpios = <32>; 70 68 gpio-controller; 71 69 #gpio-cells = <2>; 72 70 interrupt-controller; 73 71 #interrupts-cells = <2>; 74 - interrupts = <20>, <21>, <22>, <23>; 72 + interrupts = <87>, <88>, <89>, <90>; 75 73 }; 76 74 77 75 gpio2: gpio@d0018180 { 78 - compatible = "marvell,armadaxp-gpio"; 79 - reg = <0xd0018180 0x40>, 80 - <0xd0018870 0x30>; 76 + compatible = "marvell,orion-gpio"; 77 + reg = <0xd0018180 0x40>; 81 78 ngpios = <3>; 82 79 gpio-controller; 83 80 #gpio-cells = <2>; 84 81 interrupt-controller; 85 82 #interrupts-cells = <2>; 86 - interrupts = <24>; 83 + interrupts = <91>; 87 84 }; 88 85 89 86 ethernet@d0034000 {
+9 -12
arch/arm/boot/dts/armada-xp-mv78460.dtsi
··· 66 66 }; 67 67 68 68 gpio0: gpio@d0018100 { 69 - compatible = "marvell,armadaxp-gpio"; 70 - reg = <0xd0018100 0x40>, 71 - <0xd0018800 0x30>; 69 + compatible = "marvell,orion-gpio"; 70 + reg = <0xd0018100 0x40>; 72 71 ngpios = <32>; 73 72 gpio-controller; 74 73 #gpio-cells = <2>; 75 74 interrupt-controller; 76 75 #interrupts-cells = <2>; 77 - interrupts = <16>, <17>, <18>, <19>; 76 + interrupts = <82>, <83>, <84>, <85>; 78 77 }; 79 78 80 79 gpio1: gpio@d0018140 { 81 - compatible = "marvell,armadaxp-gpio"; 82 - reg = <0xd0018140 0x40>, 83 - <0xd0018840 0x30>; 80 + compatible = "marvell,orion-gpio"; 81 + reg = <0xd0018140 0x40>; 84 82 ngpios = <32>; 85 83 gpio-controller; 86 84 #gpio-cells = <2>; 87 85 interrupt-controller; 88 86 #interrupts-cells = <2>; 89 - interrupts = <20>, <21>, <22>, <23>; 87 + interrupts = <87>, <88>, <89>, <90>; 90 88 }; 91 89 92 90 gpio2: gpio@d0018180 { 93 - compatible = "marvell,armadaxp-gpio"; 94 - reg = <0xd0018180 0x40>, 95 - <0xd0018870 0x30>; 91 + compatible = "marvell,orion-gpio"; 92 + reg = <0xd0018180 0x40>; 96 93 ngpios = <3>; 97 94 gpio-controller; 98 95 #gpio-cells = <2>; 99 96 interrupt-controller; 100 97 #interrupts-cells = <2>; 101 - interrupts = <24>; 98 + interrupts = <91>; 102 99 }; 103 100 104 101 ethernet@d0034000 {
+2 -2
arch/arm/boot/dts/at91rm9200.dtsi
··· 336 336 337 337 i2c@0 { 338 338 compatible = "i2c-gpio"; 339 - gpios = <&pioA 23 0 /* sda */ 340 - &pioA 24 0 /* scl */ 339 + gpios = <&pioA 25 0 /* sda */ 340 + &pioA 26 0 /* scl */ 341 341 >; 342 342 i2c-gpio,sda-open-drain; 343 343 i2c-gpio,scl-open-drain;
+40 -20
arch/arm/boot/dts/at91sam9x5.dtsi
··· 143 143 atmel,pins = 144 144 <0 3 0x1 0x0>; /* PA3 periph A */ 145 145 }; 146 + 147 + pinctrl_usart0_sck: usart0_sck-0 { 148 + atmel,pins = 149 + <0 4 0x1 0x0>; /* PA4 periph A */ 150 + }; 146 151 }; 147 152 148 153 usart1 { ··· 159 154 160 155 pinctrl_usart1_rts: usart1_rts-0 { 161 156 atmel,pins = 162 - <3 27 0x3 0x0>; /* PC27 periph C */ 157 + <2 27 0x3 0x0>; /* PC27 periph C */ 163 158 }; 164 159 165 160 pinctrl_usart1_cts: usart1_cts-0 { 166 161 atmel,pins = 167 - <3 28 0x3 0x0>; /* PC28 periph C */ 162 + <2 28 0x3 0x0>; /* PC28 periph C */ 163 + }; 164 + 165 + pinctrl_usart1_sck: usart1_sck-0 { 166 + atmel,pins = 167 + <2 28 0x3 0x0>; /* PC29 periph C */ 168 168 }; 169 169 }; 170 170 ··· 182 172 183 173 pinctrl_uart2_rts: uart2_rts-0 { 184 174 atmel,pins = 185 - <0 0 0x2 0x0>; /* PB0 periph B */ 175 + <1 0 0x2 0x0>; /* PB0 periph B */ 186 176 }; 187 177 188 178 pinctrl_uart2_cts: uart2_cts-0 { 189 179 atmel,pins = 190 - <0 1 0x2 0x0>; /* PB1 periph B */ 180 + <1 1 0x2 0x0>; /* PB1 periph B */ 181 + }; 182 + 183 + pinctrl_usart2_sck: usart2_sck-0 { 184 + atmel,pins = 185 + <1 2 0x2 0x0>; /* PB2 periph B */ 191 186 }; 192 187 }; 193 188 194 189 usart3 { 195 190 pinctrl_uart3: usart3-0 { 196 191 atmel,pins = 197 - <3 23 0x2 0x1 /* PC22 periph B with pullup */ 198 - 3 23 0x2 0x0>; /* PC23 periph B */ 192 + <2 23 0x2 0x1 /* PC22 periph B with pullup */ 193 + 2 23 0x2 0x0>; /* PC23 periph B */ 199 194 }; 200 195 201 196 pinctrl_usart3_rts: usart3_rts-0 { 202 197 atmel,pins = 203 - <3 24 0x2 0x0>; /* PC24 periph B */ 198 + <2 24 0x2 0x0>; /* PC24 periph B */ 204 199 }; 205 200 206 201 pinctrl_usart3_cts: usart3_cts-0 { 207 202 atmel,pins = 208 - <3 25 0x2 0x0>; /* PC25 periph B */ 203 + <2 25 0x2 0x0>; /* PC25 periph B */ 204 + }; 205 + 206 + pinctrl_usart3_sck: usart3_sck-0 { 207 + atmel,pins = 208 + <2 26 0x2 0x0>; /* PC26 periph B */ 209 209 }; 210 210 }; 211 211 212 212 uart0 { 213 213 pinctrl_uart0: uart0-0 { 214 214 atmel,pins = 215 - <3 8 0x3 0x0 /* PC8 periph C */ 216 - 3 9 0x3 0x1>; /* PC9 periph C with pullup */ 215 + <2 8 0x3 0x0 /* PC8 periph C */ 216 + 2 9 0x3 0x1>; /* PC9 periph C with pullup */ 217 217 }; 218 218 }; 219 219 220 220 uart1 { 221 221 pinctrl_uart1: uart1-0 { 222 222 atmel,pins = 223 - <3 16 0x3 0x0 /* PC16 periph C */ 224 - 3 17 0x3 0x1>; /* PC17 periph C with pullup */ 223 + <2 16 0x3 0x0 /* PC16 periph C */ 224 + 2 17 0x3 0x1>; /* PC17 periph C with pullup */ 225 225 }; 226 226 }; 227 227 ··· 260 240 261 241 pinctrl_macb0_rmii_mii: macb0_rmii_mii-0 { 262 242 atmel,pins = 263 - <1 8 0x1 0x0 /* PA8 periph A */ 264 - 1 11 0x1 0x0 /* PA11 periph A */ 265 - 1 12 0x1 0x0 /* PA12 periph A */ 266 - 1 13 0x1 0x0 /* PA13 periph A */ 267 - 1 14 0x1 0x0 /* PA14 periph A */ 268 - 1 15 0x1 0x0 /* PA15 periph A */ 269 - 1 16 0x1 0x0 /* PA16 periph A */ 270 - 1 17 0x1 0x0>; /* PA17 periph A */ 243 + <1 8 0x1 0x0 /* PB8 periph A */ 244 + 1 11 0x1 0x0 /* PB11 periph A */ 245 + 1 12 0x1 0x0 /* PB12 periph A */ 246 + 1 13 0x1 0x0 /* PB13 periph A */ 247 + 1 14 0x1 0x0 /* PB14 periph A */ 248 + 1 15 0x1 0x0 /* PB15 periph A */ 249 + 1 16 0x1 0x0 /* PB16 periph A */ 250 + 1 17 0x1 0x0>; /* PB17 periph A */ 271 251 }; 272 252 }; 273 253
+6 -6
arch/arm/boot/dts/cros5250-common.dtsi
··· 96 96 fifo-depth = <0x80>; 97 97 card-detect-delay = <200>; 98 98 samsung,dw-mshc-ciu-div = <3>; 99 - samsung,dw-mshc-sdr-timing = <2 3 3>; 100 - samsung,dw-mshc-ddr-timing = <1 2 3>; 99 + samsung,dw-mshc-sdr-timing = <2 3>; 100 + samsung,dw-mshc-ddr-timing = <1 2>; 101 101 102 102 slot@0 { 103 103 reg = <0>; ··· 120 120 fifo-depth = <0x80>; 121 121 card-detect-delay = <200>; 122 122 samsung,dw-mshc-ciu-div = <3>; 123 - samsung,dw-mshc-sdr-timing = <2 3 3>; 124 - samsung,dw-mshc-ddr-timing = <1 2 3>; 123 + samsung,dw-mshc-sdr-timing = <2 3>; 124 + samsung,dw-mshc-ddr-timing = <1 2>; 125 125 126 126 slot@0 { 127 127 reg = <0>; ··· 141 141 fifo-depth = <0x80>; 142 142 card-detect-delay = <200>; 143 143 samsung,dw-mshc-ciu-div = <3>; 144 - samsung,dw-mshc-sdr-timing = <2 3 3>; 145 - samsung,dw-mshc-ddr-timing = <1 2 3>; 144 + samsung,dw-mshc-sdr-timing = <2 3>; 145 + samsung,dw-mshc-ddr-timing = <1 2>; 146 146 147 147 slot@0 { 148 148 reg = <0>;
+12 -2
arch/arm/boot/dts/dove-cubox.dts
··· 26 26 }; 27 27 28 28 &uart0 { status = "okay"; }; 29 - &sdio0 { status = "okay"; }; 30 29 &sata0 { status = "okay"; }; 31 30 &i2c0 { status = "okay"; }; 31 + 32 + &sdio0 { 33 + status = "okay"; 34 + /* sdio0 card detect is connected to wrong pin on CuBox */ 35 + cd-gpios = <&gpio0 12 1>; 36 + }; 32 37 33 38 &spi0 { 34 39 status = "okay"; ··· 47 42 }; 48 43 49 44 &pinctrl { 50 - pinctrl-0 = <&pmx_gpio_18>; 45 + pinctrl-0 = <&pmx_gpio_12 &pmx_gpio_18>; 51 46 pinctrl-names = "default"; 47 + 48 + pmx_gpio_12: pmx-gpio-12 { 49 + marvell,pins = "mpp12"; 50 + marvell,function = "gpio"; 51 + }; 52 52 53 53 pmx_gpio_18: pmx-gpio-18 { 54 54 marvell,pins = "mpp18";
+4 -4
arch/arm/boot/dts/exynos5250-smdk5250.dts
··· 115 115 fifo-depth = <0x80>; 116 116 card-detect-delay = <200>; 117 117 samsung,dw-mshc-ciu-div = <3>; 118 - samsung,dw-mshc-sdr-timing = <2 3 3>; 119 - samsung,dw-mshc-ddr-timing = <1 2 3>; 118 + samsung,dw-mshc-sdr-timing = <2 3>; 119 + samsung,dw-mshc-ddr-timing = <1 2>; 120 120 121 121 slot@0 { 122 122 reg = <0>; ··· 139 139 fifo-depth = <0x80>; 140 140 card-detect-delay = <200>; 141 141 samsung,dw-mshc-ciu-div = <3>; 142 - samsung,dw-mshc-sdr-timing = <2 3 3>; 143 - samsung,dw-mshc-ddr-timing = <1 2 3>; 142 + samsung,dw-mshc-sdr-timing = <2 3>; 143 + samsung,dw-mshc-ddr-timing = <1 2>; 144 144 145 145 slot@0 { 146 146 reg = <0>;
+16
arch/arm/boot/dts/kirkwood-ns2-common.dtsi
··· 1 1 /include/ "kirkwood.dtsi" 2 + /include/ "kirkwood-6281.dtsi" 2 3 3 4 / { 4 5 chosen { ··· 7 6 }; 8 7 9 8 ocp@f1000000 { 9 + pinctrl: pinctrl@10000 { 10 + pinctrl-0 = < &pmx_spi &pmx_twsi0 &pmx_uart0 11 + &pmx_ns2_sata0 &pmx_ns2_sata1>; 12 + pinctrl-names = "default"; 13 + 14 + pmx_ns2_sata0: pmx-ns2-sata0 { 15 + marvell,pins = "mpp21"; 16 + marvell,function = "sata0"; 17 + }; 18 + pmx_ns2_sata1: pmx-ns2-sata1 { 19 + marvell,pins = "mpp20"; 20 + marvell,function = "sata1"; 21 + }; 22 + }; 23 + 10 24 serial@12000 { 11 25 clock-frequency = <166666667>; 12 26 status = "okay";
+2
arch/arm/boot/dts/kirkwood.dtsi
··· 36 36 reg = <0x10100 0x40>; 37 37 ngpios = <32>; 38 38 interrupt-controller; 39 + #interrupt-cells = <2>; 39 40 interrupts = <35>, <36>, <37>, <38>; 40 41 }; 41 42 ··· 47 46 reg = <0x10140 0x40>; 48 47 ngpios = <18>; 49 48 interrupt-controller; 49 + #interrupt-cells = <2>; 50 50 interrupts = <39>, <40>, <41>; 51 51 }; 52 52
+2
arch/arm/boot/dts/kizbox.dts
··· 48 48 49 49 macb0: ethernet@fffc4000 { 50 50 phy-mode = "mii"; 51 + pinctrl-0 = <&pinctrl_macb_rmii 52 + &pinctrl_macb_rmii_mii_alt>; 51 53 status = "okay"; 52 54 }; 53 55
+4 -2
arch/arm/boot/dts/sunxi.dtsi
··· 60 60 }; 61 61 62 62 uart0: uart@01c28000 { 63 - compatible = "ns8250"; 63 + compatible = "snps,dw-apb-uart"; 64 64 reg = <0x01c28000 0x400>; 65 65 interrupts = <1>; 66 66 reg-shift = <2>; 67 + reg-io-width = <4>; 67 68 clock-frequency = <24000000>; 68 69 status = "disabled"; 69 70 }; 70 71 71 72 uart1: uart@01c28400 { 72 - compatible = "ns8250"; 73 + compatible = "snps,dw-apb-uart"; 73 74 reg = <0x01c28400 0x400>; 74 75 interrupts = <2>; 75 76 reg-shift = <2>; 77 + reg-io-width = <4>; 76 78 clock-frequency = <24000000>; 77 79 status = "disabled"; 78 80 };
-2
arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
··· 45 45 reg = <1>; 46 46 }; 47 47 48 - /* A7s disabled till big.LITTLE patches are available... 49 48 cpu2: cpu@2 { 50 49 device_type = "cpu"; 51 50 compatible = "arm,cortex-a7"; ··· 62 63 compatible = "arm,cortex-a7"; 63 64 reg = <0x102>; 64 65 }; 65 - */ 66 66 }; 67 67 68 68 memory@80000000 {
+2 -1
arch/arm/configs/at91_dt_defconfig
··· 19 19 CONFIG_SOC_AT91SAM9263=y 20 20 CONFIG_SOC_AT91SAM9G45=y 21 21 CONFIG_SOC_AT91SAM9X5=y 22 + CONFIG_SOC_AT91SAM9N12=y 22 23 CONFIG_MACH_AT91SAM_DT=y 23 24 CONFIG_AT91_PROGRAMMABLE_CLOCKS=y 24 25 CONFIG_AT91_TIMER_HZ=128 ··· 32 31 CONFIG_ZBOOT_ROM_BSS=0x0 33 32 CONFIG_ARM_APPENDED_DTB=y 34 33 CONFIG_ARM_ATAG_DTB_COMPAT=y 35 - CONFIG_CMDLINE="mem=128M console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw" 34 + CONFIG_CMDLINE="console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw" 36 35 CONFIG_KEXEC=y 37 36 CONFIG_AUTO_ZRELADDR=y 38 37 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+2
arch/arm/kernel/debug.S
··· 100 100 b 1b 101 101 ENDPROC(printch) 102 102 103 + #ifdef CONFIG_MMU 103 104 ENTRY(debug_ll_addr) 104 105 addruart r2, r3, ip 105 106 str r2, [r0] 106 107 str r3, [r1] 107 108 mov pc, lr 108 109 ENDPROC(debug_ll_addr) 110 + #endif 109 111 110 112 #else 111 113
+4 -1
arch/arm/kernel/head.S
··· 246 246 247 247 /* 248 248 * Then map boot params address in r2 if specified. 249 + * We map 2 sections in case the ATAGs/DTB crosses a section boundary. 249 250 */ 250 251 mov r0, r2, lsr #SECTION_SHIFT 251 252 movs r0, r0, lsl #SECTION_SHIFT ··· 254 253 addne r3, r3, #PAGE_OFFSET 255 254 addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER) 256 255 orrne r6, r7, r0 256 + strne r6, [r3], #1 << PMD_ORDER 257 + addne r6, r6, #1 << SECTION_SHIFT 257 258 strne r6, [r3] 258 259 259 260 #ifdef CONFIG_DEBUG_LL ··· 334 331 * as it has already been validated by the primary processor. 335 332 */ 336 333 #ifdef CONFIG_ARM_VIRT_EXT 337 - bl __hyp_stub_install 334 + bl __hyp_stub_install_secondary 338 335 #endif 339 336 safe_svcmode_maskall r9 340 337
+6 -12
arch/arm/kernel/hyp-stub.S
··· 99 99 * immediately. 100 100 */ 101 101 compare_cpu_mode_with_primary r4, r5, r6, r7 102 - bxne lr 102 + movne pc, lr 103 103 104 104 /* 105 105 * Once we have given up on one CPU, we do not try to install the ··· 111 111 */ 112 112 113 113 cmp r4, #HYP_MODE 114 - bxne lr @ give up if the CPU is not in HYP mode 114 + movne pc, lr @ give up if the CPU is not in HYP mode 115 115 116 116 /* 117 117 * Configure HSCTLR to set correct exception endianness/instruction set ··· 120 120 * Eventually, CPU-specific code might be needed -- assume not for now 121 121 * 122 122 * This code relies on the "eret" instruction to synchronize the 123 - * various coprocessor accesses. 123 + * various coprocessor accesses. This is done when we switch to SVC 124 + * (see safe_svcmode_maskall). 124 125 */ 125 126 @ Now install the hypervisor stub: 126 127 adr r7, __hyp_stub_vectors ··· 156 155 1: 157 156 #endif 158 157 159 - bic r7, r4, #MODE_MASK 160 - orr r7, r7, #SVC_MODE 161 - THUMB( orr r7, r7, #PSR_T_BIT ) 162 - msr spsr_cxsf, r7 @ This is SPSR_hyp. 163 - 164 - __MSR_ELR_HYP(14) @ msr elr_hyp, lr 165 - __ERET @ return, switching to SVC mode 166 - @ The boot CPU mode is left in r4. 158 + bx lr @ The boot CPU mode is left in r4. 167 159 ENDPROC(__hyp_stub_install_secondary) 168 160 169 161 __hyp_stub_do_trap: ··· 194 200 @ fall through 195 201 ENTRY(__hyp_set_vectors) 196 202 __HVC(0) 197 - bx lr 203 + mov pc, lr 198 204 ENDPROC(__hyp_set_vectors) 199 205 200 206 #ifndef ZIMAGE
+2
arch/arm/mach-at91/setup.c
··· 105 105 switch (socid) { 106 106 case ARCH_ID_AT91RM9200: 107 107 at91_soc_initdata.type = AT91_SOC_RM9200; 108 + if (at91_soc_initdata.subtype == AT91_SOC_SUBTYPE_NONE) 109 + at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA; 108 110 at91_boot_soc = at91rm9200_soc; 109 111 break; 110 112
+1
arch/arm/mach-imx/Kconfig
··· 851 851 select HAVE_CAN_FLEXCAN if CAN 852 852 select HAVE_IMX_GPC 853 853 select HAVE_IMX_MMDC 854 + select HAVE_IMX_SRC 854 855 select HAVE_SMP 855 856 select MFD_SYSCON 856 857 select PINCTRL
+3 -3
arch/arm/mach-imx/clk-imx25.c
··· 254 254 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2"); 255 255 clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.2"); 256 256 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2"); 257 - clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc"); 258 - clk_register_clkdev(clk[usbotg_ahb], "ahb", "fsl-usb2-udc"); 259 - clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); 257 + clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27"); 258 + clk_register_clkdev(clk[usbotg_ahb], "ahb", "imx-udc-mx27"); 259 + clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27"); 260 260 clk_register_clkdev(clk[nfc_ipg_per], NULL, "imx25-nand.0"); 261 261 /* i.mx25 has the i.mx35 type cspi */ 262 262 clk_register_clkdev(clk[cspi1_ipg], NULL, "imx35-cspi.0");
+3 -3
arch/arm/mach-imx/clk-imx27.c
··· 236 236 clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx21-fb.0"); 237 237 clk_register_clkdev(clk[csi_ahb_gate], "ahb", "imx27-camera.0"); 238 238 clk_register_clkdev(clk[per4_gate], "per", "imx27-camera.0"); 239 - clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); 240 - clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc"); 241 - clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc"); 239 + clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27"); 240 + clk_register_clkdev(clk[usb_ipg_gate], "ipg", "imx-udc-mx27"); 241 + clk_register_clkdev(clk[usb_ahb_gate], "ahb", "imx-udc-mx27"); 242 242 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0"); 243 243 clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.0"); 244 244 clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.0");
+3 -3
arch/arm/mach-imx/clk-imx31.c
··· 139 139 clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.2"); 140 140 clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.2"); 141 141 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2"); 142 - clk_register_clkdev(clk[usb_div_post], "per", "fsl-usb2-udc"); 143 - clk_register_clkdev(clk[usb_gate], "ahb", "fsl-usb2-udc"); 144 - clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc"); 142 + clk_register_clkdev(clk[usb_div_post], "per", "imx-udc-mx27"); 143 + clk_register_clkdev(clk[usb_gate], "ahb", "imx-udc-mx27"); 144 + clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27"); 145 145 clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); 146 146 /* i.mx31 has the i.mx21 type uart */ 147 147 clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
+3 -3
arch/arm/mach-imx/clk-imx35.c
··· 251 251 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2"); 252 252 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2"); 253 253 clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.2"); 254 - clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); 255 - clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc"); 256 - clk_register_clkdev(clk[usbotg_gate], "ahb", "fsl-usb2-udc"); 254 + clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27"); 255 + clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27"); 256 + clk_register_clkdev(clk[usbotg_gate], "ahb", "imx-udc-mx27"); 257 257 clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); 258 258 clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); 259 259 clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
+3 -3
arch/arm/mach-imx/clk-imx51-imx53.c
··· 269 269 clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.2"); 270 270 clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.2"); 271 271 clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.2"); 272 - clk_register_clkdev(clk[usboh3_per_gate], "per", "fsl-usb2-udc"); 273 - clk_register_clkdev(clk[usboh3_gate], "ipg", "fsl-usb2-udc"); 274 - clk_register_clkdev(clk[usboh3_gate], "ahb", "fsl-usb2-udc"); 272 + clk_register_clkdev(clk[usboh3_per_gate], "per", "imx-udc-mx51"); 273 + clk_register_clkdev(clk[usboh3_gate], "ipg", "imx-udc-mx51"); 274 + clk_register_clkdev(clk[usboh3_gate], "ahb", "imx-udc-mx51"); 275 275 clk_register_clkdev(clk[nfc_gate], NULL, "imx51-nand"); 276 276 clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0"); 277 277 clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
+3
arch/arm/mach-imx/clk-imx6q.c
··· 436 436 for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) 437 437 clk_prepare_enable(clk[clks_init_on[i]]); 438 438 439 + /* Set initial power mode */ 440 + imx6q_set_lpm(WAIT_CLOCKED); 441 + 439 442 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); 440 443 base = of_iomap(np, 0); 441 444 WARN_ON(!base);
+1
arch/arm/mach-imx/common.h
··· 142 142 extern void imx6q_clock_map_io(void); 143 143 144 144 extern void imx_cpu_die(unsigned int cpu); 145 + extern int imx_cpu_kill(unsigned int cpu); 145 146 146 147 #ifdef CONFIG_PM 147 148 extern void imx6q_pm_init(void);
+1
arch/arm/mach-imx/devices/devices-common.h
··· 63 63 64 64 #include <linux/fsl_devices.h> 65 65 struct imx_fsl_usb2_udc_data { 66 + const char *devid; 66 67 resource_size_t iobase; 67 68 resource_size_t irq; 68 69 };
+8 -7
arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c
··· 11 11 #include "../hardware.h" 12 12 #include "devices-common.h" 13 13 14 - #define imx_fsl_usb2_udc_data_entry_single(soc) \ 14 + #define imx_fsl_usb2_udc_data_entry_single(soc, _devid) \ 15 15 { \ 16 + .devid = _devid, \ 16 17 .iobase = soc ## _USB_OTG_BASE_ADDR, \ 17 18 .irq = soc ## _INT_USB_OTG, \ 18 19 } 19 20 20 21 #ifdef CONFIG_SOC_IMX25 21 22 const struct imx_fsl_usb2_udc_data imx25_fsl_usb2_udc_data __initconst = 22 - imx_fsl_usb2_udc_data_entry_single(MX25); 23 + imx_fsl_usb2_udc_data_entry_single(MX25, "imx-udc-mx27"); 23 24 #endif /* ifdef CONFIG_SOC_IMX25 */ 24 25 25 26 #ifdef CONFIG_SOC_IMX27 26 27 const struct imx_fsl_usb2_udc_data imx27_fsl_usb2_udc_data __initconst = 27 - imx_fsl_usb2_udc_data_entry_single(MX27); 28 + imx_fsl_usb2_udc_data_entry_single(MX27, "imx-udc-mx27"); 28 29 #endif /* ifdef CONFIG_SOC_IMX27 */ 29 30 30 31 #ifdef CONFIG_SOC_IMX31 31 32 const struct imx_fsl_usb2_udc_data imx31_fsl_usb2_udc_data __initconst = 32 - imx_fsl_usb2_udc_data_entry_single(MX31); 33 + imx_fsl_usb2_udc_data_entry_single(MX31, "imx-udc-mx27"); 33 34 #endif /* ifdef CONFIG_SOC_IMX31 */ 34 35 35 36 #ifdef CONFIG_SOC_IMX35 36 37 const struct imx_fsl_usb2_udc_data imx35_fsl_usb2_udc_data __initconst = 37 - imx_fsl_usb2_udc_data_entry_single(MX35); 38 + imx_fsl_usb2_udc_data_entry_single(MX35, "imx-udc-mx27"); 38 39 #endif /* ifdef CONFIG_SOC_IMX35 */ 39 40 40 41 #ifdef CONFIG_SOC_IMX51 41 42 const struct imx_fsl_usb2_udc_data imx51_fsl_usb2_udc_data __initconst = 42 - imx_fsl_usb2_udc_data_entry_single(MX51); 43 + imx_fsl_usb2_udc_data_entry_single(MX51, "imx-udc-mx51"); 43 44 #endif 44 45 45 46 struct platform_device *__init imx_add_fsl_usb2_udc( ··· 58 57 .flags = IORESOURCE_IRQ, 59 58 }, 60 59 }; 61 - return imx_add_platform_device_dmamask("fsl-usb2-udc", -1, 60 + return imx_add_platform_device_dmamask(data->devid, -1, 62 61 res, ARRAY_SIZE(res), 63 62 pdata, sizeof(*pdata), DMA_BIT_MASK(32)); 64 63 }
+1 -1
arch/arm/mach-imx/devices/platform-imx-fb.c
··· 54 54 .flags = IORESOURCE_IRQ, 55 55 }, 56 56 }; 57 - return imx_add_platform_device_dmamask("imx-fb", 0, 57 + return imx_add_platform_device_dmamask(data->devid, 0, 58 58 res, ARRAY_SIZE(res), 59 59 pdata, sizeof(*pdata), DMA_BIT_MASK(32)); 60 60 }
+6 -4
arch/arm/mach-imx/hotplug.c
··· 46 46 void imx_cpu_die(unsigned int cpu) 47 47 { 48 48 cpu_enter_lowpower(); 49 - imx_enable_cpu(cpu, false); 49 + cpu_do_idle(); 50 + } 50 51 51 - /* spin here until hardware takes it down */ 52 - while (1) 53 - ; 52 + int imx_cpu_kill(unsigned int cpu) 53 + { 54 + imx_enable_cpu(cpu, false); 55 + return 1; 54 56 }
arch/arm/mach-imx/iram.h include/linux/platform_data/imx-iram.h
+1 -2
arch/arm/mach-imx/iram_alloc.c
··· 22 22 #include <linux/module.h> 23 23 #include <linux/spinlock.h> 24 24 #include <linux/genalloc.h> 25 - 26 - #include "iram.h" 25 + #include "linux/platform_data/imx-iram.h" 27 26 28 27 static unsigned long iram_phys_base; 29 28 static void __iomem *iram_virt_base;
+1
arch/arm/mach-imx/platsmp.c
··· 92 92 .smp_boot_secondary = imx_boot_secondary, 93 93 #ifdef CONFIG_HOTPLUG_CPU 94 94 .cpu_die = imx_cpu_die, 95 + .cpu_kill = imx_cpu_kill, 95 96 #endif 96 97 };
+1
arch/arm/mach-imx/pm-imx6q.c
··· 41 41 cpu_suspend(0, imx6q_suspend_finish); 42 42 imx_smp_prepare(); 43 43 imx_gpc_post_resume(); 44 + imx6q_set_lpm(WAIT_CLOCKED); 44 45 break; 45 46 default: 46 47 return -EINVAL;
+10 -4
arch/arm/mach-integrator/pci_v3.c
··· 475 475 { 476 476 int ret = 0; 477 477 478 + if (!ap_syscon_base) 479 + return -EINVAL; 480 + 478 481 if (nr == 0) { 479 482 sys->mem_offset = PHYS_PCI_MEM_BASE; 480 483 ret = pci_v3_setup_resources(sys); 481 - /* Remap the Integrator system controller */ 482 - ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100); 483 - if (!ap_syscon_base) 484 - return -EINVAL; 485 484 } 486 485 487 486 return ret; ··· 495 496 unsigned long flags; 496 497 unsigned int temp; 497 498 int ret; 499 + 500 + /* Remap the Integrator system controller */ 501 + ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100); 502 + if (!ap_syscon_base) { 503 + pr_err("unable to remap the AP syscon for PCIv3\n"); 504 + return; 505 + } 498 506 499 507 pcibios_min_mem = 0x00100000; 500 508
-38
arch/arm/mach-kirkwood/board-ns2.c
··· 18 18 #include <linux/gpio.h> 19 19 #include <linux/of.h> 20 20 #include "common.h" 21 - #include "mpp.h" 22 21 23 22 static struct mv643xx_eth_platform_data ns2_ge00_data = { 24 23 .phy_addr = MV643XX_ETH_PHY_ADDR(8), 25 - }; 26 - 27 - static unsigned int ns2_mpp_config[] __initdata = { 28 - MPP0_SPI_SCn, 29 - MPP1_SPI_MOSI, 30 - MPP2_SPI_SCK, 31 - MPP3_SPI_MISO, 32 - MPP4_NF_IO6, 33 - MPP5_NF_IO7, 34 - MPP6_SYSRST_OUTn, 35 - MPP7_GPO, /* Fan speed (bit 1) */ 36 - MPP8_TW0_SDA, 37 - MPP9_TW0_SCK, 38 - MPP10_UART0_TXD, 39 - MPP11_UART0_RXD, 40 - MPP12_GPO, /* Red led */ 41 - MPP14_GPIO, /* USB fuse */ 42 - MPP16_GPIO, /* SATA 0 power */ 43 - MPP17_GPIO, /* SATA 1 power */ 44 - MPP18_NF_IO0, 45 - MPP19_NF_IO1, 46 - MPP20_SATA1_ACTn, 47 - MPP21_SATA0_ACTn, 48 - MPP22_GPIO, /* Fan speed (bit 0) */ 49 - MPP23_GPIO, /* Fan power */ 50 - MPP24_GPIO, /* USB mode select */ 51 - MPP25_GPIO, /* Fan rotation fail */ 52 - MPP26_GPIO, /* USB device vbus */ 53 - MPP28_GPIO, /* USB enable host vbus */ 54 - MPP29_GPIO, /* Blue led (slow register) */ 55 - MPP30_GPIO, /* Blue led (command register) */ 56 - MPP31_GPIO, /* Board power off */ 57 - MPP32_GPIO, /* Power button (0 = Released, 1 = Pushed) */ 58 - MPP33_GPO, /* Fan speed (bit 2) */ 59 - 0 60 24 }; 61 25 62 26 #define NS2_GPIO_POWER_OFF 31 ··· 35 71 /* 36 72 * Basic setup. Needs to be called early. 37 73 */ 38 - kirkwood_mpp_conf(ns2_mpp_config); 39 - 40 74 if (of_machine_is_compatible("lacie,netspace_lite_v2") || 41 75 of_machine_is_compatible("lacie,netspace_mini_v2")) 42 76 ns2_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0);
+2
arch/arm/mach-mvebu/Makefile
··· 1 1 ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ 2 2 -I$(srctree)/arch/arm/plat-orion/include 3 3 4 + AFLAGS_coherency_ll.o := -Wa,-march=armv7-a 5 + 4 6 obj-y += system-controller.o 5 7 obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o irq-armada-370-xp.o addr-map.o coherency.o coherency_ll.o pmsu.o 6 8 obj-$(CONFIG_SMP) += platsmp.o headsmp.o
+6
arch/arm/mach-omap2/board-omap4panda.c
··· 397 397 OMAP_PULL_ENA), 398 398 OMAP4_MUX(ABE_MCBSP1_FSX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), 399 399 400 + /* UART2 - BT/FM/GPS shared transport */ 401 + OMAP4_MUX(UART2_CTS, OMAP_PIN_INPUT | OMAP_MUX_MODE0), 402 + OMAP4_MUX(UART2_RTS, OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 403 + OMAP4_MUX(UART2_RX, OMAP_PIN_INPUT | OMAP_MUX_MODE0), 404 + OMAP4_MUX(UART2_TX, OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 405 + 400 406 { .reg_offset = OMAP_MUX_TERMINATOR }, 401 407 }; 402 408
+2
arch/arm/mach-omap2/cclock2420_data.c
··· 1935 1935 omap2_init_clk_hw_omap_clocks(c->lk.clk); 1936 1936 } 1937 1937 1938 + omap2xxx_clkt_vps_late_init(); 1939 + 1938 1940 omap2_clk_disable_autoidle_all(); 1939 1941 1940 1942 omap2_clk_enable_init_clocks(enable_init_clks,
+2
arch/arm/mach-omap2/cclock2430_data.c
··· 2050 2050 omap2_init_clk_hw_omap_clocks(c->lk.clk); 2051 2051 } 2052 2052 2053 + omap2xxx_clkt_vps_late_init(); 2054 + 2053 2055 omap2_clk_disable_autoidle_all(); 2054 2056 2055 2057 omap2_clk_enable_init_clocks(enable_init_clks,
+6 -7
arch/arm/mach-omap2/cclock44xx_data.c
··· 2026 2026 * On OMAP4460 the ABE DPLL fails to turn on if in idle low-power 2027 2027 * state when turning the ABE clock domain. Workaround this by 2028 2028 * locking the ABE DPLL on boot. 2029 + * Lock the ABE DPLL in any case to avoid issues with audio. 2029 2030 */ 2030 - if (cpu_is_omap446x()) { 2031 - rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck); 2032 - if (!rc) 2033 - rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ); 2034 - if (rc) 2035 - pr_err("%s: failed to configure ABE DPLL!\n", __func__); 2036 - } 2031 + rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck); 2032 + if (!rc) 2033 + rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ); 2034 + if (rc) 2035 + pr_err("%s: failed to configure ABE DPLL!\n", __func__); 2037 2036 2038 2037 return 0; 2039 2038 }
+1 -1
arch/arm/mach-omap2/devices.c
··· 639 639 return cnt; 640 640 } 641 641 642 - static void omap_init_ocp2scp(void) 642 + static void __init omap_init_ocp2scp(void) 643 643 { 644 644 struct omap_hwmod *oh; 645 645 struct platform_device *pdev;
+2 -1
arch/arm/mach-omap2/drm.c
··· 25 25 #include <linux/dma-mapping.h> 26 26 #include <linux/platform_data/omap_drm.h> 27 27 28 + #include "soc.h" 28 29 #include "omap_device.h" 29 30 #include "omap_hwmod.h" 30 31 ··· 57 56 oh->name); 58 57 } 59 58 60 - platform_data.omaprev = GET_OMAP_REVISION(); 59 + platform_data.omaprev = GET_OMAP_TYPE; 61 60 62 61 return platform_device_register(&omap_drm_device); 63 62
+5 -1
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
··· 2132 2132 * currently reset very early during boot, before I2C is 2133 2133 * available, so it doesn't seem that we have any choice in 2134 2134 * the kernel other than to avoid resetting it. 2135 + * 2136 + * Also, McPDM needs to be configured to NO_IDLE mode when it 2137 + * is in used otherwise vital clocks will be gated which 2138 + * results 'slow motion' audio playback. 2135 2139 */ 2136 - .flags = HWMOD_EXT_OPT_MAIN_CLK, 2140 + .flags = HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE, 2137 2141 .mpu_irqs = omap44xx_mcpdm_irqs, 2138 2142 .sdma_reqs = omap44xx_mcpdm_sdma_reqs, 2139 2143 .main_clk = "mcpdm_fck",
+2 -6
arch/arm/mach-omap2/timer.c
··· 165 165 struct device_node *np; 166 166 167 167 for_each_matching_node(np, match) { 168 - if (!of_device_is_available(np)) { 169 - of_node_put(np); 168 + if (!of_device_is_available(np)) 170 169 continue; 171 - } 172 170 173 - if (property && !of_get_property(np, property, NULL)) { 174 - of_node_put(np); 171 + if (property && !of_get_property(np, property, NULL)) 175 172 continue; 176 - } 177 173 178 174 of_add_property(np, &device_disabled); 179 175 return np;
+1 -1
arch/arm/mach-s3c64xx/mach-crag6410-module.c
··· 47 47 .bus_num = 0, 48 48 .chip_select = 0, 49 49 .mode = SPI_MODE_0, 50 - .irq = S3C_EINT(5), 50 + .irq = S3C_EINT(4), 51 51 .controller_data = &wm0010_spi_csinfo, 52 52 .platform_data = &wm0010_pdata, 53 53 },
+2
arch/arm/mach-s3c64xx/pm.c
··· 338 338 for (i = 0; i < ARRAY_SIZE(s3c64xx_pm_domains); i++) 339 339 pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false); 340 340 341 + #ifdef CONFIG_S3C_DEV_FB 341 342 if (dev_get_platdata(&s3c_device_fb.dev)) 342 343 pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev); 344 + #endif 343 345 344 346 return 0; 345 347 }
+10 -8
arch/arm/mm/dma-mapping.c
··· 774 774 size_t size, enum dma_data_direction dir, 775 775 void (*op)(const void *, size_t, int)) 776 776 { 777 + unsigned long pfn; 778 + size_t left = size; 779 + 780 + pfn = page_to_pfn(page) + offset / PAGE_SIZE; 781 + offset %= PAGE_SIZE; 782 + 777 783 /* 778 784 * A single sg entry may refer to multiple physically contiguous 779 785 * pages. But we still need to process highmem pages individually. 780 786 * If highmem is not configured then the bulk of this loop gets 781 787 * optimized out. 782 788 */ 783 - size_t left = size; 784 789 do { 785 790 size_t len = left; 786 791 void *vaddr; 787 792 793 + page = pfn_to_page(pfn); 794 + 788 795 if (PageHighMem(page)) { 789 - if (len + offset > PAGE_SIZE) { 790 - if (offset >= PAGE_SIZE) { 791 - page += offset / PAGE_SIZE; 792 - offset %= PAGE_SIZE; 793 - } 796 + if (len + offset > PAGE_SIZE) 794 797 len = PAGE_SIZE - offset; 795 - } 796 798 vaddr = kmap_high_get(page); 797 799 if (vaddr) { 798 800 vaddr += offset; ··· 811 809 op(vaddr, len, dir); 812 810 } 813 811 offset = 0; 814 - page++; 812 + pfn++; 815 813 left -= len; 816 814 } while (left); 817 815 }
+1 -1
arch/arm/mm/mmu.c
··· 283 283 }, 284 284 [MT_MEMORY_SO] = { 285 285 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 286 - L_PTE_MT_UNCACHED, 286 + L_PTE_MT_UNCACHED | L_PTE_XN, 287 287 .prot_l1 = PMD_TYPE_TABLE, 288 288 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S | 289 289 PMD_SECT_UNCACHED | PMD_SECT_XN,
+1 -1
arch/arm/plat-versatile/headsmp.S
··· 20 20 */ 21 21 ENTRY(versatile_secondary_startup) 22 22 mrc p15, 0, r0, c0, c0, 5 23 - and r0, r0, #15 23 + bic r0, #0xff000000 24 24 adr r4, 1f 25 25 ldmia r4, {r5, r6} 26 26 sub r4, r4, r5
+3 -3
arch/arm/vfp/entry.S
··· 22 22 @ IRQs disabled. 23 23 @ 24 24 ENTRY(do_vfp) 25 - #ifdef CONFIG_PREEMPT 25 + #ifdef CONFIG_PREEMPT_COUNT 26 26 ldr r4, [r10, #TI_PREEMPT] @ get preempt count 27 27 add r11, r4, #1 @ increment it 28 28 str r11, [r10, #TI_PREEMPT] ··· 35 35 ENDPROC(do_vfp) 36 36 37 37 ENTRY(vfp_null_entry) 38 - #ifdef CONFIG_PREEMPT 38 + #ifdef CONFIG_PREEMPT_COUNT 39 39 get_thread_info r10 40 40 ldr r4, [r10, #TI_PREEMPT] @ get preempt count 41 41 sub r11, r4, #1 @ decrement it ··· 53 53 54 54 __INIT 55 55 ENTRY(vfp_testing_entry) 56 - #ifdef CONFIG_PREEMPT 56 + #ifdef CONFIG_PREEMPT_COUNT 57 57 get_thread_info r10 58 58 ldr r4, [r10, #TI_PREEMPT] @ get preempt count 59 59 sub r11, r4, #1 @ decrement it
+2 -2
arch/arm/vfp/vfphw.S
··· 168 168 @ else it's one 32-bit instruction, so 169 169 @ always subtract 4 from the following 170 170 @ instruction address. 171 - #ifdef CONFIG_PREEMPT 171 + #ifdef CONFIG_PREEMPT_COUNT 172 172 get_thread_info r10 173 173 ldr r4, [r10, #TI_PREEMPT] @ get preempt count 174 174 sub r11, r4, #1 @ decrement it ··· 192 192 @ not recognised by VFP 193 193 194 194 DBGSTR "not VFP" 195 - #ifdef CONFIG_PREEMPT 195 + #ifdef CONFIG_PREEMPT_COUNT 196 196 get_thread_info r10 197 197 ldr r4, [r10, #TI_PREEMPT] @ get preempt count 198 198 sub r11, r4, #1 @ decrement it
+4 -1
arch/arm64/include/asm/elf.h
··· 26 26 27 27 typedef unsigned long elf_greg_t; 28 28 29 - #define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) 29 + #define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t)) 30 + #define ELF_CORE_COPY_REGS(dest, regs) \ 31 + *(struct user_pt_regs *)&(dest) = (regs)->user_regs; 32 + 30 33 typedef elf_greg_t elf_gregset_t[ELF_NGREG]; 31 34 typedef struct user_fpsimd_state elf_fpregset_t; 32 35
-27
arch/ia64/kernel/ptrace.c
··· 672 672 read_unlock(&tasklist_lock); 673 673 } 674 674 675 - static inline int 676 - thread_matches (struct task_struct *thread, unsigned long addr) 677 - { 678 - unsigned long thread_rbs_end; 679 - struct pt_regs *thread_regs; 680 - 681 - if (ptrace_check_attach(thread, 0) < 0) 682 - /* 683 - * If the thread is not in an attachable state, we'll 684 - * ignore it. The net effect is that if ADDR happens 685 - * to overlap with the portion of the thread's 686 - * register backing store that is currently residing 687 - * on the thread's kernel stack, then ptrace() may end 688 - * up accessing a stale value. But if the thread 689 - * isn't stopped, that's a problem anyhow, so we're 690 - * doing as well as we can... 691 - */ 692 - return 0; 693 - 694 - thread_regs = task_pt_regs(thread); 695 - thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL); 696 - if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end)) 697 - return 0; 698 - 699 - return 1; /* looks like we've got a winner */ 700 - } 701 - 702 675 /* 703 676 * Write f32-f127 back to task->thread.fph if it has been modified. 704 677 */
+16
arch/m68k/include/asm/dma-mapping.h
··· 21 21 extern void dma_free_coherent(struct device *, size_t, 22 22 void *, dma_addr_t); 23 23 24 + static inline void *dma_alloc_attrs(struct device *dev, size_t size, 25 + dma_addr_t *dma_handle, gfp_t flag, 26 + struct dma_attrs *attrs) 27 + { 28 + /* attrs is not supported and ignored */ 29 + return dma_alloc_coherent(dev, size, dma_handle, flag); 30 + } 31 + 32 + static inline void dma_free_attrs(struct device *dev, size_t size, 33 + void *cpu_addr, dma_addr_t dma_handle, 34 + struct dma_attrs *attrs) 35 + { 36 + /* attrs is not supported and ignored */ 37 + dma_free_coherent(dev, size, cpu_addr, dma_handle); 38 + } 39 + 24 40 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, 25 41 dma_addr_t *handle, gfp_t flag) 26 42 {
+2
arch/m68k/include/asm/pgtable_no.h
··· 64 64 */ 65 65 #define VMALLOC_START 0 66 66 #define VMALLOC_END 0xffffffff 67 + #define KMAP_START 0 68 + #define KMAP_END 0xffffffff 67 69 68 70 #include <asm-generic/pgtable.h> 69 71
+1 -1
arch/m68k/include/asm/unistd.h
··· 4 4 #include <uapi/asm/unistd.h> 5 5 6 6 7 - #define NR_syscalls 348 7 + #define NR_syscalls 349 8 8 9 9 #define __ARCH_WANT_OLD_READDIR 10 10 #define __ARCH_WANT_OLD_STAT
+1
arch/m68k/include/uapi/asm/unistd.h
··· 353 353 #define __NR_process_vm_readv 345 354 354 #define __NR_process_vm_writev 346 355 355 #define __NR_kcmp 347 356 + #define __NR_finit_module 348 356 357 357 358 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
+1
arch/m68k/kernel/syscalltable.S
··· 368 368 .long sys_process_vm_readv /* 345 */ 369 369 .long sys_process_vm_writev 370 370 .long sys_kcmp 371 + .long sys_finit_module 371 372
+5 -3
arch/m68k/mm/init.c
··· 39 39 void *empty_zero_page; 40 40 EXPORT_SYMBOL(empty_zero_page); 41 41 42 + #if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) 43 + extern void init_pointer_table(unsigned long ptable); 44 + extern pmd_t *zero_pgtable; 45 + #endif 46 + 42 47 #ifdef CONFIG_MMU 43 48 44 49 pg_data_t pg_data_map[MAX_NUMNODES]; ··· 73 68 pg_data_map[node].bdata = bootmem_node_data + node; 74 69 node_set_online(node); 75 70 } 76 - 77 - extern void init_pointer_table(unsigned long ptable); 78 - extern pmd_t *zero_pgtable; 79 71 80 72 #else /* CONFIG_MMU */ 81 73
+14 -6
arch/parisc/kernel/entry.S
··· 1865 1865 1866 1866 /* Are we being ptraced? */ 1867 1867 ldw TASK_FLAGS(%r1),%r19 1868 - ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2 1868 + ldi _TIF_SYSCALL_TRACE_MASK,%r2 1869 1869 and,COND(=) %r19,%r2,%r0 1870 1870 b,n syscall_restore_rfi 1871 1871 ··· 1978 1978 /* sr2 should be set to zero for userspace syscalls */ 1979 1979 STREG %r0,TASK_PT_SR2(%r1) 1980 1980 1981 - pt_regs_ok: 1982 1981 LDREG TASK_PT_GR31(%r1),%r2 1983 - depi 3,31,2,%r2 /* ensure return to user mode. */ 1984 - STREG %r2,TASK_PT_IAOQ0(%r1) 1982 + depi 3,31,2,%r2 /* ensure return to user mode. */ 1983 + STREG %r2,TASK_PT_IAOQ0(%r1) 1985 1984 ldo 4(%r2),%r2 1986 1985 STREG %r2,TASK_PT_IAOQ1(%r1) 1987 - copy %r25,%r16 1988 1986 b intr_restore 1989 - nop 1987 + copy %r25,%r16 1988 + 1989 + pt_regs_ok: 1990 + LDREG TASK_PT_IAOQ0(%r1),%r2 1991 + depi 3,31,2,%r2 /* ensure return to user mode. */ 1992 + STREG %r2,TASK_PT_IAOQ0(%r1) 1993 + LDREG TASK_PT_IAOQ1(%r1),%r2 1994 + depi 3,31,2,%r2 1995 + STREG %r2,TASK_PT_IAOQ1(%r1) 1996 + b intr_restore 1997 + copy %r25,%r16 1990 1998 1991 1999 .import schedule,code 1992 2000 syscall_do_resched:
+4 -2
arch/parisc/kernel/irq.c
··· 410 410 { 411 411 local_irq_disable(); /* PARANOID - should already be disabled */ 412 412 mtctl(~0UL, 23); /* EIRR : clear all pending external intr */ 413 - claim_cpu_irqs(); 414 413 #ifdef CONFIG_SMP 415 - if (!cpu_eiem) 414 + if (!cpu_eiem) { 415 + claim_cpu_irqs(); 416 416 cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ); 417 + } 417 418 #else 419 + claim_cpu_irqs(); 418 420 cpu_eiem = EIEM_MASK(TIMER_IRQ); 419 421 #endif 420 422 set_eiem(cpu_eiem); /* EIEM : enable all external intr */
+1 -1
arch/parisc/kernel/ptrace.c
··· 26 26 #include <asm/asm-offsets.h> 27 27 28 28 /* PSW bits we allow the debugger to modify */ 29 - #define USER_PSW_BITS (PSW_N | PSW_V | PSW_CB) 29 + #define USER_PSW_BITS (PSW_N | PSW_B | PSW_V | PSW_CB) 30 30 31 31 /* 32 32 * Called by kernel/ptrace.c when detaching..
+3 -1
arch/parisc/kernel/signal.c
··· 190 190 DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n", 191 191 (unsigned long)ka, sp, frame_size); 192 192 193 + /* Align alternate stack and reserve 64 bytes for the signal 194 + handler's frame marker. */ 193 195 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp)) 194 - sp = current->sas_ss_sp; /* Stacks grow up! */ 196 + sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */ 195 197 196 198 DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp); 197 199 return (void __user *) sp; /* Stacks grow up. Fun. */
+5 -6
arch/parisc/math-emu/cnv_float.h
··· 347 347 Sgl_isinexact_to_fix(sgl_value,exponent) 348 348 349 349 #define Duint_from_sgl_mantissa(sgl_value,exponent,dresultA,dresultB) \ 350 - {Sall(sgl_value) <<= SGL_EXP_LENGTH; /* left-justify */ \ 350 + {unsigned int val = Sall(sgl_value) << SGL_EXP_LENGTH; \ 351 351 if (exponent <= 31) { \ 352 - Dintp1(dresultA) = 0; \ 353 - Dintp2(dresultB) = (unsigned)Sall(sgl_value) >> (31 - exponent); \ 352 + Dintp1(dresultA) = 0; \ 353 + Dintp2(dresultB) = val >> (31 - exponent); \ 354 354 } \ 355 355 else { \ 356 - Dintp1(dresultA) = Sall(sgl_value) >> (63 - exponent); \ 357 - Dintp2(dresultB) = Sall(sgl_value) << (exponent - 31); \ 356 + Dintp1(dresultA) = val >> (63 - exponent); \ 357 + Dintp2(dresultB) = exponent <= 62 ? val << (exponent - 31) : 0; \ 358 358 } \ 359 - Sall(sgl_value) >>= SGL_EXP_LENGTH; /* return to original */ \ 360 359 } 361 360 362 361 #define Duint_setzero(dresultA,dresultB) \
+2
arch/powerpc/kvm/emulate.c
··· 39 39 #define OP_31_XOP_TRAP 4 40 40 #define OP_31_XOP_LWZX 23 41 41 #define OP_31_XOP_TRAP_64 68 42 + #define OP_31_XOP_DCBF 86 42 43 #define OP_31_XOP_LBZX 87 43 44 #define OP_31_XOP_STWX 151 44 45 #define OP_31_XOP_STBX 215 ··· 375 374 emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs); 376 375 break; 377 376 377 + case OP_31_XOP_DCBF: 378 378 case OP_31_XOP_DCBI: 379 379 /* Do nothing. The guest is performing dcbi because 380 380 * hardware DMA is not snooped by the dcache, but
-6
arch/x86/kernel/cpu/perf_event.c
··· 340 340 /* BTS is currently only allowed for user-mode. */ 341 341 if (!attr->exclude_kernel) 342 342 return -EOPNOTSUPP; 343 - 344 - if (!attr->exclude_guest) 345 - return -EOPNOTSUPP; 346 343 } 347 344 348 345 hwc->config |= config; ··· 381 384 { 382 385 if (event->attr.precise_ip) { 383 386 int precise = 0; 384 - 385 - if (!event->attr.exclude_guest) 386 - return -EOPNOTSUPP; 387 387 388 388 /* Support for constant skid */ 389 389 if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
-1
arch/x86/kernel/entry_32.S
··· 1065 1065 lea 16(%esp),%esp 1066 1066 CFI_ADJUST_CFA_OFFSET -16 1067 1067 jz 5f 1068 - addl $16,%esp 1069 1068 jmp iret_exc 1070 1069 5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */ 1071 1070 SAVE_ALL
+5 -4
arch/x86/kernel/step.c
··· 165 165 * Ensure irq/preemption can't change debugctl in between. 166 166 * Note also that both TIF_BLOCKSTEP and debugctl should 167 167 * be changed atomically wrt preemption. 168 - * FIXME: this means that set/clear TIF_BLOCKSTEP is simply 169 - * wrong if task != current, SIGKILL can wakeup the stopped 170 - * tracee and set/clear can play with the running task, this 171 - * can confuse the next __switch_to_xtra(). 168 + * 169 + * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if 170 + * task is current or it can't be running, otherwise we can race 171 + * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but 172 + * PTRACE_KILL is not safe. 172 173 */ 173 174 local_irq_disable(); 174 175 debugctl = get_debugctlmsr();
-7
arch/x86/xen/smp.c
··· 432 432 play_dead_common(); 433 433 HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); 434 434 cpu_bringup(); 435 - /* 436 - * Balance out the preempt calls - as we are running in cpu_idle 437 - * loop which has been called at bootup from cpu_bringup_and_idle. 438 - * The cpucpu_bringup_and_idle called cpu_bringup which made a 439 - * preempt_disable() So this preempt_enable will balance it out. 440 - */ 441 - preempt_enable(); 442 435 } 443 436 444 437 #else /* !CONFIG_HOTPLUG_CPU */
+3
drivers/acpi/apei/apei-base.c
··· 590 590 if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 && 591 591 *access_bit_width < 32) 592 592 *access_bit_width = 32; 593 + else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 && 594 + *access_bit_width < 64) 595 + *access_bit_width = 64; 593 596 594 597 if ((bit_width + bit_offset) > *access_bit_width) { 595 598 pr_warning(FW_BUG APEI_PFX
+4
drivers/acpi/processor_idle.c
··· 958 958 return -EINVAL; 959 959 } 960 960 961 + if (!dev) 962 + return -EINVAL; 963 + 961 964 dev->cpu = pr->id; 962 965 963 966 if (max_cstate == 0) ··· 1152 1149 } 1153 1150 1154 1151 /* Populate Updated C-state information */ 1152 + acpi_processor_get_power_info(pr); 1155 1153 acpi_processor_setup_cpuidle_states(pr); 1156 1154 1157 1155 /* Enable all cpuidle devices */
+7
drivers/acpi/processor_perflib.c
··· 340 340 if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) 341 341 || boot_cpu_data.x86 == 0x11) { 342 342 rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi); 343 + /* 344 + * MSR C001_0064+: 345 + * Bit 63: PstateEn. Read-write. If set, the P-state is valid. 346 + */ 347 + if (!(hi & BIT(31))) 348 + return; 349 + 343 350 fid = lo & 0x3f; 344 351 did = (lo >> 6) & 7; 345 352 if (boot_cpu_data.x86 == 0x10)
+7 -1
drivers/ata/ahci.c
··· 53 53 54 54 enum { 55 55 AHCI_PCI_BAR_STA2X11 = 0, 56 + AHCI_PCI_BAR_ENMOTUS = 2, 56 57 AHCI_PCI_BAR_STANDARD = 5, 57 58 }; 58 59 ··· 410 409 { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */ 411 410 { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ 412 411 { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ 412 + 413 + /* Enmotus */ 414 + { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, 413 415 414 416 /* Generic, PCI class code for AHCI */ 415 417 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, ··· 1102 1098 dev_info(&pdev->dev, 1103 1099 "PDC42819 can only drive SATA devices with this driver\n"); 1104 1100 1105 - /* The Connext uses non-standard BAR */ 1101 + /* Both Connext and Enmotus devices use non-standard BARs */ 1106 1102 if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06) 1107 1103 ahci_pci_bar = AHCI_PCI_BAR_STA2X11; 1104 + else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000) 1105 + ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS; 1108 1106 1109 1107 /* acquire resources */ 1110 1108 rc = pcim_enable_device(pdev);
+3 -3
drivers/ata/libahci.c
··· 1951 1951 /* Use the nominal value 10 ms if the read MDAT is zero, 1952 1952 * the nominal value of DETO is 20 ms. 1953 1953 */ 1954 - if (dev->sata_settings[ATA_LOG_DEVSLP_VALID] & 1954 + if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] & 1955 1955 ATA_LOG_DEVSLP_VALID_MASK) { 1956 - mdat = dev->sata_settings[ATA_LOG_DEVSLP_MDAT] & 1956 + mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] & 1957 1957 ATA_LOG_DEVSLP_MDAT_MASK; 1958 1958 if (!mdat) 1959 1959 mdat = 10; 1960 - deto = dev->sata_settings[ATA_LOG_DEVSLP_DETO]; 1960 + deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO]; 1961 1961 if (!deto) 1962 1962 deto = 20; 1963 1963 } else {
+13 -9
drivers/ata/libata-core.c
··· 2325 2325 } 2326 2326 } 2327 2327 2328 - /* check and mark DevSlp capability */ 2329 - if (ata_id_has_devslp(dev->id)) 2330 - dev->flags |= ATA_DFLAG_DEVSLP; 2331 - 2332 - /* Obtain SATA Settings page from Identify Device Data Log, 2333 - * which contains DevSlp timing variables etc. 2334 - * Exclude old devices with ata_id_has_ncq() 2328 + /* Check and mark DevSlp capability. Get DevSlp timing variables 2329 + * from SATA Settings page of Identify Device Data Log. 2335 2330 */ 2336 - if (ata_id_has_ncq(dev->id)) { 2331 + if (ata_id_has_devslp(dev->id)) { 2332 + u8 sata_setting[ATA_SECT_SIZE]; 2333 + int i, j; 2334 + 2335 + dev->flags |= ATA_DFLAG_DEVSLP; 2337 2336 err_mask = ata_read_log_page(dev, 2338 2337 ATA_LOG_SATA_ID_DEV_DATA, 2339 2338 ATA_LOG_SATA_SETTINGS, 2340 - dev->sata_settings, 2339 + sata_setting, 2341 2340 1); 2342 2341 if (err_mask) 2343 2342 ata_dev_dbg(dev, 2344 2343 "failed to get Identify Device Data, Emask 0x%x\n", 2345 2344 err_mask); 2345 + else 2346 + for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) { 2347 + j = ATA_LOG_DEVSLP_OFFSET + i; 2348 + dev->devslp_timing[i] = sata_setting[j]; 2349 + } 2346 2350 } 2347 2351 2348 2352 dev->cdb_len = 16;
+1 -1
drivers/ata/libata-eh.c
··· 2094 2094 */ 2095 2095 static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc) 2096 2096 { 2097 - if (qc->flags & AC_ERR_MEDIA) 2097 + if (qc->err_mask & AC_ERR_MEDIA) 2098 2098 return 0; /* don't retry media errors */ 2099 2099 if (qc->flags & ATA_QCFLAG_IO) 2100 2100 return 1; /* otherwise retry anything from fs stack */
-2
drivers/base/regmap/regmap-debugfs.c
··· 121 121 c->max = p - 1; 122 122 list_add_tail(&c->list, 123 123 &map->debugfs_off_cache); 124 - } else { 125 - return base; 126 124 } 127 125 128 126 /*
+1 -1
drivers/base/regmap/regmap.c
··· 1106 1106 * @val_count: Number of registers to write 1107 1107 * 1108 1108 * This function is intended to be used for writing a large block of 1109 - * data to be device either in single transfer or multiple transfer. 1109 + * data to the device either in single transfer or multiple transfer. 1110 1110 * 1111 1111 * A value of zero will be returned on success, a negative errno will 1112 1112 * be returned in error cases.
+6 -1
drivers/block/virtio_blk.c
··· 889 889 { 890 890 struct virtio_blk *vblk = vdev->priv; 891 891 int index = vblk->index; 892 + int refc; 892 893 893 894 /* Prevent config work handler from accessing the device. */ 894 895 mutex_lock(&vblk->config_lock); ··· 904 903 905 904 flush_work(&vblk->config_work); 906 905 906 + refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount); 907 907 put_disk(vblk->disk); 908 908 mempool_destroy(vblk->pool); 909 909 vdev->config->del_vqs(vdev); 910 910 kfree(vblk); 911 - ida_simple_remove(&vd_index_ida, index); 911 + 912 + /* Only free device id if we don't have any users */ 913 + if (refc == 1) 914 + ida_simple_remove(&vd_index_ida, index); 912 915 } 913 916 914 917 #ifdef CONFIG_PM
+6 -3
drivers/clk/mvebu/clk-cpu.c
··· 124 124 125 125 clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL); 126 126 if (WARN_ON(!clks)) 127 - return; 127 + goto clks_out; 128 128 129 129 for_each_node_by_type(dn, "cpu") { 130 130 struct clk_init_data init; ··· 134 134 int cpu, err; 135 135 136 136 if (WARN_ON(!clk_name)) 137 - return; 137 + goto bail_out; 138 138 139 139 err = of_property_read_u32(dn, "reg", &cpu); 140 140 if (WARN_ON(err)) 141 - return; 141 + goto bail_out; 142 142 143 143 sprintf(clk_name, "cpu%d", cpu); 144 144 parent_clk = of_clk_get(node, 0); ··· 167 167 return; 168 168 bail_out: 169 169 kfree(clks); 170 + while(ncpus--) 171 + kfree(cpuclk[ncpus].clk_name); 172 + clks_out: 170 173 kfree(cpuclk); 171 174 } 172 175
+1 -1
drivers/cpufreq/Kconfig.x86
··· 106 106 config X86_POWERNOW_K8 107 107 tristate "AMD Opteron/Athlon64 PowerNow!" 108 108 select CPU_FREQ_TABLE 109 - depends on ACPI && ACPI_PROCESSOR 109 + depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ 110 110 help 111 111 This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. 112 112 Support for K10 and newer processors is now in acpi-cpufreq.
+7
drivers/cpufreq/acpi-cpufreq.c
··· 1030 1030 late_initcall(acpi_cpufreq_init); 1031 1031 module_exit(acpi_cpufreq_exit); 1032 1032 1033 + static const struct x86_cpu_id acpi_cpufreq_ids[] = { 1034 + X86_FEATURE_MATCH(X86_FEATURE_ACPI), 1035 + X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE), 1036 + {} 1037 + }; 1038 + MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids); 1039 + 1033 1040 MODULE_ALIAS("acpi");
+5
drivers/cpufreq/cpufreq-cpu0.c
··· 71 71 } 72 72 73 73 if (cpu_reg) { 74 + rcu_read_lock(); 74 75 opp = opp_find_freq_ceil(cpu_dev, &freq_Hz); 75 76 if (IS_ERR(opp)) { 77 + rcu_read_unlock(); 76 78 pr_err("failed to find OPP for %ld\n", freq_Hz); 77 79 return PTR_ERR(opp); 78 80 } 79 81 volt = opp_get_voltage(opp); 82 + rcu_read_unlock(); 80 83 tol = volt * voltage_tolerance / 100; 81 84 volt_old = regulator_get_voltage(cpu_reg); 82 85 } ··· 239 236 */ 240 237 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) 241 238 ; 239 + rcu_read_lock(); 242 240 opp = opp_find_freq_exact(cpu_dev, 243 241 freq_table[0].frequency * 1000, true); 244 242 min_uV = opp_get_voltage(opp); 245 243 opp = opp_find_freq_exact(cpu_dev, 246 244 freq_table[i-1].frequency * 1000, true); 247 245 max_uV = opp_get_voltage(opp); 246 + rcu_read_unlock(); 248 247 ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); 249 248 if (ret > 0) 250 249 transition_latency += ret * 1000;
+3
drivers/cpufreq/omap-cpufreq.c
··· 110 110 freq = ret; 111 111 112 112 if (mpu_reg) { 113 + rcu_read_lock(); 113 114 opp = opp_find_freq_ceil(mpu_dev, &freq); 114 115 if (IS_ERR(opp)) { 116 + rcu_read_unlock(); 115 117 dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n", 116 118 __func__, freqs.new); 117 119 return -EINVAL; 118 120 } 119 121 volt = opp_get_voltage(opp); 122 + rcu_read_unlock(); 120 123 tol = volt * OPP_TOLERANCE / 100; 121 124 volt_old = regulator_get_voltage(mpu_reg); 122 125 }
+5
drivers/devfreq/devfreq.c
··· 994 994 * @freq: The frequency given to target function 995 995 * @flags: Flags handed from devfreq framework. 996 996 * 997 + * Locking: This function must be called under rcu_read_lock(). opp is a rcu 998 + * protected pointer. The reason for the same is that the opp pointer which is 999 + * returned will remain valid for use with opp_get_{voltage, freq} only while 1000 + * under the locked area. The pointer returned must be used prior to unlocking 1001 + * with rcu_read_unlock() to maintain the integrity of the pointer. 997 1002 */ 998 1003 struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, 999 1004 u32 flags)
+67 -27
drivers/devfreq/exynos4_bus.c
··· 73 73 #define EX4210_LV_NUM (LV_2 + 1) 74 74 #define EX4x12_LV_NUM (LV_4 + 1) 75 75 76 + /** 77 + * struct busfreq_opp_info - opp information for bus 78 + * @rate: Frequency in hertz 79 + * @volt: Voltage in microvolts corresponding to this OPP 80 + */ 81 + struct busfreq_opp_info { 82 + unsigned long rate; 83 + unsigned long volt; 84 + }; 85 + 76 86 struct busfreq_data { 77 87 enum exynos4_busf_type type; 78 88 struct device *dev; ··· 90 80 bool disabled; 91 81 struct regulator *vdd_int; 92 82 struct regulator *vdd_mif; /* Exynos4412/4212 only */ 93 - struct opp *curr_opp; 83 + struct busfreq_opp_info curr_oppinfo; 94 84 struct exynos4_ppmu dmc[2]; 95 85 96 86 struct notifier_block pm_notifier; ··· 306 296 }; 307 297 308 298 309 - static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp) 299 + static int exynos4210_set_busclk(struct busfreq_data *data, 300 + struct busfreq_opp_info *oppi) 310 301 { 311 302 unsigned int index; 312 303 unsigned int tmp; 313 304 314 305 for (index = LV_0; index < EX4210_LV_NUM; index++) 315 - if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk) 306 + if (oppi->rate == exynos4210_busclk_table[index].clk) 316 307 break; 317 308 318 309 if (index == EX4210_LV_NUM) ··· 372 361 return 0; 373 362 } 374 363 375 - static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp) 364 + static int exynos4x12_set_busclk(struct busfreq_data *data, 365 + struct busfreq_opp_info *oppi) 376 366 { 377 367 unsigned int index; 378 368 unsigned int tmp; 379 369 380 370 for (index = LV_0; index < EX4x12_LV_NUM; index++) 381 - if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk) 371 + if (oppi->rate == exynos4x12_mifclk_table[index].clk) 382 372 break; 383 373 384 374 if (index == EX4x12_LV_NUM) ··· 588 576 return -EINVAL; 589 577 } 590 578 591 - static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp, 592 - struct opp *oldopp) 579 + static int exynos4_bus_setvolt(struct busfreq_data 
*data, 580 + struct busfreq_opp_info *oppi, 581 + struct busfreq_opp_info *oldoppi) 593 582 { 594 583 int err = 0, tmp; 595 - unsigned long volt = opp_get_voltage(opp); 584 + unsigned long volt = oppi->volt; 596 585 597 586 switch (data->type) { 598 587 case TYPE_BUSF_EXYNOS4210: ··· 608 595 if (err) 609 596 break; 610 597 611 - tmp = exynos4x12_get_intspec(opp_get_freq(opp)); 598 + tmp = exynos4x12_get_intspec(oppi->rate); 612 599 if (tmp < 0) { 613 600 err = tmp; 614 601 regulator_set_voltage(data->vdd_mif, 615 - opp_get_voltage(oldopp), 602 + oldoppi->volt, 616 603 MAX_SAFEVOLT); 617 604 break; 618 605 } ··· 622 609 /* Try to recover */ 623 610 if (err) 624 611 regulator_set_voltage(data->vdd_mif, 625 - opp_get_voltage(oldopp), 612 + oldoppi->volt, 626 613 MAX_SAFEVOLT); 627 614 break; 628 615 default: ··· 639 626 struct platform_device *pdev = container_of(dev, struct platform_device, 640 627 dev); 641 628 struct busfreq_data *data = platform_get_drvdata(pdev); 642 - struct opp *opp = devfreq_recommended_opp(dev, _freq, flags); 643 - unsigned long freq = opp_get_freq(opp); 644 - unsigned long old_freq = opp_get_freq(data->curr_opp); 629 + struct opp *opp; 630 + unsigned long freq; 631 + unsigned long old_freq = data->curr_oppinfo.rate; 632 + struct busfreq_opp_info new_oppinfo; 645 633 646 - if (IS_ERR(opp)) 634 + rcu_read_lock(); 635 + opp = devfreq_recommended_opp(dev, _freq, flags); 636 + if (IS_ERR(opp)) { 637 + rcu_read_unlock(); 647 638 return PTR_ERR(opp); 639 + } 640 + new_oppinfo.rate = opp_get_freq(opp); 641 + new_oppinfo.volt = opp_get_voltage(opp); 642 + rcu_read_unlock(); 643 + freq = new_oppinfo.rate; 648 644 649 645 if (old_freq == freq) 650 646 return 0; 651 647 652 - dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp)); 648 + dev_dbg(dev, "targetting %lukHz %luuV\n", freq, new_oppinfo.volt); 653 649 654 650 mutex_lock(&data->lock); 655 651 ··· 666 644 goto out; 667 645 668 646 if (old_freq < freq) 669 - err = 
exynos4_bus_setvolt(data, opp, data->curr_opp); 647 + err = exynos4_bus_setvolt(data, &new_oppinfo, 648 + &data->curr_oppinfo); 670 649 if (err) 671 650 goto out; 672 651 673 652 if (old_freq != freq) { 674 653 switch (data->type) { 675 654 case TYPE_BUSF_EXYNOS4210: 676 - err = exynos4210_set_busclk(data, opp); 655 + err = exynos4210_set_busclk(data, &new_oppinfo); 677 656 break; 678 657 case TYPE_BUSF_EXYNOS4x12: 679 - err = exynos4x12_set_busclk(data, opp); 658 + err = exynos4x12_set_busclk(data, &new_oppinfo); 680 659 break; 681 660 default: 682 661 err = -EINVAL; ··· 687 664 goto out; 688 665 689 666 if (old_freq > freq) 690 - err = exynos4_bus_setvolt(data, opp, data->curr_opp); 667 + err = exynos4_bus_setvolt(data, &new_oppinfo, 668 + &data->curr_oppinfo); 691 669 if (err) 692 670 goto out; 693 671 694 - data->curr_opp = opp; 672 + data->curr_oppinfo = new_oppinfo; 695 673 out: 696 674 mutex_unlock(&data->lock); 697 675 return err; ··· 726 702 727 703 exynos4_read_ppmu(data); 728 704 busier_dmc = exynos4_get_busier_dmc(data); 729 - stat->current_frequency = opp_get_freq(data->curr_opp); 705 + stat->current_frequency = data->curr_oppinfo.rate; 730 706 731 707 if (busier_dmc) 732 708 addr = S5P_VA_DMC1; ··· 957 933 struct busfreq_data *data = container_of(this, struct busfreq_data, 958 934 pm_notifier); 959 935 struct opp *opp; 936 + struct busfreq_opp_info new_oppinfo; 960 937 unsigned long maxfreq = ULONG_MAX; 961 938 int err = 0; 962 939 ··· 968 943 969 944 data->disabled = true; 970 945 946 + rcu_read_lock(); 971 947 opp = opp_find_freq_floor(data->dev, &maxfreq); 948 + if (IS_ERR(opp)) { 949 + rcu_read_unlock(); 950 + dev_err(data->dev, "%s: unable to find a min freq\n", 951 + __func__); 952 + return PTR_ERR(opp); 953 + } 954 + new_oppinfo.rate = opp_get_freq(opp); 955 + new_oppinfo.volt = opp_get_voltage(opp); 956 + rcu_read_unlock(); 972 957 973 - err = exynos4_bus_setvolt(data, opp, data->curr_opp); 958 + err = exynos4_bus_setvolt(data, &new_oppinfo, 
959 + &data->curr_oppinfo); 974 960 if (err) 975 961 goto unlock; 976 962 977 963 switch (data->type) { 978 964 case TYPE_BUSF_EXYNOS4210: 979 - err = exynos4210_set_busclk(data, opp); 965 + err = exynos4210_set_busclk(data, &new_oppinfo); 980 966 break; 981 967 case TYPE_BUSF_EXYNOS4x12: 982 - err = exynos4x12_set_busclk(data, opp); 968 + err = exynos4x12_set_busclk(data, &new_oppinfo); 983 969 break; 984 970 default: 985 971 err = -EINVAL; ··· 998 962 if (err) 999 963 goto unlock; 1000 964 1001 - data->curr_opp = opp; 965 + data->curr_oppinfo = new_oppinfo; 1002 966 unlock: 1003 967 mutex_unlock(&data->lock); 1004 968 if (err) ··· 1063 1027 } 1064 1028 } 1065 1029 1030 + rcu_read_lock(); 1066 1031 opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq); 1067 1032 if (IS_ERR(opp)) { 1033 + rcu_read_unlock(); 1068 1034 dev_err(dev, "Invalid initial frequency %lu kHz.\n", 1069 1035 exynos4_devfreq_profile.initial_freq); 1070 1036 return PTR_ERR(opp); 1071 1037 } 1072 - data->curr_opp = opp; 1038 + data->curr_oppinfo.rate = opp_get_freq(opp); 1039 + data->curr_oppinfo.volt = opp_get_voltage(opp); 1040 + rcu_read_unlock(); 1073 1041 1074 1042 platform_set_drvdata(pdev, data); 1075 1043
+2 -3
drivers/dma/imx-dma.c
··· 684 684 break; 685 685 } 686 686 687 - imxdmac->hw_chaining = 1; 688 - if (!imxdma_hw_chain(imxdmac)) 689 - return -EINVAL; 687 + imxdmac->hw_chaining = 0; 688 + 690 689 imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) | 691 690 ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | 692 691 CCR_REN;
+1 -1
drivers/dma/ioat/dma_v3.c
··· 951 951 goto free_resources; 952 952 } 953 953 } 954 - dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE); 954 + dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); 955 955 956 956 /* skip validate if the capability is not present */ 957 957 if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
+6 -2
drivers/dma/tegra20-apb-dma.c
··· 266 266 if (async_tx_test_ack(&dma_desc->txd)) { 267 267 list_del(&dma_desc->node); 268 268 spin_unlock_irqrestore(&tdc->lock, flags); 269 + dma_desc->txd.flags = 0; 269 270 return dma_desc; 270 271 } 271 272 } ··· 1051 1050 TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; 1052 1051 ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; 1053 1052 1054 - csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC; 1053 + csr |= TEGRA_APBDMA_CSR_FLOW; 1054 + if (flags & DMA_PREP_INTERRUPT) 1055 + csr |= TEGRA_APBDMA_CSR_IE_EOC; 1055 1056 csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; 1056 1057 1057 1058 apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; ··· 1098 1095 mem += len; 1099 1096 } 1100 1097 sg_req->last_sg = true; 1101 - dma_desc->txd.flags = 0; 1098 + if (flags & DMA_CTRL_ACK) 1099 + dma_desc->txd.flags = DMA_CTRL_ACK; 1102 1100 1103 1101 /* 1104 1102 * Make sure that mode should not be conflicting with currently
-6
drivers/gpio/gpio-mvebu.c
··· 547 547 mvchip->membase = devm_request_and_ioremap(&pdev->dev, res); 548 548 if (! mvchip->membase) { 549 549 dev_err(&pdev->dev, "Cannot ioremap\n"); 550 - kfree(mvchip->chip.label); 551 550 return -ENOMEM; 552 551 } 553 552 ··· 556 557 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 557 558 if (! res) { 558 559 dev_err(&pdev->dev, "Cannot get memory resource\n"); 559 - kfree(mvchip->chip.label); 560 560 return -ENODEV; 561 561 } 562 562 563 563 mvchip->percpu_membase = devm_request_and_ioremap(&pdev->dev, res); 564 564 if (! mvchip->percpu_membase) { 565 565 dev_err(&pdev->dev, "Cannot ioremap\n"); 566 - kfree(mvchip->chip.label); 567 566 return -ENOMEM; 568 567 } 569 568 } ··· 622 625 mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1); 623 626 if (mvchip->irqbase < 0) { 624 627 dev_err(&pdev->dev, "no irqs\n"); 625 - kfree(mvchip->chip.label); 626 628 return -ENOMEM; 627 629 } 628 630 ··· 629 633 mvchip->membase, handle_level_irq); 630 634 if (! gc) { 631 635 dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n"); 632 - kfree(mvchip->chip.label); 633 636 return -ENOMEM; 634 637 } 635 638 ··· 663 668 irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST, 664 669 IRQ_LEVEL | IRQ_NOPROBE); 665 670 kfree(gc); 666 - kfree(mvchip->chip.label); 667 671 return -ENODEV; 668 672 } 669 673
+7 -7
drivers/gpio/gpio-samsung.c
··· 32 32 33 33 #include <mach/hardware.h> 34 34 #include <mach/map.h> 35 - #include <mach/regs-clock.h> 36 35 #include <mach/regs-gpio.h> 37 36 38 37 #include <plat/cpu.h> ··· 445 446 }; 446 447 #endif 447 448 448 - #if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5) 449 + #if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_SOC_EXYNOS5250) 449 450 static struct samsung_gpio_cfg exynos_gpio_cfg = { 450 451 .set_pull = exynos_gpio_setpull, 451 452 .get_pull = exynos_gpio_getpull, ··· 2445 2446 }; 2446 2447 #endif 2447 2448 2448 - #ifdef CONFIG_ARCH_EXYNOS5 2449 + #ifdef CONFIG_SOC_EXYNOS5250 2449 2450 static struct samsung_gpio_chip exynos5_gpios_1[] = { 2450 2451 { 2451 2452 .chip = { ··· 2613 2614 }; 2614 2615 #endif 2615 2616 2616 - #ifdef CONFIG_ARCH_EXYNOS5 2617 + #ifdef CONFIG_SOC_EXYNOS5250 2617 2618 static struct samsung_gpio_chip exynos5_gpios_2[] = { 2618 2619 { 2619 2620 .chip = { ··· 2674 2675 }; 2675 2676 #endif 2676 2677 2677 - #ifdef CONFIG_ARCH_EXYNOS5 2678 + #ifdef CONFIG_SOC_EXYNOS5250 2678 2679 static struct samsung_gpio_chip exynos5_gpios_3[] = { 2679 2680 { 2680 2681 .chip = { ··· 2710 2711 }; 2711 2712 #endif 2712 2713 2713 - #ifdef CONFIG_ARCH_EXYNOS5 2714 + #ifdef CONFIG_SOC_EXYNOS5250 2714 2715 static struct samsung_gpio_chip exynos5_gpios_4[] = { 2715 2716 { 2716 2717 .chip = { ··· 3009 3010 int i, nr_chips; 3010 3011 int group = 0; 3011 3012 3012 - #ifdef CONFIG_PINCTRL_SAMSUNG 3013 + #if defined(CONFIG_PINCTRL_EXYNOS) || defined(CONFIG_PINCTRL_EXYNOS5440) 3013 3014 /* 3014 3015 * This gpio driver includes support for device tree support and there 3015 3016 * are platforms using it. 
In order to maintain compatibility with those ··· 3025 3026 static const struct of_device_id exynos_pinctrl_ids[] = { 3026 3027 { .compatible = "samsung,pinctrl-exynos4210", }, 3027 3028 { .compatible = "samsung,pinctrl-exynos4x12", }, 3029 + { .compatible = "samsung,pinctrl-exynos5440", }, 3028 3030 }; 3029 3031 for_each_matching_node(pctrl_np, exynos_pinctrl_ids) 3030 3032 if (pctrl_np && of_device_is_available(pctrl_np))
+3
drivers/gpu/drm/i915/i915_debugfs.c
··· 641 641 seq_printf(m, "%s command stream:\n", ring_str(ring)); 642 642 seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]); 643 643 seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); 644 + seq_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); 644 645 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); 645 646 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); 646 647 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); ··· 694 693 seq_printf(m, "EIR: 0x%08x\n", error->eir); 695 694 seq_printf(m, "IER: 0x%08x\n", error->ier); 696 695 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 696 + seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); 697 + seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr); 697 698 seq_printf(m, "CCID: 0x%08x\n", error->ccid); 698 699 699 700 for (i = 0; i < dev_priv->num_fence_regs; i++)
+3
drivers/gpu/drm/i915/i915_drv.h
··· 188 188 u32 pgtbl_er; 189 189 u32 ier; 190 190 u32 ccid; 191 + u32 derrmr; 192 + u32 forcewake; 191 193 bool waiting[I915_NUM_RINGS]; 192 194 u32 pipestat[I915_MAX_PIPES]; 193 195 u32 tail[I915_NUM_RINGS]; 194 196 u32 head[I915_NUM_RINGS]; 197 + u32 ctl[I915_NUM_RINGS]; 195 198 u32 ipeir[I915_NUM_RINGS]; 196 199 u32 ipehr[I915_NUM_RINGS]; 197 200 u32 instdone[I915_NUM_RINGS];
+21
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 539 539 total = 0; 540 540 for (i = 0; i < count; i++) { 541 541 struct drm_i915_gem_relocation_entry __user *user_relocs; 542 + u64 invalid_offset = (u64)-1; 543 + int j; 542 544 543 545 user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; 544 546 ··· 549 547 ret = -EFAULT; 550 548 mutex_lock(&dev->struct_mutex); 551 549 goto err; 550 + } 551 + 552 + /* As we do not update the known relocation offsets after 553 + * relocating (due to the complexities in lock handling), 554 + * we need to mark them as invalid now so that we force the 555 + * relocation processing next time. Just in case the target 556 + * object is evicted and then rebound into its old 557 + * presumed_offset before the next execbuffer - if that 558 + * happened we would make the mistake of assuming that the 559 + * relocations were valid. 560 + */ 561 + for (j = 0; j < exec[i].relocation_count; j++) { 562 + if (copy_to_user(&user_relocs[j].presumed_offset, 563 + &invalid_offset, 564 + sizeof(invalid_offset))) { 565 + ret = -EFAULT; 566 + mutex_lock(&dev->struct_mutex); 567 + goto err; 568 + } 552 569 } 553 570 554 571 reloc_offset[i] = total;
+11
drivers/gpu/drm/i915/i915_irq.c
··· 1157 1157 error->acthd[ring->id] = intel_ring_get_active_head(ring); 1158 1158 error->head[ring->id] = I915_READ_HEAD(ring); 1159 1159 error->tail[ring->id] = I915_READ_TAIL(ring); 1160 + error->ctl[ring->id] = I915_READ_CTL(ring); 1160 1161 1161 1162 error->cpu_ring_head[ring->id] = ring->head; 1162 1163 error->cpu_ring_tail[ring->id] = ring->tail; ··· 1251 1250 error->ier = I915_READ16(IER); 1252 1251 else 1253 1252 error->ier = I915_READ(IER); 1253 + 1254 + if (INTEL_INFO(dev)->gen >= 6) 1255 + error->derrmr = I915_READ(DERRMR); 1256 + 1257 + if (IS_VALLEYVIEW(dev)) 1258 + error->forcewake = I915_READ(FORCEWAKE_VLV); 1259 + else if (INTEL_INFO(dev)->gen >= 7) 1260 + error->forcewake = I915_READ(FORCEWAKE_MT); 1261 + else if (INTEL_INFO(dev)->gen == 6) 1262 + error->forcewake = I915_READ(FORCEWAKE); 1254 1263 1255 1264 for_each_pipe(pipe) 1256 1265 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+2
drivers/gpu/drm/i915/i915_reg.h
··· 512 512 #define GEN7_ERR_INT 0x44040 513 513 #define ERR_INT_MMIO_UNCLAIMED (1<<13) 514 514 515 + #define DERRMR 0x44050 516 + 515 517 /* GM45+ chicken bits -- debug workaround bits that may be required 516 518 * for various sorts of correct behavior. The top 16 bits of each are 517 519 * the enables for writing to the corresponding low bit.
+32 -15
drivers/gpu/drm/i915/intel_dp.c
··· 2579 2579 2580 2580 static void 2581 2581 intel_dp_init_panel_power_sequencer(struct drm_device *dev, 2582 - struct intel_dp *intel_dp) 2582 + struct intel_dp *intel_dp, 2583 + struct edp_power_seq *out) 2583 2584 { 2584 2585 struct drm_i915_private *dev_priv = dev->dev_private; 2585 2586 struct edp_power_seq cur, vbt, spec, final; ··· 2651 2650 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 2652 2651 #undef get_delay 2653 2652 2653 + DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", 2654 + intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, 2655 + intel_dp->panel_power_cycle_delay); 2656 + 2657 + DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 2658 + intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 2659 + 2660 + if (out) 2661 + *out = final; 2662 + } 2663 + 2664 + static void 2665 + intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, 2666 + struct intel_dp *intel_dp, 2667 + struct edp_power_seq *seq) 2668 + { 2669 + struct drm_i915_private *dev_priv = dev->dev_private; 2670 + u32 pp_on, pp_off, pp_div; 2671 + 2654 2672 /* And finally store the new values in the power sequencer. */ 2655 - pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 2656 - (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT); 2657 - pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 2658 - (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT); 2673 + pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 2674 + (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); 2675 + pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 2676 + (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); 2659 2677 /* Compute the divisor for the pp clock, simply match the Bspec 2660 2678 * formula. 
*/ 2661 2679 pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1) 2662 2680 << PP_REFERENCE_DIVIDER_SHIFT; 2663 - pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000) 2681 + pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) 2664 2682 << PANEL_POWER_CYCLE_DELAY_SHIFT); 2665 2683 2666 2684 /* Haswell doesn't have any port selection bits for the panel ··· 2694 2674 I915_WRITE(PCH_PP_ON_DELAYS, pp_on); 2695 2675 I915_WRITE(PCH_PP_OFF_DELAYS, pp_off); 2696 2676 I915_WRITE(PCH_PP_DIVISOR, pp_div); 2697 - 2698 - 2699 - DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", 2700 - intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, 2701 - intel_dp->panel_power_cycle_delay); 2702 - 2703 - DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 2704 - intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 2705 2677 2706 2678 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 2707 2679 I915_READ(PCH_PP_ON_DELAYS), ··· 2711 2699 struct drm_device *dev = intel_encoder->base.dev; 2712 2700 struct drm_i915_private *dev_priv = dev->dev_private; 2713 2701 struct drm_display_mode *fixed_mode = NULL; 2702 + struct edp_power_seq power_seq = { 0 }; 2714 2703 enum port port = intel_dig_port->port; 2715 2704 const char *name = NULL; 2716 2705 int type; ··· 2784 2771 } 2785 2772 2786 2773 if (is_edp(intel_dp)) 2787 - intel_dp_init_panel_power_sequencer(dev, intel_dp); 2774 + intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 2788 2775 2789 2776 intel_dp_i2c_init(intel_dp, intel_connector, name); 2790 2777 ··· 2810 2797 intel_dp_destroy(connector); 2811 2798 return; 2812 2799 } 2800 + 2801 + /* We now know it's not a ghost, init power sequence regs. */ 2802 + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 2803 + &power_seq); 2813 2804 2814 2805 ironlake_edp_panel_vdd_on(intel_dp); 2815 2806 edid = drm_get_edid(connector, &intel_dp->adapter);
+12 -5
drivers/gpu/drm/i915/intel_pm.c
··· 4250 4250 static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) 4251 4251 { 4252 4252 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); 4253 - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ 4253 + /* something from same cacheline, but !FORCEWAKE_MT */ 4254 + POSTING_READ(ECOBUS); 4254 4255 } 4255 4256 4256 4257 static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) ··· 4268 4267 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 4269 4268 4270 4269 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); 4271 - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ 4270 + /* something from same cacheline, but !FORCEWAKE_MT */ 4271 + POSTING_READ(ECOBUS); 4272 4272 4273 4273 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), 4274 4274 FORCEWAKE_ACK_TIMEOUT_MS)) ··· 4306 4304 static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) 4307 4305 { 4308 4306 I915_WRITE_NOTRACE(FORCEWAKE, 0); 4309 - /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ 4307 + /* something from same cacheline, but !FORCEWAKE */ 4308 + POSTING_READ(ECOBUS); 4310 4309 gen6_gt_check_fifodbg(dev_priv); 4311 4310 } 4312 4311 4313 4312 static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) 4314 4313 { 4315 4314 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 4316 - /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ 4315 + /* something from same cacheline, but !FORCEWAKE_MT */ 4316 + POSTING_READ(ECOBUS); 4317 4317 gen6_gt_check_fifodbg(dev_priv); 4318 4318 } 4319 4319 ··· 4355 4351 static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) 4356 4352 { 4357 4353 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); 4354 + /* something from same cacheline, but !FORCEWAKE_VLV */ 4355 + POSTING_READ(FORCEWAKE_ACK_VLV); 4358 4356 } 4359 4357 4360 4358 static void 
vlv_force_wake_get(struct drm_i915_private *dev_priv) ··· 4377 4371 static void vlv_force_wake_put(struct drm_i915_private *dev_priv) 4378 4372 { 4379 4373 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 4380 - /* The below doubles as a POSTING_READ */ 4374 + /* something from same cacheline, but !FORCEWAKE_VLV */ 4375 + POSTING_READ(FORCEWAKE_ACK_VLV); 4381 4376 gen6_gt_check_fifodbg(dev_priv); 4382 4377 } 4383 4378
+6
drivers/gpu/drm/radeon/evergreen.c
··· 2401 2401 { 2402 2402 struct evergreen_mc_save save; 2403 2403 2404 + if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 2405 + reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); 2406 + 2407 + if (RREG32(DMA_STATUS_REG) & DMA_IDLE) 2408 + reset_mask &= ~RADEON_RESET_DMA; 2409 + 2404 2410 if (reset_mask == 0) 2405 2411 return 0; 2406 2412
+6
drivers/gpu/drm/radeon/ni.c
··· 1409 1409 { 1410 1410 struct evergreen_mc_save save; 1411 1411 1412 + if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 1413 + reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); 1414 + 1415 + if (RREG32(DMA_STATUS_REG) & DMA_IDLE) 1416 + reset_mask &= ~RADEON_RESET_DMA; 1417 + 1412 1418 if (reset_mask == 0) 1413 1419 return 0; 1414 1420
+6
drivers/gpu/drm/radeon/r600.c
··· 1378 1378 { 1379 1379 struct rv515_mc_save save; 1380 1380 1381 + if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 1382 + reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); 1383 + 1384 + if (RREG32(DMA_STATUS_REG) & DMA_IDLE) 1385 + reset_mask &= ~RADEON_RESET_DMA; 1386 + 1381 1387 if (reset_mask == 0) 1382 1388 return 0; 1383 1389
+2 -1
drivers/gpu/drm/radeon/radeon.h
··· 324 324 struct list_head list; 325 325 /* Protected by tbo.reserved */ 326 326 u32 placements[3]; 327 - u32 busy_placements[3]; 328 327 struct ttm_placement placement; 329 328 struct ttm_buffer_object tbo; 330 329 struct ttm_bo_kmap_obj kmap; ··· 653 654 u32 ptr_reg_mask; 654 655 u32 nop; 655 656 u32 idx; 657 + u64 last_semaphore_signal_addr; 658 + u64 last_semaphore_wait_addr; 656 659 }; 657 660 658 661 /*
+2 -1
drivers/gpu/drm/radeon/radeon_drv.c
··· 69 69 * 2.26.0 - r600-eg: fix htile size computation 70 70 * 2.27.0 - r600-SI: Add CS ioctl support for async DMA 71 71 * 2.28.0 - r600-eg: Add MEM_WRITE packet support 72 + * 2.29.0 - R500 FP16 color clear registers 72 73 */ 73 74 #define KMS_DRIVER_MAJOR 2 74 - #define KMS_DRIVER_MINOR 28 75 + #define KMS_DRIVER_MINOR 29 75 76 #define KMS_DRIVER_PATCHLEVEL 0 76 77 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 77 78 int radeon_driver_unload_kms(struct drm_device *dev);
+10 -8
drivers/gpu/drm/radeon/radeon_object.c
··· 84 84 rbo->placement.fpfn = 0; 85 85 rbo->placement.lpfn = 0; 86 86 rbo->placement.placement = rbo->placements; 87 + rbo->placement.busy_placement = rbo->placements; 87 88 if (domain & RADEON_GEM_DOMAIN_VRAM) 88 89 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | 89 90 TTM_PL_FLAG_VRAM; ··· 105 104 if (!c) 106 105 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 107 106 rbo->placement.num_placement = c; 108 - 109 - c = 0; 110 - rbo->placement.busy_placement = rbo->busy_placements; 111 - if (rbo->rdev->flags & RADEON_IS_AGP) { 112 - rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT; 113 - } else { 114 - rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; 115 - } 116 107 rbo->placement.num_busy_placement = c; 117 108 } 118 109 ··· 350 357 { 351 358 struct radeon_bo_list *lobj; 352 359 struct radeon_bo *bo; 360 + u32 domain; 353 361 int r; 354 362 355 363 r = ttm_eu_reserve_buffers(head); ··· 360 366 list_for_each_entry(lobj, head, tv.head) { 361 367 bo = lobj->bo; 362 368 if (!bo->pin_count) { 369 + domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain; 370 + 371 + retry: 372 + radeon_ttm_placement_from_domain(bo, domain); 363 373 r = ttm_bo_validate(&bo->tbo, &bo->placement, 364 374 true, false); 365 375 if (unlikely(r)) { 376 + if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) { 377 + domain |= RADEON_GEM_DOMAIN_GTT; 378 + goto retry; 379 + } 366 380 return r; 367 381 } 368 382 }
+2
drivers/gpu/drm/radeon/radeon_ring.c
··· 784 784 } 785 785 seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr); 786 786 seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr); 787 + seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr); 788 + seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr); 787 789 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 788 790 seq_printf(m, "%u dwords in ring\n", count); 789 791 /* print 8 dw before current rptr as often it's the last executed
+4
drivers/gpu/drm/radeon/radeon_semaphore.c
··· 95 95 /* we assume caller has already allocated space on waiters ring */ 96 96 radeon_semaphore_emit_wait(rdev, waiter, semaphore); 97 97 98 + /* for debugging lockup only, used by sysfs debug files */ 99 + rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr; 100 + rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr; 101 + 98 102 return 0; 99 103 } 100 104
+2
drivers/gpu/drm/radeon/reg_srcs/rv515
··· 324 324 0x46AC US_OUT_FMT_2 325 325 0x46B0 US_OUT_FMT_3 326 326 0x46B4 US_W_FMT 327 + 0x46C0 RB3D_COLOR_CLEAR_VALUE_AR 328 + 0x46C4 RB3D_COLOR_CLEAR_VALUE_GB 327 329 0x4BC0 FG_FOG_BLEND 328 330 0x4BC4 FG_FOG_FACTOR 329 331 0x4BC8 FG_FOG_COLOR_R
+6
drivers/gpu/drm/radeon/si.c
··· 2215 2215 { 2216 2216 struct evergreen_mc_save save; 2217 2217 2218 + if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 2219 + reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); 2220 + 2221 + if (RREG32(DMA_STATUS_REG) & DMA_IDLE) 2222 + reset_mask &= ~RADEON_RESET_DMA; 2223 + 2218 2224 if (reset_mask == 0) 2219 2225 return 0; 2220 2226
+1
drivers/gpu/drm/ttm/ttm_bo.c
··· 434 434 bo->mem = tmp_mem; 435 435 bdev->driver->move_notify(bo, mem); 436 436 bo->mem = *mem; 437 + *mem = tmp_mem; 437 438 } 438 439 439 440 goto out_err;
+9 -2
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 344 344 345 345 if (ttm->state == tt_unpopulated) { 346 346 ret = ttm->bdev->driver->ttm_tt_populate(ttm); 347 - if (ret) 347 + if (ret) { 348 + /* if we fail here don't nuke the mm node 349 + * as the bo still owns it */ 350 + old_copy.mm_node = NULL; 348 351 goto out1; 352 + } 349 353 } 350 354 351 355 add = 0; ··· 375 371 prot); 376 372 } else 377 373 ret = ttm_copy_io_page(new_iomap, old_iomap, page); 378 - if (ret) 374 + if (ret) { 375 + /* failing here, means keep old copy as-is */ 376 + old_copy.mm_node = NULL; 379 377 goto out1; 378 + } 380 379 } 381 380 mb(); 382 381 out2:
+21 -14
drivers/hv/hv_balloon.c
··· 403 403 */ 404 404 405 405 struct dm_info_msg { 406 - struct dm_info_header header; 406 + struct dm_header hdr; 407 407 __u32 reserved; 408 408 __u32 info_size; 409 409 __u8 info[]; ··· 503 503 504 504 static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg) 505 505 { 506 - switch (msg->header.type) { 506 + struct dm_info_header *info_hdr; 507 + 508 + info_hdr = (struct dm_info_header *)msg->info; 509 + 510 + switch (info_hdr->type) { 507 511 case INFO_TYPE_MAX_PAGE_CNT: 508 512 pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n"); 509 - pr_info("Data Size is %d\n", msg->header.data_size); 513 + pr_info("Data Size is %d\n", info_hdr->data_size); 510 514 break; 511 515 default: 512 - pr_info("Received Unknown type: %d\n", msg->header.type); 516 + pr_info("Received Unknown type: %d\n", info_hdr->type); 513 517 } 514 518 } 515 519 ··· 883 879 balloon_onchannelcallback, dev); 884 880 885 881 if (ret) 886 - return ret; 882 + goto probe_error0; 887 883 888 884 dm_device.dev = dev; 889 885 dm_device.state = DM_INITIALIZING; ··· 895 891 kthread_run(dm_thread_func, &dm_device, "hv_balloon"); 896 892 if (IS_ERR(dm_device.thread)) { 897 893 ret = PTR_ERR(dm_device.thread); 898 - goto probe_error0; 894 + goto probe_error1; 899 895 } 900 896 901 897 hv_set_drvdata(dev, &dm_device); ··· 918 914 VM_PKT_DATA_INBAND, 919 915 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 920 916 if (ret) 921 - goto probe_error1; 917 + goto probe_error2; 922 918 923 919 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); 924 920 if (t == 0) { 925 921 ret = -ETIMEDOUT; 926 - goto probe_error1; 922 + goto probe_error2; 927 923 } 928 924 929 925 /* ··· 932 928 */ 933 929 if (dm_device.state == DM_INIT_ERROR) { 934 930 ret = -ETIMEDOUT; 935 - goto probe_error1; 931 + goto probe_error2; 936 932 } 937 933 /* 938 934 * Now submit our capabilities to the host. 
··· 965 961 VM_PKT_DATA_INBAND, 966 962 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 967 963 if (ret) 968 - goto probe_error1; 964 + goto probe_error2; 969 965 970 966 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ); 971 967 if (t == 0) { 972 968 ret = -ETIMEDOUT; 973 - goto probe_error1; 969 + goto probe_error2; 974 970 } 975 971 976 972 /* ··· 979 975 */ 980 976 if (dm_device.state == DM_INIT_ERROR) { 981 977 ret = -ETIMEDOUT; 982 - goto probe_error1; 978 + goto probe_error2; 983 979 } 984 980 985 981 dm_device.state = DM_INITIALIZED; 986 982 987 983 return 0; 988 984 989 - probe_error1: 985 + probe_error2: 990 986 kthread_stop(dm_device.thread); 991 987 992 - probe_error0: 988 + probe_error1: 993 989 vmbus_close(dev->channel); 990 + probe_error0: 991 + kfree(send_buffer); 994 992 return ret; 995 993 } 996 994 ··· 1005 999 1006 1000 vmbus_close(dev->channel); 1007 1001 kthread_stop(dm->thread); 1002 + kfree(send_buffer); 1008 1003 1009 1004 return 0; 1010 1005 }
+4
drivers/i2c/busses/i2c-designware-core.c
··· 34 34 #include <linux/io.h> 35 35 #include <linux/pm_runtime.h> 36 36 #include <linux/delay.h> 37 + #include <linux/module.h> 37 38 #include "i2c-designware-core.h" 38 39 39 40 /* ··· 726 725 return dw_readl(dev, DW_IC_COMP_PARAM_1); 727 726 } 728 727 EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param); 728 + 729 + MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core"); 730 + MODULE_LICENSE("GPL");
+4 -2
drivers/i2c/busses/i2c-mxs.c
··· 127 127 struct device *dev; 128 128 void __iomem *regs; 129 129 struct completion cmd_complete; 130 - u32 cmd_err; 130 + int cmd_err; 131 131 struct i2c_adapter adapter; 132 132 const struct mxs_i2c_speed_config *speed; 133 133 ··· 316 316 if (msg->len == 0) 317 317 return -EINVAL; 318 318 319 - init_completion(&i2c->cmd_complete); 319 + INIT_COMPLETION(i2c->cmd_complete); 320 320 i2c->cmd_err = 0; 321 321 322 322 ret = mxs_i2c_dma_setup_xfer(adap, msg, flags); ··· 472 472 473 473 i2c->dev = dev; 474 474 i2c->speed = &mxs_i2c_95kHz_config; 475 + 476 + init_completion(&i2c->cmd_complete); 475 477 476 478 if (dev->of_node) { 477 479 err = mxs_i2c_get_ofdata(i2c);
+3 -3
drivers/i2c/busses/i2c-omap.c
··· 803 803 if (stat & OMAP_I2C_STAT_AL) { 804 804 dev_err(dev->dev, "Arbitration lost\n"); 805 805 dev->cmd_err |= OMAP_I2C_STAT_AL; 806 - omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK); 806 + omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL); 807 807 } 808 808 809 809 return -EIO; ··· 963 963 i2c_omap_errata_i207(dev, stat); 964 964 965 965 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR); 966 - break; 966 + continue; 967 967 } 968 968 969 969 if (stat & OMAP_I2C_STAT_RRDY) { ··· 989 989 break; 990 990 991 991 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XDR); 992 - break; 992 + continue; 993 993 } 994 994 995 995 if (stat & OMAP_I2C_STAT_XRDY) {
+4
drivers/i2c/busses/i2c-sirf.c
··· 12 12 #include <linux/slab.h> 13 13 #include <linux/platform_device.h> 14 14 #include <linux/i2c.h> 15 + #include <linux/of_i2c.h> 15 16 #include <linux/clk.h> 16 17 #include <linux/err.h> 17 18 #include <linux/io.h> ··· 329 328 adap->algo = &i2c_sirfsoc_algo; 330 329 adap->algo_data = siic; 331 330 331 + adap->dev.of_node = pdev->dev.of_node; 332 332 adap->dev.parent = &pdev->dev; 333 333 adap->nr = pdev->id; 334 334 ··· 372 370 } 373 371 374 372 clk_disable(clk); 373 + 374 + of_i2c_register_devices(adap); 375 375 376 376 dev_info(&pdev->dev, " I2C adapter ready to operate\n"); 377 377
+1 -1
drivers/i2c/muxes/i2c-mux-pinctrl.c
··· 167 167 } 168 168 169 169 mux->busses = devm_kzalloc(&pdev->dev, 170 - sizeof(mux->busses) * mux->pdata->bus_count, 170 + sizeof(*mux->busses) * mux->pdata->bus_count, 171 171 GFP_KERNEL); 172 172 if (!mux->busses) { 173 173 dev_err(&pdev->dev, "Cannot allocate busses\n");
+1 -2
drivers/idle/intel_idle.c
··· 448 448 else 449 449 on_each_cpu(__setup_broadcast_timer, (void *)true, 1); 450 450 451 - register_cpu_notifier(&cpu_hotplug_notifier); 452 - 453 451 pr_debug(PREFIX "v" INTEL_IDLE_VERSION 454 452 " model 0x%X\n", boot_cpu_data.x86_model); 455 453 ··· 610 612 return retval; 611 613 } 612 614 } 615 + register_cpu_notifier(&cpu_hotplug_notifier); 613 616 614 617 return 0; 615 618 }
+1 -1
drivers/media/i2c/m5mols/m5mols_core.c
··· 556 556 mutex_lock(&info->lock); 557 557 558 558 format = __find_format(info, fh, fmt->which, info->res_type); 559 - if (!format) 559 + if (format) 560 560 fmt->format = *format; 561 561 else 562 562 ret = -EINVAL;
+1 -1
drivers/media/platform/coda.c
··· 23 23 #include <linux/slab.h> 24 24 #include <linux/videodev2.h> 25 25 #include <linux/of.h> 26 + #include <linux/platform_data/imx-iram.h> 26 27 27 - #include <mach/iram.h> 28 28 #include <media/v4l2-ctrls.h> 29 29 #include <media/v4l2-device.h> 30 30 #include <media/v4l2-ioctl.h>
-3
drivers/media/platform/omap3isp/ispvideo.c
··· 35 35 #include <linux/vmalloc.h> 36 36 #include <media/v4l2-dev.h> 37 37 #include <media/v4l2-ioctl.h> 38 - #include <plat/iommu.h> 39 - #include <plat/iovmm.h> 40 - #include <plat/omap-pm.h> 41 38 42 39 #include "ispvideo.h" 43 40 #include "isp.h"
+1 -1
drivers/media/platform/s5p-fimc/fimc-mdevice.c
··· 593 593 { 594 594 struct media_entity *source, *sink; 595 595 unsigned int flags = MEDIA_LNK_FL_ENABLED; 596 - int i, ret; 596 + int i, ret = 0; 597 597 598 598 for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) { 599 599 struct fimc_lite *fimc = fmd->fimc_lite[i];
+37 -51
drivers/media/platform/s5p-mfc/s5p_mfc.c
··· 412 412 } 413 413 414 414 /* Error handling for interrupt */ 415 - static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx, 416 - unsigned int reason, unsigned int err) 415 + static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev, 416 + struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err) 417 417 { 418 - struct s5p_mfc_dev *dev; 419 418 unsigned long flags; 420 419 421 - /* If no context is available then all necessary 422 - * processing has been done. */ 423 - if (ctx == NULL) 424 - return; 425 - 426 - dev = ctx->dev; 427 420 mfc_err("Interrupt Error: %08x\n", err); 428 - s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); 429 - wake_up_dev(dev, reason, err); 430 421 431 - /* Error recovery is dependent on the state of context */ 432 - switch (ctx->state) { 433 - case MFCINST_INIT: 434 - /* This error had to happen while acquireing instance */ 435 - case MFCINST_GOT_INST: 436 - /* This error had to happen while parsing the header */ 437 - case MFCINST_HEAD_PARSED: 438 - /* This error had to happen while setting dst buffers */ 439 - case MFCINST_RETURN_INST: 440 - /* This error had to happen while releasing instance */ 441 - clear_work_bit(ctx); 442 - wake_up_ctx(ctx, reason, err); 443 - if (test_and_clear_bit(0, &dev->hw_lock) == 0) 444 - BUG(); 445 - s5p_mfc_clock_off(); 446 - ctx->state = MFCINST_ERROR; 447 - break; 448 - case MFCINST_FINISHING: 449 - case MFCINST_FINISHED: 450 - case MFCINST_RUNNING: 451 - /* It is higly probable that an error occured 452 - * while decoding a frame */ 453 - clear_work_bit(ctx); 454 - ctx->state = MFCINST_ERROR; 455 - /* Mark all dst buffers as having an error */ 456 - spin_lock_irqsave(&dev->irqlock, flags); 457 - s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue, 458 - &ctx->vq_dst); 459 - /* Mark all src buffers as having an error */ 460 - s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue, 461 - &ctx->vq_src); 462 - spin_unlock_irqrestore(&dev->irqlock, flags); 463 - if 
(test_and_clear_bit(0, &dev->hw_lock) == 0) 464 - BUG(); 465 - s5p_mfc_clock_off(); 466 - break; 467 - default: 468 - mfc_err("Encountered an error interrupt which had not been handled\n"); 469 - break; 422 + if (ctx != NULL) { 423 + /* Error recovery is dependent on the state of context */ 424 + switch (ctx->state) { 425 + case MFCINST_RES_CHANGE_INIT: 426 + case MFCINST_RES_CHANGE_FLUSH: 427 + case MFCINST_RES_CHANGE_END: 428 + case MFCINST_FINISHING: 429 + case MFCINST_FINISHED: 430 + case MFCINST_RUNNING: 431 + /* It is higly probable that an error occured 432 + * while decoding a frame */ 433 + clear_work_bit(ctx); 434 + ctx->state = MFCINST_ERROR; 435 + /* Mark all dst buffers as having an error */ 436 + spin_lock_irqsave(&dev->irqlock, flags); 437 + s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, 438 + &ctx->dst_queue, &ctx->vq_dst); 439 + /* Mark all src buffers as having an error */ 440 + s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, 441 + &ctx->src_queue, &ctx->vq_src); 442 + spin_unlock_irqrestore(&dev->irqlock, flags); 443 + wake_up_ctx(ctx, reason, err); 444 + break; 445 + default: 446 + clear_work_bit(ctx); 447 + ctx->state = MFCINST_ERROR; 448 + wake_up_ctx(ctx, reason, err); 449 + break; 450 + } 470 451 } 452 + if (test_and_clear_bit(0, &dev->hw_lock) == 0) 453 + BUG(); 454 + s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); 455 + s5p_mfc_clock_off(); 456 + wake_up_dev(dev, reason, err); 471 457 return; 472 458 } 473 459 ··· 618 632 dev->warn_start) 619 633 s5p_mfc_handle_frame(ctx, reason, err); 620 634 else 621 - s5p_mfc_handle_error(ctx, reason, err); 635 + s5p_mfc_handle_error(dev, ctx, reason, err); 622 636 clear_bit(0, &dev->enter_suspend); 623 637 break; 624 638
+1
drivers/media/usb/gspca/kinect.c
··· 381 381 /* -- module initialisation -- */ 382 382 static const struct usb_device_id device_table[] = { 383 383 {USB_DEVICE(0x045e, 0x02ae)}, 384 + {USB_DEVICE(0x045e, 0x02bf)}, 384 385 {} 385 386 }; 386 387
+8 -5
drivers/media/usb/gspca/sonixb.c
··· 496 496 } 497 497 } 498 498 499 - static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer) 499 + static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buf) 500 500 { 501 501 int retry = 60; 502 502 ··· 504 504 return; 505 505 506 506 /* is i2c ready */ 507 - reg_w(gspca_dev, 0x08, buffer, 8); 507 + reg_w(gspca_dev, 0x08, buf, 8); 508 508 while (retry--) { 509 509 if (gspca_dev->usb_err < 0) 510 510 return; 511 - msleep(10); 511 + msleep(1); 512 512 reg_r(gspca_dev, 0x08); 513 513 if (gspca_dev->usb_buf[0] & 0x04) { 514 514 if (gspca_dev->usb_buf[0] & 0x08) { 515 515 dev_err(gspca_dev->v4l2_dev.dev, 516 - "i2c write error\n"); 516 + "i2c error writing %02x %02x %02x %02x" 517 + " %02x %02x %02x %02x\n", 518 + buf[0], buf[1], buf[2], buf[3], 519 + buf[4], buf[5], buf[6], buf[7]); 517 520 gspca_dev->usb_err = -EIO; 518 521 } 519 522 return; ··· 533 530 for (;;) { 534 531 if (gspca_dev->usb_err < 0) 535 532 return; 536 - reg_w(gspca_dev, 0x08, *buffer, 8); 533 + i2c_w(gspca_dev, *buffer); 537 534 len -= 8; 538 535 if (len <= 0) 539 536 break;
+1
drivers/media/usb/gspca/sonixj.c
··· 1550 1550 0, 1551 1551 gspca_dev->usb_buf, 8, 1552 1552 500); 1553 + msleep(2); 1553 1554 if (ret < 0) { 1554 1555 pr_err("i2c_w1 err %d\n", ret); 1555 1556 gspca_dev->usb_err = ret;
+3 -1
drivers/media/usb/uvc/uvc_ctrl.c
··· 1431 1431 int ret; 1432 1432 1433 1433 ctrl = uvc_find_control(chain, xctrl->id, &mapping); 1434 - if (ctrl == NULL || (ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) == 0) 1434 + if (ctrl == NULL) 1435 1435 return -EINVAL; 1436 + if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR)) 1437 + return -EACCES; 1436 1438 1437 1439 /* Clamp out of range values. */ 1438 1440 switch (mapping->v4l2_type) {
+2 -4
drivers/media/usb/uvc/uvc_v4l2.c
··· 657 657 ret = uvc_ctrl_get(chain, ctrl); 658 658 if (ret < 0) { 659 659 uvc_ctrl_rollback(handle); 660 - ctrls->error_idx = ret == -ENOENT 661 - ? ctrls->count : i; 660 + ctrls->error_idx = i; 662 661 return ret; 663 662 } 664 663 } ··· 685 686 ret = uvc_ctrl_set(chain, ctrl); 686 687 if (ret < 0) { 687 688 uvc_ctrl_rollback(handle); 688 - ctrls->error_idx = (ret == -ENOENT && 689 - cmd == VIDIOC_S_EXT_CTRLS) 689 + ctrls->error_idx = cmd == VIDIOC_S_EXT_CTRLS 690 690 ? ctrls->count : i; 691 691 return ret; 692 692 }
+3 -1
drivers/media/v4l2-core/videobuf2-core.c
··· 921 921 * In videobuf we use our internal V4l2_planes struct for 922 922 * single-planar buffers as well, for simplicity. 923 923 */ 924 - if (V4L2_TYPE_IS_OUTPUT(b->type)) 924 + if (V4L2_TYPE_IS_OUTPUT(b->type)) { 925 925 v4l2_planes[0].bytesused = b->bytesused; 926 + v4l2_planes[0].data_offset = 0; 927 + } 926 928 927 929 if (b->memory == V4L2_MEMORY_USERPTR) { 928 930 v4l2_planes[0].m.userptr = b->m.userptr;
+20 -12
drivers/mfd/vexpress-sysreg.c
··· 313 313 } 314 314 315 315 316 - void __init vexpress_sysreg_early_init(void __iomem *base) 316 + void __init vexpress_sysreg_setup(struct device_node *node) 317 317 { 318 - struct device_node *node = of_find_compatible_node(NULL, NULL, 319 - "arm,vexpress-sysreg"); 320 - 321 - if (node) 322 - base = of_iomap(node, 0); 323 - 324 - if (WARN_ON(!base)) 318 + if (WARN_ON(!vexpress_sysreg_base)) 325 319 return; 326 - 327 - vexpress_sysreg_base = base; 328 320 329 321 if (readl(vexpress_sysreg_base + SYS_MISC) & SYS_MISC_MASTERSITE) 330 322 vexpress_master_site = VEXPRESS_SITE_DB2; ··· 328 336 WARN_ON(!vexpress_sysreg_config_bridge); 329 337 } 330 338 339 + void __init vexpress_sysreg_early_init(void __iomem *base) 340 + { 341 + vexpress_sysreg_base = base; 342 + vexpress_sysreg_setup(NULL); 343 + } 344 + 331 345 void __init vexpress_sysreg_of_early_init(void) 332 346 { 333 - vexpress_sysreg_early_init(NULL); 347 + struct device_node *node = of_find_compatible_node(NULL, NULL, 348 + "arm,vexpress-sysreg"); 349 + 350 + if (node) { 351 + vexpress_sysreg_base = of_iomap(node, 0); 352 + vexpress_sysreg_setup(node); 353 + } else { 354 + pr_info("vexpress-sysreg: No Device Tree node found."); 355 + } 334 356 } 335 357 336 358 ··· 432 426 return -EBUSY; 433 427 } 434 428 435 - if (!vexpress_sysreg_base) 429 + if (!vexpress_sysreg_base) { 436 430 vexpress_sysreg_base = devm_ioremap(&pdev->dev, res->start, 437 431 resource_size(res)); 432 + vexpress_sysreg_setup(pdev->dev.of_node); 433 + } 438 434 439 435 if (!vexpress_sysreg_base) { 440 436 dev_err(&pdev->dev, "Failed to obtain base address!\n");
+36 -1
drivers/misc/ti-st/st_kim.c
··· 468 468 if (pdata->chip_enable) 469 469 pdata->chip_enable(kim_gdata); 470 470 471 + /* Configure BT nShutdown to HIGH state */ 472 + gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); 473 + mdelay(5); /* FIXME: a proper toggle */ 474 + gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); 475 + mdelay(100); 471 476 /* re-initialize the completion */ 472 477 INIT_COMPLETION(kim_gdata->ldisc_installed); 473 478 /* send notification to UIM */ ··· 514 509 * (b) upon failure to either install ldisc or download firmware. 515 510 * The function is responsible to (a) notify UIM about un-installation, 516 511 * (b) flush UART if the ldisc was installed. 517 - * (c) invoke platform's chip disabling routine. 512 + * (c) reset BT_EN - pull down nshutdown at the end. 513 + * (d) invoke platform's chip disabling routine. 518 514 */ 519 515 long st_kim_stop(void *kim_data) 520 516 { ··· 546 540 pr_err(" timed out waiting for ldisc to be un-installed"); 547 541 err = -ETIMEDOUT; 548 542 } 543 + 544 + /* By default configure BT nShutdown to LOW state */ 545 + gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); 546 + mdelay(1); 547 + gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH); 548 + mdelay(1); 549 + gpio_set_value(kim_gdata->nshutdown, GPIO_LOW); 549 550 550 551 /* platform specific disable */ 551 552 if (pdata->chip_disable) ··· 746 733 /* refer to itself */ 747 734 kim_gdata->core_data->kim_data = kim_gdata; 748 735 736 + /* Claim the chip enable nShutdown gpio from the system */ 737 + kim_gdata->nshutdown = pdata->nshutdown_gpio; 738 + err = gpio_request(kim_gdata->nshutdown, "kim"); 739 + if (unlikely(err)) { 740 + pr_err(" gpio %ld request failed ", kim_gdata->nshutdown); 741 + return err; 742 + } 743 + 744 + /* Configure nShutdown GPIO as output=0 */ 745 + err = gpio_direction_output(kim_gdata->nshutdown, 0); 746 + if (unlikely(err)) { 747 + pr_err(" unable to configure gpio %ld", kim_gdata->nshutdown); 748 + return err; 749 + } 749 750 /* get reference of pdev for 
request_firmware 750 751 */ 751 752 kim_gdata->kim_pdev = pdev; ··· 806 779 807 780 static int kim_remove(struct platform_device *pdev) 808 781 { 782 + /* free the GPIOs requested */ 783 + struct ti_st_plat_data *pdata = pdev->dev.platform_data; 809 784 struct kim_data_s *kim_gdata; 810 785 811 786 kim_gdata = dev_get_drvdata(&pdev->dev); 787 + 788 + /* Free the Bluetooth/FM/GPIO 789 + * nShutdown gpio from the system 790 + */ 791 + gpio_free(pdata->nshutdown_gpio); 792 + pr_info("nshutdown GPIO Freed"); 812 793 813 794 debugfs_remove_recursive(kim_debugfs_dir); 814 795 sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp);
+30 -62
drivers/mmc/host/mvsdio.c
··· 50 50 struct timer_list timer; 51 51 struct mmc_host *mmc; 52 52 struct device *dev; 53 - struct resource *res; 54 - int irq; 55 53 struct clk *clk; 56 54 int gpio_card_detect; 57 55 int gpio_write_protect; ··· 716 718 if (!r || irq < 0 || !mvsd_data) 717 719 return -ENXIO; 718 720 719 - r = request_mem_region(r->start, SZ_1K, DRIVER_NAME); 720 - if (!r) 721 - return -EBUSY; 722 - 723 721 mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); 724 722 if (!mmc) { 725 723 ret = -ENOMEM; ··· 725 731 host = mmc_priv(mmc); 726 732 host->mmc = mmc; 727 733 host->dev = &pdev->dev; 728 - host->res = r; 729 734 host->base_clock = mvsd_data->clock / 2; 735 + host->clk = ERR_PTR(-EINVAL); 730 736 731 737 mmc->ops = &mvsd_ops; 732 738 ··· 746 752 747 753 spin_lock_init(&host->lock); 748 754 749 - host->base = ioremap(r->start, SZ_4K); 755 + host->base = devm_request_and_ioremap(&pdev->dev, r); 750 756 if (!host->base) { 751 757 ret = -ENOMEM; 752 758 goto out; ··· 759 765 760 766 mvsd_power_down(host); 761 767 762 - ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host); 768 + ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host); 763 769 if (ret) { 764 770 pr_err("%s: cannot assign irq %d\n", DRIVER_NAME, irq); 765 771 goto out; 766 - } else 767 - host->irq = irq; 772 + } 768 773 769 774 /* Not all platforms can gate the clock, so it is not 770 775 an error if the clock does not exists. 
*/ 771 - host->clk = clk_get(&pdev->dev, NULL); 772 - if (!IS_ERR(host->clk)) { 776 + host->clk = devm_clk_get(&pdev->dev, NULL); 777 + if (!IS_ERR(host->clk)) 773 778 clk_prepare_enable(host->clk); 774 - } 775 779 776 780 if (mvsd_data->gpio_card_detect) { 777 - ret = gpio_request(mvsd_data->gpio_card_detect, 778 - DRIVER_NAME " cd"); 781 + ret = devm_gpio_request_one(&pdev->dev, 782 + mvsd_data->gpio_card_detect, 783 + GPIOF_IN, DRIVER_NAME " cd"); 779 784 if (ret == 0) { 780 - gpio_direction_input(mvsd_data->gpio_card_detect); 781 785 irq = gpio_to_irq(mvsd_data->gpio_card_detect); 782 - ret = request_irq(irq, mvsd_card_detect_irq, 783 - IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING, 784 - DRIVER_NAME " cd", host); 786 + ret = devm_request_irq(&pdev->dev, irq, 787 + mvsd_card_detect_irq, 788 + IRQ_TYPE_EDGE_RISING | 789 + IRQ_TYPE_EDGE_FALLING, 790 + DRIVER_NAME " cd", host); 785 791 if (ret == 0) 786 792 host->gpio_card_detect = 787 793 mvsd_data->gpio_card_detect; 788 794 else 789 - gpio_free(mvsd_data->gpio_card_detect); 795 + devm_gpio_free(&pdev->dev, 796 + mvsd_data->gpio_card_detect); 790 797 } 791 798 } 792 799 if (!host->gpio_card_detect) 793 800 mmc->caps |= MMC_CAP_NEEDS_POLL; 794 801 795 802 if (mvsd_data->gpio_write_protect) { 796 - ret = gpio_request(mvsd_data->gpio_write_protect, 797 - DRIVER_NAME " wp"); 803 + ret = devm_gpio_request_one(&pdev->dev, 804 + mvsd_data->gpio_write_protect, 805 + GPIOF_IN, DRIVER_NAME " wp"); 798 806 if (ret == 0) { 799 - gpio_direction_input(mvsd_data->gpio_write_protect); 800 807 host->gpio_write_protect = 801 808 mvsd_data->gpio_write_protect; 802 809 } ··· 819 824 return 0; 820 825 821 826 out: 822 - if (host) { 823 - if (host->irq) 824 - free_irq(host->irq, host); 825 - if (host->gpio_card_detect) { 826 - free_irq(gpio_to_irq(host->gpio_card_detect), host); 827 - gpio_free(host->gpio_card_detect); 828 - } 829 - if (host->gpio_write_protect) 830 - gpio_free(host->gpio_write_protect); 831 - if (host->base) 832 - 
iounmap(host->base); 833 - } 834 - if (r) 835 - release_resource(r); 836 - if (mmc) 837 - if (!IS_ERR_OR_NULL(host->clk)) { 827 + if (mmc) { 828 + if (!IS_ERR(host->clk)) 838 829 clk_disable_unprepare(host->clk); 839 - clk_put(host->clk); 840 - } 841 830 mmc_free_host(mmc); 831 + } 842 832 843 833 return ret; 844 834 } ··· 832 852 { 833 853 struct mmc_host *mmc = platform_get_drvdata(pdev); 834 854 835 - if (mmc) { 836 - struct mvsd_host *host = mmc_priv(mmc); 855 + struct mvsd_host *host = mmc_priv(mmc); 837 856 838 - if (host->gpio_card_detect) { 839 - free_irq(gpio_to_irq(host->gpio_card_detect), host); 840 - gpio_free(host->gpio_card_detect); 841 - } 842 - mmc_remove_host(mmc); 843 - free_irq(host->irq, host); 844 - if (host->gpio_write_protect) 845 - gpio_free(host->gpio_write_protect); 846 - del_timer_sync(&host->timer); 847 - mvsd_power_down(host); 848 - iounmap(host->base); 849 - release_resource(host->res); 857 + mmc_remove_host(mmc); 858 + del_timer_sync(&host->timer); 859 + mvsd_power_down(host); 850 860 851 - if (!IS_ERR(host->clk)) { 852 - clk_disable_unprepare(host->clk); 853 - clk_put(host->clk); 854 - } 855 - mmc_free_host(mmc); 856 - } 861 + if (!IS_ERR(host->clk)) 862 + clk_disable_unprepare(host->clk); 863 + mmc_free_host(mmc); 864 + 857 865 platform_set_drvdata(pdev, NULL); 858 866 return 0; 859 867 }
+1 -1
drivers/pci/hotplug/pciehp.h
··· 44 44 extern int pciehp_poll_time; 45 45 extern bool pciehp_debug; 46 46 extern bool pciehp_force; 47 - extern struct workqueue_struct *pciehp_wq; 48 47 49 48 #define dbg(format, arg...) \ 50 49 do { \ ··· 77 78 struct hotplug_slot *hotplug_slot; 78 79 struct delayed_work work; /* work for button event */ 79 80 struct mutex lock; 81 + struct workqueue_struct *wq; 80 82 }; 81 83 82 84 struct event_info {
+2 -9
drivers/pci/hotplug/pciehp_core.c
··· 42 42 bool pciehp_poll_mode; 43 43 int pciehp_poll_time; 44 44 bool pciehp_force; 45 - struct workqueue_struct *pciehp_wq; 46 45 47 46 #define DRIVER_VERSION "0.4" 48 47 #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" ··· 339 340 { 340 341 int retval = 0; 341 342 342 - pciehp_wq = alloc_workqueue("pciehp", 0, 0); 343 - if (!pciehp_wq) 344 - return -ENOMEM; 345 - 346 343 pciehp_firmware_init(); 347 344 retval = pcie_port_service_register(&hpdriver_portdrv); 348 345 dbg("pcie_port_service_register = %d\n", retval); 349 346 info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); 350 - if (retval) { 351 - destroy_workqueue(pciehp_wq); 347 + if (retval) 352 348 dbg("Failure to register service\n"); 353 - } 349 + 354 350 return retval; 355 351 } 356 352 ··· 353 359 { 354 360 dbg("unload_pciehpd()\n"); 355 361 pcie_port_service_unregister(&hpdriver_portdrv); 356 - destroy_workqueue(pciehp_wq); 357 362 info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); 358 363 } 359 364
+4 -4
drivers/pci/hotplug/pciehp_ctrl.c
··· 49 49 info->p_slot = p_slot; 50 50 INIT_WORK(&info->work, interrupt_event_handler); 51 51 52 - queue_work(pciehp_wq, &info->work); 52 + queue_work(p_slot->wq, &info->work); 53 53 54 54 return 0; 55 55 } ··· 344 344 kfree(info); 345 345 goto out; 346 346 } 347 - queue_work(pciehp_wq, &info->work); 347 + queue_work(p_slot->wq, &info->work); 348 348 out: 349 349 mutex_unlock(&p_slot->lock); 350 350 } ··· 377 377 if (ATTN_LED(ctrl)) 378 378 pciehp_set_attention_status(p_slot, 0); 379 379 380 - queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ); 380 + queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); 381 381 break; 382 382 case BLINKINGOFF_STATE: 383 383 case BLINKINGON_STATE: ··· 439 439 else 440 440 p_slot->state = POWERON_STATE; 441 441 442 - queue_work(pciehp_wq, &info->work); 442 + queue_work(p_slot->wq, &info->work); 443 443 } 444 444 445 445 static void interrupt_event_handler(struct work_struct *work)
+10 -1
drivers/pci/hotplug/pciehp_hpc.c
··· 773 773 static int pcie_init_slot(struct controller *ctrl) 774 774 { 775 775 struct slot *slot; 776 + char name[32]; 776 777 777 778 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 778 779 if (!slot) 779 780 return -ENOMEM; 781 + 782 + snprintf(name, sizeof(name), "pciehp-%u", PSN(ctrl)); 783 + slot->wq = alloc_workqueue(name, 0, 0); 784 + if (!slot->wq) 785 + goto abort; 780 786 781 787 slot->ctrl = ctrl; 782 788 mutex_init(&slot->lock); 783 789 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); 784 790 ctrl->slot = slot; 785 791 return 0; 792 + abort: 793 + kfree(slot); 794 + return -ENOMEM; 786 795 } 787 796 788 797 static void pcie_cleanup_slot(struct controller *ctrl) 789 798 { 790 799 struct slot *slot = ctrl->slot; 791 800 cancel_delayed_work(&slot->work); 792 - flush_workqueue(pciehp_wq); 801 + destroy_workqueue(slot->wq); 793 802 kfree(slot); 794 803 } 795 804
+1 -2
drivers/pci/hotplug/shpchp.h
··· 46 46 extern bool shpchp_poll_mode; 47 47 extern int shpchp_poll_time; 48 48 extern bool shpchp_debug; 49 - extern struct workqueue_struct *shpchp_wq; 50 - extern struct workqueue_struct *shpchp_ordered_wq; 51 49 52 50 #define dbg(format, arg...) \ 53 51 do { \ ··· 89 91 struct list_head slot_list; 90 92 struct delayed_work work; /* work for button event */ 91 93 struct mutex lock; 94 + struct workqueue_struct *wq; 92 95 u8 hp_slot; 93 96 }; 94 97
+14 -22
drivers/pci/hotplug/shpchp_core.c
··· 39 39 bool shpchp_debug; 40 40 bool shpchp_poll_mode; 41 41 int shpchp_poll_time; 42 - struct workqueue_struct *shpchp_wq; 43 - struct workqueue_struct *shpchp_ordered_wq; 44 42 45 43 #define DRIVER_VERSION "0.4" 46 44 #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>" ··· 127 129 slot->device = ctrl->slot_device_offset + i; 128 130 slot->hpc_ops = ctrl->hpc_ops; 129 131 slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i); 132 + 133 + snprintf(name, sizeof(name), "shpchp-%d", slot->number); 134 + slot->wq = alloc_workqueue(name, 0, 0); 135 + if (!slot->wq) { 136 + retval = -ENOMEM; 137 + goto error_info; 138 + } 139 + 130 140 mutex_init(&slot->lock); 131 141 INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work); 132 142 ··· 154 148 if (retval) { 155 149 ctrl_err(ctrl, "pci_hp_register failed with error %d\n", 156 150 retval); 157 - goto error_info; 151 + goto error_slotwq; 158 152 } 159 153 160 154 get_power_status(hotplug_slot, &info->power_status); ··· 166 160 } 167 161 168 162 return 0; 163 + error_slotwq: 164 + destroy_workqueue(slot->wq); 169 165 error_info: 170 166 kfree(info); 171 167 error_hpslot: ··· 188 180 slot = list_entry(tmp, struct slot, slot_list); 189 181 list_del(&slot->slot_list); 190 182 cancel_delayed_work(&slot->work); 191 - flush_workqueue(shpchp_wq); 192 - flush_workqueue(shpchp_ordered_wq); 183 + destroy_workqueue(slot->wq); 193 184 pci_hp_deregister(slot->hotplug_slot); 194 185 } 195 186 } ··· 371 364 372 365 static int __init shpcd_init(void) 373 366 { 374 - int retval = 0; 375 - 376 - shpchp_wq = alloc_ordered_workqueue("shpchp", 0); 377 - if (!shpchp_wq) 378 - return -ENOMEM; 379 - 380 - shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0); 381 - if (!shpchp_ordered_wq) { 382 - destroy_workqueue(shpchp_wq); 383 - return -ENOMEM; 384 - } 367 + int retval; 385 368 386 369 retval = pci_register_driver(&shpc_driver); 387 370 dbg("%s: 
pci_register_driver = %d\n", __func__, retval); 388 371 info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); 389 - if (retval) { 390 - destroy_workqueue(shpchp_ordered_wq); 391 - destroy_workqueue(shpchp_wq); 392 - } 372 + 393 373 return retval; 394 374 } 395 375 ··· 384 390 { 385 391 dbg("unload_shpchpd()\n"); 386 392 pci_unregister_driver(&shpc_driver); 387 - destroy_workqueue(shpchp_ordered_wq); 388 - destroy_workqueue(shpchp_wq); 389 393 info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n"); 390 394 } 391 395
+3 -3
drivers/pci/hotplug/shpchp_ctrl.c
··· 51 51 info->p_slot = p_slot; 52 52 INIT_WORK(&info->work, interrupt_event_handler); 53 53 54 - queue_work(shpchp_wq, &info->work); 54 + queue_work(p_slot->wq, &info->work); 55 55 56 56 return 0; 57 57 } ··· 453 453 kfree(info); 454 454 goto out; 455 455 } 456 - queue_work(shpchp_ordered_wq, &info->work); 456 + queue_work(p_slot->wq, &info->work); 457 457 out: 458 458 mutex_unlock(&p_slot->lock); 459 459 } ··· 501 501 p_slot->hpc_ops->green_led_blink(p_slot); 502 502 p_slot->hpc_ops->set_attention_status(p_slot, 0); 503 503 504 - queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ); 504 + queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); 505 505 break; 506 506 case BLINKINGOFF_STATE: 507 507 case BLINKINGON_STATE:
+1 -1
drivers/pci/pcie/Kconfig
··· 82 82 83 83 config PCIE_PME 84 84 def_bool y 85 - depends on PCIEPORTBUS && PM_RUNTIME && EXPERIMENTAL && ACPI 85 + depends on PCIEPORTBUS && PM_RUNTIME && ACPI
+1
drivers/pci/pcie/aer/aerdrv_core.c
··· 630 630 continue; 631 631 } 632 632 do_recovery(pdev, entry.severity); 633 + pci_dev_put(pdev); 633 634 } 634 635 } 635 636 #endif
+3
drivers/pci/pcie/aspm.c
··· 771 771 { 772 772 struct pci_dev *child; 773 773 774 + if (aspm_force) 775 + return; 776 + 774 777 /* 775 778 * Clear any ASPM setup that the firmware has carried out on this bus 776 779 */
+1 -1
drivers/staging/iio/adc/mxs-lradc.c
··· 239 239 struct mxs_lradc *lradc = iio_priv(iio); 240 240 const uint32_t chan_value = LRADC_CH_ACCUMULATE | 241 241 ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET); 242 - int i, j = 0; 242 + unsigned int i, j = 0; 243 243 244 244 for_each_set_bit(i, iio->active_scan_mask, iio->masklength) { 245 245 lradc->buffer[j] = readl(lradc->base + LRADC_CH(j));
+1 -1
drivers/staging/iio/gyro/adis16080_core.c
··· 69 69 ret = spi_read(st->us, st->buf, 2); 70 70 71 71 if (ret == 0) 72 - *val = ((st->buf[0] & 0xF) << 8) | st->buf[1]; 72 + *val = sign_extend32(((st->buf[0] & 0xF) << 8) | st->buf[1], 11); 73 73 mutex_unlock(&st->buf_lock); 74 74 75 75 return ret;
+1 -1
drivers/staging/sb105x/sb_pci_mp.c
··· 3054 3054 sbdev->nr_ports = ((portnum_hex/16)*10) + (portnum_hex % 16); 3055 3055 } 3056 3056 break; 3057 - #ifdef CONFIG_PARPORT 3057 + #ifdef CONFIG_PARPORT_PC 3058 3058 case PCI_DEVICE_ID_MP2S1P : 3059 3059 sbdev->nr_ports = 2; 3060 3060
-1
drivers/staging/vt6656/bssdb.h
··· 90 90 } SRSNCapObject, *PSRSNCapObject; 91 91 92 92 // BSS info(AP) 93 - #pragma pack(1) 94 93 typedef struct tagKnownBSS { 95 94 // BSS info 96 95 BOOL bActive;
-1
drivers/staging/vt6656/int.h
··· 34 34 #include "device.h" 35 35 36 36 /*--------------------- Export Definitions -------------------------*/ 37 - #pragma pack(1) 38 37 typedef struct tagSINTData { 39 38 BYTE byTSR0; 40 39 BYTE byPkt0;
+16 -17
drivers/staging/vt6656/iocmd.h
··· 95 95 // Ioctl interface structure 96 96 // Command structure 97 97 // 98 - #pragma pack(1) 99 98 typedef struct tagSCmdRequest { 100 99 u8 name[16]; 101 100 void *data; 102 101 u16 wResult; 103 102 u16 wCmdCode; 104 - } SCmdRequest, *PSCmdRequest; 103 + } __packed SCmdRequest, *PSCmdRequest; 105 104 106 105 // 107 106 // Scan ··· 110 111 111 112 u8 ssid[SSID_MAXLEN + 2]; 112 113 113 - } SCmdScan, *PSCmdScan; 114 + } __packed SCmdScan, *PSCmdScan; 114 115 115 116 // 116 117 // BSS Join ··· 125 126 BOOL bPSEnable; 126 127 BOOL bShareKeyAuth; 127 128 128 - } SCmdBSSJoin, *PSCmdBSSJoin; 129 + } __packed SCmdBSSJoin, *PSCmdBSSJoin; 129 130 130 131 // 131 132 // Zonetype Setting ··· 136 137 BOOL bWrite; 137 138 WZONETYPE ZoneType; 138 139 139 - } SCmdZoneTypeSet, *PSCmdZoneTypeSet; 140 + } __packed SCmdZoneTypeSet, *PSCmdZoneTypeSet; 140 141 141 142 typedef struct tagSWPAResult { 142 143 char ifname[100]; ··· 144 145 u8 key_mgmt; 145 146 u8 eap_type; 146 147 BOOL authenticated; 147 - } SWPAResult, *PSWPAResult; 148 + } __packed SWPAResult, *PSWPAResult; 148 149 149 150 typedef struct tagSCmdStartAP { 150 151 ··· 156 157 BOOL bShareKeyAuth; 157 158 u8 byBasicRate; 158 159 159 - } SCmdStartAP, *PSCmdStartAP; 160 + } __packed SCmdStartAP, *PSCmdStartAP; 160 161 161 162 typedef struct tagSCmdSetWEP { 162 163 ··· 166 167 BOOL bWepKeyAvailable[WEP_NKEYS]; 167 168 u32 auWepKeyLength[WEP_NKEYS]; 168 169 169 - } SCmdSetWEP, *PSCmdSetWEP; 170 + } __packed SCmdSetWEP, *PSCmdSetWEP; 170 171 171 172 typedef struct tagSBSSIDItem { 172 173 ··· 179 180 BOOL bWEPOn; 180 181 u32 uRSSI; 181 182 182 - } SBSSIDItem; 183 + } __packed SBSSIDItem; 183 184 184 185 185 186 typedef struct tagSBSSIDList { 186 187 187 188 u32 uItem; 188 189 SBSSIDItem sBSSIDList[0]; 189 - } SBSSIDList, *PSBSSIDList; 190 + } __packed SBSSIDList, *PSBSSIDList; 190 191 191 192 192 193 typedef struct tagSNodeItem { ··· 207 208 u32 uTxAttempts; 208 209 u16 wFailureRatio; 209 210 210 - } SNodeItem; 211 + } __packed 
SNodeItem; 211 212 212 213 213 214 typedef struct tagSNodeList { ··· 215 216 u32 uItem; 216 217 SNodeItem sNodeList[0]; 217 218 218 - } SNodeList, *PSNodeList; 219 + } __packed SNodeList, *PSNodeList; 219 220 220 221 221 222 typedef struct tagSCmdLinkStatus { ··· 228 229 u32 uChannel; 229 230 u32 uLinkRate; 230 231 231 - } SCmdLinkStatus, *PSCmdLinkStatus; 232 + } __packed SCmdLinkStatus, *PSCmdLinkStatus; 232 233 233 234 // 234 235 // 802.11 counter ··· 246 247 u32 ReceivedFragmentCount; 247 248 u32 MulticastReceivedFrameCount; 248 249 u32 FCSErrorCount; 249 - } SDot11MIBCount, *PSDot11MIBCount; 250 + } __packed SDot11MIBCount, *PSDot11MIBCount; 250 251 251 252 252 253 ··· 354 355 u32 ullTxBroadcastBytes[2]; 355 356 u32 ullTxMulticastBytes[2]; 356 357 u32 ullTxDirectedBytes[2]; 357 - } SStatMIBCount, *PSStatMIBCount; 358 + } __packed SStatMIBCount, *PSStatMIBCount; 358 359 359 360 typedef struct tagSCmdValue { 360 361 361 362 u32 dwValue; 362 363 363 - } SCmdValue, *PSCmdValue; 364 + } __packed SCmdValue, *PSCmdValue; 364 365 365 366 // 366 367 // hostapd & viawget ioctl related ··· 430 431 u8 ssid[32]; 431 432 } scan_req; 432 433 } u; 433 - }; 434 + } __packed; 434 435 435 436 /*--------------------- Export Classes ----------------------------*/ 436 437
+3 -5
drivers/staging/vt6656/iowpa.h
··· 67 67 68 68 69 69 70 - #pragma pack(1) 71 70 typedef struct viawget_wpa_header { 72 71 u8 type; 73 72 u16 req_ie_len; 74 73 u16 resp_ie_len; 75 - } viawget_wpa_header; 74 + } __packed viawget_wpa_header; 76 75 77 76 struct viawget_wpa_param { 78 77 u32 cmd; ··· 112 113 u8 *buf; 113 114 } scan_results; 114 115 } u; 115 - }; 116 + } __packed; 116 117 117 - #pragma pack(1) 118 118 struct viawget_scan_result { 119 119 u8 bssid[6]; 120 120 u8 ssid[32]; ··· 128 130 int noise; 129 131 int level; 130 132 int maxrate; 131 - }; 133 + } __packed; 132 134 133 135 /*--------------------- Export Classes ----------------------------*/ 134 136
+1 -1
drivers/staging/wlan-ng/prism2mgmt.c
··· 406 406 /* SSID */ 407 407 req->ssid.status = P80211ENUM_msgitem_status_data_ok; 408 408 req->ssid.data.len = le16_to_cpu(item->ssid.len); 409 - req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_BSSID_LEN); 409 + req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_SSID_MAXLEN); 410 410 memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len); 411 411 412 412 /* supported rates */
+2
drivers/tty/pty.c
··· 441 441 return pty_get_pktmode(tty, (int __user *)arg); 442 442 case TIOCSIG: /* Send signal to other side of pty */ 443 443 return pty_signal(tty, (int) arg); 444 + case TIOCGPTN: /* TTY returns ENOTTY, but glibc expects EINVAL here */ 445 + return -EINVAL; 444 446 } 445 447 return -ENOIOCTLCMD; 446 448 }
+11
drivers/tty/serial/8250/8250.c
··· 300 300 UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00, 301 301 .flags = UART_CAP_FIFO, 302 302 }, 303 + [PORT_BRCM_TRUMANAGE] = { 304 + .name = "TruManage", 305 + .fifo_size = 1, 306 + .tx_loadsz = 1024, 307 + .flags = UART_CAP_HFIFO, 308 + }, 303 309 [PORT_8250_CIR] = { 304 310 .name = "CIR port" 305 311 } ··· 1496 1490 port->icount.tx++; 1497 1491 if (uart_circ_empty(xmit)) 1498 1492 break; 1493 + if (up->capabilities & UART_CAP_HFIFO) { 1494 + if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) != 1495 + BOTH_EMPTY) 1496 + break; 1497 + } 1499 1498 } while (--count > 0); 1500 1499 1501 1500 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+1
drivers/tty/serial/8250/8250.h
··· 40 40 #define UART_CAP_AFE (1 << 11) /* MCR-based hw flow control */ 41 41 #define UART_CAP_UUE (1 << 12) /* UART needs IER bit 6 set (Xscale) */ 42 42 #define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */ 43 + #define UART_CAP_HFIFO (1 << 14) /* UART has a "hidden" FIFO */ 43 44 44 45 #define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */ 45 46 #define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
+1 -1
drivers/tty/serial/8250/8250_dw.c
··· 79 79 } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) { 80 80 /* Clear the USR and write the LCR again. */ 81 81 (void)p->serial_in(p, UART_USR); 82 - p->serial_out(p, d->last_lcr, UART_LCR); 82 + p->serial_out(p, UART_LCR, d->last_lcr); 83 83 84 84 return 1; 85 85 }
+40 -2
drivers/tty/serial/8250/8250_pci.c
··· 1085 1085 return setup_port(priv, port, 2, idx * 8, 0); 1086 1086 } 1087 1087 1088 + static int 1089 + pci_brcm_trumanage_setup(struct serial_private *priv, 1090 + const struct pciserial_board *board, 1091 + struct uart_8250_port *port, int idx) 1092 + { 1093 + int ret = pci_default_setup(priv, board, port, idx); 1094 + 1095 + port->port.type = PORT_BRCM_TRUMANAGE; 1096 + port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE); 1097 + return ret; 1098 + } 1099 + 1088 1100 static int skip_tx_en_setup(struct serial_private *priv, 1089 1101 const struct pciserial_board *board, 1090 1102 struct uart_8250_port *port, int idx) ··· 1313 1301 #define PCI_VENDOR_ID_AGESTAR 0x5372 1314 1302 #define PCI_DEVICE_ID_AGESTAR_9375 0x6872 1315 1303 #define PCI_VENDOR_ID_ASIX 0x9710 1316 - #define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0019 1317 1304 #define PCI_DEVICE_ID_COMMTECH_4224PCIE 0x0020 1318 1305 #define PCI_DEVICE_ID_COMMTECH_4228PCIE 0x0021 1306 + #define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022 1307 + #define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a 1319 1308 1320 1309 1321 1310 /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ ··· 1967 1954 .setup = pci_xr17v35x_setup, 1968 1955 }, 1969 1956 /* 1957 + * Broadcom TruManage (NetXtreme) 1958 + */ 1959 + { 1960 + .vendor = PCI_VENDOR_ID_BROADCOM, 1961 + .device = PCI_DEVICE_ID_BROADCOM_TRUMANAGE, 1962 + .subvendor = PCI_ANY_ID, 1963 + .subdevice = PCI_ANY_ID, 1964 + .setup = pci_brcm_trumanage_setup, 1965 + }, 1966 + 1967 + /* 1970 1968 * Default "match everything" terminator entry 1971 1969 */ 1972 1970 { ··· 2172 2148 pbn_ce4100_1_115200, 2173 2149 pbn_omegapci, 2174 2150 pbn_NETMOS9900_2s_115200, 2151 + pbn_brcm_trumanage, 2175 2152 }; 2176 2153 2177 2154 /* ··· 2271 2246 2272 2247 [pbn_b0_8_1152000_200] = { 2273 2248 .flags = FL_BASE0, 2274 - .num_ports = 2, 2249 + .num_ports = 8, 2275 2250 .base_baud = 1152000, 2276 2251 .uart_offset = 0x200, 2277 2252 }, ··· 2915 2890 
[pbn_NETMOS9900_2s_115200] = { 2916 2891 .flags = FL_BASE0, 2917 2892 .num_ports = 2, 2893 + .base_baud = 115200, 2894 + }, 2895 + [pbn_brcm_trumanage] = { 2896 + .flags = FL_BASE0, 2897 + .num_ports = 1, 2898 + .reg_shift = 2, 2918 2899 .base_baud = 115200, 2919 2900 }, 2920 2901 }; ··· 4500 4469 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_CRONYX_OMEGA, 4501 4470 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4502 4471 pbn_omegapci }, 4472 + 4473 + /* 4474 + * Broadcom TruManage 4475 + */ 4476 + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BROADCOM_TRUMANAGE, 4477 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4478 + pbn_brcm_trumanage }, 4503 4479 4504 4480 /* 4505 4481 * AgeStar as-prs2-009
+3 -1
drivers/tty/serial/ifx6x60.c
··· 637 637 638 638 clear_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags); 639 639 mrdy_set_low(ifx_dev); 640 + del_timer(&ifx_dev->spi_timer); 640 641 clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags); 641 642 tasklet_kill(&ifx_dev->io_work_tasklet); 642 643 } ··· 811 810 ifx_dev->spi_xfer.cs_change = 0; 812 811 ifx_dev->spi_xfer.speed_hz = ifx_dev->spi_dev->max_speed_hz; 813 812 /* ifx_dev->spi_xfer.speed_hz = 390625; */ 814 - ifx_dev->spi_xfer.bits_per_word = spi_bpw; 813 + ifx_dev->spi_xfer.bits_per_word = 814 + ifx_dev->spi_dev->bits_per_word; 815 815 816 816 ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer; 817 817 ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer;
+4 -2
drivers/tty/serial/mxs-auart.c
··· 253 253 struct circ_buf *xmit = &s->port.state->xmit; 254 254 255 255 if (auart_dma_enabled(s)) { 256 - int i = 0; 256 + u32 i = 0; 257 257 int size; 258 258 void *buffer = s->tx_dma_buf; 259 259 ··· 412 412 413 413 u32 ctrl = readl(u->membase + AUART_CTRL2); 414 414 415 - ctrl &= ~AUART_CTRL2_RTSEN; 415 + ctrl &= ~(AUART_CTRL2_RTSEN | AUART_CTRL2_RTS); 416 416 if (mctrl & TIOCM_RTS) { 417 417 if (tty_port_cts_enabled(&u->state->port)) 418 418 ctrl |= AUART_CTRL2_RTSEN; 419 + else 420 + ctrl |= AUART_CTRL2_RTS; 419 421 } 420 422 421 423 s->ctrl = mctrl;
-1
drivers/tty/serial/samsung.c
··· 1006 1006 1007 1007 ucon &= ucon_mask; 1008 1008 wr_regl(port, S3C2410_UCON, ucon | cfg->ucon); 1009 - wr_regl(port, S3C2410_ULCON, cfg->ulcon); 1010 1009 1011 1010 /* reset both fifos */ 1012 1011 wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
+1 -1
drivers/tty/serial/vt8500_serial.c
··· 604 604 vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; 605 605 606 606 vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0); 607 - if (vt8500_port->clk) { 607 + if (!IS_ERR(vt8500_port->clk)) { 608 608 vt8500_port->uart.uartclk = clk_get_rate(vt8500_port->clk); 609 609 } else { 610 610 /* use the default of 24Mhz if not specified and warn */
+1
drivers/usb/dwc3/gadget.c
··· 1605 1605 1606 1606 if (epnum == 0 || epnum == 1) { 1607 1607 dep->endpoint.maxpacket = 512; 1608 + dep->endpoint.maxburst = 1; 1608 1609 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1609 1610 if (!epnum) 1610 1611 dwc->gadget.ep0 = &dep->endpoint;
+3 -3
drivers/usb/gadget/f_fs.c
··· 1152 1152 pr_err("%s: unmapped value: %lu\n", opts, value); 1153 1153 return -EINVAL; 1154 1154 } 1155 - } 1156 - else if (!memcmp(opts, "gid", 3)) 1155 + } else if (!memcmp(opts, "gid", 3)) { 1157 1156 data->perms.gid = make_kgid(current_user_ns(), value); 1158 1157 if (!gid_valid(data->perms.gid)) { 1159 1158 pr_err("%s: unmapped value: %lu\n", opts, value); 1160 1159 return -EINVAL; 1161 1160 } 1162 - else 1161 + } else { 1163 1162 goto invalid; 1163 + } 1164 1164 break; 1165 1165 1166 1166 default:
+26 -14
drivers/usb/gadget/fsl_mxc_udc.c
··· 18 18 #include <linux/platform_device.h> 19 19 #include <linux/io.h> 20 20 21 - #include <mach/hardware.h> 22 - 23 21 static struct clk *mxc_ahb_clk; 24 22 static struct clk *mxc_per_clk; 25 23 static struct clk *mxc_ipg_clk; 26 24 27 25 /* workaround ENGcm09152 for i.MX35 */ 28 - #define USBPHYCTRL_OTGBASE_OFFSET 0x608 26 + #define MX35_USBPHYCTRL_OFFSET 0x600 27 + #define USBPHYCTRL_OTGBASE_OFFSET 0x8 29 28 #define USBPHYCTRL_EVDO (1 << 23) 30 29 31 30 int fsl_udc_clk_init(struct platform_device *pdev) ··· 58 59 clk_prepare_enable(mxc_per_clk); 59 60 60 61 /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */ 61 - if (!cpu_is_mx51()) { 62 + if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) { 62 63 freq = clk_get_rate(mxc_per_clk); 63 64 if (pdata->phy_mode != FSL_USB2_PHY_ULPI && 64 65 (freq < 59999000 || freq > 60001000)) { ··· 78 79 return ret; 79 80 } 80 81 81 - void fsl_udc_clk_finalize(struct platform_device *pdev) 82 + int fsl_udc_clk_finalize(struct platform_device *pdev) 82 83 { 83 84 struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; 84 - if (cpu_is_mx35()) { 85 - unsigned int v; 85 + int ret = 0; 86 86 87 - /* workaround ENGcm09152 for i.MX35 */ 88 - if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) { 89 - v = readl(MX35_IO_ADDRESS(MX35_USB_BASE_ADDR + 90 - USBPHYCTRL_OTGBASE_OFFSET)); 91 - writel(v | USBPHYCTRL_EVDO, 92 - MX35_IO_ADDRESS(MX35_USB_BASE_ADDR + 93 - USBPHYCTRL_OTGBASE_OFFSET)); 87 + /* workaround ENGcm09152 for i.MX35 */ 88 + if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) { 89 + unsigned int v; 90 + struct resource *res = platform_get_resource 91 + (pdev, IORESOURCE_MEM, 0); 92 + void __iomem *phy_regs = ioremap(res->start + 93 + MX35_USBPHYCTRL_OFFSET, 512); 94 + if (!phy_regs) { 95 + dev_err(&pdev->dev, "ioremap for phy address fails\n"); 96 + ret = -EINVAL; 97 + goto ioremap_err; 94 98 } 99 + 100 + v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET); 101 + writel(v | USBPHYCTRL_EVDO, 102 + phy_regs + 
USBPHYCTRL_OTGBASE_OFFSET); 103 + 104 + iounmap(phy_regs); 95 105 } 96 106 107 + 108 + ioremap_err: 97 109 /* ULPI transceivers don't need usbpll */ 98 110 if (pdata->phy_mode == FSL_USB2_PHY_ULPI) { 99 111 clk_disable_unprepare(mxc_per_clk); 100 112 mxc_per_clk = NULL; 101 113 } 114 + 115 + return ret; 102 116 } 103 117 104 118 void fsl_udc_clk_release(void)
+25 -17
drivers/usb/gadget/fsl_udc_core.c
··· 41 41 #include <linux/fsl_devices.h> 42 42 #include <linux/dmapool.h> 43 43 #include <linux/delay.h> 44 + #include <linux/of_device.h> 44 45 45 46 #include <asm/byteorder.h> 46 47 #include <asm/io.h> ··· 2403 2402 unsigned int i; 2404 2403 u32 dccparams; 2405 2404 2406 - if (strcmp(pdev->name, driver_name)) { 2407 - VDBG("Wrong device"); 2408 - return -ENODEV; 2409 - } 2410 - 2411 2405 udc_controller = kzalloc(sizeof(struct fsl_udc), GFP_KERNEL); 2412 2406 if (udc_controller == NULL) { 2413 2407 ERR("malloc udc failed\n"); ··· 2507 2511 dr_controller_setup(udc_controller); 2508 2512 } 2509 2513 2510 - fsl_udc_clk_finalize(pdev); 2514 + ret = fsl_udc_clk_finalize(pdev); 2515 + if (ret) 2516 + goto err_free_irq; 2511 2517 2512 2518 /* Setup gadget structure */ 2513 2519 udc_controller->gadget.ops = &fsl_gadget_ops; ··· 2718 2720 2719 2721 return fsl_udc_resume(NULL); 2720 2722 } 2721 - 2722 2723 /*------------------------------------------------------------------------- 2723 2724 Register entry point for the peripheral controller driver 2724 2725 --------------------------------------------------------------------------*/ 2725 - 2726 + static const struct platform_device_id fsl_udc_devtype[] = { 2727 + { 2728 + .name = "imx-udc-mx27", 2729 + }, { 2730 + .name = "imx-udc-mx51", 2731 + }, { 2732 + /* sentinel */ 2733 + } 2734 + }; 2735 + MODULE_DEVICE_TABLE(platform, fsl_udc_devtype); 2726 2736 static struct platform_driver udc_driver = { 2727 - .remove = __exit_p(fsl_udc_remove), 2737 + .remove = __exit_p(fsl_udc_remove), 2738 + /* Just for FSL i.mx SoC currently */ 2739 + .id_table = fsl_udc_devtype, 2728 2740 /* these suspend and resume are not usb suspend and resume */ 2729 - .suspend = fsl_udc_suspend, 2730 - .resume = fsl_udc_resume, 2731 - .driver = { 2732 - .name = (char *)driver_name, 2733 - .owner = THIS_MODULE, 2734 - /* udc suspend/resume called from OTG driver */ 2735 - .suspend = fsl_udc_otg_suspend, 2736 - .resume = fsl_udc_otg_resume, 2741 + 
.suspend = fsl_udc_suspend, 2742 + .resume = fsl_udc_resume, 2743 + .driver = { 2744 + .name = (char *)driver_name, 2745 + .owner = THIS_MODULE, 2746 + /* udc suspend/resume called from OTG driver */ 2747 + .suspend = fsl_udc_otg_suspend, 2748 + .resume = fsl_udc_otg_resume, 2737 2749 }, 2738 2750 }; 2739 2751
+3 -2
drivers/usb/gadget/fsl_usb2_udc.h
··· 592 592 struct platform_device; 593 593 #ifdef CONFIG_ARCH_MXC 594 594 int fsl_udc_clk_init(struct platform_device *pdev); 595 - void fsl_udc_clk_finalize(struct platform_device *pdev); 595 + int fsl_udc_clk_finalize(struct platform_device *pdev); 596 596 void fsl_udc_clk_release(void); 597 597 #else 598 598 static inline int fsl_udc_clk_init(struct platform_device *pdev) 599 599 { 600 600 return 0; 601 601 } 602 - static inline void fsl_udc_clk_finalize(struct platform_device *pdev) 602 + static inline int fsl_udc_clk_finalize(struct platform_device *pdev) 603 603 { 604 + return 0; 604 605 } 605 606 static inline void fsl_udc_clk_release(void) 606 607 {
+1 -1
drivers/usb/host/Kconfig
··· 148 148 Variation of ARC USB block used in some Freescale chips. 149 149 150 150 config USB_EHCI_MXC 151 - bool "Support for Freescale i.MX on-chip EHCI USB controller" 151 + tristate "Support for Freescale i.MX on-chip EHCI USB controller" 152 152 depends on USB_EHCI_HCD && ARCH_MXC 153 153 select USB_EHCI_ROOT_HUB_TT 154 154 ---help---
+1
drivers/usb/host/Makefile
··· 26 26 obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o 27 27 obj-$(CONFIG_USB_EHCI_PCI) += ehci-pci.o 28 28 obj-$(CONFIG_USB_EHCI_HCD_PLATFORM) += ehci-platform.o 29 + obj-$(CONFIG_USB_EHCI_MXC) += ehci-mxc.o 29 30 30 31 obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o 31 32 obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
+2 -10
drivers/usb/host/ehci-hcd.c
··· 74 74 #undef VERBOSE_DEBUG 75 75 #undef EHCI_URB_TRACE 76 76 77 - #ifdef DEBUG 78 - #define EHCI_STATS 79 - #endif 80 - 81 77 /* magic numbers that can affect system performance */ 82 78 #define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */ 83 79 #define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */ ··· 1246 1250 #define PLATFORM_DRIVER ehci_fsl_driver 1247 1251 #endif 1248 1252 1249 - #ifdef CONFIG_USB_EHCI_MXC 1250 - #include "ehci-mxc.c" 1251 - #define PLATFORM_DRIVER ehci_mxc_driver 1252 - #endif 1253 - 1254 1253 #ifdef CONFIG_USB_EHCI_SH 1255 1254 #include "ehci-sh.c" 1256 1255 #define PLATFORM_DRIVER ehci_hcd_sh_driver ··· 1343 1352 1344 1353 #if !IS_ENABLED(CONFIG_USB_EHCI_PCI) && \ 1345 1354 !IS_ENABLED(CONFIG_USB_EHCI_HCD_PLATFORM) && \ 1346 - !defined(CONFIG_USB_CHIPIDEA_HOST) && \ 1355 + !IS_ENABLED(CONFIG_USB_CHIPIDEA_HOST) && \ 1356 + !IS_ENABLED(CONFIG_USB_EHCI_MXC) && \ 1347 1357 !defined(PLATFORM_DRIVER) && \ 1348 1358 !defined(PS3_SYSTEM_BUS_DRIVER) && \ 1349 1359 !defined(OF_PLATFORM_DRIVER) && \
+50 -70
drivers/usb/host/ehci-mxc.c
··· 17 17 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 18 */ 19 19 20 + #include <linux/kernel.h> 21 + #include <linux/module.h> 22 + #include <linux/io.h> 20 23 #include <linux/platform_device.h> 21 24 #include <linux/clk.h> 22 25 #include <linux/delay.h> 23 26 #include <linux/usb/otg.h> 24 27 #include <linux/usb/ulpi.h> 25 28 #include <linux/slab.h> 29 + #include <linux/usb.h> 30 + #include <linux/usb/hcd.h> 26 31 27 32 #include <linux/platform_data/usb-ehci-mxc.h> 28 33 29 34 #include <asm/mach-types.h> 30 35 36 + #include "ehci.h" 37 + 38 + #define DRIVER_DESC "Freescale On-Chip EHCI Host driver" 39 + 40 + static const char hcd_name[] = "ehci-mxc"; 41 + 31 42 #define ULPI_VIEWPORT_OFFSET 0x170 32 43 33 44 struct ehci_mxc_priv { 34 45 struct clk *usbclk, *ahbclk, *phyclk; 35 - struct usb_hcd *hcd; 36 46 }; 37 47 38 - /* called during probe() after chip reset completes */ 39 - static int ehci_mxc_setup(struct usb_hcd *hcd) 40 - { 41 - hcd->has_tt = 1; 48 + static struct hc_driver __read_mostly ehci_mxc_hc_driver; 42 49 43 - return ehci_setup(hcd); 44 - } 45 - 46 - static const struct hc_driver ehci_mxc_hc_driver = { 47 - .description = hcd_name, 48 - .product_desc = "Freescale On-Chip EHCI Host Controller", 49 - .hcd_priv_size = sizeof(struct ehci_hcd), 50 - 51 - /* 52 - * generic hardware linkage 53 - */ 54 - .irq = ehci_irq, 55 - .flags = HCD_USB2 | HCD_MEMORY, 56 - 57 - /* 58 - * basic lifecycle operations 59 - */ 60 - .reset = ehci_mxc_setup, 61 - .start = ehci_run, 62 - .stop = ehci_stop, 63 - .shutdown = ehci_shutdown, 64 - 65 - /* 66 - * managing i/o requests and associated device resources 67 - */ 68 - .urb_enqueue = ehci_urb_enqueue, 69 - .urb_dequeue = ehci_urb_dequeue, 70 - .endpoint_disable = ehci_endpoint_disable, 71 - .endpoint_reset = ehci_endpoint_reset, 72 - 73 - /* 74 - * scheduling support 75 - */ 76 - .get_frame_number = ehci_get_frame, 77 - 78 - /* 79 - * root hub support 80 - */ 81 - .hub_status_data = ehci_hub_status_data, 82 - 
.hub_control = ehci_hub_control, 83 - .bus_suspend = ehci_bus_suspend, 84 - .bus_resume = ehci_bus_resume, 85 - .relinquish_port = ehci_relinquish_port, 86 - .port_handed_over = ehci_port_handed_over, 87 - 88 - .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, 50 + static const struct ehci_driver_overrides ehci_mxc_overrides __initdata = { 51 + .extra_priv_size = sizeof(struct ehci_mxc_priv), 89 52 }; 90 53 91 54 static int ehci_mxc_drv_probe(struct platform_device *pdev) ··· 74 111 if (!hcd) 75 112 return -ENOMEM; 76 113 77 - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 78 - if (!priv) { 79 - ret = -ENOMEM; 80 - goto err_alloc; 81 - } 82 - 83 114 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 84 115 if (!res) { 85 116 dev_err(dev, "Found HC with no register addr. Check setup!\n"); ··· 90 133 ret = -EFAULT; 91 134 goto err_alloc; 92 135 } 136 + 137 + hcd->has_tt = 1; 138 + ehci = hcd_to_ehci(hcd); 139 + priv = (struct ehci_mxc_priv *) ehci->priv; 93 140 94 141 /* enable clocks */ 95 142 priv->usbclk = devm_clk_get(&pdev->dev, "ipg"); ··· 129 168 mdelay(10); 130 169 } 131 170 132 - ehci = hcd_to_ehci(hcd); 133 - 134 171 /* EHCI registers start at offset 0x100 */ 135 172 ehci->caps = hcd->regs + 0x100; 136 173 ehci->regs = hcd->regs + 0x100 + ··· 156 197 } 157 198 } 158 199 159 - priv->hcd = hcd; 160 - platform_set_drvdata(pdev, priv); 200 + platform_set_drvdata(pdev, hcd); 161 201 162 202 ret = usb_add_hcd(hcd, irq, IRQF_SHARED); 163 203 if (ret) ··· 182 224 static int __exit ehci_mxc_drv_remove(struct platform_device *pdev) 183 225 { 184 226 struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; 185 - struct ehci_mxc_priv *priv = platform_get_drvdata(pdev); 186 - struct usb_hcd *hcd = priv->hcd; 227 + struct usb_hcd *hcd = platform_get_drvdata(pdev); 228 + struct ehci_hcd *ehci = hcd_to_ehci(hcd); 229 + struct ehci_mxc_priv *priv = (struct ehci_mxc_priv *) ehci->priv; 230 + 231 + usb_remove_hcd(hcd); 187 232 188 233 if (pdata 
&& pdata->exit) 189 234 pdata->exit(pdev); ··· 194 233 if (pdata->otg) 195 234 usb_phy_shutdown(pdata->otg); 196 235 197 - usb_remove_hcd(hcd); 198 - usb_put_hcd(hcd); 199 - platform_set_drvdata(pdev, NULL); 200 - 201 236 clk_disable_unprepare(priv->usbclk); 202 237 clk_disable_unprepare(priv->ahbclk); 203 238 204 239 if (priv->phyclk) 205 240 clk_disable_unprepare(priv->phyclk); 206 241 242 + usb_put_hcd(hcd); 243 + platform_set_drvdata(pdev, NULL); 207 244 return 0; 208 245 } 209 246 210 247 static void ehci_mxc_drv_shutdown(struct platform_device *pdev) 211 248 { 212 - struct ehci_mxc_priv *priv = platform_get_drvdata(pdev); 213 - struct usb_hcd *hcd = priv->hcd; 249 + struct usb_hcd *hcd = platform_get_drvdata(pdev); 214 250 215 251 if (hcd->driver->shutdown) 216 252 hcd->driver->shutdown(hcd); ··· 217 259 218 260 static struct platform_driver ehci_mxc_driver = { 219 261 .probe = ehci_mxc_drv_probe, 220 - .remove = __exit_p(ehci_mxc_drv_remove), 262 + .remove = ehci_mxc_drv_remove, 221 263 .shutdown = ehci_mxc_drv_shutdown, 222 264 .driver = { 223 265 .name = "mxc-ehci", 224 266 }, 225 267 }; 268 + 269 + static int __init ehci_mxc_init(void) 270 + { 271 + if (usb_disabled()) 272 + return -ENODEV; 273 + 274 + pr_info("%s: " DRIVER_DESC "\n", hcd_name); 275 + 276 + ehci_init_driver(&ehci_mxc_hc_driver, &ehci_mxc_overrides); 277 + return platform_driver_register(&ehci_mxc_driver); 278 + } 279 + module_init(ehci_mxc_init); 280 + 281 + static void __exit ehci_mxc_cleanup(void) 282 + { 283 + platform_driver_unregister(&ehci_mxc_driver); 284 + } 285 + module_exit(ehci_mxc_cleanup); 286 + 287 + MODULE_DESCRIPTION(DRIVER_DESC); 288 + MODULE_AUTHOR("Sascha Hauer"); 289 + MODULE_LICENSE("GPL");
+7
drivers/usb/host/ehci.h
··· 38 38 #endif 39 39 40 40 /* statistics can be kept for tuning/monitoring */ 41 + #ifdef DEBUG 42 + #define EHCI_STATS 43 + #endif 44 + 41 45 struct ehci_stats { 42 46 /* irq usage */ 43 47 unsigned long normal; ··· 225 221 #ifdef DEBUG 226 222 struct dentry *debug_dir; 227 223 #endif 224 + 225 + /* platform-specific data -- must come last */ 226 + unsigned long priv[0] __aligned(sizeof(s64)); 228 227 }; 229 228 230 229 /* convert between an HCD pointer and the corresponding EHCI_HCD */
+9 -6
drivers/usb/host/uhci-hcd.c
··· 447 447 return IRQ_NONE; 448 448 uhci_writew(uhci, status, USBSTS); /* Clear it */ 449 449 450 + spin_lock(&uhci->lock); 451 + if (unlikely(!uhci->is_initialized)) /* not yet configured */ 452 + goto done; 453 + 450 454 if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) { 451 455 if (status & USBSTS_HSE) 452 456 dev_err(uhci_dev(uhci), ··· 459 455 dev_err(uhci_dev(uhci), 460 456 "host controller process error, something bad happened!\n"); 461 457 if (status & USBSTS_HCH) { 462 - spin_lock(&uhci->lock); 463 458 if (uhci->rh_state >= UHCI_RH_RUNNING) { 464 459 dev_err(uhci_dev(uhci), 465 460 "host controller halted, very bad!\n"); ··· 475 472 * pending unlinks */ 476 473 mod_timer(&hcd->rh_timer, jiffies); 477 474 } 478 - spin_unlock(&uhci->lock); 479 475 } 480 476 } 481 477 482 - if (status & USBSTS_RD) 478 + if (status & USBSTS_RD) { 479 + spin_unlock(&uhci->lock); 483 480 usb_hcd_poll_rh_status(hcd); 484 - else { 485 - spin_lock(&uhci->lock); 481 + } else { 486 482 uhci_scan_schedule(uhci); 483 + done: 487 484 spin_unlock(&uhci->lock); 488 485 } 489 486 ··· 664 661 */ 665 662 mb(); 666 663 664 + spin_lock_irq(&uhci->lock); 667 665 configure_hc(uhci); 668 666 uhci->is_initialized = 1; 669 - spin_lock_irq(&uhci->lock); 670 667 start_rh(uhci); 671 668 spin_unlock_irq(&uhci->lock); 672 669 return 0;
+2 -2
drivers/usb/musb/cppi_dma.c
··· 105 105 musb_writel(&tx->tx_complete, 0, ptr); 106 106 } 107 107 108 - static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c) 108 + static void cppi_pool_init(struct cppi *cppi, struct cppi_channel *c) 109 109 { 110 110 int j; 111 111 ··· 150 150 c->last_processed = NULL; 151 151 } 152 152 153 - static int __init cppi_controller_start(struct dma_controller *c) 153 + static int cppi_controller_start(struct dma_controller *c) 154 154 { 155 155 struct cppi *controller; 156 156 void __iomem *tibase;
+8 -1
drivers/usb/serial/option.c
··· 449 449 #define PETATEL_VENDOR_ID 0x1ff4 450 450 #define PETATEL_PRODUCT_NP10T 0x600e 451 451 452 + /* TP-LINK Incorporated products */ 453 + #define TPLINK_VENDOR_ID 0x2357 454 + #define TPLINK_PRODUCT_MA180 0x0201 455 + 452 456 /* some devices interfaces need special handling due to a number of reasons */ 453 457 enum option_blacklist_reason { 454 458 OPTION_BLACKLIST_NONE = 0, ··· 935 931 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) }, 936 932 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */ 937 933 .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, 938 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) }, 934 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */ 935 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 939 936 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */ 940 937 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 941 938 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) }, ··· 1324 1319 { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, 1325 1320 { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, 1326 1321 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, 1322 + { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), 1323 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1327 1324 { } /* Terminating entry */ 1328 1325 }; 1329 1326 MODULE_DEVICE_TABLE(usb, option_ids);
+2 -2
drivers/vfio/pci/vfio_pci_rdwr.c
··· 240 240 filled = 1; 241 241 } else { 242 242 /* Drop writes, fill reads with FF */ 243 + filled = min((size_t)(x_end - pos), count); 243 244 if (!iswrite) { 244 245 char val = 0xFF; 245 246 size_t i; 246 247 247 - for (i = 0; i < x_end - pos; i++) { 248 + for (i = 0; i < filled; i++) { 248 249 if (put_user(val, buf + i)) 249 250 goto out; 250 251 } 251 252 } 252 253 253 - filled = x_end - pos; 254 254 } 255 255 256 256 count -= filled;
+12 -1
drivers/video/imxfb.c
··· 139 139 struct clk *clk_ahb; 140 140 struct clk *clk_per; 141 141 enum imxfb_type devtype; 142 + bool enabled; 142 143 143 144 /* 144 145 * These are the addresses we mapped ··· 537 536 538 537 static void imxfb_enable_controller(struct imxfb_info *fbi) 539 538 { 539 + 540 + if (fbi->enabled) 541 + return; 542 + 540 543 pr_debug("Enabling LCD controller\n"); 541 544 542 545 writel(fbi->screen_dma, fbi->regs + LCDC_SSA); ··· 561 556 clk_prepare_enable(fbi->clk_ipg); 562 557 clk_prepare_enable(fbi->clk_ahb); 563 558 clk_prepare_enable(fbi->clk_per); 559 + fbi->enabled = true; 564 560 565 561 if (fbi->backlight_power) 566 562 fbi->backlight_power(1); ··· 571 565 572 566 static void imxfb_disable_controller(struct imxfb_info *fbi) 573 567 { 568 + if (!fbi->enabled) 569 + return; 570 + 574 571 pr_debug("Disabling LCD controller\n"); 575 572 576 573 if (fbi->backlight_power) ··· 584 575 clk_disable_unprepare(fbi->clk_per); 585 576 clk_disable_unprepare(fbi->clk_ipg); 586 577 clk_disable_unprepare(fbi->clk_ahb); 578 + fbi->enabled = false; 587 579 588 580 writel(0, fbi->regs + LCDC_RMCR); 589 581 } ··· 739 729 740 730 memset(fbi, 0, sizeof(struct imxfb_info)); 741 731 732 + fbi->devtype = pdev->id_entry->driver_data; 733 + 742 734 strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id)); 743 735 744 736 info->fix.type = FB_TYPE_PACKED_PIXELS; ··· 801 789 return -ENOMEM; 802 790 803 791 fbi = info->par; 804 - fbi->devtype = pdev->id_entry->driver_data; 805 792 806 793 if (!fb_mode) 807 794 fb_mode = pdata->mode[0].mode.name;
+2 -2
drivers/xen/cpu_hotplug.c
··· 25 25 static int vcpu_online(unsigned int cpu) 26 26 { 27 27 int err; 28 - char dir[32], state[32]; 28 + char dir[16], state[16]; 29 29 30 30 sprintf(dir, "cpu/%u", cpu); 31 - err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state); 31 + err = xenbus_scanf(XBT_NIL, dir, "availability", "%15s", state); 32 32 if (err != 1) { 33 33 if (!xen_initial_domain()) 34 34 printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
+88 -42
drivers/xen/gntdev.c
··· 56 56 static atomic_t pages_mapped = ATOMIC_INIT(0); 57 57 58 58 static int use_ptemod; 59 + #define populate_freeable_maps use_ptemod 59 60 60 61 struct gntdev_priv { 62 + /* maps with visible offsets in the file descriptor */ 61 63 struct list_head maps; 62 - /* lock protects maps from concurrent changes */ 64 + /* maps that are not visible; will be freed on munmap. 65 + * Only populated if populate_freeable_maps == 1 */ 66 + struct list_head freeable_maps; 67 + /* lock protects maps and freeable_maps */ 63 68 spinlock_t lock; 64 69 struct mm_struct *mm; 65 70 struct mmu_notifier mn; ··· 198 193 return NULL; 199 194 } 200 195 201 - static void gntdev_put_map(struct grant_map *map) 196 + static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map) 202 197 { 203 198 if (!map) 204 199 return; ··· 211 206 if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { 212 207 notify_remote_via_evtchn(map->notify.event); 213 208 evtchn_put(map->notify.event); 209 + } 210 + 211 + if (populate_freeable_maps && priv) { 212 + spin_lock(&priv->lock); 213 + list_del(&map->next); 214 + spin_unlock(&priv->lock); 214 215 } 215 216 216 217 if (map->pages && !use_ptemod) ··· 312 301 313 302 if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { 314 303 int pgno = (map->notify.addr >> PAGE_SHIFT); 315 - if (pgno >= offset && pgno < offset + pages && use_ptemod) { 316 - void __user *tmp = (void __user *) 317 - map->vma->vm_start + map->notify.addr; 318 - err = copy_to_user(tmp, &err, 1); 319 - if (err) 320 - return -EFAULT; 321 - map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; 322 - } else if (pgno >= offset && pgno < offset + pages) { 323 - uint8_t *tmp = kmap(map->pages[pgno]); 304 + if (pgno >= offset && pgno < offset + pages) { 305 + /* No need for kmap, pages are in lowmem */ 306 + uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); 324 307 tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; 325 - kunmap(map->pages[pgno]); 326 308 map->notify.flags &= 
~UNMAP_NOTIFY_CLEAR_BYTE; 327 309 } 328 310 } ··· 380 376 static void gntdev_vma_close(struct vm_area_struct *vma) 381 377 { 382 378 struct grant_map *map = vma->vm_private_data; 379 + struct file *file = vma->vm_file; 380 + struct gntdev_priv *priv = file->private_data; 383 381 384 382 pr_debug("gntdev_vma_close %p\n", vma); 385 - map->vma = NULL; 383 + if (use_ptemod) { 384 + /* It is possible that an mmu notifier could be running 385 + * concurrently, so take priv->lock to ensure that the vma won't 386 + * vanishing during the unmap_grant_pages call, since we will 387 + * spin here until that completes. Such a concurrent call will 388 + * not do any unmapping, since that has been done prior to 389 + * closing the vma, but it may still iterate the unmap_ops list. 390 + */ 391 + spin_lock(&priv->lock); 392 + map->vma = NULL; 393 + spin_unlock(&priv->lock); 394 + } 386 395 vma->vm_private_data = NULL; 387 - gntdev_put_map(map); 396 + gntdev_put_map(priv, map); 388 397 } 389 398 390 399 static struct vm_operations_struct gntdev_vmops = { ··· 407 390 408 391 /* ------------------------------------------------------------------ */ 409 392 393 + static void unmap_if_in_range(struct grant_map *map, 394 + unsigned long start, unsigned long end) 395 + { 396 + unsigned long mstart, mend; 397 + int err; 398 + 399 + if (!map->vma) 400 + return; 401 + if (map->vma->vm_start >= end) 402 + return; 403 + if (map->vma->vm_end <= start) 404 + return; 405 + mstart = max(start, map->vma->vm_start); 406 + mend = min(end, map->vma->vm_end); 407 + pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", 408 + map->index, map->count, 409 + map->vma->vm_start, map->vma->vm_end, 410 + start, end, mstart, mend); 411 + err = unmap_grant_pages(map, 412 + (mstart - map->vma->vm_start) >> PAGE_SHIFT, 413 + (mend - mstart) >> PAGE_SHIFT); 414 + WARN_ON(err); 415 + } 416 + 410 417 static void mn_invl_range_start(struct mmu_notifier *mn, 411 418 struct mm_struct *mm, 412 419 unsigned long 
start, unsigned long end) 413 420 { 414 421 struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); 415 422 struct grant_map *map; 416 - unsigned long mstart, mend; 417 - int err; 418 423 419 424 spin_lock(&priv->lock); 420 425 list_for_each_entry(map, &priv->maps, next) { 421 - if (!map->vma) 422 - continue; 423 - if (map->vma->vm_start >= end) 424 - continue; 425 - if (map->vma->vm_end <= start) 426 - continue; 427 - mstart = max(start, map->vma->vm_start); 428 - mend = min(end, map->vma->vm_end); 429 - pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", 430 - map->index, map->count, 431 - map->vma->vm_start, map->vma->vm_end, 432 - start, end, mstart, mend); 433 - err = unmap_grant_pages(map, 434 - (mstart - map->vma->vm_start) >> PAGE_SHIFT, 435 - (mend - mstart) >> PAGE_SHIFT); 436 - WARN_ON(err); 426 + unmap_if_in_range(map, start, end); 427 + } 428 + list_for_each_entry(map, &priv->freeable_maps, next) { 429 + unmap_if_in_range(map, start, end); 437 430 } 438 431 spin_unlock(&priv->lock); 439 432 } ··· 464 437 465 438 spin_lock(&priv->lock); 466 439 list_for_each_entry(map, &priv->maps, next) { 440 + if (!map->vma) 441 + continue; 442 + pr_debug("map %d+%d (%lx %lx)\n", 443 + map->index, map->count, 444 + map->vma->vm_start, map->vma->vm_end); 445 + err = unmap_grant_pages(map, /* offset */ 0, map->count); 446 + WARN_ON(err); 447 + } 448 + list_for_each_entry(map, &priv->freeable_maps, next) { 467 449 if (!map->vma) 468 450 continue; 469 451 pr_debug("map %d+%d (%lx %lx)\n", ··· 502 466 return -ENOMEM; 503 467 504 468 INIT_LIST_HEAD(&priv->maps); 469 + INIT_LIST_HEAD(&priv->freeable_maps); 505 470 spin_lock_init(&priv->lock); 506 471 507 472 if (use_ptemod) { ··· 537 500 while (!list_empty(&priv->maps)) { 538 501 map = list_entry(priv->maps.next, struct grant_map, next); 539 502 list_del(&map->next); 540 - gntdev_put_map(map); 503 + gntdev_put_map(NULL /* already removed */, map); 541 504 } 505 + 
WARN_ON(!list_empty(&priv->freeable_maps)); 542 506 543 507 if (use_ptemod) 544 508 mmu_notifier_unregister(&priv->mn, priv->mm); ··· 567 529 568 530 if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) { 569 531 pr_debug("can't map: over limit\n"); 570 - gntdev_put_map(map); 532 + gntdev_put_map(NULL, map); 571 533 return err; 572 534 } 573 535 574 536 if (copy_from_user(map->grants, &u->refs, 575 537 sizeof(map->grants[0]) * op.count) != 0) { 576 - gntdev_put_map(map); 577 - return err; 538 + gntdev_put_map(NULL, map); 539 + return -EFAULT; 578 540 } 579 541 580 542 spin_lock(&priv->lock); ··· 603 565 map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count); 604 566 if (map) { 605 567 list_del(&map->next); 568 + if (populate_freeable_maps) 569 + list_add_tail(&map->next, &priv->freeable_maps); 606 570 err = 0; 607 571 } 608 572 spin_unlock(&priv->lock); 609 573 if (map) 610 - gntdev_put_map(map); 574 + gntdev_put_map(priv, map); 611 575 return err; 612 576 } 613 577 ··· 619 579 struct ioctl_gntdev_get_offset_for_vaddr op; 620 580 struct vm_area_struct *vma; 621 581 struct grant_map *map; 582 + int rv = -EINVAL; 622 583 623 584 if (copy_from_user(&op, u, sizeof(op)) != 0) 624 585 return -EFAULT; 625 586 pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr); 626 587 588 + down_read(&current->mm->mmap_sem); 627 589 vma = find_vma(current->mm, op.vaddr); 628 590 if (!vma || vma->vm_ops != &gntdev_vmops) 629 - return -EINVAL; 591 + goto out_unlock; 630 592 631 593 map = vma->vm_private_data; 632 594 if (!map) 633 - return -EINVAL; 595 + goto out_unlock; 634 596 635 597 op.offset = map->index << PAGE_SHIFT; 636 598 op.count = map->count; 599 + rv = 0; 637 600 638 - if (copy_to_user(u, &op, sizeof(op)) != 0) 601 + out_unlock: 602 + up_read(&current->mm->mmap_sem); 603 + 604 + if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0) 639 605 return -EFAULT; 640 - return 0; 606 + return rv; 641 607 } 642 608 643 609 static long 
gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u) ··· 824 778 out_put_map: 825 779 if (use_ptemod) 826 780 map->vma = NULL; 827 - gntdev_put_map(map); 781 + gntdev_put_map(priv, map); 828 782 return err; 829 783 } 830 784
+30 -20
drivers/xen/grant-table.c
··· 56 56 /* External tools reserve first few grant table entries. */ 57 57 #define NR_RESERVED_ENTRIES 8 58 58 #define GNTTAB_LIST_END 0xffffffff 59 - #define GREFS_PER_GRANT_FRAME \ 60 - (grant_table_version == 1 ? \ 61 - (PAGE_SIZE / sizeof(struct grant_entry_v1)) : \ 62 - (PAGE_SIZE / sizeof(union grant_entry_v2))) 63 59 64 60 static grant_ref_t **gnttab_list; 65 61 static unsigned int nr_grant_frames; ··· 150 154 static grant_status_t *grstatus; 151 155 152 156 static int grant_table_version; 157 + static int grefs_per_grant_frame; 153 158 154 159 static struct gnttab_free_callback *gnttab_free_callback_list; 155 160 ··· 764 767 unsigned int new_nr_grant_frames, extra_entries, i; 765 768 unsigned int nr_glist_frames, new_nr_glist_frames; 766 769 767 - new_nr_grant_frames = nr_grant_frames + more_frames; 768 - extra_entries = more_frames * GREFS_PER_GRANT_FRAME; 770 + BUG_ON(grefs_per_grant_frame == 0); 769 771 770 - nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; 772 + new_nr_grant_frames = nr_grant_frames + more_frames; 773 + extra_entries = more_frames * grefs_per_grant_frame; 774 + 775 + nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP; 771 776 new_nr_glist_frames = 772 - (new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; 777 + (new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP; 773 778 for (i = nr_glist_frames; i < new_nr_glist_frames; i++) { 774 779 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC); 775 780 if (!gnttab_list[i]) ··· 779 780 } 780 781 781 782 782 - for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames; 783 - i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++) 783 + for (i = grefs_per_grant_frame * nr_grant_frames; 784 + i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++) 784 785 gnttab_entry(i) = i + 1; 785 786 786 787 gnttab_entry(i) = gnttab_free_head; 787 - gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames; 788 + gnttab_free_head = 
grefs_per_grant_frame * nr_grant_frames; 788 789 gnttab_free_count += extra_entries; 789 790 790 791 nr_grant_frames = new_nr_grant_frames; ··· 956 957 957 958 static unsigned nr_status_frames(unsigned nr_grant_frames) 958 959 { 959 - return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP; 960 + BUG_ON(grefs_per_grant_frame == 0); 961 + return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP; 960 962 } 961 963 962 964 static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) ··· 1115 1115 rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1); 1116 1116 if (rc == 0 && gsv.version == 2) { 1117 1117 grant_table_version = 2; 1118 + grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2); 1118 1119 gnttab_interface = &gnttab_v2_ops; 1119 1120 } else if (grant_table_version == 2) { 1120 1121 /* ··· 1128 1127 panic("we need grant tables version 2, but only version 1 is available"); 1129 1128 } else { 1130 1129 grant_table_version = 1; 1130 + grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1); 1131 1131 gnttab_interface = &gnttab_v1_ops; 1132 1132 } 1133 1133 printk(KERN_INFO "Grant tables using version %d layout.\n", 1134 1134 grant_table_version); 1135 1135 } 1136 1136 1137 - int gnttab_resume(void) 1137 + static int gnttab_setup(void) 1138 1138 { 1139 1139 unsigned int max_nr_gframes; 1140 1140 1141 - gnttab_request_version(); 1142 1141 max_nr_gframes = gnttab_max_grant_frames(); 1143 1142 if (max_nr_gframes < nr_grant_frames) 1144 1143 return -ENOSYS; ··· 1161 1160 return 0; 1162 1161 } 1163 1162 1163 + int gnttab_resume(void) 1164 + { 1165 + gnttab_request_version(); 1166 + return gnttab_setup(); 1167 + } 1168 + 1164 1169 int gnttab_suspend(void) 1165 1170 { 1166 1171 gnttab_interface->unmap_frames(); ··· 1178 1171 int rc; 1179 1172 unsigned int cur, extra; 1180 1173 1174 + BUG_ON(grefs_per_grant_frame == 0); 1181 1175 cur = nr_grant_frames; 1182 - extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) / 
1183 - GREFS_PER_GRANT_FRAME); 1176 + extra = ((req_entries + (grefs_per_grant_frame-1)) / 1177 + grefs_per_grant_frame); 1184 1178 if (cur + extra > gnttab_max_grant_frames()) 1185 1179 return -ENOSPC; 1186 1180 ··· 1199 1191 unsigned int nr_init_grefs; 1200 1192 int ret; 1201 1193 1194 + gnttab_request_version(); 1202 1195 nr_grant_frames = 1; 1203 1196 boot_max_nr_grant_frames = __max_nr_grant_frames(); 1204 1197 1205 1198 /* Determine the maximum number of frames required for the 1206 1199 * grant reference free list on the current hypervisor. 1207 1200 */ 1201 + BUG_ON(grefs_per_grant_frame == 0); 1208 1202 max_nr_glist_frames = (boot_max_nr_grant_frames * 1209 - GREFS_PER_GRANT_FRAME / RPP); 1203 + grefs_per_grant_frame / RPP); 1210 1204 1211 1205 gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), 1212 1206 GFP_KERNEL); 1213 1207 if (gnttab_list == NULL) 1214 1208 return -ENOMEM; 1215 1209 1216 - nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; 1210 + nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP; 1217 1211 for (i = 0; i < nr_glist_frames; i++) { 1218 1212 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); 1219 1213 if (gnttab_list[i] == NULL) { ··· 1224 1214 } 1225 1215 } 1226 1216 1227 - if (gnttab_resume() < 0) { 1217 + if (gnttab_setup() < 0) { 1228 1218 ret = -ENODEV; 1229 1219 goto ini_nomem; 1230 1220 } 1231 1221 1232 - nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME; 1222 + nr_init_grefs = nr_grant_frames * grefs_per_grant_frame; 1233 1223 1234 1224 for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) 1235 1225 gnttab_entry(i) = i + 1;
+47 -42
drivers/xen/privcmd.c
··· 199 199 LIST_HEAD(pagelist); 200 200 struct mmap_mfn_state state; 201 201 202 - if (!xen_initial_domain()) 203 - return -EPERM; 204 - 205 202 /* We only support privcmd_ioctl_mmap_batch for auto translated. */ 206 203 if (xen_feature(XENFEAT_auto_translated_physmap)) 207 204 return -ENOSYS; ··· 258 261 * -ENOENT if at least 1 -ENOENT has happened. 259 262 */ 260 263 int global_error; 261 - /* An array for individual errors */ 262 - int *err; 264 + int version; 263 265 264 266 /* User-space mfn array to store errors in the second pass for V1. */ 265 267 xen_pfn_t __user *user_mfn; 268 + /* User-space int array to store errors in the second pass for V2. */ 269 + int __user *user_err; 266 270 }; 267 271 268 272 /* auto translated dom0 note: if domU being created is PV, then mfn is ··· 286 288 &cur_page); 287 289 288 290 /* Store error code for second pass. */ 289 - *(st->err++) = ret; 291 + if (st->version == 1) { 292 + if (ret < 0) { 293 + /* 294 + * V1 encodes the error codes in the 32bit top nibble of the 295 + * mfn (with its known limitations vis-a-vis 64 bit callers). 296 + */ 297 + *mfnp |= (ret == -ENOENT) ? 298 + PRIVCMD_MMAPBATCH_PAGED_ERROR : 299 + PRIVCMD_MMAPBATCH_MFN_ERROR; 300 + } 301 + } else { /* st->version == 2 */ 302 + *((int *) mfnp) = ret; 303 + } 290 304 291 305 /* And see if it affects the global_error. */ 292 306 if (ret < 0) { ··· 315 305 return 0; 316 306 } 317 307 318 - static int mmap_return_errors_v1(void *data, void *state) 308 + static int mmap_return_errors(void *data, void *state) 319 309 { 320 - xen_pfn_t *mfnp = data; 321 310 struct mmap_batch_state *st = state; 322 - int err = *(st->err++); 323 311 324 - /* 325 - * V1 encodes the error codes in the 32bit top nibble of the 326 - * mfn (with its known limitations vis-a-vis 64 bit callers). 327 - */ 328 - *mfnp |= (err == -ENOENT) ? 
329 - PRIVCMD_MMAPBATCH_PAGED_ERROR : 330 - PRIVCMD_MMAPBATCH_MFN_ERROR; 331 - return __put_user(*mfnp, st->user_mfn++); 312 + if (st->version == 1) { 313 + xen_pfn_t mfnp = *((xen_pfn_t *) data); 314 + if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR) 315 + return __put_user(mfnp, st->user_mfn++); 316 + else 317 + st->user_mfn++; 318 + } else { /* st->version == 2 */ 319 + int err = *((int *) data); 320 + if (err) 321 + return __put_user(err, st->user_err++); 322 + else 323 + st->user_err++; 324 + } 325 + 326 + return 0; 332 327 } 333 328 334 329 /* Allocate pfns that are then mapped with gmfns from foreign domid. Update ··· 372 357 struct vm_area_struct *vma; 373 358 unsigned long nr_pages; 374 359 LIST_HEAD(pagelist); 375 - int *err_array = NULL; 376 360 struct mmap_batch_state state; 377 - 378 - if (!xen_initial_domain()) 379 - return -EPERM; 380 361 381 362 switch (version) { 382 363 case 1: ··· 407 396 goto out; 408 397 } 409 398 410 - err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL); 411 - if (err_array == NULL) { 412 - ret = -ENOMEM; 413 - goto out; 399 + if (version == 2) { 400 + /* Zero error array now to only copy back actual errors. */ 401 + if (clear_user(m.err, sizeof(int) * m.num)) { 402 + ret = -EFAULT; 403 + goto out; 404 + } 414 405 } 415 406 416 407 down_write(&mm->mmap_sem); ··· 440 427 state.va = m.addr; 441 428 state.index = 0; 442 429 state.global_error = 0; 443 - state.err = err_array; 430 + state.version = version; 444 431 445 432 /* mmap_batch_fn guarantees ret == 0 */ 446 433 BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t), ··· 448 435 449 436 up_write(&mm->mmap_sem); 450 437 451 - if (version == 1) { 452 - if (state.global_error) { 453 - /* Write back errors in second pass. 
*/ 454 - state.user_mfn = (xen_pfn_t *)m.arr; 455 - state.err = err_array; 456 - ret = traverse_pages(m.num, sizeof(xen_pfn_t), 457 - &pagelist, mmap_return_errors_v1, &state); 458 - } else 459 - ret = 0; 460 - 461 - } else if (version == 2) { 462 - ret = __copy_to_user(m.err, err_array, m.num * sizeof(int)); 463 - if (ret) 464 - ret = -EFAULT; 465 - } 438 + if (state.global_error) { 439 + /* Write back errors in second pass. */ 440 + state.user_mfn = (xen_pfn_t *)m.arr; 441 + state.user_err = m.err; 442 + ret = traverse_pages(m.num, sizeof(xen_pfn_t), 443 + &pagelist, mmap_return_errors, &state); 444 + } else 445 + ret = 0; 466 446 467 447 /* If we have not had any EFAULT-like global errors then set the global 468 448 * error to -ENOENT if necessary. */ ··· 463 457 ret = -ENOENT; 464 458 465 459 out: 466 - kfree(err_array); 467 460 free_page_list(&pagelist); 468 461 469 462 return ret;
+1 -1
drivers/xen/xen-pciback/pciback.h
··· 124 124 static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, 125 125 struct pci_dev *dev) 126 126 { 127 - if (xen_pcibk_backend && xen_pcibk_backend->free) 127 + if (xen_pcibk_backend && xen_pcibk_backend->release) 128 128 return xen_pcibk_backend->release(pdev, dev); 129 129 } 130 130
-10
fs/Kconfig
··· 68 68 source "fs/autofs4/Kconfig" 69 69 source "fs/fuse/Kconfig" 70 70 71 - config CUSE 72 - tristate "Character device in Userspace support" 73 - depends on FUSE_FS 74 - help 75 - This FUSE extension allows character devices to be 76 - implemented in userspace. 77 - 78 - If you want to develop or use userspace character device 79 - based on CUSE, answer Y or M. 80 - 81 71 config GENERIC_ACL 82 72 bool 83 73 select FS_POSIX_ACL
+4 -2
fs/btrfs/extent-tree.c
··· 3997 3997 * We make the other tasks wait for the flush only when we can flush 3998 3998 * all things. 3999 3999 */ 4000 - if (ret && flush == BTRFS_RESERVE_FLUSH_ALL) { 4000 + if (ret && flush != BTRFS_RESERVE_NO_FLUSH) { 4001 4001 flushing = true; 4002 4002 space_info->flush = 1; 4003 4003 } ··· 5560 5560 int empty_cluster = 2 * 1024 * 1024; 5561 5561 struct btrfs_space_info *space_info; 5562 5562 int loop = 0; 5563 - int index = 0; 5563 + int index = __get_raid_index(data); 5564 5564 int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ? 5565 5565 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC; 5566 5566 bool found_uncached_bg = false; ··· 6788 6788 &wc->flags[level]); 6789 6789 if (ret < 0) { 6790 6790 btrfs_tree_unlock_rw(eb, path->locks[level]); 6791 + path->locks[level] = 0; 6791 6792 return ret; 6792 6793 } 6793 6794 BUG_ON(wc->refs[level] == 0); 6794 6795 if (wc->refs[level] == 1) { 6795 6796 btrfs_tree_unlock_rw(eb, path->locks[level]); 6797 + path->locks[level] = 0; 6796 6798 return 1; 6797 6799 } 6798 6800 }
+12 -1
fs/btrfs/extent_map.c
··· 171 171 if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags)) 172 172 return 0; 173 173 174 + if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) || 175 + test_bit(EXTENT_FLAG_LOGGING, &next->flags)) 176 + return 0; 177 + 174 178 if (extent_map_end(prev) == next->start && 175 179 prev->flags == next->flags && 176 180 prev->bdev == next->bdev && ··· 259 255 if (!em) 260 256 goto out; 261 257 262 - list_move(&em->list, &tree->modified_extents); 258 + if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags)) 259 + list_move(&em->list, &tree->modified_extents); 263 260 em->generation = gen; 264 261 clear_bit(EXTENT_FLAG_PINNED, &em->flags); 265 262 em->mod_start = em->start; ··· 283 278 write_unlock(&tree->lock); 284 279 return ret; 285 280 281 + } 282 + 283 + void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em) 284 + { 285 + clear_bit(EXTENT_FLAG_LOGGING, &em->flags); 286 + try_merge_map(tree, em); 286 287 } 287 288 288 289 /**
+1
fs/btrfs/extent_map.h
··· 69 69 int __init extent_map_init(void); 70 70 void extent_map_exit(void); 71 71 int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen); 72 + void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em); 72 73 struct extent_map *search_extent_mapping(struct extent_map_tree *tree, 73 74 u64 start, u64 len); 74 75 #endif
+2 -2
fs/btrfs/file-item.c
··· 460 460 if (!contig) 461 461 offset = page_offset(bvec->bv_page) + bvec->bv_offset; 462 462 463 - if (!contig && (offset >= ordered->file_offset + ordered->len || 464 - offset < ordered->file_offset)) { 463 + if (offset >= ordered->file_offset + ordered->len || 464 + offset < ordered->file_offset) { 465 465 unsigned long bytes_left; 466 466 sums->len = this_sum_bytes; 467 467 this_sum_bytes = 0;
+7 -3
fs/btrfs/file.c
··· 2241 2241 if (lockend <= lockstart) 2242 2242 lockend = lockstart + root->sectorsize; 2243 2243 2244 + lockend--; 2244 2245 len = lockend - lockstart + 1; 2245 2246 2246 2247 len = max_t(u64, len, root->sectorsize); ··· 2308 2307 } 2309 2308 } 2310 2309 2311 - *offset = start; 2312 - free_extent_map(em); 2313 - break; 2310 + if (!test_bit(EXTENT_FLAG_PREALLOC, 2311 + &em->flags)) { 2312 + *offset = start; 2313 + free_extent_map(em); 2314 + break; 2315 + } 2314 2316 } 2315 2317 } 2316 2318
+12 -8
fs/btrfs/free-space-cache.c
··· 1862 1862 { 1863 1863 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 1864 1864 struct btrfs_free_space *info; 1865 - int ret = 0; 1865 + int ret; 1866 + bool re_search = false; 1866 1867 1867 1868 spin_lock(&ctl->tree_lock); 1868 1869 1869 1870 again: 1871 + ret = 0; 1870 1872 if (!bytes) 1871 1873 goto out_lock; 1872 1874 ··· 1881 1879 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1882 1880 1, 0); 1883 1881 if (!info) { 1884 - /* the tree logging code might be calling us before we 1885 - * have fully loaded the free space rbtree for this 1886 - * block group. So it is possible the entry won't 1887 - * be in the rbtree yet at all. The caching code 1888 - * will make sure not to put it in the rbtree if 1889 - * the logging code has pinned it. 1882 + /* 1883 + * If we found a partial bit of our free space in a 1884 + * bitmap but then couldn't find the other part this may 1885 + * be a problem, so WARN about it. 1890 1886 */ 1887 + WARN_ON(re_search); 1891 1888 goto out_lock; 1892 1889 } 1893 1890 } 1894 1891 1892 + re_search = false; 1895 1893 if (!info->bitmap) { 1896 1894 unlink_free_space(ctl, info); 1897 1895 if (offset == info->offset) { ··· 1937 1935 } 1938 1936 1939 1937 ret = remove_from_bitmap(ctl, info, &offset, &bytes); 1940 - if (ret == -EAGAIN) 1938 + if (ret == -EAGAIN) { 1939 + re_search = true; 1941 1940 goto again; 1941 + } 1942 1942 BUG_ON(ret); /* logic error */ 1943 1943 out_lock: 1944 1944 spin_unlock(&ctl->tree_lock);
+102 -35
fs/btrfs/inode.c
··· 88 88 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK, 89 89 }; 90 90 91 - static int btrfs_setsize(struct inode *inode, loff_t newsize); 91 + static int btrfs_setsize(struct inode *inode, struct iattr *attr); 92 92 static int btrfs_truncate(struct inode *inode); 93 93 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent); 94 94 static noinline int cow_file_range(struct inode *inode, ··· 2478 2478 continue; 2479 2479 } 2480 2480 nr_truncate++; 2481 + 2482 + /* 1 for the orphan item deletion. */ 2483 + trans = btrfs_start_transaction(root, 1); 2484 + if (IS_ERR(trans)) { 2485 + ret = PTR_ERR(trans); 2486 + goto out; 2487 + } 2488 + ret = btrfs_orphan_add(trans, inode); 2489 + btrfs_end_transaction(trans, root); 2490 + if (ret) 2491 + goto out; 2492 + 2481 2493 ret = btrfs_truncate(inode); 2482 2494 } else { 2483 2495 nr_unlink++; ··· 3677 3665 block_end - cur_offset, 0); 3678 3666 if (IS_ERR(em)) { 3679 3667 err = PTR_ERR(em); 3668 + em = NULL; 3680 3669 break; 3681 3670 } 3682 3671 last_byte = min(extent_map_end(em), block_end); ··· 3761 3748 return err; 3762 3749 } 3763 3750 3764 - static int btrfs_setsize(struct inode *inode, loff_t newsize) 3751 + static int btrfs_setsize(struct inode *inode, struct iattr *attr) 3765 3752 { 3766 3753 struct btrfs_root *root = BTRFS_I(inode)->root; 3767 3754 struct btrfs_trans_handle *trans; 3768 3755 loff_t oldsize = i_size_read(inode); 3756 + loff_t newsize = attr->ia_size; 3757 + int mask = attr->ia_valid; 3769 3758 int ret; 3770 3759 3771 3760 if (newsize == oldsize) 3772 3761 return 0; 3762 + 3763 + /* 3764 + * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a 3765 + * special case where we need to update the times despite not having 3766 + * these flags set. For all other operations the VFS set these flags 3767 + * explicitly if it wants a timestamp update. 
3768 + */ 3769 + if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) 3770 + inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb); 3773 3771 3774 3772 if (newsize > oldsize) { 3775 3773 truncate_pagecache(inode, oldsize, newsize); ··· 3807 3783 set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, 3808 3784 &BTRFS_I(inode)->runtime_flags); 3809 3785 3786 + /* 3787 + * 1 for the orphan item we're going to add 3788 + * 1 for the orphan item deletion. 3789 + */ 3790 + trans = btrfs_start_transaction(root, 2); 3791 + if (IS_ERR(trans)) 3792 + return PTR_ERR(trans); 3793 + 3794 + /* 3795 + * We need to do this in case we fail at _any_ point during the 3796 + * actual truncate. Once we do the truncate_setsize we could 3797 + * invalidate pages which forces any outstanding ordered io to 3798 + * be instantly completed which will give us extents that need 3799 + * to be truncated. If we fail to get an orphan inode down we 3800 + * could have left over extents that were never meant to live, 3801 + * so we need to garuntee from this point on that everything 3802 + * will be consistent. 
3803 + */ 3804 + ret = btrfs_orphan_add(trans, inode); 3805 + btrfs_end_transaction(trans, root); 3806 + if (ret) 3807 + return ret; 3808 + 3810 3809 /* we don't support swapfiles, so vmtruncate shouldn't fail */ 3811 3810 truncate_setsize(inode, newsize); 3812 3811 ret = btrfs_truncate(inode); 3812 + if (ret && inode->i_nlink) 3813 + btrfs_orphan_del(NULL, inode); 3813 3814 } 3814 3815 3815 3816 return ret; ··· 3854 3805 return err; 3855 3806 3856 3807 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 3857 - err = btrfs_setsize(inode, attr->ia_size); 3808 + err = btrfs_setsize(inode, attr); 3858 3809 if (err) 3859 3810 return err; 3860 3811 } ··· 5621 5572 return em; 5622 5573 if (em) { 5623 5574 /* 5624 - * if our em maps to a hole, there might 5625 - * actually be delalloc bytes behind it 5575 + * if our em maps to 5576 + * - a hole or 5577 + * - a pre-alloc extent, 5578 + * there might actually be delalloc bytes behind it. 5626 5579 */ 5627 - if (em->block_start != EXTENT_MAP_HOLE) 5580 + if (em->block_start != EXTENT_MAP_HOLE && 5581 + !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 5628 5582 return em; 5629 5583 else 5630 5584 hole_em = em; ··· 5709 5657 */ 5710 5658 em->block_start = hole_em->block_start; 5711 5659 em->block_len = hole_len; 5660 + if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags)) 5661 + set_bit(EXTENT_FLAG_PREALLOC, &em->flags); 5712 5662 } else { 5713 5663 em->start = range_start; 5714 5664 em->len = found; ··· 6969 6915 6970 6916 /* 6971 6917 * 1 for the truncate slack space 6972 - * 1 for the orphan item we're going to add 6973 - * 1 for the orphan item deletion 6974 6918 * 1 for updating the inode. 
6975 6919 */ 6976 - trans = btrfs_start_transaction(root, 4); 6920 + trans = btrfs_start_transaction(root, 2); 6977 6921 if (IS_ERR(trans)) { 6978 6922 err = PTR_ERR(trans); 6979 6923 goto out; ··· 6981 6929 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv, 6982 6930 min_size); 6983 6931 BUG_ON(ret); 6984 - 6985 - ret = btrfs_orphan_add(trans, inode); 6986 - if (ret) { 6987 - btrfs_end_transaction(trans, root); 6988 - goto out; 6989 - } 6990 6932 6991 6933 /* 6992 6934 * setattr is responsible for setting the ordered_data_close flag, ··· 7050 7004 ret = btrfs_orphan_del(trans, inode); 7051 7005 if (ret) 7052 7006 err = ret; 7053 - } else if (ret && inode->i_nlink > 0) { 7054 - /* 7055 - * Failed to do the truncate, remove us from the in memory 7056 - * orphan list. 7057 - */ 7058 - ret = btrfs_orphan_del(NULL, inode); 7059 7007 } 7060 7008 7061 7009 if (trans) { ··· 7571 7531 */ 7572 7532 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) 7573 7533 { 7574 - struct list_head *head = &root->fs_info->delalloc_inodes; 7575 7534 struct btrfs_inode *binode; 7576 7535 struct inode *inode; 7577 7536 struct btrfs_delalloc_work *work, *next; 7578 7537 struct list_head works; 7538 + struct list_head splice; 7579 7539 int ret = 0; 7580 7540 7581 7541 if (root->fs_info->sb->s_flags & MS_RDONLY) 7582 7542 return -EROFS; 7583 7543 7584 7544 INIT_LIST_HEAD(&works); 7585 - 7545 + INIT_LIST_HEAD(&splice); 7546 + again: 7586 7547 spin_lock(&root->fs_info->delalloc_lock); 7587 - while (!list_empty(head)) { 7588 - binode = list_entry(head->next, struct btrfs_inode, 7548 + list_splice_init(&root->fs_info->delalloc_inodes, &splice); 7549 + while (!list_empty(&splice)) { 7550 + binode = list_entry(splice.next, struct btrfs_inode, 7589 7551 delalloc_inodes); 7552 + 7553 + list_del_init(&binode->delalloc_inodes); 7554 + 7590 7555 inode = igrab(&binode->vfs_inode); 7591 7556 if (!inode) 7592 - list_del_init(&binode->delalloc_inodes); 7557 + continue; 
7558 + 7559 + list_add_tail(&binode->delalloc_inodes, 7560 + &root->fs_info->delalloc_inodes); 7593 7561 spin_unlock(&root->fs_info->delalloc_lock); 7594 - if (inode) { 7595 - work = btrfs_alloc_delalloc_work(inode, 0, delay_iput); 7596 - if (!work) { 7597 - ret = -ENOMEM; 7598 - goto out; 7599 - } 7600 - list_add_tail(&work->list, &works); 7601 - btrfs_queue_worker(&root->fs_info->flush_workers, 7602 - &work->work); 7562 + 7563 + work = btrfs_alloc_delalloc_work(inode, 0, delay_iput); 7564 + if (unlikely(!work)) { 7565 + ret = -ENOMEM; 7566 + goto out; 7603 7567 } 7568 + list_add_tail(&work->list, &works); 7569 + btrfs_queue_worker(&root->fs_info->flush_workers, 7570 + &work->work); 7571 + 7604 7572 cond_resched(); 7605 7573 spin_lock(&root->fs_info->delalloc_lock); 7574 + } 7575 + spin_unlock(&root->fs_info->delalloc_lock); 7576 + 7577 + list_for_each_entry_safe(work, next, &works, list) { 7578 + list_del_init(&work->list); 7579 + btrfs_wait_and_free_delalloc_work(work); 7580 + } 7581 + 7582 + spin_lock(&root->fs_info->delalloc_lock); 7583 + if (!list_empty(&root->fs_info->delalloc_inodes)) { 7584 + spin_unlock(&root->fs_info->delalloc_lock); 7585 + goto again; 7606 7586 } 7607 7587 spin_unlock(&root->fs_info->delalloc_lock); 7608 7588 ··· 7638 7578 atomic_read(&root->fs_info->async_delalloc_pages) == 0)); 7639 7579 } 7640 7580 atomic_dec(&root->fs_info->async_submit_draining); 7581 + return 0; 7641 7582 out: 7642 7583 list_for_each_entry_safe(work, next, &works, list) { 7643 7584 list_del_init(&work->list); 7644 7585 btrfs_wait_and_free_delalloc_work(work); 7586 + } 7587 + 7588 + if (!list_empty_careful(&splice)) { 7589 + spin_lock(&root->fs_info->delalloc_lock); 7590 + list_splice_tail(&splice, &root->fs_info->delalloc_inodes); 7591 + spin_unlock(&root->fs_info->delalloc_lock); 7645 7592 } 7646 7593 return ret; 7647 7594 }
+94 -35
fs/btrfs/ioctl.c
··· 1339 1339 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, 1340 1340 1)) { 1341 1341 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); 1342 - return -EINPROGRESS; 1342 + mnt_drop_write_file(file); 1343 + return -EINVAL; 1343 1344 } 1344 1345 1345 1346 mutex_lock(&root->fs_info->volume_mutex); ··· 1363 1362 printk(KERN_INFO "btrfs: resizing devid %llu\n", 1364 1363 (unsigned long long)devid); 1365 1364 } 1365 + 1366 1366 device = btrfs_find_device(root->fs_info, devid, NULL, NULL); 1367 1367 if (!device) { 1368 1368 printk(KERN_INFO "btrfs: resizer unable to find device %llu\n", ··· 1371 1369 ret = -EINVAL; 1372 1370 goto out_free; 1373 1371 } 1374 - if (device->fs_devices && device->fs_devices->seeding) { 1372 + 1373 + if (!device->writeable) { 1375 1374 printk(KERN_INFO "btrfs: resizer unable to apply on " 1376 - "seeding device %llu\n", 1375 + "readonly device %llu\n", 1377 1376 (unsigned long long)devid); 1378 1377 ret = -EINVAL; 1379 1378 goto out_free; ··· 1446 1443 kfree(vol_args); 1447 1444 out: 1448 1445 mutex_unlock(&root->fs_info->volume_mutex); 1449 - mnt_drop_write_file(file); 1450 1446 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); 1447 + mnt_drop_write_file(file); 1451 1448 return ret; 1452 1449 } 1453 1450 ··· 2098 2095 err = inode_permission(inode, MAY_WRITE | MAY_EXEC); 2099 2096 if (err) 2100 2097 goto out_dput; 2101 - 2102 - /* check if subvolume may be deleted by a non-root user */ 2103 - err = btrfs_may_delete(dir, dentry, 1); 2104 - if (err) 2105 - goto out_dput; 2106 2098 } 2099 + 2100 + /* check if subvolume may be deleted by a user */ 2101 + err = btrfs_may_delete(dir, dentry, 1); 2102 + if (err) 2103 + goto out_dput; 2107 2104 2108 2105 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) { 2109 2106 err = -EINVAL; ··· 2186 2183 struct btrfs_ioctl_defrag_range_args *range; 2187 2184 int ret; 2188 2185 2189 - if (btrfs_root_readonly(root)) 2190 - return -EROFS; 2186 
+ ret = mnt_want_write_file(file); 2187 + if (ret) 2188 + return ret; 2191 2189 2192 2190 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, 2193 2191 1)) { 2194 2192 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); 2195 - return -EINPROGRESS; 2193 + mnt_drop_write_file(file); 2194 + return -EINVAL; 2196 2195 } 2197 - ret = mnt_want_write_file(file); 2198 - if (ret) { 2199 - atomic_set(&root->fs_info->mutually_exclusive_operation_running, 2200 - 0); 2201 - return ret; 2196 + 2197 + if (btrfs_root_readonly(root)) { 2198 + ret = -EROFS; 2199 + goto out; 2202 2200 } 2203 2201 2204 2202 switch (inode->i_mode & S_IFMT) { ··· 2251 2247 ret = -EINVAL; 2252 2248 } 2253 2249 out: 2254 - mnt_drop_write_file(file); 2255 2250 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); 2251 + mnt_drop_write_file(file); 2256 2252 return ret; 2257 2253 } 2258 2254 ··· 2267 2263 if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, 2268 2264 1)) { 2269 2265 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); 2270 - return -EINPROGRESS; 2266 + return -EINVAL; 2271 2267 } 2272 2268 2273 2269 mutex_lock(&root->fs_info->volume_mutex); ··· 2304 2300 1)) { 2305 2301 pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); 2306 2302 mnt_drop_write_file(file); 2307 - return -EINPROGRESS; 2303 + return -EINVAL; 2308 2304 } 2309 2305 2310 2306 mutex_lock(&root->fs_info->volume_mutex); ··· 2320 2316 kfree(vol_args); 2321 2317 out: 2322 2318 mutex_unlock(&root->fs_info->volume_mutex); 2323 - mnt_drop_write_file(file); 2324 2319 atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); 2320 + mnt_drop_write_file(file); 2325 2321 return ret; 2326 2322 } 2327 2323 ··· 3441 3437 struct btrfs_fs_info *fs_info = root->fs_info; 3442 3438 struct btrfs_ioctl_balance_args *bargs; 3443 3439 struct btrfs_balance_control *bctl; 3440 + bool need_unlock; /* for mut. 
excl. ops lock */ 3444 3441 int ret; 3445 - int need_to_clear_lock = 0; 3446 3442 3447 3443 if (!capable(CAP_SYS_ADMIN)) 3448 3444 return -EPERM; ··· 3451 3447 if (ret) 3452 3448 return ret; 3453 3449 3454 - mutex_lock(&fs_info->volume_mutex); 3450 + again: 3451 + if (!atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) { 3452 + mutex_lock(&fs_info->volume_mutex); 3453 + mutex_lock(&fs_info->balance_mutex); 3454 + need_unlock = true; 3455 + goto locked; 3456 + } 3457 + 3458 + /* 3459 + * mut. excl. ops lock is locked. Three possibilites: 3460 + * (1) some other op is running 3461 + * (2) balance is running 3462 + * (3) balance is paused -- special case (think resume) 3463 + */ 3455 3464 mutex_lock(&fs_info->balance_mutex); 3465 + if (fs_info->balance_ctl) { 3466 + /* this is either (2) or (3) */ 3467 + if (!atomic_read(&fs_info->balance_running)) { 3468 + mutex_unlock(&fs_info->balance_mutex); 3469 + if (!mutex_trylock(&fs_info->volume_mutex)) 3470 + goto again; 3471 + mutex_lock(&fs_info->balance_mutex); 3472 + 3473 + if (fs_info->balance_ctl && 3474 + !atomic_read(&fs_info->balance_running)) { 3475 + /* this is (3) */ 3476 + need_unlock = false; 3477 + goto locked; 3478 + } 3479 + 3480 + mutex_unlock(&fs_info->balance_mutex); 3481 + mutex_unlock(&fs_info->volume_mutex); 3482 + goto again; 3483 + } else { 3484 + /* this is (2) */ 3485 + mutex_unlock(&fs_info->balance_mutex); 3486 + ret = -EINPROGRESS; 3487 + goto out; 3488 + } 3489 + } else { 3490 + /* this is (1) */ 3491 + mutex_unlock(&fs_info->balance_mutex); 3492 + pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); 3493 + ret = -EINVAL; 3494 + goto out; 3495 + } 3496 + 3497 + locked: 3498 + BUG_ON(!atomic_read(&fs_info->mutually_exclusive_operation_running)); 3456 3499 3457 3500 if (arg) { 3458 3501 bargs = memdup_user(arg, sizeof(*bargs)); 3459 3502 if (IS_ERR(bargs)) { 3460 3503 ret = PTR_ERR(bargs); 3461 - goto out; 3504 + goto out_unlock; 3462 3505 } 3463 3506 3464 
3507 if (bargs->flags & BTRFS_BALANCE_RESUME) { ··· 3525 3474 bargs = NULL; 3526 3475 } 3527 3476 3528 - if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, 3529 - 1)) { 3530 - pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n"); 3477 + if (fs_info->balance_ctl) { 3531 3478 ret = -EINPROGRESS; 3532 3479 goto out_bargs; 3533 3480 } 3534 - need_to_clear_lock = 1; 3535 3481 3536 3482 bctl = kzalloc(sizeof(*bctl), GFP_NOFS); 3537 3483 if (!bctl) { ··· 3549 3501 } 3550 3502 3551 3503 do_balance: 3552 - ret = btrfs_balance(bctl, bargs); 3553 3504 /* 3554 - * bctl is freed in __cancel_balance or in free_fs_info if 3555 - * restriper was paused all the way until unmount 3505 + * Ownership of bctl and mutually_exclusive_operation_running 3506 + * goes to to btrfs_balance. bctl is freed in __cancel_balance, 3507 + * or, if restriper was paused all the way until unmount, in 3508 + * free_fs_info. mutually_exclusive_operation_running is 3509 + * cleared in __cancel_balance. 3556 3510 */ 3511 + need_unlock = false; 3512 + 3513 + ret = btrfs_balance(bctl, bargs); 3514 + 3557 3515 if (arg) { 3558 3516 if (copy_to_user(arg, bargs, sizeof(*bargs))) 3559 3517 ret = -EFAULT; ··· 3567 3513 3568 3514 out_bargs: 3569 3515 kfree(bargs); 3570 - out: 3571 - if (need_to_clear_lock) 3572 - atomic_set(&root->fs_info->mutually_exclusive_operation_running, 3573 - 0); 3516 + out_unlock: 3574 3517 mutex_unlock(&fs_info->balance_mutex); 3575 3518 mutex_unlock(&fs_info->volume_mutex); 3519 + if (need_unlock) 3520 + atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 3521 + out: 3576 3522 mnt_drop_write_file(file); 3577 3523 return ret; 3578 3524 } ··· 3750 3696 if (IS_ERR(sa)) { 3751 3697 ret = PTR_ERR(sa); 3752 3698 goto drop_write; 3699 + } 3700 + 3701 + if (!sa->qgroupid) { 3702 + ret = -EINVAL; 3703 + goto out; 3753 3704 } 3754 3705 3755 3706 trans = btrfs_join_transaction(root);
+19 -1
fs/btrfs/qgroup.c
··· 379 379 380 380 ret = add_relation_rb(fs_info, found_key.objectid, 381 381 found_key.offset); 382 + if (ret == -ENOENT) { 383 + printk(KERN_WARNING 384 + "btrfs: orphan qgroup relation 0x%llx->0x%llx\n", 385 + (unsigned long long)found_key.objectid, 386 + (unsigned long long)found_key.offset); 387 + ret = 0; /* ignore the error */ 388 + } 382 389 if (ret) 383 390 goto out; 384 391 next2: ··· 963 956 struct btrfs_fs_info *fs_info, u64 qgroupid) 964 957 { 965 958 struct btrfs_root *quota_root; 959 + struct btrfs_qgroup *qgroup; 966 960 int ret = 0; 967 961 968 962 quota_root = fs_info->quota_root; 969 963 if (!quota_root) 970 964 return -EINVAL; 971 965 966 + /* check if there are no relations to this qgroup */ 967 + spin_lock(&fs_info->qgroup_lock); 968 + qgroup = find_qgroup_rb(fs_info, qgroupid); 969 + if (qgroup) { 970 + if (!list_empty(&qgroup->groups) || !list_empty(&qgroup->members)) { 971 + spin_unlock(&fs_info->qgroup_lock); 972 + return -EBUSY; 973 + } 974 + } 975 + spin_unlock(&fs_info->qgroup_lock); 976 + 972 977 ret = del_qgroup_item(trans, quota_root, qgroupid); 973 978 974 979 spin_lock(&fs_info->qgroup_lock); 975 980 del_qgroup_rb(quota_root->fs_info, qgroupid); 976 - 977 981 spin_unlock(&fs_info->qgroup_lock); 978 982 979 983 return ret;
+3 -1
fs/btrfs/send.c
··· 1814 1814 (unsigned long)nce->ino); 1815 1815 if (!nce_head) { 1816 1816 nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS); 1817 - if (!nce_head) 1817 + if (!nce_head) { 1818 + kfree(nce); 1818 1819 return -ENOMEM; 1820 + } 1819 1821 INIT_LIST_HEAD(nce_head); 1820 1822 1821 1823 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
+1 -1
fs/btrfs/super.c
··· 267 267 function, line, errstr); 268 268 return; 269 269 } 270 - trans->transaction->aborted = errno; 270 + ACCESS_ONCE(trans->transaction->aborted) = errno; 271 271 __btrfs_std_error(root->fs_info, function, line, errno, NULL); 272 272 } 273 273 /*
+18 -1
fs/btrfs/transaction.c
··· 1468 1468 goto cleanup_transaction; 1469 1469 } 1470 1470 1471 - if (cur_trans->aborted) { 1471 + /* Stop the commit early if ->aborted is set */ 1472 + if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { 1472 1473 ret = cur_trans->aborted; 1473 1474 goto cleanup_transaction; 1474 1475 } ··· 1575 1574 wait_event(cur_trans->writer_wait, 1576 1575 atomic_read(&cur_trans->num_writers) == 1); 1577 1576 1577 + /* ->aborted might be set after the previous check, so check it */ 1578 + if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { 1579 + ret = cur_trans->aborted; 1580 + goto cleanup_transaction; 1581 + } 1578 1582 /* 1579 1583 * the reloc mutex makes sure that we stop 1580 1584 * the balancing code from coming in and moving ··· 1658 1652 1659 1653 ret = commit_cowonly_roots(trans, root); 1660 1654 if (ret) { 1655 + mutex_unlock(&root->fs_info->tree_log_mutex); 1656 + mutex_unlock(&root->fs_info->reloc_mutex); 1657 + goto cleanup_transaction; 1658 + } 1659 + 1660 + /* 1661 + * The tasks which save the space cache and inode cache may also 1662 + * update ->aborted, check it. 1663 + */ 1664 + if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { 1665 + ret = cur_trans->aborted; 1661 1666 mutex_unlock(&root->fs_info->tree_log_mutex); 1662 1667 mutex_unlock(&root->fs_info->reloc_mutex); 1663 1668 goto cleanup_transaction;
+8 -2
fs/btrfs/tree-log.c
··· 3357 3357 if (skip_csum) 3358 3358 return 0; 3359 3359 3360 + if (em->compress_type) { 3361 + csum_offset = 0; 3362 + csum_len = block_len; 3363 + } 3364 + 3360 3365 /* block start is already adjusted for the file extent offset. */ 3361 3366 ret = btrfs_lookup_csums_range(log->fs_info->csum_root, 3362 3367 em->block_start + csum_offset, ··· 3415 3410 em = list_entry(extents.next, struct extent_map, list); 3416 3411 3417 3412 list_del_init(&em->list); 3418 - clear_bit(EXTENT_FLAG_LOGGING, &em->flags); 3419 3413 3420 3414 /* 3421 3415 * If we had an error we just need to delete everybody from our 3422 3416 * private list. 3423 3417 */ 3424 3418 if (ret) { 3419 + clear_em_logging(tree, em); 3425 3420 free_extent_map(em); 3426 3421 continue; 3427 3422 } ··· 3429 3424 write_unlock(&tree->lock); 3430 3425 3431 3426 ret = log_one_extent(trans, inode, root, em, path); 3432 - free_extent_map(em); 3433 3427 write_lock(&tree->lock); 3428 + clear_em_logging(tree, em); 3429 + free_extent_map(em); 3434 3430 } 3435 3431 WARN_ON(!list_empty(&extents)); 3436 3432 write_unlock(&tree->lock);
+17 -6
fs/btrfs/volumes.c
··· 1431 1431 } 1432 1432 } else { 1433 1433 ret = btrfs_get_bdev_and_sb(device_path, 1434 - FMODE_READ | FMODE_EXCL, 1434 + FMODE_WRITE | FMODE_EXCL, 1435 1435 root->fs_info->bdev_holder, 0, 1436 1436 &bdev, &bh); 1437 1437 if (ret) ··· 2614 2614 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 2615 2615 chunk_used = btrfs_block_group_used(&cache->item); 2616 2616 2617 - user_thresh = div_factor_fine(cache->key.offset, bargs->usage); 2617 + if (bargs->usage == 0) 2618 + user_thresh = 0; 2619 + else if (bargs->usage > 100) 2620 + user_thresh = cache->key.offset; 2621 + else 2622 + user_thresh = div_factor_fine(cache->key.offset, 2623 + bargs->usage); 2624 + 2618 2625 if (chunk_used < user_thresh) 2619 2626 ret = 0; 2620 2627 ··· 2966 2959 unset_balance_control(fs_info); 2967 2960 ret = del_balance_item(fs_info->tree_root); 2968 2961 BUG_ON(ret); 2962 + 2963 + atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 2969 2964 } 2970 2965 2971 2966 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock, ··· 3147 3138 out: 3148 3139 if (bctl->flags & BTRFS_BALANCE_RESUME) 3149 3140 __cancel_balance(fs_info); 3150 - else 3141 + else { 3151 3142 kfree(bctl); 3143 + atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 3144 + } 3152 3145 return ret; 3153 3146 } 3154 3147 ··· 3167 3156 ret = btrfs_balance(fs_info->balance_ctl, NULL); 3168 3157 } 3169 3158 3170 - atomic_set(&fs_info->mutually_exclusive_operation_running, 0); 3171 3159 mutex_unlock(&fs_info->balance_mutex); 3172 3160 mutex_unlock(&fs_info->volume_mutex); 3173 3161 ··· 3189 3179 return 0; 3190 3180 } 3191 3181 3192 - WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)); 3193 3182 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); 3194 3183 if (IS_ERR(tsk)) 3195 3184 return PTR_ERR(tsk); ··· 3241 3232 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); 3242 3233 btrfs_balance_sys(leaf, item, &disk_bargs); 3243 3234 
btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); 3235 + 3236 + WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)); 3244 3237 3245 3238 mutex_lock(&fs_info->volume_mutex); 3246 3239 mutex_lock(&fs_info->balance_mutex); ··· 3507 3496 { 1, 1, 2, 2, 2, 2 /* raid1 */ }, 3508 3497 { 1, 2, 1, 1, 1, 2 /* dup */ }, 3509 3498 { 1, 1, 0, 2, 1, 1 /* raid0 */ }, 3510 - { 1, 1, 0, 1, 1, 1 /* single */ }, 3499 + { 1, 1, 1, 1, 1, 1 /* single */ }, 3511 3500 }; 3512 3501 3513 3502 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+2
fs/cifs/cifs_dfs_ref.c
··· 226 226 compose_mount_options_err: 227 227 kfree(mountdata); 228 228 mountdata = ERR_PTR(rc); 229 + kfree(*devname); 230 + *devname = NULL; 229 231 goto compose_mount_options_out; 230 232 } 231 233
+1 -1
fs/cifs/connect.c
··· 1917 1917 } 1918 1918 case AF_INET6: { 1919 1919 struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr; 1920 - struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)&rhs; 1920 + struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs; 1921 1921 return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr); 1922 1922 } 1923 1923 default:
+6 -7
fs/f2fs/acl.c
··· 191 191 retval = f2fs_getxattr(inode, name_index, "", value, retval); 192 192 } 193 193 194 - if (retval < 0) { 195 - if (retval == -ENODATA) 196 - acl = NULL; 197 - else 198 - acl = ERR_PTR(retval); 199 - } else { 194 + if (retval > 0) 200 195 acl = f2fs_acl_from_disk(value, retval); 201 - } 196 + else if (retval == -ENODATA) 197 + acl = NULL; 198 + else 199 + acl = ERR_PTR(retval); 202 200 kfree(value); 201 + 203 202 if (!IS_ERR(acl)) 204 203 set_cached_acl(inode, type, acl); 205 204
+1 -2
fs/f2fs/checkpoint.c
··· 214 214 goto retry; 215 215 } 216 216 new->ino = ino; 217 - INIT_LIST_HEAD(&new->list); 218 217 219 218 /* add new_oentry into list which is sorted by inode number */ 220 219 if (orphan) { ··· 771 772 sbi->n_orphans = 0; 772 773 } 773 774 774 - int create_checkpoint_caches(void) 775 + int __init create_checkpoint_caches(void) 775 776 { 776 777 orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry", 777 778 sizeof(struct orphan_inode_entry), NULL);
+16 -1
fs/f2fs/data.c
··· 547 547 548 548 #define MAX_DESIRED_PAGES_WP 4096 549 549 550 + static int __f2fs_writepage(struct page *page, struct writeback_control *wbc, 551 + void *data) 552 + { 553 + struct address_space *mapping = data; 554 + int ret = mapping->a_ops->writepage(page, wbc); 555 + mapping_set_error(mapping, ret); 556 + return ret; 557 + } 558 + 550 559 static int f2fs_write_data_pages(struct address_space *mapping, 551 560 struct writeback_control *wbc) 552 561 { ··· 572 563 573 564 if (!S_ISDIR(inode->i_mode)) 574 565 mutex_lock(&sbi->writepages); 575 - ret = generic_writepages(mapping, wbc); 566 + ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping); 576 567 if (!S_ISDIR(inode->i_mode)) 577 568 mutex_unlock(&sbi->writepages); 578 569 f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL)); ··· 698 689 return 0; 699 690 } 700 691 692 + static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) 693 + { 694 + return generic_block_bmap(mapping, block, get_data_block_ro); 695 + } 696 + 701 697 const struct address_space_operations f2fs_dblock_aops = { 702 698 .readpage = f2fs_read_data_page, 703 699 .readpages = f2fs_read_data_pages, ··· 714 700 .invalidatepage = f2fs_invalidate_data_page, 715 701 .releasepage = f2fs_release_data_page, 716 702 .direct_IO = f2fs_direct_IO, 703 + .bmap = f2fs_bmap, 717 704 };
+21 -29
fs/f2fs/debug.c
··· 26 26 27 27 static LIST_HEAD(f2fs_stat_list); 28 28 static struct dentry *debugfs_root; 29 + static DEFINE_MUTEX(f2fs_stat_mutex); 29 30 30 31 static void update_general_status(struct f2fs_sb_info *sbi) 31 32 { ··· 181 180 int i = 0; 182 181 int j; 183 182 183 + mutex_lock(&f2fs_stat_mutex); 184 184 list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) { 185 185 186 - mutex_lock(&si->stat_lock); 187 - if (!si->sbi) { 188 - mutex_unlock(&si->stat_lock); 189 - continue; 190 - } 191 186 update_general_status(si->sbi); 192 187 193 188 seq_printf(s, "\n=====[ partition info. #%d ]=====\n", i++); 194 - seq_printf(s, "[SB: 1] [CP: 2] [NAT: %d] [SIT: %d] ", 195 - si->nat_area_segs, si->sit_area_segs); 189 + seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ", 190 + si->sit_area_segs, si->nat_area_segs); 196 191 seq_printf(s, "[SSA: %d] [MAIN: %d", 197 192 si->ssa_area_segs, si->main_area_segs); 198 193 seq_printf(s, "(OverProv:%d Resv:%d)]\n\n", ··· 283 286 seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n", 284 287 (si->base_mem + si->cache_mem) >> 10, 285 288 si->base_mem >> 10, si->cache_mem >> 10); 286 - mutex_unlock(&si->stat_lock); 287 289 } 290 + mutex_unlock(&f2fs_stat_mutex); 288 291 return 0; 289 292 } 290 293 ··· 300 303 .release = single_release, 301 304 }; 302 305 303 - static int init_stats(struct f2fs_sb_info *sbi) 306 + int f2fs_build_stats(struct f2fs_sb_info *sbi) 304 307 { 305 308 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 306 309 struct f2fs_stat_info *si; ··· 310 313 return -ENOMEM; 311 314 312 315 si = sbi->stat_info; 313 - mutex_init(&si->stat_lock); 314 - list_add_tail(&si->stat_list, &f2fs_stat_list); 315 - 316 316 si->all_area_segs = le32_to_cpu(raw_super->segment_count); 317 317 si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit); 318 318 si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat); ··· 319 325 si->main_area_zones = si->main_area_sections / 320 326 
le32_to_cpu(raw_super->secs_per_zone); 321 327 si->sbi = sbi; 322 - return 0; 323 - } 324 328 325 - int f2fs_build_stats(struct f2fs_sb_info *sbi) 326 - { 327 - int retval; 329 + mutex_lock(&f2fs_stat_mutex); 330 + list_add_tail(&si->stat_list, &f2fs_stat_list); 331 + mutex_unlock(&f2fs_stat_mutex); 328 332 329 - retval = init_stats(sbi); 330 - if (retval) 331 - return retval; 332 - 333 - if (!debugfs_root) 334 - debugfs_root = debugfs_create_dir("f2fs", NULL); 335 - 336 - debugfs_create_file("status", S_IRUGO, debugfs_root, NULL, &stat_fops); 337 333 return 0; 338 334 } 339 335 ··· 331 347 { 332 348 struct f2fs_stat_info *si = sbi->stat_info; 333 349 350 + mutex_lock(&f2fs_stat_mutex); 334 351 list_del(&si->stat_list); 335 - mutex_lock(&si->stat_lock); 336 - si->sbi = NULL; 337 - mutex_unlock(&si->stat_lock); 352 + mutex_unlock(&f2fs_stat_mutex); 353 + 338 354 kfree(sbi->stat_info); 339 355 } 340 356 341 - void destroy_root_stats(void) 357 + void __init f2fs_create_root_stats(void) 358 + { 359 + debugfs_root = debugfs_create_dir("f2fs", NULL); 360 + if (debugfs_root) 361 + debugfs_create_file("status", S_IRUGO, debugfs_root, 362 + NULL, &stat_fops); 363 + } 364 + 365 + void f2fs_destroy_root_stats(void) 342 366 { 343 367 debugfs_remove_recursive(debugfs_root); 344 368 debugfs_root = NULL;
+1 -1
fs/f2fs/dir.c
··· 503 503 } 504 504 505 505 if (inode) { 506 - inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; 506 + inode->i_ctime = CURRENT_TIME; 507 507 drop_nlink(inode); 508 508 if (S_ISDIR(inode->i_mode)) { 509 509 drop_nlink(inode);
+11 -7
fs/f2fs/f2fs.h
··· 211 211 static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode, 212 212 struct page *ipage, struct page *npage, nid_t nid) 213 213 { 214 + memset(dn, 0, sizeof(*dn)); 214 215 dn->inode = inode; 215 216 dn->inode_page = ipage; 216 217 dn->node_page = npage; 217 218 dn->nid = nid; 218 - dn->inode_page_locked = 0; 219 219 } 220 220 221 221 /* ··· 877 877 * super.c 878 878 */ 879 879 int f2fs_sync_fs(struct super_block *, int); 880 + extern __printf(3, 4) 881 + void f2fs_msg(struct super_block *, const char *, const char *, ...); 880 882 881 883 /* 882 884 * hash.c ··· 914 912 void flush_nat_entries(struct f2fs_sb_info *); 915 913 int build_node_manager(struct f2fs_sb_info *); 916 914 void destroy_node_manager(struct f2fs_sb_info *); 917 - int create_node_manager_caches(void); 915 + int __init create_node_manager_caches(void); 918 916 void destroy_node_manager_caches(void); 919 917 920 918 /* ··· 966 964 void block_operations(struct f2fs_sb_info *); 967 965 void write_checkpoint(struct f2fs_sb_info *, bool, bool); 968 966 void init_orphan_info(struct f2fs_sb_info *); 969 - int create_checkpoint_caches(void); 967 + int __init create_checkpoint_caches(void); 970 968 void destroy_checkpoint_caches(void); 971 969 972 970 /* ··· 986 984 int start_gc_thread(struct f2fs_sb_info *); 987 985 void stop_gc_thread(struct f2fs_sb_info *); 988 986 block_t start_bidx_of_node(unsigned int); 989 - int f2fs_gc(struct f2fs_sb_info *, int); 987 + int f2fs_gc(struct f2fs_sb_info *); 990 988 void build_gc_manager(struct f2fs_sb_info *); 991 - int create_gc_caches(void); 989 + int __init create_gc_caches(void); 992 990 void destroy_gc_caches(void); 993 991 994 992 /* ··· 1060 1058 1061 1059 int f2fs_build_stats(struct f2fs_sb_info *); 1062 1060 void f2fs_destroy_stats(struct f2fs_sb_info *); 1063 - void destroy_root_stats(void); 1061 + void __init f2fs_create_root_stats(void); 1062 + void f2fs_destroy_root_stats(void); 1064 1063 #else 1065 1064 #define 
stat_inc_call_count(si) 1066 1065 #define stat_inc_seg_count(si, type) ··· 1071 1068 1072 1069 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } 1073 1070 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } 1074 - static inline void destroy_root_stats(void) { } 1071 + static inline void __init f2fs_create_root_stats(void) { } 1072 + static inline void f2fs_destroy_root_stats(void) { } 1075 1073 #endif 1076 1074 1077 1075 extern const struct file_operations f2fs_dir_operations;
+12 -4
fs/f2fs/file.c
··· 96 96 } 97 97 98 98 static const struct vm_operations_struct f2fs_file_vm_ops = { 99 - .fault = filemap_fault, 100 - .page_mkwrite = f2fs_vm_page_mkwrite, 99 + .fault = filemap_fault, 100 + .page_mkwrite = f2fs_vm_page_mkwrite, 101 + .remap_pages = generic_file_remap_pages, 101 102 }; 102 103 103 104 static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode) ··· 137 136 ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 138 137 if (ret) 139 138 return ret; 139 + 140 + /* guarantee free sections for fsync */ 141 + f2fs_balance_fs(sbi); 140 142 141 143 mutex_lock(&inode->i_mutex); 142 144 ··· 411 407 struct dnode_of_data dn; 412 408 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); 413 409 410 + f2fs_balance_fs(sbi); 411 + 414 412 mutex_lock_op(sbi, DATA_TRUNC); 415 413 set_new_dnode(&dn, inode, NULL, NULL, 0); 416 414 err = get_dnode_of_data(&dn, index, RDONLY_NODE); ··· 540 534 loff_t offset, loff_t len) 541 535 { 542 536 struct inode *inode = file->f_path.dentry->d_inode; 543 - struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); 544 537 long ret; 545 538 546 539 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) ··· 550 545 else 551 546 ret = expand_inode_data(inode, offset, len, mode); 552 547 553 - f2fs_balance_fs(sbi); 548 + if (!ret) { 549 + inode->i_mtime = inode->i_ctime = CURRENT_TIME; 550 + mark_inode_dirty(inode); 551 + } 554 552 return ret; 555 553 } 556 554
+27 -41
fs/f2fs/gc.c
··· 78 78 79 79 sbi->bg_gc++; 80 80 81 - if (f2fs_gc(sbi, 1) == GC_NONE) 81 + if (f2fs_gc(sbi) == GC_NONE) 82 82 wait_ms = GC_THREAD_NOGC_SLEEP_TIME; 83 83 else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME) 84 84 wait_ms = GC_THREAD_MAX_SLEEP_TIME; ··· 424 424 } 425 425 426 426 /* 427 - * Calculate start block index that this node page contains 427 + * Calculate start block index indicating the given node offset. 428 + * Be careful, caller should give this node offset only indicating direct node 429 + * blocks. If any node offsets, which point the other types of node blocks such 430 + * as indirect or double indirect node blocks, are given, it must be a caller's 431 + * bug. 428 432 */ 429 433 block_t start_bidx_of_node(unsigned int node_ofs) 430 434 { ··· 655 651 return ret; 656 652 } 657 653 658 - int f2fs_gc(struct f2fs_sb_info *sbi, int nGC) 654 + int f2fs_gc(struct f2fs_sb_info *sbi) 659 655 { 660 - unsigned int segno; 661 - int old_free_secs, cur_free_secs; 662 - int gc_status, nfree; 663 656 struct list_head ilist; 657 + unsigned int segno, i; 664 658 int gc_type = BG_GC; 659 + int gc_status = GC_NONE; 665 660 666 661 INIT_LIST_HEAD(&ilist); 667 662 gc_more: 668 - nfree = 0; 669 - gc_status = GC_NONE; 663 + if (!(sbi->sb->s_flags & MS_ACTIVE)) 664 + goto stop; 670 665 671 666 if (has_not_enough_free_secs(sbi)) 672 - old_free_secs = reserved_sections(sbi); 673 - else 674 - old_free_secs = free_sections(sbi); 667 + gc_type = FG_GC; 675 668 676 - while (sbi->sb->s_flags & MS_ACTIVE) { 677 - int i; 678 - if (has_not_enough_free_secs(sbi)) 679 - gc_type = FG_GC; 669 + if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE)) 670 + goto stop; 680 671 681 - cur_free_secs = free_sections(sbi) + nfree; 682 - 683 - /* We got free space successfully. */ 684 - if (nGC < cur_free_secs - old_free_secs) 672 + for (i = 0; i < sbi->segs_per_sec; i++) { 673 + /* 674 + * do_garbage_collect will give us three gc_status: 675 + * GC_ERROR, GC_DONE, and GC_BLOCKED. 
676 + * If GC is finished uncleanly, we have to return 677 + * the victim to dirty segment list. 678 + */ 679 + gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type); 680 + if (gc_status != GC_DONE) 685 681 break; 686 - 687 - if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE)) 688 - break; 689 - 690 - for (i = 0; i < sbi->segs_per_sec; i++) { 691 - /* 692 - * do_garbage_collect will give us three gc_status: 693 - * GC_ERROR, GC_DONE, and GC_BLOCKED. 694 - * If GC is finished uncleanly, we have to return 695 - * the victim to dirty segment list. 696 - */ 697 - gc_status = do_garbage_collect(sbi, segno + i, 698 - &ilist, gc_type); 699 - if (gc_status != GC_DONE) 700 - goto stop; 701 - nfree++; 702 - } 703 682 } 704 - stop: 705 - if (has_not_enough_free_secs(sbi) || gc_status == GC_BLOCKED) { 683 + if (has_not_enough_free_secs(sbi)) { 706 684 write_checkpoint(sbi, (gc_status == GC_BLOCKED), false); 707 - if (nfree) 685 + if (has_not_enough_free_secs(sbi)) 708 686 goto gc_more; 709 687 } 688 + stop: 710 689 mutex_unlock(&sbi->gc_mutex); 711 690 712 691 put_gc_inode(&ilist); 713 - BUG_ON(!list_empty(&ilist)); 714 692 return gc_status; 715 693 } 716 694 ··· 701 715 DIRTY_I(sbi)->v_ops = &default_v_ops; 702 716 } 703 717 704 - int create_gc_caches(void) 718 + int __init create_gc_caches(void) 705 719 { 706 720 winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes", 707 721 sizeof(struct inode_entry), NULL);
+3
fs/f2fs/inode.c
··· 217 217 inode->i_ino == F2FS_META_INO(sbi)) 218 218 return 0; 219 219 220 + if (wbc) 221 + f2fs_balance_fs(sbi); 222 + 220 223 node_page = get_node_page(sbi, inode->i_ino); 221 224 if (IS_ERR(node_page)) 222 225 return PTR_ERR(node_page);
+12 -7
fs/f2fs/node.c
··· 1124 1124 return 0; 1125 1125 } 1126 1126 1127 + /* 1128 + * It is very important to gather dirty pages and write at once, so that we can 1129 + * submit a big bio without interfering other data writes. 1130 + * Be default, 512 pages (2MB), a segment size, is quite reasonable. 1131 + */ 1132 + #define COLLECT_DIRTY_NODES 512 1127 1133 static int f2fs_write_node_pages(struct address_space *mapping, 1128 1134 struct writeback_control *wbc) 1129 1135 { ··· 1137 1131 struct block_device *bdev = sbi->sb->s_bdev; 1138 1132 long nr_to_write = wbc->nr_to_write; 1139 1133 1140 - if (wbc->for_kupdate) 1141 - return 0; 1142 - 1143 - if (get_pages(sbi, F2FS_DIRTY_NODES) == 0) 1144 - return 0; 1145 - 1134 + /* First check balancing cached NAT entries */ 1146 1135 if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) { 1147 1136 write_checkpoint(sbi, false, false); 1148 1137 return 0; 1149 1138 } 1139 + 1140 + /* collect a number of dirty node pages and write together */ 1141 + if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES) 1142 + return 0; 1150 1143 1151 1144 /* if mounting is failed, skip writing node pages */ 1152 1145 wbc->nr_to_write = bio_get_nr_vecs(bdev); ··· 1737 1732 kfree(nm_i); 1738 1733 } 1739 1734 1740 - int create_node_manager_caches(void) 1735 + int __init create_node_manager_caches(void) 1741 1736 { 1742 1737 nat_entry_slab = f2fs_kmem_cache_create("nat_entry", 1743 1738 sizeof(struct nat_entry), NULL);
+4 -6
fs/f2fs/recovery.c
··· 67 67 kunmap(page); 68 68 f2fs_put_page(page, 0); 69 69 } else { 70 - f2fs_add_link(&dent, inode); 70 + err = f2fs_add_link(&dent, inode); 71 71 } 72 72 iput(dir); 73 73 out: ··· 151 151 goto out; 152 152 } 153 153 154 - INIT_LIST_HEAD(&entry->list); 155 154 list_add_tail(&entry->list, head); 156 155 entry->blkaddr = blkaddr; 157 156 } ··· 173 174 static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi, 174 175 struct list_head *head) 175 176 { 176 - struct list_head *this; 177 - struct fsync_inode_entry *entry; 178 - list_for_each(this, head) { 179 - entry = list_entry(this, struct fsync_inode_entry, list); 177 + struct fsync_inode_entry *entry, *tmp; 178 + 179 + list_for_each_entry_safe(entry, tmp, head, list) { 180 180 iput(entry->inode); 181 181 list_del(&entry->list); 182 182 kmem_cache_free(fsync_entry_slab, entry);
+1 -1
fs/f2fs/segment.c
··· 31 31 */ 32 32 if (has_not_enough_free_secs(sbi)) { 33 33 mutex_lock(&sbi->gc_mutex); 34 - f2fs_gc(sbi, 1); 34 + f2fs_gc(sbi); 35 35 } 36 36 } 37 37
+72 -25
fs/f2fs/super.c
··· 53 53 {Opt_err, NULL}, 54 54 }; 55 55 56 + void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...) 57 + { 58 + struct va_format vaf; 59 + va_list args; 60 + 61 + va_start(args, fmt); 62 + vaf.fmt = fmt; 63 + vaf.va = &args; 64 + printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf); 65 + va_end(args); 66 + } 67 + 56 68 static void init_once(void *foo) 57 69 { 58 70 struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo; ··· 137 125 138 126 if (sync) 139 127 write_checkpoint(sbi, false, false); 128 + else 129 + f2fs_balance_fs(sbi); 140 130 141 131 return 0; 142 132 } ··· 261 247 .get_parent = f2fs_get_parent, 262 248 }; 263 249 264 - static int parse_options(struct f2fs_sb_info *sbi, char *options) 250 + static int parse_options(struct super_block *sb, struct f2fs_sb_info *sbi, 251 + char *options) 265 252 { 266 253 substring_t args[MAX_OPT_ARGS]; 267 254 char *p; ··· 301 286 break; 302 287 #else 303 288 case Opt_nouser_xattr: 304 - pr_info("nouser_xattr options not supported\n"); 289 + f2fs_msg(sb, KERN_INFO, 290 + "nouser_xattr options not supported"); 305 291 break; 306 292 #endif 307 293 #ifdef CONFIG_F2FS_FS_POSIX_ACL ··· 311 295 break; 312 296 #else 313 297 case Opt_noacl: 314 - pr_info("noacl options not supported\n"); 298 + f2fs_msg(sb, KERN_INFO, "noacl options not supported"); 315 299 break; 316 300 #endif 317 301 case Opt_active_logs: ··· 325 309 set_opt(sbi, DISABLE_EXT_IDENTIFY); 326 310 break; 327 311 default: 328 - pr_err("Unrecognized mount option \"%s\" or missing value\n", 329 - p); 312 + f2fs_msg(sb, KERN_ERR, 313 + "Unrecognized mount option \"%s\" or missing value", 314 + p); 330 315 return -EINVAL; 331 316 } 332 317 } ··· 354 337 return result; 355 338 } 356 339 357 - static int sanity_check_raw_super(struct f2fs_super_block *raw_super) 340 + static int sanity_check_raw_super(struct super_block *sb, 341 + struct f2fs_super_block *raw_super) 358 342 { 359 343 unsigned int blocksize; 360 344 361 - if 
(F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) 345 + if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { 346 + f2fs_msg(sb, KERN_INFO, 347 + "Magic Mismatch, valid(0x%x) - read(0x%x)", 348 + F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); 362 349 return 1; 350 + } 363 351 364 352 /* Currently, support only 4KB block size */ 365 353 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); 366 - if (blocksize != PAGE_CACHE_SIZE) 354 + if (blocksize != PAGE_CACHE_SIZE) { 355 + f2fs_msg(sb, KERN_INFO, 356 + "Invalid blocksize (%u), supports only 4KB\n", 357 + blocksize); 367 358 return 1; 359 + } 368 360 if (le32_to_cpu(raw_super->log_sectorsize) != 369 - F2FS_LOG_SECTOR_SIZE) 361 + F2FS_LOG_SECTOR_SIZE) { 362 + f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize"); 370 363 return 1; 364 + } 371 365 if (le32_to_cpu(raw_super->log_sectors_per_block) != 372 - F2FS_LOG_SECTORS_PER_BLOCK) 366 + F2FS_LOG_SECTORS_PER_BLOCK) { 367 + f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block"); 373 368 return 1; 369 + } 374 370 return 0; 375 371 } 376 372 ··· 443 413 if (!sbi) 444 414 return -ENOMEM; 445 415 446 - /* set a temporary block size */ 447 - if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) 416 + /* set a block size */ 417 + if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) { 418 + f2fs_msg(sb, KERN_ERR, "unable to set blocksize"); 448 419 goto free_sbi; 420 + } 449 421 450 422 /* read f2fs raw super block */ 451 423 raw_super_buf = sb_bread(sb, 0); 452 424 if (!raw_super_buf) { 453 425 err = -EIO; 426 + f2fs_msg(sb, KERN_ERR, "unable to read superblock"); 454 427 goto free_sbi; 455 428 } 456 429 raw_super = (struct f2fs_super_block *) ··· 471 438 set_opt(sbi, POSIX_ACL); 472 439 #endif 473 440 /* parse mount options */ 474 - if (parse_options(sbi, (char *)data)) 441 + if (parse_options(sb, sbi, (char *)data)) 475 442 goto free_sb_buf; 476 443 477 444 /* sanity checking of raw super */ 478 - if (sanity_check_raw_super(raw_super)) 445 + if (sanity_check_raw_super(sb, raw_super)) { 
446 + f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem"); 479 447 goto free_sb_buf; 448 + } 480 449 481 450 sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize)); 482 451 sb->s_max_links = F2FS_LINK_MAX; ··· 512 477 /* get an inode for meta space */ 513 478 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); 514 479 if (IS_ERR(sbi->meta_inode)) { 480 + f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode"); 515 481 err = PTR_ERR(sbi->meta_inode); 516 482 goto free_sb_buf; 517 483 } 518 484 519 485 err = get_valid_checkpoint(sbi); 520 - if (err) 486 + if (err) { 487 + f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint"); 521 488 goto free_meta_inode; 489 + } 522 490 523 491 /* sanity checking of checkpoint */ 524 492 err = -EINVAL; 525 - if (sanity_check_ckpt(raw_super, sbi->ckpt)) 493 + if (sanity_check_ckpt(raw_super, sbi->ckpt)) { 494 + f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint"); 526 495 goto free_cp; 496 + } 527 497 528 498 sbi->total_valid_node_count = 529 499 le32_to_cpu(sbi->ckpt->valid_node_count); ··· 542 502 INIT_LIST_HEAD(&sbi->dir_inode_list); 543 503 spin_lock_init(&sbi->dir_inode_lock); 544 504 545 - /* init super block */ 546 - if (!sb_set_blocksize(sb, sbi->blocksize)) 547 - goto free_cp; 548 - 549 505 init_orphan_info(sbi); 550 506 551 507 /* setup f2fs internal modules */ 552 508 err = build_segment_manager(sbi); 553 - if (err) 509 + if (err) { 510 + f2fs_msg(sb, KERN_ERR, 511 + "Failed to initialize F2FS segment manager"); 554 512 goto free_sm; 513 + } 555 514 err = build_node_manager(sbi); 556 - if (err) 515 + if (err) { 516 + f2fs_msg(sb, KERN_ERR, 517 + "Failed to initialize F2FS node manager"); 557 518 goto free_nm; 519 + } 558 520 559 521 build_gc_manager(sbi); 560 522 561 523 /* get an inode for node space */ 562 524 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); 563 525 if (IS_ERR(sbi->node_inode)) { 526 + f2fs_msg(sb, KERN_ERR, "Failed to read node inode"); 564 527 err = 
PTR_ERR(sbi->node_inode); 565 528 goto free_nm; 566 529 } ··· 576 533 /* read root inode and dentry */ 577 534 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); 578 535 if (IS_ERR(root)) { 536 + f2fs_msg(sb, KERN_ERR, "Failed to read root inode"); 579 537 err = PTR_ERR(root); 580 538 goto free_node_inode; 581 539 } ··· 640 596 .fs_flags = FS_REQUIRES_DEV, 641 597 }; 642 598 643 - static int init_inodecache(void) 599 + static int __init init_inodecache(void) 644 600 { 645 601 f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache", 646 602 sizeof(struct f2fs_inode_info), NULL); ··· 675 631 err = create_checkpoint_caches(); 676 632 if (err) 677 633 goto fail; 678 - return register_filesystem(&f2fs_fs_type); 634 + err = register_filesystem(&f2fs_fs_type); 635 + if (err) 636 + goto fail; 637 + f2fs_create_root_stats(); 679 638 fail: 680 639 return err; 681 640 } 682 641 683 642 static void __exit exit_f2fs_fs(void) 684 643 { 685 - destroy_root_stats(); 644 + f2fs_destroy_root_stats(); 686 645 unregister_filesystem(&f2fs_fs_type); 687 646 destroy_checkpoint_caches(); 688 647 destroy_gc_caches();
+2
fs/f2fs/xattr.c
··· 318 318 if (name_len > 255 || value_len > MAX_VALUE_LEN) 319 319 return -ERANGE; 320 320 321 + f2fs_balance_fs(sbi); 322 + 321 323 mutex_lock_op(sbi, NODE_NEW); 322 324 if (!fi->i_xattr_nid) { 323 325 /* Allocate new attribute block */
+14 -2
fs/fuse/Kconfig
··· 4 4 With FUSE it is possible to implement a fully functional filesystem 5 5 in a userspace program. 6 6 7 - There's also companion library: libfuse. This library along with 8 - utilities is available from the FUSE homepage: 7 + There's also a companion library: libfuse2. This library is available 8 + from the FUSE homepage: 9 9 <http://fuse.sourceforge.net/> 10 + although chances are your distribution already has that library 11 + installed if you've installed the "fuse" package itself. 10 12 11 13 See <file:Documentation/filesystems/fuse.txt> for more information. 12 14 See <file:Documentation/Changes> for needed library/utility version. 13 15 14 16 If you want to develop a userspace FS, or if you want to use 15 17 a filesystem based on FUSE, answer Y or M. 18 + 19 + config CUSE 20 + tristate "Character device in Userspace support" 21 + depends on FUSE_FS 22 + help 23 + This FUSE extension allows character devices to be 24 + implemented in userspace. 25 + 26 + If you want to develop or use a userspace character device 27 + based on CUSE, answer Y or M.
+22 -14
fs/fuse/cuse.c
··· 45 45 #include <linux/miscdevice.h> 46 46 #include <linux/mutex.h> 47 47 #include <linux/slab.h> 48 - #include <linux/spinlock.h> 49 48 #include <linux/stat.h> 50 49 #include <linux/module.h> 51 50 ··· 62 63 bool unrestricted_ioctl; 63 64 }; 64 65 65 - static DEFINE_SPINLOCK(cuse_lock); /* protects cuse_conntbl */ 66 + static DEFINE_MUTEX(cuse_lock); /* protects registration */ 66 67 static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN]; 67 68 static struct class *cuse_class; 68 69 ··· 113 114 int rc; 114 115 115 116 /* look up and get the connection */ 116 - spin_lock(&cuse_lock); 117 + mutex_lock(&cuse_lock); 117 118 list_for_each_entry(pos, cuse_conntbl_head(devt), list) 118 119 if (pos->dev->devt == devt) { 119 120 fuse_conn_get(&pos->fc); 120 121 cc = pos; 121 122 break; 122 123 } 123 - spin_unlock(&cuse_lock); 124 + mutex_unlock(&cuse_lock); 124 125 125 126 /* dead? */ 126 127 if (!cc) ··· 266 267 static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo) 267 268 { 268 269 char *end = p + len; 269 - char *key, *val; 270 + char *uninitialized_var(key), *uninitialized_var(val); 270 271 int rc; 271 272 272 273 while (true) { ··· 304 305 */ 305 306 static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) 306 307 { 307 - struct cuse_conn *cc = fc_to_cc(fc); 308 + struct cuse_conn *cc = fc_to_cc(fc), *pos; 308 309 struct cuse_init_out *arg = req->out.args[0].value; 309 310 struct page *page = req->pages[0]; 310 311 struct cuse_devinfo devinfo = { }; 311 312 struct device *dev; 312 313 struct cdev *cdev; 313 314 dev_t devt; 314 - int rc; 315 + int rc, i; 315 316 316 317 if (req->out.h.error || 317 318 arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) { ··· 355 356 dev_set_drvdata(dev, cc); 356 357 dev_set_name(dev, "%s", devinfo.name); 357 358 359 + mutex_lock(&cuse_lock); 360 + 361 + /* make sure the device-name is unique */ 362 + for (i = 0; i < CUSE_CONNTBL_LEN; ++i) { 363 + list_for_each_entry(pos, 
&cuse_conntbl[i], list) 364 + if (!strcmp(dev_name(pos->dev), dev_name(dev))) 365 + goto err_unlock; 366 + } 367 + 358 368 rc = device_add(dev); 359 369 if (rc) 360 - goto err_device; 370 + goto err_unlock; 361 371 362 372 /* register cdev */ 363 373 rc = -ENOMEM; 364 374 cdev = cdev_alloc(); 365 375 if (!cdev) 366 - goto err_device; 376 + goto err_unlock; 367 377 368 378 cdev->owner = THIS_MODULE; 369 379 cdev->ops = &cuse_frontend_fops; ··· 385 377 cc->cdev = cdev; 386 378 387 379 /* make the device available */ 388 - spin_lock(&cuse_lock); 389 380 list_add(&cc->list, cuse_conntbl_head(devt)); 390 - spin_unlock(&cuse_lock); 381 + mutex_unlock(&cuse_lock); 391 382 392 383 /* announce device availability */ 393 384 dev_set_uevent_suppress(dev, 0); ··· 398 391 399 392 err_cdev: 400 393 cdev_del(cdev); 401 - err_device: 394 + err_unlock: 395 + mutex_unlock(&cuse_lock); 402 396 put_device(dev); 403 397 err_region: 404 398 unregister_chrdev_region(devt, 1); ··· 528 520 int rc; 529 521 530 522 /* remove from the conntbl, no more access from this point on */ 531 - spin_lock(&cuse_lock); 523 + mutex_lock(&cuse_lock); 532 524 list_del_init(&cc->list); 533 - spin_unlock(&cuse_lock); 525 + mutex_unlock(&cuse_lock); 534 526 535 527 /* remove device */ 536 528 if (cc->dev)
-5
fs/fuse/dev.c
··· 692 692 struct page *oldpage = *pagep; 693 693 struct page *newpage; 694 694 struct pipe_buffer *buf = cs->pipebufs; 695 - struct address_space *mapping; 696 - pgoff_t index; 697 695 698 696 unlock_request(cs->fc, cs->req); 699 697 fuse_copy_finish(cs); ··· 721 723 722 724 if (fuse_check_page(newpage) != 0) 723 725 goto out_fallback_unlock; 724 - 725 - mapping = oldpage->mapping; 726 - index = oldpage->index; 727 726 728 727 /* 729 728 * This is a new and locked page, it shouldn't be mapped or
+2 -3
fs/fuse/file.c
··· 2177 2177 return ret; 2178 2178 } 2179 2179 2180 - long fuse_file_fallocate(struct file *file, int mode, loff_t offset, 2181 - loff_t length) 2180 + static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, 2181 + loff_t length) 2182 2182 { 2183 2183 struct fuse_file *ff = file->private_data; 2184 2184 struct fuse_conn *fc = ff->fc; ··· 2213 2213 2214 2214 return err; 2215 2215 } 2216 - EXPORT_SYMBOL_GPL(fuse_file_fallocate); 2217 2216 2218 2217 static const struct file_operations fuse_file_operations = { 2219 2218 .llseek = fuse_file_llseek,
+16
include/asm-generic/dma-mapping-broken.h
··· 16 16 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 17 17 dma_addr_t dma_handle); 18 18 19 + static inline void *dma_alloc_attrs(struct device *dev, size_t size, 20 + dma_addr_t *dma_handle, gfp_t flag, 21 + struct dma_attrs *attrs) 22 + { 23 + /* attrs is not supported and ignored */ 24 + return dma_alloc_coherent(dev, size, dma_handle, flag); 25 + } 26 + 27 + static inline void dma_free_attrs(struct device *dev, size_t size, 28 + void *cpu_addr, dma_addr_t dma_handle, 29 + struct dma_attrs *attrs) 30 + { 31 + /* attrs is not supported and ignored */ 32 + dma_free_coherent(dev, size, cpu_addr, dma_handle); 33 + } 34 + 19 35 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 20 36 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 21 37
+2 -4
include/asm-generic/pgtable.h
··· 461 461 return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); 462 462 } 463 463 464 - static inline unsigned long my_zero_pfn(unsigned long addr) 465 - { 466 - return page_to_pfn(ZERO_PAGE(addr)); 467 - } 464 + #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) 465 + 468 466 #else 469 467 static inline int is_zero_pfn(unsigned long pfn) 470 468 {
+2
include/asm-generic/syscalls.h
··· 21 21 unsigned long fd, off_t pgoff); 22 22 #endif 23 23 24 + #ifndef CONFIG_GENERIC_SIGALTSTACK 24 25 #ifndef sys_sigaltstack 25 26 asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *, 26 27 struct pt_regs *); 28 + #endif 27 29 #endif 28 30 29 31 #ifndef sys_rt_sigreturn
+5 -3
include/linux/ata.h
··· 297 297 ATA_LOG_SATA_NCQ = 0x10, 298 298 ATA_LOG_SATA_ID_DEV_DATA = 0x30, 299 299 ATA_LOG_SATA_SETTINGS = 0x08, 300 - ATA_LOG_DEVSLP_MDAT = 0x30, 300 + ATA_LOG_DEVSLP_OFFSET = 0x30, 301 + ATA_LOG_DEVSLP_SIZE = 0x08, 302 + ATA_LOG_DEVSLP_MDAT = 0x00, 301 303 ATA_LOG_DEVSLP_MDAT_MASK = 0x1F, 302 - ATA_LOG_DEVSLP_DETO = 0x31, 303 - ATA_LOG_DEVSLP_VALID = 0x37, 304 + ATA_LOG_DEVSLP_DETO = 0x01, 305 + ATA_LOG_DEVSLP_VALID = 0x07, 304 306 ATA_LOG_DEVSLP_VALID_MASK = 0x80, 305 307 306 308 /* READ/WRITE LONG (obsolete) */
+2 -2
include/linux/libata.h
··· 652 652 u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ 653 653 }; 654 654 655 - /* Identify Device Data Log (30h), SATA Settings (page 08h) */ 656 - u8 sata_settings[ATA_SECT_SIZE]; 655 + /* DEVSLP Timing Variables from Identify Device Data Log */ 656 + u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; 657 657 658 658 /* error history */ 659 659 int spdn_cnt;
+5 -5
include/linux/module.h
··· 199 199 struct module *source, *target; 200 200 }; 201 201 202 - enum module_state 203 - { 204 - MODULE_STATE_LIVE, 205 - MODULE_STATE_COMING, 206 - MODULE_STATE_GOING, 202 + enum module_state { 203 + MODULE_STATE_LIVE, /* Normal state. */ 204 + MODULE_STATE_COMING, /* Full formed, running module_init. */ 205 + MODULE_STATE_GOING, /* Going away. */ 206 + MODULE_STATE_UNFORMED, /* Still setting it up. */ 207 207 }; 208 208 209 209 /**
-1
include/linux/ptrace.h
··· 45 45 extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); 46 46 extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); 47 47 extern void ptrace_disable(struct task_struct *); 48 - extern int ptrace_check_attach(struct task_struct *task, bool ignore_state); 49 48 extern int ptrace_request(struct task_struct *child, long request, 50 49 unsigned long addr, unsigned long data); 51 50 extern void ptrace_notify(int exit_code);
+10 -1
include/linux/sched.h
··· 2714 2714 extern void recalc_sigpending_and_wake(struct task_struct *t); 2715 2715 extern void recalc_sigpending(void); 2716 2716 2717 - extern void signal_wake_up(struct task_struct *t, int resume_stopped); 2717 + extern void signal_wake_up_state(struct task_struct *t, unsigned int state); 2718 + 2719 + static inline void signal_wake_up(struct task_struct *t, bool resume) 2720 + { 2721 + signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); 2722 + } 2723 + static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) 2724 + { 2725 + signal_wake_up_state(t, resume ? __TASK_TRACED : 0); 2726 + } 2718 2727 2719 2728 /* 2720 2729 * Wrappers for p->thread_info->cpu access. No-op on UP.
+2 -1
include/uapi/linux/serial_core.h
··· 50 50 #define PORT_LPC3220 22 /* NXP LPC32xx SoC "Standard" UART */ 51 51 #define PORT_8250_CIR 23 /* CIR infrared port, has its own driver */ 52 52 #define PORT_XR17V35X 24 /* Exar XR17V35x UARTs */ 53 - #define PORT_MAX_8250 24 /* max port ID */ 53 + #define PORT_BRCM_TRUMANAGE 24 54 + #define PORT_MAX_8250 25 /* max port ID */ 54 55 55 56 /* 56 57 * ARM specific type numbers. These are not currently guaranteed
+4
init/do_mounts_initrd.c
··· 36 36 static int init_linuxrc(struct subprocess_info *info, struct cred *new) 37 37 { 38 38 sys_unshare(CLONE_FS | CLONE_FILES); 39 + /* stdin/stdout/stderr for /linuxrc */ 40 + sys_open("/dev/console", O_RDWR, 0); 41 + sys_dup(0); 42 + sys_dup(0); 39 43 /* move initrd over / and chdir/chroot in initrd root */ 40 44 sys_chdir("/root"); 41 45 sys_mount(".", "/", NULL, MS_MOVE, NULL);
+2 -2
init/main.c
··· 802 802 (const char __user *const __user *)envp_init); 803 803 } 804 804 805 - static void __init kernel_init_freeable(void); 805 + static noinline void __init kernel_init_freeable(void); 806 806 807 807 static int __ref kernel_init(void *unused) 808 808 { ··· 845 845 "See Linux Documentation/init.txt for guidance."); 846 846 } 847 847 848 - static void __init kernel_init_freeable(void) 848 + static noinline void __init kernel_init_freeable(void) 849 849 { 850 850 /* 851 851 * Wait until kthreadd is all set-up.
+20 -7
kernel/async.c
··· 86 86 */ 87 87 static async_cookie_t __lowest_in_progress(struct async_domain *running) 88 88 { 89 + async_cookie_t first_running = next_cookie; /* infinity value */ 90 + async_cookie_t first_pending = next_cookie; /* ditto */ 89 91 struct async_entry *entry; 90 92 93 + /* 94 + * Both running and pending lists are sorted but not disjoint. 95 + * Take the first cookies from both and return the min. 96 + */ 91 97 if (!list_empty(&running->domain)) { 92 98 entry = list_first_entry(&running->domain, typeof(*entry), list); 93 - return entry->cookie; 99 + first_running = entry->cookie; 94 100 } 95 101 96 - list_for_each_entry(entry, &async_pending, list) 97 - if (entry->running == running) 98 - return entry->cookie; 102 + list_for_each_entry(entry, &async_pending, list) { 103 + if (entry->running == running) { 104 + first_pending = entry->cookie; 105 + break; 106 + } 107 + } 99 108 100 - return next_cookie; /* "infinity" value */ 109 + return min(first_running, first_pending); 101 110 } 102 111 103 112 static async_cookie_t lowest_in_progress(struct async_domain *running) ··· 127 118 { 128 119 struct async_entry *entry = 129 120 container_of(work, struct async_entry, work); 121 + struct async_entry *pos; 130 122 unsigned long flags; 131 123 ktime_t uninitialized_var(calltime), delta, rettime; 132 124 struct async_domain *running = entry->running; 133 125 134 - /* 1) move self to the running queue */ 126 + /* 1) move self to the running queue, make sure it stays sorted */ 135 127 spin_lock_irqsave(&async_lock, flags); 136 - list_move_tail(&entry->list, &running->domain); 128 + list_for_each_entry_reverse(pos, &running->domain, list) 129 + if (entry->cookie < pos->cookie) 130 + break; 131 + list_move_tail(&entry->list, &pos->list); 137 132 spin_unlock_irqrestore(&async_lock, flags); 138 133 139 134 /* 2) run (and print duration) */
+15 -8
kernel/compat.c
··· 535 535 return 0; 536 536 } 537 537 538 - asmlinkage long 539 - compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, 540 - struct compat_rusage __user *ru) 538 + COMPAT_SYSCALL_DEFINE4(wait4, 539 + compat_pid_t, pid, 540 + compat_uint_t __user *, stat_addr, 541 + int, options, 542 + struct compat_rusage __user *, ru) 541 543 { 542 544 if (!ru) { 543 545 return sys_wait4(pid, stat_addr, options, NULL); ··· 566 564 } 567 565 } 568 566 569 - asmlinkage long compat_sys_waitid(int which, compat_pid_t pid, 570 - struct compat_siginfo __user *uinfo, int options, 571 - struct compat_rusage __user *uru) 567 + COMPAT_SYSCALL_DEFINE5(waitid, 568 + int, which, compat_pid_t, pid, 569 + struct compat_siginfo __user *, uinfo, int, options, 570 + struct compat_rusage __user *, uru) 572 571 { 573 572 siginfo_t info; 574 573 struct rusage ru; ··· 587 584 return ret; 588 585 589 586 if (uru) { 590 - ret = put_compat_rusage(&ru, uru); 587 + /* sys_waitid() overwrites everything in ru */ 588 + if (COMPAT_USE_64BIT_TIME) 589 + ret = copy_to_user(uru, &ru, sizeof(ru)); 590 + else 591 + ret = put_compat_rusage(&ru, uru); 591 592 if (ret) 592 593 return ret; 593 594 } ··· 1001 994 sigset_from_compat(&s, &s32); 1002 995 1003 996 if (uts) { 1004 - if (get_compat_timespec(&t, uts)) 997 + if (compat_get_timespec(&t, uts)) 1005 998 return -EFAULT; 1006 999 } 1007 1000
+2
kernel/debug/kdb/kdb_main.c
··· 1970 1970 1971 1971 kdb_printf("Module Size modstruct Used by\n"); 1972 1972 list_for_each_entry(mod, kdb_modules, list) { 1973 + if (mod->state == MODULE_STATE_UNFORMED) 1974 + continue; 1973 1975 1974 1976 kdb_printf("%-20s%8u 0x%p ", mod->name, 1975 1977 mod->core_size, (void *)mod);
+4 -2
kernel/fork.c
··· 1668 1668 int, tls_val) 1669 1669 #endif 1670 1670 { 1671 - return do_fork(clone_flags, newsp, 0, 1672 - parent_tidptr, child_tidptr); 1671 + long ret = do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr); 1672 + asmlinkage_protect(5, ret, clone_flags, newsp, 1673 + parent_tidptr, child_tidptr, tls_val); 1674 + return ret; 1673 1675 } 1674 1676 #endif 1675 1677
+108 -46
kernel/module.c
··· 188 188 ongoing or failed initialization etc. */ 189 189 static inline int strong_try_module_get(struct module *mod) 190 190 { 191 + BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED); 191 192 if (mod && mod->state == MODULE_STATE_COMING) 192 193 return -EBUSY; 193 194 if (try_module_get(mod)) ··· 344 343 #endif 345 344 }; 346 345 346 + if (mod->state == MODULE_STATE_UNFORMED) 347 + continue; 348 + 347 349 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) 348 350 return true; 349 351 } ··· 454 450 EXPORT_SYMBOL_GPL(find_symbol); 455 451 456 452 /* Search for module by name: must hold module_mutex. */ 457 - struct module *find_module(const char *name) 453 + static struct module *find_module_all(const char *name, 454 + bool even_unformed) 458 455 { 459 456 struct module *mod; 460 457 461 458 list_for_each_entry(mod, &modules, list) { 459 + if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) 460 + continue; 462 461 if (strcmp(mod->name, name) == 0) 463 462 return mod; 464 463 } 465 464 return NULL; 465 + } 466 + 467 + struct module *find_module(const char *name) 468 + { 469 + return find_module_all(name, false); 466 470 } 467 471 EXPORT_SYMBOL_GPL(find_module); 468 472 ··· 537 525 preempt_disable(); 538 526 539 527 list_for_each_entry_rcu(mod, &modules, list) { 528 + if (mod->state == MODULE_STATE_UNFORMED) 529 + continue; 540 530 if (!mod->percpu_size) 541 531 continue; 542 532 for_each_possible_cpu(cpu) { ··· 1062 1048 case MODULE_STATE_GOING: 1063 1049 state = "going"; 1064 1050 break; 1051 + default: 1052 + BUG(); 1065 1053 } 1066 1054 return sprintf(buffer, "%s\n", state); 1067 1055 } ··· 1802 1786 1803 1787 mutex_lock(&module_mutex); 1804 1788 list_for_each_entry_rcu(mod, &modules, list) { 1789 + if (mod->state == MODULE_STATE_UNFORMED) 1790 + continue; 1805 1791 if ((mod->module_core) && (mod->core_text_size)) { 1806 1792 set_page_attributes(mod->module_core, 1807 1793 mod->module_core + mod->core_text_size, ··· 1825 1807 1826 1808 
mutex_lock(&module_mutex); 1827 1809 list_for_each_entry_rcu(mod, &modules, list) { 1810 + if (mod->state == MODULE_STATE_UNFORMED) 1811 + continue; 1828 1812 if ((mod->module_core) && (mod->core_text_size)) { 1829 1813 set_page_attributes(mod->module_core, 1830 1814 mod->module_core + mod->core_text_size, ··· 2547 2527 err = -EFBIG; 2548 2528 goto out; 2549 2529 } 2530 + 2531 + /* Don't hand 0 to vmalloc, it whines. */ 2532 + if (stat.size == 0) { 2533 + err = -EINVAL; 2534 + goto out; 2535 + } 2536 + 2550 2537 info->hdr = vmalloc(stat.size); 2551 2538 if (!info->hdr) { 2552 2539 err = -ENOMEM; ··· 3017 2990 bool ret; 3018 2991 3019 2992 mutex_lock(&module_mutex); 3020 - mod = find_module(name); 3021 - ret = !mod || mod->state != MODULE_STATE_COMING; 2993 + mod = find_module_all(name, true); 2994 + ret = !mod || mod->state == MODULE_STATE_LIVE 2995 + || mod->state == MODULE_STATE_GOING; 3022 2996 mutex_unlock(&module_mutex); 3023 2997 3024 2998 return ret; ··· 3164 3136 goto free_copy; 3165 3137 } 3166 3138 3139 + /* 3140 + * We try to place it in the list now to make sure it's unique 3141 + * before we dedicate too many resources. In particular, 3142 + * temporary percpu memory exhaustion. 3143 + */ 3144 + mod->state = MODULE_STATE_UNFORMED; 3145 + again: 3146 + mutex_lock(&module_mutex); 3147 + if ((old = find_module_all(mod->name, true)) != NULL) { 3148 + if (old->state == MODULE_STATE_COMING 3149 + || old->state == MODULE_STATE_UNFORMED) { 3150 + /* Wait in case it fails to load. 
*/ 3151 + mutex_unlock(&module_mutex); 3152 + err = wait_event_interruptible(module_wq, 3153 + finished_loading(mod->name)); 3154 + if (err) 3155 + goto free_module; 3156 + goto again; 3157 + } 3158 + err = -EEXIST; 3159 + mutex_unlock(&module_mutex); 3160 + goto free_module; 3161 + } 3162 + list_add_rcu(&mod->list, &modules); 3163 + mutex_unlock(&module_mutex); 3164 + 3167 3165 #ifdef CONFIG_MODULE_SIG 3168 3166 mod->sig_ok = info->sig_ok; 3169 3167 if (!mod->sig_ok) ··· 3199 3145 /* Now module is in final location, initialize linked lists, etc. */ 3200 3146 err = module_unload_init(mod); 3201 3147 if (err) 3202 - goto free_module; 3148 + goto unlink_mod; 3203 3149 3204 3150 /* Now we've got everything in the final locations, we can 3205 3151 * find optional sections. */ ··· 3234 3180 goto free_arch_cleanup; 3235 3181 } 3236 3182 3237 - /* Mark state as coming so strong_try_module_get() ignores us. */ 3238 - mod->state = MODULE_STATE_COMING; 3239 - 3240 - /* Now sew it into the lists so we can get lockdep and oops 3241 - * info during argument parsing. No one should access us, since 3242 - * strong_try_module_get() will fail. 3243 - * lockdep/oops can run asynchronous, so use the RCU list insertion 3244 - * function to insert in a way safe to concurrent readers. 3245 - * The mutex protects against concurrent writers. 3246 - */ 3247 - again: 3248 - mutex_lock(&module_mutex); 3249 - if ((old = find_module(mod->name)) != NULL) { 3250 - if (old->state == MODULE_STATE_COMING) { 3251 - /* Wait in case it fails to load. */ 3252 - mutex_unlock(&module_mutex); 3253 - err = wait_event_interruptible(module_wq, 3254 - finished_loading(mod->name)); 3255 - if (err) 3256 - goto free_arch_cleanup; 3257 - goto again; 3258 - } 3259 - err = -EEXIST; 3260 - goto unlock; 3261 - } 3262 - 3263 - /* This has to be done once we're sure module name is unique. 
*/ 3264 3183 dynamic_debug_setup(info->debug, info->num_debug); 3265 3184 3266 - /* Find duplicate symbols */ 3185 + mutex_lock(&module_mutex); 3186 + /* Find duplicate symbols (must be called under lock). */ 3267 3187 err = verify_export_symbols(mod); 3268 3188 if (err < 0) 3269 - goto ddebug; 3189 + goto ddebug_cleanup; 3270 3190 3191 + /* This relies on module_mutex for list integrity. */ 3271 3192 module_bug_finalize(info->hdr, info->sechdrs, mod); 3272 - list_add_rcu(&mod->list, &modules); 3193 + 3194 + /* Mark state as coming so strong_try_module_get() ignores us, 3195 + * but kallsyms etc. can see us. */ 3196 + mod->state = MODULE_STATE_COMING; 3197 + 3273 3198 mutex_unlock(&module_mutex); 3274 3199 3275 3200 /* Module is ready to execute: parsing args may do that. */ 3276 3201 err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, 3277 3202 -32768, 32767, &ddebug_dyndbg_module_param_cb); 3278 3203 if (err < 0) 3279 - goto unlink; 3204 + goto bug_cleanup; 3280 3205 3281 3206 /* Link in to syfs. */ 3282 3207 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp); 3283 3208 if (err < 0) 3284 - goto unlink; 3209 + goto bug_cleanup; 3285 3210 3286 3211 /* Get rid of temporary copy. */ 3287 3212 free_copy(info); ··· 3270 3237 3271 3238 return do_init_module(mod); 3272 3239 3273 - unlink: 3240 + bug_cleanup: 3241 + /* module_bug_cleanup needs module_mutex protection */ 3274 3242 mutex_lock(&module_mutex); 3275 - /* Unlink carefully: kallsyms could be walking list. 
*/ 3276 - list_del_rcu(&mod->list); 3277 3243 module_bug_cleanup(mod); 3278 - wake_up_all(&module_wq); 3279 - ddebug: 3280 - dynamic_debug_remove(info->debug); 3281 - unlock: 3244 + ddebug_cleanup: 3282 3245 mutex_unlock(&module_mutex); 3246 + dynamic_debug_remove(info->debug); 3283 3247 synchronize_sched(); 3284 3248 kfree(mod->args); 3285 3249 free_arch_cleanup: ··· 3285 3255 free_modinfo(mod); 3286 3256 free_unload: 3287 3257 module_unload_free(mod); 3258 + unlink_mod: 3259 + mutex_lock(&module_mutex); 3260 + /* Unlink carefully: kallsyms could be walking list. */ 3261 + list_del_rcu(&mod->list); 3262 + wake_up_all(&module_wq); 3263 + mutex_unlock(&module_mutex); 3288 3264 free_module: 3289 3265 module_deallocate(mod, info); 3290 3266 free_copy: ··· 3413 3377 3414 3378 preempt_disable(); 3415 3379 list_for_each_entry_rcu(mod, &modules, list) { 3380 + if (mod->state == MODULE_STATE_UNFORMED) 3381 + continue; 3416 3382 if (within_module_init(addr, mod) || 3417 3383 within_module_core(addr, mod)) { 3418 3384 if (modname) ··· 3438 3400 3439 3401 preempt_disable(); 3440 3402 list_for_each_entry_rcu(mod, &modules, list) { 3403 + if (mod->state == MODULE_STATE_UNFORMED) 3404 + continue; 3441 3405 if (within_module_init(addr, mod) || 3442 3406 within_module_core(addr, mod)) { 3443 3407 const char *sym; ··· 3464 3424 3465 3425 preempt_disable(); 3466 3426 list_for_each_entry_rcu(mod, &modules, list) { 3427 + if (mod->state == MODULE_STATE_UNFORMED) 3428 + continue; 3467 3429 if (within_module_init(addr, mod) || 3468 3430 within_module_core(addr, mod)) { 3469 3431 const char *sym; ··· 3493 3451 3494 3452 preempt_disable(); 3495 3453 list_for_each_entry_rcu(mod, &modules, list) { 3454 + if (mod->state == MODULE_STATE_UNFORMED) 3455 + continue; 3496 3456 if (symnum < mod->num_symtab) { 3497 3457 *value = mod->symtab[symnum].st_value; 3498 3458 *type = mod->symtab[symnum].st_info; ··· 3537 3493 ret = mod_find_symname(mod, colon+1); 3538 3494 *colon = ':'; 3539 3495 } else { 
3540 - list_for_each_entry_rcu(mod, &modules, list) 3496 + list_for_each_entry_rcu(mod, &modules, list) { 3497 + if (mod->state == MODULE_STATE_UNFORMED) 3498 + continue; 3541 3499 if ((ret = mod_find_symname(mod, name)) != 0) 3542 3500 break; 3501 + } 3543 3502 } 3544 3503 preempt_enable(); 3545 3504 return ret; ··· 3557 3510 int ret; 3558 3511 3559 3512 list_for_each_entry(mod, &modules, list) { 3513 + if (mod->state == MODULE_STATE_UNFORMED) 3514 + continue; 3560 3515 for (i = 0; i < mod->num_symtab; i++) { 3561 3516 ret = fn(data, mod->strtab + mod->symtab[i].st_name, 3562 3517 mod, mod->symtab[i].st_value); ··· 3574 3525 { 3575 3526 int bx = 0; 3576 3527 3528 + BUG_ON(mod->state == MODULE_STATE_UNFORMED); 3577 3529 if (mod->taints || 3578 3530 mod->state == MODULE_STATE_GOING || 3579 3531 mod->state == MODULE_STATE_COMING) { ··· 3615 3565 { 3616 3566 struct module *mod = list_entry(p, struct module, list); 3617 3567 char buf[8]; 3568 + 3569 + /* We always ignore unformed modules. */ 3570 + if (mod->state == MODULE_STATE_UNFORMED) 3571 + return 0; 3618 3572 3619 3573 seq_printf(m, "%s %u", 3620 3574 mod->name, mod->init_size + mod->core_size); ··· 3680 3626 3681 3627 preempt_disable(); 3682 3628 list_for_each_entry_rcu(mod, &modules, list) { 3629 + if (mod->state == MODULE_STATE_UNFORMED) 3630 + continue; 3683 3631 if (mod->num_exentries == 0) 3684 3632 continue; 3685 3633 ··· 3730 3674 if (addr < module_addr_min || addr > module_addr_max) 3731 3675 return NULL; 3732 3676 3733 - list_for_each_entry_rcu(mod, &modules, list) 3677 + list_for_each_entry_rcu(mod, &modules, list) { 3678 + if (mod->state == MODULE_STATE_UNFORMED) 3679 + continue; 3734 3680 if (within_module_core(addr, mod) 3735 3681 || within_module_init(addr, mod)) 3736 3682 return mod; 3683 + } 3737 3684 return NULL; 3738 3685 } 3739 3686 EXPORT_SYMBOL_GPL(__module_address); ··· 3789 3730 printk(KERN_DEFAULT "Modules linked in:"); 3790 3731 /* Most callers should already have preempt disabled, but 
make sure */ 3791 3732 preempt_disable(); 3792 - list_for_each_entry_rcu(mod, &modules, list) 3733 + list_for_each_entry_rcu(mod, &modules, list) { 3734 + if (mod->state == MODULE_STATE_UNFORMED) 3735 + continue; 3793 3736 printk(" %s%s", mod->name, module_flags(mod, buf)); 3737 + } 3794 3738 preempt_enable(); 3795 3739 if (last_unloaded_module[0]) 3796 3740 printk(" [last unloaded: %s]", last_unloaded_module);
+59 -15
kernel/ptrace.c
··· 117 117 * TASK_KILLABLE sleeps. 118 118 */ 119 119 if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child)) 120 - signal_wake_up(child, task_is_traced(child)); 120 + ptrace_signal_wake_up(child, true); 121 121 122 122 spin_unlock(&child->sighand->siglock); 123 + } 124 + 125 + /* Ensure that nothing can wake it up, even SIGKILL */ 126 + static bool ptrace_freeze_traced(struct task_struct *task) 127 + { 128 + bool ret = false; 129 + 130 + /* Lockless, nobody but us can set this flag */ 131 + if (task->jobctl & JOBCTL_LISTENING) 132 + return ret; 133 + 134 + spin_lock_irq(&task->sighand->siglock); 135 + if (task_is_traced(task) && !__fatal_signal_pending(task)) { 136 + task->state = __TASK_TRACED; 137 + ret = true; 138 + } 139 + spin_unlock_irq(&task->sighand->siglock); 140 + 141 + return ret; 142 + } 143 + 144 + static void ptrace_unfreeze_traced(struct task_struct *task) 145 + { 146 + if (task->state != __TASK_TRACED) 147 + return; 148 + 149 + WARN_ON(!task->ptrace || task->parent != current); 150 + 151 + spin_lock_irq(&task->sighand->siglock); 152 + if (__fatal_signal_pending(task)) 153 + wake_up_state(task, __TASK_TRACED); 154 + else 155 + task->state = TASK_TRACED; 156 + spin_unlock_irq(&task->sighand->siglock); 123 157 } 124 158 125 159 /** ··· 173 139 * RETURNS: 174 140 * 0 on success, -ESRCH if %child is not ready. 175 141 */ 176 - int ptrace_check_attach(struct task_struct *child, bool ignore_state) 142 + static int ptrace_check_attach(struct task_struct *child, bool ignore_state) 177 143 { 178 144 int ret = -ESRCH; 179 145 ··· 185 151 * be changed by us so it's not changing right after this. 186 152 */ 187 153 read_lock(&tasklist_lock); 188 - if ((child->ptrace & PT_PTRACED) && child->parent == current) { 154 + if (child->ptrace && child->parent == current) { 155 + WARN_ON(child->state == __TASK_TRACED); 189 156 /* 190 157 * child->sighand can't be NULL, release_task() 191 158 * does ptrace_unlink() before __exit_signal(). 
192 159 */ 193 - spin_lock_irq(&child->sighand->siglock); 194 - WARN_ON_ONCE(task_is_stopped(child)); 195 - if (ignore_state || (task_is_traced(child) && 196 - !(child->jobctl & JOBCTL_LISTENING))) 160 + if (ignore_state || ptrace_freeze_traced(child)) 197 161 ret = 0; 198 - spin_unlock_irq(&child->sighand->siglock); 199 162 } 200 163 read_unlock(&tasklist_lock); 201 164 202 - if (!ret && !ignore_state) 203 - ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH; 165 + if (!ret && !ignore_state) { 166 + if (!wait_task_inactive(child, __TASK_TRACED)) { 167 + /* 168 + * This can only happen if may_ptrace_stop() fails and 169 + * ptrace_stop() changes ->state back to TASK_RUNNING, 170 + * so we should not worry about leaking __TASK_TRACED. 171 + */ 172 + WARN_ON(child->state == __TASK_TRACED); 173 + ret = -ESRCH; 174 + } 175 + } 204 176 205 - /* All systems go.. */ 206 177 return ret; 207 178 } 208 179 ··· 356 317 */ 357 318 if (task_is_stopped(task) && 358 319 task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) 359 - signal_wake_up(task, 1); 320 + signal_wake_up_state(task, __TASK_STOPPED); 360 321 361 322 spin_unlock(&task->sighand->siglock); 362 323 ··· 776 737 * tracee into STOP. 777 738 */ 778 739 if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP))) 779 - signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); 740 + ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); 780 741 781 742 unlock_task_sighand(child, &flags); 782 743 ret = 0; ··· 802 763 * start of this trap and now. Trigger re-trap. 
803 764 */ 804 765 if (child->jobctl & JOBCTL_TRAP_NOTIFY) 805 - signal_wake_up(child, true); 766 + ptrace_signal_wake_up(child, true); 806 767 ret = 0; 807 768 } 808 769 unlock_task_sighand(child, &flags); ··· 939 900 goto out_put_task_struct; 940 901 941 902 ret = arch_ptrace(child, request, addr, data); 903 + if (ret || request != PTRACE_DETACH) 904 + ptrace_unfreeze_traced(child); 942 905 943 906 out_put_task_struct: 944 907 put_task_struct(child); ··· 1080 1039 1081 1040 ret = ptrace_check_attach(child, request == PTRACE_KILL || 1082 1041 request == PTRACE_INTERRUPT); 1083 - if (!ret) 1042 + if (!ret) { 1084 1043 ret = compat_arch_ptrace(child, request, addr, data); 1044 + if (ret || request != PTRACE_DETACH) 1045 + ptrace_unfreeze_traced(child); 1046 + } 1085 1047 1086 1048 out_put_task_struct: 1087 1049 put_task_struct(child);
+2 -1
kernel/sched/core.c
··· 1523 1523 */ 1524 1524 int wake_up_process(struct task_struct *p) 1525 1525 { 1526 - return try_to_wake_up(p, TASK_ALL, 0); 1526 + WARN_ON(task_is_stopped_or_traced(p)); 1527 + return try_to_wake_up(p, TASK_NORMAL, 0); 1527 1528 } 1528 1529 EXPORT_SYMBOL(wake_up_process); 1529 1530
+12 -12
kernel/signal.c
··· 680 680 * No need to set need_resched since signal event passing 681 681 * goes through ->blocked 682 682 */ 683 - void signal_wake_up(struct task_struct *t, int resume) 683 + void signal_wake_up_state(struct task_struct *t, unsigned int state) 684 684 { 685 - unsigned int mask; 686 - 687 685 set_tsk_thread_flag(t, TIF_SIGPENDING); 688 - 689 686 /* 690 - * For SIGKILL, we want to wake it up in the stopped/traced/killable 687 + * TASK_WAKEKILL also means wake it up in the stopped/traced/killable 691 688 * case. We don't check t->state here because there is a race with it 692 689 * executing another processor and just now entering stopped state. 693 690 * By using wake_up_state, we ensure the process will wake up and 694 691 * handle its death signal. 695 692 */ 696 - mask = TASK_INTERRUPTIBLE; 697 - if (resume) 698 - mask |= TASK_WAKEKILL; 699 - if (!wake_up_state(t, mask)) 693 + if (!wake_up_state(t, state | TASK_INTERRUPTIBLE)) 700 694 kick_process(t); 701 695 } 702 696 ··· 838 844 assert_spin_locked(&t->sighand->siglock); 839 845 840 846 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); 841 - signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); 847 + ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); 842 848 } 843 849 844 850 /* ··· 1794 1800 * If SIGKILL was already sent before the caller unlocked 1795 1801 * ->siglock we must see ->core_state != NULL. Otherwise it 1796 1802 * is safe to enter schedule(). 1803 + * 1804 + * This is almost outdated, a task with the pending SIGKILL can't 1805 + * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported 1806 + * after SIGKILL was already dequeued. 
1797 1807 */ 1798 1808 if (unlikely(current->mm->core_state) && 1799 1809 unlikely(current->mm == current->parent->mm)) ··· 1923 1925 if (gstop_done) 1924 1926 do_notify_parent_cldstop(current, false, why); 1925 1927 1928 + /* tasklist protects us from ptrace_freeze_traced() */ 1926 1929 __set_current_state(TASK_RUNNING); 1927 1930 if (clear_code) 1928 1931 current->exit_code = 0; ··· 3115 3116 3116 3117 #ifdef CONFIG_COMPAT 3117 3118 #ifdef CONFIG_GENERIC_SIGALTSTACK 3118 - asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, 3119 - compat_stack_t __user *uoss_ptr) 3119 + COMPAT_SYSCALL_DEFINE2(sigaltstack, 3120 + const compat_stack_t __user *, uss_ptr, 3121 + compat_stack_t __user *, uoss_ptr) 3120 3122 { 3121 3123 stack_t uss, uoss; 3122 3124 int ret;
+1 -1
kernel/trace/ftrace.c
··· 3998 3998 3999 3999 struct notifier_block ftrace_module_nb = { 4000 4000 .notifier_call = ftrace_module_notify, 4001 - .priority = 0, 4001 + .priority = INT_MAX, /* Run before anything that can use kprobes */ 4002 4002 }; 4003 4003 4004 4004 extern unsigned long __start_mcount_loc[];
+1
lib/bug.c
··· 55 55 } 56 56 57 57 #ifdef CONFIG_MODULES 58 + /* Updates are protected by module mutex */ 58 59 static LIST_HEAD(module_bug_list); 59 60 60 61 static const struct bug_entry *module_find_bug(unsigned long bugaddr)
+2
security/device_cgroup.c
··· 215 215 struct dev_cgroup *dev_cgroup; 216 216 217 217 dev_cgroup = cgroup_to_devcgroup(cgroup); 218 + mutex_lock(&devcgroup_mutex); 218 219 dev_exception_clean(dev_cgroup); 220 + mutex_unlock(&devcgroup_mutex); 219 221 kfree(dev_cgroup); 220 222 } 221 223
+2 -2
security/integrity/evm/evm_crypto.c
··· 205 205 rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM, 206 206 &xattr_data, 207 207 sizeof(xattr_data), 0); 208 - } 209 - else if (rc == -ENODATA) 208 + } else if (rc == -ENODATA && inode->i_op->removexattr) { 210 209 rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM); 210 + } 211 211 return rc; 212 212 } 213 213
+2 -3
sound/pci/hda/hda_codec.c
··· 3654 3654 hda_set_power_state(codec, AC_PWRST_D0); 3655 3655 restore_shutup_pins(codec); 3656 3656 hda_exec_init_verbs(codec); 3657 + snd_hda_jack_set_dirty_all(codec); 3657 3658 if (codec->patch_ops.resume) 3658 3659 codec->patch_ops.resume(codec); 3659 3660 else { ··· 3666 3665 3667 3666 if (codec->jackpoll_interval) 3668 3667 hda_jackpoll_work(&codec->jackpoll_work.work); 3669 - else { 3670 - snd_hda_jack_set_dirty_all(codec); 3668 + else 3671 3669 snd_hda_jack_report_sync(codec); 3672 - } 3673 3670 3674 3671 codec->in_pm = 0; 3675 3672 snd_hda_power_down(codec); /* flag down before returning */
+9
sound/pci/hda/patch_conexant.c
··· 4636 4636 .patch = patch_conexant_auto }, 4637 4637 { .id = 0x14f15111, .name = "CX20753/4", 4638 4638 .patch = patch_conexant_auto }, 4639 + { .id = 0x14f15113, .name = "CX20755", 4640 + .patch = patch_conexant_auto }, 4641 + { .id = 0x14f15114, .name = "CX20756", 4642 + .patch = patch_conexant_auto }, 4643 + { .id = 0x14f15115, .name = "CX20757", 4644 + .patch = patch_conexant_auto }, 4639 4645 {} /* terminator */ 4640 4646 }; 4641 4647 ··· 4665 4659 MODULE_ALIAS("snd-hda-codec-id:14f1510f"); 4666 4660 MODULE_ALIAS("snd-hda-codec-id:14f15110"); 4667 4661 MODULE_ALIAS("snd-hda-codec-id:14f15111"); 4662 + MODULE_ALIAS("snd-hda-codec-id:14f15113"); 4663 + MODULE_ALIAS("snd-hda-codec-id:14f15114"); 4664 + MODULE_ALIAS("snd-hda-codec-id:14f15115"); 4668 4665 4669 4666 MODULE_LICENSE("GPL"); 4670 4667 MODULE_DESCRIPTION("Conexant HD-audio codec");
+2
sound/pci/hda/patch_realtek.c
··· 6251 6251 SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC), 6252 6252 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED), 6253 6253 SND_PCI_QUIRK(0x103c, 0x1972, "HP Pavilion 17", ALC269_FIXUP_MIC1_MUTE_LED), 6254 + SND_PCI_QUIRK(0x103c, 0x1977, "HP Pavilion 14", ALC269_FIXUP_MIC1_MUTE_LED), 6254 6255 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC), 6255 6256 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_DMIC), 6256 6257 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), ··· 6266 6265 SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), 6267 6266 SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), 6268 6267 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), 6268 + SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK), 6269 6269 SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), 6270 6270 SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC), 6271 6271 SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
+10
tools/perf/MANIFEST
··· 11 11 include/linux/swab.h 12 12 arch/*/include/asm/unistd*.h 13 13 arch/*/include/asm/perf_regs.h 14 + arch/*/include/uapi/asm/unistd*.h 15 + arch/*/include/uapi/asm/perf_regs.h 14 16 arch/*/lib/memcpy*.S 15 17 arch/*/lib/memset*.S 16 18 include/linux/poison.h 17 19 include/linux/magic.h 18 20 include/linux/hw_breakpoint.h 21 + include/linux/rbtree_augmented.h 22 + include/uapi/linux/perf_event.h 23 + include/uapi/linux/const.h 24 + include/uapi/linux/swab.h 25 + include/uapi/linux/hw_breakpoint.h 19 26 arch/x86/include/asm/svm.h 20 27 arch/x86/include/asm/vmx.h 21 28 arch/x86/include/asm/kvm_host.h 29 + arch/x86/include/uapi/asm/svm.h 30 + arch/x86/include/uapi/asm/vmx.h 31 + arch/x86/include/uapi/asm/kvm.h
+1 -1
tools/perf/Makefile
··· 58 58 -e s/arm.*/arm/ -e s/sa110/arm/ \ 59 59 -e s/s390x/s390/ -e s/parisc64/parisc/ \ 60 60 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ 61 - -e s/sh[234].*/sh/ ) 61 + -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ ) 62 62 NO_PERF_REGS := 1 63 63 64 64 CC = $(CROSS_COMPILE)gcc