Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge 3.14-rc4 into usb-next

We want the USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+3475 -2190
+1 -2
Documentation/ABI/testing/sysfs-tty
··· 3 3 Contact: Kay Sievers <kay.sievers@vrfy.org> 4 4 Description: 5 5 Shows the list of currently configured 6 - tty devices used for the console, 7 - like 'tty1 ttyS0'. 6 + console devices, like 'tty1 ttyS0'. 8 7 The last entry in the file is the active 9 8 device connected to /dev/console. 10 9 The file supports poll() to detect virtual
+109 -10
Documentation/PCI/MSI-HOWTO.txt
··· 82 82 has to request that the PCI layer set up the MSI capability for this 83 83 device. 84 84 85 - 4.2.1 pci_enable_msi_range 85 + 4.2.1 pci_enable_msi 86 + 87 + int pci_enable_msi(struct pci_dev *dev) 88 + 89 + A successful call allocates ONE interrupt to the device, regardless 90 + of how many MSIs the device supports. The device is switched from 91 + pin-based interrupt mode to MSI mode. The dev->irq number is changed 92 + to a new number which represents the message signaled interrupt; 93 + consequently, this function should be called before the driver calls 94 + request_irq(), because an MSI is delivered via a vector that is 95 + different from the vector of a pin-based interrupt. 96 + 97 + 4.2.2 pci_enable_msi_range 86 98 87 99 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) 88 100 ··· 159 147 return pci_enable_msi_range(pdev, nvec, nvec); 160 148 } 161 149 150 + Note, unlike pci_enable_msi_exact() function, which could be also used to 151 + enable a particular number of MSI-X interrupts, pci_enable_msi_range() 152 + returns either a negative errno or 'nvec' (not negative errno or 0 - as 153 + pci_enable_msi_exact() does). 154 + 162 155 4.2.1.3 Single MSI mode 163 156 164 157 The most notorious example of the request type described above is ··· 175 158 return pci_enable_msi_range(pdev, 1, 1); 176 159 } 177 160 178 - 4.2.2 pci_disable_msi 161 + Note, unlike pci_enable_msi() function, which could be also used to 162 + enable the single MSI mode, pci_enable_msi_range() returns either a 163 + negative errno or 1 (not negative errno or 0 - as pci_enable_msi() 164 + does). 165 + 166 + 4.2.3 pci_enable_msi_exact 167 + 168 + int pci_enable_msi_exact(struct pci_dev *dev, int nvec) 169 + 170 + This variation on pci_enable_msi_range() call allows a device driver to 171 + request exactly 'nvec' MSIs. 
172 + 173 + If this function returns a negative number, it indicates an error and 174 + the driver should not attempt to request any more MSI interrupts for 175 + this device. 176 + 177 + By contrast with pci_enable_msi_range() function, pci_enable_msi_exact() 178 + returns zero in case of success, which indicates MSI interrupts have been 179 + successfully allocated. 180 + 181 + 4.2.4 pci_disable_msi 179 182 180 183 void pci_disable_msi(struct pci_dev *dev) 181 184 ··· 209 172 Failure to do so results in a BUG_ON(), leaving the device with 210 173 MSI enabled and thus leaking its vector. 211 174 212 - 4.2.3 pci_msi_vec_count 175 + 4.2.4 pci_msi_vec_count 213 176 214 177 int pci_msi_vec_count(struct pci_dev *dev) 215 178 ··· 294 257 295 258 static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec) 296 259 { 297 - return pci_enable_msi_range(adapter->pdev, adapter->msix_entries, 298 - 1, nvec); 260 + return pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 261 + 1, nvec); 299 262 } 300 263 301 264 Note the value of 'minvec' parameter is 1. 
As 'minvec' is inclusive, ··· 306 269 307 270 static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec) 308 271 { 309 - return pci_enable_msi_range(adapter->pdev, adapter->msix_entries, 310 - FOO_DRIVER_MINIMUM_NVEC, nvec); 272 + return pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 273 + FOO_DRIVER_MINIMUM_NVEC, nvec); 311 274 } 312 275 313 276 4.3.1.2 Exact number of MSI-X interrupts ··· 319 282 320 283 static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec) 321 284 { 322 - return pci_enable_msi_range(adapter->pdev, adapter->msix_entries, 323 - nvec, nvec); 285 + return pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 286 + nvec, nvec); 324 287 } 288 + 289 + Note, unlike pci_enable_msix_exact() function, which could be also used to 290 + enable a particular number of MSI-X interrupts, pci_enable_msix_range() 291 + returns either a negative errno or 'nvec' (not negative errno or 0 - as 292 + pci_enable_msix_exact() does). 325 293 326 294 4.3.1.3 Specific requirements to the number of MSI-X interrupts 327 295 ··· 374 332 any error code other than -ENOSPC indicates a fatal error and should not 375 333 be retried. 376 334 377 - 4.3.2 pci_disable_msix 335 + 4.3.2 pci_enable_msix_exact 336 + 337 + int pci_enable_msix_exact(struct pci_dev *dev, 338 + struct msix_entry *entries, int nvec) 339 + 340 + This variation on pci_enable_msix_range() call allows a device driver to 341 + request exactly 'nvec' MSI-Xs. 342 + 343 + If this function returns a negative number, it indicates an error and 344 + the driver should not attempt to allocate any more MSI-X interrupts for 345 + this device. 346 + 347 + By contrast with pci_enable_msix_range() function, pci_enable_msix_exact() 348 + returns zero in case of success, which indicates MSI-X interrupts have been 349 + successfully allocated. 
350 + 351 + Another version of a routine that enables MSI-X mode for a device with 352 + specific requirements described in chapter 4.3.1.3 might look like this: 353 + 354 + /* 355 + * Assume 'minvec' and 'maxvec' are non-zero 356 + */ 357 + static int foo_driver_enable_msix(struct foo_adapter *adapter, 358 + int minvec, int maxvec) 359 + { 360 + int rc; 361 + 362 + minvec = roundup_pow_of_two(minvec); 363 + maxvec = rounddown_pow_of_two(maxvec); 364 + 365 + if (minvec > maxvec) 366 + return -ERANGE; 367 + 368 + retry: 369 + rc = pci_enable_msix_exact(adapter->pdev, 370 + adapter->msix_entries, maxvec); 371 + 372 + /* 373 + * -ENOSPC is the only error code allowed to be analyzed 374 + */ 375 + if (rc == -ENOSPC) { 376 + if (maxvec == 1) 377 + return -ENOSPC; 378 + 379 + maxvec /= 2; 380 + 381 + if (minvec > maxvec) 382 + return -ENOSPC; 383 + 384 + goto retry; 385 + } else if (rc < 0) { 386 + return rc; 387 + } 388 + 389 + return maxvec; 390 + } 391 + 392 + 4.3.3 pci_disable_msix 378 393 379 394 void pci_disable_msix(struct pci_dev *dev) 380 395
+1 -1
Documentation/devicetree/bindings/arm/omap/omap.txt
··· 91 91 compatible = "ti,omap3-beagle", "ti,omap3" 92 92 93 93 - OMAP3 Tobi with Overo : Commercial expansion board with daughter board 94 - compatible = "ti,omap3-tobi", "ti,omap3-overo", "ti,omap3" 94 + compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3" 95 95 96 96 - OMAP4 SDP : Software Development Board 97 97 compatible = "ti,omap4-sdp", "ti,omap4430"
+58
Documentation/devicetree/bindings/net/sti-dwmac.txt
··· 1 + STMicroelectronics SoC DWMAC glue layer controller 2 + 3 + The device node has following properties. 4 + 5 + Required properties: 6 + - compatible : Can be "st,stih415-dwmac", "st,stih416-dwmac" or 7 + "st,stid127-dwmac". 8 + - reg : Offset of the glue configuration register map in system 9 + configuration regmap pointed by st,syscon property and size. 10 + 11 + - reg-names : Should be "sti-ethconf". 12 + 13 + - st,syscon : Should be phandle to system configuration node which 14 + encompases this glue registers. 15 + 16 + - st,tx-retime-src: On STi Parts for Giga bit speeds, 125Mhz clocks can be 17 + wired up in from different sources. One via TXCLK pin and other via CLK_125 18 + pin. This wiring is totally board dependent. However the retiming glue 19 + logic should be configured accordingly. Possible values for this property 20 + 21 + "txclk" - if 125Mhz clock is wired up via txclk line. 22 + "clk_125" - if 125Mhz clock is wired up via clk_125 line. 23 + 24 + This property is only valid for Giga bit setup( GMII, RGMII), and it is 25 + un-used for non-giga bit (MII and RMII) setups. Also note that internal 26 + clockgen can not generate stable 125Mhz clock. 27 + 28 + - st,ext-phyclk: This boolean property indicates who is generating the clock 29 + for tx and rx. This property is only valid for RMII case where the clock can 30 + be generated from the MAC or PHY. 31 + 32 + - clock-names: should be "sti-ethclk". 33 + - clocks: Should point to ethernet clockgen which can generate phyclk. 
34 + 35 + 36 + Example: 37 + 38 + ethernet0: dwmac@fe810000 { 39 + device_type = "network"; 40 + compatible = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710"; 41 + reg = <0xfe810000 0x8000>, <0x8bc 0x4>; 42 + reg-names = "stmmaceth", "sti-ethconf"; 43 + interrupts = <0 133 0>, <0 134 0>, <0 135 0>; 44 + interrupt-names = "macirq", "eth_wake_irq", "eth_lpi"; 45 + phy-mode = "mii"; 46 + 47 + st,syscon = <&syscfg_rear>; 48 + 49 + snps,pbl = <32>; 50 + snps,mixed-burst; 51 + 52 + resets = <&softreset STIH416_ETH0_SOFTRESET>; 53 + reset-names = "stmmaceth"; 54 + pinctrl-0 = <&pinctrl_mii0>; 55 + pinctrl-names = "default"; 56 + clocks = <&CLK_S_GMAC0_PHY>; 57 + clock-names = "stmmaceth"; 58 + };
-45
Documentation/networking/3c505.txt
··· 1 - The 3Com Etherlink Plus (3c505) driver. 2 - 3 - This driver now uses DMA. There is currently no support for PIO operation. 4 - The default DMA channel is 6; this is _not_ autoprobed, so you must 5 - make sure you configure it correctly. If loading the driver as a 6 - module, you can do this with "modprobe 3c505 dma=n". If the driver is 7 - linked statically into the kernel, you must either use an "ether=" 8 - statement on the command line, or change the definition of ELP_DMA in 3c505.h. 9 - 10 - The driver will warn you if it has to fall back on the compiled in 11 - default DMA channel. 12 - 13 - If no base address is given at boot time, the driver will autoprobe 14 - ports 0x300, 0x280 and 0x310 (in that order). If no IRQ is given, the driver 15 - will try to probe for it. 16 - 17 - The driver can be used as a loadable module. 18 - 19 - Theoretically, one instance of the driver can now run multiple cards, 20 - in the standard way (when loading a module, say "modprobe 3c505 21 - io=0x300,0x340 irq=10,11 dma=6,7" or whatever). I have not tested 22 - this, though. 23 - 24 - The driver may now support revision 2 hardware; the dependency on 25 - being able to read the host control register has been removed. This 26 - is also untested, since I don't have a suitable card. 27 - 28 - Known problems: 29 - I still see "DMA upload timed out" messages from time to time. These 30 - seem to be fairly non-fatal though. 31 - The card is old and slow. 32 - 33 - To do: 34 - Improve probe/setup code 35 - Test multicast and promiscuous operation 36 - 37 - Authors: 38 - The driver is mainly written by Craig Southeren, email 39 - <craigs@ineluki.apana.org.au>. 40 - Parts of the driver (adapting the driver to 1.1.4+ kernels, 41 - IRQ/address detection, some changes) and this README by 42 - Juha Laiho <jlaiho@ichaos.nullnet.fi>. 
43 - DMA mode, more fixes, etc, by Philip Blundell <pjb27@cam.ac.uk> 44 - Multicard support, Software configurable DMA, etc., by 45 - Christopher Collins <ccollins@pcug.org.au>
+14 -1
MAINTAINERS
··· 1860 1860 1861 1861 BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE 1862 1862 M: Christian Daudt <bcm@fixthebug.org> 1863 + M: Matt Porter <mporter@linaro.org> 1863 1864 L: bcm-kernel-feedback-list@broadcom.com 1864 1865 T: git git://git.github.com/broadcom/bcm11351 1865 1866 S: Maintained ··· 2409 2408 2410 2409 CPUSETS 2411 2410 M: Li Zefan <lizefan@huawei.com> 2411 + L: cgroups@vger.kernel.org 2412 2412 W: http://www.bullopensource.org/cpuset/ 2413 2413 W: http://oss.sgi.com/projects/cpusets/ 2414 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git 2414 2415 S: Maintained 2415 2416 F: Documentation/cgroups/cpusets.txt 2416 2417 F: include/linux/cpuset.h ··· 3326 3323 S: Maintained 3327 3324 F: include/linux/netfilter_bridge/ 3328 3325 F: net/bridge/ 3326 + 3327 + ETHERNET PHY LIBRARY 3328 + M: Florian Fainelli <f.fainelli@gmail.com> 3329 + L: netdev@vger.kernel.org 3330 + S: Maintained 3331 + F: include/linux/phy.h 3332 + F: include/linux/phy_fixed.h 3333 + F: drivers/net/phy/ 3334 + F: Documentation/networking/phy.txt 3335 + F: drivers/of/of_mdio.c 3336 + F: drivers/of/of_net.c 3329 3337 3330 3338 EXT2 FILE SYSTEM 3331 3339 M: Jan Kara <jack@suse.cz> ··· 9729 9715 XFS FILESYSTEM 9730 9716 P: Silicon Graphics Inc 9731 9717 M: Dave Chinner <david@fromorbit.com> 9732 - M: Ben Myers <bpm@sgi.com> 9733 9718 M: xfs@oss.sgi.com 9734 9719 L: xfs@oss.sgi.com 9735 9720 W: http://oss.sgi.com/projects/xfs
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 14 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc3 4 + EXTRAVERSION = -rc4 5 5 NAME = Shuffling Zombie Juror 6 6 7 7 # *DOCUMENTATION*
+2 -1
arch/arm/boot/dts/Makefile
··· 209 209 omap3-n900.dtb \ 210 210 omap3-n9.dtb \ 211 211 omap3-n950.dtb \ 212 - omap3-tobi.dtb \ 212 + omap3-overo-tobi.dtb \ 213 + omap3-overo-storm-tobi.dtb \ 213 214 omap3-gta04.dtb \ 214 215 omap3-igep0020.dtb \ 215 216 omap3-igep0030.dtb \
+10 -1
arch/arm/boot/dts/am335x-evmsk.dts
··· 121 121 ti,model = "AM335x-EVMSK"; 122 122 ti,audio-codec = <&tlv320aic3106>; 123 123 ti,mcasp-controller = <&mcasp1>; 124 - ti,codec-clock-rate = <24576000>; 124 + ti,codec-clock-rate = <24000000>; 125 125 ti,audio-routing = 126 126 "Headphone Jack", "HPLOUT", 127 127 "Headphone Jack", "HPROUT"; ··· 253 253 /* MDIO reset value */ 254 254 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7) 255 255 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7) 256 + >; 257 + }; 258 + 259 + mmc1_pins: pinmux_mmc1_pins { 260 + pinctrl-single,pins = < 261 + 0x160 (PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */ 256 262 >; 257 263 }; 258 264 ··· 462 456 status = "okay"; 463 457 vmmc-supply = <&vmmc_reg>; 464 458 bus-width = <4>; 459 + pinctrl-names = "default"; 460 + pinctrl-0 = <&mmc1_pins>; 461 + cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; 465 462 }; 466 463 467 464 &sham {
+2 -1
arch/arm/boot/dts/armada-xp-mv78260.dtsi
··· 23 23 gpio0 = &gpio0; 24 24 gpio1 = &gpio1; 25 25 gpio2 = &gpio2; 26 + eth3 = &eth3; 26 27 }; 27 28 28 29 cpus { ··· 292 291 interrupts = <91>; 293 292 }; 294 293 295 - ethernet@34000 { 294 + eth3: ethernet@34000 { 296 295 compatible = "marvell,armada-370-neta"; 297 296 reg = <0x34000 0x4000>; 298 297 interrupts = <14>;
-11
arch/arm/boot/dts/dove.dtsi
··· 379 379 #clock-cells = <1>; 380 380 }; 381 381 382 - pmu_intc: pmu-interrupt-ctrl@d0050 { 383 - compatible = "marvell,dove-pmu-intc"; 384 - interrupt-controller; 385 - #interrupt-cells = <1>; 386 - reg = <0xd0050 0x8>; 387 - interrupts = <33>; 388 - marvell,#interrupts = <7>; 389 - }; 390 - 391 382 pinctrl: pin-ctrl@d0200 { 392 383 compatible = "marvell,dove-pinctrl"; 393 384 reg = <0xd0200 0x10>; ··· 601 610 rtc: real-time-clock@d8500 { 602 611 compatible = "marvell,orion-rtc"; 603 612 reg = <0xd8500 0x20>; 604 - interrupt-parent = <&pmu_intc>; 605 - interrupts = <5>; 606 613 }; 607 614 608 615 gpio2: gpio-ctrl@e8400 {
+3 -7
arch/arm/boot/dts/imx6dl-hummingboard.dts
··· 52 52 }; 53 53 }; 54 54 55 - codec: spdif-transmitter { 56 - compatible = "linux,spdif-dit"; 57 - pinctrl-names = "default"; 58 - pinctrl-0 = <&pinctrl_hummingboard_spdif>; 59 - }; 60 - 61 55 sound-spdif { 62 56 compatible = "fsl,imx-audio-spdif"; 63 57 model = "imx-spdif"; ··· 105 111 }; 106 112 107 113 pinctrl_hummingboard_spdif: hummingboard-spdif { 108 - fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>; 114 + fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>; 109 115 }; 110 116 111 117 pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus { ··· 136 142 }; 137 143 138 144 &spdif { 145 + pinctrl-names = "default"; 146 + pinctrl-0 = <&pinctrl_hummingboard_spdif>; 139 147 status = "okay"; 140 148 }; 141 149
+3 -7
arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
··· 46 46 }; 47 47 }; 48 48 49 - codec: spdif-transmitter { 50 - compatible = "linux,spdif-dit"; 51 - pinctrl-names = "default"; 52 - pinctrl-0 = <&pinctrl_cubox_i_spdif>; 53 - }; 54 - 55 49 sound-spdif { 56 50 compatible = "fsl,imx-audio-spdif"; 57 51 model = "imx-spdif"; ··· 83 89 }; 84 90 85 91 pinctrl_cubox_i_spdif: cubox-i-spdif { 86 - fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>; 92 + fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>; 87 93 }; 88 94 89 95 pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus { ··· 115 121 }; 116 122 117 123 &spdif { 124 + pinctrl-names = "default"; 125 + pinctrl-0 = <&pinctrl_cubox_i_spdif>; 118 126 status = "okay"; 119 127 }; 120 128
+4 -2
arch/arm/boot/dts/omap3-gta04.dts
··· 32 32 aux-button { 33 33 label = "aux"; 34 34 linux,code = <169>; 35 - gpios = <&gpio1 7 GPIO_ACTIVE_LOW>; 35 + gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>; 36 36 gpio-key,wakeup; 37 37 }; 38 38 }; ··· 92 92 bmp085@77 { 93 93 compatible = "bosch,bmp085"; 94 94 reg = <0x77>; 95 + interrupt-parent = <&gpio4>; 96 + interrupts = <17 IRQ_TYPE_EDGE_RISING>; 95 97 }; 96 98 97 99 /* leds */ ··· 143 141 pinctrl-names = "default"; 144 142 pinctrl-0 = <&mmc1_pins>; 145 143 vmmc-supply = <&vmmc1>; 146 - vmmc_aux-supply = <&vsim>; 147 144 bus-width = <4>; 145 + ti,non-removable; 148 146 }; 149 147 150 148 &mmc2 {
+1 -1
arch/arm/boot/dts/omap3-n9.dts
··· 14 14 15 15 / { 16 16 model = "Nokia N9"; 17 - compatible = "nokia,omap3-n9", "ti,omap3"; 17 + compatible = "nokia,omap3-n9", "ti,omap36xx", "ti,omap3"; 18 18 };
+2 -2
arch/arm/boot/dts/omap3-n900.dts
··· 1 1 /* 2 2 * Copyright (C) 2013 Pavel Machek <pavel@ucw.cz> 3 - * Copyright 2013 Aaro Koskinen <aaro.koskinen@iki.fi> 3 + * Copyright (C) 2013-2014 Aaro Koskinen <aaro.koskinen@iki.fi> 4 4 * 5 5 * This program is free software; you can redistribute it and/or modify 6 6 * it under the terms of the GNU General Public License version 2 (or later) as ··· 13 13 14 14 / { 15 15 model = "Nokia N900"; 16 - compatible = "nokia,omap3-n900", "ti,omap3"; 16 + compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3"; 17 17 18 18 cpus { 19 19 cpu@0 {
+1 -1
arch/arm/boot/dts/omap3-n950.dts
··· 14 14 15 15 / { 16 16 model = "Nokia N950"; 17 - compatible = "nokia,omap3-n950", "ti,omap3"; 17 + compatible = "nokia,omap3-n950", "ti,omap36xx", "ti,omap3"; 18 18 };
+22
arch/arm/boot/dts/omap3-overo-storm-tobi.dts
··· 1 + /* 2 + * Copyright (C) 2012 Florian Vaussard, EPFL Mobots group 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + 9 + /* 10 + * Tobi expansion board is manufactured by Gumstix Inc. 11 + */ 12 + 13 + /dts-v1/; 14 + 15 + #include "omap36xx.dtsi" 16 + #include "omap3-overo-tobi-common.dtsi" 17 + 18 + / { 19 + model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Tobi"; 20 + compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3"; 21 + }; 22 +
+22
arch/arm/boot/dts/omap3-overo-tobi.dts
··· 1 + /* 2 + * Copyright (C) 2012 Florian Vaussard, EPFL Mobots group 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + 9 + /* 10 + * Tobi expansion board is manufactured by Gumstix Inc. 11 + */ 12 + 13 + /dts-v1/; 14 + 15 + #include "omap34xx.dtsi" 16 + #include "omap3-overo-tobi-common.dtsi" 17 + 18 + / { 19 + model = "OMAP35xx Gumstix Overo on Tobi"; 20 + compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3"; 21 + }; 22 +
-3
arch/arm/boot/dts/omap3-overo.dtsi
··· 9 9 /* 10 10 * The Gumstix Overo must be combined with an expansion board. 11 11 */ 12 - /dts-v1/; 13 - 14 - #include "omap34xx.dtsi" 15 12 16 13 / { 17 14 pwmleds {
-3
arch/arm/boot/dts/omap3-tobi.dts arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
··· 13 13 #include "omap3-overo.dtsi" 14 14 15 15 / { 16 - model = "TI OMAP3 Gumstix Overo on Tobi"; 17 - compatible = "ti,omap3-tobi", "ti,omap3-overo", "ti,omap3"; 18 - 19 16 leds { 20 17 compatible = "gpio-leds"; 21 18 heartbeat {
+4
arch/arm/boot/dts/tegra114.dtsi
··· 57 57 resets = <&tegra_car 27>; 58 58 reset-names = "dc"; 59 59 60 + nvidia,head = <0>; 61 + 60 62 rgb { 61 63 status = "disabled"; 62 64 }; ··· 73 71 clock-names = "dc", "parent"; 74 72 resets = <&tegra_car 26>; 75 73 reset-names = "dc"; 74 + 75 + nvidia,head = <1>; 76 76 77 77 rgb { 78 78 status = "disabled";
+4
arch/arm/boot/dts/tegra20.dtsi
··· 94 94 resets = <&tegra_car 27>; 95 95 reset-names = "dc"; 96 96 97 + nvidia,head = <0>; 98 + 97 99 rgb { 98 100 status = "disabled"; 99 101 }; ··· 110 108 clock-names = "dc", "parent"; 111 109 resets = <&tegra_car 26>; 112 110 reset-names = "dc"; 111 + 112 + nvidia,head = <1>; 113 113 114 114 rgb { 115 115 status = "disabled";
+1 -1
arch/arm/boot/dts/tegra30-cardhu.dtsi
··· 28 28 compatible = "nvidia,cardhu", "nvidia,tegra30"; 29 29 30 30 aliases { 31 - rtc0 = "/i2c@7000d000/tps6586x@34"; 31 + rtc0 = "/i2c@7000d000/tps65911@2d"; 32 32 rtc1 = "/rtc@7000e000"; 33 33 }; 34 34
+4
arch/arm/boot/dts/tegra30.dtsi
··· 170 170 resets = <&tegra_car 27>; 171 171 reset-names = "dc"; 172 172 173 + nvidia,head = <0>; 174 + 173 175 rgb { 174 176 status = "disabled"; 175 177 }; ··· 186 184 clock-names = "dc", "parent"; 187 185 resets = <&tegra_car 26>; 188 186 reset-names = "dc"; 187 + 188 + nvidia,head = <1>; 189 189 190 190 rgb { 191 191 status = "disabled";
arch/arm/boot/dts/testcases/tests-interrupts.dtsi drivers/of/testcase-data/tests-interrupts.dtsi
arch/arm/boot/dts/testcases/tests-phandle.dtsi drivers/of/testcase-data/tests-phandle.dtsi
-2
arch/arm/boot/dts/testcases/tests.dtsi
··· 1 - /include/ "tests-phandle.dtsi" 2 - /include/ "tests-interrupts.dtsi"
+2 -2
arch/arm/boot/dts/versatile-pb.dts
··· 1 - /include/ "versatile-ab.dts" 1 + #include <versatile-ab.dts> 2 2 3 3 / { 4 4 model = "ARM Versatile PB"; ··· 47 47 }; 48 48 }; 49 49 50 - /include/ "testcases/tests.dtsi" 50 + #include <testcases.dtsi>
+1
arch/arm/include/asm/cacheflush.h
··· 212 212 static inline void __flush_icache_all(void) 213 213 { 214 214 __flush_icache_preferred(); 215 + dsb(); 215 216 } 216 217 217 218 /*
+9 -6
arch/arm/include/asm/pgtable-3level.h
··· 120 120 /* 121 121 * 2nd stage PTE definitions for LPAE. 122 122 */ 123 - #define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */ 124 - #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ 125 - #define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ 126 - #define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ 127 - #define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ 123 + #define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */ 124 + #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */ 125 + #define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */ 126 + #define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */ 127 + #define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2) 128 128 129 - #define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ 129 + #define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ 130 + #define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ 131 + 132 + #define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ 130 133 131 134 /* 132 135 * Hyp-mode PL2 PTE definitions for LPAE.
+3 -12
arch/arm/include/asm/spinlock.h
··· 37 37 38 38 static inline void dsb_sev(void) 39 39 { 40 - #if __LINUX_ARM_ARCH__ >= 7 41 - __asm__ __volatile__ ( 42 - "dsb ishst\n" 43 - SEV 44 - ); 45 - #else 46 - __asm__ __volatile__ ( 47 - "mcr p15, 0, %0, c7, c10, 4\n" 48 - SEV 49 - : : "r" (0) 50 - ); 51 - #endif 40 + 41 + dsb(ishst); 42 + __asm__(SEV); 52 43 } 53 44 54 45 /*
+1 -1
arch/arm/kernel/setup.c
··· 731 731 kernel_data.end = virt_to_phys(_end - 1); 732 732 733 733 for_each_memblock(memory, region) { 734 - res = memblock_virt_alloc_low(sizeof(*res), 0); 734 + res = memblock_virt_alloc(sizeof(*res), 0); 735 735 res->name = "System RAM"; 736 736 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); 737 737 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
-2
arch/arm/mach-imx/Makefile
··· 101 101 obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o 102 102 obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o 103 103 104 - ifeq ($(CONFIG_PM),y) 105 104 obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o 106 105 # i.MX6SL reuses i.MX6Q code 107 106 obj-$(CONFIG_SOC_IMX6SL) += pm-imx6q.o headsmp.o 108 - endif 109 107 110 108 # i.MX5 based machines 111 109 obj-$(CONFIG_MACH_MX51_BABBAGE) += mach-mx51_babbage.o
+1 -3
arch/arm/mach-imx/common.h
··· 144 144 void imx_cpu_die(unsigned int cpu); 145 145 int imx_cpu_kill(unsigned int cpu); 146 146 147 - #ifdef CONFIG_PM 148 147 void imx6q_pm_init(void); 149 148 void imx6q_pm_set_ccm_base(void __iomem *base); 149 + #ifdef CONFIG_PM 150 150 void imx5_pm_init(void); 151 151 #else 152 - static inline void imx6q_pm_init(void) {} 153 - static inline void imx6q_pm_set_ccm_base(void __iomem *base) {} 154 152 static inline void imx5_pm_init(void) {} 155 153 #endif 156 154
+1
arch/arm/mach-omap1/board-nokia770.c
··· 156 156 .register_dev = 1, 157 157 .hmc_mode = 16, 158 158 .pins[0] = 6, 159 + .extcon = "tahvo-usb", 159 160 }; 160 161 161 162 #if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+4 -4
arch/arm/mach-omap2/Kconfig
··· 48 48 bool "TI OMAP5" 49 49 depends on ARCH_MULTI_V7 50 50 select ARCH_OMAP2PLUS 51 + select ARCH_HAS_OPP 51 52 select ARM_CPU_SUSPEND if PM 52 53 select ARM_GIC 53 54 select CPU_V7 ··· 62 61 bool "TI AM33XX" 63 62 depends on ARCH_MULTI_V7 64 63 select ARCH_OMAP2PLUS 64 + select ARCH_HAS_OPP 65 65 select ARM_CPU_SUSPEND if PM 66 66 select CPU_V7 67 67 select MULTI_IRQ_HANDLER ··· 72 70 depends on ARCH_MULTI_V7 73 71 select CPU_V7 74 72 select ARCH_OMAP2PLUS 73 + select ARCH_HAS_OPP 75 74 select MULTI_IRQ_HANDLER 76 75 select ARM_GIC 77 76 select MACH_OMAP_GENERIC ··· 81 78 bool "TI DRA7XX" 82 79 depends on ARCH_MULTI_V7 83 80 select ARCH_OMAP2PLUS 81 + select ARCH_HAS_OPP 84 82 select ARM_CPU_SUSPEND if PM 85 83 select ARM_GIC 86 84 select CPU_V7 ··· 270 266 default y 271 267 select OMAP_PACKAGE_CBB 272 268 273 - config MACH_NOKIA_N800 274 - bool 275 - 276 269 config MACH_NOKIA_N810 277 270 bool 278 271 ··· 280 279 bool "Nokia N800/N810" 281 280 depends on SOC_OMAP2420 282 281 default y 283 - select MACH_NOKIA_N800 284 282 select MACH_NOKIA_N810 285 283 select MACH_NOKIA_N810_WIMAX 286 284 select OMAP_PACKAGE_ZAC
+2 -2
arch/arm/mach-omap2/gpmc.c
··· 1339 1339 of_property_read_bool(np, "gpmc,time-para-granularity"); 1340 1340 } 1341 1341 1342 - #ifdef CONFIG_MTD_NAND 1342 + #if IS_ENABLED(CONFIG_MTD_NAND) 1343 1343 1344 1344 static const char * const nand_xfer_types[] = { 1345 1345 [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled", ··· 1429 1429 } 1430 1430 #endif 1431 1431 1432 - #ifdef CONFIG_MTD_ONENAND 1432 + #if IS_ENABLED(CONFIG_MTD_ONENAND) 1433 1433 static int gpmc_probe_onenand_child(struct platform_device *pdev, 1434 1434 struct device_node *child) 1435 1435 {
-9
arch/arm/mach-omap2/io.c
··· 179 179 .length = L4_EMU_34XX_SIZE, 180 180 .type = MT_DEVICE 181 181 }, 182 - #if defined(CONFIG_DEBUG_LL) && \ 183 - (defined(CONFIG_MACH_OMAP_ZOOM2) || defined(CONFIG_MACH_OMAP_ZOOM3)) 184 - { 185 - .virtual = ZOOM_UART_VIRT, 186 - .pfn = __phys_to_pfn(ZOOM_UART_BASE), 187 - .length = SZ_1M, 188 - .type = MT_DEVICE 189 - }, 190 - #endif 191 182 }; 192 183 #endif 193 184
+9
arch/arm/mach-pxa/mioa701.c
··· 38 38 #include <linux/mtd/physmap.h> 39 39 #include <linux/usb/gpio_vbus.h> 40 40 #include <linux/reboot.h> 41 + #include <linux/regulator/fixed.h> 41 42 #include <linux/regulator/max1586.h> 42 43 #include <linux/slab.h> 43 44 #include <linux/i2c/pxa-i2c.h> ··· 715 714 { GPIO56_MT9M111_nOE, GPIOF_OUT_INIT_LOW, "Camera nOE" }, 716 715 }; 717 716 717 + static struct regulator_consumer_supply fixed_5v0_consumers[] = { 718 + REGULATOR_SUPPLY("power", "pwm-backlight"), 719 + }; 720 + 718 721 static void __init mioa701_machine_init(void) 719 722 { 720 723 int rc; ··· 758 753 pxa_set_i2c_info(&i2c_pdata); 759 754 pxa27x_set_i2c_power_info(NULL); 760 755 pxa_set_camera_info(&mioa701_pxacamera_platform_data); 756 + 757 + regulator_register_always_on(0, "fixed-5.0V", fixed_5v0_consumers, 758 + ARRAY_SIZE(fixed_5v0_consumers), 759 + 5000000); 761 760 } 762 761 763 762 static void mioa701_machine_exit(void)
+1
arch/arm/mach-tegra/pm.c
··· 24 24 #include <linux/cpu_pm.h> 25 25 #include <linux/suspend.h> 26 26 #include <linux/err.h> 27 + #include <linux/slab.h> 27 28 #include <linux/clk/tegra.h> 28 29 29 30 #include <asm/smp_plat.h>
+10
arch/arm/mach-tegra/tegra.c
··· 73 73 static void __init tegra_init_cache(void) 74 74 { 75 75 #ifdef CONFIG_CACHE_L2X0 76 + static const struct of_device_id pl310_ids[] __initconst = { 77 + { .compatible = "arm,pl310-cache", }, 78 + {} 79 + }; 80 + 81 + struct device_node *np; 76 82 int ret; 77 83 void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000; 78 84 u32 aux_ctrl, cache_type; 85 + 86 + np = of_find_matching_node(NULL, pl310_ids); 87 + if (!np) 88 + return; 79 89 80 90 cache_type = readl(p + L2X0_CACHE_TYPE); 81 91 aux_ctrl = (cache_type & 0x700) << (17-8);
+1 -1
arch/arm/mm/dma-mapping.c
··· 1358 1358 *handle = DMA_ERROR_CODE; 1359 1359 size = PAGE_ALIGN(size); 1360 1360 1361 - if (gfp & GFP_ATOMIC) 1361 + if (!(gfp & __GFP_WAIT)) 1362 1362 return __iommu_alloc_atomic(dev, size, handle); 1363 1363 1364 1364 /*
+1
arch/arm/mm/mm.h
··· 38 38 39 39 struct mem_type { 40 40 pteval_t prot_pte; 41 + pteval_t prot_pte_s2; 41 42 pmdval_t prot_l1; 42 43 pmdval_t prot_sect; 43 44 unsigned int domain;
+6 -1
arch/arm/mm/mmu.c
··· 232 232 #endif /* ifdef CONFIG_CPU_CP15 / else */ 233 233 234 234 #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN 235 + #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE 235 236 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE 236 237 237 238 static struct mem_type mem_types[] = { 238 239 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ 239 240 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | 241 + L_PTE_SHARED, 242 + .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) | 243 + s2_policy(L_PTE_S2_MT_DEV_SHARED) | 240 244 L_PTE_SHARED, 241 245 .prot_l1 = PMD_TYPE_TABLE, 242 246 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S, ··· 512 508 cp = &cache_policies[cachepolicy]; 513 509 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; 514 510 s2_pgprot = cp->pte_s2; 515 - hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte; 511 + hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte; 512 + s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2; 516 513 517 514 /* 518 515 * ARMv6 and above have extended page tables.
+2 -1
arch/arm/mm/proc-v6.S
··· 208 208 mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache 209 209 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 210 210 mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache 211 - mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 212 211 #ifdef CONFIG_MMU 213 212 mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs 214 213 mcr p15, 0, r0, c2, c0, 2 @ TTB control register ··· 217 218 ALT_UP(orr r8, r8, #TTB_FLAGS_UP) 218 219 mcr p15, 0, r8, c2, c0, 1 @ load TTB1 219 220 #endif /* CONFIG_MMU */ 221 + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and 222 + @ complete invalidations 220 223 adr r5, v6_crval 221 224 ldmia r5, {r5, r6} 222 225 ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
+1 -1
arch/arm/mm/proc-v7.S
··· 351 351 352 352 4: mov r10, #0 353 353 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate 354 - dsb 355 354 #ifdef CONFIG_MMU 356 355 mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs 357 356 v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup ··· 359 360 mcr p15, 0, r5, c10, c2, 0 @ write PRRR 360 361 mcr p15, 0, r6, c10, c2, 1 @ write NMRR 361 362 #endif 363 + dsb @ Complete invalidations 362 364 #ifndef CONFIG_ARM_THUMBEE 363 365 mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE 364 366 and r0, r0, #(0xf << 12) @ ThumbEE enabled field
+1 -1
arch/avr32/Makefile
··· 11 11 12 12 KBUILD_DEFCONFIG := atstk1002_defconfig 13 13 14 - KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic 14 + KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__ 15 15 KBUILD_AFLAGS += -mrelax -mno-pic 16 16 KBUILD_CFLAGS_MODULE += -mno-relax 17 17 LDFLAGS_vmlinux += --relax
+1
arch/avr32/boards/mimc200/fram.c
··· 11 11 #define FRAM_VERSION "1.0" 12 12 13 13 #include <linux/miscdevice.h> 14 + #include <linux/module.h> 14 15 #include <linux/proc_fs.h> 15 16 #include <linux/mm.h> 16 17 #include <linux/io.h>
+1
arch/avr32/include/asm/Kbuild
··· 17 17 generic-y += sections.h 18 18 generic-y += topology.h 19 19 generic-y += trace_clock.h 20 + generic-y += vga.h 20 21 generic-y += xor.h 21 22 generic-y += hash.h
+2
arch/avr32/include/asm/io.h
··· 295 295 #define iounmap(addr) \ 296 296 __iounmap(addr) 297 297 298 + #define ioremap_wc ioremap_nocache 299 + 298 300 #define cached(addr) P1SEGADDR(addr) 299 301 #define uncached(addr) P2SEGADDR(addr) 300 302
+19 -2
arch/powerpc/include/asm/eeh.h
··· 172 172 }; 173 173 174 174 extern struct eeh_ops *eeh_ops; 175 - extern int eeh_subsystem_enabled; 175 + extern bool eeh_subsystem_enabled; 176 176 extern raw_spinlock_t confirm_error_lock; 177 177 extern int eeh_probe_mode; 178 + 179 + static inline bool eeh_enabled(void) 180 + { 181 + return eeh_subsystem_enabled; 182 + } 183 + 184 + static inline void eeh_set_enable(bool mode) 185 + { 186 + eeh_subsystem_enabled = mode; 187 + } 178 188 179 189 #define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */ 180 190 #define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */ ··· 256 246 * If this macro yields TRUE, the caller relays to eeh_check_failure() 257 247 * which does further tests out of line. 258 248 */ 259 - #define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled) 249 + #define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_enabled()) 260 250 261 251 /* 262 252 * Reads from a device which has been isolated by EEH will return ··· 266 256 #define EEH_IO_ERROR_VALUE(size) (~0U >> ((4 - (size)) * 8)) 267 257 268 258 #else /* !CONFIG_EEH */ 259 + 260 + static inline bool eeh_enabled(void) 261 + { 262 + return false; 263 + } 264 + 265 + static inline void eeh_set_enable(bool mode) { } 269 266 270 267 static inline int eeh_init(void) 271 268 {
+1 -1
arch/powerpc/include/asm/hugetlb.h
··· 127 127 unsigned long addr, pte_t *ptep) 128 128 { 129 129 #ifdef CONFIG_PPC64 130 - return __pte(pte_update(mm, addr, ptep, ~0UL, 1)); 130 + return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1)); 131 131 #else 132 132 return __pte(pte_update(ptep, ~0UL, 0)); 133 133 #endif
+15 -11
arch/powerpc/include/asm/pgtable-ppc64.h
··· 195 195 static inline unsigned long pte_update(struct mm_struct *mm, 196 196 unsigned long addr, 197 197 pte_t *ptep, unsigned long clr, 198 + unsigned long set, 198 199 int huge) 199 200 { 200 201 #ifdef PTE_ATOMIC_UPDATES ··· 206 205 andi. %1,%0,%6\n\ 207 206 bne- 1b \n\ 208 207 andc %1,%0,%4 \n\ 208 + or %1,%1,%7\n\ 209 209 stdcx. %1,0,%3 \n\ 210 210 bne- 1b" 211 211 : "=&r" (old), "=&r" (tmp), "=m" (*ptep) 212 - : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) 212 + : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set) 213 213 : "cc" ); 214 214 #else 215 215 unsigned long old = pte_val(*ptep); 216 - *ptep = __pte(old & ~clr); 216 + *ptep = __pte((old & ~clr) | set); 217 217 #endif 218 218 /* huge pages use the old page table lock */ 219 219 if (!huge) ··· 233 231 { 234 232 unsigned long old; 235 233 236 - if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) 234 + if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) 237 235 return 0; 238 - old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0); 236 + old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); 239 237 return (old & _PAGE_ACCESSED) != 0; 240 238 } 241 239 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG ··· 254 252 if ((pte_val(*ptep) & _PAGE_RW) == 0) 255 253 return; 256 254 257 - pte_update(mm, addr, ptep, _PAGE_RW, 0); 255 + pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); 258 256 } 259 257 260 258 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, ··· 263 261 if ((pte_val(*ptep) & _PAGE_RW) == 0) 264 262 return; 265 263 266 - pte_update(mm, addr, ptep, _PAGE_RW, 1); 264 + pte_update(mm, addr, ptep, _PAGE_RW, 0, 1); 267 265 } 268 266 269 267 /* ··· 286 284 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, 287 285 unsigned long addr, pte_t *ptep) 288 286 { 289 - unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0); 287 + unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0); 290 288 return __pte(old); 291 289 } 292 290 293 291 static 
inline void pte_clear(struct mm_struct *mm, unsigned long addr, 294 292 pte_t * ptep) 295 293 { 296 - pte_update(mm, addr, ptep, ~0UL, 0); 294 + pte_update(mm, addr, ptep, ~0UL, 0, 0); 297 295 } 298 296 299 297 ··· 508 506 509 507 extern unsigned long pmd_hugepage_update(struct mm_struct *mm, 510 508 unsigned long addr, 511 - pmd_t *pmdp, unsigned long clr); 509 + pmd_t *pmdp, 510 + unsigned long clr, 511 + unsigned long set); 512 512 513 513 static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, 514 514 unsigned long addr, pmd_t *pmdp) ··· 519 515 520 516 if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) 521 517 return 0; 522 - old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED); 518 + old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0); 523 519 return ((old & _PAGE_ACCESSED) != 0); 524 520 } 525 521 ··· 546 542 if ((pmd_val(*pmdp) & _PAGE_RW) == 0) 547 543 return; 548 544 549 - pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW); 545 + pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0); 550 546 } 551 547 552 548 #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+22
arch/powerpc/include/asm/pgtable.h
··· 75 75 return pte; 76 76 } 77 77 78 + #define ptep_set_numa ptep_set_numa 79 + static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, 80 + pte_t *ptep) 81 + { 82 + if ((pte_val(*ptep) & _PAGE_PRESENT) == 0) 83 + VM_BUG_ON(1); 84 + 85 + pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0); 86 + return; 87 + } 88 + 78 89 #define pmd_numa pmd_numa 79 90 static inline int pmd_numa(pmd_t pmd) 80 91 { 81 92 return pte_numa(pmd_pte(pmd)); 93 + } 94 + 95 + #define pmdp_set_numa pmdp_set_numa 96 + static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, 97 + pmd_t *pmdp) 98 + { 99 + if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0) 100 + VM_BUG_ON(1); 101 + 102 + pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA); 103 + return; 82 104 } 83 105 84 106 #define pmd_mknonnuma pmd_mknonnuma
+3 -3
arch/powerpc/include/asm/vdso.h
··· 4 4 #ifdef __KERNEL__ 5 5 6 6 /* Default link addresses for the vDSOs */ 7 - #define VDSO32_LBASE 0x100000 8 - #define VDSO64_LBASE 0x100000 7 + #define VDSO32_LBASE 0x0 8 + #define VDSO64_LBASE 0x0 9 9 10 10 /* Default map addresses for 32bit vDSO */ 11 - #define VDSO32_MBASE VDSO32_LBASE 11 + #define VDSO32_MBASE 0x100000 12 12 13 13 #define VDSO_VERSION_STRING LINUX_2.6.15 14 14
+26 -6
arch/powerpc/kernel/eeh.c
··· 28 28 #include <linux/pci.h> 29 29 #include <linux/proc_fs.h> 30 30 #include <linux/rbtree.h> 31 + #include <linux/reboot.h> 31 32 #include <linux/seq_file.h> 32 33 #include <linux/spinlock.h> 33 34 #include <linux/export.h> ··· 90 89 /* Platform dependent EEH operations */ 91 90 struct eeh_ops *eeh_ops = NULL; 92 91 93 - int eeh_subsystem_enabled; 92 + bool eeh_subsystem_enabled = false; 94 93 EXPORT_SYMBOL(eeh_subsystem_enabled); 95 94 96 95 /* ··· 365 364 366 365 eeh_stats.total_mmio_ffs++; 367 366 368 - if (!eeh_subsystem_enabled) 367 + if (!eeh_enabled()) 369 368 return 0; 370 369 371 370 if (!edev) { ··· 748 747 return -EEXIST; 749 748 } 750 749 750 + static int eeh_reboot_notifier(struct notifier_block *nb, 751 + unsigned long action, void *unused) 752 + { 753 + eeh_set_enable(false); 754 + return NOTIFY_DONE; 755 + } 756 + 757 + static struct notifier_block eeh_reboot_nb = { 758 + .notifier_call = eeh_reboot_notifier, 759 + }; 760 + 751 761 /** 752 762 * eeh_init - EEH initialization 753 763 * ··· 789 777 */ 790 778 if (machine_is(powernv) && cnt++ <= 0) 791 779 return ret; 780 + 781 + /* Register reboot notifier */ 782 + ret = register_reboot_notifier(&eeh_reboot_nb); 783 + if (ret) { 784 + pr_warn("%s: Failed to register notifier (%d)\n", 785 + __func__, ret); 786 + return ret; 787 + } 792 788 793 789 /* call platform initialization function */ 794 790 if (!eeh_ops) { ··· 842 822 return ret; 843 823 } 844 824 845 - if (eeh_subsystem_enabled) 825 + if (eeh_enabled()) 846 826 pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n"); 847 827 else 848 828 pr_warning("EEH: No capable adapters found\n"); ··· 917 897 struct device_node *dn; 918 898 struct eeh_dev *edev; 919 899 920 - if (!dev || !eeh_subsystem_enabled) 900 + if (!dev || !eeh_enabled()) 921 901 return; 922 902 923 903 pr_debug("EEH: Adding device %s\n", pci_name(dev)); ··· 1025 1005 { 1026 1006 struct eeh_dev *edev; 1027 1007 1028 - if (!dev || !eeh_subsystem_enabled) 1008 + if (!dev || 
!eeh_enabled()) 1029 1009 return; 1030 1010 edev = pci_dev_to_eeh_dev(dev); 1031 1011 ··· 1065 1045 1066 1046 static int proc_eeh_show(struct seq_file *m, void *v) 1067 1047 { 1068 - if (0 == eeh_subsystem_enabled) { 1048 + if (!eeh_enabled()) { 1069 1049 seq_printf(m, "EEH Subsystem is globally disabled\n"); 1070 1050 seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs); 1071 1051 } else {
+4 -1
arch/powerpc/kernel/misc_32.S
··· 57 57 mtlr r0 58 58 blr 59 59 60 + /* 61 + * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); 62 + */ 60 63 _GLOBAL(call_do_irq) 61 64 mflr r0 62 65 stw r0,4(r1) 63 66 lwz r10,THREAD+KSP_LIMIT(r2) 64 - addi r11,r3,THREAD_INFO_GAP 67 + addi r11,r4,THREAD_INFO_GAP 65 68 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) 66 69 mr r1,r4 67 70 stw r10,8(r1)
+1 -1
arch/powerpc/kernel/vdso32/vdso32_wrapper.S
··· 6 6 .globl vdso32_start, vdso32_end 7 7 .balign PAGE_SIZE 8 8 vdso32_start: 9 - .incbin "arch/powerpc/kernel/vdso32/vdso32.so" 9 + .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg" 10 10 .balign PAGE_SIZE 11 11 vdso32_end: 12 12
+1 -1
arch/powerpc/kernel/vdso64/vdso64_wrapper.S
··· 6 6 .globl vdso64_start, vdso64_end 7 7 .balign PAGE_SIZE 8 8 vdso64_start: 9 - .incbin "arch/powerpc/kernel/vdso64/vdso64.so" 9 + .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg" 10 10 .balign PAGE_SIZE 11 11 vdso64_end: 12 12
+7 -5
arch/powerpc/mm/pgtable_64.c
··· 510 510 } 511 511 512 512 unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, 513 - pmd_t *pmdp, unsigned long clr) 513 + pmd_t *pmdp, unsigned long clr, 514 + unsigned long set) 514 515 { 515 516 516 517 unsigned long old, tmp; ··· 527 526 andi. %1,%0,%6\n\ 528 527 bne- 1b \n\ 529 528 andc %1,%0,%4 \n\ 529 + or %1,%1,%7\n\ 530 530 stdcx. %1,0,%3 \n\ 531 531 bne- 1b" 532 532 : "=&r" (old), "=&r" (tmp), "=m" (*pmdp) 533 - : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY) 533 + : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set) 534 534 : "cc" ); 535 535 #else 536 536 old = pmd_val(*pmdp); 537 - *pmdp = __pmd(old & ~clr); 537 + *pmdp = __pmd((old & ~clr) | set); 538 538 #endif 539 539 if (old & _PAGE_HASHPTE) 540 540 hpte_do_hugepage_flush(mm, addr, pmdp); ··· 710 708 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 711 709 pmd_t *pmdp) 712 710 { 713 - pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT); 711 + pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); 714 712 } 715 713 716 714 /* ··· 837 835 unsigned long old; 838 836 pgtable_t *pgtable_slot; 839 837 840 - old = pmd_hugepage_update(mm, addr, pmdp, ~0UL); 838 + old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0); 841 839 old_pmd = __pmd(old); 842 840 /* 843 841 * We have pmd == none and we are holding page_table_lock.
+1 -1
arch/powerpc/mm/subpage-prot.c
··· 78 78 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 79 79 arch_enter_lazy_mmu_mode(); 80 80 for (; npages > 0; --npages) { 81 - pte_update(mm, addr, pte, 0, 0); 81 + pte_update(mm, addr, pte, 0, 0, 0); 82 82 addr += PAGE_SIZE; 83 83 ++pte; 84 84 }
+6 -26
arch/powerpc/platforms/powernv/eeh-ioda.c
··· 44 44 45 45 /* We simply send special EEH event */ 46 46 if ((changed_evts & OPAL_EVENT_PCI_ERROR) && 47 - (events & OPAL_EVENT_PCI_ERROR)) 47 + (events & OPAL_EVENT_PCI_ERROR) && 48 + eeh_enabled()) 48 49 eeh_send_failure_event(NULL); 49 50 50 51 return 0; ··· 490 489 static int ioda_eeh_reset(struct eeh_pe *pe, int option) 491 490 { 492 491 struct pci_controller *hose = pe->phb; 493 - struct eeh_dev *edev; 494 - struct pci_dev *dev; 492 + struct pci_bus *bus; 495 493 int ret; 496 494 497 495 /* ··· 519 519 if (pe->type & EEH_PE_PHB) { 520 520 ret = ioda_eeh_phb_reset(hose, option); 521 521 } else { 522 - if (pe->type & EEH_PE_DEVICE) { 523 - /* 524 - * If it's device PE, we didn't refer to the parent 525 - * PCI bus yet. So we have to figure it out indirectly. 526 - */ 527 - edev = list_first_entry(&pe->edevs, 528 - struct eeh_dev, list); 529 - dev = eeh_dev_to_pci_dev(edev); 530 - dev = dev->bus->self; 531 - } else { 532 - /* 533 - * If it's bus PE, the parent PCI bus is already there 534 - * and just pick it up. 535 - */ 536 - dev = pe->bus->self; 537 - } 538 - 539 - /* 540 - * Do reset based on the fact that the direct upstream bridge 541 - * is root bridge (port) or not. 542 - */ 543 - if (dev->bus->number == 0) 522 + bus = eeh_pe_bus_get(pe); 523 + if (pci_is_root_bus(bus)) 544 524 ret = ioda_eeh_root_reset(hose, option); 545 525 else 546 - ret = ioda_eeh_bridge_reset(hose, dev, option); 526 + ret = ioda_eeh_bridge_reset(hose, bus->self, option); 547 527 } 548 528 549 529 return ret;
+1 -1
arch/powerpc/platforms/powernv/eeh-powernv.c
··· 145 145 * Enable EEH explicitly so that we will do EEH check 146 146 * while accessing I/O stuff 147 147 */ 148 - eeh_subsystem_enabled = 1; 148 + eeh_set_enable(true); 149 149 150 150 /* Save memory bars */ 151 151 eeh_save_bars(edev);
+1 -1
arch/powerpc/platforms/pseries/eeh_pseries.c
··· 265 265 enable = 1; 266 266 267 267 if (enable) { 268 - eeh_subsystem_enabled = 1; 268 + eeh_set_enable(true); 269 269 eeh_add_to_parent_pe(edev); 270 270 271 271 pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
+15 -7
arch/powerpc/platforms/pseries/pci.c
··· 113 113 { 114 114 struct device_node *dn, *pdn; 115 115 struct pci_bus *bus; 116 - const __be32 *pcie_link_speed_stats; 116 + u32 pcie_link_speed_stats[2]; 117 + int rc; 117 118 118 119 bus = bridge->bus; 119 120 ··· 123 122 return 0; 124 123 125 124 for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { 126 - pcie_link_speed_stats = of_get_property(pdn, 127 - "ibm,pcie-link-speed-stats", NULL); 128 - if (pcie_link_speed_stats) 125 + rc = of_property_read_u32_array(pdn, 126 + "ibm,pcie-link-speed-stats", 127 + &pcie_link_speed_stats[0], 2); 128 + if (!rc) 129 129 break; 130 130 } 131 131 132 132 of_node_put(pdn); 133 133 134 - if (!pcie_link_speed_stats) { 134 + if (rc) { 135 135 pr_err("no ibm,pcie-link-speed-stats property\n"); 136 136 return 0; 137 137 } 138 138 139 - switch (be32_to_cpup(pcie_link_speed_stats)) { 139 + switch (pcie_link_speed_stats[0]) { 140 140 case 0x01: 141 141 bus->max_bus_speed = PCIE_SPEED_2_5GT; 142 142 break; 143 143 case 0x02: 144 144 bus->max_bus_speed = PCIE_SPEED_5_0GT; 145 145 break; 146 + case 0x04: 147 + bus->max_bus_speed = PCIE_SPEED_8_0GT; 148 + break; 146 149 default: 147 150 bus->max_bus_speed = PCI_SPEED_UNKNOWN; 148 151 break; 149 152 } 150 153 151 - switch (be32_to_cpup(pcie_link_speed_stats)) { 154 + switch (pcie_link_speed_stats[1]) { 152 155 case 0x01: 153 156 bus->cur_bus_speed = PCIE_SPEED_2_5GT; 154 157 break; 155 158 case 0x02: 156 159 bus->cur_bus_speed = PCIE_SPEED_5_0GT; 160 + break; 161 + case 0x04: 162 + bus->cur_bus_speed = PCIE_SPEED_8_0GT; 157 163 break; 158 164 default: 159 165 bus->cur_bus_speed = PCI_SPEED_UNKNOWN;
+1 -1
arch/sparc/Kconfig
··· 27 27 select RTC_DRV_M48T59 28 28 select HAVE_DMA_ATTRS 29 29 select HAVE_DMA_API_DEBUG 30 - select HAVE_ARCH_JUMP_LABEL 30 + select HAVE_ARCH_JUMP_LABEL if SPARC64 31 31 select GENERIC_IRQ_SHOW 32 32 select ARCH_WANT_IPC_PARSE_VERSION 33 33 select GENERIC_PCI_IOMAP
+2
arch/sparc/mm/srmmu.c
··· 14 14 #include <linux/pagemap.h> 15 15 #include <linux/vmalloc.h> 16 16 #include <linux/kdebug.h> 17 + #include <linux/export.h> 17 18 #include <linux/kernel.h> 18 19 #include <linux/init.h> 19 20 #include <linux/log2.h> ··· 63 62 static pgd_t *srmmu_swapper_pg_dir; 64 63 65 64 const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops; 65 + EXPORT_SYMBOL(sparc32_cachetlb_ops); 66 66 67 67 #ifdef CONFIG_SMP 68 68 const struct sparc32_cachetlb_ops *local_ops;
+1 -1
arch/x86/include/asm/tsc.h
··· 66 66 extern void tsc_restore_sched_clock_state(void); 67 67 68 68 /* MSR based TSC calibration for Intel Atom SoC platforms */ 69 - int try_msr_calibrate_tsc(unsigned long *fast_calibrate); 69 + unsigned long try_msr_calibrate_tsc(void); 70 70 71 71 #endif /* _ASM_X86_TSC_H */
+6 -2
arch/x86/kernel/cpu/perf_event.c
··· 1521 1521 1522 1522 pr_cont("%s PMU driver.\n", x86_pmu.name); 1523 1523 1524 + x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ 1525 + 1524 1526 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) 1525 1527 quirk->func(); 1526 1528 ··· 1536 1534 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, 1537 1535 0, x86_pmu.num_counters, 0, 0); 1538 1536 1539 - x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ 1540 1537 x86_pmu_format_group.attrs = x86_pmu.format_attrs; 1541 1538 1542 1539 if (x86_pmu.event_attrs) ··· 1821 1820 if (ret) 1822 1821 return ret; 1823 1822 1823 + if (x86_pmu.attr_rdpmc_broken) 1824 + return -ENOTSUPP; 1825 + 1824 1826 if (!!val != !!x86_pmu.attr_rdpmc) { 1825 1827 x86_pmu.attr_rdpmc = !!val; 1826 - smp_call_function(change_rdpmc, (void *)val, 1); 1828 + on_each_cpu(change_rdpmc, (void *)val, 1); 1827 1829 } 1828 1830 1829 1831 return count;
+1
arch/x86/kernel/cpu/perf_event.h
··· 409 409 /* 410 410 * sysfs attrs 411 411 */ 412 + int attr_rdpmc_broken; 412 413 int attr_rdpmc; 413 414 struct attribute **format_attrs; 414 415 struct attribute **event_attrs;
+3 -8
arch/x86/kernel/cpu/perf_event_intel.c
··· 1361 1361 intel_pmu_disable_all(); 1362 1362 handled = intel_pmu_drain_bts_buffer(); 1363 1363 status = intel_pmu_get_status(); 1364 - if (!status) { 1365 - intel_pmu_enable_all(0); 1366 - return handled; 1367 - } 1364 + if (!status) 1365 + goto done; 1368 1366 1369 1367 loops = 0; 1370 1368 again: ··· 2308 2310 if (version > 1) 2309 2311 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); 2310 2312 2311 - /* 2312 - * v2 and above have a perf capabilities MSR 2313 - */ 2314 - if (version > 1) { 2313 + if (boot_cpu_has(X86_FEATURE_PDCM)) { 2315 2314 u64 capabilities; 2316 2315 2317 2316 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
+11 -3
arch/x86/kernel/cpu/perf_event_intel_uncore.c
··· 501 501 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, 502 502 SNBEP_CBO_PMON_CTL_TID_EN, 0x1), 503 503 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), 504 + SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6), 504 505 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), 506 + SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6), 505 507 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), 508 + SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6), 506 509 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), 507 510 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), 508 511 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), ··· 1181 1178 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, 1182 1179 SNBEP_CBO_PMON_CTL_TID_EN, 0x1), 1183 1180 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), 1184 - SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), 1185 - SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), 1186 - SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), 1181 + SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), 1187 1182 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), 1183 + SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), 1184 + SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), 1185 + SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc), 1186 + SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), 1187 + SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc), 1188 + SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), 1189 + SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc), 1188 1190 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10), 1189 1191 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10), 1190 1192 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
+33 -15
arch/x86/kernel/cpu/perf_event_p6.c
··· 231 231 232 232 }; 233 233 234 + static __init void p6_pmu_rdpmc_quirk(void) 235 + { 236 + if (boot_cpu_data.x86_mask < 9) { 237 + /* 238 + * PPro erratum 26; fixed in stepping 9 and above. 239 + */ 240 + pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n"); 241 + x86_pmu.attr_rdpmc_broken = 1; 242 + x86_pmu.attr_rdpmc = 0; 243 + } 244 + } 245 + 234 246 __init int p6_pmu_init(void) 235 247 { 248 + x86_pmu = p6_pmu; 249 + 236 250 switch (boot_cpu_data.x86_model) { 237 - case 1: 238 - case 3: /* Pentium Pro */ 239 - case 5: 240 - case 6: /* Pentium II */ 241 - case 7: 242 - case 8: 243 - case 11: /* Pentium III */ 244 - case 9: 245 - case 13: 246 - /* Pentium M */ 251 + case 1: /* Pentium Pro */ 252 + x86_add_quirk(p6_pmu_rdpmc_quirk); 247 253 break; 254 + 255 + case 3: /* Pentium II - Klamath */ 256 + case 5: /* Pentium II - Deschutes */ 257 + case 6: /* Pentium II - Mendocino */ 258 + break; 259 + 260 + case 7: /* Pentium III - Katmai */ 261 + case 8: /* Pentium III - Coppermine */ 262 + case 10: /* Pentium III Xeon */ 263 + case 11: /* Pentium III - Tualatin */ 264 + break; 265 + 266 + case 9: /* Pentium M - Banias */ 267 + case 13: /* Pentium M - Dothan */ 268 + break; 269 + 248 270 default: 249 - pr_cont("unsupported p6 CPU model %d ", 250 - boot_cpu_data.x86_model); 271 + pr_cont("unsupported p6 CPU model %d ", boot_cpu_data.x86_model); 251 272 return -ENODEV; 252 273 } 253 274 254 - x86_pmu = p6_pmu; 255 - 256 275 memcpy(hw_cache_event_ids, p6_hw_cache_event_ids, 257 276 sizeof(hw_cache_event_ids)); 258 - 259 277 260 278 return 0; 261 279 }
+3 -1
arch/x86/kernel/pci-dma.c
··· 100 100 flag |= __GFP_ZERO; 101 101 again: 102 102 page = NULL; 103 - if (!(flag & GFP_ATOMIC)) 103 + /* CMA can be used only in the context which permits sleeping */ 104 + if (flag & __GFP_WAIT) 104 105 page = dma_alloc_from_contiguous(dev, count, get_order(size)); 106 + /* fallback */ 105 107 if (!page) 106 108 page = alloc_pages_node(dev_to_node(dev), flag, get_order(size)); 107 109 if (!page)
+2 -5
arch/x86/kernel/tsc.c
··· 653 653 654 654 /* Calibrate TSC using MSR for Intel Atom SoCs */ 655 655 local_irq_save(flags); 656 - i = try_msr_calibrate_tsc(&fast_calibrate); 656 + fast_calibrate = try_msr_calibrate_tsc(); 657 657 local_irq_restore(flags); 658 - if (i >= 0) { 659 - if (i == 0) 660 - pr_warn("Fast TSC calibration using MSR failed\n"); 658 + if (fast_calibrate) 661 659 return fast_calibrate; 662 - } 663 660 664 661 local_irq_save(flags); 665 662 fast_calibrate = quick_pit_calibrate();
+15 -15
arch/x86/kernel/tsc_msr.c
··· 53 53 /* TNG */ 54 54 { 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } }, 55 55 /* VLV2 */ 56 - { 6, 0x37, 1, { 0, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } }, 56 + { 6, 0x37, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } }, 57 57 /* ANN */ 58 58 { 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } }, 59 59 }; ··· 77 77 78 78 /* 79 79 * Do MSR calibration only for known/supported CPUs. 80 - * Return values: 81 - * -1: CPU is unknown/unsupported for MSR based calibration 82 - * 0: CPU is known/supported, but calibration failed 83 - * 1: CPU is known/supported, and calibration succeeded 80 + * 81 + * Returns the calibration value or 0 if MSR calibration failed. 84 82 */ 85 - int try_msr_calibrate_tsc(unsigned long *fast_calibrate) 83 + unsigned long try_msr_calibrate_tsc(void) 86 84 { 87 - int cpu_index; 88 85 u32 lo, hi, ratio, freq_id, freq; 86 + unsigned long res; 87 + int cpu_index; 89 88 90 89 cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model); 91 90 if (cpu_index < 0) 92 - return -1; 93 - 94 - *fast_calibrate = 0; 91 + return 0; 95 92 96 93 if (freq_desc_tables[cpu_index].msr_plat) { 97 94 rdmsr(MSR_PLATFORM_INFO, lo, hi); ··· 100 103 pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio); 101 104 102 105 if (!ratio) 103 - return 0; 106 + goto fail; 104 107 105 108 /* Get FSB FREQ ID */ 106 109 rdmsr(MSR_FSB_FREQ, lo, hi); ··· 109 112 pr_info("Resolved frequency ID: %u, frequency: %u KHz\n", 110 113 freq_id, freq); 111 114 if (!freq) 112 - return 0; 115 + goto fail; 113 116 114 117 /* TSC frequency = maximum resolved freq * maximum resolved bus ratio */ 115 - *fast_calibrate = freq * ratio; 116 - pr_info("TSC runs at %lu KHz\n", *fast_calibrate); 118 + res = freq * ratio; 119 + pr_info("TSC runs at %lu KHz\n", res); 117 120 118 121 #ifdef CONFIG_X86_LOCAL_APIC 119 122 lapic_timer_frequency = (freq * 1000) / HZ; 120 123 pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency); 121 124 #endif 125 
+ return res; 122 126 123 - return 1; 127 + fail: 128 + pr_warn("Fast TSC calibration using MSR failed\n"); 129 + return 0; 124 130 }
+2
drivers/acpi/ac.c
··· 243 243 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); 244 244 return 0; 245 245 } 246 + #else 247 + #define acpi_ac_resume NULL 246 248 #endif 247 249 static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume); 248 250
+2
drivers/acpi/battery.c
··· 841 841 acpi_battery_update(battery); 842 842 return 0; 843 843 } 844 + #else 845 + #define acpi_battery_resume NULL 844 846 #endif 845 847 846 848 static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
-58
drivers/acpi/blacklist.c
··· 260 260 }, 261 261 { 262 262 .callback = dmi_disable_osi_win8, 263 - .ident = "Dell Inspiron 15R SE", 264 - .matches = { 265 - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 266 - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"), 267 - }, 268 - }, 269 - { 270 - .callback = dmi_disable_osi_win8, 271 263 .ident = "ThinkPad Edge E530", 272 264 .matches = { 273 265 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ··· 312 320 .matches = { 313 321 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 314 322 DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"), 315 - }, 316 - }, 317 - { 318 - .callback = dmi_disable_osi_win8, 319 - .ident = "HP ProBook 2013 models", 320 - .matches = { 321 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 322 - DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "), 323 - DMI_MATCH(DMI_PRODUCT_NAME, " G1"), 324 - }, 325 - }, 326 - { 327 - .callback = dmi_disable_osi_win8, 328 - .ident = "HP EliteBook 2013 models", 329 - .matches = { 330 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 331 - DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "), 332 - DMI_MATCH(DMI_PRODUCT_NAME, " G1"), 333 - }, 334 - }, 335 - { 336 - .callback = dmi_disable_osi_win8, 337 - .ident = "HP ZBook 14", 338 - .matches = { 339 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 340 - DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"), 341 - }, 342 - }, 343 - { 344 - .callback = dmi_disable_osi_win8, 345 - .ident = "HP ZBook 15", 346 - .matches = { 347 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 348 - DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"), 349 - }, 350 - }, 351 - { 352 - .callback = dmi_disable_osi_win8, 353 - .ident = "HP ZBook 17", 354 - .matches = { 355 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 356 - DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"), 357 - }, 358 - }, 359 - { 360 - .callback = dmi_disable_osi_win8, 361 - .ident = "HP EliteBook 8780w", 362 - .matches = { 363 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 364 - DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"), 365 323 }, 366 324 }, 367 325
+2
drivers/acpi/button.c
··· 80 80 81 81 #ifdef CONFIG_PM_SLEEP 82 82 static int acpi_button_resume(struct device *dev); 83 + #else 84 + #define acpi_button_resume NULL 83 85 #endif 84 86 static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume); 85 87
+3 -5
drivers/acpi/dock.c
··· 713 713 static ssize_t show_docked(struct device *dev, 714 714 struct device_attribute *attr, char *buf) 715 715 { 716 - struct acpi_device *tmp; 717 - 718 716 struct dock_station *dock_station = dev->platform_data; 717 + struct acpi_device *adev = NULL; 719 718 720 - if (!acpi_bus_get_device(dock_station->handle, &tmp)) 721 - return snprintf(buf, PAGE_SIZE, "1\n"); 722 - return snprintf(buf, PAGE_SIZE, "0\n"); 719 + acpi_bus_get_device(dock_station->handle, &adev); 720 + return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev)); 723 721 } 724 722 static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL); 725 723
+3
drivers/acpi/fan.c
··· 55 55 #ifdef CONFIG_PM_SLEEP 56 56 static int acpi_fan_suspend(struct device *dev); 57 57 static int acpi_fan_resume(struct device *dev); 58 + #else 59 + #define acpi_fan_suspend NULL 60 + #define acpi_fan_resume NULL 58 61 #endif 59 62 static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume); 60 63
+1
drivers/acpi/pci_irq.c
··· 430 430 pin_name(pin)); 431 431 } 432 432 433 + kfree(entry); 433 434 return 0; 434 435 } 435 436
+3 -1
drivers/acpi/sbs.c
··· 450 450 { 451 451 unsigned long x; 452 452 struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev)); 453 - if (sscanf(buf, "%ld\n", &x) == 1) 453 + if (sscanf(buf, "%lu\n", &x) == 1) 454 454 battery->alarm_capacity = x / 455 455 (1000 * acpi_battery_scale(battery)); 456 456 if (battery->present) ··· 668 668 acpi_sbs_callback(sbs); 669 669 return 0; 670 670 } 671 + #else 672 + #define acpi_sbs_resume NULL 671 673 #endif 672 674 673 675 static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
+2
drivers/acpi/thermal.c
··· 102 102 103 103 #ifdef CONFIG_PM_SLEEP 104 104 static int acpi_thermal_resume(struct device *dev); 105 + #else 106 + #define acpi_thermal_resume NULL 105 107 #endif 106 108 static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume); 107 109
+141 -6
drivers/acpi/video.c
··· 81 81 module_param(allow_duplicates, bool, 0644); 82 82 83 83 /* 84 - * For Windows 8 systems: if set ture and the GPU driver has 85 - * registered a backlight interface, skip registering ACPI video's. 84 + * For Windows 8 systems: used to decide if video module 85 + * should skip registering backlight interface of its own. 86 86 */ 87 - static bool use_native_backlight = false; 88 - module_param(use_native_backlight, bool, 0644); 87 + static int use_native_backlight_param = -1; 88 + module_param_named(use_native_backlight, use_native_backlight_param, int, 0444); 89 + static bool use_native_backlight_dmi = false; 89 90 90 91 static int register_count; 91 92 static struct mutex video_list_lock; ··· 232 231 static int acpi_video_switch_brightness(struct acpi_video_device *device, 233 232 int event); 234 233 234 + static bool acpi_video_use_native_backlight(void) 235 + { 236 + if (use_native_backlight_param != -1) 237 + return use_native_backlight_param; 238 + else 239 + return use_native_backlight_dmi; 240 + } 241 + 235 242 static bool acpi_video_verify_backlight_support(void) 236 243 { 237 - if (acpi_osi_is_win8() && use_native_backlight && 244 + if (acpi_osi_is_win8() && acpi_video_use_native_backlight() && 238 245 backlight_device_registered(BACKLIGHT_RAW)) 239 246 return false; 240 247 return acpi_video_backlight_support(); ··· 407 398 return 0; 408 399 } 409 400 401 + static int __init video_set_use_native_backlight(const struct dmi_system_id *d) 402 + { 403 + use_native_backlight_dmi = true; 404 + return 0; 405 + } 406 + 410 407 static struct dmi_system_id video_dmi_table[] __initdata = { 411 408 /* 412 409 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 ··· 455 440 .matches = { 456 441 DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), 457 442 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), 443 + }, 444 + }, 445 + { 446 + .callback = video_set_use_native_backlight, 447 + .ident = "ThinkPad T430s", 448 + .matches = { 449 + DMI_MATCH(DMI_SYS_VENDOR, 
"LENOVO"), 450 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"), 451 + }, 452 + }, 453 + { 454 + .callback = video_set_use_native_backlight, 455 + .ident = "ThinkPad X230", 456 + .matches = { 457 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 458 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"), 459 + }, 460 + }, 461 + { 462 + .callback = video_set_use_native_backlight, 463 + .ident = "ThinkPad X1 Carbon", 464 + .matches = { 465 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 466 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"), 467 + }, 468 + }, 469 + { 470 + .callback = video_set_use_native_backlight, 471 + .ident = "Lenovo Yoga 13", 472 + .matches = { 473 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 474 + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"), 475 + }, 476 + }, 477 + { 478 + .callback = video_set_use_native_backlight, 479 + .ident = "Dell Inspiron 7520", 480 + .matches = { 481 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 482 + DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"), 483 + }, 484 + }, 485 + { 486 + .callback = video_set_use_native_backlight, 487 + .ident = "Acer Aspire 5733Z", 488 + .matches = { 489 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 490 + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"), 491 + }, 492 + }, 493 + { 494 + .callback = video_set_use_native_backlight, 495 + .ident = "Acer Aspire V5-431", 496 + .matches = { 497 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 498 + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-431"), 499 + }, 500 + }, 501 + { 502 + .callback = video_set_use_native_backlight, 503 + .ident = "HP ProBook 4340s", 504 + .matches = { 505 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 506 + DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4340s"), 507 + }, 508 + }, 509 + { 510 + .callback = video_set_use_native_backlight, 511 + .ident = "HP ProBook 2013 models", 512 + .matches = { 513 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 514 + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "), 515 + DMI_MATCH(DMI_PRODUCT_NAME, " G1"), 516 + }, 517 + }, 518 + { 519 + 
.callback = video_set_use_native_backlight, 520 + .ident = "HP EliteBook 2013 models", 521 + .matches = { 522 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 523 + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "), 524 + DMI_MATCH(DMI_PRODUCT_NAME, " G1"), 525 + }, 526 + }, 527 + { 528 + .callback = video_set_use_native_backlight, 529 + .ident = "HP ZBook 14", 530 + .matches = { 531 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 532 + DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"), 533 + }, 534 + }, 535 + { 536 + .callback = video_set_use_native_backlight, 537 + .ident = "HP ZBook 15", 538 + .matches = { 539 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 540 + DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"), 541 + }, 542 + }, 543 + { 544 + .callback = video_set_use_native_backlight, 545 + .ident = "HP ZBook 17", 546 + .matches = { 547 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 548 + DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"), 549 + }, 550 + }, 551 + { 552 + .callback = video_set_use_native_backlight, 553 + .ident = "HP EliteBook 8780w", 554 + .matches = { 555 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 556 + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"), 458 557 }, 459 558 }, 460 559 {} ··· 814 685 union acpi_object *o; 815 686 struct acpi_video_device_brightness *br = NULL; 816 687 int result = -EINVAL; 688 + u32 value; 817 689 818 690 if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) { 819 691 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available " ··· 845 715 printk(KERN_ERR PREFIX "Invalid data\n"); 846 716 continue; 847 717 } 848 - br->levels[count] = (u32) o->integer.value; 718 + value = (u32) o->integer.value; 719 + /* Skip duplicate entries */ 720 + if (count > 2 && br->levels[count - 1] == value) 721 + continue; 722 + 723 + br->levels[count] = value; 849 724 850 725 if (br->levels[count] > max_level) 851 726 max_level = br->levels[count];
-16
drivers/acpi/video_detect.c
··· 168 168 DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), 169 169 }, 170 170 }, 171 - { 172 - .callback = video_detect_force_vendor, 173 - .ident = "HP EliteBook Revolve 810", 174 - .matches = { 175 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 176 - DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Revolve 810 G1"), 177 - }, 178 - }, 179 - { 180 - .callback = video_detect_force_vendor, 181 - .ident = "Lenovo Yoga 13", 182 - .matches = { 183 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 184 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"), 185 - }, 186 - }, 187 171 { }, 188 172 }; 189 173
+1
drivers/ata/Kconfig
··· 247 247 248 248 config SATA_MV 249 249 tristate "Marvell SATA support" 250 + select GENERIC_PHY 250 251 help 251 252 This option enables support for the Marvell Serial ATA family. 252 253 Currently supports 88SX[56]0[48][01] PCI(-X) chips,
+17 -1
drivers/ata/ahci.c
··· 61 61 /* board IDs by feature in alphabetical order */ 62 62 board_ahci, 63 63 board_ahci_ign_iferr, 64 + board_ahci_noncq, 64 65 board_ahci_nosntf, 65 66 board_ahci_yes_fbs, 66 67 ··· 117 116 }, 118 117 [board_ahci_ign_iferr] = { 119 118 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR), 119 + .flags = AHCI_FLAG_COMMON, 120 + .pio_mask = ATA_PIO4, 121 + .udma_mask = ATA_UDMA6, 122 + .port_ops = &ahci_ops, 123 + }, 124 + [board_ahci_noncq] = { 125 + AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ), 120 126 .flags = AHCI_FLAG_COMMON, 121 127 .pio_mask = ATA_PIO4, 122 128 .udma_mask = ATA_UDMA6, ··· 459 451 { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */ 460 452 { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ 461 453 { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ 454 + 455 + /* 456 + * Samsung SSDs found on some macbooks. NCQ times out. 457 + * https://bugzilla.kernel.org/show_bug.cgi?id=60731 458 + */ 459 + { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq }, 462 460 463 461 /* Enmotus */ 464 462 { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, ··· 1184 1170 1185 1171 nvec = rc; 1186 1172 rc = pci_enable_msi_block(pdev, nvec); 1187 - if (rc) 1173 + if (rc < 0) 1188 1174 goto intx; 1175 + else if (rc > 0) 1176 + goto single_msi; 1189 1177 1190 1178 return nvec; 1191 1179
+5 -2
drivers/ata/libata-pmp.c
··· 447 447 * otherwise. Don't try hard to recover it. 448 448 */ 449 449 ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY; 450 - } else if (vendor == 0x197b && devid == 0x2352) { 451 - /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */ 450 + } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) { 451 + /* 452 + * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350? 453 + * 0x0325: jmicron JMB394. 454 + */ 452 455 ata_for_each_link(link, ap, EDGE) { 453 456 /* SRST breaks detection and disks get misclassified 454 457 * LPM disabled to avoid potential problems
+6 -2
drivers/ata/pata_imx.c
··· 119 119 return PTR_ERR(priv->clk); 120 120 } 121 121 122 - clk_prepare_enable(priv->clk); 122 + ret = clk_prepare_enable(priv->clk); 123 + if (ret) 124 + return ret; 123 125 124 126 host = ata_host_alloc(&pdev->dev, 1); 125 127 if (!host) { ··· 214 212 struct ata_host *host = dev_get_drvdata(dev); 215 213 struct pata_imx_priv *priv = host->private_data; 216 214 217 - clk_prepare_enable(priv->clk); 215 + int ret = clk_prepare_enable(priv->clk); 216 + if (ret) 217 + return ret; 218 218 219 219 __raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL); 220 220
+8 -4
drivers/ata/sata_mv.c
··· 4104 4104 if (!hpriv->port_phys) 4105 4105 return -ENOMEM; 4106 4106 host->private_data = hpriv; 4107 - hpriv->n_ports = n_ports; 4108 4107 hpriv->board_idx = chip_soc; 4109 4108 4110 4109 host->iomap = NULL; ··· 4131 4132 rc = PTR_ERR(hpriv->port_phys[port]); 4132 4133 hpriv->port_phys[port] = NULL; 4133 4134 if (rc != -EPROBE_DEFER) 4134 - dev_warn(&pdev->dev, "error getting phy %d", 4135 - rc); 4135 + dev_warn(&pdev->dev, "error getting phy %d", rc); 4136 + 4137 + /* Cleanup only the initialized ports */ 4138 + hpriv->n_ports = port; 4136 4139 goto err; 4137 4140 } else 4138 4141 phy_power_on(hpriv->port_phys[port]); 4139 4142 } 4143 + 4144 + /* All the ports have been initialized */ 4145 + hpriv->n_ports = n_ports; 4140 4146 4141 4147 /* 4142 4148 * (Re-)program MBUS remapping windows if we are asked to. ··· 4180 4176 clk_disable_unprepare(hpriv->clk); 4181 4177 clk_put(hpriv->clk); 4182 4178 } 4183 - for (port = 0; port < n_ports; port++) { 4179 + for (port = 0; port < hpriv->n_ports; port++) { 4184 4180 if (!IS_ERR(hpriv->port_clks[port])) { 4185 4181 clk_disable_unprepare(hpriv->port_clks[port]); 4186 4182 clk_put(hpriv->port_clks[port]);
+1
drivers/ata/sata_sil.c
··· 157 157 { "ST380011ASL", SIL_QUIRK_MOD15WRITE }, 158 158 { "ST3120022ASL", SIL_QUIRK_MOD15WRITE }, 159 159 { "ST3160021ASL", SIL_QUIRK_MOD15WRITE }, 160 + { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE }, 160 161 { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX }, 161 162 { } 162 163 };
+12 -13
drivers/base/dma-buf.c
··· 616 616 if (ret) 617 617 return ret; 618 618 619 - seq_printf(s, "\nDma-buf Objects:\n"); 620 - seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n"); 619 + seq_puts(s, "\nDma-buf Objects:\n"); 620 + seq_puts(s, "size\tflags\tmode\tcount\texp_name\n"); 621 621 622 622 list_for_each_entry(buf_obj, &db_list.head, list_node) { 623 623 ret = mutex_lock_interruptible(&buf_obj->lock); 624 624 625 625 if (ret) { 626 - seq_printf(s, 627 - "\tERROR locking buffer object: skipping\n"); 626 + seq_puts(s, 627 + "\tERROR locking buffer object: skipping\n"); 628 628 continue; 629 629 } 630 630 631 - seq_printf(s, "\t"); 632 - 633 - seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n", 634 - buf_obj->exp_name, buf_obj->size, 631 + seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n", 632 + buf_obj->size, 635 633 buf_obj->file->f_flags, buf_obj->file->f_mode, 636 - (long)(buf_obj->file->f_count.counter)); 634 + (long)(buf_obj->file->f_count.counter), 635 + buf_obj->exp_name); 637 636 638 - seq_printf(s, "\t\tAttached Devices:\n"); 637 + seq_puts(s, "\tAttached Devices:\n"); 639 638 attach_count = 0; 640 639 641 640 list_for_each_entry(attach_obj, &buf_obj->attachments, node) { 642 - seq_printf(s, "\t\t"); 641 + seq_puts(s, "\t"); 643 642 644 - seq_printf(s, "%s\n", attach_obj->dev->init_name); 643 + seq_printf(s, "%s\n", dev_name(attach_obj->dev)); 645 644 attach_count++; 646 645 } 647 646 648 - seq_printf(s, "\n\t\tTotal %d devices attached\n", 647 + seq_printf(s, "Total %d devices attached\n\n", 649 648 attach_count); 650 649 651 650 count++;
+1 -2
drivers/cpufreq/cpufreq.c
··· 1323 1323 up_read(&policy->rwsem); 1324 1324 1325 1325 if (cpu != policy->cpu) { 1326 - if (!frozen) 1327 - sysfs_remove_link(&dev->kobj, "cpufreq"); 1326 + sysfs_remove_link(&dev->kobj, "cpufreq"); 1328 1327 } else if (cpus > 1) { 1329 1328 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu); 1330 1329 if (new_cpu >= 0) {
+13 -4
drivers/cpufreq/intel_pstate.c
··· 34 34 35 35 #define SAMPLE_COUNT 3 36 36 37 - #define BYT_RATIOS 0x66a 38 - #define BYT_VIDS 0x66b 37 + #define BYT_RATIOS 0x66a 38 + #define BYT_VIDS 0x66b 39 + #define BYT_TURBO_RATIOS 0x66c 40 + 39 41 40 42 #define FRAC_BITS 8 41 43 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) ··· 359 357 { 360 358 u64 value; 361 359 rdmsrl(BYT_RATIOS, value); 362 - return value & 0xFF; 360 + return (value >> 8) & 0xFF; 363 361 } 364 362 365 363 static int byt_get_max_pstate(void) ··· 367 365 u64 value; 368 366 rdmsrl(BYT_RATIOS, value); 369 367 return (value >> 16) & 0xFF; 368 + } 369 + 370 + static int byt_get_turbo_pstate(void) 371 + { 372 + u64 value; 373 + rdmsrl(BYT_TURBO_RATIOS, value); 374 + return value & 0x3F; 370 375 } 371 376 372 377 static void byt_set_pstate(struct cpudata *cpudata, int pstate) ··· 478 469 .funcs = { 479 470 .get_max = byt_get_max_pstate, 480 471 .get_min = byt_get_min_pstate, 481 - .get_turbo = byt_get_max_pstate, 472 + .get_turbo = byt_get_turbo_pstate, 482 473 .set = byt_set_pstate, 483 474 .get_vid = byt_get_vid, 484 475 },
+7 -3
drivers/cpufreq/powernow-k8.c
··· 1076 1076 { 1077 1077 struct powernow_k8_data *data; 1078 1078 struct init_on_cpu init_on_cpu; 1079 - int rc; 1079 + int rc, cpu; 1080 1080 1081 1081 smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1); 1082 1082 if (rc) ··· 1140 1140 pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", 1141 1141 data->currfid, data->currvid); 1142 1142 1143 - per_cpu(powernow_data, pol->cpu) = data; 1143 + /* Point all the CPUs in this policy to the same data */ 1144 + for_each_cpu(cpu, pol->cpus) 1145 + per_cpu(powernow_data, cpu) = data; 1144 1146 1145 1147 return 0; 1146 1148 ··· 1157 1155 static int powernowk8_cpu_exit(struct cpufreq_policy *pol) 1158 1156 { 1159 1157 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); 1158 + int cpu; 1160 1159 1161 1160 if (!data) 1162 1161 return -EINVAL; ··· 1168 1165 1169 1166 kfree(data->powernow_table); 1170 1167 kfree(data); 1171 - per_cpu(powernow_data, pol->cpu) = NULL; 1168 + for_each_cpu(cpu, pol->cpus) 1169 + per_cpu(powernow_data, cpu) = NULL; 1172 1170 1173 1171 return 0; 1174 1172 }
+12
drivers/gpu/drm/drm_ioctl.c
··· 296 296 case DRM_CAP_ASYNC_PAGE_FLIP: 297 297 req->value = dev->mode_config.async_page_flip; 298 298 break; 299 + case DRM_CAP_CURSOR_WIDTH: 300 + if (dev->mode_config.cursor_width) 301 + req->value = dev->mode_config.cursor_width; 302 + else 303 + req->value = 64; 304 + break; 305 + case DRM_CAP_CURSOR_HEIGHT: 306 + if (dev->mode_config.cursor_height) 307 + req->value = dev->mode_config.cursor_height; 308 + else 309 + req->value = 64; 310 + break; 299 311 default: 300 312 return -EINVAL; 301 313 }
+3 -1
drivers/gpu/drm/i2c/tda998x_drv.c
··· 1151 1151 1152 1152 priv->current_page = 0xff; 1153 1153 priv->cec = i2c_new_dummy(client->adapter, 0x34); 1154 - if (!priv->cec) 1154 + if (!priv->cec) { 1155 + kfree(priv); 1155 1156 return -ENODEV; 1157 + } 1156 1158 priv->dpms = DRM_MODE_DPMS_OFF; 1157 1159 1158 1160 encoder_slave->slave_priv = priv;
+14
drivers/gpu/drm/i915/intel_display.c
··· 8586 8586 if (ring->id == RCS) 8587 8587 len += 6; 8588 8588 8589 + /* 8590 + * BSpec MI_DISPLAY_FLIP for IVB: 8591 + * "The full packet must be contained within the same cache line." 8592 + * 8593 + * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same 8594 + * cacheline, if we ever start emitting more commands before 8595 + * the MI_DISPLAY_FLIP we may need to first emit everything else, 8596 + * then do the cacheline alignment, and finally emit the 8597 + * MI_DISPLAY_FLIP. 8598 + */ 8599 + ret = intel_ring_cacheline_align(ring); 8600 + if (ret) 8601 + goto err_unpin; 8602 + 8589 8603 ret = intel_ring_begin(ring, len); 8590 8604 if (ret) 8591 8605 goto err_unpin;
+13 -6
drivers/gpu/drm/i915/intel_dp.c
··· 537 537 uint8_t msg[20]; 538 538 int msg_bytes; 539 539 uint8_t ack; 540 + int retry; 540 541 541 542 if (WARN_ON(send_bytes > 16)) 542 543 return -E2BIG; ··· 549 548 msg[3] = send_bytes - 1; 550 549 memcpy(&msg[4], send, send_bytes); 551 550 msg_bytes = send_bytes + 4; 552 - for (;;) { 551 + for (retry = 0; retry < 7; retry++) { 553 552 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); 554 553 if (ret < 0) 555 554 return ret; 556 555 ack >>= 4; 557 556 if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) 558 - break; 557 + return send_bytes; 559 558 else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) 560 - udelay(100); 559 + usleep_range(400, 500); 561 560 else 562 561 return -EIO; 563 562 } 564 - return send_bytes; 563 + 564 + DRM_ERROR("too many retries, giving up\n"); 565 + return -EIO; 565 566 } 566 567 567 568 /* Write a single byte to the aux channel in native mode */ ··· 585 582 int reply_bytes; 586 583 uint8_t ack; 587 584 int ret; 585 + int retry; 588 586 589 587 if (WARN_ON(recv_bytes > 19)) 590 588 return -E2BIG; ··· 599 595 msg_bytes = 4; 600 596 reply_bytes = recv_bytes + 1; 601 597 602 - for (;;) { 598 + for (retry = 0; retry < 7; retry++) { 603 599 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, 604 600 reply, reply_bytes); 605 601 if (ret == 0) ··· 612 608 return ret - 1; 613 609 } 614 610 else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) 615 - udelay(100); 611 + usleep_range(400, 500); 616 612 else 617 613 return -EIO; 618 614 } 615 + 616 + DRM_ERROR("too many retries, giving up\n"); 617 + return -EIO; 619 618 } 620 619 621 620 static int
+21
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 1653 1653 return 0; 1654 1654 } 1655 1655 1656 + /* Align the ring tail to a cacheline boundary */ 1657 + int intel_ring_cacheline_align(struct intel_ring_buffer *ring) 1658 + { 1659 + int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t); 1660 + int ret; 1661 + 1662 + if (num_dwords == 0) 1663 + return 0; 1664 + 1665 + ret = intel_ring_begin(ring, num_dwords); 1666 + if (ret) 1667 + return ret; 1668 + 1669 + while (num_dwords--) 1670 + intel_ring_emit(ring, MI_NOOP); 1671 + 1672 + intel_ring_advance(ring); 1673 + 1674 + return 0; 1675 + } 1676 + 1656 1677 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) 1657 1678 { 1658 1679 struct drm_i915_private *dev_priv = ring->dev->dev_private;
+1
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 233 233 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); 234 234 235 235 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); 236 + int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring); 236 237 static inline void intel_ring_emit(struct intel_ring_buffer *ring, 237 238 u32 data) 238 239 {
+1
drivers/gpu/drm/nouveau/Makefile
··· 141 141 nouveau-y += core/subdev/mc/nv04.o 142 142 nouveau-y += core/subdev/mc/nv40.o 143 143 nouveau-y += core/subdev/mc/nv44.o 144 + nouveau-y += core/subdev/mc/nv4c.o 144 145 nouveau-y += core/subdev/mc/nv50.o 145 146 nouveau-y += core/subdev/mc/nv94.o 146 147 nouveau-y += core/subdev/mc/nv98.o
+5 -5
drivers/gpu/drm/nouveau/core/engine/device/nv40.c
··· 311 311 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 312 312 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 313 313 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; 314 - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; 314 + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; 315 315 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; 316 316 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 317 317 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; ··· 334 334 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 335 335 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 336 336 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; 337 - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; 337 + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; 338 338 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; 339 339 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 340 340 device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass; ··· 357 357 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 358 358 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 359 359 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; 360 - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; 360 + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; 361 361 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; 362 362 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 363 363 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; ··· 380 380 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 381 381 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 382 382 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; 383 - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; 383 + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; 384 384 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; 385 385 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 386 386 device->oclass[NVDEV_SUBDEV_FB ] = 
nv46_fb_oclass; ··· 403 403 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 404 404 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 405 405 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; 406 - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; 406 + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; 407 407 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; 408 408 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 409 409 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
+1 -1
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
··· 1142 1142 if (conf != ~0) { 1143 1143 if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) { 1144 1144 u32 soff = (ffs(outp.or) - 1) * 0x08; 1145 - u32 ctrl = nv_rd32(priv, 0x610798 + soff); 1145 + u32 ctrl = nv_rd32(priv, 0x610794 + soff); 1146 1146 u32 datarate; 1147 1147 1148 1148 switch ((ctrl & 0x000f0000) >> 16) {
+1 -1
drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
··· 112 112 113 113 nv_wr32(priv, 0x002270, cur->addr >> 12); 114 114 nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); 115 - if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000)) 115 + if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000)) 116 116 nv_error(priv, "runlist %d update timeout\n", engine); 117 117 mutex_unlock(&nv_subdev(priv)->mutex); 118 118 }
+1 -1
drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
··· 539 539 ustatus &= ~0x04030000; 540 540 } 541 541 if (ustatus && display) { 542 - nv_error("%s - TP%d:", name, i); 542 + nv_error(priv, "%s - TP%d:", name, i); 543 543 nouveau_bitfield_print(nv50_mpc_traps, ustatus); 544 544 pr_cont("\n"); 545 545 ustatus = 0;
+1
drivers/gpu/drm/nouveau/core/include/subdev/mc.h
··· 47 47 extern struct nouveau_oclass *nv04_mc_oclass; 48 48 extern struct nouveau_oclass *nv40_mc_oclass; 49 49 extern struct nouveau_oclass *nv44_mc_oclass; 50 + extern struct nouveau_oclass *nv4c_mc_oclass; 50 51 extern struct nouveau_oclass *nv50_mc_oclass; 51 52 extern struct nouveau_oclass *nv94_mc_oclass; 52 53 extern struct nouveau_oclass *nv98_mc_oclass;
+4
drivers/gpu/drm/nouveau/core/subdev/bios/base.c
··· 130 130 u16 pcir; 131 131 int i; 132 132 133 + /* there is no prom on nv4x IGP's */ 134 + if (device->card_type == NV_40 && device->chipset >= 0x4c) 135 + return; 136 + 133 137 /* enable access to rom */ 134 138 if (device->card_type >= NV_50) 135 139 pcireg = 0x088050;
+1 -1
drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
··· 36 36 .fini = _nouveau_fb_fini, 37 37 }, 38 38 .base.memtype = nv04_fb_memtype_valid, 39 - .base.ram = &nv10_ram_oclass, 39 + .base.ram = &nv1a_ram_oclass, 40 40 .tile.regions = 8, 41 41 .tile.init = nv10_fb_tile_init, 42 42 .tile.fini = nv10_fb_tile_fini,
+1
drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
··· 14 14 extern const struct nouveau_mc_intr nv04_mc_intr[]; 15 15 int nv04_mc_init(struct nouveau_object *); 16 16 void nv40_mc_msi_rearm(struct nouveau_mc *); 17 + int nv44_mc_init(struct nouveau_object *object); 17 18 int nv50_mc_init(struct nouveau_object *); 18 19 extern const struct nouveau_mc_intr nv50_mc_intr[]; 19 20 extern const struct nouveau_mc_intr nvc0_mc_intr[];
+1 -1
drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
··· 24 24 25 25 #include "nv04.h" 26 26 27 - static int 27 + int 28 28 nv44_mc_init(struct nouveau_object *object) 29 29 { 30 30 struct nv04_mc_priv *priv = (void *)object;
+45
drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
··· 1 + /* 2 + * Copyright 2014 Ilia Mirkin 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + * Authors: Ilia Mirkin 23 + */ 24 + 25 + #include "nv04.h" 26 + 27 + static void 28 + nv4c_mc_msi_rearm(struct nouveau_mc *pmc) 29 + { 30 + struct nv04_mc_priv *priv = (void *)pmc; 31 + nv_wr08(priv, 0x088050, 0xff); 32 + } 33 + 34 + struct nouveau_oclass * 35 + nv4c_mc_oclass = &(struct nouveau_mc_oclass) { 36 + .base.handle = NV_SUBDEV(MC, 0x4c), 37 + .base.ofuncs = &(struct nouveau_ofuncs) { 38 + .ctor = nv04_mc_ctor, 39 + .dtor = _nouveau_mc_dtor, 40 + .init = nv44_mc_init, 41 + .fini = _nouveau_mc_fini, 42 + }, 43 + .intr = nv04_mc_intr, 44 + .msi_rearm = nv4c_mc_msi_rearm, 45 + }.base;
+24 -2
drivers/gpu/drm/nouveau/nouveau_acpi.c
··· 106 106 return 0; 107 107 } 108 108 109 + /* 110 + * On some platforms, _DSM(nouveau_op_dsm_muid, func0) has special 111 + * requirements on the fourth parameter, so a private implementation 112 + * instead of using acpi_check_dsm(). 113 + */ 114 + static int nouveau_check_optimus_dsm(acpi_handle handle) 115 + { 116 + int result; 117 + 118 + /* 119 + * Function 0 returns a Buffer containing available functions. 120 + * The args parameter is ignored for function 0, so just put 0 in it 121 + */ 122 + if (nouveau_optimus_dsm(handle, 0, 0, &result)) 123 + return 0; 124 + 125 + /* 126 + * ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. 127 + * If the n-th bit is enabled, function n is supported 128 + */ 129 + return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS); 130 + } 131 + 109 132 static int nouveau_dsm(acpi_handle handle, int func, int arg) 110 133 { 111 134 int ret = 0; ··· 230 207 1 << NOUVEAU_DSM_POWER)) 231 208 retval |= NOUVEAU_DSM_HAS_MUX; 232 209 233 - if (acpi_check_dsm(dhandle, nouveau_op_dsm_muid, 0x00000100, 234 - 1 << NOUVEAU_DSM_OPTIMUS_CAPS)) 210 + if (nouveau_check_optimus_dsm(dhandle)) 235 211 retval |= NOUVEAU_DSM_HAS_OPT; 236 212 237 213 if (retval & NOUVEAU_DSM_HAS_OPT) {
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 1249 1249 mem->bus.is_iomem = !dev->agp->cant_use_aperture; 1250 1250 } 1251 1251 #endif 1252 - if (!node->memtype) 1252 + if (nv_device(drm->device)->card_type < NV_50 || !node->memtype) 1253 1253 /* untiled */ 1254 1254 break; 1255 1255 /* fallthrough, tiled memory */
+3
drivers/gpu/drm/nouveau/nouveau_drm.c
··· 376 376 if (ret) 377 377 goto fail_device; 378 378 379 + dev->irq_enabled = true; 380 + 379 381 /* workaround an odd issue on nvc1 by disabling the device's 380 382 * nosnoop capability. hopefully won't cause issues until a 381 383 * better fix is found - assuming there is one... ··· 477 475 struct nouveau_drm *drm = nouveau_drm(dev); 478 476 struct nouveau_object *device; 479 477 478 + dev->irq_enabled = false; 480 479 device = drm->client.base.device; 481 480 drm_put_dev(dev); 482 481
+3 -1
drivers/gpu/drm/nouveau/nouveau_vga.c
··· 14 14 { 15 15 struct nouveau_device *device = nouveau_dev(priv); 16 16 17 - if (device->chipset >= 0x40) 17 + if (device->card_type == NV_40 && device->chipset >= 0x4c) 18 + nv_wr32(device, 0x088060, state); 19 + else if (device->chipset >= 0x40) 18 20 nv_wr32(device, 0x088054, state); 19 21 else 20 22 nv_wr32(device, 0x001854, state);
+8 -7
drivers/gpu/drm/radeon/atombios_crtc.c
··· 559 559 u32 adjusted_clock = mode->clock; 560 560 int encoder_mode = atombios_get_encoder_mode(encoder); 561 561 u32 dp_clock = mode->clock; 562 - int bpc = radeon_get_monitor_bpc(connector); 562 + int bpc = radeon_crtc->bpc; 563 563 bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); 564 564 565 565 /* reset the pll flags */ ··· 1176 1176 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); 1177 1177 1178 1178 /* Set NUM_BANKS. */ 1179 - if (rdev->family >= CHIP_BONAIRE) { 1179 + if (rdev->family >= CHIP_TAHITI) { 1180 1180 unsigned tileb, index, num_banks, tile_split_bytes; 1181 1181 1182 1182 /* Calculate the macrotile mode index. */ ··· 1194 1194 return -EINVAL; 1195 1195 } 1196 1196 1197 - num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; 1197 + if (rdev->family >= CHIP_BONAIRE) 1198 + num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; 1199 + else 1200 + num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; 1198 1201 fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); 1199 1202 } else { 1200 - /* SI and older. */ 1201 - if (rdev->family >= CHIP_TAHITI) 1202 - tmp = rdev->config.si.tile_config; 1203 - else if (rdev->family >= CHIP_CAYMAN) 1203 + /* NI and older. */ 1204 + if (rdev->family >= CHIP_CAYMAN) 1204 1205 tmp = rdev->config.cayman.tile_config; 1205 1206 else 1206 1207 tmp = rdev->config.evergreen.tile_config;
+4 -3
drivers/gpu/drm/radeon/atombios_encoders.c
··· 464 464 465 465 static u8 radeon_atom_get_bpc(struct drm_encoder *encoder) 466 466 { 467 - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 468 467 int bpc = 8; 469 468 470 - if (connector) 471 - bpc = radeon_get_monitor_bpc(connector); 469 + if (encoder->crtc) { 470 + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); 471 + bpc = radeon_crtc->bpc; 472 + } 472 473 473 474 switch (bpc) { 474 475 case 0:
+1 -1
drivers/gpu/drm/radeon/evergreen.c
··· 1680 1680 case RADEON_HPD_6: 1681 1681 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) 1682 1682 connected = true; 1683 - break; 1683 + break; 1684 1684 default: 1685 1685 break; 1686 1686 }
+1 -1
drivers/gpu/drm/radeon/ni_dpm.c
··· 2588 2588 if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) 2589 2589 enable_sq_ramping = false; 2590 2590 2591 - if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) 2591 + if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) 2592 2592 enable_sq_ramping = false; 2593 2593 2594 2594 for (i = 0; i < state->performance_level_count; i++) {
+3 -1
drivers/gpu/drm/radeon/radeon.h
··· 135 135 /* R600+ */ 136 136 #define R600_RING_TYPE_UVD_INDEX 5 137 137 138 + /* number of hw syncs before falling back on blocking */ 139 + #define RADEON_NUM_SYNCS 4 140 + 138 141 /* hardcode those limit for now */ 139 142 #define RADEON_VA_IB_OFFSET (1 << 20) 140 143 #define RADEON_VA_RESERVED_SIZE (8 << 20) ··· 557 554 /* 558 555 * Semaphores. 559 556 */ 560 - /* everything here is constant */ 561 557 struct radeon_semaphore { 562 558 struct radeon_sa_bo *sa_bo; 563 559 signed waiters;
+2
drivers/gpu/drm/radeon/radeon_display.c
··· 571 571 radeon_crtc->max_cursor_width = CURSOR_WIDTH; 572 572 radeon_crtc->max_cursor_height = CURSOR_HEIGHT; 573 573 } 574 + dev->mode_config.cursor_width = radeon_crtc->max_cursor_width; 575 + dev->mode_config.cursor_height = radeon_crtc->max_cursor_height; 574 576 575 577 #if 0 576 578 radeon_crtc->mode_set.crtc = &radeon_crtc->base;
+1 -1
drivers/gpu/drm/radeon/radeon_ring.c
··· 139 139 } 140 140 141 141 /* 64 dwords should be enough for fence too */ 142 - r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8); 142 + r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8); 143 143 if (r) { 144 144 dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); 145 145 return r;
+16 -3
drivers/gpu/drm/radeon/radeon_semaphore.c
··· 34 34 int radeon_semaphore_create(struct radeon_device *rdev, 35 35 struct radeon_semaphore **semaphore) 36 36 { 37 + uint32_t *cpu_addr; 37 38 int i, r; 38 39 39 40 *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); 40 41 if (*semaphore == NULL) { 41 42 return -ENOMEM; 42 43 } 43 - r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, 44 - &(*semaphore)->sa_bo, 8, 8, true); 44 + r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo, 45 + 8 * RADEON_NUM_SYNCS, 8, true); 45 46 if (r) { 46 47 kfree(*semaphore); 47 48 *semaphore = NULL; ··· 50 49 } 51 50 (*semaphore)->waiters = 0; 52 51 (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo); 53 - *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; 52 + 53 + cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo); 54 + for (i = 0; i < RADEON_NUM_SYNCS; ++i) 55 + cpu_addr[i] = 0; 54 56 55 57 for (i = 0; i < RADEON_NUM_RINGS; ++i) 56 58 (*semaphore)->sync_to[i] = NULL; ··· 129 125 struct radeon_semaphore *semaphore, 130 126 int ring) 131 127 { 128 + unsigned count = 0; 132 129 int i, r; 133 130 134 131 for (i = 0; i < RADEON_NUM_RINGS; ++i) { ··· 143 138 if (!rdev->ring[i].ready) { 144 139 dev_err(rdev->dev, "Syncing to a disabled ring!"); 145 140 return -EINVAL; 141 + } 142 + 143 + if (++count > RADEON_NUM_SYNCS) { 144 + /* not enough room, wait manually */ 145 + radeon_fence_wait_locked(fence); 146 + continue; 146 147 } 147 148 148 149 /* allocate enough space for sync command */ ··· 175 164 176 165 radeon_ring_commit(rdev, &rdev->ring[i]); 177 166 radeon_fence_note_sync(fence, ring); 167 + 168 + semaphore->gpu_addr += 8; 178 169 } 179 170 180 171 return 0;
+1 -8
drivers/gpu/drm/radeon/rv770_dpm.c
··· 2526 2526 bool rv770_dpm_vblank_too_short(struct radeon_device *rdev) 2527 2527 { 2528 2528 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 2529 - u32 switch_limit = 300; 2530 - 2531 - /* quirks */ 2532 - /* ASUS K70AF */ 2533 - if ((rdev->pdev->device == 0x9553) && 2534 - (rdev->pdev->subsystem_vendor == 0x1043) && 2535 - (rdev->pdev->subsystem_device == 0x1c42)) 2536 - switch_limit = 200; 2529 + u32 switch_limit = 200; /* 300 */ 2537 2530 2538 2531 /* RV770 */ 2539 2532 /* mclk switching doesn't seem to work reliably on desktop RV770s */
+1 -1
drivers/gpu/drm/radeon/si_dpm.c
··· 2395 2395 if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) 2396 2396 enable_sq_ramping = false; 2397 2397 2398 - if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) 2398 + if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) 2399 2399 enable_sq_ramping = false; 2400 2400 2401 2401 for (i = 0; i < state->performance_level_count; i++) {
+1
drivers/gpu/drm/ttm/ttm_agp_backend.c
··· 126 126 agp_be->ttm.func = &ttm_agp_func; 127 127 128 128 if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) { 129 + kfree(agp_be); 129 130 return NULL; 130 131 } 131 132
+72 -50
drivers/gpu/drm/vmwgfx/svga3d_reg.h
··· 1223 1223 #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 1224 1224 1225 1225 #define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 1226 - 1226 + #define SVGA_3D_CMD_GB_SCREEN_DMA 1131 1227 + #define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132 1228 + #define SVGA_3D_CMD_GB_MOB_FENCE 1133 1229 + #define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134 1227 1230 #define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 1228 1231 #define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 1232 + #define SVGA_3D_CMD_NOP_ERROR 1137 1233 + 1234 + #define SVGA_3D_CMD_RESERVED1 1138 1235 + #define SVGA_3D_CMD_RESERVED2 1139 1236 + #define SVGA_3D_CMD_RESERVED3 1140 1237 + #define SVGA_3D_CMD_RESERVED4 1141 1238 + #define SVGA_3D_CMD_RESERVED5 1142 1229 1239 1230 1240 #define SVGA_3D_CMD_MAX 1142 1231 1241 #define SVGA_3D_CMD_FUTURE_MAX 3000 ··· 1983 1973 uint32 sizeInBytes; 1984 1974 uint32 validSizeInBytes; 1985 1975 SVGAMobFormat ptDepth; 1986 - } 1987 - __attribute__((__packed__)) 1976 + } __packed 1988 1977 SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ 1989 1978 1990 1979 typedef ··· 1993 1984 uint32 sizeInBytes; 1994 1985 uint32 validSizeInBytes; 1995 1986 SVGAMobFormat ptDepth; 1996 - } 1997 - __attribute__((__packed__)) 1987 + } __packed 1998 1988 SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ 1999 1989 2000 1990 typedef 2001 1991 struct { 2002 1992 SVGAOTableType type; 2003 - } 2004 - __attribute__((__packed__)) 1993 + } __packed 2005 1994 SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ 2006 1995 2007 1996 /* ··· 2012 2005 SVGAMobFormat ptDepth; 2013 2006 PPN base; 2014 2007 uint32 sizeInBytes; 2015 - } 2016 - __attribute__((__packed__)) 2008 + } __packed 2017 2009 SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ 2018 2010 2019 2011 ··· 2023 2017 typedef 2024 2018 struct SVGA3dCmdDestroyGBMob { 2025 2019 SVGAMobId mobid; 2026 - } 2027 - __attribute__((__packed__)) 2020 + } __packed 2028 2021 SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ 2029 2022 
2030 2023 /* ··· 2036 2031 SVGAMobFormat ptDepth; 2037 2032 PPN base; 2038 2033 uint32 sizeInBytes; 2039 - } 2040 - __attribute__((__packed__)) 2034 + } __packed 2041 2035 SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ 2042 2036 2043 2037 /* ··· 2049 2045 SVGAMobFormat ptDepth; 2050 2046 PPN64 base; 2051 2047 uint32 sizeInBytes; 2052 - } 2053 - __attribute__((__packed__)) 2048 + } __packed 2054 2049 SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ 2055 2050 2056 2051 /* ··· 2062 2059 SVGAMobFormat ptDepth; 2063 2060 PPN64 base; 2064 2061 uint32 sizeInBytes; 2065 - } 2066 - __attribute__((__packed__)) 2062 + } __packed 2067 2063 SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ 2068 2064 2069 2065 /* ··· 2072 2070 typedef 2073 2071 struct SVGA3dCmdUpdateGBMobMapping { 2074 2072 SVGAMobId mobid; 2075 - } 2076 - __attribute__((__packed__)) 2073 + } __packed 2077 2074 SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ 2078 2075 2079 2076 /* ··· 2088 2087 uint32 multisampleCount; 2089 2088 SVGA3dTextureFilter autogenFilter; 2090 2089 SVGA3dSize size; 2091 - } SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ 2090 + } __packed 2091 + SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ 2092 2092 2093 2093 /* 2094 2094 * Destroy a guest-backed surface. ··· 2098 2096 typedef 2099 2097 struct SVGA3dCmdDestroyGBSurface { 2100 2098 uint32 sid; 2101 - } SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ 2099 + } __packed 2100 + SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ 2102 2101 2103 2102 /* 2104 2103 * Bind a guest-backed surface to an object. 
··· 2109 2106 struct SVGA3dCmdBindGBSurface { 2110 2107 uint32 sid; 2111 2108 SVGAMobId mobid; 2112 - } SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ 2109 + } __packed 2110 + SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ 2113 2111 2114 2112 /* 2115 2113 * Conditionally bind a mob to a guest backed surface if testMobid ··· 2127 2123 SVGAMobId testMobid; 2128 2124 SVGAMobId mobid; 2129 2125 uint32 flags; 2130 - } 2126 + } __packed 2131 2127 SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ 2132 2128 2133 2129 /* ··· 2139 2135 struct SVGA3dCmdUpdateGBImage { 2140 2136 SVGA3dSurfaceImageId image; 2141 2137 SVGA3dBox box; 2142 - } SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ 2138 + } __packed 2139 + SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ 2143 2140 2144 2141 /* 2145 2142 * Update an entire guest-backed surface. ··· 2150 2145 typedef 2151 2146 struct SVGA3dCmdUpdateGBSurface { 2152 2147 uint32 sid; 2153 - } SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ 2148 + } __packed 2149 + SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ 2154 2150 2155 2151 /* 2156 2152 * Readback an image in a guest-backed surface. ··· 2161 2155 typedef 2162 2156 struct SVGA3dCmdReadbackGBImage { 2163 2157 SVGA3dSurfaceImageId image; 2164 - } SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ 2158 + } __packed 2159 + SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ 2165 2160 2166 2161 /* 2167 2162 * Readback an entire guest-backed surface. ··· 2172 2165 typedef 2173 2166 struct SVGA3dCmdReadbackGBSurface { 2174 2167 uint32 sid; 2175 - } SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ 2168 + } __packed 2169 + SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ 2176 2170 2177 2171 /* 2178 2172 * Readback a sub rect of an image in a guest-backed surface. 
After ··· 2187 2179 SVGA3dSurfaceImageId image; 2188 2180 SVGA3dBox box; 2189 2181 uint32 invertBox; 2190 - } 2182 + } __packed 2191 2183 SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ 2192 2184 2193 2185 /* ··· 2198 2190 typedef 2199 2191 struct SVGA3dCmdInvalidateGBImage { 2200 2192 SVGA3dSurfaceImageId image; 2201 - } SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ 2193 + } __packed 2194 + SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ 2202 2195 2203 2196 /* 2204 2197 * Invalidate an entire guest-backed surface. ··· 2209 2200 typedef 2210 2201 struct SVGA3dCmdInvalidateGBSurface { 2211 2202 uint32 sid; 2212 - } SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ 2203 + } __packed 2204 + SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ 2213 2205 2214 2206 /* 2215 2207 * Invalidate a sub rect of an image in a guest-backed surface. After ··· 2224 2214 SVGA3dSurfaceImageId image; 2225 2215 SVGA3dBox box; 2226 2216 uint32 invertBox; 2227 - } 2217 + } __packed 2228 2218 SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ 2229 2219 2230 2220 /* ··· 2234 2224 typedef 2235 2225 struct SVGA3dCmdDefineGBContext { 2236 2226 uint32 cid; 2237 - } SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ 2227 + } __packed 2228 + SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ 2238 2229 2239 2230 /* 2240 2231 * Destroy a guest-backed context. ··· 2244 2233 typedef 2245 2234 struct SVGA3dCmdDestroyGBContext { 2246 2235 uint32 cid; 2247 - } SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ 2236 + } __packed 2237 + SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ 2248 2238 2249 2239 /* 2250 2240 * Bind a guest-backed context. 
··· 2264 2252 uint32 cid; 2265 2253 SVGAMobId mobid; 2266 2254 uint32 validContents; 2267 - } SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ 2255 + } __packed 2256 + SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ 2268 2257 2269 2258 /* 2270 2259 * Readback a guest-backed context. ··· 2275 2262 typedef 2276 2263 struct SVGA3dCmdReadbackGBContext { 2277 2264 uint32 cid; 2278 - } SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ 2265 + } __packed 2266 + SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ 2279 2267 2280 2268 /* 2281 2269 * Invalidate a guest-backed context. ··· 2284 2270 typedef 2285 2271 struct SVGA3dCmdInvalidateGBContext { 2286 2272 uint32 cid; 2287 - } SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ 2273 + } __packed 2274 + SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ 2288 2275 2289 2276 /* 2290 2277 * Define a guest-backed shader. ··· 2296 2281 uint32 shid; 2297 2282 SVGA3dShaderType type; 2298 2283 uint32 sizeInBytes; 2299 - } SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ 2284 + } __packed 2285 + SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ 2300 2286 2301 2287 /* 2302 2288 * Bind a guest-backed shader. ··· 2307 2291 uint32 shid; 2308 2292 SVGAMobId mobid; 2309 2293 uint32 offsetInBytes; 2310 - } SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ 2294 + } __packed 2295 + SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ 2311 2296 2312 2297 /* 2313 2298 * Destroy a guest-backed shader. ··· 2316 2299 2317 2300 typedef struct SVGA3dCmdDestroyGBShader { 2318 2301 uint32 shid; 2319 - } SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ 2302 + } __packed 2303 + SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ 2320 2304 2321 2305 typedef 2322 2306 struct { ··· 2332 2314 * Note that FLOAT and INT constants are 4-dwords in length, while 2333 2315 * BOOL constants are 1-dword in length. 
2334 2316 */ 2335 - } SVGA3dCmdSetGBShaderConstInline; 2317 + } __packed 2318 + SVGA3dCmdSetGBShaderConstInline; 2336 2319 /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ 2337 2320 2338 2321 typedef 2339 2322 struct { 2340 2323 uint32 cid; 2341 2324 SVGA3dQueryType type; 2342 - } SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ 2325 + } __packed 2326 + SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ 2343 2327 2344 2328 typedef 2345 2329 struct { ··· 2349 2329 SVGA3dQueryType type; 2350 2330 SVGAMobId mobid; 2351 2331 uint32 offset; 2352 - } SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ 2332 + } __packed 2333 + SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ 2353 2334 2354 2335 2355 2336 /* ··· 2367 2346 SVGA3dQueryType type; 2368 2347 SVGAMobId mobid; 2369 2348 uint32 offset; 2370 - } SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ 2349 + } __packed 2350 + SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ 2371 2351 2372 2352 typedef 2373 2353 struct { 2374 2354 SVGAMobId mobid; 2375 2355 uint32 fbOffset; 2376 2356 uint32 initalized; 2377 - } 2357 + } __packed 2378 2358 SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ 2379 2359 2380 2360 typedef 2381 2361 struct { 2382 2362 SVGAMobId mobid; 2383 2363 uint32 gartOffset; 2384 - } 2364 + } __packed 2385 2365 SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ 2386 2366 2387 2367 ··· 2390 2368 struct { 2391 2369 uint32 gartOffset; 2392 2370 uint32 numPages; 2393 - } 2371 + } __packed 2394 2372 SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ 2395 2373 2396 2374 ··· 2407 2385 int32 xRoot; 2408 2386 int32 yRoot; 2409 2387 uint32 flags; 2410 - } 2388 + } __packed 2411 2389 SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ 2412 2390 2413 2391 typedef 2414 2392 struct { 2415 2393 uint32 stid; 2416 - } 2394 + } __packed 2417 2395 SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ 2418 2396 2419 2397 typedef 2420 
2398 struct { 2421 2399 uint32 stid; 2422 2400 SVGA3dSurfaceImageId image; 2423 - } 2401 + } __packed 2424 2402 SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ 2425 2403 2426 2404 typedef 2427 2405 struct { 2428 2406 uint32 stid; 2429 2407 SVGA3dBox box; 2430 - } 2408 + } __packed 2431 2409 SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ 2432 2410 2433 2411 /*
+7 -4
drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
··· 38 38 39 39 #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) 40 40 #define max_t(type, x, y) ((x) > (y) ? (x) : (y)) 41 + #define min_t(type, x, y) ((x) < (y) ? (x) : (y)) 41 42 #define surf_size_struct SVGA3dSize 42 43 #define u32 uint32 44 + #define u64 uint64_t 45 + #define U32_MAX ((u32)~0U) 43 46 44 47 #endif /* __KERNEL__ */ 45 48 ··· 707 704 708 705 static inline u32 clamped_umul32(u32 a, u32 b) 709 706 { 710 - uint64_t tmp = (uint64_t) a*b; 711 - return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp; 707 + u64 tmp = (u64) a*b; 708 + return (tmp > (u64) U32_MAX) ? U32_MAX : tmp; 712 709 } 713 710 714 711 static inline const struct svga3d_surface_desc * ··· 837 834 bool cubemap) 838 835 { 839 836 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); 840 - u32 total_size = 0; 837 + u64 total_size = 0; 841 838 u32 mip; 842 839 843 840 for (mip = 0; mip < num_mip_levels; mip++) { ··· 850 847 if (cubemap) 851 848 total_size *= SVGA3D_MAX_SURFACE_FACES; 852 849 853 - return total_size; 850 + return (u32) min_t(u64, total_size, (u64) U32_MAX); 854 851 } 855 852 856 853
+8 -1
drivers/gpu/drm/vmwgfx/svga_reg.h
··· 169 169 SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ 170 170 SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ 171 171 SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ 172 + SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */ 173 + SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */ 172 174 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ 173 175 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ 174 176 SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ 175 - SVGA_REG_TOP = 53, /* Must be 1 more than the last register */ 177 + SVGA_REG_CMD_PREPEND_LOW = 53, 178 + SVGA_REG_CMD_PREPEND_HIGH = 54, 179 + SVGA_REG_SCREENTARGET_MAX_WIDTH = 55, 180 + SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56, 181 + SVGA_REG_MOB_MAX_SIZE = 57, 182 + SVGA_REG_TOP = 58, /* Must be 1 more than the last register */ 176 183 177 184 SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ 178 185 /* Next 768 (== 256*3) registers exist for colormap */
+3 -6
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
··· 551 551 cmd->header.size = sizeof(cmd->body); 552 552 cmd->body.cid = bi->ctx->id; 553 553 cmd->body.type = bi->i1.shader_type; 554 - cmd->body.shid = 555 - cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 554 + cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 556 555 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 557 556 558 557 return 0; ··· 584 585 cmd->header.size = sizeof(cmd->body); 585 586 cmd->body.cid = bi->ctx->id; 586 587 cmd->body.type = bi->i1.rt_type; 587 - cmd->body.target.sid = 588 - cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 588 + cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 589 589 cmd->body.target.face = 0; 590 590 cmd->body.target.mipmap = 0; 591 591 vmw_fifo_commit(dev_priv, sizeof(*cmd)); ··· 626 628 cmd->body.c.cid = bi->ctx->id; 627 629 cmd->body.s1.stage = bi->i1.texture_stage; 628 630 cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; 629 - cmd->body.s1.value = 630 - cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 631 + cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 631 632 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 632 633 633 634 return 0;
+3
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 667 667 dev_priv->memory_size = 512*1024*1024; 668 668 } 669 669 dev_priv->max_mob_pages = 0; 670 + dev_priv->max_mob_size = 0; 670 671 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 671 672 uint64_t mem_size = 672 673 vmw_read(dev_priv, ··· 677 676 dev_priv->prim_bb_mem = 678 677 vmw_read(dev_priv, 679 678 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); 679 + dev_priv->max_mob_size = 680 + vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); 680 681 } else 681 682 dev_priv->prim_bb_mem = dev_priv->vram_size; 682 683
+1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 386 386 uint32_t max_gmr_ids; 387 387 uint32_t max_gmr_pages; 388 388 uint32_t max_mob_pages; 389 + uint32_t max_mob_size; 389 390 uint32_t memory_size; 390 391 bool has_gmr; 391 392 bool has_mob;
+6 -3
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 602 602 { 603 603 struct vmw_cid_cmd { 604 604 SVGA3dCmdHeader header; 605 - __le32 cid; 605 + uint32_t cid; 606 606 } *cmd; 607 607 608 608 cmd = container_of(header, struct vmw_cid_cmd, header); ··· 1835 1835 return 0; 1836 1836 } 1837 1837 1838 - static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = { 1838 + static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { 1839 1839 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, 1840 1840 false, false, false), 1841 1841 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, ··· 2032 2032 goto out_invalid; 2033 2033 2034 2034 entry = &vmw_cmd_entries[cmd_id]; 2035 + if (unlikely(!entry->func)) 2036 + goto out_invalid; 2037 + 2035 2038 if (unlikely(!entry->user_allow && !sw_context->kernel)) 2036 2039 goto out_privileged; 2037 2040 ··· 2472 2469 if (dev_priv->has_mob) { 2473 2470 ret = vmw_rebind_contexts(sw_context); 2474 2471 if (unlikely(ret != 0)) 2475 - goto out_err; 2472 + goto out_unlock_binding; 2476 2473 } 2477 2474 2478 2475 cmd = vmw_fifo_reserve(dev_priv, command_size);
+3
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
··· 102 102 vmw_fp->gb_aware = true; 103 103 param->value = dev_priv->max_mob_pages * PAGE_SIZE; 104 104 break; 105 + case DRM_VMW_PARAM_MAX_MOB_SIZE: 106 + param->value = dev_priv->max_mob_size; 107 + break; 105 108 default: 106 109 DRM_ERROR("Illegal vmwgfx get param request: %d\n", 107 110 param->param);
+9 -7
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
··· 371 371 TTM_REF_USAGE); 372 372 } 373 373 374 - int vmw_shader_alloc(struct vmw_private *dev_priv, 375 - struct vmw_dma_buffer *buffer, 376 - size_t shader_size, 377 - size_t offset, 378 - SVGA3dShaderType shader_type, 379 - struct ttm_object_file *tfile, 380 - u32 *handle) 374 + static int vmw_shader_alloc(struct vmw_private *dev_priv, 375 + struct vmw_dma_buffer *buffer, 376 + size_t shader_size, 377 + size_t offset, 378 + SVGA3dShaderType shader_type, 379 + struct ttm_object_file *tfile, 380 + u32 *handle) 381 381 { 382 382 struct vmw_user_shader *ushader; 383 383 struct vmw_resource *res, *tmp; ··· 779 779 int ret; 780 780 781 781 man = kzalloc(sizeof(*man), GFP_KERNEL); 782 + if (man == NULL) 783 + return ERR_PTR(-ENOMEM); 782 784 783 785 man->dev_priv = dev_priv; 784 786 INIT_LIST_HEAD(&man->list);
+3
drivers/hid/hid-apple.c
··· 469 469 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 470 470 USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), 471 471 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 472 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 473 + USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS), 474 + .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 472 475 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS), 473 476 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 474 477 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
+3
drivers/hid/hid-core.c
··· 1679 1679 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1680 1680 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) }, 1681 1681 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) }, 1682 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) }, 1682 1683 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 1683 1684 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1684 1685 { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, ··· 1780 1779 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) }, 1781 1780 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, 1782 1781 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, 1782 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2) }, 1783 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2) }, 1783 1784 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, 1784 1785 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, 1785 1786 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
+8 -3
drivers/hid/hid-hyperv.c
··· 157 157 u32 report_desc_size; 158 158 struct hv_input_dev_info hid_dev_info; 159 159 struct hid_device *hid_device; 160 + u8 input_buf[HID_MAX_BUFFER_SIZE]; 160 161 }; 161 162 162 163 ··· 257 256 struct synthhid_msg *hid_msg; 258 257 struct mousevsc_dev *input_dev = hv_get_drvdata(device); 259 258 struct synthhid_input_report *input_report; 259 + size_t len; 260 260 261 261 pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet + 262 262 (packet->offset8 << 3)); ··· 302 300 (struct synthhid_input_report *)pipe_msg->data; 303 301 if (!input_dev->init_complete) 304 302 break; 305 - hid_input_report(input_dev->hid_device, 306 - HID_INPUT_REPORT, input_report->buffer, 307 - input_report->header.size, 1); 303 + 304 + len = min(input_report->header.size, 305 + (u32)sizeof(input_dev->input_buf)); 306 + memcpy(input_dev->input_buf, input_report->buffer, len); 307 + hid_input_report(input_dev->hid_device, HID_INPUT_REPORT, 308 + input_dev->input_buf, len, 1); 308 309 break; 309 310 default: 310 311 pr_err("unsupported hid msg type - type %d len %d",
+8
drivers/hid/hid-ids.h
··· 135 135 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b 136 136 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255 137 137 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 138 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257 138 139 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 139 140 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 140 141 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 ··· 241 240 242 241 #define USB_VENDOR_ID_CYGNAL 0x10c4 243 242 #define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a 243 + #define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH 0x81b9 244 244 245 245 #define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244 246 246 ··· 453 451 #define USB_VENDOR_ID_INTEL_1 0x8087 454 452 #define USB_DEVICE_ID_INTEL_HID_SENSOR 0x09fa 455 453 454 + #define USB_VENDOR_ID_STM_0 0x0483 455 + #define USB_DEVICE_ID_STM_HID_SENSOR 0x91d1 456 + 456 457 #define USB_VENDOR_ID_ION 0x15e4 457 458 #define USB_DEVICE_ID_ICADE 0x0132 458 459 ··· 624 619 #define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 625 620 #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 626 621 #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c 622 + #define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7 623 + #define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9 627 624 628 625 #define USB_VENDOR_ID_MOJO 0x8282 629 626 #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 ··· 651 644 652 645 #define USB_VENDOR_ID_NEXIO 0x1870 653 646 #define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d 647 + #define USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750 0x0110 654 648 655 649 #define USB_VENDOR_ID_NEXTWINDOW 0x1926 656 650 #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
+1 -1
drivers/hid/hid-input.c
··· 1178 1178 1179 1179 /* fall back to generic raw-output-report */ 1180 1180 len = ((report->size - 1) >> 3) + 1 + (report->id > 0); 1181 - buf = kmalloc(len, GFP_KERNEL); 1181 + buf = hid_alloc_report_buf(report, GFP_KERNEL); 1182 1182 if (!buf) 1183 1183 return; 1184 1184
+4
drivers/hid/hid-microsoft.c
··· 208 208 .driver_data = MS_NOGET }, 209 209 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), 210 210 .driver_data = MS_DUPLICATE_USAGES }, 211 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), 212 + .driver_data = 0 }, 213 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), 214 + .driver_data = 0 }, 211 215 212 216 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT), 213 217 .driver_data = MS_PRESENTER },
+5
drivers/hid/hid-multitouch.c
··· 1166 1166 MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG, 1167 1167 USB_DEVICE_ID_MULTITOUCH_3200) }, 1168 1168 1169 + /* FocalTech Panels */ 1170 + { .driver_data = MT_CLS_SERIAL, 1171 + MT_USB_DEVICE(USB_VENDOR_ID_CYGNAL, 1172 + USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH) }, 1173 + 1169 1174 /* GeneralTouch panel */ 1170 1175 { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS, 1171 1176 MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+3
drivers/hid/hid-sensor-hub.c
··· 665 665 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1, 666 666 USB_DEVICE_ID_INTEL_HID_SENSOR), 667 667 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, 668 + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0, 669 + USB_DEVICE_ID_STM_HID_SENSOR), 670 + .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, 668 671 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID, 669 672 HID_ANY_ID) }, 670 673 { }
+1 -1
drivers/hid/i2c-hid/i2c-hid.c
··· 582 582 int ret; 583 583 int len = i2c_hid_get_report_length(rep) - 2; 584 584 585 - buf = kzalloc(len, GFP_KERNEL); 585 + buf = hid_alloc_report_buf(rep, GFP_KERNEL); 586 586 if (!buf) 587 587 return; 588 588
+1
drivers/hid/usbhid/hid-quirks.c
··· 74 74 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 75 75 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 76 76 { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, 77 + { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS }, 77 78 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, 78 79 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, 79 80 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
+1 -1
drivers/hwmon/max1668.c
··· 243 243 data->temp_min[index] = clamp_val(temp/1000, -128, 127); 244 244 if (i2c_smbus_write_byte_data(client, 245 245 MAX1668_REG_LIML_WR(index), 246 - data->temp_max[index])) 246 + data->temp_min[index])) 247 247 count = -EIO; 248 248 mutex_unlock(&data->update_lock); 249 249
+63 -42
drivers/iommu/arm-smmu.c
··· 79 79 80 80 #define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES) 81 81 #define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1)) 82 - #define ARM_SMMU_PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(pte_t)) 83 82 84 83 /* Stage-1 PTE */ 85 84 #define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6) ··· 190 191 #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) 191 192 #define CBAR_VMID_SHIFT 0 192 193 #define CBAR_VMID_MASK 0xff 194 + #define CBAR_S1_BPSHCFG_SHIFT 8 195 + #define CBAR_S1_BPSHCFG_MASK 3 196 + #define CBAR_S1_BPSHCFG_NSH 3 193 197 #define CBAR_S1_MEMATTR_SHIFT 12 194 198 #define CBAR_S1_MEMATTR_MASK 0xf 195 199 #define CBAR_S1_MEMATTR_WB 0xf ··· 395 393 struct arm_smmu_cfg root_cfg; 396 394 phys_addr_t output_mask; 397 395 398 - struct mutex lock; 396 + spinlock_t lock; 399 397 }; 400 398 401 399 static DEFINE_SPINLOCK(arm_smmu_devices_lock); ··· 634 632 return IRQ_HANDLED; 635 633 } 636 634 635 + static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr, 636 + size_t size) 637 + { 638 + unsigned long offset = (unsigned long)addr & ~PAGE_MASK; 639 + 640 + 641 + /* Ensure new page tables are visible to the hardware walker */ 642 + if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { 643 + dsb(); 644 + } else { 645 + /* 646 + * If the SMMU can't walk tables in the CPU caches, treat them 647 + * like non-coherent DMA since we need to flush the new entries 648 + * all the way out to memory. There's no possibility of 649 + * recursion here as the SMMU table walker will not be wired 650 + * through another SMMU. 
651 + */ 652 + dma_map_page(smmu->dev, virt_to_page(addr), offset, size, 653 + DMA_TO_DEVICE); 654 + } 655 + } 656 + 637 657 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) 638 658 { 639 659 u32 reg; ··· 674 650 if (smmu->version == 1) 675 651 reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT; 676 652 677 - /* Use the weakest memory type, so it is overridden by the pte */ 678 - if (stage1) 679 - reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); 680 - else 653 + /* 654 + * Use the weakest shareability/memory types, so they are 655 + * overridden by the ttbcr/pte. 656 + */ 657 + if (stage1) { 658 + reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) | 659 + (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); 660 + } else { 681 661 reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT; 662 + } 682 663 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx)); 683 664 684 665 if (smmu->version > 1) { ··· 744 715 } 745 716 746 717 /* TTBR0 */ 718 + arm_smmu_flush_pgtable(smmu, root_cfg->pgd, 719 + PTRS_PER_PGD * sizeof(pgd_t)); 747 720 reg = __pa(root_cfg->pgd); 748 721 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); 749 722 reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32; ··· 932 901 goto out_free_domain; 933 902 smmu_domain->root_cfg.pgd = pgd; 934 903 935 - mutex_init(&smmu_domain->lock); 904 + spin_lock_init(&smmu_domain->lock); 936 905 domain->priv = smmu_domain; 937 906 return 0; 938 907 ··· 1159 1128 struct arm_smmu_domain *smmu_domain = domain->priv; 1160 1129 struct arm_smmu_device *device_smmu = dev->archdata.iommu; 1161 1130 struct arm_smmu_master *master; 1131 + unsigned long flags; 1162 1132 1163 1133 if (!device_smmu) { 1164 1134 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); ··· 1170 1138 * Sanity check the domain. We don't currently support domains 1171 1139 * that cross between different SMMU chains. 
1172 1140 */ 1173 - mutex_lock(&smmu_domain->lock); 1141 + spin_lock_irqsave(&smmu_domain->lock, flags); 1174 1142 if (!smmu_domain->leaf_smmu) { 1175 1143 /* Now that we have a master, we can finalise the domain */ 1176 1144 ret = arm_smmu_init_domain_context(domain, dev); ··· 1185 1153 dev_name(device_smmu->dev)); 1186 1154 goto err_unlock; 1187 1155 } 1188 - mutex_unlock(&smmu_domain->lock); 1156 + spin_unlock_irqrestore(&smmu_domain->lock, flags); 1189 1157 1190 1158 /* Looks ok, so add the device to the domain */ 1191 1159 master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); ··· 1195 1163 return arm_smmu_domain_add_master(smmu_domain, master); 1196 1164 1197 1165 err_unlock: 1198 - mutex_unlock(&smmu_domain->lock); 1166 + spin_unlock_irqrestore(&smmu_domain->lock, flags); 1199 1167 return ret; 1200 1168 } 1201 1169 ··· 1207 1175 master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); 1208 1176 if (master) 1209 1177 arm_smmu_domain_remove_master(smmu_domain, master); 1210 - } 1211 - 1212 - static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr, 1213 - size_t size) 1214 - { 1215 - unsigned long offset = (unsigned long)addr & ~PAGE_MASK; 1216 - 1217 - /* 1218 - * If the SMMU can't walk tables in the CPU caches, treat them 1219 - * like non-coherent DMA since we need to flush the new entries 1220 - * all the way out to memory. There's no possibility of recursion 1221 - * here as the SMMU table walker will not be wired through another 1222 - * SMMU. 
1223 - */ 1224 - if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)) 1225 - dma_map_page(smmu->dev, virt_to_page(addr), offset, size, 1226 - DMA_TO_DEVICE); 1227 1178 } 1228 1179 1229 1180 static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, ··· 1225 1210 1226 1211 if (pmd_none(*pmd)) { 1227 1212 /* Allocate a new set of tables */ 1228 - pgtable_t table = alloc_page(PGALLOC_GFP); 1213 + pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO); 1229 1214 if (!table) 1230 1215 return -ENOMEM; 1231 1216 1232 - arm_smmu_flush_pgtable(smmu, page_address(table), 1233 - ARM_SMMU_PTE_HWTABLE_SIZE); 1217 + arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE); 1234 1218 if (!pgtable_page_ctor(table)) { 1235 1219 __free_page(table); 1236 1220 return -ENOMEM; ··· 1331 1317 1332 1318 #ifndef __PAGETABLE_PMD_FOLDED 1333 1319 if (pud_none(*pud)) { 1334 - pmd = pmd_alloc_one(NULL, addr); 1320 + pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); 1335 1321 if (!pmd) 1336 1322 return -ENOMEM; 1323 + 1324 + arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE); 1325 + pud_populate(NULL, pud, pmd); 1326 + arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud)); 1327 + 1328 + pmd += pmd_index(addr); 1337 1329 } else 1338 1330 #endif 1339 1331 pmd = pmd_offset(pud, addr); ··· 1348 1328 next = pmd_addr_end(addr, end); 1349 1329 ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn, 1350 1330 flags, stage); 1351 - pud_populate(NULL, pud, pmd); 1352 - arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud)); 1353 1331 phys += next - addr; 1354 1332 } while (pmd++, addr = next, addr < end); 1355 1333 ··· 1364 1346 1365 1347 #ifndef __PAGETABLE_PUD_FOLDED 1366 1348 if (pgd_none(*pgd)) { 1367 - pud = pud_alloc_one(NULL, addr); 1349 + pud = (pud_t *)get_zeroed_page(GFP_ATOMIC); 1368 1350 if (!pud) 1369 1351 return -ENOMEM; 1352 + 1353 + arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE); 1354 + pgd_populate(NULL, pgd, pud); 1355 + arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd)); 1356 + 1357 + pud += 
pud_index(addr); 1370 1358 } else 1371 1359 #endif 1372 1360 pud = pud_offset(pgd, addr); ··· 1381 1357 next = pud_addr_end(addr, end); 1382 1358 ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys, 1383 1359 flags, stage); 1384 - pgd_populate(NULL, pud, pgd); 1385 - arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd)); 1386 1360 phys += next - addr; 1387 1361 } while (pud++, addr = next, addr < end); 1388 1362 ··· 1397 1375 struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; 1398 1376 pgd_t *pgd = root_cfg->pgd; 1399 1377 struct arm_smmu_device *smmu = root_cfg->smmu; 1378 + unsigned long irqflags; 1400 1379 1401 1380 if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) { 1402 1381 stage = 2; ··· 1420 1397 if (paddr & ~output_mask) 1421 1398 return -ERANGE; 1422 1399 1423 - mutex_lock(&smmu_domain->lock); 1400 + spin_lock_irqsave(&smmu_domain->lock, irqflags); 1424 1401 pgd += pgd_index(iova); 1425 1402 end = iova + size; 1426 1403 do { ··· 1436 1413 } while (pgd++, iova != end); 1437 1414 1438 1415 out_unlock: 1439 - mutex_unlock(&smmu_domain->lock); 1440 - 1441 - /* Ensure new page tables are visible to the hardware walker */ 1442 - if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) 1443 - dsb(); 1416 + spin_unlock_irqrestore(&smmu_domain->lock, irqflags); 1444 1417 1445 1418 return ret; 1446 1419 } ··· 2006 1987 if (!iommu_present(&platform_bus_type)) 2007 1988 bus_set_iommu(&platform_bus_type, &arm_smmu_ops); 2008 1989 1990 + #ifdef CONFIG_ARM_AMBA 2009 1991 if (!iommu_present(&amba_bustype)) 2010 1992 bus_set_iommu(&amba_bustype, &arm_smmu_ops); 1993 + #endif 2011 1994 2012 1995 return 0; 2013 1996 }
+19 -3
drivers/irqchip/irq-orion.c
··· 111 111 static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc) 112 112 { 113 113 struct irq_domain *d = irq_get_handler_data(irq); 114 - struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, irq); 114 + 115 + struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0); 115 116 u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) & 116 117 gc->mask_cache; 117 118 ··· 122 121 generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq)); 123 122 stat &= ~(1 << hwirq); 124 123 } 124 + } 125 + 126 + /* 127 + * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register. 128 + * To avoid interrupt events on stale irqs, we clear them before unmask. 129 + */ 130 + static unsigned int orion_bridge_irq_startup(struct irq_data *d) 131 + { 132 + struct irq_chip_type *ct = irq_data_get_chip_type(d); 133 + 134 + ct->chip.irq_ack(d); 135 + ct->chip.irq_unmask(d); 136 + return 0; 125 137 } 126 138 127 139 static int __init orion_bridge_irq_init(struct device_node *np, ··· 157 143 } 158 144 159 145 ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name, 160 - handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); 146 + handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); 161 147 if (ret) { 162 148 pr_err("%s: unable to alloc irq domain gc\n", np->name); 163 149 return ret; ··· 190 176 191 177 gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE; 192 178 gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK; 179 + gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup; 193 180 gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit; 194 181 gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; 195 182 gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; 196 183 197 - /* mask all interrupts */ 184 + /* mask and clear all interrupts */ 198 185 writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK); 186 + writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE); 199 187 200 188 irq_set_handler_data(irq, domain); 201 189 
irq_set_chained_handler(irq, orion_bridge_irq_handler);
+10 -2
drivers/mfd/da9055-i2c.c
··· 53 53 return 0; 54 54 } 55 55 56 + /* 57 + * DO NOT change the device Ids. The naming is intentionally specific as both 58 + * the PMIC and CODEC parts of this chip are instantiated separately as I2C 59 + * devices (both have configurable I2C addresses, and are to all intents and 60 + * purposes separate). As a result there are specific DA9055 ids for PMIC 61 + * and CODEC, which must be different to operate together. 62 + */ 56 63 static struct i2c_device_id da9055_i2c_id[] = { 57 - {"da9055", 0}, 64 + {"da9055-pmic", 0}, 58 65 { } 59 66 }; 67 + MODULE_DEVICE_TABLE(i2c, da9055_i2c_id); 60 68 61 69 static struct i2c_driver da9055_i2c_driver = { 62 70 .probe = da9055_i2c_probe, 63 71 .remove = da9055_i2c_remove, 64 72 .id_table = da9055_i2c_id, 65 73 .driver = { 66 - .name = "da9055", 74 + .name = "da9055-pmic", 67 75 .owner = THIS_MODULE, 68 76 }, 69 77 };
+2
drivers/mfd/max14577.c
··· 173 173 }; 174 174 MODULE_DEVICE_TABLE(i2c, max14577_i2c_id); 175 175 176 + #ifdef CONFIG_PM_SLEEP 176 177 static int max14577_suspend(struct device *dev) 177 178 { 178 179 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); ··· 209 208 210 209 return 0; 211 210 } 211 + #endif /* CONFIG_PM_SLEEP */ 212 212 213 213 static struct of_device_id max14577_dt_match[] = { 214 214 { .compatible = "maxim,max14577", },
+3 -3
drivers/mfd/max8997.c
··· 164 164 return pd; 165 165 } 166 166 167 - static inline int max8997_i2c_get_driver_data(struct i2c_client *i2c, 167 + static inline unsigned long max8997_i2c_get_driver_data(struct i2c_client *i2c, 168 168 const struct i2c_device_id *id) 169 169 { 170 170 if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) { 171 171 const struct of_device_id *match; 172 172 match = of_match_node(max8997_pmic_dt_match, i2c->dev.of_node); 173 - return (int)match->data; 173 + return (unsigned long)match->data; 174 174 } 175 - return (int)id->driver_data; 175 + return id->driver_data; 176 176 } 177 177 178 178 static int max8997_i2c_probe(struct i2c_client *i2c,
+3 -3
drivers/mfd/max8998.c
··· 169 169 return pd; 170 170 } 171 171 172 - static inline int max8998_i2c_get_driver_data(struct i2c_client *i2c, 172 + static inline unsigned long max8998_i2c_get_driver_data(struct i2c_client *i2c, 173 173 const struct i2c_device_id *id) 174 174 { 175 175 if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) { 176 176 const struct of_device_id *match; 177 177 match = of_match_node(max8998_dt_match, i2c->dev.of_node); 178 - return (int)(long)match->data; 178 + return (unsigned long)match->data; 179 179 } 180 180 181 - return (int)id->driver_data; 181 + return id->driver_data; 182 182 } 183 183 184 184 static int max8998_i2c_probe(struct i2c_client *i2c,
+2
drivers/mfd/sec-core.c
··· 315 315 return 0; 316 316 } 317 317 318 + #ifdef CONFIG_PM_SLEEP 318 319 static int sec_pmic_suspend(struct device *dev) 319 320 { 320 321 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); ··· 350 349 351 350 return 0; 352 351 } 352 + #endif /* CONFIG_PM_SLEEP */ 353 353 354 354 static SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops, sec_pmic_suspend, sec_pmic_resume); 355 355
+2 -2
drivers/mfd/tps65217.c
··· 158 158 { 159 159 struct tps65217 *tps; 160 160 unsigned int version; 161 - unsigned int chip_id = ids->driver_data; 161 + unsigned long chip_id = ids->driver_data; 162 162 const struct of_device_id *match; 163 163 bool status_off = false; 164 164 int ret; ··· 170 170 "Failed to find matching dt id\n"); 171 171 return -EINVAL; 172 172 } 173 - chip_id = (unsigned int)(unsigned long)match->data; 173 + chip_id = (unsigned long)match->data; 174 174 status_off = of_property_read_bool(client->dev.of_node, 175 175 "ti,pmic-shutdown-controller"); 176 176 }
+1 -1
drivers/mfd/wm8994-core.c
··· 636 636 if (i2c->dev.of_node) { 637 637 of_id = of_match_device(wm8994_of_match, &i2c->dev); 638 638 if (of_id) 639 - wm8994->type = (int)of_id->data; 639 + wm8994->type = (enum wm8994_type)of_id->data; 640 640 } else { 641 641 wm8994->type = id->driver_data; 642 642 }
+3 -1
drivers/misc/mei/client.c
··· 666 666 goto err; 667 667 668 668 cb->fop_type = MEI_FOP_READ; 669 - cl->read_cb = cb; 670 669 if (dev->hbuf_is_ready) { 671 670 dev->hbuf_is_ready = false; 672 671 if (mei_hbm_cl_flow_control_req(dev, cl)) { ··· 677 678 } else { 678 679 list_add_tail(&cb->list, &dev->ctrl_wr_list.list); 679 680 } 681 + 682 + cl->read_cb = cb; 683 + 680 684 return rets; 681 685 err: 682 686 mei_io_cb_free(cb);
+1 -1
drivers/mmc/card/queue.c
··· 197 197 struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; 198 198 199 199 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) 200 - limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; 200 + limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; 201 201 202 202 mq->card = card; 203 203 mq->queue = blk_init_queue(mmc_request_fn, lock);
+1 -1
drivers/net/Kconfig
··· 139 139 This adds a specialized tap character device driver that is based 140 140 on the MAC-VLAN network interface, called macvtap. A macvtap device 141 141 can be added in the same way as a macvlan device, using 'type 142 - macvlan', and then be accessed through the tap user space interface. 142 + macvtap', and then be accessed through the tap user space interface. 143 143 144 144 To compile this driver as a module, choose M here: the module 145 145 will be called macvtap.
+2 -4
drivers/net/bonding/bond_3ad.c
··· 1796 1796 BOND_AD_INFO(bond).agg_select_timer = timeout; 1797 1797 } 1798 1798 1799 - static u16 aggregator_identifier; 1800 - 1801 1799 /** 1802 1800 * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures 1803 1801 * @bond: bonding struct to work on ··· 1809 1811 if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr), 1810 1812 bond->dev->dev_addr)) { 1811 1813 1812 - aggregator_identifier = 0; 1814 + BOND_AD_INFO(bond).aggregator_identifier = 0; 1813 1815 1814 1816 BOND_AD_INFO(bond).system.sys_priority = 0xFFFF; 1815 1817 BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr); ··· 1878 1880 ad_initialize_agg(aggregator); 1879 1881 1880 1882 aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr); 1881 - aggregator->aggregator_identifier = (++aggregator_identifier); 1883 + aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier; 1882 1884 aggregator->slave = slave; 1883 1885 aggregator->is_active = 0; 1884 1886 aggregator->num_of_ports = 0;
+1
drivers/net/bonding/bond_3ad.h
··· 253 253 struct ad_bond_info { 254 254 struct ad_system system; /* 802.3ad system structure */ 255 255 u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes 256 + u16 aggregator_identifier; 256 257 }; 257 258 258 259 struct ad_slave_info {
+8 -1
drivers/net/bonding/bond_main.c
··· 1543 1543 bond_set_carrier(bond); 1544 1544 1545 1545 if (USES_PRIMARY(bond->params.mode)) { 1546 + block_netpoll_tx(); 1546 1547 write_lock_bh(&bond->curr_slave_lock); 1547 1548 bond_select_active_slave(bond); 1548 1549 write_unlock_bh(&bond->curr_slave_lock); 1550 + unblock_netpoll_tx(); 1549 1551 } 1550 1552 1551 1553 pr_info("%s: enslaving %s as a%s interface with a%s link.\n", ··· 1573 1571 if (bond->primary_slave == new_slave) 1574 1572 bond->primary_slave = NULL; 1575 1573 if (bond->curr_active_slave == new_slave) { 1574 + block_netpoll_tx(); 1576 1575 write_lock_bh(&bond->curr_slave_lock); 1577 1576 bond_change_active_slave(bond, NULL); 1578 1577 bond_select_active_slave(bond); 1579 1578 write_unlock_bh(&bond->curr_slave_lock); 1579 + unblock_netpoll_tx(); 1580 1580 } 1581 1581 slave_disable_netpoll(new_slave); 1582 1582 ··· 2868 2864 pr_info("%s: Primary slave changed to %s, reselecting active slave.\n", 2869 2865 bond->dev->name, bond->primary_slave ? slave_dev->name : 2870 2866 "none"); 2867 + 2868 + block_netpoll_tx(); 2871 2869 write_lock_bh(&bond->curr_slave_lock); 2872 2870 bond_select_active_slave(bond); 2873 2871 write_unlock_bh(&bond->curr_slave_lock); 2872 + unblock_netpoll_tx(); 2874 2873 break; 2875 2874 case NETDEV_FEAT_CHANGE: 2876 2875 bond_compute_features(bond); ··· 3707 3700 3708 3701 3709 3702 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, 3710 - void *accel_priv) 3703 + void *accel_priv, select_queue_fallback_t fallback) 3711 3704 { 3712 3705 /* 3713 3706 * This helper function exists to help dev_pick_tx get the correct
+1 -1
drivers/net/bonding/bond_options.c
··· 14 14 #include <linux/errno.h> 15 15 #include <linux/if.h> 16 16 #include <linux/netdevice.h> 17 - #include <linux/rwlock.h> 17 + #include <linux/spinlock.h> 18 18 #include <linux/rcupdate.h> 19 19 #include <linux/ctype.h> 20 20 #include <linux/inet.h>
+2
drivers/net/can/usb/kvaser_usb.c
··· 473 473 return err; 474 474 475 475 dev->nchannels = msg.u.cardinfo.nchannels; 476 + if (dev->nchannels > MAX_NET_DEVICES) 477 + return -EINVAL; 476 478 477 479 return 0; 478 480 }
+2 -2
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 1873 1873 } 1874 1874 1875 1875 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, 1876 - void *accel_priv) 1876 + void *accel_priv, select_queue_fallback_t fallback) 1877 1877 { 1878 1878 struct bnx2x *bp = netdev_priv(dev); 1879 1879 ··· 1895 1895 } 1896 1896 1897 1897 /* select a non-FCoE queue */ 1898 - return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); 1898 + return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); 1899 1899 } 1900 1900 1901 1901 void bnx2x_set_num_queues(struct bnx2x *bp)
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
··· 496 496 497 497 /* select_queue callback */ 498 498 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, 499 - void *accel_priv); 499 + void *accel_priv, select_queue_fallback_t fallback); 500 500 501 501 static inline void bnx2x_update_rx_prod(struct bnx2x *bp, 502 502 struct bnx2x_fastpath *fp,
+1
drivers/net/ethernet/dec/tulip/tulip_core.c
··· 1939 1939 pci_iounmap(pdev, tp->base_addr); 1940 1940 free_netdev (dev); 1941 1941 pci_release_regions (pdev); 1942 + pci_disable_device(pdev); 1942 1943 1943 1944 /* pci_power_off (pdev, -1); */ 1944 1945 }
+2 -2
drivers/net/ethernet/freescale/fec_main.c
··· 1778 1778 struct fec_enet_private *fep = netdev_priv(ndev); 1779 1779 int ret; 1780 1780 1781 - napi_enable(&fep->napi); 1782 - 1783 1781 /* I should reset the ring buffers here, but I don't yet know 1784 1782 * a simple way to do that. 1785 1783 */ ··· 1792 1794 fec_enet_free_buffers(ndev); 1793 1795 return ret; 1794 1796 } 1797 + 1798 + napi_enable(&fep->napi); 1795 1799 phy_start(fep->phy_dev); 1796 1800 netif_start_queue(ndev); 1797 1801 fep->opened = 1;
+3 -3
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 6881 6881 } 6882 6882 6883 6883 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, 6884 - void *accel_priv) 6884 + void *accel_priv, select_queue_fallback_t fallback) 6885 6885 { 6886 6886 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; 6887 6887 #ifdef IXGBE_FCOE ··· 6907 6907 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 6908 6908 break; 6909 6909 default: 6910 - return __netdev_pick_tx(dev, skb); 6910 + return fallback(dev, skb); 6911 6911 } 6912 6912 6913 6913 f = &adapter->ring_feature[RING_F_FCOE]; ··· 6920 6920 6921 6921 return txq + f->offset; 6922 6922 #else 6923 - return __netdev_pick_tx(dev, skb); 6923 + return fallback(dev, skb); 6924 6924 #endif 6925 6925 } 6926 6926
+1 -1
drivers/net/ethernet/lantiq_etop.c
··· 619 619 620 620 static u16 621 621 ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, 622 - void *accel_priv) 622 + void *accel_priv, select_queue_fallback_t fallback) 623 623 { 624 624 /* we are currently only using the first queue */ 625 625 return 0;
+3 -3
drivers/net/ethernet/marvell/Kconfig
··· 43 43 This driver is used by the MV643XX_ETH and MVNETA drivers. 44 44 45 45 config MVNETA 46 - tristate "Marvell Armada 370/XP network interface support" 47 - depends on MACH_ARMADA_370_XP 46 + tristate "Marvell Armada 370/38x/XP network interface support" 47 + depends on PLAT_ORION 48 48 select MVMDIO 49 49 ---help--- 50 50 This driver supports the network interface units in the 51 - Marvell ARMADA XP and ARMADA 370 SoC family. 51 + Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family. 52 52 53 53 Note that this driver is distinct from the mv643xx_eth 54 54 driver, which should be used for the older Marvell SoCs
+2 -2
drivers/net/ethernet/mellanox/mlx4/en_tx.c
··· 629 629 } 630 630 631 631 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, 632 - void *accel_priv) 632 + void *accel_priv, select_queue_fallback_t fallback) 633 633 { 634 634 struct mlx4_en_priv *priv = netdev_priv(dev); 635 635 u16 rings_p_up = priv->num_tx_rings_p_up; ··· 641 641 if (vlan_tx_tag_present(skb)) 642 642 up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; 643 643 644 - return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up; 644 + return fallback(dev, skb) % rings_p_up + up * rings_p_up; 645 645 } 646 646 647 647 static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
+1 -1
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
··· 723 723 724 724 void mlx4_en_tx_irq(struct mlx4_cq *mcq); 725 725 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, 726 - void *accel_priv); 726 + void *accel_priv, select_queue_fallback_t fallback); 727 727 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 728 728 729 729 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+11
drivers/net/ethernet/stmicro/stmmac/Kconfig
··· 37 37 stmmac device driver. This driver is used for A20/A31 38 38 GMAC ethernet controller. 39 39 40 + config DWMAC_STI 41 + bool "STi GMAC support" 42 + depends on STMMAC_PLATFORM && ARCH_STI 43 + default y 44 + ---help--- 45 + Support for ethernet controller on STi SoCs. 46 + 47 + This selects STi SoC glue layer support for the stmmac 48 + device driver. This driver is used for the STi series 49 + SoCs GMAC ethernet controller. 50 + 51 + config STMMAC_PCI 52 + bool "STMMAC PCI bus support" 53 + depends on STMMAC_ETH && PCI
+1
drivers/net/ethernet/stmicro/stmmac/Makefile
··· 2 2 stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o 3 3 stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o 4 4 stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o 5 + stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o 5 6 stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ 6 7 chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ 7 8 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
+330
drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
··· 1 + /** 2 + * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer 3 + * 4 + * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited 5 + * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com> 6 + * 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + */ 13 + 14 + #include <linux/kernel.h> 15 + #include <linux/slab.h> 16 + #include <linux/platform_device.h> 17 + #include <linux/stmmac.h> 18 + #include <linux/phy.h> 19 + #include <linux/mfd/syscon.h> 20 + #include <linux/regmap.h> 21 + #include <linux/clk.h> 22 + #include <linux/of.h> 23 + #include <linux/of_net.h> 24 + 25 + /** 26 + * STi GMAC glue logic. 27 + * -------------------- 28 + * 29 + * _ 30 + * | \ 31 + * --------|0 \ ETH_SEL_INTERNAL_NOTEXT_PHYCLK 32 + * phyclk | |___________________________________________ 33 + * | | | (phyclk-in) 34 + * --------|1 / | 35 + * int-clk |_ / | 36 + * | _ 37 + * | | \ 38 + * |_______|1 \ ETH_SEL_TX_RETIME_CLK 39 + * | |___________________________ 40 + * | | (tx-retime-clk) 41 + * _______|0 / 42 + * | |_ / 43 + * _ | 44 + * | \ | 45 + * --------|0 \ | 46 + * clk_125 | |__| 47 + * | | ETH_SEL_TXCLK_NOT_CLK125 48 + * --------|1 / 49 + * txclk |_ / 50 + * 51 + * 52 + * ETH_SEL_INTERNAL_NOTEXT_PHYCLK is valid only for RMII where PHY can 53 + * generate 50MHz clock or MAC can generate it. 54 + * This bit is configured by "st,ext-phyclk" property. 55 + * 56 + * ETH_SEL_TXCLK_NOT_CLK125 is only valid for gigabit modes, where the 125Mhz 57 + * clock either comes from clk-125 pin or txclk pin. This configuration is 58 + * totally driven by the board wiring. This bit is configured by 59 + * "st,tx-retime-src" property. 
60 + * 61 + * TXCLK configuration is different for different phy interface modes 62 + * and changes according to link speed in modes like RGMII. 63 + * 64 + * Below table summarizes the clock requirement and clock sources for 65 + * supported phy interface modes with link speeds. 66 + * ________________________________________________ 67 + *| PHY_MODE | 1000 Mbit Link | 100 Mbit Link | 68 + * ------------------------------------------------ 69 + *| MII | n/a | 25Mhz | 70 + *| | | txclk | 71 + * ------------------------------------------------ 72 + *| GMII | 125Mhz | 25Mhz | 73 + *| | clk-125/txclk | txclk | 74 + * ------------------------------------------------ 75 + *| RGMII | 125Mhz | 25Mhz | 76 + *| | clk-125/txclk | clkgen | 77 + * ------------------------------------------------ 78 + *| RMII | n/a | 25Mhz | 79 + *| | |clkgen/phyclk-in | 80 + * ------------------------------------------------ 81 + * 82 + * TX lines are always retimed with a clk, which can vary depending 83 + * on the board configuration. Below is the table of these bits 84 + * in eth configuration register depending on source of retime clk. 
85 + * 86 + *--------------------------------------------------------------- 87 + * src | tx_rt_clk | int_not_ext_phyclk | txclk_n_clk125| 88 + *--------------------------------------------------------------- 89 + * txclk | 0 | n/a | 1 | 90 + *--------------------------------------------------------------- 91 + * ck_125| 0 | n/a | 0 | 92 + *--------------------------------------------------------------- 93 + * phyclk| 1 | 0 | n/a | 94 + *--------------------------------------------------------------- 95 + * clkgen| 1 | 1 | n/a | 96 + *--------------------------------------------------------------- 97 + */ 98 + 99 + /* Register definition */ 100 + 101 + /* 3 bits [8:6] 102 + * [6:6] ETH_SEL_TXCLK_NOT_CLK125 103 + * [7:7] ETH_SEL_INTERNAL_NOTEXT_PHYCLK 104 + * [8:8] ETH_SEL_TX_RETIME_CLK 105 + * 106 + */ 107 + 108 + #define TX_RETIME_SRC_MASK GENMASK(8, 6) 109 + #define ETH_SEL_TX_RETIME_CLK BIT(8) 110 + #define ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7) 111 + #define ETH_SEL_TXCLK_NOT_CLK125 BIT(6) 112 + 113 + #define ENMII_MASK GENMASK(5, 5) 114 + #define ENMII BIT(5) 115 + 116 + /** 117 + * 3 bits [4:2] 118 + * 000-GMII/MII 119 + * 001-RGMII 120 + * 010-SGMII 121 + * 100-RMII 122 + */ 123 + #define MII_PHY_SEL_MASK GENMASK(4, 2) 124 + #define ETH_PHY_SEL_RMII BIT(4) 125 + #define ETH_PHY_SEL_SGMII BIT(3) 126 + #define ETH_PHY_SEL_RGMII BIT(2) 127 + #define ETH_PHY_SEL_GMII 0x0 128 + #define ETH_PHY_SEL_MII 0x0 129 + 130 + #define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \ 131 + iface == PHY_INTERFACE_MODE_RGMII_ID || \ 132 + iface == PHY_INTERFACE_MODE_RGMII_RXID || \ 133 + iface == PHY_INTERFACE_MODE_RGMII_TXID) 134 + 135 + #define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \ 136 + iface == PHY_INTERFACE_MODE_GMII) 137 + 138 + struct sti_dwmac { 139 + int interface; 140 + bool ext_phyclk; 141 + bool is_tx_retime_src_clk_125; 142 + struct clk *clk; 143 + int reg; 144 + struct device *dev; 145 + struct regmap *regmap; 146 + }; 
147 + 148 + static u32 phy_intf_sels[] = { 149 + [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII, 150 + [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII, 151 + [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII, 152 + [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII, 153 + [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII, 154 + [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII, 155 + }; 156 + 157 + enum { 158 + TX_RETIME_SRC_NA = 0, 159 + TX_RETIME_SRC_TXCLK = 1, 160 + TX_RETIME_SRC_CLK_125, 161 + TX_RETIME_SRC_PHYCLK, 162 + TX_RETIME_SRC_CLKGEN, 163 + }; 164 + 165 + static const char *const tx_retime_srcs[] = { 166 + [TX_RETIME_SRC_NA] = "", 167 + [TX_RETIME_SRC_TXCLK] = "txclk", 168 + [TX_RETIME_SRC_CLK_125] = "clk_125", 169 + [TX_RETIME_SRC_PHYCLK] = "phyclk", 170 + [TX_RETIME_SRC_CLKGEN] = "clkgen", 171 + }; 172 + 173 + static u32 tx_retime_val[] = { 174 + [TX_RETIME_SRC_TXCLK] = ETH_SEL_TXCLK_NOT_CLK125, 175 + [TX_RETIME_SRC_CLK_125] = 0x0, 176 + [TX_RETIME_SRC_PHYCLK] = ETH_SEL_TX_RETIME_CLK, 177 + [TX_RETIME_SRC_CLKGEN] = ETH_SEL_TX_RETIME_CLK | 178 + ETH_SEL_INTERNAL_NOTEXT_PHYCLK, 179 + }; 180 + 181 + static void setup_retime_src(struct sti_dwmac *dwmac, u32 spd) 182 + { 183 + u32 src = 0, freq = 0; 184 + 185 + if (spd == SPEED_100) { 186 + if (dwmac->interface == PHY_INTERFACE_MODE_MII || 187 + dwmac->interface == PHY_INTERFACE_MODE_GMII) { 188 + src = TX_RETIME_SRC_TXCLK; 189 + } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) { 190 + if (dwmac->ext_phyclk) { 191 + src = TX_RETIME_SRC_PHYCLK; 192 + } else { 193 + src = TX_RETIME_SRC_CLKGEN; 194 + freq = 50000000; 195 + } 196 + 197 + } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) { 198 + src = TX_RETIME_SRC_CLKGEN; 199 + freq = 25000000; 200 + } 201 + 202 + if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk) 203 + clk_set_rate(dwmac->clk, freq); 204 + 205 + } else if (spd == SPEED_1000) { 206 + if (dwmac->is_tx_retime_src_clk_125) 207 + src = TX_RETIME_SRC_CLK_125; 208 + else 209 + src = TX_RETIME_SRC_TXCLK; 210 + } 
211 + 212 + regmap_update_bits(dwmac->regmap, dwmac->reg, 213 + TX_RETIME_SRC_MASK, tx_retime_val[src]); 214 + } 215 + 216 + static void sti_dwmac_exit(struct platform_device *pdev, void *priv) 217 + { 218 + struct sti_dwmac *dwmac = priv; 219 + 220 + if (dwmac->clk) 221 + clk_disable_unprepare(dwmac->clk); 222 + } 223 + 224 + static void sti_fix_mac_speed(void *priv, unsigned int spd) 225 + { 226 + struct sti_dwmac *dwmac = priv; 227 + 228 + setup_retime_src(dwmac, spd); 229 + 230 + return; 231 + } 232 + 233 + static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, 234 + struct platform_device *pdev) 235 + { 236 + struct resource *res; 237 + struct device *dev = &pdev->dev; 238 + struct device_node *np = dev->of_node; 239 + struct regmap *regmap; 240 + int err; 241 + 242 + if (!np) 243 + return -EINVAL; 244 + 245 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf"); 246 + if (!res) 247 + return -ENODATA; 248 + 249 + regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon"); 250 + if (IS_ERR(regmap)) 251 + return PTR_ERR(regmap); 252 + 253 + dwmac->dev = dev; 254 + dwmac->interface = of_get_phy_mode(np); 255 + dwmac->regmap = regmap; 256 + dwmac->reg = res->start; 257 + dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk"); 258 + dwmac->is_tx_retime_src_clk_125 = false; 259 + 260 + if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { 261 + const char *rs; 262 + 263 + err = of_property_read_string(np, "st,tx-retime-src", &rs); 264 + if (err < 0) { 265 + dev_err(dev, "st,tx-retime-src not specified\n"); 266 + return err; 267 + } 268 + 269 + if (!strcasecmp(rs, "clk_125")) 270 + dwmac->is_tx_retime_src_clk_125 = true; 271 + } 272 + 273 + dwmac->clk = devm_clk_get(dev, "sti-ethclk"); 274 + 275 + if (IS_ERR(dwmac->clk)) 276 + dwmac->clk = NULL; 277 + 278 + return 0; 279 + } 280 + 281 + static int sti_dwmac_init(struct platform_device *pdev, void *priv) 282 + { 283 + struct sti_dwmac *dwmac = priv; 284 + struct regmap *regmap = dwmac->regmap; 285 + 
int iface = dwmac->interface; 286 + u32 reg = dwmac->reg; 287 + u32 val, spd; 288 + 289 + if (dwmac->clk) 290 + clk_prepare_enable(dwmac->clk); 291 + 292 + regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]); 293 + 294 + val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII; 295 + regmap_update_bits(regmap, reg, ENMII_MASK, val); 296 + 297 + if (IS_PHY_IF_MODE_GBIT(iface)) 298 + spd = SPEED_1000; 299 + else 300 + spd = SPEED_100; 301 + 302 + setup_retime_src(dwmac, spd); 303 + 304 + return 0; 305 + } 306 + 307 + static void *sti_dwmac_setup(struct platform_device *pdev) 308 + { 309 + struct sti_dwmac *dwmac; 310 + int ret; 311 + 312 + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); 313 + if (!dwmac) 314 + return ERR_PTR(-ENOMEM); 315 + 316 + ret = sti_dwmac_parse_data(dwmac, pdev); 317 + if (ret) { 318 + dev_err(&pdev->dev, "Unable to parse OF data\n"); 319 + return ERR_PTR(ret); 320 + } 321 + 322 + return dwmac; 323 + } 324 + 325 + const struct stmmac_of_data sti_gmac_data = { 326 + .fix_mac_speed = sti_fix_mac_speed, 327 + .setup = sti_dwmac_setup, 328 + .init = sti_dwmac_init, 329 + .exit = sti_dwmac_exit, 330 + };
+3
drivers/net/ethernet/stmicro/stmmac/stmmac.h
··· 133 133 #ifdef CONFIG_DWMAC_SUNXI 134 134 extern const struct stmmac_of_data sun7i_gmac_data; 135 135 #endif 136 + #ifdef CONFIG_DWMAC_STI 137 + extern const struct stmmac_of_data sti_gmac_data; 138 + #endif 136 139 extern struct platform_driver stmmac_pltfr_driver; 137 140 static inline int stmmac_register_platform(void) 138 141 {
+5
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
··· 33 33 #ifdef CONFIG_DWMAC_SUNXI 34 34 { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, 35 35 #endif 36 + #ifdef CONFIG_DWMAC_STI 37 + { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data}, 38 + { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, 39 + { .compatible = "st,stih127-dwmac", .data = &sti_gmac_data}, 40 + #endif 36 41 /* SoC specific glue layers should come before generic bindings */ 37 42 { .compatible = "st,spear600-gmac"}, 38 43 { .compatible = "snps,dwmac-3.610"},
+8 -3
drivers/net/ethernet/ti/cpsw.c
··· 554 554 * common for both the interface as the interface shares 555 555 * the same hardware resource. 556 556 */ 557 - for (i = 0; i <= priv->data.slaves; i++) 557 + for (i = 0; i < priv->data.slaves; i++) 558 558 if (priv->slaves[i].ndev->flags & IFF_PROMISC) 559 559 flag = true; 560 560 ··· 578 578 unsigned long timeout = jiffies + HZ; 579 579 580 580 /* Disable Learn for all ports */ 581 - for (i = 0; i <= priv->data.slaves; i++) { 581 + for (i = 0; i < priv->data.slaves; i++) { 582 582 cpsw_ale_control_set(ale, i, 583 583 ALE_PORT_NOLEARN, 1); 584 584 cpsw_ale_control_set(ale, i, ··· 606 606 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); 607 607 608 608 /* Enable Learn for all ports */ 609 - for (i = 0; i <= priv->data.slaves; i++) { 609 + for (i = 0; i < priv->data.slaves; i++) { 610 610 cpsw_ale_control_set(ale, i, 611 611 ALE_PORT_NOLEARN, 0); 612 612 cpsw_ale_control_set(ale, i, ··· 1896 1896 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); 1897 1897 1898 1898 slave_data->phy_if = of_get_phy_mode(slave_node); 1899 + if (slave_data->phy_if < 0) { 1900 + pr_err("Missing or malformed slave[%d] phy-mode property\n", 1901 + i); 1902 + return slave_data->phy_if; 1903 + } 1899 1904 1900 1905 if (data->dual_emac) { 1901 1906 if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
+1 -1
drivers/net/ethernet/tile/tilegx.c
··· 2071 2071 2072 2072 /* Return subqueue id on this core (one per core). */ 2073 2073 static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, 2074 - void *accel_priv) 2074 + void *accel_priv, select_queue_fallback_t fallback) 2075 2075 { 2076 2076 return smp_processor_id(); 2077 2077 }
+9 -4
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 26 26 #include <linux/netdevice.h> 27 27 #include <linux/of_mdio.h> 28 28 #include <linux/of_platform.h> 29 + #include <linux/of_irq.h> 29 30 #include <linux/of_address.h> 30 31 #include <linux/skbuff.h> 31 32 #include <linux/spinlock.h> ··· 601 600 size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; 602 601 packets++; 603 602 604 - lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM; 603 + ++lp->tx_bd_ci; 604 + lp->tx_bd_ci %= TX_BD_NUM; 605 605 cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; 606 606 status = cur_p->status; 607 607 } ··· 688 686 skb_headlen(skb), DMA_TO_DEVICE); 689 687 690 688 for (ii = 0; ii < num_frag; ii++) { 691 - lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; 689 + ++lp->tx_bd_tail; 690 + lp->tx_bd_tail %= TX_BD_NUM; 692 691 cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; 693 692 frag = &skb_shinfo(skb)->frags[ii]; 694 693 cur_p->phys = dma_map_single(ndev->dev.parent, ··· 705 702 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; 706 703 /* Start the transfer */ 707 704 axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); 708 - lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; 705 + ++lp->tx_bd_tail; 706 + lp->tx_bd_tail %= TX_BD_NUM; 709 707 710 708 return NETDEV_TX_OK; 711 709 } ··· 778 774 cur_p->status = 0; 779 775 cur_p->sw_id_offset = (u32) new_skb; 780 776 781 - lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM; 777 + ++lp->rx_bd_ci; 778 + lp->rx_bd_ci %= RX_BD_NUM; 782 779 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 783 780 } 784 781
+38 -15
drivers/net/hyperv/netvsc_drv.c
··· 88 88 { 89 89 struct net_device_context *net_device_ctx = netdev_priv(net); 90 90 struct hv_device *device_obj = net_device_ctx->device_ctx; 91 + struct netvsc_device *nvdev; 92 + struct rndis_device *rdev; 91 93 int ret = 0; 94 + 95 + netif_carrier_off(net); 92 96 93 97 /* Open up the device */ 94 98 ret = rndis_filter_open(device_obj); ··· 102 98 } 103 99 104 100 netif_start_queue(net); 101 + 102 + nvdev = hv_get_drvdata(device_obj); 103 + rdev = nvdev->extension; 104 + if (!rdev->link_state) 105 + netif_carrier_on(net); 105 106 106 107 return ret; 107 108 } ··· 238 229 struct net_device *net; 239 230 struct net_device_context *ndev_ctx; 240 231 struct netvsc_device *net_device; 232 + struct rndis_device *rdev; 241 233 242 234 net_device = hv_get_drvdata(device_obj); 235 + rdev = net_device->extension; 236 + 237 + rdev->link_state = status != 1; 238 + 243 239 net = net_device->ndev; 244 240 245 - if (!net) { 246 - netdev_err(net, "got link status but net device " 247 - "not initialized yet\n"); 241 + if (!net || net->reg_state != NETREG_REGISTERED) 248 242 return; 249 - } 250 243 244 + ndev_ctx = netdev_priv(net); 251 245 if (status == 1) { 252 - netif_carrier_on(net); 253 - ndev_ctx = netdev_priv(net); 254 246 schedule_delayed_work(&ndev_ctx->dwork, 0); 255 247 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); 256 248 } else { 257 - netif_carrier_off(net); 249 + schedule_delayed_work(&ndev_ctx->dwork, 0); 258 250 } 259 251 } 260 252 ··· 398 388 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add 399 389 * another netif_notify_peers() into a delayed work, otherwise GARP packet 400 390 * will not be sent after quick migration, and cause network disconnection. 391 + * Also, we update the carrier status here. 
401 392 */ 402 - static void netvsc_send_garp(struct work_struct *w) 393 + static void netvsc_link_change(struct work_struct *w) 403 394 { 404 395 struct net_device_context *ndev_ctx; 405 396 struct net_device *net; 406 397 struct netvsc_device *net_device; 398 + struct rndis_device *rdev; 399 + bool notify; 400 + 401 + rtnl_lock(); 407 402 408 403 ndev_ctx = container_of(w, struct net_device_context, dwork.work); 409 404 net_device = hv_get_drvdata(ndev_ctx->device_ctx); 405 + rdev = net_device->extension; 410 406 net = net_device->ndev; 411 - netdev_notify_peers(net); 407 + 408 + if (rdev->link_state) { 409 + netif_carrier_off(net); 410 + notify = false; 411 + } else { 412 + netif_carrier_on(net); 413 + notify = true; 414 + } 415 + 416 + rtnl_unlock(); 417 + 418 + if (notify) 419 + netdev_notify_peers(net); 412 420 } 413 421 414 422 ··· 442 414 if (!net) 443 415 return -ENOMEM; 444 416 445 - /* Set initial state */ 446 - netif_carrier_off(net); 447 - 448 417 net_device_ctx = netdev_priv(net); 449 418 net_device_ctx->device_ctx = dev; 450 419 hv_set_drvdata(dev, net); 451 - INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp); 420 + INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); 452 421 INIT_WORK(&net_device_ctx->work, do_set_multicast); 453 422 454 423 net->netdev_ops = &device_ops; ··· 467 442 return ret; 468 443 } 469 444 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); 470 - 471 - netif_carrier_on(net); 472 445 473 446 ret = register_netdev(net); 474 447 if (ret != 0) {
-1
drivers/net/irda/irtty-sir.c
··· 522 522 sirdev_put_instance(priv->dev); 523 523 524 524 /* Stop tty */ 525 - irtty_stop_receiver(tty, TRUE); 526 525 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 527 526 if (tty->ops->stop) 528 527 tty->ops->stop(tty);
+3 -2
drivers/net/macvlan.c
··· 879 879 dev->priv_flags |= IFF_MACVLAN; 880 880 err = netdev_upper_dev_link(lowerdev, dev); 881 881 if (err) 882 - goto destroy_port; 883 - 882 + goto unregister_netdev; 884 883 885 884 list_add_tail_rcu(&vlan->list, &port->vlans); 886 885 netif_stacked_transfer_operstate(lowerdev, dev); 887 886 888 887 return 0; 889 888 889 + unregister_netdev: 890 + unregister_netdevice(dev); 890 891 destroy_port: 891 892 port->count -= 1; 892 893 if (!port->count)
+8 -5
drivers/net/phy/dp83640.c
··· 1006 1006 } else 1007 1007 list_add_tail(&dp83640->list, &clock->phylist); 1008 1008 1009 - if (clock->chosen && !list_empty(&clock->phylist)) 1010 - recalibrate(clock); 1011 - else 1012 - enable_broadcast(dp83640->phydev, clock->page, 1); 1013 - 1014 1009 dp83640_clock_put(clock); 1015 1010 return 0; 1016 1011 ··· 1058 1063 1059 1064 static int dp83640_config_init(struct phy_device *phydev) 1060 1065 { 1066 + struct dp83640_private *dp83640 = phydev->priv; 1067 + struct dp83640_clock *clock = dp83640->clock; 1068 + 1069 + if (clock->chosen && !list_empty(&clock->phylist)) 1070 + recalibrate(clock); 1071 + else 1072 + enable_broadcast(phydev, clock->page, 1); 1073 + 1061 1074 enable_status_frames(phydev, true); 1062 1075 ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE); 1063 1076 return 0;
+1 -1
drivers/net/team/team.c
··· 1648 1648 } 1649 1649 1650 1650 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, 1651 - void *accel_priv) 1651 + void *accel_priv, select_queue_fallback_t fallback) 1652 1652 { 1653 1653 /* 1654 1654 * This helper function exists to help dev_pick_tx get the correct
+1 -1
drivers/net/tun.c
··· 366 366 * hope the rxq no. may help here. 367 367 */ 368 368 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, 369 - void *accel_priv) 369 + void *accel_priv, select_queue_fallback_t fallback) 370 370 { 371 371 struct tun_struct *tun = netdev_priv(dev); 372 372 struct tun_flow_entry *e;
-1
drivers/net/usb/Kconfig
··· 296 296 tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices" 297 297 depends on USB_USBNET 298 298 select CRC32 299 - default y 300 299 ---help--- 301 300 Say Y if you want to use one of the following 100Mbps USB Ethernet 302 301 device based on the CoreChip-sz SR9800 chip.
+2 -1
drivers/net/usb/asix_devices.c
··· 917 917 .status = asix_status, 918 918 .link_reset = ax88178_link_reset, 919 919 .reset = ax88178_reset, 920 - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR, 920 + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | 921 + FLAG_MULTI_PACKET, 921 922 .rx_fixup = asix_rx_fixup_common, 922 923 .tx_fixup = asix_tx_fixup, 923 924 };
+4
drivers/net/usb/ax88179_178a.c
··· 1118 1118 u16 hdr_off; 1119 1119 u32 *pkt_hdr; 1120 1120 1121 + /* This check is no longer done by usbnet */ 1122 + if (skb->len < dev->net->hard_header_len) 1123 + return 0; 1124 + 1121 1125 skb_trim(skb, skb->len - 4); 1122 1126 memcpy(&rx_hdr, skb_tail_pointer(skb), 4); 1123 1127 le32_to_cpus(&rx_hdr);
+4
drivers/net/usb/gl620a.c
··· 84 84 u32 size; 85 85 u32 count; 86 86 87 + /* This check is no longer done by usbnet */ 88 + if (skb->len < dev->net->hard_header_len) 89 + return 0; 90 + 87 91 header = (struct gl_header *) skb->data; 88 92 89 93 // get the packet count of the received skb
+3 -2
drivers/net/usb/mcs7830.c
··· 526 526 { 527 527 u8 status; 528 528 529 - if (skb->len == 0) { 530 - dev_err(&dev->udev->dev, "unexpected empty rx frame\n"); 529 + /* This check is no longer done by usbnet */ 530 + if (skb->len < dev->net->hard_header_len) { 531 + dev_err(&dev->udev->dev, "unexpected tiny rx frame\n"); 531 532 return 0; 532 533 } 533 534
+4
drivers/net/usb/net1080.c
··· 364 364 struct nc_trailer *trailer; 365 365 u16 hdr_len, packet_len; 366 366 367 + /* This check is no longer done by usbnet */ 368 + if (skb->len < dev->net->hard_header_len) 369 + return 0; 370 + 367 371 if (!(skb->len & 0x01)) { 368 372 netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n", 369 373 skb->len, dev->net->hard_header_len, dev->hard_mtu,
+5 -4
drivers/net/usb/qmi_wwan.c
··· 80 80 { 81 81 __be16 proto; 82 82 83 - /* usbnet rx_complete guarantees that skb->len is at least 84 - * hard_header_len, so we can inspect the dest address without 85 - * checking skb->len 86 - */ 83 + /* This check is no longer done by usbnet */ 84 + if (skb->len < dev->net->hard_header_len) 85 + return 0; 86 + 87 87 switch (skb->data[0] & 0xf0) { 88 88 case 0x40: 89 89 proto = htons(ETH_P_IP); ··· 732 732 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 733 733 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ 734 734 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ 735 + {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ 735 736 736 737 /* 4. Gobi 1000 devices */ 737 738 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+4
drivers/net/usb/rndis_host.c
··· 492 492 */ 493 493 int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 494 494 { 495 + /* This check is no longer done by usbnet */ 496 + if (skb->len < dev->net->hard_header_len) 497 + return 0; 498 + 495 499 /* peripheral may have batched packets to us... */ 496 500 while (likely(skb->len)) { 497 501 struct rndis_data_hdr *hdr = (void *)skb->data;
+4
drivers/net/usb/smsc75xx.c
··· 2106 2106 2107 2107 static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 2108 2108 { 2109 + /* This check is no longer done by usbnet */ 2110 + if (skb->len < dev->net->hard_header_len) 2111 + return 0; 2112 + 2109 2113 while (skb->len > 0) { 2110 2114 u32 rx_cmd_a, rx_cmd_b, align_count, size; 2111 2115 struct sk_buff *ax_skb;
+4
drivers/net/usb/smsc95xx.c
··· 1723 1723 1724 1724 static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 1725 1725 { 1726 + /* This check is no longer done by usbnet */ 1727 + if (skb->len < dev->net->hard_header_len) 1728 + return 0; 1729 + 1726 1730 while (skb->len > 0) { 1727 1731 u32 header, align_count; 1728 1732 struct sk_buff *ax_skb;
+5 -1
drivers/net/usb/sr9800.c
··· 63 63 { 64 64 int offset = 0; 65 65 66 + /* This check is no longer done by usbnet */ 67 + if (skb->len < dev->net->hard_header_len) 68 + return 0; 69 + 66 70 while (offset + sizeof(u32) < skb->len) { 67 71 struct sk_buff *sr_skb; 68 72 u16 size; ··· 827 823 dev->rx_urb_size = 828 824 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size; 829 825 } 830 - netdev_dbg(dev->net, "%s : setting rx_urb_size with : %ld\n", __func__, 826 + netdev_dbg(dev->net, "%s : setting rx_urb_size with : %zu\n", __func__, 831 827 dev->rx_urb_size); 832 828 return 0; 833 829
+10 -15
drivers/net/usb/usbnet.c
··· 542 542 } 543 543 // else network stack removes extra byte if we forced a short packet 544 544 545 - if (skb->len) { 546 - /* all data was already cloned from skb inside the driver */ 547 - if (dev->driver_info->flags & FLAG_MULTI_PACKET) 548 - dev_kfree_skb_any(skb); 549 - else 550 - usbnet_skb_return(dev, skb); 545 + /* all data was already cloned from skb inside the driver */ 546 + if (dev->driver_info->flags & FLAG_MULTI_PACKET) 547 + goto done; 548 + 549 + if (skb->len < ETH_HLEN) { 550 + dev->net->stats.rx_errors++; 551 + dev->net->stats.rx_length_errors++; 552 + netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len); 553 + } else { 554 + usbnet_skb_return(dev, skb); 551 555 return; 552 556 } 553 557 554 - netif_dbg(dev, rx_err, dev->net, "drop\n"); 555 - dev->net->stats.rx_errors++; 556 558 done: 557 559 skb_queue_tail(&dev->done, skb); 558 560 } ··· 576 574 switch (urb_status) { 577 575 /* success */ 578 576 case 0: 579 - if (skb->len < dev->net->hard_header_len) { 580 - state = rx_cleanup; 581 - dev->net->stats.rx_errors++; 582 - dev->net->stats.rx_length_errors++; 583 - netif_dbg(dev, rx_err, dev->net, 584 - "rx length %d\n", skb->len); 585 - } 586 577 break; 587 578 588 579 /* stalls need manual reset. this is rare ... except that
+1 -1
drivers/net/wireless/ath/ath5k/phy.c
··· 110 110 ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20)); 111 111 112 112 if (ah->ah_version == AR5K_AR5210) { 113 - srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf; 113 + srev = (ath5k_hw_reg_read(ah, AR5K_PHY(256)) >> 28) & 0xf; 114 114 ret = (u16)ath5k_hw_bitswap(srev, 4) + 1; 115 115 } else { 116 116 srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
+1 -1
drivers/net/wireless/hostap/hostap_proc.c
··· 496 496 497 497 void hostap_remove_proc(local_info_t *local) 498 498 { 499 - remove_proc_subtree(local->ddev->name, hostap_proc); 499 + proc_remove(local->proc); 500 500 } 501 501 502 502
+20 -2
drivers/net/wireless/iwlwifi/dvm/mac80211.c
··· 696 696 return ret; 697 697 } 698 698 699 + static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) 700 + { 701 + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) 702 + return false; 703 + return true; 704 + } 705 + 706 + static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) 707 + { 708 + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) 709 + return false; 710 + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) 711 + return true; 712 + 713 + /* disabled by default */ 714 + return false; 715 + } 716 + 699 717 static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, 700 718 struct ieee80211_vif *vif, 701 719 enum ieee80211_ampdu_mlme_action action, ··· 735 717 736 718 switch (action) { 737 719 case IEEE80211_AMPDU_RX_START: 738 - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) 720 + if (!iwl_enable_rx_ampdu(priv->cfg)) 739 721 break; 740 722 IWL_DEBUG_HT(priv, "start Rx\n"); 741 723 ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); ··· 747 729 case IEEE80211_AMPDU_TX_START: 748 730 if (!priv->trans->ops->txq_enable) 749 731 break; 750 - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) 732 + if (!iwl_enable_tx_ampdu(priv->cfg)) 751 733 break; 752 734 IWL_DEBUG_HT(priv, "start Tx\n"); 753 735 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
+1 -1
drivers/net/wireless/iwlwifi/iwl-drv.c
··· 1286 1286 MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); 1287 1287 module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO); 1288 1288 MODULE_PARM_DESC(11n_disable, 1289 - "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); 1289 + "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX"); 1290 1290 module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, 1291 1291 int, S_IRUGO); 1292 1292 MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
+7 -4
drivers/net/wireless/iwlwifi/iwl-modparams.h
··· 79 79 IWL_POWER_NUM 80 80 }; 81 81 82 - #define IWL_DISABLE_HT_ALL BIT(0) 83 - #define IWL_DISABLE_HT_TXAGG BIT(1) 84 - #define IWL_DISABLE_HT_RXAGG BIT(2) 82 + enum iwl_disable_11n { 83 + IWL_DISABLE_HT_ALL = BIT(0), 84 + IWL_DISABLE_HT_TXAGG = BIT(1), 85 + IWL_DISABLE_HT_RXAGG = BIT(2), 86 + IWL_ENABLE_HT_TXAGG = BIT(3), 87 + }; 85 88 86 89 /** 87 90 * struct iwl_mod_params ··· 93 90 * 94 91 * @sw_crypto: using hardware encryption, default = 0 95 92 * @disable_11n: disable 11n capabilities, default = 0, 96 - * use IWL_DISABLE_HT_* constants 93 + * use IWL_[DIS,EN]ABLE_HT_* constants 97 94 * @amsdu_size_8K: enable 8K amsdu size, default = 0 98 95 * @restart_fw: restart firmware, default = 1 99 96 * @wd_disable: enable stuck queue check, default = 0
+20 -2
drivers/net/wireless/iwlwifi/mvm/mac80211.c
··· 328 328 ieee80211_free_txskb(hw, skb); 329 329 } 330 330 331 + static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) 332 + { 333 + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) 334 + return false; 335 + return true; 336 + } 337 + 338 + static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) 339 + { 340 + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) 341 + return false; 342 + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) 343 + return true; 344 + 345 + /* enabled by default */ 346 + return true; 347 + } 348 + 331 349 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, 332 350 struct ieee80211_vif *vif, 333 351 enum ieee80211_ampdu_mlme_action action, ··· 365 347 366 348 switch (action) { 367 349 case IEEE80211_AMPDU_RX_START: 368 - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) { 350 + if (!iwl_enable_rx_ampdu(mvm->cfg)) { 369 351 ret = -EINVAL; 370 352 break; 371 353 } ··· 375 357 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); 376 358 break; 377 359 case IEEE80211_AMPDU_TX_START: 378 - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) { 360 + if (!iwl_enable_tx_ampdu(mvm->cfg)) { 379 361 ret = -EINVAL; 380 362 break; 381 363 }
+1 -1
drivers/net/wireless/mwifiex/main.c
··· 748 748 749 749 static u16 750 750 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, 751 - void *accel_priv) 751 + void *accel_priv, select_queue_fallback_t fallback) 752 752 { 753 753 skb->priority = cfg80211_classify8021d(skb, NULL); 754 754 return mwifiex_1d_to_wmm_queue[skb->priority];
+8 -2
drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
··· 15 15 #ifndef RTL8187_H 16 16 #define RTL8187_H 17 17 18 + #include <linux/cache.h> 19 + 18 20 #include "rtl818x.h" 19 21 #include "leds.h" 20 22 ··· 141 139 u8 aifsn[4]; 142 140 u8 rfkill_mask; 143 141 struct { 144 - __le64 buf; 142 + union { 143 + __le64 buf; 144 + u8 dummy1[L1_CACHE_BYTES]; 145 + } ____cacheline_aligned; 145 146 struct sk_buff_head queue; 146 147 } b_tx_status; /* This queue is used by both -b and non-b devices */ 147 148 struct mutex io_mutex; ··· 152 147 u8 bits8; 153 148 __le16 bits16; 154 149 __le32 bits32; 155 - } *io_dmabuf; 150 + u8 dummy2[L1_CACHE_BYTES]; 151 + } *io_dmabuf ____cacheline_aligned; 156 152 bool rfkill_off; 157 153 u16 seqno; 158 154 };
+1 -1
drivers/net/wireless/rtlwifi/ps.c
··· 48 48 49 49 /*<2> Enable Adapter */ 50 50 if (rtlpriv->cfg->ops->hw_init(hw)) 51 - return 1; 51 + return false; 52 52 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); 53 53 54 54 /*<3> Enable Interrupt */
+16 -2
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
··· 937 937 bool is92c; 938 938 int err; 939 939 u8 tmp_u1b; 940 + unsigned long flags; 940 941 941 942 rtlpci->being_init_adapter = true; 943 + 944 + /* Since this function can take a very long time (up to 350 ms) 945 + * and can be called with irqs disabled, reenable the irqs 946 + * to let the other devices continue being serviced. 947 + * 948 + * It is safe doing so since our own interrupts will only be enabled 949 + * in a subsequent step. 950 + */ 951 + local_save_flags(flags); 952 + local_irq_enable(); 953 + 942 954 rtlpriv->intf_ops->disable_aspm(hw); 943 955 rtstatus = _rtl92ce_init_mac(hw); 944 956 if (!rtstatus) { 945 957 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); 946 958 err = 1; 947 - return err; 959 + goto exit; 948 960 } 949 961 950 962 err = rtl92c_download_fw(hw); ··· 964 952 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 965 953 "Failed to download FW. Init HW without FW now..\n"); 966 954 err = 1; 967 - return err; 955 + goto exit; 968 956 } 969 957 970 958 rtlhal->last_hmeboxnum = 0; ··· 1044 1032 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n"); 1045 1033 } 1046 1034 rtl92c_dm_init(hw); 1035 + exit: 1036 + local_irq_restore(flags); 1047 1037 rtlpci->being_init_adapter = false; 1048 1038 return err; 1049 1039 }
+74 -76
drivers/of/base.c
··· 342 342 } 343 343 EXPORT_SYMBOL(of_get_cpu_node); 344 344 345 - /** Checks if the given "compat" string matches one of the strings in 346 - * the device's "compatible" property 345 + /** 346 + * __of_device_is_compatible() - Check if the node matches given constraints 347 + * @device: pointer to node 348 + * @compat: required compatible string, NULL or "" for any match 349 + * @type: required device_type value, NULL or "" for any match 350 + * @name: required node name, NULL or "" for any match 351 + * 352 + * Checks if the given @compat, @type and @name strings match the 353 + * properties of the given @device. A constraints can be skipped by 354 + * passing NULL or an empty string as the constraint. 355 + * 356 + * Returns 0 for no match, and a positive integer on match. The return 357 + * value is a relative score with larger values indicating better 358 + * matches. The score is weighted for the most specific compatible value 359 + * to get the highest score. Matching type is next, followed by matching 360 + * name. Practically speaking, this results in the following priority 361 + * order for matches: 362 + * 363 + * 1. specific compatible && type && name 364 + * 2. specific compatible && type 365 + * 3. specific compatible && name 366 + * 4. specific compatible 367 + * 5. general compatible && type && name 368 + * 6. general compatible && type 369 + * 7. general compatible && name 370 + * 8. general compatible 371 + * 9. type && name 372 + * 10. type 373 + * 11. 
name 347 374 */ 348 375 static int __of_device_is_compatible(const struct device_node *device, 349 - const char *compat) 376 + const char *compat, const char *type, const char *name) 350 377 { 351 - const char* cp; 352 - int cplen, l; 378 + struct property *prop; 379 + const char *cp; 380 + int index = 0, score = 0; 353 381 354 - cp = __of_get_property(device, "compatible", &cplen); 355 - if (cp == NULL) 356 - return 0; 357 - while (cplen > 0) { 358 - if (of_compat_cmp(cp, compat, strlen(compat)) == 0) 359 - return 1; 360 - l = strlen(cp) + 1; 361 - cp += l; 362 - cplen -= l; 382 + /* Compatible match has highest priority */ 383 + if (compat && compat[0]) { 384 + prop = __of_find_property(device, "compatible", NULL); 385 + for (cp = of_prop_next_string(prop, NULL); cp; 386 + cp = of_prop_next_string(prop, cp), index++) { 387 + if (of_compat_cmp(cp, compat, strlen(compat)) == 0) { 388 + score = INT_MAX/2 - (index << 2); 389 + break; 390 + } 391 + } 392 + if (!score) 393 + return 0; 363 394 } 364 395 365 - return 0; 396 + /* Matching type is better than matching name */ 397 + if (type && type[0]) { 398 + if (!device->type || of_node_cmp(type, device->type)) 399 + return 0; 400 + score += 2; 401 + } 402 + 403 + /* Matching name is a bit better than not */ 404 + if (name && name[0]) { 405 + if (!device->name || of_node_cmp(name, device->name)) 406 + return 0; 407 + score++; 408 + } 409 + 410 + return score; 366 411 } 367 412 368 413 /** Checks if the given "compat" string matches one of the strings in ··· 420 375 int res; 421 376 422 377 raw_spin_lock_irqsave(&devtree_lock, flags); 423 - res = __of_device_is_compatible(device, compat); 378 + res = __of_device_is_compatible(device, compat, NULL, NULL); 424 379 raw_spin_unlock_irqrestore(&devtree_lock, flags); 425 380 return res; 426 381 } ··· 726 681 raw_spin_lock_irqsave(&devtree_lock, flags); 727 682 np = from ? 
from->allnext : of_allnodes; 728 683 for (; np; np = np->allnext) { 729 - if (type 730 - && !(np->type && (of_node_cmp(np->type, type) == 0))) 731 - continue; 732 - if (__of_device_is_compatible(np, compatible) && 684 + if (__of_device_is_compatible(np, compatible, type, NULL) && 733 685 of_node_get(np)) 734 686 break; 735 687 } ··· 772 730 } 773 731 EXPORT_SYMBOL(of_find_node_with_property); 774 732 775 - static const struct of_device_id * 776 - of_match_compatible(const struct of_device_id *matches, 777 - const struct device_node *node) 778 - { 779 - const char *cp; 780 - int cplen, l; 781 - const struct of_device_id *m; 782 - 783 - cp = __of_get_property(node, "compatible", &cplen); 784 - while (cp && (cplen > 0)) { 785 - m = matches; 786 - while (m->name[0] || m->type[0] || m->compatible[0]) { 787 - /* Only match for the entries without type and name */ 788 - if (m->name[0] || m->type[0] || 789 - of_compat_cmp(m->compatible, cp, 790 - strlen(m->compatible))) 791 - m++; 792 - else 793 - return m; 794 - } 795 - 796 - /* Get node's next compatible string */ 797 - l = strlen(cp) + 1; 798 - cp += l; 799 - cplen -= l; 800 - } 801 - 802 - return NULL; 803 - } 804 - 805 733 static 806 734 const struct of_device_id *__of_match_node(const struct of_device_id *matches, 807 735 const struct device_node *node) 808 736 { 809 - const struct of_device_id *m; 737 + const struct of_device_id *best_match = NULL; 738 + int score, best_score = 0; 810 739 811 740 if (!matches) 812 741 return NULL; 813 742 814 - m = of_match_compatible(matches, node); 815 - if (m) 816 - return m; 817 - 818 - while (matches->name[0] || matches->type[0] || matches->compatible[0]) { 819 - int match = 1; 820 - if (matches->name[0]) 821 - match &= node->name 822 - && !strcmp(matches->name, node->name); 823 - if (matches->type[0]) 824 - match &= node->type 825 - && !strcmp(matches->type, node->type); 826 - if (matches->compatible[0]) 827 - match &= __of_device_is_compatible(node, 828 - 
matches->compatible); 829 - if (match) 830 - return matches; 831 - matches++; 743 + for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) { 744 + score = __of_device_is_compatible(node, matches->compatible, 745 + matches->type, matches->name); 746 + if (score > best_score) { 747 + best_match = matches; 748 + best_score = score; 749 + } 832 750 } 833 - return NULL; 751 + 752 + return best_match; 834 753 } 835 754 836 755 /** ··· 799 796 * @matches: array of of device match structures to search in 800 797 * @node: the of device structure to match against 801 798 * 802 - * Low level utility function used by device matching. We have two ways 803 - * of matching: 804 - * - Try to find the best compatible match by comparing each compatible 805 - * string of device node with all the given matches respectively. 806 - * - If the above method failed, then try to match the compatible by using 807 - * __of_device_is_compatible() besides the match in type and name. 799 + * Low level utility function used by device matching. 808 800 */ 809 801 const struct of_device_id *of_match_node(const struct of_device_id *matches, 810 802 const struct device_node *node)
+14 -8
drivers/of/of_mdio.c
··· 24 24 25 25 static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed) 26 26 { 27 - phydev->supported |= PHY_DEFAULT_FEATURES; 27 + /* The default values for phydev->supported are provided by the PHY 28 + * driver "features" member, we want to reset to sane defaults fist 29 + * before supporting higher speeds. 30 + */ 31 + phydev->supported &= PHY_DEFAULT_FEATURES; 28 32 29 33 switch (max_speed) { 30 34 default: ··· 48 44 { 49 45 struct phy_device *phy; 50 46 bool is_c45; 51 - int rc, prev_irq; 47 + int rc; 52 48 u32 max_speed = 0; 53 49 54 50 is_c45 = of_device_is_compatible(child, ··· 58 54 if (!phy || IS_ERR(phy)) 59 55 return 1; 60 56 61 - if (mdio->irq) { 62 - prev_irq = mdio->irq[addr]; 63 - mdio->irq[addr] = 64 - irq_of_parse_and_map(child, 0); 65 - if (!mdio->irq[addr]) 66 - mdio->irq[addr] = prev_irq; 57 + rc = irq_of_parse_and_map(child, 0); 58 + if (rc > 0) { 59 + phy->irq = rc; 60 + if (mdio->irq) 61 + mdio->irq[addr] = rc; 62 + } else { 63 + if (mdio->irq) 64 + phy->irq = mdio->irq[addr]; 67 65 } 68 66 69 67 /* Associate the OF node with the device structure so it
+67
drivers/of/selftest.c
··· 300 300 of_node_put(np); 301 301 } 302 302 303 + static struct of_device_id match_node_table[] = { 304 + { .data = "A", .name = "name0", }, /* Name alone is lowest priority */ 305 + { .data = "B", .type = "type1", }, /* followed by type alone */ 306 + 307 + { .data = "Ca", .name = "name2", .type = "type1", }, /* followed by both together */ 308 + { .data = "Cb", .name = "name2", }, /* Only match when type doesn't match */ 309 + { .data = "Cc", .name = "name2", .type = "type2", }, 310 + 311 + { .data = "E", .compatible = "compat3" }, 312 + { .data = "G", .compatible = "compat2", }, 313 + { .data = "H", .compatible = "compat2", .name = "name5", }, 314 + { .data = "I", .compatible = "compat2", .type = "type1", }, 315 + { .data = "J", .compatible = "compat2", .type = "type1", .name = "name8", }, 316 + { .data = "K", .compatible = "compat2", .name = "name9", }, 317 + {} 318 + }; 319 + 320 + static struct { 321 + const char *path; 322 + const char *data; 323 + } match_node_tests[] = { 324 + { .path = "/testcase-data/match-node/name0", .data = "A", }, 325 + { .path = "/testcase-data/match-node/name1", .data = "B", }, 326 + { .path = "/testcase-data/match-node/a/name2", .data = "Ca", }, 327 + { .path = "/testcase-data/match-node/b/name2", .data = "Cb", }, 328 + { .path = "/testcase-data/match-node/c/name2", .data = "Cc", }, 329 + { .path = "/testcase-data/match-node/name3", .data = "E", }, 330 + { .path = "/testcase-data/match-node/name4", .data = "G", }, 331 + { .path = "/testcase-data/match-node/name5", .data = "H", }, 332 + { .path = "/testcase-data/match-node/name6", .data = "G", }, 333 + { .path = "/testcase-data/match-node/name7", .data = "I", }, 334 + { .path = "/testcase-data/match-node/name8", .data = "J", }, 335 + { .path = "/testcase-data/match-node/name9", .data = "K", }, 336 + }; 337 + 338 + static void __init of_selftest_match_node(void) 339 + { 340 + struct device_node *np; 341 + const struct of_device_id *match; 342 + int i; 343 + 344 + for (i = 0; i < 
ARRAY_SIZE(match_node_tests); i++) { 345 + np = of_find_node_by_path(match_node_tests[i].path); 346 + if (!np) { 347 + selftest(0, "missing testcase node %s\n", 348 + match_node_tests[i].path); 349 + continue; 350 + } 351 + 352 + match = of_match_node(match_node_table, np); 353 + if (!match) { 354 + selftest(0, "%s didn't match anything\n", 355 + match_node_tests[i].path); 356 + continue; 357 + } 358 + 359 + if (strcmp(match->data, match_node_tests[i].data) != 0) { 360 + selftest(0, "%s got wrong match. expected %s, got %s\n", 361 + match_node_tests[i].path, match_node_tests[i].data, 362 + (const char *)match->data); 363 + continue; 364 + } 365 + selftest(1, "passed"); 366 + } 367 + } 368 + 303 369 static int __init of_selftest(void) 304 370 { 305 371 struct device_node *np; ··· 382 316 of_selftest_property_match_string(); 383 317 of_selftest_parse_interrupts(); 384 318 of_selftest_parse_interrupts_extended(); 319 + of_selftest_match_node(); 385 320 pr_info("end of selftest - %i passed, %i failed\n", 386 321 selftest_results.passed, selftest_results.failed); 387 322 return 0;
+3
drivers/of/testcase-data/testcases.dtsi
··· 1 + #include "tests-phandle.dtsi" 2 + #include "tests-interrupts.dtsi" 3 + #include "tests-match.dtsi"
+19
drivers/of/testcase-data/tests-match.dtsi
··· 1 + 2 + / { 3 + testcase-data { 4 + match-node { 5 + name0 { }; 6 + name1 { device_type = "type1"; }; 7 + a { name2 { device_type = "type1"; }; }; 8 + b { name2 { }; }; 9 + c { name2 { device_type = "type2"; }; }; 10 + name3 { compatible = "compat3"; }; 11 + name4 { compatible = "compat2", "compat3"; }; 12 + name5 { compatible = "compat2", "compat3"; }; 13 + name6 { compatible = "compat1", "compat2", "compat3"; }; 14 + name7 { compatible = "compat2"; device_type = "type1"; }; 15 + name8 { compatible = "compat2"; device_type = "type1"; }; 16 + name9 { compatible = "compat2"; }; 17 + }; 18 + }; 19 + };
+2 -9
drivers/pci/host/pci-mvebu.c
··· 60 60 #define PCIE_DEBUG_CTRL 0x1a60 61 61 #define PCIE_DEBUG_SOFT_RESET BIT(20) 62 62 63 - /* 64 - * This product ID is registered by Marvell, and used when the Marvell 65 - * SoC is not the root complex, but an endpoint on the PCIe bus. It is 66 - * therefore safe to re-use this PCI ID for our emulated PCI-to-PCI 67 - * bridge. 68 - */ 69 - #define MARVELL_EMULATED_PCI_PCI_BRIDGE_ID 0x7846 70 - 71 63 /* PCI configuration space of a PCI-to-PCI bridge */ 72 64 struct mvebu_sw_pci_bridge { 73 65 u16 vendor; ··· 380 388 381 389 bridge->class = PCI_CLASS_BRIDGE_PCI; 382 390 bridge->vendor = PCI_VENDOR_ID_MARVELL; 383 - bridge->device = MARVELL_EMULATED_PCI_PCI_BRIDGE_ID; 391 + bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16; 392 + bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff; 384 393 bridge->header_type = PCI_HEADER_TYPE_BRIDGE; 385 394 bridge->cache_line_size = 0x10; 386 395
+9 -3
drivers/pci/msi.c
··· 545 545 return -ENOMEM; 546 546 list_for_each_entry(entry, &pdev->msi_list, list) { 547 547 char *name = kmalloc(20, GFP_KERNEL); 548 - msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL); 549 - if (!msi_dev_attr) 548 + if (!name) 550 549 goto error_attrs; 550 + 551 + msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL); 552 + if (!msi_dev_attr) { 553 + kfree(name); 554 + goto error_attrs; 555 + } 556 + 551 557 sprintf(name, "%d", entry->irq); 552 558 sysfs_attr_init(&msi_dev_attr->attr); 553 559 msi_dev_attr->attr.name = name; ··· 595 589 ++count; 596 590 msi_attr = msi_attrs[count]; 597 591 } 592 + kfree(msi_attrs); 598 593 return ret; 599 594 } 600 595 ··· 966 959 /** 967 960 * pci_msix_vec_count - return the number of device's MSI-X table entries 968 961 * @dev: pointer to the pci_dev data structure of MSI-X device function 969 - 970 962 * This function returns the number of device's MSI-X table entries and 971 963 * therefore the number of MSI-X vectors device is capable of sending. 972 964 * It returns a negative errno if the device is not capable of sending MSI-X
+10
drivers/pci/pci.c
··· 1181 1181 static int do_pci_enable_device(struct pci_dev *dev, int bars) 1182 1182 { 1183 1183 int err; 1184 + u16 cmd; 1185 + u8 pin; 1184 1186 1185 1187 err = pci_set_power_state(dev, PCI_D0); 1186 1188 if (err < 0 && err != -EIO) ··· 1191 1189 if (err < 0) 1192 1190 return err; 1193 1191 pci_fixup_device(pci_fixup_enable, dev); 1192 + 1193 + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); 1194 + if (pin) { 1195 + pci_read_config_word(dev, PCI_COMMAND, &cmd); 1196 + if (cmd & PCI_COMMAND_INTX_DISABLE) 1197 + pci_write_config_word(dev, PCI_COMMAND, 1198 + cmd & ~PCI_COMMAND_INTX_DISABLE); 1199 + } 1194 1200 1195 1201 return 0; 1196 1202 }
+1
drivers/phy/Kconfig
··· 61 61 config BCM_KONA_USB2_PHY 62 62 tristate "Broadcom Kona USB2 PHY Driver" 63 63 depends on GENERIC_PHY 64 + depends on HAS_IOMEM 64 65 help 65 66 Enable this to support the Broadcom Kona USB 2.0 PHY. 66 67
+6 -8
drivers/phy/phy-core.c
··· 176 176 dev_err(&phy->dev, "phy init failed --> %d\n", ret); 177 177 goto out; 178 178 } 179 + } else { 180 + ret = 0; /* Override possible ret == -ENOTSUPP */ 179 181 } 180 182 ++phy->init_count; 181 183 ··· 234 232 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); 235 233 goto out; 236 234 } 235 + } else { 236 + ret = 0; /* Override possible ret == -ENOTSUPP */ 237 237 } 238 238 ++phy->power_count; 239 239 mutex_unlock(&phy->mutex); ··· 408 404 index = of_property_match_string(dev->of_node, "phy-names", 409 405 string); 410 406 phy = of_phy_get(dev, index); 411 - if (IS_ERR(phy)) { 412 - dev_err(dev, "unable to find phy\n"); 413 - return phy; 414 - } 415 407 } else { 416 408 phy = phy_lookup(dev, string); 417 - if (IS_ERR(phy)) { 418 - dev_err(dev, "unable to find phy\n"); 419 - return phy; 420 - } 421 409 } 410 + if (IS_ERR(phy)) 411 + return phy; 422 412 423 413 if (!try_module_get(phy->ops->owner)) 424 414 return ERR_PTR(-EPROBE_DEFER);
+4 -4
drivers/phy/phy-exynos-dp-video.c
··· 76 76 if (IS_ERR(state->regs)) 77 77 return PTR_ERR(state->regs); 78 78 79 - phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); 80 - if (IS_ERR(phy_provider)) 81 - return PTR_ERR(phy_provider); 82 - 83 79 phy = devm_phy_create(dev, &exynos_dp_video_phy_ops, NULL); 84 80 if (IS_ERR(phy)) { 85 81 dev_err(dev, "failed to create Display Port PHY\n"); 86 82 return PTR_ERR(phy); 87 83 } 88 84 phy_set_drvdata(phy, state); 85 + 86 + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); 87 + if (IS_ERR(phy_provider)) 88 + return PTR_ERR(phy_provider); 89 89 90 90 return 0; 91 91 }
+5 -5
drivers/phy/phy-exynos-mipi-video.c
··· 134 134 dev_set_drvdata(dev, state); 135 135 spin_lock_init(&state->slock); 136 136 137 - phy_provider = devm_of_phy_provider_register(dev, 138 - exynos_mipi_video_phy_xlate); 139 - if (IS_ERR(phy_provider)) 140 - return PTR_ERR(phy_provider); 141 - 142 137 for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) { 143 138 struct phy *phy = devm_phy_create(dev, 144 139 &exynos_mipi_video_phy_ops, NULL); ··· 146 151 state->phys[i].index = i; 147 152 phy_set_drvdata(phy, &state->phys[i]); 148 153 } 154 + 155 + phy_provider = devm_of_phy_provider_register(dev, 156 + exynos_mipi_video_phy_xlate); 157 + if (IS_ERR(phy_provider)) 158 + return PTR_ERR(phy_provider); 149 159 150 160 return 0; 151 161 }
+5 -5
drivers/phy/phy-mvebu-sata.c
··· 99 99 if (IS_ERR(priv->clk)) 100 100 return PTR_ERR(priv->clk); 101 101 102 - phy_provider = devm_of_phy_provider_register(&pdev->dev, 103 - of_phy_simple_xlate); 104 - if (IS_ERR(phy_provider)) 105 - return PTR_ERR(phy_provider); 106 - 107 102 phy = devm_phy_create(&pdev->dev, &phy_mvebu_sata_ops, NULL); 108 103 if (IS_ERR(phy)) 109 104 return PTR_ERR(phy); 110 105 111 106 phy_set_drvdata(phy, priv); 107 + 108 + phy_provider = devm_of_phy_provider_register(&pdev->dev, 109 + of_phy_simple_xlate); 110 + if (IS_ERR(phy_provider)) 111 + return PTR_ERR(phy_provider); 112 112 113 113 /* The boot loader may of left it on. Turn it off. */ 114 114 phy_mvebu_sata_power_off(phy);
+5 -5
drivers/phy/phy-omap-usb2.c
··· 177 177 phy->phy.otg = otg; 178 178 phy->phy.type = USB_PHY_TYPE_USB2; 179 179 180 - phy_provider = devm_of_phy_provider_register(phy->dev, 181 - of_phy_simple_xlate); 182 - if (IS_ERR(phy_provider)) 183 - return PTR_ERR(phy_provider); 184 - 185 180 control_node = of_parse_phandle(node, "ctrl-module", 0); 186 181 if (!control_node) { 187 182 dev_err(&pdev->dev, "Failed to get control device phandle\n"); ··· 208 213 return PTR_ERR(generic_phy); 209 214 210 215 phy_set_drvdata(generic_phy, phy); 216 + 217 + phy_provider = devm_of_phy_provider_register(phy->dev, 218 + of_phy_simple_xlate); 219 + if (IS_ERR(phy_provider)) 220 + return PTR_ERR(phy_provider); 211 221 212 222 phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k"); 213 223 if (IS_ERR(phy->wkupclk)) {
+5 -5
drivers/phy/phy-twl4030-usb.c
··· 695 695 otg->set_host = twl4030_set_host; 696 696 otg->set_peripheral = twl4030_set_peripheral; 697 697 698 - phy_provider = devm_of_phy_provider_register(twl->dev, 699 - of_phy_simple_xlate); 700 - if (IS_ERR(phy_provider)) 701 - return PTR_ERR(phy_provider); 702 - 703 698 phy = devm_phy_create(twl->dev, &ops, init_data); 704 699 if (IS_ERR(phy)) { 705 700 dev_dbg(&pdev->dev, "Failed to create PHY\n"); ··· 702 707 } 703 708 704 709 phy_set_drvdata(phy, twl); 710 + 711 + phy_provider = devm_of_phy_provider_register(twl->dev, 712 + of_phy_simple_xlate); 713 + if (IS_ERR(phy_provider)) 714 + return PTR_ERR(phy_provider); 705 715 706 716 /* init spinlock for workqueue */ 707 717 spin_lock_init(&twl->lock);
+1 -1
drivers/regulator/core.c
··· 1359 1359 goto found; 1360 1360 /* Don't log an error when called from regulator_get_optional() */ 1361 1361 } else if (!have_full_constraints() || exclusive) { 1362 - dev_err(dev, "dummy supplies not allowed\n"); 1362 + dev_warn(dev, "dummy supplies not allowed\n"); 1363 1363 } 1364 1364 1365 1365 mutex_unlock(&regulator_list_mutex);
+3 -1
drivers/regulator/da9063-regulator.c
··· 1 + 1 2 /* 2 3 * Regulator driver for DA9063 PMIC series 3 4 * ··· 61 60 .desc.ops = &da9063_ldo_ops, \ 62 61 .desc.min_uV = (min_mV) * 1000, \ 63 62 .desc.uV_step = (step_mV) * 1000, \ 64 - .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1), \ 63 + .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1 \ 64 + + (DA9063_V##regl_name##_BIAS)), \ 65 65 .desc.enable_reg = DA9063_REG_##regl_name##_CONT, \ 66 66 .desc.enable_mask = DA9063_LDO_EN, \ 67 67 .desc.vsel_reg = DA9063_REG_V##regl_name##_A, \
+3 -2
drivers/regulator/max14577.c
··· 166 166 167 167 ret = of_regulator_match(&pdev->dev, np, max14577_regulator_matches, 168 168 MAX14577_REG_MAX); 169 - if (ret < 0) { 169 + if (ret < 0) 170 170 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); 171 - } 171 + else 172 + ret = 0; 172 173 173 174 of_node_put(np); 174 175
+3 -1
drivers/regulator/s5m8767.c
··· 535 535 return -ENODEV; 536 536 } 537 537 538 - regulators_np = of_find_node_by_name(pmic_np, "regulators"); 538 + regulators_np = of_get_child_by_name(pmic_np, "regulators"); 539 539 if (!regulators_np) { 540 540 dev_err(iodev->dev, "could not find regulators sub-node\n"); 541 541 return -EINVAL; ··· 590 590 } 591 591 rmode++; 592 592 } 593 + 594 + of_node_put(regulators_np); 593 595 594 596 if (of_get_property(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs", NULL)) { 595 597 pdata->buck2_gpiodvs = true;
-1
drivers/sbus/char/jsflash.c
··· 507 507 } 508 508 509 509 /* Let us be really paranoid for modifications to probing code. */ 510 - /* extern enum sparc_cpu sparc_cpu_model; */ /* in <asm/system.h> */ 511 510 if (sparc_cpu_model != sun4m) { 512 511 /* We must be on sun4m because we use MMU Bypass ASI. */ 513 512 return -ENXIO;
+1 -1
drivers/scsi/scsi_lib.c
··· 1684 1684 1685 1685 host_dev = scsi_get_device(shost); 1686 1686 if (host_dev && host_dev->dma_mask) 1687 - bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT; 1687 + bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT; 1688 1688 1689 1689 return bounce_limit; 1690 1690 }
+1 -2
drivers/staging/android/binder.c
··· 2904 2904 refs++; 2905 2905 2906 2906 if (!ref->death) 2907 - goto out; 2907 + continue; 2908 2908 2909 2909 death++; 2910 2910 ··· 2917 2917 BUG(); 2918 2918 } 2919 2919 2920 - out: 2921 2920 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2922 2921 "node %d now dead, refs %d, death %d\n", 2923 2922 node->debug_id, refs, death);
+1 -1
drivers/staging/bcm/Bcmnet.c
··· 40 40 } 41 41 42 42 static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb, 43 - void *accel_priv) 43 + void *accel_priv, select_queue_fallback_t fallback) 44 44 { 45 45 return ClassifyPacket(netdev_priv(dev), skb); 46 46 }
+1 -1
drivers/staging/netlogic/xlr_net.c
··· 307 307 } 308 308 309 309 static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb, 310 - void *accel_priv) 310 + void *accel_priv, select_queue_fallback_t fallback) 311 311 { 312 312 return (u16)smp_processor_id(); 313 313 }
+1 -1
drivers/staging/rtl8188eu/os_dep/os_intfs.c
··· 653 653 } 654 654 655 655 static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, 656 - void *accel_priv) 656 + void *accel_priv, select_queue_fallback_t fallback) 657 657 { 658 658 struct adapter *padapter = rtw_netdev_priv(dev); 659 659 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+7 -18
drivers/tty/tty_io.c
··· 1267 1267 * @p: output buffer of at least 7 bytes 1268 1268 * 1269 1269 * Generate a name from a driver reference and write it to the output 1270 - * buffer. Return the number of bytes written. 1270 + * buffer. 1271 1271 * 1272 1272 * Locking: None 1273 1273 */ 1274 - static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p) 1274 + static void tty_line_name(struct tty_driver *driver, int index, char *p) 1275 1275 { 1276 1276 if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE) 1277 - return sprintf(p, "%s", driver->name); 1277 + strcpy(p, driver->name); 1278 1278 else 1279 - return sprintf(p, "%s%d", driver->name, 1280 - index + driver->name_base); 1279 + sprintf(p, "%s%d", driver->name, index + driver->name_base); 1281 1280 } 1282 1281 1283 1282 /** ··· 3545 3546 if (i >= ARRAY_SIZE(cs)) 3546 3547 break; 3547 3548 } 3548 - while (i--) { 3549 - struct tty_driver *driver; 3550 - const char *name = cs[i]->name; 3551 - int index = cs[i]->index; 3552 - 3553 - driver = cs[i]->device(cs[i], &index); 3554 - if (driver) { 3555 - count += tty_line_name(driver, index, buf + count); 3556 - count += sprintf(buf + count, "%c", i ? ' ':'\n'); 3557 - } else 3558 - count += sprintf(buf + count, "%s%d%c", 3559 - name, index, i ? ' ':'\n'); 3560 - } 3549 + while (i--) 3550 + count += sprintf(buf + count, "%s%d%c", 3551 + cs[i]->name, cs[i]->index, i ? ' ':'\n'); 3561 3552 console_unlock(); 3562 3553 3563 3554 return count;
+2 -2
drivers/usb/chipidea/udc.c
··· 105 105 106 106 do { 107 107 /* flush any pending transfer */ 108 - hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n)); 108 + hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n)); 109 109 while (hw_read(ci, OP_ENDPTFLUSH, BIT(n))) 110 110 cpu_relax(); 111 111 } while (hw_read(ci, OP_ENDPTSTAT, BIT(n))); ··· 205 205 if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num))) 206 206 return -EAGAIN; 207 207 208 - hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n)); 208 + hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n)); 209 209 210 210 while (hw_read(ci, OP_ENDPTPRIME, BIT(n))) 211 211 cpu_relax();
+32 -26
drivers/usb/gadget/bcm63xx_udc.c
··· 360 360 bcm_writel(val, udc->iudma_regs + off); 361 361 } 362 362 363 - static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off) 363 + static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan) 364 364 { 365 - return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off); 365 + return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off + 366 + (ENETDMA_CHAN_WIDTH * chan)); 366 367 } 367 368 368 - static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off) 369 + static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off, 370 + int chan) 369 371 { 370 - bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off); 372 + bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off + 373 + (ENETDMA_CHAN_WIDTH * chan)); 371 374 } 372 375 373 - static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off) 376 + static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan) 374 377 { 375 - return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off); 378 + return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off + 379 + (ENETDMA_CHAN_WIDTH * chan)); 376 380 } 377 381 378 - static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off) 382 + static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off, 383 + int chan) 379 384 { 380 - bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off); 385 + bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off + 386 + (ENETDMA_CHAN_WIDTH * chan)); 381 387 } 382 388 383 389 static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled) ··· 644 638 } while (!last_bd); 645 639 646 640 usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK, 647 - ENETDMAC_CHANCFG_REG(iudma->ch_idx)); 641 + ENETDMAC_CHANCFG_REG, iudma->ch_idx); 648 642 } 649 643 650 644 /** ··· 700 694 bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num)); 701 695 702 696 /* stop DMA, then wait for the hardware to wrap up */ 703 - usb_dmac_writel(udc, 0, 
ENETDMAC_CHANCFG_REG(ch_idx)); 697 + usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx); 704 698 705 - while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) & 699 + while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) & 706 700 ENETDMAC_CHANCFG_EN_MASK) { 707 701 udelay(1); 708 702 ··· 719 713 dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n", 720 714 ch_idx); 721 715 usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK, 722 - ENETDMAC_CHANCFG_REG(ch_idx)); 716 + ENETDMAC_CHANCFG_REG, ch_idx); 723 717 } 724 718 } 725 - usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx)); 719 + usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx); 726 720 727 721 /* don't leave "live" HW-owned entries for the next guy to step on */ 728 722 for (d = iudma->bd_ring; d <= iudma->end_bd; d++) ··· 734 728 735 729 /* set up IRQs, UBUS burst size, and BD base for this channel */ 736 730 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK, 737 - ENETDMAC_IRMASK_REG(ch_idx)); 738 - usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx)); 731 + ENETDMAC_IRMASK_REG, ch_idx); 732 + usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx); 739 733 740 - usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx)); 741 - usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx)); 734 + usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx); 735 + usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx); 742 736 } 743 737 744 738 /** ··· 2041 2035 spin_lock(&udc->lock); 2042 2036 2043 2037 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK, 2044 - ENETDMAC_IR_REG(iudma->ch_idx)); 2038 + ENETDMAC_IR_REG, iudma->ch_idx); 2045 2039 bep = iudma->bep; 2046 2040 rc = iudma_read(udc, iudma); 2047 2041 ··· 2181 2175 seq_printf(s, " [ep%d]:\n", 2182 2176 max_t(int, iudma_defaults[ch_idx].ep_num, 0)); 2183 2177 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n", 2184 - usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)), 2185 - usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)), 2186 
- usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)), 2187 - usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx))); 2178 + usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx), 2179 + usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx), 2180 + usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx), 2181 + usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx)); 2188 2182 2189 - sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx)); 2190 - sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx)); 2183 + sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx); 2184 + sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx); 2191 2185 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n", 2192 - usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)), 2186 + usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx), 2193 2187 sram2 >> 16, sram2 & 0xffff, 2194 2188 sram3 >> 16, sram3 & 0xffff, 2195 - usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx))); 2189 + usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx)); 2196 2190 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used, 2197 2191 iudma->n_bds); 2198 2192
+6 -1
drivers/usb/gadget/f_fs.c
··· 585 585 char __user *buf, size_t len, int read) 586 586 { 587 587 struct ffs_epfile *epfile = file->private_data; 588 - struct usb_gadget *gadget = epfile->ffs->gadget; 589 588 struct ffs_ep *ep; 590 589 char *data = NULL; 591 590 ssize_t ret, data_len; ··· 620 621 621 622 /* Allocate & copy */ 622 623 if (!halt) { 624 + /* 625 + * if we _do_ wait above, the epfile->ffs->gadget might be NULL 626 + * before the waiting completes, so do not assign to 'gadget' earlier 627 + */ 628 + struct usb_gadget *gadget = epfile->ffs->gadget; 629 + 623 630 /* 624 631 * Controller may require buffer size to be aligned to 625 632 * maxpacketsize of an out endpoint.
+1 -1
drivers/usb/gadget/printer.c
··· 1157 1157 1158 1158 usb_gadget_set_selfpowered(gadget); 1159 1159 1160 - if (gadget->is_otg) { 1160 + if (gadget_is_otg(gadget)) { 1161 1161 otg_descriptor.bmAttributes |= USB_OTG_HNP; 1162 1162 printer_cfg_driver.descriptors = otg_desc; 1163 1163 printer_cfg_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+1 -1
drivers/usb/gadget/s3c2410_udc.c
··· 1629 1629 ep->ep.desc = NULL; 1630 1630 ep->halted = 0; 1631 1631 INIT_LIST_HEAD(&ep->queue); 1632 - usb_ep_set_maxpacket_limit(&ep->ep, &ep->ep.maxpacket); 1632 + usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket); 1633 1633 } 1634 1634 } 1635 1635
+22 -4
drivers/usb/host/ehci-hub.c
··· 238 238 int port; 239 239 int mask; 240 240 int changed; 241 + bool fs_idle_delay; 241 242 242 243 ehci_dbg(ehci, "suspend root hub\n"); 243 244 ··· 273 272 ehci->bus_suspended = 0; 274 273 ehci->owned_ports = 0; 275 274 changed = 0; 275 + fs_idle_delay = false; 276 276 port = HCS_N_PORTS(ehci->hcs_params); 277 277 while (port--) { 278 278 u32 __iomem *reg = &ehci->regs->port_status [port]; ··· 302 300 } 303 301 304 302 if (t1 != t2) { 303 + /* 304 + * On some controllers, Wake-On-Disconnect will 305 + * generate false wakeup signals until the bus 306 + * switches over to full-speed idle. For their 307 + * sake, add a delay if we need one. 308 + */ 309 + if ((t2 & PORT_WKDISC_E) && 310 + ehci_port_speed(ehci, t2) == 311 + USB_PORT_STAT_HIGH_SPEED) 312 + fs_idle_delay = true; 305 313 ehci_writel(ehci, t2, reg); 306 314 changed = 1; 307 315 } 308 316 } 317 + spin_unlock_irq(&ehci->lock); 318 + 319 + if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) { 320 + /* 321 + * Wait for HCD to enter low-power mode or for the bus 322 + * to switch to full-speed idle. 323 + */ 324 + usleep_range(5000, 5500); 325 + } 309 326 310 327 if (changed && ehci->has_tdi_phy_lpm) { 311 - spin_unlock_irq(&ehci->lock); 312 - msleep(5); /* 5 ms for HCD to enter low-power mode */ 313 328 spin_lock_irq(&ehci->lock); 314 - 315 329 port = HCS_N_PORTS(ehci->hcs_params); 316 330 while (port--) { 317 331 u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port]; ··· 340 322 port, (t3 & HOSTPC_PHCD) ? 341 323 "succeeded" : "failed"); 342 324 } 325 + spin_unlock_irq(&ehci->lock); 343 326 } 344 - spin_unlock_irq(&ehci->lock); 345 327 346 328 /* Apparently some devices need a >= 1-uframe delay here */ 347 329 if (ehci->bus_suspended)
+13 -2
drivers/usb/musb/musb_core.c
··· 477 477 musb->port1_status |= 478 478 (USB_PORT_STAT_C_SUSPEND << 16) 479 479 | MUSB_PORT_STAT_RESUME; 480 + musb->rh_timer = jiffies 481 + + msecs_to_jiffies(20); 480 482 schedule_delayed_work( 481 - &musb->finish_resume_work, 20); 483 + &musb->finish_resume_work, 484 + msecs_to_jiffies(20)); 482 485 483 486 musb->xceiv->state = OTG_STATE_A_HOST; 484 487 musb->is_active = 1; ··· 2160 2157 void __iomem *musb_base = musb->mregs; 2161 2158 void __iomem *ep_target_regs; 2162 2159 void __iomem *epio; 2160 + u8 power; 2163 2161 2164 2162 musb_writew(musb_base, MUSB_FRAME, musb->context.frame); 2165 2163 musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode); 2166 2164 musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl); 2167 - musb_writeb(musb_base, MUSB_POWER, musb->context.power); 2165 + 2166 + /* Don't affect SUSPENDM/RESUME bits in POWER reg */ 2167 + power = musb_readb(musb_base, MUSB_POWER); 2168 + power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME; 2169 + musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME); 2170 + power |= musb->context.power; 2171 + musb_writeb(musb_base, MUSB_POWER, power); 2172 + 2168 2173 musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe); 2169 2174 musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe); 2170 2175 musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
+3
drivers/usb/musb/musb_host.c
··· 1183 1183 csr = MUSB_CSR0_H_STATUSPKT 1184 1184 | MUSB_CSR0_TXPKTRDY; 1185 1185 1186 + /* disable ping token in status phase */ 1187 + csr |= MUSB_CSR0_H_DIS_PING; 1188 + 1186 1189 /* flag status stage */ 1187 1190 musb->ep0_stage = MUSB_EP0_STATUS; 1188 1191
+19 -7
drivers/usb/musb/musb_virthub.c
··· 135 135 136 136 /* later, GetPortStatus will stop RESUME signaling */ 137 137 musb->port1_status |= MUSB_PORT_STAT_RESUME; 138 - schedule_delayed_work(&musb->finish_resume_work, 20); 138 + schedule_delayed_work(&musb->finish_resume_work, 139 + msecs_to_jiffies(20)); 139 140 } 140 141 } 141 142 ··· 159 158 */ 160 159 power = musb_readb(mbase, MUSB_POWER); 161 160 if (do_reset) { 162 - 163 161 /* 164 162 * If RESUME is set, we must make sure it stays minimum 20 ms. 165 163 * Then we must clear RESUME and wait a bit to let musb start ··· 167 167 * detected". 168 168 */ 169 169 if (power & MUSB_POWER_RESUME) { 170 - while (time_before(jiffies, musb->rh_timer)) 171 - msleep(1); 170 + long remain = (unsigned long) musb->rh_timer - jiffies; 171 + 172 + if (musb->rh_timer > 0 && remain > 0) { 173 + /* take into account the minimum delay after resume */ 174 + schedule_delayed_work( 175 + &musb->deassert_reset_work, remain); 176 + return; 177 + } 178 + 172 179 musb_writeb(mbase, MUSB_POWER, 173 - power & ~MUSB_POWER_RESUME); 174 - msleep(1); 180 + power & ~MUSB_POWER_RESUME); 181 + 182 + /* Give the core 1 ms to clear MUSB_POWER_RESUME */ 183 + schedule_delayed_work(&musb->deassert_reset_work, 184 + msecs_to_jiffies(1)); 185 + return; 175 186 } 176 187 177 188 power &= 0xf0; ··· 191 180 192 181 musb->port1_status |= USB_PORT_STAT_RESET; 193 182 musb->port1_status &= ~USB_PORT_STAT_ENABLE; 194 - schedule_delayed_work(&musb->deassert_reset_work, 50); 183 + schedule_delayed_work(&musb->deassert_reset_work, 184 + msecs_to_jiffies(50)); 195 185 } else { 196 186 dev_dbg(musb->controller, "root port reset stopped\n"); 197 187 musb_writeb(mbase, MUSB_POWER,
-2
drivers/usb/musb/omap2430.c
··· 659 659 OTG_INTERFSEL); 660 660 661 661 omap2430_low_level_exit(musb); 662 - phy_power_off(musb->phy); 663 662 } 664 663 665 664 return 0; ··· 673 674 omap2430_low_level_init(musb); 674 675 musb_writel(musb->mregs, OTG_INTERFSEL, 675 676 musb->context.otg_interfsel); 676 - phy_power_on(musb->phy); 677 677 } 678 678 679 679 return 0;
+26 -31
drivers/usb/phy/phy-msm-usb.c
··· 159 159 return rc; 160 160 } 161 161 162 - #ifdef CONFIG_PM_SLEEP 163 - #define USB_PHY_SUSP_DIG_VOL 500000 164 - static int msm_hsusb_config_vddcx(int high) 165 - { 166 - int max_vol = USB_PHY_VDD_DIG_VOL_MAX; 167 - int min_vol; 168 - int ret; 169 - 170 - if (high) 171 - min_vol = USB_PHY_VDD_DIG_VOL_MIN; 172 - else 173 - min_vol = USB_PHY_SUSP_DIG_VOL; 174 - 175 - ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol); 176 - if (ret) { 177 - pr_err("%s: unable to set the voltage for regulator " 178 - "HSUSB_VDDCX\n", __func__); 179 - return ret; 180 - } 181 - 182 - pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol); 183 - 184 - return ret; 185 - } 186 - #endif 187 - 188 162 static int msm_hsusb_ldo_set_mode(int on) 189 163 { 190 164 int ret = 0; ··· 414 440 #define PHY_SUSPEND_TIMEOUT_USEC (500 * 1000) 415 441 #define PHY_RESUME_TIMEOUT_USEC (100 * 1000) 416 442 417 - #ifdef CONFIG_PM_SLEEP 443 + #ifdef CONFIG_PM 444 + 445 + #define USB_PHY_SUSP_DIG_VOL 500000 446 + static int msm_hsusb_config_vddcx(int high) 447 + { 448 + int max_vol = USB_PHY_VDD_DIG_VOL_MAX; 449 + int min_vol; 450 + int ret; 451 + 452 + if (high) 453 + min_vol = USB_PHY_VDD_DIG_VOL_MIN; 454 + else 455 + min_vol = USB_PHY_SUSP_DIG_VOL; 456 + 457 + ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol); 458 + if (ret) { 459 + pr_err("%s: unable to set the voltage for regulator " 460 + "HSUSB_VDDCX\n", __func__); 461 + return ret; 462 + } 463 + 464 + pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol); 465 + 466 + return ret; 467 + } 468 + 418 469 static int msm_otg_suspend(struct msm_otg *motg) 419 470 { 420 471 struct usb_phy *phy = &motg->phy; ··· 1733 1734 } 1734 1735 #endif 1735 1736 1736 - #ifdef CONFIG_PM 1737 1737 static const struct dev_pm_ops msm_otg_dev_pm_ops = { 1738 1738 SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume) 1739 1739 SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume, 1740 1740 
msm_otg_runtime_idle) 1741 1741 }; 1742 - #endif 1743 1742 1744 1743 static struct platform_driver msm_otg_driver = { 1745 1744 .remove = msm_otg_remove, 1746 1745 .driver = { 1747 1746 .name = DRIVER_NAME, 1748 1747 .owner = THIS_MODULE, 1749 - #ifdef CONFIG_PM 1750 1748 .pm = &msm_otg_dev_pm_ops, 1751 - #endif 1752 1749 }, 1753 1750 }; 1754 1751
+2 -1
drivers/usb/serial/option.c
··· 1526 1526 /* Cinterion */ 1527 1527 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, 1528 1528 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, 1529 - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, 1529 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8), 1530 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1530 1531 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) }, 1531 1532 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), 1532 1533 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+26 -21
drivers/vhost/net.c
··· 70 70 }; 71 71 72 72 struct vhost_net_ubuf_ref { 73 - struct kref kref; 73 + /* refcount follows semantics similar to kref: 74 + * 0: object is released 75 + * 1: no outstanding ubufs 76 + * >1: outstanding ubufs 77 + */ 78 + atomic_t refcount; 74 79 wait_queue_head_t wait; 75 80 struct vhost_virtqueue *vq; 76 81 }; ··· 121 116 vhost_net_zcopy_mask |= 0x1 << vq; 122 117 } 123 118 124 - static void vhost_net_zerocopy_done_signal(struct kref *kref) 125 - { 126 - struct vhost_net_ubuf_ref *ubufs; 127 - 128 - ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref); 129 - wake_up(&ubufs->wait); 130 - } 131 - 132 119 static struct vhost_net_ubuf_ref * 133 120 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) 134 121 { ··· 131 134 ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL); 132 135 if (!ubufs) 133 136 return ERR_PTR(-ENOMEM); 134 - kref_init(&ubufs->kref); 137 + atomic_set(&ubufs->refcount, 1); 135 138 init_waitqueue_head(&ubufs->wait); 136 139 ubufs->vq = vq; 137 140 return ubufs; 138 141 } 139 142 140 - static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) 143 + static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) 141 144 { 142 - kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal); 145 + int r = atomic_sub_return(1, &ubufs->refcount); 146 + if (unlikely(!r)) 147 + wake_up(&ubufs->wait); 148 + return r; 143 149 } 144 150 145 151 static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs) 146 152 { 147 - kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal); 148 - wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount)); 153 + vhost_net_ubuf_put(ubufs); 154 + wait_event(ubufs->wait, !atomic_read(&ubufs->refcount)); 149 155 } 150 156 151 157 static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) ··· 306 306 { 307 307 struct vhost_net_ubuf_ref *ubufs = ubuf->ctx; 308 308 struct vhost_virtqueue *vq = ubufs->vq; 309 - int cnt = atomic_read(&ubufs->kref.refcount); 309 + int cnt; 310 + 311 + 
rcu_read_lock_bh(); 310 312 311 313 /* set len to mark this desc buffers done DMA */ 312 314 vq->heads[ubuf->desc].len = success ? 313 315 VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN; 314 - vhost_net_ubuf_put(ubufs); 316 + cnt = vhost_net_ubuf_put(ubufs); 315 317 316 318 /* 317 319 * Trigger polling thread if guest stopped submitting new buffers: 318 - * in this case, the refcount after decrement will eventually reach 1 319 - * so here it is 2. 320 + * in this case, the refcount after decrement will eventually reach 1. 320 321 * We also trigger polling periodically after each 16 packets 321 322 * (the value 16 here is more or less arbitrary, it's tuned to trigger 322 323 * less than 10% of times). 323 324 */ 324 - if (cnt <= 2 || !(cnt % 16)) 325 + if (cnt <= 1 || !(cnt % 16)) 325 326 vhost_poll_queue(&vq->poll); 327 + 328 + rcu_read_unlock_bh(); 326 329 } 327 330 328 331 /* Expects to be always run from workqueue - which acts as ··· 423 420 msg.msg_control = ubuf; 424 421 msg.msg_controllen = sizeof(ubuf); 425 422 ubufs = nvq->ubufs; 426 - kref_get(&ubufs->kref); 423 + atomic_inc(&ubufs->refcount); 427 424 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; 428 425 } else { 429 426 msg.msg_control = NULL; ··· 783 780 vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); 784 781 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); 785 782 n->tx_flush = false; 786 - kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref); 783 + atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); 787 784 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); 788 785 } 789 786 } ··· 803 800 fput(tx_sock->file); 804 801 if (rx_sock) 805 802 fput(rx_sock->file); 803 + /* Make sure no callbacks are outstanding */ 804 + synchronize_rcu_bh(); 806 805 /* We do an extra flush before freeing memory, 807 806 * since jobs can re-queue themselves. */ 808 807 vhost_net_flush(n);
+1 -1
drivers/watchdog/w83697hf_wdt.c
··· 402 402 403 403 if (!found) { 404 404 pr_err("No W83697HF/HG could be found\n"); 405 - ret = -EIO; 405 + ret = -ENODEV; 406 406 goto out; 407 407 } 408 408
+1 -10
fs/ceph/acl.c
··· 54 54 return acl; 55 55 } 56 56 57 - void ceph_forget_all_cached_acls(struct inode *inode) 58 - { 59 - forget_all_cached_acls(inode); 60 - } 61 - 62 57 struct posix_acl *ceph_get_acl(struct inode *inode, int type) 63 58 { 64 59 int size; ··· 155 160 goto out_dput; 156 161 } 157 162 158 - if (value) 159 - ret = __ceph_setxattr(dentry, name, value, size, 0); 160 - else 161 - ret = __ceph_removexattr(dentry, name); 162 - 163 + ret = __ceph_setxattr(dentry, name, value, size, 0); 163 164 if (ret) { 164 165 if (new_mode != old_mode) { 165 166 newattrs.ia_mode = old_mode;
+17 -6
fs/ceph/dir.c
··· 100 100 return p & 0xffffffff; 101 101 } 102 102 103 + static int fpos_cmp(loff_t l, loff_t r) 104 + { 105 + int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r)); 106 + if (v) 107 + return v; 108 + return (int)(fpos_off(l) - fpos_off(r)); 109 + } 110 + 103 111 /* 104 112 * When possible, we try to satisfy a readdir by peeking at the 105 113 * dcache. We make this work by carefully ordering dentries on ··· 164 156 if (!d_unhashed(dentry) && dentry->d_inode && 165 157 ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && 166 158 ceph_ino(dentry->d_inode) != CEPH_INO_CEPH && 167 - ctx->pos <= di->offset) 159 + fpos_cmp(ctx->pos, di->offset) <= 0) 168 160 break; 169 161 dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry, 170 162 dentry->d_name.len, dentry->d_name.name, di->offset, ··· 703 695 ceph_mdsc_put_request(req); 704 696 705 697 if (!err) 706 - err = ceph_init_acl(dentry, dentry->d_inode, dir); 707 - 708 - if (err) 698 + ceph_init_acl(dentry, dentry->d_inode, dir); 699 + else 709 700 d_drop(dentry); 710 701 return err; 711 702 } ··· 742 735 if (!err && !req->r_reply_info.head->is_dentry) 743 736 err = ceph_handle_notrace_create(dir, dentry); 744 737 ceph_mdsc_put_request(req); 745 - if (err) 738 + if (!err) 739 + ceph_init_acl(dentry, dentry->d_inode, dir); 740 + else 746 741 d_drop(dentry); 747 742 return err; 748 743 } ··· 785 776 err = ceph_handle_notrace_create(dir, dentry); 786 777 ceph_mdsc_put_request(req); 787 778 out: 788 - if (err < 0) 779 + if (!err) 780 + ceph_init_acl(dentry, dentry->d_inode, dir); 781 + else 789 782 d_drop(dentry); 790 783 return err; 791 784 }
+1
fs/ceph/file.c
··· 286 286 } else { 287 287 dout("atomic_open finish_open on dn %p\n", dn); 288 288 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { 289 + ceph_init_acl(dentry, dentry->d_inode, dir); 289 290 *opened |= FILE_CREATED; 290 291 } 291 292 err = finish_open(file, dentry, ceph_open, opened);
+28 -4
fs/ceph/super.c
··· 144 144 Opt_ino32, 145 145 Opt_noino32, 146 146 Opt_fscache, 147 - Opt_nofscache 147 + Opt_nofscache, 148 + #ifdef CONFIG_CEPH_FS_POSIX_ACL 149 + Opt_acl, 150 + #endif 151 + Opt_noacl 148 152 }; 149 153 150 154 static match_table_t fsopt_tokens = { ··· 176 172 {Opt_noino32, "noino32"}, 177 173 {Opt_fscache, "fsc"}, 178 174 {Opt_nofscache, "nofsc"}, 175 + #ifdef CONFIG_CEPH_FS_POSIX_ACL 176 + {Opt_acl, "acl"}, 177 + #endif 178 + {Opt_noacl, "noacl"}, 179 179 {-1, NULL} 180 180 }; 181 181 ··· 278 270 break; 279 271 case Opt_nofscache: 280 272 fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE; 273 + break; 274 + #ifdef CONFIG_CEPH_FS_POSIX_ACL 275 + case Opt_acl: 276 + fsopt->sb_flags |= MS_POSIXACL; 277 + break; 278 + #endif 279 + case Opt_noacl: 280 + fsopt->sb_flags &= ~MS_POSIXACL; 281 281 break; 282 282 default: 283 283 BUG_ON(token); ··· 453 437 seq_puts(m, ",fsc"); 454 438 else 455 439 seq_puts(m, ",nofsc"); 440 + 441 + #ifdef CONFIG_CEPH_FS_POSIX_ACL 442 + if (fsopt->sb_flags & MS_POSIXACL) 443 + seq_puts(m, ",acl"); 444 + else 445 + seq_puts(m, ",noacl"); 446 + #endif 456 447 457 448 if (fsopt->wsize) 458 449 seq_printf(m, ",wsize=%d", fsopt->wsize); ··· 842 819 843 820 s->s_flags = fsc->mount_options->sb_flags; 844 821 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */ 845 - #ifdef CONFIG_CEPH_FS_POSIX_ACL 846 - s->s_flags |= MS_POSIXACL; 847 - #endif 848 822 849 823 s->s_xattr = ceph_xattr_handlers; 850 824 s->s_fs_info = fsc; ··· 931 911 struct ceph_options *opt = NULL; 932 912 933 913 dout("ceph_mount\n"); 914 + 915 + #ifdef CONFIG_CEPH_FS_POSIX_ACL 916 + flags |= MS_POSIXACL; 917 + #endif 934 918 err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path); 935 919 if (err < 0) { 936 920 res = ERR_PTR(err);
+6 -1
fs/ceph/super.h
··· 13 13 #include <linux/wait.h> 14 14 #include <linux/writeback.h> 15 15 #include <linux/slab.h> 16 + #include <linux/posix_acl.h> 16 17 17 18 #include <linux/ceph/libceph.h> 18 19 ··· 744 743 struct posix_acl *ceph_get_acl(struct inode *, int); 745 744 int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type); 746 745 int ceph_init_acl(struct dentry *, struct inode *, struct inode *); 747 - void ceph_forget_all_cached_acls(struct inode *inode); 746 + 747 + static inline void ceph_forget_all_cached_acls(struct inode *inode) 748 + { 749 + forget_all_cached_acls(inode); 750 + } 748 751 749 752 #else 750 753
+40 -14
fs/ceph/xattr.c
··· 12 12 #define XATTR_CEPH_PREFIX "ceph." 13 13 #define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1) 14 14 15 + static int __remove_xattr(struct ceph_inode_info *ci, 16 + struct ceph_inode_xattr *xattr); 17 + 15 18 /* 16 19 * List of handlers for synthetic system.* attributes. Other 17 20 * attributes are handled directly. ··· 322 319 static int __set_xattr(struct ceph_inode_info *ci, 323 320 const char *name, int name_len, 324 321 const char *val, int val_len, 325 - int dirty, 326 - int should_free_name, int should_free_val, 322 + int flags, int update_xattr, 327 323 struct ceph_inode_xattr **newxattr) 328 324 { 329 325 struct rb_node **p; ··· 351 349 xattr = NULL; 352 350 } 353 351 352 + if (update_xattr) { 353 + int err = 0; 354 + if (xattr && (flags & XATTR_CREATE)) 355 + err = -EEXIST; 356 + else if (!xattr && (flags & XATTR_REPLACE)) 357 + err = -ENODATA; 358 + if (err) { 359 + kfree(name); 360 + kfree(val); 361 + return err; 362 + } 363 + if (update_xattr < 0) { 364 + if (xattr) 365 + __remove_xattr(ci, xattr); 366 + kfree(name); 367 + return 0; 368 + } 369 + } 370 + 354 371 if (!xattr) { 355 372 new = 1; 356 373 xattr = *newxattr; 357 374 xattr->name = name; 358 375 xattr->name_len = name_len; 359 - xattr->should_free_name = should_free_name; 376 + xattr->should_free_name = update_xattr; 360 377 361 378 ci->i_xattrs.count++; 362 379 dout("__set_xattr count=%d\n", ci->i_xattrs.count); ··· 385 364 if (xattr->should_free_val) 386 365 kfree((void *)xattr->val); 387 366 388 - if (should_free_name) { 367 + if (update_xattr) { 389 368 kfree((void *)name); 390 369 name = xattr->name; 391 370 } ··· 400 379 xattr->val = ""; 401 380 402 381 xattr->val_len = val_len; 403 - xattr->dirty = dirty; 404 - xattr->should_free_val = (val && should_free_val); 382 + xattr->dirty = update_xattr; 383 + xattr->should_free_val = (val && update_xattr); 405 384 406 385 if (new) { 407 386 rb_link_node(&xattr->node, parent, p); ··· 463 442 struct ceph_inode_xattr *xattr) 464 
443 { 465 444 if (!xattr) 466 - return -EOPNOTSUPP; 445 + return -ENODATA; 467 446 468 447 rb_erase(&xattr->node, &ci->i_xattrs.index); 469 448 ··· 609 588 p += len; 610 589 611 590 err = __set_xattr(ci, name, namelen, val, len, 612 - 0, 0, 0, &xattrs[numattr]); 591 + 0, 0, &xattrs[numattr]); 613 592 614 593 if (err < 0) 615 594 goto bad; ··· 871 850 872 851 dout("setxattr value=%.*s\n", (int)size, value); 873 852 853 + if (!value) 854 + flags |= CEPH_XATTR_REMOVE; 855 + 874 856 /* do request */ 875 857 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR, 876 858 USE_AUTH_MDS); ··· 916 892 struct ceph_inode_info *ci = ceph_inode(inode); 917 893 int issued; 918 894 int err; 919 - int dirty; 895 + int dirty = 0; 920 896 int name_len = strlen(name); 921 897 int val_len = size; 922 898 char *newname = NULL; ··· 977 953 goto retry; 978 954 } 979 955 980 - err = __set_xattr(ci, newname, name_len, newval, 981 - val_len, 1, 1, 1, &xattr); 956 + err = __set_xattr(ci, newname, name_len, newval, val_len, 957 + flags, value ? 1 : -1, &xattr); 982 958 983 - dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); 984 - ci->i_xattrs.dirty = true; 985 - inode->i_ctime = CURRENT_TIME; 959 + if (!err) { 960 + dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); 961 + ci->i_xattrs.dirty = true; 962 + inode->i_ctime = CURRENT_TIME; 963 + } 986 964 987 965 spin_unlock(&ci->i_ceph_lock); 988 966 if (dirty)
+24 -9
fs/cifs/cifsacl.c
··· 865 865 return rc; 866 866 } 867 867 868 - static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, 869 - __u16 fid, u32 *pacllen) 868 + struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, 869 + const struct cifs_fid *cifsfid, u32 *pacllen) 870 870 { 871 871 struct cifs_ntsd *pntsd = NULL; 872 872 unsigned int xid; ··· 877 877 return ERR_CAST(tlink); 878 878 879 879 xid = get_xid(); 880 - rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen); 880 + rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd, 881 + pacllen); 881 882 free_xid(xid); 882 883 883 884 cifs_put_tlink(tlink); ··· 947 946 if (!open_file) 948 947 return get_cifs_acl_by_path(cifs_sb, path, pacllen); 949 948 950 - pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->fid.netfid, pacllen); 949 + pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen); 951 950 cifsFileInfo_put(open_file); 952 951 return pntsd; 953 952 } ··· 1007 1006 /* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */ 1008 1007 int 1009 1008 cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, 1010 - struct inode *inode, const char *path, const __u16 *pfid) 1009 + struct inode *inode, const char *path, 1010 + const struct cifs_fid *pfid) 1011 1011 { 1012 1012 struct cifs_ntsd *pntsd = NULL; 1013 1013 u32 acllen = 0; 1014 1014 int rc = 0; 1015 + struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); 1016 + struct cifs_tcon *tcon; 1015 1017 1016 1018 cifs_dbg(NOISY, "converting ACL to mode for %s\n", path); 1017 1019 1018 - if (pfid) 1019 - pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen); 1020 - else 1021 - pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen); 1020 + if (IS_ERR(tlink)) 1021 + return PTR_ERR(tlink); 1022 + tcon = tlink_tcon(tlink); 1022 1023 1024 + if (pfid && (tcon->ses->server->ops->get_acl_by_fid)) 1025 + pntsd = tcon->ses->server->ops->get_acl_by_fid(cifs_sb, pfid, 1026 + &acllen); 1027 + else if 
(tcon->ses->server->ops->get_acl) 1028 + pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path, 1029 + &acllen); 1030 + else { 1031 + cifs_put_tlink(tlink); 1032 + return -EOPNOTSUPP; 1033 + } 1023 1034 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */ 1024 1035 if (IS_ERR(pntsd)) { 1025 1036 rc = PTR_ERR(pntsd); ··· 1042 1029 if (rc) 1043 1030 cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc); 1044 1031 } 1032 + 1033 + cifs_put_tlink(tlink); 1045 1034 1046 1035 return rc; 1047 1036 }
+2
fs/cifs/cifsglob.h
··· 398 398 const struct nls_table *, int); 399 399 struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *, 400 400 const char *, u32 *); 401 + struct cifs_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *, 402 + const struct cifs_fid *, u32 *); 401 403 int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *, 402 404 int); 403 405 };
+4 -2
fs/cifs/cifsproto.h
··· 151 151 152 152 extern int cifs_get_inode_info(struct inode **inode, const char *full_path, 153 153 FILE_ALL_INFO *data, struct super_block *sb, 154 - int xid, const __u16 *fid); 154 + int xid, const struct cifs_fid *fid); 155 155 extern int cifs_get_inode_info_unix(struct inode **pinode, 156 156 const unsigned char *search_path, 157 157 struct super_block *sb, unsigned int xid); ··· 162 162 const unsigned int xid); 163 163 extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, 164 164 struct cifs_fattr *fattr, struct inode *inode, 165 - const char *path, const __u16 *pfid); 165 + const char *path, const struct cifs_fid *pfid); 166 166 extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64, 167 167 kuid_t, kgid_t); 168 168 extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *, 169 169 const char *, u32 *); 170 + extern struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *, 171 + const struct cifs_fid *, u32 *); 170 172 extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *, 171 173 const char *, int); 172 174
+1 -1
fs/cifs/dir.c
··· 378 378 xid); 379 379 else { 380 380 rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, 381 - xid, &fid->netfid); 381 + xid, fid); 382 382 if (newinode) { 383 383 if (server->ops->set_lease_key) 384 384 server->ops->set_lease_key(newinode, fid);
+35 -4
fs/cifs/file.c
··· 244 244 xid); 245 245 else 246 246 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, 247 - xid, &fid->netfid); 247 + xid, fid); 248 248 249 249 out: 250 250 kfree(buf); ··· 2389 2389 unsigned long nr_segs, loff_t *poffset) 2390 2390 { 2391 2391 unsigned long nr_pages, i; 2392 - size_t copied, len, cur_len; 2392 + size_t bytes, copied, len, cur_len; 2393 2393 ssize_t total_written = 0; 2394 2394 loff_t offset; 2395 2395 struct iov_iter it; ··· 2444 2444 2445 2445 save_len = cur_len; 2446 2446 for (i = 0; i < nr_pages; i++) { 2447 - copied = min_t(const size_t, cur_len, PAGE_SIZE); 2447 + bytes = min_t(const size_t, cur_len, PAGE_SIZE); 2448 2448 copied = iov_iter_copy_from_user(wdata->pages[i], &it, 2449 - 0, copied); 2449 + 0, bytes); 2450 2450 cur_len -= copied; 2451 2451 iov_iter_advance(&it, copied); 2452 + /* 2453 + * If we didn't copy as much as we expected, then that 2454 + * may mean we trod into an unmapped area. Stop copying 2455 + * at that point. On the next pass through the big 2456 + * loop, we'll likely end up getting a zero-length 2457 + * write and bailing out of it. 2458 + */ 2459 + if (copied < bytes) 2460 + break; 2452 2461 } 2453 2462 cur_len = save_len - cur_len; 2463 + 2464 + /* 2465 + * If we have no data to send, then that probably means that 2466 + * the copy above failed altogether. That's most likely because 2467 + * the address in the iovec was bogus. Set the rc to -EFAULT, 2468 + * free anything we allocated and bail out. 2469 + */ 2470 + if (!cur_len) { 2471 + for (i = 0; i < nr_pages; i++) 2472 + put_page(wdata->pages[i]); 2473 + kfree(wdata); 2474 + rc = -EFAULT; 2475 + break; 2476 + } 2477 + 2478 + /* 2479 + * i + 1 now represents the number of pages we actually used in 2480 + * the copy phase above. Bring nr_pages down to that, and free 2481 + * any pages that we didn't use. 
2482 + */ 2483 + for ( ; nr_pages > i + 1; nr_pages--) 2484 + put_page(wdata->pages[nr_pages - 1]); 2454 2485 2455 2486 wdata->sync_mode = WB_SYNC_ALL; 2456 2487 wdata->nr_pages = nr_pages;
+1 -1
fs/cifs/inode.c
··· 677 677 int 678 678 cifs_get_inode_info(struct inode **inode, const char *full_path, 679 679 FILE_ALL_INFO *data, struct super_block *sb, int xid, 680 - const __u16 *fid) 680 + const struct cifs_fid *fid) 681 681 { 682 682 bool validinum = false; 683 683 __u16 srchflgs;
+1
fs/cifs/smb1ops.c
··· 1073 1073 #endif /* CIFS_XATTR */ 1074 1074 #ifdef CONFIG_CIFS_ACL 1075 1075 .get_acl = get_cifs_acl, 1076 + .get_acl_by_fid = get_cifs_acl_by_fid, 1076 1077 .set_acl = set_cifs_acl, 1077 1078 #endif /* CIFS_ACL */ 1078 1079 };
+3
fs/cifs/smb2glob.h
··· 57 57 #define SMB2_CMACAES_SIZE (16) 58 58 #define SMB3_SIGNKEY_SIZE (16) 59 59 60 + /* Maximum buffer size value we can send with 1 credit */ 61 + #define SMB2_MAX_BUFFER_SIZE 65536 62 + 60 63 #endif /* _SMB2_GLOB_H */
+4 -10
fs/cifs/smb2ops.c
··· 182 182 /* start with specified wsize, or default */ 183 183 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE; 184 184 wsize = min_t(unsigned int, wsize, server->max_write); 185 - /* 186 - * limit write size to 2 ** 16, because we don't support multicredit 187 - * requests now. 188 - */ 189 - wsize = min_t(unsigned int, wsize, 2 << 15); 185 + /* set it to the maximum buffer size value we can send with 1 credit */ 186 + wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); 190 187 191 188 return wsize; 192 189 } ··· 197 200 /* start with specified rsize, or default */ 198 201 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE; 199 202 rsize = min_t(unsigned int, rsize, server->max_read); 200 - /* 201 - * limit write size to 2 ** 16, because we don't support multicredit 202 - * requests now. 203 - */ 204 - rsize = min_t(unsigned int, rsize, 2 << 15); 203 + /* set it to the maximum buffer size value we can send with 1 credit */ 204 + rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE); 205 205 206 206 return rsize; 207 207 }
+3 -1
fs/cifs/smb2pdu.c
··· 413 413 414 414 /* SMB2 only has an extended negflavor */ 415 415 server->negflavor = CIFS_NEGFLAVOR_EXTENDED; 416 - server->maxBuf = le32_to_cpu(rsp->MaxTransactSize); 416 + /* set it to the maximum buffer size value we can send with 1 credit */ 417 + server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize), 418 + SMB2_MAX_BUFFER_SIZE); 417 419 server->max_read = le32_to_cpu(rsp->MaxReadSize); 418 420 server->max_write = le32_to_cpu(rsp->MaxWriteSize); 419 421 /* BB Do we need to validate the SecurityMode? */
+2
fs/ext4/ext4.h
··· 771 771 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \ 772 772 (einode)->xtime.tv_sec = \ 773 773 (signed)le32_to_cpu((raw_inode)->xtime); \ 774 + else \ 775 + (einode)->xtime.tv_sec = 0; \ 774 776 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \ 775 777 ext4_decode_extra_time(&(einode)->xtime, \ 776 778 raw_inode->xtime ## _extra); \
+1
fs/ext4/extents.c
··· 3906 3906 } else 3907 3907 err = ret; 3908 3908 map->m_flags |= EXT4_MAP_MAPPED; 3909 + map->m_pblk = newblock; 3909 3910 if (allocated > map->m_len) 3910 3911 allocated = map->m_len; 3911 3912 map->m_len = allocated;
+2 -1
fs/ext4/ioctl.c
··· 140 140 handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2); 141 141 if (IS_ERR(handle)) { 142 142 err = -EINVAL; 143 - goto swap_boot_out; 143 + goto journal_err_out; 144 144 } 145 145 146 146 /* Protect extent tree against block allocations via delalloc */ ··· 198 198 199 199 ext4_double_up_write_data_sem(inode, inode_bl); 200 200 201 + journal_err_out: 201 202 ext4_inode_resume_unlocked_dio(inode); 202 203 ext4_inode_resume_unlocked_dio(inode_bl); 203 204
+21 -13
fs/ext4/resize.c
··· 243 243 ext4_group_t group; 244 244 ext4_group_t last_group; 245 245 unsigned overhead; 246 + __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0; 246 247 247 248 BUG_ON(flex_gd->count == 0 || group_data == NULL); 248 249 ··· 267 266 src_group++; 268 267 for (; src_group <= last_group; src_group++) { 269 268 overhead = ext4_group_overhead_blocks(sb, src_group); 270 - if (overhead != 0) 269 + if (overhead == 0) 271 270 last_blk += group_data[src_group - group].blocks_count; 272 271 else 273 272 break; ··· 281 280 group = ext4_get_group_number(sb, start_blk - 1); 282 281 group -= group_data[0].group; 283 282 group_data[group].free_blocks_count--; 284 - if (flexbg_size > 1) 285 - flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT; 283 + flex_gd->bg_flags[group] &= uninit_mask; 286 284 } 287 285 288 286 /* Allocate inode bitmaps */ ··· 292 292 group = ext4_get_group_number(sb, start_blk - 1); 293 293 group -= group_data[0].group; 294 294 group_data[group].free_blocks_count--; 295 - if (flexbg_size > 1) 296 - flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT; 295 + flex_gd->bg_flags[group] &= uninit_mask; 297 296 } 298 297 299 298 /* Allocate inode tables */ 300 299 for (; it_index < flex_gd->count; it_index++) { 301 - if (start_blk + EXT4_SB(sb)->s_itb_per_group > last_blk) 300 + unsigned int itb = EXT4_SB(sb)->s_itb_per_group; 301 + ext4_fsblk_t next_group_start; 302 + 303 + if (start_blk + itb > last_blk) 302 304 goto next_group; 303 305 group_data[it_index].inode_table = start_blk; 304 - group = ext4_get_group_number(sb, start_blk - 1); 306 + group = ext4_get_group_number(sb, start_blk); 307 + next_group_start = ext4_group_first_block_no(sb, group + 1); 305 308 group -= group_data[0].group; 306 - group_data[group].free_blocks_count -= 307 - EXT4_SB(sb)->s_itb_per_group; 308 - if (flexbg_size > 1) 309 - flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT; 310 309 310 + if (start_blk + itb > next_group_start) { 311 + flex_gd->bg_flags[group + 1] &= 
uninit_mask; 312 + overhead = start_blk + itb - next_group_start; 313 + group_data[group + 1].free_blocks_count -= overhead; 314 + itb -= overhead; 315 + } 316 + 317 + group_data[group].free_blocks_count -= itb; 318 + flex_gd->bg_flags[group] &= uninit_mask; 311 319 start_blk += EXT4_SB(sb)->s_itb_per_group; 312 320 } 313 321 ··· 409 401 start = ext4_group_first_block_no(sb, group); 410 402 group -= flex_gd->groups[0].group; 411 403 412 - count2 = sb->s_blocksize * 8 - (block - start); 404 + count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start); 413 405 if (count2 > count) 414 406 count2 = count; 415 407 ··· 628 620 if (err) 629 621 goto out; 630 622 count = group_table_count[j]; 631 - start = group_data[i].block_bitmap; 623 + start = (&group_data[i].block_bitmap)[j]; 632 624 block = start; 633 625 } 634 626
+13 -7
fs/ext4/super.c
··· 3695 3695 for (i = 0; i < 4; i++) 3696 3696 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); 3697 3697 sbi->s_def_hash_version = es->s_def_hash_version; 3698 - i = le32_to_cpu(es->s_flags); 3699 - if (i & EXT2_FLAGS_UNSIGNED_HASH) 3700 - sbi->s_hash_unsigned = 3; 3701 - else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { 3698 + if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) { 3699 + i = le32_to_cpu(es->s_flags); 3700 + if (i & EXT2_FLAGS_UNSIGNED_HASH) 3701 + sbi->s_hash_unsigned = 3; 3702 + else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { 3702 3703 #ifdef __CHAR_UNSIGNED__ 3703 - es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); 3704 - sbi->s_hash_unsigned = 3; 3704 + if (!(sb->s_flags & MS_RDONLY)) 3705 + es->s_flags |= 3706 + cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); 3707 + sbi->s_hash_unsigned = 3; 3705 3708 #else 3706 - es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); 3709 + if (!(sb->s_flags & MS_RDONLY)) 3710 + es->s_flags |= 3711 + cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); 3707 3712 #endif 3713 + } 3708 3714 } 3709 3715 3710 3716 /* Handle clustersize */
+5
fs/fscache/object-list.c
··· 50 50 struct fscache_object *xobj; 51 51 struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL; 52 52 53 + ASSERT(RB_EMPTY_NODE(&obj->objlist_link)); 54 + 53 55 write_lock(&fscache_object_list_lock); 54 56 55 57 while (*p) { ··· 77 75 */ 78 76 void fscache_objlist_remove(struct fscache_object *obj) 79 77 { 78 + if (RB_EMPTY_NODE(&obj->objlist_link)) 79 + return; 80 + 80 81 write_lock(&fscache_object_list_lock); 81 82 82 83 BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
+3
fs/fscache/object.c
··· 314 314 object->cache = cache; 315 315 object->cookie = cookie; 316 316 object->parent = NULL; 317 + #ifdef CONFIG_FSCACHE_OBJECT_LIST 318 + RB_CLEAR_NODE(&object->objlist_link); 319 + #endif 317 320 318 321 object->oob_event_mask = 0; 319 322 for (t = object->oob_table; t->events; t++)
+4 -2
fs/jbd2/transaction.c
··· 514 514 * similarly constrained call sites 515 515 */ 516 516 ret = start_this_handle(journal, handle, GFP_NOFS); 517 - if (ret < 0) 517 + if (ret < 0) { 518 518 jbd2_journal_free_reserved(handle); 519 + return ret; 520 + } 519 521 handle->h_type = type; 520 522 handle->h_line_no = line_no; 521 - return ret; 523 + return 0; 522 524 } 523 525 EXPORT_SYMBOL(jbd2_journal_start_reserved); 524 526
+2
fs/jfs/acl.c
··· 86 86 rc = posix_acl_equiv_mode(acl, &inode->i_mode); 87 87 if (rc < 0) 88 88 return rc; 89 + inode->i_ctime = CURRENT_TIME; 90 + mark_inode_dirty(inode); 89 91 if (rc == 0) 90 92 acl = NULL; 91 93 break;
+10 -4
fs/nfs/inode.c
··· 164 164 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) { 165 165 nfs_fscache_invalidate(inode); 166 166 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 167 - | NFS_INO_INVALID_LABEL 168 167 | NFS_INO_INVALID_DATA 169 168 | NFS_INO_INVALID_ACCESS 170 169 | NFS_INO_INVALID_ACL 171 170 | NFS_INO_REVAL_PAGECACHE; 172 171 } else 173 172 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 174 - | NFS_INO_INVALID_LABEL 175 173 | NFS_INO_INVALID_ACCESS 176 174 | NFS_INO_INVALID_ACL 177 175 | NFS_INO_REVAL_PAGECACHE; 176 + nfs_zap_label_cache_locked(nfsi); 178 177 } 179 178 180 179 void nfs_zap_caches(struct inode *inode) ··· 265 266 } 266 267 267 268 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 269 + static void nfs_clear_label_invalid(struct inode *inode) 270 + { 271 + spin_lock(&inode->i_lock); 272 + NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_LABEL; 273 + spin_unlock(&inode->i_lock); 274 + } 275 + 268 276 void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, 269 277 struct nfs4_label *label) 270 278 { ··· 289 283 __func__, 290 284 (char *)label->label, 291 285 label->len, error); 286 + nfs_clear_label_invalid(inode); 292 287 } 293 288 } 294 289 ··· 1655 1648 inode->i_blocks = fattr->du.nfs2.blocks; 1656 1649 1657 1650 /* Update attrtimeo value if we're out of the unstable period */ 1658 - if (invalid & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL)) { 1651 + if (invalid & NFS_INO_INVALID_ATTR) { 1659 1652 nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE); 1660 1653 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); 1661 1654 nfsi->attrtimeo_timestamp = now; ··· 1668 1661 } 1669 1662 } 1670 1663 invalid &= ~NFS_INO_INVALID_ATTR; 1671 - invalid &= ~NFS_INO_INVALID_LABEL; 1672 1664 /* Don't invalidate the data if we were to blame */ 1673 1665 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) 1674 1666 || S_ISLNK(inode->i_mode)))
+11 -1
fs/nfs/internal.h
··· 176 176 extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *, 177 177 struct nfs_fh *); 178 178 extern int nfs4_update_server(struct nfs_server *server, const char *hostname, 179 - struct sockaddr *sap, size_t salen); 179 + struct sockaddr *sap, size_t salen, 180 + struct net *net); 180 181 extern void nfs_free_server(struct nfs_server *server); 181 182 extern struct nfs_server *nfs_clone_server(struct nfs_server *, 182 183 struct nfs_fh *, ··· 280 279 } 281 280 return; 282 281 } 282 + 283 + static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi) 284 + { 285 + if (nfs_server_capable(&nfsi->vfs_inode, NFS_CAP_SECURITY_LABEL)) 286 + nfsi->cache_validity |= NFS_INO_INVALID_LABEL; 287 + } 283 288 #else 284 289 static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; } 285 290 static inline void nfs4_label_free(void *label) {} 291 + static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi) 292 + { 293 + } 286 294 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 287 295 288 296 /* proc.c */
+1
fs/nfs/nfs3proc.c
··· 18 18 #include <linux/lockd/bind.h> 19 19 #include <linux/nfs_mount.h> 20 20 #include <linux/freezer.h> 21 + #include <linux/xattr.h> 21 22 22 23 #include "iostat.h" 23 24 #include "internal.h"
+4 -3
fs/nfs/nfs4client.c
··· 1135 1135 * @hostname: new end-point's hostname 1136 1136 * @sap: new end-point's socket address 1137 1137 * @salen: size of "sap" 1138 + * @net: net namespace 1138 1139 * 1139 1140 * The nfs_server must be quiescent before this function is invoked. 1140 1141 * Either its session is drained (NFSv4.1+), or its transport is ··· 1144 1143 * Returns zero on success, or a negative errno value. 1145 1144 */ 1146 1145 int nfs4_update_server(struct nfs_server *server, const char *hostname, 1147 - struct sockaddr *sap, size_t salen) 1146 + struct sockaddr *sap, size_t salen, struct net *net) 1148 1147 { 1149 1148 struct nfs_client *clp = server->nfs_client; 1150 1149 struct rpc_clnt *clnt = server->client; 1151 1150 struct xprt_create xargs = { 1152 1151 .ident = clp->cl_proto, 1153 - .net = &init_net, 1152 + .net = net, 1154 1153 .dstaddr = sap, 1155 1154 .addrlen = salen, 1156 1155 .servername = hostname, ··· 1190 1189 error = nfs4_set_client(server, hostname, sap, salen, buf, 1191 1190 clp->cl_rpcclient->cl_auth->au_flavor, 1192 1191 clp->cl_proto, clnt->cl_timeout, 1193 - clp->cl_minorversion, clp->cl_net); 1192 + clp->cl_minorversion, net); 1194 1193 nfs_put_client(clp); 1195 1194 if (error != 0) { 1196 1195 nfs_server_insert_lists(server);
+6 -6
fs/nfs/nfs4namespace.c
··· 121 121 } 122 122 123 123 static size_t nfs_parse_server_name(char *string, size_t len, 124 - struct sockaddr *sa, size_t salen, struct nfs_server *server) 124 + struct sockaddr *sa, size_t salen, struct net *net) 125 125 { 126 - struct net *net = rpc_net_ns(server->client); 127 126 ssize_t ret; 128 127 129 128 ret = rpc_pton(net, string, len, sa, salen); ··· 222 223 const struct nfs4_fs_location *location) 223 224 { 224 225 const size_t addr_bufsize = sizeof(struct sockaddr_storage); 226 + struct net *net = rpc_net_ns(NFS_SB(mountdata->sb)->client); 225 227 struct vfsmount *mnt = ERR_PTR(-ENOENT); 226 228 char *mnt_path; 227 229 unsigned int maxbuflen; ··· 248 248 continue; 249 249 250 250 mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len, 251 - mountdata->addr, addr_bufsize, 252 - NFS_SB(mountdata->sb)); 251 + mountdata->addr, addr_bufsize, net); 253 252 if (mountdata->addrlen == 0) 254 253 continue; 255 254 ··· 418 419 const struct nfs4_fs_location *location) 419 420 { 420 421 const size_t addr_bufsize = sizeof(struct sockaddr_storage); 422 + struct net *net = rpc_net_ns(server->client); 421 423 struct sockaddr *sap; 422 424 unsigned int s; 423 425 size_t salen; ··· 440 440 continue; 441 441 442 442 salen = nfs_parse_server_name(buf->data, buf->len, 443 - sap, addr_bufsize, server); 443 + sap, addr_bufsize, net); 444 444 if (salen == 0) 445 445 continue; 446 446 rpc_set_port(sap, NFS_PORT); ··· 450 450 if (hostname == NULL) 451 451 break; 452 452 453 - error = nfs4_update_server(server, hostname, sap, salen); 453 + error = nfs4_update_server(server, hostname, sap, salen, net); 454 454 kfree(hostname); 455 455 if (error == 0) 456 456 break;
+4 -1
fs/nfs/nfs4state.c
··· 1015 1015 if (ret == -EIO) 1016 1016 /* A lost lock - don't even consider delegations */ 1017 1017 goto out; 1018 - if (nfs4_copy_delegation_stateid(dst, state->inode, fmode)) 1018 + /* returns true if delegation stateid found and copied */ 1019 + if (nfs4_copy_delegation_stateid(dst, state->inode, fmode)) { 1020 + ret = 0; 1019 1021 goto out; 1022 + } 1020 1023 if (ret != -ENOENT) 1021 1024 /* nfs4_copy_delegation_stateid() didn't over-write 1022 1025 * dst, so it still has the lock stateid which we now
+190 -695
fs/reiserfs/do_balan.c
··· 324 324 switch (flag) { 325 325 case M_INSERT: /* insert item into L[0] */ 326 326 327 - if (item_pos == tb->lnum[0] - 1 328 - && tb->lbytes != -1) { 327 + if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) { 329 328 /* part of new item falls into L[0] */ 330 329 int new_item_len; 331 330 int version; 332 331 333 - ret_val = 334 - leaf_shift_left(tb, tb->lnum[0] - 1, 335 - -1); 332 + ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, -1); 336 333 337 334 /* Calculate item length to insert to S[0] */ 338 - new_item_len = 339 - ih_item_len(ih) - tb->lbytes; 335 + new_item_len = ih_item_len(ih) - tb->lbytes; 340 336 /* Calculate and check item length to insert to L[0] */ 341 - put_ih_item_len(ih, 342 - ih_item_len(ih) - 343 - new_item_len); 337 + put_ih_item_len(ih, ih_item_len(ih) - new_item_len); 344 338 345 339 RFALSE(ih_item_len(ih) <= 0, 346 340 "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d", ··· 343 349 /* Insert new item into L[0] */ 344 350 buffer_info_init_left(tb, &bi); 345 351 leaf_insert_into_buf(&bi, 346 - n + item_pos - 347 - ret_val, ih, body, 348 - zeros_num > 349 - ih_item_len(ih) ? 350 - ih_item_len(ih) : 351 - zeros_num); 352 + n + item_pos - ret_val, ih, body, 353 + zeros_num > ih_item_len(ih) ? ih_item_len(ih) : zeros_num); 352 354 353 355 version = ih_version(ih); 354 356 355 357 /* Calculate key component, item length and body to insert into S[0] */ 356 - set_le_ih_k_offset(ih, 357 - le_ih_k_offset(ih) + 358 - (tb-> 359 - lbytes << 360 - (is_indirect_le_ih 361 - (ih) ? tb->tb_sb-> 362 - s_blocksize_bits - 363 - UNFM_P_SHIFT : 364 - 0))); 358 + set_le_ih_k_offset(ih, le_ih_k_offset(ih) + 359 + (tb-> lbytes << (is_indirect_le_ih(ih) ? 
tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0))); 365 360 366 361 put_ih_item_len(ih, new_item_len); 367 362 if (tb->lbytes > zeros_num) { 368 - body += 369 - (tb->lbytes - zeros_num); 363 + body += (tb->lbytes - zeros_num); 370 364 zeros_num = 0; 371 365 } else 372 366 zeros_num -= tb->lbytes; ··· 365 383 } else { 366 384 /* new item in whole falls into L[0] */ 367 385 /* Shift lnum[0]-1 items to L[0] */ 368 - ret_val = 369 - leaf_shift_left(tb, tb->lnum[0] - 1, 370 - tb->lbytes); 386 + ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes); 371 387 /* Insert new item into L[0] */ 372 388 buffer_info_init_left(tb, &bi); 373 - leaf_insert_into_buf(&bi, 374 - n + item_pos - 375 - ret_val, ih, body, 376 - zeros_num); 389 + leaf_insert_into_buf(&bi, n + item_pos - ret_val, ih, body, zeros_num); 377 390 tb->insert_size[0] = 0; 378 391 zeros_num = 0; 379 392 } ··· 376 399 377 400 case M_PASTE: /* append item in L[0] */ 378 401 379 - if (item_pos == tb->lnum[0] - 1 380 - && tb->lbytes != -1) { 402 + if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) { 381 403 /* we must shift the part of the appended item */ 382 - if (is_direntry_le_ih 383 - (B_N_PITEM_HEAD(tbS0, item_pos))) { 404 + if (is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) { 384 405 385 406 RFALSE(zeros_num, 386 407 "PAP-12090: invalid parameter in case of a directory"); 387 408 /* directory item */ 388 409 if (tb->lbytes > pos_in_item) { 389 410 /* new directory entry falls into L[0] */ 390 - struct item_head 391 - *pasted; 392 - int l_pos_in_item = 393 - pos_in_item; 411 + struct item_head *pasted; 412 + int l_pos_in_item = pos_in_item; 394 413 395 414 /* Shift lnum[0] - 1 items in whole. 
Shift lbytes - 1 entries from given directory item */ 396 - ret_val = 397 - leaf_shift_left(tb, 398 - tb-> 399 - lnum 400 - [0], 401 - tb-> 402 - lbytes 403 - - 404 - 1); 405 - if (ret_val 406 - && !item_pos) { 407 - pasted = 408 - B_N_PITEM_HEAD 409 - (tb->L[0], 410 - B_NR_ITEMS 411 - (tb-> 412 - L[0]) - 413 - 1); 414 - l_pos_in_item += 415 - I_ENTRY_COUNT 416 - (pasted) - 417 - (tb-> 418 - lbytes - 419 - 1); 415 + ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes-1); 416 + if (ret_val && !item_pos) { 417 + pasted = B_N_PITEM_HEAD(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1); 418 + l_pos_in_item += I_ENTRY_COUNT(pasted) - (tb->lbytes -1); 420 419 } 421 420 422 421 /* Append given directory entry to directory item */ 423 422 buffer_info_init_left(tb, &bi); 424 - leaf_paste_in_buffer 425 - (&bi, 426 - n + item_pos - 427 - ret_val, 428 - l_pos_in_item, 429 - tb->insert_size[0], 430 - body, zeros_num); 423 + leaf_paste_in_buffer(&bi, n + item_pos - ret_val, l_pos_in_item, tb->insert_size[0], body, zeros_num); 431 424 432 425 /* previous string prepared space for pasting new entry, following string pastes this entry */ 433 426 434 427 /* when we have merge directory item, pos_in_item has been changed too */ 435 428 436 429 /* paste new directory entry. 1 is entry number */ 437 - leaf_paste_entries(&bi, 438 - n + 439 - item_pos 440 - - 441 - ret_val, 442 - l_pos_in_item, 443 - 1, 444 - (struct 445 - reiserfs_de_head 446 - *) 447 - body, 448 - body 449 - + 450 - DEH_SIZE, 451 - tb-> 452 - insert_size 453 - [0] 454 - ); 430 + leaf_paste_entries(&bi, n + item_pos - ret_val, l_pos_in_item, 431 + 1, (struct reiserfs_de_head *) body, 432 + body + DEH_SIZE, tb->insert_size[0]); 455 433 tb->insert_size[0] = 0; 456 434 } else { 457 435 /* new directory item doesn't fall into L[0] */ 458 436 /* Shift lnum[0]-1 items in whole. 
Shift lbytes directory entries from directory item number lnum[0] */ 459 - leaf_shift_left(tb, 460 - tb-> 461 - lnum[0], 462 - tb-> 463 - lbytes); 437 + leaf_shift_left(tb, tb->lnum[0], tb->lbytes); 464 438 } 465 439 /* Calculate new position to append in item body */ 466 440 pos_in_item -= tb->lbytes; 467 441 } else { 468 442 /* regular object */ 469 - RFALSE(tb->lbytes <= 0, 470 - "PAP-12095: there is nothing to shift to L[0]. lbytes=%d", 471 - tb->lbytes); 472 - RFALSE(pos_in_item != 473 - ih_item_len 474 - (B_N_PITEM_HEAD 475 - (tbS0, item_pos)), 443 + RFALSE(tb->lbytes <= 0, "PAP-12095: there is nothing to shift to L[0]. lbytes=%d", tb->lbytes); 444 + RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)), 476 445 "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d", 477 - ih_item_len 478 - (B_N_PITEM_HEAD 479 - (tbS0, item_pos)), 480 - pos_in_item); 446 + ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),pos_in_item); 481 447 482 448 if (tb->lbytes >= pos_in_item) { 483 449 /* appended item will be in L[0] in whole */ 484 450 int l_n; 485 451 486 452 /* this bytes number must be appended to the last item of L[h] */ 487 - l_n = 488 - tb->lbytes - 489 - pos_in_item; 453 + l_n = tb->lbytes - pos_in_item; 490 454 491 455 /* Calculate new insert_size[0] */ 492 - tb->insert_size[0] -= 493 - l_n; 456 + tb->insert_size[0] -= l_n; 494 457 495 - RFALSE(tb-> 496 - insert_size[0] <= 497 - 0, 458 + RFALSE(tb->insert_size[0] <= 0, 498 459 "PAP-12105: there is nothing to paste into L[0]. 
insert_size=%d", 499 - tb-> 500 - insert_size[0]); 501 - ret_val = 502 - leaf_shift_left(tb, 503 - tb-> 504 - lnum 505 - [0], 506 - ih_item_len 507 - (B_N_PITEM_HEAD 508 - (tbS0, 509 - item_pos))); 460 + tb->insert_size[0]); 461 + ret_val = leaf_shift_left(tb, tb->lnum[0], ih_item_len 462 + (B_N_PITEM_HEAD(tbS0, item_pos))); 510 463 /* Append to body of item in L[0] */ 511 464 buffer_info_init_left(tb, &bi); 512 465 leaf_paste_in_buffer 513 - (&bi, 514 - n + item_pos - 515 - ret_val, 516 - ih_item_len 517 - (B_N_PITEM_HEAD 518 - (tb->L[0], 519 - n + item_pos - 520 - ret_val)), l_n, 521 - body, 522 - zeros_num > 523 - l_n ? l_n : 524 - zeros_num); 466 + (&bi, n + item_pos - ret_val, ih_item_len 467 + (B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val)), 468 + l_n, body, 469 + zeros_num > l_n ? l_n : zeros_num); 525 470 /* 0-th item in S0 can be only of DIRECT type when l_n != 0 */ 526 471 { 527 472 int version; 528 - int temp_l = 529 - l_n; 473 + int temp_l = l_n; 530 474 531 - RFALSE 532 - (ih_item_len 533 - (B_N_PITEM_HEAD 534 - (tbS0, 535 - 0)), 475 + RFALSE(ih_item_len(B_N_PITEM_HEAD(tbS0, 0)), 536 476 "PAP-12106: item length must be 0"); 537 - RFALSE 538 - (comp_short_le_keys 539 - (B_N_PKEY 540 - (tbS0, 0), 541 - B_N_PKEY 542 - (tb->L[0], 543 - n + 544 - item_pos 545 - - 546 - ret_val)), 477 + RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY 478 + (tb->L[0], n + item_pos - ret_val)), 547 479 "PAP-12107: items must be of the same file"); 548 480 if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val))) { 549 - temp_l = 550 - l_n 551 - << 552 - (tb-> 553 - tb_sb-> 554 - s_blocksize_bits 555 - - 556 - UNFM_P_SHIFT); 481 + temp_l = l_n << (tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT); 557 482 } 558 483 /* update key of first item in S0 */ 559 - version = 560 - ih_version 561 - (B_N_PITEM_HEAD 562 - (tbS0, 0)); 563 - set_le_key_k_offset 564 - (version, 565 - B_N_PKEY 566 - (tbS0, 0), 567 - le_key_k_offset 568 - (version, 569 - B_N_PKEY 570 - 
(tbS0, 571 - 0)) + 572 - temp_l); 484 + version = ih_version(B_N_PITEM_HEAD(tbS0, 0)); 485 + set_le_key_k_offset(version, B_N_PKEY(tbS0, 0), 486 + le_key_k_offset(version,B_N_PKEY(tbS0, 0)) + temp_l); 573 487 /* update left delimiting key */ 574 - set_le_key_k_offset 575 - (version, 576 - B_N_PDELIM_KEY 577 - (tb-> 578 - CFL[0], 579 - tb-> 580 - lkey[0]), 581 - le_key_k_offset 582 - (version, 583 - B_N_PDELIM_KEY 584 - (tb-> 585 - CFL[0], 586 - tb-> 587 - lkey[0])) 588 - + temp_l); 488 + set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), 489 + le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0])) + temp_l); 589 490 } 590 491 591 492 /* Calculate new body, position in item and insert_size[0] */ 592 493 if (l_n > zeros_num) { 593 - body += 594 - (l_n - 595 - zeros_num); 494 + body += (l_n - zeros_num); 596 495 zeros_num = 0; 597 496 } else 598 - zeros_num -= 599 - l_n; 497 + zeros_num -= l_n; 600 498 pos_in_item = 0; 601 499 602 - RFALSE 603 - (comp_short_le_keys 604 - (B_N_PKEY(tbS0, 0), 605 - B_N_PKEY(tb->L[0], 606 - B_NR_ITEMS 607 - (tb-> 608 - L[0]) - 609 - 1)) 610 - || 611 - !op_is_left_mergeable 612 - (B_N_PKEY(tbS0, 0), 613 - tbS0->b_size) 614 - || 615 - !op_is_left_mergeable 616 - (B_N_PDELIM_KEY 617 - (tb->CFL[0], 618 - tb->lkey[0]), 619 - tbS0->b_size), 500 + RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1)) 501 + || !op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size) 502 + || !op_is_left_mergeable(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), tbS0->b_size), 620 503 "PAP-12120: item must be merge-able with left neighboring item"); 621 504 } else { /* only part of the appended item will be in L[0] */ 622 505 623 506 /* Calculate position in item for append in S[0] */ 624 - pos_in_item -= 625 - tb->lbytes; 507 + pos_in_item -= tb->lbytes; 626 508 627 - RFALSE(pos_in_item <= 0, 628 - "PAP-12125: no place for paste. 
pos_in_item=%d", 629 - pos_in_item); 509 + RFALSE(pos_in_item <= 0, "PAP-12125: no place for paste. pos_in_item=%d", pos_in_item); 630 510 631 511 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ 632 - leaf_shift_left(tb, 633 - tb-> 634 - lnum[0], 635 - tb-> 636 - lbytes); 512 + leaf_shift_left(tb, tb->lnum[0], tb->lbytes); 637 513 } 638 514 } 639 515 } else { /* appended item will be in L[0] in whole */ ··· 495 665 496 666 if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) { /* if we paste into first item of S[0] and it is left mergable */ 497 667 /* then increment pos_in_item by the size of the last item in L[0] */ 498 - pasted = 499 - B_N_PITEM_HEAD(tb->L[0], 500 - n - 1); 668 + pasted = B_N_PITEM_HEAD(tb->L[0], n - 1); 501 669 if (is_direntry_le_ih(pasted)) 502 - pos_in_item += 503 - ih_entry_count 504 - (pasted); 670 + pos_in_item += ih_entry_count(pasted); 505 671 else 506 - pos_in_item += 507 - ih_item_len(pasted); 672 + pos_in_item += ih_item_len(pasted); 508 673 } 509 674 510 675 /* Shift lnum[0] - 1 items in whole. 
Shift lbytes - 1 byte from item number lnum[0] */ 511 - ret_val = 512 - leaf_shift_left(tb, tb->lnum[0], 513 - tb->lbytes); 676 + ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes); 514 677 /* Append to body of item in L[0] */ 515 678 buffer_info_init_left(tb, &bi); 516 - leaf_paste_in_buffer(&bi, 517 - n + item_pos - 518 - ret_val, 679 + leaf_paste_in_buffer(&bi, n + item_pos - ret_val, 519 680 pos_in_item, 520 681 tb->insert_size[0], 521 682 body, zeros_num); 522 683 523 684 /* if appended item is directory, paste entry */ 524 - pasted = 525 - B_N_PITEM_HEAD(tb->L[0], 526 - n + item_pos - 527 - ret_val); 685 + pasted = B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val); 528 686 if (is_direntry_le_ih(pasted)) 529 - leaf_paste_entries(&bi, 530 - n + 531 - item_pos - 532 - ret_val, 533 - pos_in_item, 534 - 1, 535 - (struct 536 - reiserfs_de_head 537 - *)body, 538 - body + 539 - DEH_SIZE, 540 - tb-> 541 - insert_size 542 - [0] 543 - ); 687 + leaf_paste_entries(&bi, n + item_pos - ret_val, 688 + pos_in_item, 1, 689 + (struct reiserfs_de_head *) body, 690 + body + DEH_SIZE, 691 + tb->insert_size[0]); 544 692 /* if appended item is indirect item, put unformatted node into un list */ 545 693 if (is_indirect_le_ih(pasted)) 546 694 set_ih_free_space(pasted, 0); ··· 530 722 reiserfs_panic(tb->tb_sb, "PAP-12130", 531 723 "lnum > 0: unexpected mode: " 532 724 " %s(%d)", 533 - (flag == 534 - M_DELETE) ? "DELETE" : ((flag == 535 - M_CUT) 536 - ? "CUT" 537 - : 538 - "UNKNOWN"), 539 - flag); 725 + (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? 
"CUT" : "UNKNOWN"), flag); 540 726 } 541 727 } else { 542 728 /* new item doesn't fall into L[0] */ ··· 550 748 case M_INSERT: /* insert item */ 551 749 if (n - tb->rnum[0] < item_pos) { /* new item or its part falls to R[0] */ 552 750 if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { /* part of new item falls into R[0] */ 553 - loff_t old_key_comp, old_len, 554 - r_zeros_number; 751 + loff_t old_key_comp, old_len, r_zeros_number; 555 752 const char *r_body; 556 753 int version; 557 754 loff_t offset; 558 755 559 - leaf_shift_right(tb, tb->rnum[0] - 1, 560 - -1); 756 + leaf_shift_right(tb, tb->rnum[0] - 1, -1); 561 757 562 758 version = ih_version(ih); 563 759 /* Remember key component and item length */ ··· 563 763 old_len = ih_item_len(ih); 564 764 565 765 /* Calculate key component and item length to insert into R[0] */ 566 - offset = 567 - le_ih_k_offset(ih) + 568 - ((old_len - 569 - tb-> 570 - rbytes) << (is_indirect_le_ih(ih) 571 - ? tb->tb_sb-> 572 - s_blocksize_bits - 573 - UNFM_P_SHIFT : 0)); 766 + offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << (is_indirect_le_ih(ih) ? 
tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT : 0)); 574 767 set_le_ih_k_offset(ih, offset); 575 768 put_ih_item_len(ih, tb->rbytes); 576 769 /* Insert part of the item into R[0] */ 577 770 buffer_info_init_right(tb, &bi); 578 771 if ((old_len - tb->rbytes) > zeros_num) { 579 772 r_zeros_number = 0; 580 - r_body = 581 - body + (old_len - 582 - tb->rbytes) - 583 - zeros_num; 773 + r_body = body + (old_len - tb->rbytes) - zeros_num; 584 774 } else { 585 775 r_body = body; 586 - r_zeros_number = 587 - zeros_num - (old_len - 588 - tb->rbytes); 776 + r_zeros_number = zeros_num - (old_len - tb->rbytes); 589 777 zeros_num -= r_zeros_number; 590 778 } 591 779 ··· 586 798 587 799 /* Calculate key component and item length to insert into S[0] */ 588 800 set_le_ih_k_offset(ih, old_key_comp); 589 - put_ih_item_len(ih, 590 - old_len - tb->rbytes); 801 + put_ih_item_len(ih, old_len - tb->rbytes); 591 802 592 803 tb->insert_size[0] -= tb->rbytes; 593 804 594 805 } else { /* whole new item falls into R[0] */ 595 806 596 807 /* Shift rnum[0]-1 items to R[0] */ 597 - ret_val = 598 - leaf_shift_right(tb, 599 - tb->rnum[0] - 1, 600 - tb->rbytes); 808 + ret_val = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes); 601 809 /* Insert new item into R[0] */ 602 810 buffer_info_init_right(tb, &bi); 603 - leaf_insert_into_buf(&bi, 604 - item_pos - n + 605 - tb->rnum[0] - 1, 606 - ih, body, 607 - zeros_num); 811 + leaf_insert_into_buf(&bi, item_pos - n + tb->rnum[0] - 1, 812 + ih, body, zeros_num); 608 813 609 814 if (item_pos - n + tb->rnum[0] - 1 == 0) { 610 815 replace_key(tb, tb->CFR[0], ··· 622 841 623 842 RFALSE(zeros_num, 624 843 "PAP-12145: invalid parameter in case of a directory"); 625 - entry_count = 626 - I_ENTRY_COUNT(B_N_PITEM_HEAD 627 - (tbS0, 628 - item_pos)); 844 + entry_count = I_ENTRY_COUNT(B_N_PITEM_HEAD 845 + (tbS0, item_pos)); 629 846 if (entry_count - tb->rbytes < 630 847 pos_in_item) 631 848 /* new directory entry falls into R[0] */ 632 849 { 633 850 int 
paste_entry_position; 634 851 635 - RFALSE(tb->rbytes - 1 >= 636 - entry_count 637 - || !tb-> 638 - insert_size[0], 852 + RFALSE(tb->rbytes - 1 >= entry_count || !tb-> insert_size[0], 639 853 "PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d", 640 - tb->rbytes, 641 - entry_count); 854 + tb->rbytes, entry_count); 642 855 /* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */ 643 - leaf_shift_right(tb, 644 - tb-> 645 - rnum 646 - [0], 647 - tb-> 648 - rbytes 649 - - 1); 856 + leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1); 650 857 /* Paste given directory entry to directory item */ 651 - paste_entry_position = 652 - pos_in_item - 653 - entry_count + 654 - tb->rbytes - 1; 858 + paste_entry_position = pos_in_item - entry_count + tb->rbytes - 1; 655 859 buffer_info_init_right(tb, &bi); 656 - leaf_paste_in_buffer 657 - (&bi, 0, 658 - paste_entry_position, 659 - tb->insert_size[0], 660 - body, zeros_num); 860 + leaf_paste_in_buffer(&bi, 0, paste_entry_position, tb->insert_size[0], body, zeros_num); 661 861 /* paste entry */ 662 - leaf_paste_entries(&bi, 663 - 0, 664 - paste_entry_position, 665 - 1, 666 - (struct 667 - reiserfs_de_head 668 - *) 669 - body, 670 - body 671 - + 672 - DEH_SIZE, 673 - tb-> 674 - insert_size 675 - [0] 676 - ); 862 + leaf_paste_entries(&bi, 0, paste_entry_position, 1, 863 + (struct reiserfs_de_head *) body, 864 + body + DEH_SIZE, tb->insert_size[0]); 677 865 678 - if (paste_entry_position 679 - == 0) { 866 + if (paste_entry_position == 0) { 680 867 /* change delimiting keys */ 681 - replace_key(tb, 682 - tb-> 683 - CFR 684 - [0], 685 - tb-> 686 - rkey 687 - [0], 688 - tb-> 689 - R 690 - [0], 691 - 0); 868 + replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0],0); 692 869 } 693 870 694 871 tb->insert_size[0] = 0; 695 872 pos_in_item++; 696 873 } else { /* new directory entry doesn't fall into R[0] */ 697 874 698 - leaf_shift_right(tb, 699 - tb-> 700 - rnum 701 - [0], 702 
- tb-> 703 - rbytes); 875 + leaf_shift_right(tb, tb->rnum[0], tb->rbytes); 704 876 } 705 877 } else { /* regular object */ 706 878 707 - int n_shift, n_rem, 708 - r_zeros_number; 879 + int n_shift, n_rem, r_zeros_number; 709 880 const char *r_body; 710 881 711 882 /* Calculate number of bytes which must be shifted from appended item */ 712 - if ((n_shift = 713 - tb->rbytes - 714 - tb->insert_size[0]) < 0) 883 + if ((n_shift = tb->rbytes - tb->insert_size[0]) < 0) 715 884 n_shift = 0; 716 885 717 - RFALSE(pos_in_item != 718 - ih_item_len 719 - (B_N_PITEM_HEAD 720 - (tbS0, item_pos)), 886 + RFALSE(pos_in_item != ih_item_len 887 + (B_N_PITEM_HEAD(tbS0, item_pos)), 721 888 "PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d", 722 - pos_in_item, 723 - ih_item_len 724 - (B_N_PITEM_HEAD 725 - (tbS0, item_pos))); 889 + pos_in_item, ih_item_len 890 + (B_N_PITEM_HEAD(tbS0, item_pos))); 726 891 727 - leaf_shift_right(tb, 728 - tb->rnum[0], 729 - n_shift); 892 + leaf_shift_right(tb, tb->rnum[0], n_shift); 730 893 /* Calculate number of bytes which must remain in body after appending to R[0] */ 731 - if ((n_rem = 732 - tb->insert_size[0] - 733 - tb->rbytes) < 0) 894 + if ((n_rem = tb->insert_size[0] - tb->rbytes) < 0) 734 895 n_rem = 0; 735 896 736 897 { 737 898 int version; 738 - unsigned long temp_rem = 739 - n_rem; 899 + unsigned long temp_rem = n_rem; 740 900 741 - version = 742 - ih_version 743 - (B_N_PITEM_HEAD 744 - (tb->R[0], 0)); 745 - if (is_indirect_le_key 746 - (version, 747 - B_N_PKEY(tb->R[0], 748 - 0))) { 749 - temp_rem = 750 - n_rem << 751 - (tb->tb_sb-> 752 - s_blocksize_bits 753 - - 754 - UNFM_P_SHIFT); 901 + version = ih_version(B_N_PITEM_HEAD(tb->R[0], 0)); 902 + if (is_indirect_le_key(version, B_N_PKEY(tb->R[0], 0))) { 903 + temp_rem = n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT); 755 904 } 756 - set_le_key_k_offset 757 - (version, 758 - B_N_PKEY(tb->R[0], 759 - 0), 760 - le_key_k_offset 761 - (version, 762 - B_N_PKEY(tb->R[0], 
763 - 0)) + 764 - temp_rem); 765 - set_le_key_k_offset 766 - (version, 767 - B_N_PDELIM_KEY(tb-> 768 - CFR 769 - [0], 770 - tb-> 771 - rkey 772 - [0]), 773 - le_key_k_offset 774 - (version, 775 - B_N_PDELIM_KEY 776 - (tb->CFR[0], 777 - tb->rkey[0])) + 778 - temp_rem); 905 + set_le_key_k_offset(version, B_N_PKEY(tb->R[0], 0), 906 + le_key_k_offset(version, B_N_PKEY(tb->R[0], 0)) + temp_rem); 907 + set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]), 908 + le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0])) + temp_rem); 779 909 } 780 910 /* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem; 781 911 k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/ 782 - do_balance_mark_internal_dirty 783 - (tb, tb->CFR[0], 0); 912 + do_balance_mark_internal_dirty(tb, tb->CFR[0], 0); 784 913 785 914 /* Append part of body into R[0] */ 786 915 buffer_info_init_right(tb, &bi); 787 916 if (n_rem > zeros_num) { 788 917 r_zeros_number = 0; 789 - r_body = 790 - body + n_rem - 791 - zeros_num; 918 + r_body = body + n_rem - zeros_num; 792 919 } else { 793 920 r_body = body; 794 - r_zeros_number = 795 - zeros_num - n_rem; 796 - zeros_num -= 797 - r_zeros_number; 921 + r_zeros_number = zeros_num - n_rem; 922 + zeros_num -= r_zeros_number; 798 923 } 799 924 800 - leaf_paste_in_buffer(&bi, 0, 801 - n_shift, 802 - tb-> 803 - insert_size 804 - [0] - 805 - n_rem, 806 - r_body, 807 - r_zeros_number); 925 + leaf_paste_in_buffer(&bi, 0, n_shift, 926 + tb->insert_size[0] - n_rem, 927 + r_body, r_zeros_number); 808 928 809 - if (is_indirect_le_ih 810 - (B_N_PITEM_HEAD 811 - (tb->R[0], 0))) { 929 + if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->R[0], 0))) { 812 930 #if 0 813 931 RFALSE(n_rem, 814 932 "PAP-12160: paste more than one unformatted node pointer"); 815 933 #endif 816 - set_ih_free_space 817 - (B_N_PITEM_HEAD 818 - (tb->R[0], 0), 0); 934 + set_ih_free_space(B_N_PITEM_HEAD(tb->R[0], 0), 0); 819 935 } 820 936 tb->insert_size[0] = n_rem; 821 937 if (!n_rem) ··· 722 1044 
723 1045 struct item_head *pasted; 724 1046 725 - ret_val = 726 - leaf_shift_right(tb, tb->rnum[0], 727 - tb->rbytes); 1047 + ret_val = leaf_shift_right(tb, tb->rnum[0], tb->rbytes); 728 1048 /* append item in R[0] */ 729 1049 if (pos_in_item >= 0) { 730 1050 buffer_info_init_right(tb, &bi); 731 - leaf_paste_in_buffer(&bi, 732 - item_pos - 733 - n + 734 - tb-> 735 - rnum[0], 736 - pos_in_item, 737 - tb-> 738 - insert_size 739 - [0], body, 740 - zeros_num); 1051 + leaf_paste_in_buffer(&bi, item_pos - n + tb->rnum[0], pos_in_item, 1052 + tb->insert_size[0], body, zeros_num); 741 1053 } 742 1054 743 1055 /* paste new entry, if item is directory item */ 744 - pasted = 745 - B_N_PITEM_HEAD(tb->R[0], 746 - item_pos - n + 747 - tb->rnum[0]); 748 - if (is_direntry_le_ih(pasted) 749 - && pos_in_item >= 0) { 750 - leaf_paste_entries(&bi, 751 - item_pos - 752 - n + 753 - tb->rnum[0], 754 - pos_in_item, 755 - 1, 756 - (struct 757 - reiserfs_de_head 758 - *)body, 759 - body + 760 - DEH_SIZE, 761 - tb-> 762 - insert_size 763 - [0] 764 - ); 1056 + pasted = B_N_PITEM_HEAD(tb->R[0], item_pos - n + tb->rnum[0]); 1057 + if (is_direntry_le_ih(pasted) && pos_in_item >= 0) { 1058 + leaf_paste_entries(&bi, item_pos - n + tb->rnum[0], 1059 + pos_in_item, 1, 1060 + (struct reiserfs_de_head *) body, 1061 + body + DEH_SIZE, tb->insert_size[0]); 765 1062 if (!pos_in_item) { 766 1063 767 - RFALSE(item_pos - n + 768 - tb->rnum[0], 1064 + RFALSE(item_pos - n + tb->rnum[0], 769 1065 "PAP-12165: directory item must be first item of node when pasting is in 0th position"); 770 1066 771 1067 /* update delimiting keys */ 772 - replace_key(tb, 773 - tb->CFR[0], 774 - tb->rkey[0], 775 - tb->R[0], 776 - 0); 1068 + replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0); 777 1069 } 778 1070 } 779 1071 ··· 759 1111 default: /* cases d and t */ 760 1112 reiserfs_panic(tb->tb_sb, "PAP-12175", 761 1113 "rnum > 0: unexpected mode: %s(%d)", 762 - (flag == 763 - M_DELETE) ? "DELETE" : ((flag == 764 - M_CUT) ? 
"CUT" 765 - : "UNKNOWN"), 766 - flag); 1114 + (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag); 767 1115 } 768 1116 769 1117 } 770 1118 771 1119 /* tb->rnum[0] > 0 */ 772 1120 RFALSE(tb->blknum[0] > 3, 773 - "PAP-12180: blknum can not be %d. It must be <= 3", 774 - tb->blknum[0]); 1121 + "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]); 775 1122 RFALSE(tb->blknum[0] < 0, 776 - "PAP-12185: blknum can not be %d. It must be >= 0", 777 - tb->blknum[0]); 1123 + "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]); 778 1124 779 1125 /* if while adding to a node we discover that it is possible to split 780 1126 it in two, and merge the left part into the left neighbor and the ··· 819 1177 820 1178 if (n - snum[i] < item_pos) { /* new item or it's part falls to first new node S_new[i] */ 821 1179 if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) { /* part of new item falls into S_new[i] */ 822 - int old_key_comp, old_len, 823 - r_zeros_number; 1180 + int old_key_comp, old_len, r_zeros_number; 824 1181 const char *r_body; 825 1182 int version; 826 1183 ··· 833 1192 old_len = ih_item_len(ih); 834 1193 835 1194 /* Calculate key component and item length to insert into S_new[i] */ 836 - set_le_ih_k_offset(ih, 837 - le_ih_k_offset(ih) + 838 - ((old_len - 839 - sbytes[i]) << 840 - (is_indirect_le_ih 841 - (ih) ? tb->tb_sb-> 842 - s_blocksize_bits - 843 - UNFM_P_SHIFT : 844 - 0))); 1195 + set_le_ih_k_offset(ih, le_ih_k_offset(ih) + 1196 + ((old_len - sbytes[i]) << (is_indirect_le_ih(ih) ? 
tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0))); 845 1197 846 1198 put_ih_item_len(ih, sbytes[i]); 847 1199 ··· 843 1209 844 1210 if ((old_len - sbytes[i]) > zeros_num) { 845 1211 r_zeros_number = 0; 846 - r_body = 847 - body + (old_len - 848 - sbytes[i]) - 849 - zeros_num; 1212 + r_body = body + (old_len - sbytes[i]) - zeros_num; 850 1213 } else { 851 1214 r_body = body; 852 - r_zeros_number = 853 - zeros_num - (old_len - 854 - sbytes[i]); 1215 + r_zeros_number = zeros_num - (old_len - sbytes[i]); 855 1216 zeros_num -= r_zeros_number; 856 1217 } 857 1218 858 - leaf_insert_into_buf(&bi, 0, ih, r_body, 859 - r_zeros_number); 1219 + leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeros_number); 860 1220 861 1221 /* Calculate key component and item length to insert into S[i] */ 862 1222 set_le_ih_k_offset(ih, old_key_comp); 863 - put_ih_item_len(ih, 864 - old_len - sbytes[i]); 1223 + put_ih_item_len(ih, old_len - sbytes[i]); 865 1224 tb->insert_size[0] -= sbytes[i]; 866 1225 } else { /* whole new item falls into S_new[i] */ 867 1226 868 1227 /* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */ 869 1228 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, 870 - snum[i] - 1, sbytes[i], 871 - S_new[i]); 1229 + snum[i] - 1, sbytes[i], S_new[i]); 872 1230 873 1231 /* Insert new item into S_new[i] */ 874 1232 buffer_info_init_bh(tb, &bi, S_new[i]); 875 - leaf_insert_into_buf(&bi, 876 - item_pos - n + 877 - snum[i] - 1, ih, 878 - body, zeros_num); 1233 + leaf_insert_into_buf(&bi, item_pos - n + snum[i] - 1, 1234 + ih, body, zeros_num); 879 1235 880 1236 zeros_num = tb->insert_size[0] = 0; 881 1237 } ··· 892 1268 893 1269 int entry_count; 894 1270 895 - entry_count = 896 - ih_entry_count(aux_ih); 1271 + entry_count = ih_entry_count(aux_ih); 897 1272 898 - if (entry_count - sbytes[i] < 899 - pos_in_item 900 - && pos_in_item <= 901 - entry_count) { 1273 + if (entry_count - sbytes[i] < pos_in_item && pos_in_item <= entry_count) { 902 1274 /* new directory entry falls into 
S_new[i] */ 903 1275 904 - RFALSE(!tb-> 905 - insert_size[0], 906 - "PAP-12215: insert_size is already 0"); 907 - RFALSE(sbytes[i] - 1 >= 908 - entry_count, 1276 + RFALSE(!tb->insert_size[0], "PAP-12215: insert_size is already 0"); 1277 + RFALSE(sbytes[i] - 1 >= entry_count, 909 1278 "PAP-12220: there are no so much entries (%d), only %d", 910 - sbytes[i] - 1, 911 - entry_count); 1279 + sbytes[i] - 1, entry_count); 912 1280 913 1281 /* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */ 914 - leaf_move_items 915 - (LEAF_FROM_S_TO_SNEW, 916 - tb, snum[i], 917 - sbytes[i] - 1, 918 - S_new[i]); 1282 + leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i] - 1, S_new[i]); 919 1283 /* Paste given directory entry to directory item */ 920 1284 buffer_info_init_bh(tb, &bi, S_new[i]); 921 - leaf_paste_in_buffer 922 - (&bi, 0, 923 - pos_in_item - 924 - entry_count + 925 - sbytes[i] - 1, 926 - tb->insert_size[0], 927 - body, zeros_num); 1285 + leaf_paste_in_buffer(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1, 1286 + tb->insert_size[0], body, zeros_num); 928 1287 /* paste new directory entry */ 929 - leaf_paste_entries(&bi, 930 - 0, 931 - pos_in_item 932 - - 933 - entry_count 934 - + 935 - sbytes 936 - [i] - 937 - 1, 1, 938 - (struct 939 - reiserfs_de_head 940 - *) 941 - body, 942 - body 943 - + 944 - DEH_SIZE, 945 - tb-> 946 - insert_size 947 - [0] 948 - ); 1288 + leaf_paste_entries(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1, 1, 1289 + (struct reiserfs_de_head *) body, 1290 + body + DEH_SIZE, tb->insert_size[0]); 949 1291 tb->insert_size[0] = 0; 950 1292 pos_in_item++; 951 1293 } else { /* new directory entry doesn't fall into S_new[i] */ 952 - leaf_move_items 953 - (LEAF_FROM_S_TO_SNEW, 954 - tb, snum[i], 955 - sbytes[i], 956 - S_new[i]); 1294 + leaf_move_items(LEAF_FROM_S_TO_SNEW,tb, snum[i], sbytes[i], S_new[i]); 957 1295 } 958 1296 } else { /* regular object */ 959 1297 960 - int n_shift, n_rem, 961 - 
r_zeros_number; 1298 + int n_shift, n_rem, r_zeros_number; 962 1299 const char *r_body; 963 1300 964 - RFALSE(pos_in_item != 965 - ih_item_len 966 - (B_N_PITEM_HEAD 967 - (tbS0, item_pos)) 968 - || tb->insert_size[0] <= 969 - 0, 1301 + RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)) || tb->insert_size[0] <= 0, 970 1302 "PAP-12225: item too short or insert_size <= 0"); 971 1303 972 1304 /* Calculate number of bytes which must be shifted from appended item */ 973 - n_shift = 974 - sbytes[i] - 975 - tb->insert_size[0]; 1305 + n_shift = sbytes[i] - tb->insert_size[0]; 976 1306 if (n_shift < 0) 977 1307 n_shift = 0; 978 - leaf_move_items 979 - (LEAF_FROM_S_TO_SNEW, tb, 980 - snum[i], n_shift, 981 - S_new[i]); 1308 + leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], n_shift, S_new[i]); 982 1309 983 1310 /* Calculate number of bytes which must remain in body after append to S_new[i] */ 984 - n_rem = 985 - tb->insert_size[0] - 986 - sbytes[i]; 1311 + n_rem = tb->insert_size[0] - sbytes[i]; 987 1312 if (n_rem < 0) 988 1313 n_rem = 0; 989 1314 /* Append part of body into S_new[0] */ 990 1315 buffer_info_init_bh(tb, &bi, S_new[i]); 991 1316 if (n_rem > zeros_num) { 992 1317 r_zeros_number = 0; 993 - r_body = 994 - body + n_rem - 995 - zeros_num; 1318 + r_body = body + n_rem - zeros_num; 996 1319 } else { 997 1320 r_body = body; 998 - r_zeros_number = 999 - zeros_num - n_rem; 1000 - zeros_num -= 1001 - r_zeros_number; 1321 + r_zeros_number = zeros_num - n_rem; 1322 + zeros_num -= r_zeros_number; 1002 1323 } 1003 1324 1004 - leaf_paste_in_buffer(&bi, 0, 1005 - n_shift, 1006 - tb-> 1007 - insert_size 1008 - [0] - 1009 - n_rem, 1010 - r_body, 1011 - r_zeros_number); 1325 + leaf_paste_in_buffer(&bi, 0, n_shift, 1326 + tb->insert_size[0] - n_rem, 1327 + r_body, r_zeros_number); 1012 1328 { 1013 1329 struct item_head *tmp; 1014 1330 1015 - tmp = 1016 - B_N_PITEM_HEAD(S_new 1017 - [i], 1018 - 0); 1331 + tmp = B_N_PITEM_HEAD(S_new[i], 0); 1019 1332 if 
(is_indirect_le_ih 1020 1333 (tmp)) { 1021 - set_ih_free_space 1022 - (tmp, 0); 1023 - set_le_ih_k_offset 1024 - (tmp, 1025 - le_ih_k_offset 1026 - (tmp) + 1027 - (n_rem << 1028 - (tb-> 1029 - tb_sb-> 1030 - s_blocksize_bits 1031 - - 1032 - UNFM_P_SHIFT))); 1334 + set_ih_free_space(tmp, 0); 1335 + set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + (n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT))); 1033 1336 } else { 1034 - set_le_ih_k_offset 1035 - (tmp, 1036 - le_ih_k_offset 1037 - (tmp) + 1038 - n_rem); 1337 + set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + n_rem); 1039 1338 } 1040 1339 } 1041 1340 ··· 973 1426 struct item_head *pasted; 974 1427 975 1428 #ifdef CONFIG_REISERFS_CHECK 976 - struct item_head *ih_check = 977 - B_N_PITEM_HEAD(tbS0, item_pos); 1429 + struct item_head *ih_check = B_N_PITEM_HEAD(tbS0, item_pos); 978 1430 979 1431 if (!is_direntry_le_ih(ih_check) 980 1432 && (pos_in_item != ih_item_len(ih_check) ··· 985 1439 "to ih_item_len"); 986 1440 #endif /* CONFIG_REISERFS_CHECK */ 987 1441 988 - leaf_mi = 989 - leaf_move_items(LEAF_FROM_S_TO_SNEW, 1442 + leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW, 990 1443 tb, snum[i], 991 1444 sbytes[i], 992 1445 S_new[i]); ··· 997 1452 /* paste into item */ 998 1453 buffer_info_init_bh(tb, &bi, S_new[i]); 999 1454 leaf_paste_in_buffer(&bi, 1000 - item_pos - n + 1001 - snum[i], 1455 + item_pos - n + snum[i], 1002 1456 pos_in_item, 1003 1457 tb->insert_size[0], 1004 1458 body, zeros_num); 1005 1459 1006 - pasted = 1007 - B_N_PITEM_HEAD(S_new[i], 1008 - item_pos - n + 1009 - snum[i]); 1460 + pasted = B_N_PITEM_HEAD(S_new[i], item_pos - n + snum[i]); 1010 1461 if (is_direntry_le_ih(pasted)) { 1011 1462 leaf_paste_entries(&bi, 1012 - item_pos - 1013 - n + snum[i], 1014 - pos_in_item, 1015 - 1, 1016 - (struct 1017 - reiserfs_de_head 1018 - *)body, 1019 - body + 1020 - DEH_SIZE, 1021 - tb-> 1022 - insert_size 1023 - [0] 1463 + item_pos - n + snum[i], 1464 + pos_in_item, 1, 1465 + (struct reiserfs_de_head *)body, 1466 
+ body + DEH_SIZE, 1467 + tb->insert_size[0] 1024 1468 ); 1025 1469 } 1026 1470 ··· 1029 1495 default: /* cases d and t */ 1030 1496 reiserfs_panic(tb->tb_sb, "PAP-12245", 1031 1497 "blknum > 2: unexpected mode: %s(%d)", 1032 - (flag == 1033 - M_DELETE) ? "DELETE" : ((flag == 1034 - M_CUT) ? "CUT" 1035 - : "UNKNOWN"), 1036 - flag); 1498 + (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag); 1037 1499 } 1038 1500 1039 1501 memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE); ··· 1054 1524 /* If we insert the first key change the delimiting key */ 1055 1525 if (item_pos == 0) { 1056 1526 if (tb->CFL[0]) /* can be 0 in reiserfsck */ 1057 - replace_key(tb, tb->CFL[0], tb->lkey[0], 1058 - tbS0, 0); 1059 - 1527 + replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0); 1060 1528 } 1061 1529 break; 1062 1530 ··· 1064 1536 pasted = B_N_PITEM_HEAD(tbS0, item_pos); 1065 1537 /* when directory, may be new entry already pasted */ 1066 1538 if (is_direntry_le_ih(pasted)) { 1067 - if (pos_in_item >= 0 && 1068 - pos_in_item <= 1069 - ih_entry_count(pasted)) { 1539 + if (pos_in_item >= 0 && pos_in_item <= ih_entry_count(pasted)) { 1070 1540 1071 1541 RFALSE(!tb->insert_size[0], 1072 1542 "PAP-12260: insert_size is 0 already"); 1073 1543 1074 1544 /* prepare space */ 1075 1545 buffer_info_init_tbS0(tb, &bi); 1076 - leaf_paste_in_buffer(&bi, 1077 - item_pos, 1078 - pos_in_item, 1079 - tb-> 1080 - insert_size 1081 - [0], body, 1546 + leaf_paste_in_buffer(&bi, item_pos, pos_in_item, 1547 + tb->insert_size[0], body, 1082 1548 zeros_num); 1083 1549 1084 1550 /* paste entry */ 1085 - leaf_paste_entries(&bi, 1086 - item_pos, 1087 - pos_in_item, 1088 - 1, 1089 - (struct 1090 - reiserfs_de_head 1091 - *)body, 1092 - body + 1093 - DEH_SIZE, 1094 - tb-> 1095 - insert_size 1096 - [0] 1097 - ); 1551 + leaf_paste_entries(&bi, item_pos, pos_in_item, 1, 1552 + (struct reiserfs_de_head *)body, 1553 + body + DEH_SIZE, 1554 + tb->insert_size[0]); 1098 1555 if (!item_pos && 
!pos_in_item) { 1099 - RFALSE(!tb->CFL[0] 1100 - || !tb->L[0], 1556 + RFALSE(!tb->CFL[0] || !tb->L[0], 1101 1557 "PAP-12270: CFL[0]/L[0] must be specified"); 1102 - if (tb->CFL[0]) { 1103 - replace_key(tb, 1104 - tb-> 1105 - CFL 1106 - [0], 1107 - tb-> 1108 - lkey 1109 - [0], 1110 - tbS0, 1111 - 0); 1112 - 1113 - } 1558 + if (tb->CFL[0]) 1559 + replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0); 1114 1560 } 1115 1561 tb->insert_size[0] = 0; 1116 1562 } ··· 1095 1593 "PAP-12275: insert size must not be %d", 1096 1594 tb->insert_size[0]); 1097 1595 buffer_info_init_tbS0(tb, &bi); 1098 - leaf_paste_in_buffer(&bi, 1099 - item_pos, 1100 - pos_in_item, 1101 - tb-> 1102 - insert_size 1103 - [0], body, 1104 - zeros_num); 1596 + leaf_paste_in_buffer(&bi, item_pos, pos_in_item, 1597 + tb->insert_size[0], body, zeros_num); 1105 1598 1106 1599 if (is_indirect_le_ih(pasted)) { 1107 1600 #if 0 ··· 1108 1611 tb-> 1109 1612 insert_size[0]); 1110 1613 #endif 1111 - set_ih_free_space 1112 - (pasted, 0); 1614 + set_ih_free_space(pasted, 0); 1113 1615 } 1114 1616 tb->insert_size[0] = 0; 1115 1617 } ··· 1116 1620 else { 1117 1621 if (tb->insert_size[0]) { 1118 1622 print_cur_tb("12285"); 1119 - reiserfs_panic(tb-> 1120 - tb_sb, 1623 + reiserfs_panic(tb->tb_sb, 1121 1624 "PAP-12285", 1122 1625 "insert_size " 1123 1626 "must be 0 "
+8 -8
fs/xfs/xfs_iops.c
··· 705 705 { 706 706 struct xfs_mount *mp = ip->i_mount; 707 707 struct inode *inode = VFS_I(ip); 708 - int mask = iattr->ia_valid; 709 708 xfs_off_t oldsize, newsize; 710 709 struct xfs_trans *tp; 711 710 int error; ··· 725 726 726 727 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 727 728 ASSERT(S_ISREG(ip->i_d.di_mode)); 728 - ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| 729 - ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); 729 + ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| 730 + ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); 730 731 731 732 oldsize = inode->i_size; 732 733 newsize = iattr->ia_size; ··· 735 736 * Short circuit the truncate case for zero length files. 736 737 */ 737 738 if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) { 738 - if (!(mask & (ATTR_CTIME|ATTR_MTIME))) 739 + if (!(iattr->ia_valid & (ATTR_CTIME|ATTR_MTIME))) 739 740 return 0; 740 741 741 742 /* ··· 823 824 * these flags set. For all other operations the VFS set these flags 824 825 * explicitly if it wants a timestamp update. 825 826 */ 826 - if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) { 827 + if (newsize != oldsize && 828 + !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) { 827 829 iattr->ia_ctime = iattr->ia_mtime = 828 830 current_fs_time(inode->i_sb); 829 - mask |= ATTR_CTIME | ATTR_MTIME; 831 + iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME; 830 832 } 831 833 832 834 /* ··· 863 863 xfs_inode_clear_eofblocks_tag(ip); 864 864 } 865 865 866 - if (mask & ATTR_MODE) 866 + if (iattr->ia_valid & ATTR_MODE) 867 867 xfs_setattr_mode(ip, iattr); 868 - if (mask & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME)) 868 + if (iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME)) 869 869 xfs_setattr_time(ip, iattr); 870 870 871 871 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+15 -4
fs/xfs/xfs_log_cil.c
··· 205 205 /* 206 206 * We 64-bit align the length of each iovec so that the start 207 207 * of the next one is naturally aligned. We'll need to 208 - * account for that slack space here. 208 + * account for that slack space here. Then round nbytes up 209 + * to 64-bit alignment so that the initial buffer alignment is 210 + * easy to calculate and verify. 209 211 */ 210 212 nbytes += niovecs * sizeof(uint64_t); 213 + nbytes = round_up(nbytes, sizeof(uint64_t)); 211 214 212 215 /* grab the old item if it exists for reservation accounting */ 213 216 old_lv = lip->li_lv; 214 217 215 - /* calc buffer size */ 216 - buf_size = sizeof(struct xfs_log_vec) + nbytes + 217 - niovecs * sizeof(struct xfs_log_iovec); 218 + /* 219 + * The data buffer needs to start 64-bit aligned, so round up 220 + * that space to ensure we can align it appropriately and not 221 + * overrun the buffer. 222 + */ 223 + buf_size = nbytes + 224 + round_up((sizeof(struct xfs_log_vec) + 225 + niovecs * sizeof(struct xfs_log_iovec)), 226 + sizeof(uint64_t)); 218 227 219 228 /* compare to existing item size */ 220 229 if (lip->li_lv && buf_size <= lip->li_lv->lv_size) { ··· 260 251 /* The allocated data region lies beyond the iovec region */ 261 252 lv->lv_buf_len = 0; 262 253 lv->lv_buf = (char *)lv + buf_size - nbytes; 254 + ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t))); 255 + 263 256 lip->li_ops->iop_format(lip, lv); 264 257 insert: 265 258 ASSERT(lv->lv_buf_len <= nbytes);
+16 -8
fs/xfs/xfs_mount.c
··· 282 282 struct xfs_sb *sbp = &mp->m_sb; 283 283 int error; 284 284 int loud = !(flags & XFS_MFSI_QUIET); 285 + const struct xfs_buf_ops *buf_ops; 285 286 286 287 ASSERT(mp->m_sb_bp == NULL); 287 288 ASSERT(mp->m_ddev_targp != NULL); 289 + 290 + /* 291 + * For the initial read, we must guess at the sector 292 + * size based on the block device. It's enough to 293 + * get the sb_sectsize out of the superblock and 294 + * then reread with the proper length. 295 + * We don't verify it yet, because it may not be complete. 296 + */ 297 + sector_size = xfs_getsize_buftarg(mp->m_ddev_targp); 298 + buf_ops = NULL; 288 299 289 300 /* 290 301 * Allocate a (locked) buffer to hold the superblock. 291 302 * This will be kept around at all times to optimize 292 303 * access to the superblock. 293 304 */ 294 - sector_size = xfs_getsize_buftarg(mp->m_ddev_targp); 295 - 296 305 reread: 297 306 bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR, 298 - BTOBB(sector_size), 0, 299 - loud ? &xfs_sb_buf_ops 300 - : &xfs_sb_quiet_buf_ops); 307 + BTOBB(sector_size), 0, buf_ops); 301 308 if (!bp) { 302 309 if (loud) 303 310 xfs_warn(mp, "SB buffer read failed"); ··· 335 328 } 336 329 337 330 /* 338 - * If device sector size is smaller than the superblock size, 339 - * re-read the superblock so the buffer is correctly sized. 331 + * Re-read the superblock so the buffer is correctly sized, 332 + * and properly verified. 340 333 */ 341 - if (sector_size < sbp->sb_sectsize) { 334 + if (buf_ops == NULL) { 342 335 xfs_buf_relse(bp); 343 336 sector_size = sbp->sb_sectsize; 337 + buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops; 344 338 goto reread; 345 339 } 346 340
+4 -6
fs/xfs/xfs_sb.c
··· 295 295 sbp->sb_dblocks == 0 || 296 296 sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp) || 297 297 sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp))) { 298 - XFS_CORRUPTION_ERROR("SB sanity check failed", 299 - XFS_ERRLEVEL_LOW, mp, sbp); 298 + xfs_notice(mp, "SB sanity check failed"); 300 299 return XFS_ERROR(EFSCORRUPTED); 301 300 } 302 301 ··· 610 611 XFS_SB_VERSION_5) || 611 612 dsb->sb_crc != 0)) { 612 613 613 - if (!xfs_verify_cksum(bp->b_addr, be16_to_cpu(dsb->sb_sectsize), 614 + if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length), 614 615 offsetof(struct xfs_sb, sb_crc))) { 615 616 /* Only fail bad secondaries on a known V5 filesystem */ 616 - if (bp->b_bn != XFS_SB_DADDR && 617 + if (bp->b_bn == XFS_SB_DADDR || 617 618 xfs_sb_version_hascrc(&mp->m_sb)) { 618 619 error = EFSCORRUPTED; 619 620 goto out_error; ··· 624 625 625 626 out_error: 626 627 if (error) { 627 - if (error != EWRONGFS) 628 + if (error == EFSCORRUPTED) 628 629 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, 629 630 mp, bp->b_addr); 630 631 xfs_buf_ioerror(bp, error); ··· 642 643 struct xfs_buf *bp) 643 644 { 644 645 struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp); 645 - 646 646 647 647 if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) { 648 648 /* XFS filesystem, verify noisily! */
+39
include/asm-generic/pgtable.h
··· 701 701 } 702 702 #endif 703 703 704 + #ifndef ptep_set_numa 705 + static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, 706 + pte_t *ptep) 707 + { 708 + pte_t ptent = *ptep; 709 + 710 + ptent = pte_mknuma(ptent); 711 + set_pte_at(mm, addr, ptep, ptent); 712 + return; 713 + } 714 + #endif 715 + 704 716 #ifndef pmd_mknuma 705 717 static inline pmd_t pmd_mknuma(pmd_t pmd) 706 718 { 707 719 pmd = pmd_set_flags(pmd, _PAGE_NUMA); 708 720 return pmd_clear_flags(pmd, _PAGE_PRESENT); 721 + } 722 + #endif 723 + 724 + #ifndef pmdp_set_numa 725 + static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, 726 + pmd_t *pmdp) 727 + { 728 + pmd_t pmd = *pmdp; 729 + 730 + pmd = pmd_mknuma(pmd); 731 + set_pmd_at(mm, addr, pmdp, pmd); 732 + return; 709 733 } 710 734 #endif 711 735 #else ··· 739 715 extern pmd_t pmd_mknonnuma(pmd_t pmd); 740 716 extern pte_t pte_mknuma(pte_t pte); 741 717 extern pmd_t pmd_mknuma(pmd_t pmd); 718 + extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep); 719 + extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp); 742 720 #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ 743 721 #else 744 722 static inline int pmd_numa(pmd_t pmd) ··· 768 742 return pte; 769 743 } 770 744 745 + static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, 746 + pte_t *ptep) 747 + { 748 + return; 749 + } 750 + 751 + 771 752 static inline pmd_t pmd_mknuma(pmd_t pmd) 772 753 { 773 754 return pmd; 755 + } 756 + 757 + static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, 758 + pmd_t *pmdp) 759 + { 760 + return ; 774 761 } 775 762 #endif /* CONFIG_NUMA_BALANCING */ 776 763
+3
include/drm/drm_crtc.h
··· 907 907 908 908 /* whether async page flip is supported or not */ 909 909 bool async_page_flip; 910 + 911 + /* cursor size */ 912 + uint32_t cursor_width, cursor_height; 910 913 }; 911 914 912 915 #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
+2
include/drm/ttm/ttm_page_alloc.h
··· 29 29 #include <drm/ttm/ttm_bo_driver.h> 30 30 #include <drm/ttm/ttm_memory.h> 31 31 32 + struct device; 33 + 32 34 /** 33 35 * Initialize pool allocator. 34 36 */
+3 -2
include/linux/ceph/ceph_fs.h
··· 373 373 /* 374 374 * Ceph setxattr request flags. 375 375 */ 376 - #define CEPH_XATTR_CREATE 1 377 - #define CEPH_XATTR_REPLACE 2 376 + #define CEPH_XATTR_CREATE (1 << 0) 377 + #define CEPH_XATTR_REPLACE (1 << 1) 378 + #define CEPH_XATTR_REMOVE (1 << 31) 378 379 379 380 union ceph_mds_request_args { 380 381 struct {
+2
include/linux/cgroup.h
··· 166 166 * 167 167 * The ID of the root cgroup is always 0, and a new cgroup 168 168 * will be assigned with a smallest available ID. 169 + * 170 + * Allocating/Removing ID must be protected by cgroup_mutex. 169 171 */ 170 172 int id; 171 173
+1 -1
include/linux/dma-buf.h
··· 171 171 size_t size, int flags, const char *); 172 172 173 173 #define dma_buf_export(priv, ops, size, flags) \ 174 - dma_buf_export_named(priv, ops, size, flags, __FILE__) 174 + dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME) 175 175 176 176 int dma_buf_fd(struct dma_buf *dmabuf, int flags); 177 177 struct dma_buf *dma_buf_get(int fd);
+1 -1
include/linux/mfd/max8997-private.h
··· 387 387 struct i2c_client *muic; /* slave addr 0x4a */ 388 388 struct mutex iolock; 389 389 390 - int type; 390 + unsigned long type; 391 391 struct platform_device *battery; /* battery control (not fuel gauge) */ 392 392 393 393 int irq;
+1 -1
include/linux/mfd/max8998-private.h
··· 163 163 int ono; 164 164 u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS]; 165 165 u8 irq_masks_cache[MAX8998_NUM_IRQ_REGS]; 166 - int type; 166 + unsigned long type; 167 167 bool wakeup; 168 168 }; 169 169
+2 -2
include/linux/mfd/tps65217.h
··· 252 252 struct tps65217 { 253 253 struct device *dev; 254 254 struct tps65217_board *pdata; 255 - unsigned int id; 255 + unsigned long id; 256 256 struct regulator_desc desc[TPS65217_NUM_REGULATOR]; 257 257 struct regulator_dev *rdev[TPS65217_NUM_REGULATOR]; 258 258 struct regmap *regmap; ··· 263 263 return dev_get_drvdata(dev); 264 264 } 265 265 266 - static inline int tps65217_chip_id(struct tps65217 *tps65217) 266 + static inline unsigned long tps65217_chip_id(struct tps65217 *tps65217) 267 267 { 268 268 return tps65217->id; 269 269 }
+32 -4
include/linux/netdevice.h
··· 752 752 unsigned char id_len; 753 753 }; 754 754 755 + typedef u16 (*select_queue_fallback_t)(struct net_device *dev, 756 + struct sk_buff *skb); 757 + 755 758 /* 756 759 * This structure defines the management hooks for network devices. 757 760 * The following hooks can be defined; unless noted otherwise, they are ··· 786 783 * Required can not be NULL. 787 784 * 788 785 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, 789 - * void *accel_priv); 786 + * void *accel_priv, select_queue_fallback_t fallback); 790 787 * Called to decide which queue to when device supports multiple 791 788 * transmit queues. 792 789 * ··· 1008 1005 struct net_device *dev); 1009 1006 u16 (*ndo_select_queue)(struct net_device *dev, 1010 1007 struct sk_buff *skb, 1011 - void *accel_priv); 1008 + void *accel_priv, 1009 + select_queue_fallback_t fallback); 1012 1010 void (*ndo_change_rx_flags)(struct net_device *dev, 1013 1011 int flags); 1014 1012 void (*ndo_set_rx_mode)(struct net_device *dev); ··· 1555 1551 struct netdev_queue *netdev_pick_tx(struct net_device *dev, 1556 1552 struct sk_buff *skb, 1557 1553 void *accel_priv); 1558 - u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); 1559 1554 1560 1555 /* 1561 1556 * Net namespace inlines ··· 2276 2273 static inline void netdev_reset_queue(struct net_device *dev_queue) 2277 2274 { 2278 2275 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); 2276 + } 2277 + 2278 + /** 2279 + * netdev_cap_txqueue - check if selected tx queue exceeds device queues 2280 + * @dev: network device 2281 + * @queue_index: given tx queue index 2282 + * 2283 + * Returns 0 if given tx queue index >= number of device tx queues, 2284 + * otherwise returns the originally passed tx queue index. 
2285 + */ 2286 + static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) 2287 + { 2288 + if (unlikely(queue_index >= dev->real_num_tx_queues)) { 2289 + net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", 2290 + dev->name, queue_index, 2291 + dev->real_num_tx_queues); 2292 + return 0; 2293 + } 2294 + 2295 + return queue_index; 2279 2296 } 2280 2297 2281 2298 /** ··· 3091 3068 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 3092 3069 struct net_device *dev); 3093 3070 3094 - netdev_features_t netif_skb_features(struct sk_buff *skb); 3071 + netdev_features_t netif_skb_dev_features(struct sk_buff *skb, 3072 + const struct net_device *dev); 3073 + static inline netdev_features_t netif_skb_features(struct sk_buff *skb) 3074 + { 3075 + return netif_skb_dev_features(skb, skb->dev); 3076 + } 3095 3077 3096 3078 static inline bool net_gso_ok(netdev_features_t features, int gso_type) 3097 3079 {
+20
include/linux/pci.h
··· 1169 1169 void pci_restore_msi_state(struct pci_dev *dev); 1170 1170 int pci_msi_enabled(void); 1171 1171 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec); 1172 + static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec) 1173 + { 1174 + int rc = pci_enable_msi_range(dev, nvec, nvec); 1175 + if (rc < 0) 1176 + return rc; 1177 + return 0; 1178 + } 1172 1179 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, 1173 1180 int minvec, int maxvec); 1181 + static inline int pci_enable_msix_exact(struct pci_dev *dev, 1182 + struct msix_entry *entries, int nvec) 1183 + { 1184 + int rc = pci_enable_msix_range(dev, entries, nvec, nvec); 1185 + if (rc < 0) 1186 + return rc; 1187 + return 0; 1188 + } 1174 1189 #else 1175 1190 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } 1176 1191 static inline int pci_enable_msi_block(struct pci_dev *dev, int nvec) ··· 1204 1189 static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec, 1205 1190 int maxvec) 1206 1191 { return -ENOSYS; } 1192 + static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec) 1193 + { return -ENOSYS; } 1207 1194 static inline int pci_enable_msix_range(struct pci_dev *dev, 1208 1195 struct msix_entry *entries, int minvec, int maxvec) 1196 + { return -ENOSYS; } 1197 + static inline int pci_enable_msix_exact(struct pci_dev *dev, 1198 + struct msix_entry *entries, int nvec) 1209 1199 { return -ENOSYS; } 1210 1200 #endif 1211 1201
+17
include/linux/skbuff.h
··· 2916 2916 { 2917 2917 return !skb->head_frag || skb_cloned(skb); 2918 2918 } 2919 + 2920 + /** 2921 + * skb_gso_network_seglen - Return length of individual segments of a gso packet 2922 + * 2923 + * @skb: GSO skb 2924 + * 2925 + * skb_gso_network_seglen is used to determine the real size of the 2926 + * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). 2927 + * 2928 + * The MAC/L2 header is not accounted for. 2929 + */ 2930 + static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) 2931 + { 2932 + unsigned int hdr_len = skb_transport_header(skb) - 2933 + skb_network_header(skb); 2934 + return hdr_len + skb_gso_transport_seglen(skb); 2935 + } 2919 2936 #endif /* __KERNEL__ */ 2920 2937 #endif /* _LINUX_SKBUFF_H */
+4 -2
include/linux/syscalls.h
··· 281 281 asmlinkage long sys_sched_setparam(pid_t pid, 282 282 struct sched_param __user *param); 283 283 asmlinkage long sys_sched_setattr(pid_t pid, 284 - struct sched_attr __user *attr); 284 + struct sched_attr __user *attr, 285 + unsigned int flags); 285 286 asmlinkage long sys_sched_getscheduler(pid_t pid); 286 287 asmlinkage long sys_sched_getparam(pid_t pid, 287 288 struct sched_param __user *param); 288 289 asmlinkage long sys_sched_getattr(pid_t pid, 289 290 struct sched_attr __user *attr, 290 - unsigned int size); 291 + unsigned int size, 292 + unsigned int flags); 291 293 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, 292 294 unsigned long __user *user_mask_ptr); 293 295 asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
+1 -4
include/linux/workqueue.h
··· 419 419 static struct lock_class_key __key; \ 420 420 const char *__lock_name; \ 421 421 \ 422 - if (__builtin_constant_p(fmt)) \ 423 - __lock_name = (fmt); \ 424 - else \ 425 - __lock_name = #fmt; \ 422 + __lock_name = #fmt#args; \ 426 423 \ 427 424 __alloc_workqueue_key((fmt), (flags), (max_active), \ 428 425 &__key, __lock_name, ##args); \
+1 -13
include/net/sctp/structs.h
··· 1653 1653 /* This is the last advertised value of rwnd over a SACK chunk. */ 1654 1654 __u32 a_rwnd; 1655 1655 1656 - /* Number of bytes by which the rwnd has slopped. The rwnd is allowed 1657 - * to slop over a maximum of the association's frag_point. 1658 - */ 1659 - __u32 rwnd_over; 1660 - 1661 - /* Keeps treack of rwnd pressure. This happens when we have 1662 - * a window, but not recevie buffer (i.e small packets). This one 1663 - * is releases slowly (1 PMTU at a time ). 1664 - */ 1665 - __u32 rwnd_press; 1666 - 1667 1656 /* This is the sndbuf size in use for the association. 1668 1657 * This corresponds to the sndbuf size for the association, 1669 1658 * as specified in the sk->sndbuf. ··· 1881 1892 __u32 sctp_association_get_next_tsn(struct sctp_association *); 1882 1893 1883 1894 void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); 1884 - void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); 1885 - void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); 1895 + void sctp_assoc_rwnd_update(struct sctp_association *, bool); 1886 1896 void sctp_assoc_set_primary(struct sctp_association *, 1887 1897 struct sctp_transport *); 1888 1898 void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
+2
include/uapi/drm/drm.h
··· 619 619 #define DRM_PRIME_CAP_EXPORT 0x2 620 620 #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 621 621 #define DRM_CAP_ASYNC_PAGE_FLIP 0x7 622 + #define DRM_CAP_CURSOR_WIDTH 0x8 623 + #define DRM_CAP_CURSOR_HEIGHT 0x9 622 624 623 625 /** DRM_IOCTL_GET_CAP ioctl argument type */ 624 626 struct drm_get_cap {
+1
include/uapi/drm/vmwgfx_drm.h
··· 87 87 #define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 88 88 #define DRM_VMW_PARAM_3D_CAPS_SIZE 8 89 89 #define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 90 + #define DRM_VMW_PARAM_MAX_MOB_SIZE 10 90 91 91 92 /** 92 93 * struct drm_vmw_getparam_arg
+32 -28
kernel/cgroup.c
··· 886 886 * per-subsystem and moved to css->id so that lookups are 887 887 * successful until the target css is released. 888 888 */ 889 + mutex_lock(&cgroup_mutex); 889 890 idr_remove(&cgrp->root->cgroup_idr, cgrp->id); 891 + mutex_unlock(&cgroup_mutex); 890 892 cgrp->id = -1; 891 893 892 894 call_rcu(&cgrp->rcu_head, cgroup_free_rcu); ··· 1568 1566 mutex_lock(&cgroup_mutex); 1569 1567 mutex_lock(&cgroup_root_mutex); 1570 1568 1571 - root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp, 1572 - 0, 1, GFP_KERNEL); 1573 - if (root_cgrp->id < 0) 1569 + ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL); 1570 + if (ret < 0) 1574 1571 goto unlock_drop; 1572 + root_cgrp->id = ret; 1575 1573 1576 1574 /* Check for name clashes with existing mounts */ 1577 1575 ret = -EBUSY; ··· 2765 2763 */ 2766 2764 update_before = cgroup_serial_nr_next; 2767 2765 2768 - mutex_unlock(&cgroup_mutex); 2769 - 2770 2766 /* add/rm files for all cgroups created before */ 2771 - rcu_read_lock(); 2772 2767 css_for_each_descendant_pre(css, cgroup_css(root, ss)) { 2773 2768 struct cgroup *cgrp = css->cgroup; 2774 2769 ··· 2774 2775 2775 2776 inode = cgrp->dentry->d_inode; 2776 2777 dget(cgrp->dentry); 2777 - rcu_read_unlock(); 2778 - 2779 2778 dput(prev); 2780 2779 prev = cgrp->dentry; 2781 2780 2781 + mutex_unlock(&cgroup_mutex); 2782 2782 mutex_lock(&inode->i_mutex); 2783 2783 mutex_lock(&cgroup_mutex); 2784 2784 if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp)) 2785 2785 ret = cgroup_addrm_files(cgrp, cfts, is_add); 2786 - mutex_unlock(&cgroup_mutex); 2787 2786 mutex_unlock(&inode->i_mutex); 2788 - 2789 - rcu_read_lock(); 2790 2787 if (ret) 2791 2788 break; 2792 2789 } 2793 - rcu_read_unlock(); 2790 + mutex_unlock(&cgroup_mutex); 2794 2791 dput(prev); 2795 2792 deactivate_super(sb); 2796 2793 return ret; ··· 2905 2910 * We should check if the process is exiting, otherwise 2906 2911 * it will race with cgroup_exit() in that the list 2907 2912 * entry won't be deleted 
though the process has exited. 2913 + * Do it while holding siglock so that we don't end up 2914 + * racing against cgroup_exit(). 2908 2915 */ 2916 + spin_lock_irq(&p->sighand->siglock); 2909 2917 if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list)) 2910 2918 list_add(&p->cg_list, &task_css_set(p)->tasks); 2919 + spin_unlock_irq(&p->sighand->siglock); 2920 + 2911 2921 task_unlock(p); 2912 2922 } while_each_thread(g, p); 2913 2923 read_unlock(&tasklist_lock); ··· 4158 4158 struct cgroup *cgrp; 4159 4159 struct cgroup_name *name; 4160 4160 struct cgroupfs_root *root = parent->root; 4161 - int ssid, err = 0; 4161 + int ssid, err; 4162 4162 struct cgroup_subsys *ss; 4163 4163 struct super_block *sb = root->sb; 4164 4164 ··· 4168 4168 return -ENOMEM; 4169 4169 4170 4170 name = cgroup_alloc_name(dentry); 4171 - if (!name) 4171 + if (!name) { 4172 + err = -ENOMEM; 4172 4173 goto err_free_cgrp; 4174 + } 4173 4175 rcu_assign_pointer(cgrp->name, name); 4174 - 4175 - /* 4176 - * Temporarily set the pointer to NULL, so idr_find() won't return 4177 - * a half-baked cgroup. 4178 - */ 4179 - cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL); 4180 - if (cgrp->id < 0) 4181 - goto err_free_name; 4182 4176 4183 4177 /* 4184 4178 * Only live parents can have children. Note that the liveliness ··· 4183 4189 */ 4184 4190 if (!cgroup_lock_live_group(parent)) { 4185 4191 err = -ENODEV; 4186 - goto err_free_id; 4192 + goto err_free_name; 4193 + } 4194 + 4195 + /* 4196 + * Temporarily set the pointer to NULL, so idr_find() won't return 4197 + * a half-baked cgroup. 
4198 + */ 4199 + cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL); 4200 + if (cgrp->id < 0) { 4201 + err = -ENOMEM; 4202 + goto err_unlock; 4187 4203 } 4188 4204 4189 4205 /* Grab a reference on the superblock so the hierarchy doesn't ··· 4225 4221 */ 4226 4222 err = cgroup_create_file(dentry, S_IFDIR | mode, sb); 4227 4223 if (err < 0) 4228 - goto err_unlock; 4224 + goto err_free_id; 4229 4225 lockdep_assert_held(&dentry->d_inode->i_mutex); 4230 4226 4231 4227 cgrp->serial_nr = cgroup_serial_nr_next++; ··· 4261 4257 4262 4258 return 0; 4263 4259 4264 - err_unlock: 4265 - mutex_unlock(&cgroup_mutex); 4266 - /* Release the reference count that we took on the superblock */ 4267 - deactivate_super(sb); 4268 4260 err_free_id: 4269 4261 idr_remove(&root->cgroup_idr, cgrp->id); 4262 + /* Release the reference count that we took on the superblock */ 4263 + deactivate_super(sb); 4264 + err_unlock: 4265 + mutex_unlock(&cgroup_mutex); 4270 4266 err_free_name: 4271 4267 kfree(rcu_dereference_raw(cgrp->name)); 4272 4268 err_free_cgrp:
+1
kernel/power/console.c
··· 9 9 #include <linux/kbd_kern.h> 10 10 #include <linux/vt.h> 11 11 #include <linux/module.h> 12 + #include <linux/slab.h> 12 13 #include "power.h" 13 14 14 15 #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
-2
kernel/printk/printk.c
··· 1076 1076 next_seq = log_next_seq; 1077 1077 1078 1078 len = 0; 1079 - prev = 0; 1080 1079 while (len >= 0 && seq < next_seq) { 1081 1080 struct printk_log *msg = log_from_idx(idx); 1082 1081 int textlen; ··· 2787 2788 next_idx = idx; 2788 2789 2789 2790 l = 0; 2790 - prev = 0; 2791 2791 while (seq < dumper->next_seq) { 2792 2792 struct printk_log *msg = log_from_idx(idx); 2793 2793
+16 -12
kernel/sched/core.c
··· 1952 1952 { 1953 1953 1954 1954 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 1955 - u64 period = attr->sched_period; 1955 + u64 period = attr->sched_period ?: attr->sched_deadline; 1956 1956 u64 runtime = attr->sched_runtime; 1957 1957 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; 1958 1958 int cpus, err = -1; ··· 3661 3661 * @pid: the pid in question. 3662 3662 * @uattr: structure containing the extended parameters. 3663 3663 */ 3664 - SYSCALL_DEFINE2(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr) 3664 + SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 3665 + unsigned int, flags) 3665 3666 { 3666 3667 struct sched_attr attr; 3667 3668 struct task_struct *p; 3668 3669 int retval; 3669 3670 3670 - if (!uattr || pid < 0) 3671 + if (!uattr || pid < 0 || flags) 3671 3672 return -EINVAL; 3672 3673 3673 3674 if (sched_copy_attr(uattr, &attr)) ··· 3787 3786 attr->size = usize; 3788 3787 } 3789 3788 3790 - ret = copy_to_user(uattr, attr, usize); 3789 + ret = copy_to_user(uattr, attr, attr->size); 3791 3790 if (ret) 3792 3791 return -EFAULT; 3793 3792 ··· 3805 3804 * @uattr: structure containing the extended parameters. 3806 3805 * @size: sizeof(attr) for fwd/bwd comp. 
3807 3806 */ 3808 - SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 3809 - unsigned int, size) 3807 + SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 3808 + unsigned int, size, unsigned int, flags) 3810 3809 { 3811 3810 struct sched_attr attr = { 3812 3811 .size = sizeof(struct sched_attr), ··· 3815 3814 int retval; 3816 3815 3817 3816 if (!uattr || pid < 0 || size > PAGE_SIZE || 3818 - size < SCHED_ATTR_SIZE_VER0) 3817 + size < SCHED_ATTR_SIZE_VER0 || flags) 3819 3818 return -EINVAL; 3820 3819 3821 3820 rcu_read_lock(); ··· 7423 7422 u64 period = global_rt_period(); 7424 7423 u64 new_bw = to_ratio(period, runtime); 7425 7424 int cpu, ret = 0; 7425 + unsigned long flags; 7426 7426 7427 7427 /* 7428 7428 * Here we want to check the bandwidth not being set to some ··· 7437 7435 for_each_possible_cpu(cpu) { 7438 7436 struct dl_bw *dl_b = dl_bw_of(cpu); 7439 7437 7440 - raw_spin_lock(&dl_b->lock); 7438 + raw_spin_lock_irqsave(&dl_b->lock, flags); 7441 7439 if (new_bw < dl_b->total_bw) 7442 7440 ret = -EBUSY; 7443 - raw_spin_unlock(&dl_b->lock); 7441 + raw_spin_unlock_irqrestore(&dl_b->lock, flags); 7444 7442 7445 7443 if (ret) 7446 7444 break; ··· 7453 7451 { 7454 7452 u64 new_bw = -1; 7455 7453 int cpu; 7454 + unsigned long flags; 7456 7455 7457 7456 def_dl_bandwidth.dl_period = global_rt_period(); 7458 7457 def_dl_bandwidth.dl_runtime = global_rt_runtime(); ··· 7467 7464 for_each_possible_cpu(cpu) { 7468 7465 struct dl_bw *dl_b = dl_bw_of(cpu); 7469 7466 7470 - raw_spin_lock(&dl_b->lock); 7467 + raw_spin_lock_irqsave(&dl_b->lock, flags); 7471 7468 dl_b->bw = new_bw; 7472 - raw_spin_unlock(&dl_b->lock); 7469 + raw_spin_unlock_irqrestore(&dl_b->lock, flags); 7473 7470 } 7474 7471 } 7475 7472 ··· 7478 7475 if (sysctl_sched_rt_period <= 0) 7479 7476 return -EINVAL; 7480 7477 7481 - if (sysctl_sched_rt_runtime > sysctl_sched_rt_period) 7478 + if ((sysctl_sched_rt_runtime != RUNTIME_INF) && 7479 + 
(sysctl_sched_rt_runtime > sysctl_sched_rt_period)) 7482 7480 return -EINVAL; 7483 7481 7484 7482 return 0;
+3 -3
kernel/sched/cpudeadline.c
··· 70 70 71 71 static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl) 72 72 { 73 - WARN_ON(idx > num_present_cpus() || idx == IDX_INVALID); 73 + WARN_ON(!cpu_present(idx) || idx == IDX_INVALID); 74 74 75 75 if (dl_time_before(new_dl, cp->elements[idx].dl)) { 76 76 cp->elements[idx].dl = new_dl; ··· 117 117 } 118 118 119 119 out: 120 - WARN_ON(best_cpu > num_present_cpus() && best_cpu != -1); 120 + WARN_ON(!cpu_present(best_cpu) && best_cpu != -1); 121 121 122 122 return best_cpu; 123 123 } ··· 137 137 int old_idx, new_cpu; 138 138 unsigned long flags; 139 139 140 - WARN_ON(cpu > num_present_cpus()); 140 + WARN_ON(!cpu_present(cpu)); 141 141 142 142 raw_spin_lock_irqsave(&cp->lock, flags); 143 143 old_idx = cp->cpu_to_idx[cpu];
+3 -7
kernel/sched/deadline.c
··· 121 121 122 122 static void update_dl_migration(struct dl_rq *dl_rq) 123 123 { 124 - if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) { 124 + if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) { 125 125 if (!dl_rq->overloaded) { 126 126 dl_set_overload(rq_of_dl_rq(dl_rq)); 127 127 dl_rq->overloaded = 1; ··· 137 137 struct task_struct *p = dl_task_of(dl_se); 138 138 dl_rq = &rq_of_dl_rq(dl_rq)->dl; 139 139 140 - dl_rq->dl_nr_total++; 141 140 if (p->nr_cpus_allowed > 1) 142 141 dl_rq->dl_nr_migratory++; 143 142 ··· 148 149 struct task_struct *p = dl_task_of(dl_se); 149 150 dl_rq = &rq_of_dl_rq(dl_rq)->dl; 150 151 151 - dl_rq->dl_nr_total--; 152 152 if (p->nr_cpus_allowed > 1) 153 153 dl_rq->dl_nr_migratory--; 154 154 ··· 715 717 716 718 WARN_ON(!dl_prio(prio)); 717 719 dl_rq->dl_nr_running++; 720 + inc_nr_running(rq_of_dl_rq(dl_rq)); 718 721 719 722 inc_dl_deadline(dl_rq, deadline); 720 723 inc_dl_migration(dl_se, dl_rq); ··· 729 730 WARN_ON(!dl_prio(prio)); 730 731 WARN_ON(!dl_rq->dl_nr_running); 731 732 dl_rq->dl_nr_running--; 733 + dec_nr_running(rq_of_dl_rq(dl_rq)); 732 734 733 735 dec_dl_deadline(dl_rq, dl_se->deadline); 734 736 dec_dl_migration(dl_se, dl_rq); ··· 836 836 837 837 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) 838 838 enqueue_pushable_dl_task(rq, p); 839 - 840 - inc_nr_running(rq); 841 839 } 842 840 843 841 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) ··· 848 850 { 849 851 update_curr_dl(rq); 850 852 __dequeue_task_dl(rq, p, flags); 851 - 852 - dec_nr_running(rq); 853 853 } 854 854 855 855 /*
+2
kernel/sched/fair.c
··· 1757 1757 start = end; 1758 1758 if (pages <= 0) 1759 1759 goto out; 1760 + 1761 + cond_resched(); 1760 1762 } while (end != vma->vm_end); 1761 1763 } 1762 1764
-1
kernel/sched/sched.h
··· 462 462 } earliest_dl; 463 463 464 464 unsigned long dl_nr_migratory; 465 - unsigned long dl_nr_total; 466 465 int overloaded; 467 466 468 467 /*
+29 -17
kernel/time/sched_clock.c
··· 116 116 void __init sched_clock_register(u64 (*read)(void), int bits, 117 117 unsigned long rate) 118 118 { 119 + u64 res, wrap, new_mask, new_epoch, cyc, ns; 120 + u32 new_mult, new_shift; 121 + ktime_t new_wrap_kt; 119 122 unsigned long r; 120 - u64 res, wrap; 121 123 char r_unit; 122 124 123 125 if (cd.rate > rate) 124 126 return; 125 127 126 128 WARN_ON(!irqs_disabled()); 127 - read_sched_clock = read; 128 - sched_clock_mask = CLOCKSOURCE_MASK(bits); 129 - cd.rate = rate; 130 129 131 130 /* calculate the mult/shift to convert counter ticks to ns. */ 132 - clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600); 131 + clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600); 132 + 133 + new_mask = CLOCKSOURCE_MASK(bits); 134 + 135 + /* calculate how many ns until we wrap */ 136 + wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask); 137 + new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3)); 138 + 139 + /* update epoch for new counter and update epoch_ns from old counter*/ 140 + new_epoch = read(); 141 + cyc = read_sched_clock(); 142 + ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask, 143 + cd.mult, cd.shift); 144 + 145 + raw_write_seqcount_begin(&cd.seq); 146 + read_sched_clock = read; 147 + sched_clock_mask = new_mask; 148 + cd.rate = rate; 149 + cd.wrap_kt = new_wrap_kt; 150 + cd.mult = new_mult; 151 + cd.shift = new_shift; 152 + cd.epoch_cyc = new_epoch; 153 + cd.epoch_ns = ns; 154 + raw_write_seqcount_end(&cd.seq); 133 155 134 156 r = rate; 135 157 if (r >= 4000000) { ··· 163 141 } else 164 142 r_unit = ' '; 165 143 166 - /* calculate how many ns until we wrap */ 167 - wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask); 168 - cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3)); 169 - 170 144 /* calculate the ns resolution of this counter */ 171 - res = cyc_to_ns(1ULL, cd.mult, cd.shift); 145 + res = cyc_to_ns(1ULL, new_mult, new_shift); 146 + 172 147 pr_info("sched_clock: %u bits at %lu%cHz, 
resolution %lluns, wraps every %lluns\n", 173 148 bits, r, r_unit, res, wrap); 174 - 175 - update_sched_clock(); 176 - 177 - /* 178 - * Ensure that sched_clock() starts off at 0ns 179 - */ 180 - cd.epoch_ns = 0; 181 149 182 150 /* Enable IRQ time accounting if we have a fast enough sched_clock */ 183 151 if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
+1 -1
kernel/user_namespace.c
··· 225 225 * 226 226 * When there is no mapping defined for the user-namespace uid 227 227 * pair INVALID_UID is returned. Callers are expected to test 228 - * for and handle handle INVALID_UID being returned. INVALID_UID 228 + * for and handle INVALID_UID being returned. INVALID_UID 229 229 * may be tested for using uid_valid(). 230 230 */ 231 231 kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
+7
kernel/workqueue.c
··· 1851 1851 if (worker->flags & WORKER_IDLE) 1852 1852 pool->nr_idle--; 1853 1853 1854 + /* 1855 + * Once WORKER_DIE is set, the kworker may destroy itself at any 1856 + * point. Pin to ensure the task stays until we're done with it. 1857 + */ 1858 + get_task_struct(worker->task); 1859 + 1854 1860 list_del_init(&worker->entry); 1855 1861 worker->flags |= WORKER_DIE; 1856 1862 ··· 1865 1859 spin_unlock_irq(&pool->lock); 1866 1860 1867 1861 kthread_stop(worker->task); 1862 + put_task_struct(worker->task); 1868 1863 kfree(worker); 1869 1864 1870 1865 spin_lock_irq(&pool->lock);
+2 -7
mm/huge_memory.c
··· 1545 1545 entry = pmd_mknonnuma(entry); 1546 1546 entry = pmd_modify(entry, newprot); 1547 1547 ret = HPAGE_PMD_NR; 1548 + set_pmd_at(mm, addr, pmd, entry); 1548 1549 BUG_ON(pmd_write(entry)); 1549 1550 } else { 1550 1551 struct page *page = pmd_page(*pmd); ··· 1558 1557 */ 1559 1558 if (!is_huge_zero_page(page) && 1560 1559 !pmd_numa(*pmd)) { 1561 - entry = *pmd; 1562 - entry = pmd_mknuma(entry); 1560 + pmdp_set_numa(mm, addr, pmd); 1563 1561 ret = HPAGE_PMD_NR; 1564 1562 } 1565 1563 } 1566 - 1567 - /* Set PMD if cleared earlier */ 1568 - if (ret == HPAGE_PMD_NR) 1569 - set_pmd_at(mm, addr, pmd, entry); 1570 - 1571 1564 spin_unlock(ptl); 1572 1565 } 1573 1566
+8 -17
mm/mprotect.c
··· 58 58 if (pte_numa(ptent)) 59 59 ptent = pte_mknonnuma(ptent); 60 60 ptent = pte_modify(ptent, newprot); 61 + /* 62 + * Avoid taking write faults for pages we 63 + * know to be dirty. 64 + */ 65 + if (dirty_accountable && pte_dirty(ptent)) 66 + ptent = pte_mkwrite(ptent); 67 + ptep_modify_prot_commit(mm, addr, pte, ptent); 61 68 updated = true; 62 69 } else { 63 70 struct page *page; 64 71 65 - ptent = *pte; 66 72 page = vm_normal_page(vma, addr, oldpte); 67 73 if (page && !PageKsm(page)) { 68 74 if (!pte_numa(oldpte)) { 69 - ptent = pte_mknuma(ptent); 70 - set_pte_at(mm, addr, pte, ptent); 75 + ptep_set_numa(mm, addr, pte); 71 76 updated = true; 72 77 } 73 78 } 74 79 } 75 - 76 - /* 77 - * Avoid taking write faults for pages we know to be 78 - * dirty. 79 - */ 80 - if (dirty_accountable && pte_dirty(ptent)) { 81 - ptent = pte_mkwrite(ptent); 82 - updated = true; 83 - } 84 - 85 80 if (updated) 86 81 pages++; 87 - 88 - /* Only !prot_numa always clears the pte */ 89 - if (!prot_numa) 90 - ptep_modify_prot_commit(mm, addr, pte, ptent); 91 82 } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { 92 83 swp_entry_t entry = pte_to_swp_entry(oldpte); 93 84
+1
mm/vmpressure.c
··· 19 19 #include <linux/mm.h> 20 20 #include <linux/vmstat.h> 21 21 #include <linux/eventfd.h> 22 + #include <linux/slab.h> 22 23 #include <linux/swap.h> 23 24 #include <linux/printk.h> 24 25 #include <linux/vmpressure.h>
+20 -10
net/batman-adv/bat_iv_ogm.c
··· 241 241 size = bat_priv->num_ifaces * sizeof(uint8_t); 242 242 orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC); 243 243 if (!orig_node->bat_iv.bcast_own_sum) 244 - goto free_bcast_own; 244 + goto free_orig_node; 245 245 246 246 hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig, 247 247 batadv_choose_orig, orig_node, 248 248 &orig_node->hash_entry); 249 249 if (hash_added != 0) 250 - goto free_bcast_own; 250 + goto free_orig_node; 251 251 252 252 return orig_node; 253 253 254 - free_bcast_own: 255 - kfree(orig_node->bat_iv.bcast_own); 256 254 free_orig_node: 255 + /* free twice, as batadv_orig_node_new sets refcount to 2 */ 256 + batadv_orig_node_free_ref(orig_node); 257 257 batadv_orig_node_free_ref(orig_node); 258 258 259 259 return NULL; ··· 266 266 struct batadv_orig_node *orig_neigh) 267 267 { 268 268 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 269 - struct batadv_neigh_node *neigh_node; 269 + struct batadv_neigh_node *neigh_node, *tmp_neigh_node; 270 270 271 271 neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node); 272 272 if (!neigh_node) ··· 281 281 neigh_node->orig_node = orig_neigh; 282 282 neigh_node->if_incoming = hard_iface; 283 283 284 - batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 285 - "Creating new neighbor %pM for orig_node %pM on interface %s\n", 286 - neigh_addr, orig_node->orig, hard_iface->net_dev->name); 287 - 288 284 spin_lock_bh(&orig_node->neigh_list_lock); 289 - hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); 285 + tmp_neigh_node = batadv_neigh_node_get(orig_node, hard_iface, 286 + neigh_addr); 287 + if (!tmp_neigh_node) { 288 + hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); 289 + } else { 290 + kfree(neigh_node); 291 + batadv_hardif_free_ref(hard_iface); 292 + neigh_node = tmp_neigh_node; 293 + } 290 294 spin_unlock_bh(&orig_node->neigh_list_lock); 295 + 296 + if (!tmp_neigh_node) 297 + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 298 + 
"Creating new neighbor %pM for orig_node %pM on interface %s\n", 299 + neigh_addr, orig_node->orig, 300 + hard_iface->net_dev->name); 291 301 292 302 out: 293 303 return neigh_node;
+14 -8
net/batman-adv/hard-interface.c
··· 241 241 { 242 242 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 243 243 const struct batadv_hard_iface *hard_iface; 244 - int min_mtu = ETH_DATA_LEN; 244 + int min_mtu = INT_MAX; 245 245 246 246 rcu_read_lock(); 247 247 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { ··· 256 256 } 257 257 rcu_read_unlock(); 258 258 259 - atomic_set(&bat_priv->packet_size_max, min_mtu); 260 - 261 259 if (atomic_read(&bat_priv->fragmentation) == 0) 262 260 goto out; 263 261 ··· 266 268 min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE); 267 269 min_mtu -= sizeof(struct batadv_frag_packet); 268 270 min_mtu *= BATADV_FRAG_MAX_FRAGMENTS; 269 - atomic_set(&bat_priv->packet_size_max, min_mtu); 270 - 271 - /* with fragmentation enabled we can fragment external packets easily */ 272 - min_mtu = min_t(int, min_mtu, ETH_DATA_LEN); 273 271 274 272 out: 275 - return min_mtu - batadv_max_header_len(); 273 + /* report to the other components the maximum amount of bytes that 274 + * batman-adv can send over the wire (without considering the payload 275 + * overhead). For example, this value is used by TT to compute the 276 + * maximum local table table size 277 + */ 278 + atomic_set(&bat_priv->packet_size_max, min_mtu); 279 + 280 + /* the real soft-interface MTU is computed by removing the payload 281 + * overhead from the maximum amount of bytes that was just computed. 282 + * 283 + * However batman-adv does not support MTUs bigger than ETH_DATA_LEN 284 + */ 285 + return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN); 276 286 } 277 287 278 288 /* adjusts the MTU if a new interface with a smaller MTU appeared. */
+36
net/batman-adv/originator.c
··· 458 458 } 459 459 460 460 /** 461 + * batadv_neigh_node_get - retrieve a neighbour from the list 462 + * @orig_node: originator which the neighbour belongs to 463 + * @hard_iface: the interface where this neighbour is connected to 464 + * @addr: the address of the neighbour 465 + * 466 + * Looks for and possibly returns a neighbour belonging to this originator list 467 + * which is connected through the provided hard interface. 468 + * Returns NULL if the neighbour is not found. 469 + */ 470 + struct batadv_neigh_node * 471 + batadv_neigh_node_get(const struct batadv_orig_node *orig_node, 472 + const struct batadv_hard_iface *hard_iface, 473 + const uint8_t *addr) 474 + { 475 + struct batadv_neigh_node *tmp_neigh_node, *res = NULL; 476 + 477 + rcu_read_lock(); 478 + hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) { 479 + if (!batadv_compare_eth(tmp_neigh_node->addr, addr)) 480 + continue; 481 + 482 + if (tmp_neigh_node->if_incoming != hard_iface) 483 + continue; 484 + 485 + if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) 486 + continue; 487 + 488 + res = tmp_neigh_node; 489 + break; 490 + } 491 + rcu_read_unlock(); 492 + 493 + return res; 494 + } 495 + 496 + /** 461 497 * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object 462 498 * @rcu: rcu pointer of the orig_ifinfo object 463 499 */
+4
net/batman-adv/originator.h
··· 29 29 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, 30 30 const uint8_t *addr); 31 31 struct batadv_neigh_node * 32 + batadv_neigh_node_get(const struct batadv_orig_node *orig_node, 33 + const struct batadv_hard_iface *hard_iface, 34 + const uint8_t *addr); 35 + struct batadv_neigh_node * 32 36 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface, 33 37 const uint8_t *neigh_addr, 34 38 struct batadv_orig_node *orig_node);
+3 -1
net/batman-adv/routing.c
··· 688 688 int is_old_ttvn; 689 689 690 690 /* check if there is enough data before accessing it */ 691 - if (pskb_may_pull(skb, hdr_len + ETH_HLEN) < 0) 691 + if (!pskb_may_pull(skb, hdr_len + ETH_HLEN)) 692 692 return 0; 693 693 694 694 /* create a copy of the skb (in case of for re-routing) to modify it. */ ··· 918 918 919 919 if (ret != NET_RX_SUCCESS) 920 920 ret = batadv_route_unicast_packet(skb, recv_if); 921 + else 922 + consume_skb(skb); 921 923 922 924 return ret; 923 925 }
+7 -2
net/batman-adv/send.c
··· 254 254 struct batadv_orig_node *orig_node, 255 255 unsigned short vid) 256 256 { 257 - struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 257 + struct ethhdr *ethhdr; 258 258 struct batadv_unicast_packet *unicast_packet; 259 - int ret = NET_XMIT_DROP; 259 + int ret = NET_XMIT_DROP, hdr_size; 260 260 261 261 if (!orig_node) 262 262 goto out; ··· 265 265 case BATADV_UNICAST: 266 266 if (!batadv_send_skb_prepare_unicast(skb, orig_node)) 267 267 goto out; 268 + 269 + hdr_size = sizeof(*unicast_packet); 268 270 break; 269 271 case BATADV_UNICAST_4ADDR: 270 272 if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, 271 273 orig_node, 272 274 packet_subtype)) 273 275 goto out; 276 + 277 + hdr_size = sizeof(struct batadv_unicast_4addr_packet); 274 278 break; 275 279 default: 276 280 /* this function supports UNICAST and UNICAST_4ADDR only. It ··· 283 279 goto out; 284 280 } 285 281 282 + ethhdr = (struct ethhdr *)(skb->data + hdr_size); 286 283 unicast_packet = (struct batadv_unicast_packet *)skb->data; 287 284 288 285 /* inform the destination node that we are still missing a correct route
+17 -6
net/batman-adv/translation-table.c
··· 1975 1975 struct hlist_head *head; 1976 1976 uint32_t i, crc_tmp, crc = 0; 1977 1977 uint8_t flags; 1978 + __be16 tmp_vid; 1978 1979 1979 1980 for (i = 0; i < hash->size; i++) { 1980 1981 head = &hash->table[i]; ··· 2012 2011 orig_node)) 2013 2012 continue; 2014 2013 2015 - crc_tmp = crc32c(0, &tt_common->vid, 2016 - sizeof(tt_common->vid)); 2014 + /* use network order to read the VID: this ensures that 2015 + * every node reads the bytes in the same order. 2016 + */ 2017 + tmp_vid = htons(tt_common->vid); 2018 + crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid)); 2017 2019 2018 2020 /* compute the CRC on flags that have to be kept in sync 2019 2021 * among nodes ··· 2050 2046 struct hlist_head *head; 2051 2047 uint32_t i, crc_tmp, crc = 0; 2052 2048 uint8_t flags; 2049 + __be16 tmp_vid; 2053 2050 2054 2051 for (i = 0; i < hash->size; i++) { 2055 2052 head = &hash->table[i]; ··· 2069 2064 if (tt_common->flags & BATADV_TT_CLIENT_NEW) 2070 2065 continue; 2071 2066 2072 - crc_tmp = crc32c(0, &tt_common->vid, 2073 - sizeof(tt_common->vid)); 2067 + /* use network order to read the VID: this ensures that 2068 + * every node reads the bytes in the same order. 
2069 + */ 2070 + tmp_vid = htons(tt_common->vid); 2071 + crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid)); 2074 2072 2075 2073 /* compute the CRC on flags that have to be kept in sync 2076 2074 * among nodes ··· 2270 2262 { 2271 2263 struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp; 2272 2264 struct batadv_orig_node_vlan *vlan; 2265 + uint32_t crc; 2273 2266 int i; 2274 2267 2275 2268 /* check if each received CRC matches the locally stored one */ ··· 2290 2281 if (!vlan) 2291 2282 return false; 2292 2283 2293 - if (vlan->tt.crc != ntohl(tt_vlan_tmp->crc)) 2284 + crc = vlan->tt.crc; 2285 + batadv_orig_node_vlan_free_ref(vlan); 2286 + 2287 + if (crc != ntohl(tt_vlan_tmp->crc)) 2294 2288 return false; 2295 2289 } 2296 2290 ··· 3230 3218 3231 3219 spin_lock_bh(&orig_node->tt_lock); 3232 3220 3233 - tt_change = (struct batadv_tvlv_tt_change *)tt_buff; 3234 3221 batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes, 3235 3222 ttvn, tt_change); 3236 3223
+14 -2
net/bluetooth/hidp/core.c
··· 430 430 del_timer(&session->timer); 431 431 } 432 432 433 + static void hidp_process_report(struct hidp_session *session, 434 + int type, const u8 *data, int len, int intr) 435 + { 436 + if (len > HID_MAX_BUFFER_SIZE) 437 + len = HID_MAX_BUFFER_SIZE; 438 + 439 + memcpy(session->input_buf, data, len); 440 + hid_input_report(session->hid, type, session->input_buf, len, intr); 441 + } 442 + 433 443 static void hidp_process_handshake(struct hidp_session *session, 434 444 unsigned char param) 435 445 { ··· 512 502 hidp_input_report(session, skb); 513 503 514 504 if (session->hid) 515 - hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0); 505 + hidp_process_report(session, HID_INPUT_REPORT, 506 + skb->data, skb->len, 0); 516 507 break; 517 508 518 509 case HIDP_DATA_RTYPE_OTHER: ··· 595 584 hidp_input_report(session, skb); 596 585 597 586 if (session->hid) { 598 - hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1); 587 + hidp_process_report(session, HID_INPUT_REPORT, 588 + skb->data, skb->len, 1); 599 589 BT_DBG("report len %d", skb->len); 600 590 } 601 591 } else {
+4
net/bluetooth/hidp/hidp.h
··· 24 24 #define __HIDP_H 25 25 26 26 #include <linux/types.h> 27 + #include <linux/hid.h> 27 28 #include <linux/kref.h> 28 29 #include <net/bluetooth/bluetooth.h> 29 30 #include <net/bluetooth/l2cap.h> ··· 180 179 181 180 /* Used in hidp_output_raw_report() */ 182 181 int output_report_success; /* boolean */ 182 + 183 + /* temporary input buffer */ 184 + u8 input_buf[HID_MAX_BUFFER_SIZE]; 183 185 }; 184 186 185 187 /* HIDP init defines */
+12 -10
net/core/dev.c
··· 2420 2420 * 2. No high memory really exists on this machine. 2421 2421 */ 2422 2422 2423 - static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 2423 + static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb) 2424 2424 { 2425 2425 #ifdef CONFIG_HIGHMEM 2426 2426 int i; ··· 2495 2495 } 2496 2496 2497 2497 static netdev_features_t harmonize_features(struct sk_buff *skb, 2498 - netdev_features_t features) 2498 + const struct net_device *dev, 2499 + netdev_features_t features) 2499 2500 { 2500 2501 if (skb->ip_summed != CHECKSUM_NONE && 2501 2502 !can_checksum_protocol(features, skb_network_protocol(skb))) { 2502 2503 features &= ~NETIF_F_ALL_CSUM; 2503 - } else if (illegal_highdma(skb->dev, skb)) { 2504 + } else if (illegal_highdma(dev, skb)) { 2504 2505 features &= ~NETIF_F_SG; 2505 2506 } 2506 2507 2507 2508 return features; 2508 2509 } 2509 2510 2510 - netdev_features_t netif_skb_features(struct sk_buff *skb) 2511 + netdev_features_t netif_skb_dev_features(struct sk_buff *skb, 2512 + const struct net_device *dev) 2511 2513 { 2512 2514 __be16 protocol = skb->protocol; 2513 - netdev_features_t features = skb->dev->features; 2515 + netdev_features_t features = dev->features; 2514 2516 2515 - if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) 2517 + if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs) 2516 2518 features &= ~NETIF_F_GSO_MASK; 2517 2519 2518 2520 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { 2519 2521 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2520 2522 protocol = veh->h_vlan_encapsulated_proto; 2521 2523 } else if (!vlan_tx_tag_present(skb)) { 2522 - return harmonize_features(skb, features); 2524 + return harmonize_features(skb, dev, features); 2523 2525 } 2524 2526 2525 - features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | 2527 + features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | 2526 2528 NETIF_F_HW_VLAN_STAG_TX); 2527 2529 2528 2530 if 
(protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) ··· 2532 2530 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX | 2533 2531 NETIF_F_HW_VLAN_STAG_TX; 2534 2532 2535 - return harmonize_features(skb, features); 2533 + return harmonize_features(skb, dev, features); 2536 2534 } 2537 - EXPORT_SYMBOL(netif_skb_features); 2535 + EXPORT_SYMBOL(netif_skb_dev_features); 2538 2536 2539 2537 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2540 2538 struct netdev_queue *txq)
+4 -16
net/core/flow_dissector.c
··· 323 323 return poff; 324 324 } 325 325 326 - static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) 327 - { 328 - if (unlikely(queue_index >= dev->real_num_tx_queues)) { 329 - net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", 330 - dev->name, queue_index, 331 - dev->real_num_tx_queues); 332 - return 0; 333 - } 334 - return queue_index; 335 - } 336 - 337 326 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) 338 327 { 339 328 #ifdef CONFIG_XPS ··· 361 372 #endif 362 373 } 363 374 364 - u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) 375 + static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) 365 376 { 366 377 struct sock *sk = skb->sk; 367 378 int queue_index = sk_tx_queue_get(sk); ··· 381 392 382 393 return queue_index; 383 394 } 384 - EXPORT_SYMBOL(__netdev_pick_tx); 385 395 386 396 struct netdev_queue *netdev_pick_tx(struct net_device *dev, 387 397 struct sk_buff *skb, ··· 391 403 if (dev->real_num_tx_queues != 1) { 392 404 const struct net_device_ops *ops = dev->netdev_ops; 393 405 if (ops->ndo_select_queue) 394 - queue_index = ops->ndo_select_queue(dev, skb, 395 - accel_priv); 406 + queue_index = ops->ndo_select_queue(dev, skb, accel_priv, 407 + __netdev_pick_tx); 396 408 else 397 409 queue_index = __netdev_pick_tx(dev, skb); 398 410 399 411 if (!accel_priv) 400 - queue_index = dev_cap_txqueue(dev, queue_index); 412 + queue_index = netdev_cap_txqueue(dev, queue_index); 401 413 } 402 414 403 415 skb_set_queue_mapping(skb, queue_index);
+12 -7
net/core/rtnetlink.c
··· 1963 1963 1964 1964 dev->ifindex = ifm->ifi_index; 1965 1965 1966 - if (ops->newlink) 1966 + if (ops->newlink) { 1967 1967 err = ops->newlink(net, dev, tb, data); 1968 - else 1968 + /* Drivers should call free_netdev() in ->destructor 1969 + * and unregister it on failure so that device could be 1970 + * finally freed in rtnl_unlock. 1971 + */ 1972 + if (err < 0) 1973 + goto out; 1974 + } else { 1969 1975 err = register_netdevice(dev); 1970 - 1971 - if (err < 0) { 1972 - free_netdev(dev); 1973 - goto out; 1976 + if (err < 0) { 1977 + free_netdev(dev); 1978 + goto out; 1979 + } 1974 1980 } 1975 - 1976 1981 err = rtnl_configure_link(dev, ifm); 1977 1982 if (err < 0) 1978 1983 unregister_netdevice(dev);
+1 -1
net/dccp/ccids/lib/tfrc.c
··· 8 8 #include "tfrc.h" 9 9 10 10 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG 11 - static bool tfrc_debug; 11 + bool tfrc_debug; 12 12 module_param(tfrc_debug, bool, 0644); 13 13 MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages"); 14 14 #endif
+1
net/dccp/ccids/lib/tfrc.h
··· 21 21 #include "packet_history.h" 22 22 23 23 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG 24 + extern bool tfrc_debug; 24 25 #define tfrc_pr_debug(format, a...) DCCP_PR_DEBUG(tfrc_debug, format, ##a) 25 26 #else 26 27 #define tfrc_pr_debug(format, a...)
+69 -2
net/ipv4/ip_forward.c
··· 39 39 #include <net/route.h> 40 40 #include <net/xfrm.h> 41 41 42 + static bool ip_may_fragment(const struct sk_buff *skb) 43 + { 44 + return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) || 45 + !skb->local_df; 46 + } 47 + 48 + static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) 49 + { 50 + if (skb->len <= mtu || skb->local_df) 51 + return false; 52 + 53 + if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) 54 + return false; 55 + 56 + return true; 57 + } 58 + 59 + static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb) 60 + { 61 + unsigned int mtu; 62 + 63 + if (skb->local_df || !skb_is_gso(skb)) 64 + return false; 65 + 66 + mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true); 67 + 68 + /* if seglen > mtu, do software segmentation for IP fragmentation on 69 + * output. DF bit cannot be set since ip_forward would have sent 70 + * icmp error. 71 + */ 72 + return skb_gso_network_seglen(skb) > mtu; 73 + } 74 + 75 + /* called if GSO skb needs to be fragmented on forward */ 76 + static int ip_forward_finish_gso(struct sk_buff *skb) 77 + { 78 + struct dst_entry *dst = skb_dst(skb); 79 + netdev_features_t features; 80 + struct sk_buff *segs; 81 + int ret = 0; 82 + 83 + features = netif_skb_dev_features(skb, dst->dev); 84 + segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); 85 + if (IS_ERR(segs)) { 86 + kfree_skb(skb); 87 + return -ENOMEM; 88 + } 89 + 90 + consume_skb(skb); 91 + 92 + do { 93 + struct sk_buff *nskb = segs->next; 94 + int err; 95 + 96 + segs->next = NULL; 97 + err = dst_output(segs); 98 + 99 + if (err && ret == 0) 100 + ret = err; 101 + segs = nskb; 102 + } while (segs); 103 + 104 + return ret; 105 + } 106 + 42 107 static int ip_forward_finish(struct sk_buff *skb) 43 108 { 44 109 struct ip_options *opt = &(IPCB(skb)->opt); ··· 113 48 114 49 if (unlikely(opt->optlen)) 115 50 ip_forward_options(skb); 51 + 52 + if (ip_gso_exceeds_dst_mtu(skb)) 53 + return ip_forward_finish_gso(skb); 116 54 117 55 return 
dst_output(skb); 118 56 } ··· 159 91 160 92 IPCB(skb)->flags |= IPSKB_FORWARDED; 161 93 mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); 162 - if (unlikely(skb->len > mtu && !skb_is_gso(skb) && 163 - (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) { 94 + if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) { 164 95 IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS); 165 96 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 166 97 htonl(mtu));
+1 -1
net/ipv4/ipconfig.c
··· 273 273 274 274 msleep(1); 275 275 276 - if time_before(jiffies, next_msg) 276 + if (time_before(jiffies, next_msg)) 277 277 continue; 278 278 279 279 elapsed = jiffies_to_msecs(jiffies - start);
+9 -4
net/ipv4/route.c
··· 1597 1597 rth->rt_gateway = 0; 1598 1598 rth->rt_uses_gateway = 0; 1599 1599 INIT_LIST_HEAD(&rth->rt_uncached); 1600 + RT_CACHE_STAT_INC(in_slow_tot); 1600 1601 1601 1602 rth->dst.input = ip_forward; 1602 1603 rth->dst.output = ip_output; ··· 1696 1695 fl4.daddr = daddr; 1697 1696 fl4.saddr = saddr; 1698 1697 err = fib_lookup(net, &fl4, &res); 1699 - if (err != 0) 1698 + if (err != 0) { 1699 + if (!IN_DEV_FORWARD(in_dev)) 1700 + err = -EHOSTUNREACH; 1700 1701 goto no_route; 1701 - 1702 - RT_CACHE_STAT_INC(in_slow_tot); 1702 + } 1703 1703 1704 1704 if (res.type == RTN_BROADCAST) 1705 1705 goto brd_input; ··· 1714 1712 goto local_input; 1715 1713 } 1716 1714 1717 - if (!IN_DEV_FORWARD(in_dev)) 1715 + if (!IN_DEV_FORWARD(in_dev)) { 1716 + err = -EHOSTUNREACH; 1718 1717 goto no_route; 1718 + } 1719 1719 if (res.type != RTN_UNICAST) 1720 1720 goto martian_destination; 1721 1721 ··· 1772 1768 rth->rt_gateway = 0; 1773 1769 rth->rt_uses_gateway = 0; 1774 1770 INIT_LIST_HEAD(&rth->rt_uncached); 1771 + RT_CACHE_STAT_INC(in_slow_tot); 1775 1772 if (res.type == RTN_UNREACHABLE) { 1776 1773 rth->dst.input= ip_error; 1777 1774 rth->dst.error= -err;
+2
net/ipv6/addrconf.c
··· 2783 2783 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); 2784 2784 if (!ipv6_generate_eui64(addr.s6_addr + 8, dev)) 2785 2785 addrconf_add_linklocal(idev, &addr); 2786 + else 2787 + addrconf_prefix_route(&addr, 64, dev, 0, 0); 2786 2788 } 2787 2789 #endif 2788 2790
+15 -2
net/ipv6/ip6_output.c
··· 342 342 return mtu; 343 343 } 344 344 345 + static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) 346 + { 347 + if (skb->len <= mtu || skb->local_df) 348 + return false; 349 + 350 + if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu) 351 + return true; 352 + 353 + if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) 354 + return false; 355 + 356 + return true; 357 + } 358 + 345 359 int ip6_forward(struct sk_buff *skb) 346 360 { 347 361 struct dst_entry *dst = skb_dst(skb); ··· 480 466 if (mtu < IPV6_MIN_MTU) 481 467 mtu = IPV6_MIN_MTU; 482 468 483 - if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) || 484 - (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) { 469 + if (ip6_pkt_too_big(skb, mtu)) { 485 470 /* Again, force OUTPUT device used as source address */ 486 471 skb->dev = dst->dev; 487 472 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+4 -2
net/mac80211/iface.c
··· 1057 1057 1058 1058 static u16 ieee80211_netdev_select_queue(struct net_device *dev, 1059 1059 struct sk_buff *skb, 1060 - void *accel_priv) 1060 + void *accel_priv, 1061 + select_queue_fallback_t fallback) 1061 1062 { 1062 1063 return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); 1063 1064 } ··· 1076 1075 1077 1076 static u16 ieee80211_monitor_select_queue(struct net_device *dev, 1078 1077 struct sk_buff *skb, 1079 - void *accel_priv) 1078 + void *accel_priv, 1079 + select_queue_fallback_t fallback) 1080 1080 { 1081 1081 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1082 1082 struct ieee80211_local *local = sdata->local;
+22 -4
net/packet/af_packet.c
··· 308 308 return po->xmit == packet_direct_xmit; 309 309 } 310 310 311 - static u16 packet_pick_tx_queue(struct net_device *dev) 311 + static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) 312 312 { 313 313 return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; 314 + } 315 + 316 + static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) 317 + { 318 + const struct net_device_ops *ops = dev->netdev_ops; 319 + u16 queue_index; 320 + 321 + if (ops->ndo_select_queue) { 322 + queue_index = ops->ndo_select_queue(dev, skb, NULL, 323 + __packet_pick_tx_queue); 324 + queue_index = netdev_cap_txqueue(dev, queue_index); 325 + } else { 326 + queue_index = __packet_pick_tx_queue(dev, skb); 327 + } 328 + 329 + skb_set_queue_mapping(skb, queue_index); 314 330 } 315 331 316 332 /* register_prot_hook must be invoked with the po->bind_lock held, ··· 2301 2285 } 2302 2286 } 2303 2287 2304 - skb_set_queue_mapping(skb, packet_pick_tx_queue(dev)); 2288 + packet_pick_tx_queue(dev, skb); 2289 + 2305 2290 skb->destructor = tpacket_destruct_skb; 2306 2291 __packet_set_status(po, ph, TP_STATUS_SENDING); 2307 2292 packet_inc_pending(&po->tx_ring); ··· 2516 2499 skb->dev = dev; 2517 2500 skb->priority = sk->sk_priority; 2518 2501 skb->mark = sk->sk_mark; 2519 - skb_set_queue_mapping(skb, packet_pick_tx_queue(dev)); 2502 + 2503 + packet_pick_tx_queue(dev, skb); 2520 2504 2521 2505 if (po->has_vnet_hdr) { 2522 2506 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { ··· 3804 3786 */ 3805 3787 if (!tx_ring) 3806 3788 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring); 3807 - break; 3789 + break; 3808 3790 default: 3809 3791 break; 3810 3792 }
+16 -5
net/sched/sch_pie.c
··· 15 15 * 16 16 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no> 17 17 * University of Oslo, Norway. 18 + * 19 + * References: 20 + * IETF draft submission: http://tools.ietf.org/html/draft-pan-aqm-pie-00 21 + * IEEE Conference on High Performance Switching and Routing 2013 : 22 + * "PIE: A * Lightweight Control Scheme to Address the Bufferbloat Problem" 18 23 */ 19 24 20 25 #include <linux/module.h> ··· 41 36 psched_time_t target; /* user specified target delay in pschedtime */ 42 37 u32 tupdate; /* timer frequency (in jiffies) */ 43 38 u32 limit; /* number of packets that can be enqueued */ 44 - u32 alpha; /* alpha and beta are between -4 and 4 */ 39 + u32 alpha; /* alpha and beta are between 0 and 32 */ 45 40 u32 beta; /* and are used for shift relative to 1 */ 46 41 bool ecn; /* true if ecn is enabled */ 47 42 bool bytemode; /* to scale drop early prob based on pkt size */ ··· 331 326 if (qdelay == 0 && qlen != 0) 332 327 update_prob = false; 333 328 334 - /* Add ranges for alpha and beta, more aggressive for high dropping 335 - * mode and gentle steps for light dropping mode 336 - * In light dropping mode, take gentle steps; in medium dropping mode, 337 - * take medium steps; in high dropping mode, take big steps. 329 + /* In the algorithm, alpha and beta are between 0 and 2 with typical 330 + * value for alpha as 0.125. In this implementation, we use values 0-32 331 + * passed from user space to represent this. Also, alpha and beta have 332 + * unit of HZ and need to be scaled before they can used to update 333 + * probability. alpha/beta are updated locally below by 1) scaling them 334 + * appropriately 2) scaling down by 16 to come to 0-2 range. 335 + * Please see paper for details. 336 + * 337 + * We scale alpha and beta differently depending on whether we are in 338 + * light, medium or high dropping mode. 338 339 */ 339 340 if (q->vars.prob < MAX_PROB / 100) { 340 341 alpha =
+17 -65
net/sctp/associola.c
··· 1367 1367 return false; 1368 1368 } 1369 1369 1370 - /* Increase asoc's rwnd by len and send any window update SACK if needed. */ 1371 - void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) 1370 + /* Update asoc's rwnd for the approximated state in the buffer, 1371 + * and check whether SACK needs to be sent. 1372 + */ 1373 + void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer) 1372 1374 { 1375 + int rx_count; 1373 1376 struct sctp_chunk *sack; 1374 1377 struct timer_list *timer; 1375 1378 1376 - if (asoc->rwnd_over) { 1377 - if (asoc->rwnd_over >= len) { 1378 - asoc->rwnd_over -= len; 1379 - } else { 1380 - asoc->rwnd += (len - asoc->rwnd_over); 1381 - asoc->rwnd_over = 0; 1382 - } 1383 - } else { 1384 - asoc->rwnd += len; 1385 - } 1379 + if (asoc->ep->rcvbuf_policy) 1380 + rx_count = atomic_read(&asoc->rmem_alloc); 1381 + else 1382 + rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); 1386 1383 1387 - /* If we had window pressure, start recovering it 1388 - * once our rwnd had reached the accumulated pressure 1389 - * threshold. The idea is to recover slowly, but up 1390 - * to the initial advertised window. 
1391 - */ 1392 - if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) { 1393 - int change = min(asoc->pathmtu, asoc->rwnd_press); 1394 - asoc->rwnd += change; 1395 - asoc->rwnd_press -= change; 1396 - } 1384 + if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0) 1385 + asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1; 1386 + else 1387 + asoc->rwnd = 0; 1397 1388 1398 - pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n", 1399 - __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, 1400 - asoc->a_rwnd); 1389 + pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n", 1390 + __func__, asoc, asoc->rwnd, rx_count, 1391 + asoc->base.sk->sk_rcvbuf); 1401 1392 1402 1393 /* Send a window update SACK if the rwnd has increased by at least the 1403 1394 * minimum of the association's PMTU and half of the receive buffer. 1404 1395 * The algorithm used is similar to the one described in 1405 1396 * Section 4.2.3.3 of RFC 1122. 1406 1397 */ 1407 - if (sctp_peer_needs_update(asoc)) { 1398 + if (update_peer && sctp_peer_needs_update(asoc)) { 1408 1399 asoc->a_rwnd = asoc->rwnd; 1409 1400 1410 1401 pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " ··· 1417 1426 } 1418 1427 } 1419 1428 1420 - /* Decrease asoc's rwnd by len. */ 1421 - void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) 1422 - { 1423 - int rx_count; 1424 - int over = 0; 1425 - 1426 - if (unlikely(!asoc->rwnd || asoc->rwnd_over)) 1427 - pr_debug("%s: association:%p has asoc->rwnd:%u, " 1428 - "asoc->rwnd_over:%u!\n", __func__, asoc, 1429 - asoc->rwnd, asoc->rwnd_over); 1430 - 1431 - if (asoc->ep->rcvbuf_policy) 1432 - rx_count = atomic_read(&asoc->rmem_alloc); 1433 - else 1434 - rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); 1435 - 1436 - /* If we've reached or overflowed our receive buffer, announce 1437 - * a 0 rwnd if rwnd would still be positive. 
Store the 1438 - * the potential pressure overflow so that the window can be restored 1439 - * back to original value. 1440 - */ 1441 - if (rx_count >= asoc->base.sk->sk_rcvbuf) 1442 - over = 1; 1443 - 1444 - if (asoc->rwnd >= len) { 1445 - asoc->rwnd -= len; 1446 - if (over) { 1447 - asoc->rwnd_press += asoc->rwnd; 1448 - asoc->rwnd = 0; 1449 - } 1450 - } else { 1451 - asoc->rwnd_over = len - asoc->rwnd; 1452 - asoc->rwnd = 0; 1453 - } 1454 - 1455 - pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n", 1456 - __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, 1457 - asoc->rwnd_press); 1458 - } 1459 1429 1460 1430 /* Build the bind address list for the association based on info from the 1461 1431 * local endpoint and the remote peer.
+1 -1
net/sctp/sm_statefuns.c
··· 6176 6176 * PMTU. In cases, such as loopback, this might be a rather 6177 6177 * large spill over. 6178 6178 */ 6179 - if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over || 6179 + if ((!chunk->data_accepted) && (!asoc->rwnd || 6180 6180 (datalen > asoc->rwnd + asoc->frag_point))) { 6181 6181 6182 6182 /* If this is the next TSN, consider reneging to make
+32 -15
net/sctp/socket.c
··· 64 64 #include <linux/crypto.h> 65 65 #include <linux/slab.h> 66 66 #include <linux/file.h> 67 + #include <linux/compat.h> 67 68 68 69 #include <net/ip.h> 69 70 #include <net/icmp.h> ··· 1369 1368 /* 1370 1369 * New (hopefully final) interface for the API. 1371 1370 * We use the sctp_getaddrs_old structure so that use-space library 1372 - * can avoid any unnecessary allocations. The only defferent part 1371 + * can avoid any unnecessary allocations. The only different part 1373 1372 * is that we store the actual length of the address buffer into the 1374 - * addrs_num structure member. That way we can re-use the existing 1373 + * addrs_num structure member. That way we can re-use the existing 1375 1374 * code. 1376 1375 */ 1376 + #ifdef CONFIG_COMPAT 1377 + struct compat_sctp_getaddrs_old { 1378 + sctp_assoc_t assoc_id; 1379 + s32 addr_num; 1380 + compat_uptr_t addrs; /* struct sockaddr * */ 1381 + }; 1382 + #endif 1383 + 1377 1384 static int sctp_getsockopt_connectx3(struct sock *sk, int len, 1378 1385 char __user *optval, 1379 1386 int __user *optlen) ··· 1390 1381 sctp_assoc_t assoc_id = 0; 1391 1382 int err = 0; 1392 1383 1393 - if (len < sizeof(param)) 1394 - return -EINVAL; 1384 + #ifdef CONFIG_COMPAT 1385 + if (is_compat_task()) { 1386 + struct compat_sctp_getaddrs_old param32; 1395 1387 1396 - if (copy_from_user(&param, optval, sizeof(param))) 1397 - return -EFAULT; 1388 + if (len < sizeof(param32)) 1389 + return -EINVAL; 1390 + if (copy_from_user(&param32, optval, sizeof(param32))) 1391 + return -EFAULT; 1398 1392 1399 - err = __sctp_setsockopt_connectx(sk, 1400 - (struct sockaddr __user *)param.addrs, 1401 - param.addr_num, &assoc_id); 1393 + param.assoc_id = param32.assoc_id; 1394 + param.addr_num = param32.addr_num; 1395 + param.addrs = compat_ptr(param32.addrs); 1396 + } else 1397 + #endif 1398 + { 1399 + if (len < sizeof(param)) 1400 + return -EINVAL; 1401 + if (copy_from_user(&param, optval, sizeof(param))) 1402 + return -EFAULT; 1403 + } 1402 
1404 1405 + err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *) 1406 + param.addrs, param.addr_num, 1407 + &assoc_id); 1403 1408 if (err == 0 || err == -EINPROGRESS) { 1404 1409 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) 1405 1410 return -EFAULT; ··· 2115 2092 sctp_skb_pull(skb, copied); 2116 2093 skb_queue_head(&sk->sk_receive_queue, skb); 2117 2094 2118 - /* When only partial message is copied to the user, increase 2119 - * rwnd by that amount. If all the data in the skb is read, 2120 - * rwnd is updated when the event is freed. 2121 - */ 2122 - if (!sctp_ulpevent_is_notification(event)) 2123 - sctp_assoc_rwnd_increase(event->asoc, copied); 2124 2095 goto out; 2125 2096 } else if ((event->msg_flags & MSG_NOTIFICATION) || 2126 2097 (event->msg_flags & MSG_EOR))
+11 -7
net/sctp/sysctl.c
··· 151 151 }, 152 152 { 153 153 .procname = "cookie_hmac_alg", 154 + .data = &init_net.sctp.sctp_hmac_alg, 154 155 .maxlen = 8, 155 156 .mode = 0644, 156 157 .proc_handler = proc_sctp_do_hmac_alg, ··· 402 401 403 402 int sctp_sysctl_net_register(struct net *net) 404 403 { 405 - struct ctl_table *table; 406 - int i; 404 + struct ctl_table *table = sctp_net_table; 407 405 408 - table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL); 409 - if (!table) 410 - return -ENOMEM; 406 + if (!net_eq(net, &init_net)) { 407 + int i; 411 408 412 - for (i = 0; table[i].data; i++) 413 - table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; 409 + table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL); 410 + if (!table) 411 + return -ENOMEM; 412 + 413 + for (i = 0; table[i].data; i++) 414 + table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; 415 + } 414 416 415 417 net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); 416 418 return 0;
+6 -2
net/sctp/ulpevent.c
··· 989 989 skb = sctp_event2skb(event); 990 990 /* Set the owner and charge rwnd for bytes received. */ 991 991 sctp_ulpevent_set_owner(event, asoc); 992 - sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb)); 992 + sctp_assoc_rwnd_update(asoc, false); 993 993 994 994 if (!skb->data_len) 995 995 return; ··· 1011 1011 { 1012 1012 struct sk_buff *skb, *frag; 1013 1013 unsigned int len; 1014 + struct sctp_association *asoc; 1014 1015 1015 1016 /* Current stack structures assume that the rcv buffer is 1016 1017 * per socket. For UDP style sockets this is not true as ··· 1036 1035 } 1037 1036 1038 1037 done: 1039 - sctp_assoc_rwnd_increase(event->asoc, len); 1038 + asoc = event->asoc; 1039 + sctp_association_hold(asoc); 1040 1040 sctp_ulpevent_release_owner(event); 1041 + sctp_assoc_rwnd_update(asoc, true); 1042 + sctp_association_put(asoc); 1041 1043 } 1042 1044 1043 1045 static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
+16 -3
net/sunrpc/auth_gss/auth_gss.c
··· 108 108 static DEFINE_SPINLOCK(pipe_version_lock); 109 109 static struct rpc_wait_queue pipe_version_rpc_waitqueue; 110 110 static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue); 111 + static void gss_put_auth(struct gss_auth *gss_auth); 111 112 112 113 static void gss_free_ctx(struct gss_cl_ctx *); 113 114 static const struct rpc_pipe_ops gss_upcall_ops_v0; ··· 321 320 if (gss_msg->ctx != NULL) 322 321 gss_put_ctx(gss_msg->ctx); 323 322 rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue); 323 + gss_put_auth(gss_msg->auth); 324 324 kfree(gss_msg); 325 325 } 326 326 ··· 500 498 default: 501 499 err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name); 502 500 if (err) 503 - goto err_free_msg; 501 + goto err_put_pipe_version; 504 502 }; 503 + kref_get(&gss_auth->kref); 505 504 return gss_msg; 505 + err_put_pipe_version: 506 + put_pipe_version(gss_auth->net); 506 507 err_free_msg: 507 508 kfree(gss_msg); 508 509 err: ··· 996 991 gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); 997 992 if (gss_auth->service == 0) 998 993 goto err_put_mech; 994 + if (!gssd_running(gss_auth->net)) 995 + goto err_put_mech; 999 996 auth = &gss_auth->rpc_auth; 1000 997 auth->au_cslack = GSS_CRED_SLACK >> 2; 1001 998 auth->au_rslack = GSS_VERF_SLACK >> 2; ··· 1069 1062 } 1070 1063 1071 1064 static void 1065 + gss_put_auth(struct gss_auth *gss_auth) 1066 + { 1067 + kref_put(&gss_auth->kref, gss_free_callback); 1068 + } 1069 + 1070 + static void 1072 1071 gss_destroy(struct rpc_auth *auth) 1073 1072 { 1074 1073 struct gss_auth *gss_auth = container_of(auth, ··· 1095 1082 gss_auth->gss_pipe[1] = NULL; 1096 1083 rpcauth_destroy_credcache(auth); 1097 1084 1098 - kref_put(&gss_auth->kref, gss_free_callback); 1085 + gss_put_auth(gss_auth); 1099 1086 } 1100 1087 1101 1088 /* ··· 1266 1253 call_rcu(&cred->cr_rcu, gss_free_cred_callback); 1267 1254 if (ctx) 1268 1255 gss_put_ctx(ctx); 1269 - kref_put(&gss_auth->kref, gss_free_callback); 1256 + 
gss_put_auth(gss_auth); 1270 1257 } 1271 1258 1272 1259 static void
+4 -2
net/sunrpc/backchannel_rqst.c
··· 64 64 free_page((unsigned long)xbufp->head[0].iov_base); 65 65 xbufp = &req->rq_snd_buf; 66 66 free_page((unsigned long)xbufp->head[0].iov_base); 67 - list_del(&req->rq_bc_pa_list); 68 67 kfree(req); 69 68 } 70 69 ··· 167 168 /* 168 169 * Memory allocation failed, free the temporary list 169 170 */ 170 - list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) 171 + list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) { 172 + list_del(&req->rq_bc_pa_list); 171 173 xprt_free_allocation(req); 174 + } 172 175 173 176 dprintk("RPC: setup backchannel transport failed\n"); 174 177 return -ENOMEM; ··· 199 198 xprt_dec_alloc_count(xprt, max_reqs); 200 199 list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) { 201 200 dprintk("RPC: req=%p\n", req); 201 + list_del(&req->rq_bc_pa_list); 202 202 xprt_free_allocation(req); 203 203 if (--max_reqs == 0) 204 204 break;
+5 -1
net/sunrpc/xprtsock.c
··· 510 510 struct rpc_rqst *req = task->tk_rqstp; 511 511 struct rpc_xprt *xprt = req->rq_xprt; 512 512 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 513 + struct sock *sk = transport->inet; 513 514 int ret = -EAGAIN; 514 515 515 516 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", ··· 528 527 * window size 529 528 */ 530 529 set_bit(SOCK_NOSPACE, &transport->sock->flags); 531 - transport->inet->sk_write_pending++; 530 + sk->sk_write_pending++; 532 531 /* ...and wait for more buffer space */ 533 532 xprt_wait_for_buffer_space(task, xs_nospace_callback); 534 533 } ··· 538 537 } 539 538 540 539 spin_unlock_bh(&xprt->transport_lock); 540 + 541 + /* Race breaker in case memory is freed before above code is called */ 542 + sk->sk_write_space(sk); 541 543 return ret; 542 544 } 543 545
+1
net/tipc/core.h
··· 192 192 193 193 struct tipc_skb_cb { 194 194 void *handle; 195 + bool deferred; 195 196 }; 196 197 197 198 #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
+7
net/tipc/link.c
··· 1391 1391 u32 hdr_size; 1392 1392 u32 min_hdr_size; 1393 1393 1394 + /* If this packet comes from the defer queue, the skb has already 1395 + * been validated 1396 + */ 1397 + if (unlikely(TIPC_SKB_CB(buf)->deferred)) 1398 + return 1; 1399 + 1394 1400 if (unlikely(buf->len < MIN_H_SIZE)) 1395 1401 return 0; 1396 1402 ··· 1709 1703 &l_ptr->newest_deferred_in, buf)) { 1710 1704 l_ptr->deferred_inqueue_sz++; 1711 1705 l_ptr->stats.deferred_recv++; 1706 + TIPC_SKB_CB(buf)->deferred = true; 1712 1707 if ((l_ptr->deferred_inqueue_sz % 16) == 1) 1713 1708 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1714 1709 } else
+1
scripts/Makefile.lib
··· 152 152 dtc_cpp_flags = -Wp,-MD,$(depfile).pre.tmp -nostdinc \ 153 153 -I$(srctree)/arch/$(SRCARCH)/boot/dts \ 154 154 -I$(srctree)/arch/$(SRCARCH)/boot/dts/include \ 155 + -I$(srctree)/drivers/of/testcase-data \ 155 156 -undef -D__DTS__ 156 157 157 158 # Finds the multi-part object the current object will be linked into
+7 -61
sound/pci/hda/patch_ca0132.c
··· 2662 2662 } 2663 2663 2664 2664 /* 2665 - * PCM stuffs 2666 - */ 2667 - static void ca0132_setup_stream(struct hda_codec *codec, hda_nid_t nid, 2668 - u32 stream_tag, 2669 - int channel_id, int format) 2670 - { 2671 - unsigned int oldval, newval; 2672 - 2673 - if (!nid) 2674 - return; 2675 - 2676 - snd_printdd( 2677 - "ca0132_setup_stream: NID=0x%x, stream=0x%x, " 2678 - "channel=%d, format=0x%x\n", 2679 - nid, stream_tag, channel_id, format); 2680 - 2681 - /* update the format-id if changed */ 2682 - oldval = snd_hda_codec_read(codec, nid, 0, 2683 - AC_VERB_GET_STREAM_FORMAT, 2684 - 0); 2685 - if (oldval != format) { 2686 - msleep(20); 2687 - snd_hda_codec_write(codec, nid, 0, 2688 - AC_VERB_SET_STREAM_FORMAT, 2689 - format); 2690 - } 2691 - 2692 - oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0); 2693 - newval = (stream_tag << 4) | channel_id; 2694 - if (oldval != newval) { 2695 - snd_hda_codec_write(codec, nid, 0, 2696 - AC_VERB_SET_CHANNEL_STREAMID, 2697 - newval); 2698 - } 2699 - } 2700 - 2701 - static void ca0132_cleanup_stream(struct hda_codec *codec, hda_nid_t nid) 2702 - { 2703 - unsigned int val; 2704 - 2705 - if (!nid) 2706 - return; 2707 - 2708 - snd_printdd(KERN_INFO "ca0132_cleanup_stream: NID=0x%x\n", nid); 2709 - 2710 - val = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0); 2711 - if (!val) 2712 - return; 2713 - 2714 - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, 0); 2715 - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, 0); 2716 - } 2717 - 2718 - /* 2719 2665 * PCM callbacks 2720 2666 */ 2721 2667 static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo, ··· 2672 2726 { 2673 2727 struct ca0132_spec *spec = codec->spec; 2674 2728 2675 - ca0132_setup_stream(codec, spec->dacs[0], stream_tag, 0, format); 2729 + snd_hda_codec_setup_stream(codec, spec->dacs[0], stream_tag, 0, format); 2676 2730 2677 2731 return 0; 2678 2732 } ··· 2691 2745 if (spec->effects_switch[PLAY_ENHANCEMENT - 
EFFECT_START_NID]) 2692 2746 msleep(50); 2693 2747 2694 - ca0132_cleanup_stream(codec, spec->dacs[0]); 2748 + snd_hda_codec_cleanup_stream(codec, spec->dacs[0]); 2695 2749 2696 2750 return 0; 2697 2751 } ··· 2768 2822 unsigned int format, 2769 2823 struct snd_pcm_substream *substream) 2770 2824 { 2771 - struct ca0132_spec *spec = codec->spec; 2772 - 2773 - ca0132_setup_stream(codec, spec->adcs[substream->number], 2774 - stream_tag, 0, format); 2825 + snd_hda_codec_setup_stream(codec, hinfo->nid, 2826 + stream_tag, 0, format); 2775 2827 2776 2828 return 0; 2777 2829 } ··· 2783 2839 if (spec->dsp_state == DSP_DOWNLOADING) 2784 2840 return 0; 2785 2841 2786 - ca0132_cleanup_stream(codec, hinfo->nid); 2842 + snd_hda_codec_cleanup_stream(codec, hinfo->nid); 2787 2843 return 0; 2788 2844 } 2789 2845 ··· 4686 4742 return err; 4687 4743 4688 4744 codec->patch_ops = ca0132_patch_ops; 4745 + codec->pcm_format_first = 1; 4746 + codec->no_sticky_stream = 1; 4689 4747 4690 4748 return 0; 4691 4749 }
+2
sound/pci/hda/patch_realtek.c
··· 4308 4308 SND_PCI_QUIRK(0x1028, 0x0651, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4309 4309 SND_PCI_QUIRK(0x1028, 0x0652, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4310 4310 SND_PCI_QUIRK(0x1028, 0x0653, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4311 + SND_PCI_QUIRK(0x1028, 0x0657, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4311 4312 SND_PCI_QUIRK(0x1028, 0x0658, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4313 + SND_PCI_QUIRK(0x1028, 0x065f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4312 4314 SND_PCI_QUIRK(0x1028, 0x0662, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4313 4315 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4314 4316 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+13
sound/pci/hda/patch_sigmatel.c
··· 83 83 STAC_DELL_M6_BOTH, 84 84 STAC_DELL_EQ, 85 85 STAC_ALIENWARE_M17X, 86 + STAC_92HD89XX_HP_FRONT_JACK, 86 87 STAC_92HD73XX_MODELS 87 88 }; 88 89 ··· 1796 1795 {} 1797 1796 }; 1798 1797 1798 + static const struct hda_pintbl stac92hd89xx_hp_front_jack_pin_configs[] = { 1799 + { 0x0a, 0x02214030 }, 1800 + { 0x0b, 0x02A19010 }, 1801 + {} 1802 + }; 1803 + 1799 1804 static void stac92hd73xx_fixup_ref(struct hda_codec *codec, 1800 1805 const struct hda_fixup *fix, int action) 1801 1806 { ··· 1920 1913 [STAC_92HD73XX_NO_JD] = { 1921 1914 .type = HDA_FIXUP_FUNC, 1922 1915 .v.func = stac92hd73xx_fixup_no_jd, 1916 + }, 1917 + [STAC_92HD89XX_HP_FRONT_JACK] = { 1918 + .type = HDA_FIXUP_PINS, 1919 + .v.pins = stac92hd89xx_hp_front_jack_pin_configs, 1923 1920 } 1924 1921 }; 1925 1922 ··· 1984 1973 "Alienware M17x", STAC_ALIENWARE_M17X), 1985 1974 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490, 1986 1975 "Alienware M17x R3", STAC_DELL_EQ), 1976 + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17, 1977 + "unknown HP", STAC_92HD89XX_HP_FRONT_JACK), 1987 1978 {} /* terminator */ 1988 1979 }; 1989 1980
+5 -6
sound/soc/blackfin/Kconfig
··· 11 11 12 12 config SND_BF5XX_SOC_SSM2602 13 13 tristate "SoC SSM2602 Audio Codec Add-On Card support" 14 - depends on SND_BF5XX_I2S && (SPI_MASTER || I2C) 14 + depends on SND_BF5XX_I2S && SND_SOC_I2C_AND_SPI 15 15 select SND_BF5XX_SOC_I2S if !BF60x 16 16 select SND_BF6XX_SOC_I2S if BF60x 17 17 select SND_SOC_SSM2602 ··· 21 21 22 22 config SND_SOC_BFIN_EVAL_ADAU1701 23 23 tristate "Support for the EVAL-ADAU1701MINIZ board on Blackfin eval boards" 24 - depends on SND_BF5XX_I2S 24 + depends on SND_BF5XX_I2S && I2C 25 25 select SND_BF5XX_SOC_I2S 26 26 select SND_SOC_ADAU1701 27 - select I2C 28 27 help 29 28 Say Y if you want to add support for the Analog Devices EVAL-ADAU1701MINIZ 30 29 board connected to one of the Blackfin evaluation boards like the ··· 44 45 45 46 config SND_SOC_BFIN_EVAL_ADAV80X 46 47 tristate "Support for the EVAL-ADAV80X boards on Blackfin eval boards" 47 - depends on SND_BF5XX_I2S && (SPI_MASTER || I2C) 48 + depends on SND_BF5XX_I2S && SND_SOC_I2C_AND_SPI 48 49 select SND_BF5XX_SOC_I2S 49 50 select SND_SOC_ADAV80X 50 51 help ··· 57 58 58 59 config SND_BF5XX_SOC_AD1836 59 60 tristate "SoC AD1836 Audio support for BF5xx" 60 - depends on SND_BF5XX_I2S 61 + depends on SND_BF5XX_I2S && SPI_MASTER 61 62 select SND_BF5XX_SOC_I2S 62 63 select SND_SOC_AD1836 63 64 help ··· 65 66 66 67 config SND_BF5XX_SOC_AD193X 67 68 tristate "SoC AD193X Audio support for Blackfin" 68 - depends on SND_BF5XX_I2S 69 + depends on SND_BF5XX_I2S && SND_SOC_I2C_AND_SPI 69 70 select SND_BF5XX_SOC_I2S 70 71 select SND_SOC_AD193X 71 72 help
+9 -2
sound/soc/codecs/da9055.c
··· 1523 1523 return 0; 1524 1524 } 1525 1525 1526 + /* 1527 + * DO NOT change the device Ids. The naming is intentionally specific as both 1528 + * the CODEC and PMIC parts of this chip are instantiated separately as I2C 1529 + * devices (both have configurable I2C addresses, and are to all intents and 1530 + * purposes separate). As a result there are specific DA9055 Ids for CODEC 1531 + * and PMIC, which must be different to operate together. 1532 + */ 1526 1533 static const struct i2c_device_id da9055_i2c_id[] = { 1527 - { "da9055", 0 }, 1534 + { "da9055-codec", 0 }, 1528 1535 { } 1529 1536 }; 1530 1537 MODULE_DEVICE_TABLE(i2c, da9055_i2c_id); ··· 1539 1532 /* I2C codec control layer */ 1540 1533 static struct i2c_driver da9055_i2c_driver = { 1541 1534 .driver = { 1542 - .name = "da9055", 1535 + .name = "da9055-codec", 1543 1536 .owner = THIS_MODULE, 1544 1537 }, 1545 1538 .probe = da9055_i2c_probe,
+11 -10
sound/soc/codecs/max98090.c
··· 336 336 case M98090_REG_RECORD_TDM_SLOT: 337 337 case M98090_REG_SAMPLE_RATE: 338 338 case M98090_REG_DMIC34_BIQUAD_BASE ... M98090_REG_DMIC34_BIQUAD_BASE + 0x0E: 339 + case M98090_REG_REVISION_ID: 339 340 return true; 340 341 default: 341 342 return false; ··· 1770 1769 1771 1770 switch (level) { 1772 1771 case SND_SOC_BIAS_ON: 1773 - if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { 1774 - ret = regcache_sync(max98090->regmap); 1775 - 1776 - if (ret != 0) { 1777 - dev_err(codec->dev, 1778 - "Failed to sync cache: %d\n", ret); 1779 - return ret; 1780 - } 1781 - } 1782 - 1783 1772 if (max98090->jack_state == M98090_JACK_STATE_HEADSET) { 1784 1773 /* 1785 1774 * Set to normal bias level. ··· 1783 1792 break; 1784 1793 1785 1794 case SND_SOC_BIAS_STANDBY: 1795 + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { 1796 + ret = regcache_sync(max98090->regmap); 1797 + if (ret != 0) { 1798 + dev_err(codec->dev, 1799 + "Failed to sync cache: %d\n", ret); 1800 + return ret; 1801 + } 1802 + } 1803 + break; 1804 + 1786 1805 case SND_SOC_BIAS_OFF: 1787 1806 /* Set internal pull-up to lowest power mode */ 1788 1807 snd_soc_update_bits(codec, M98090_REG_JACK_DETECT,
+1
sound/soc/codecs/rt5640.c
··· 2093 2093 #ifdef CONFIG_ACPI 2094 2094 static struct acpi_device_id rt5640_acpi_match[] = { 2095 2095 { "INT33CA", 0 }, 2096 + { "10EC5640", 0 }, 2096 2097 { }, 2097 2098 }; 2098 2099 MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match);
-1
sound/soc/codecs/wm8993.c
··· 1562 1562 struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec); 1563 1563 1564 1564 wm8993_set_bias_level(codec, SND_SOC_BIAS_OFF); 1565 - regulator_bulk_free(ARRAY_SIZE(wm8993->supplies), wm8993->supplies); 1566 1565 return 0; 1567 1566 } 1568 1567
+1
sound/soc/davinci/davinci-evm.c
··· 399 399 .driver = { 400 400 .name = "davinci_evm", 401 401 .owner = THIS_MODULE, 402 + .pm = &snd_soc_pm_ops, 402 403 .of_match_table = of_match_ptr(davinci_evm_dt_ids), 403 404 }, 404 405 };
+36 -43
sound/soc/davinci/davinci-mcasp.c
··· 263 263 unsigned int fmt) 264 264 { 265 265 struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai); 266 + int ret = 0; 266 267 268 + pm_runtime_get_sync(mcasp->dev); 267 269 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { 268 270 case SND_SOC_DAIFMT_DSP_B: 269 271 case SND_SOC_DAIFMT_AC97: ··· 319 317 break; 320 318 321 319 default: 322 - return -EINVAL; 320 + ret = -EINVAL; 321 + goto out; 323 322 } 324 323 325 324 switch (fmt & SND_SOC_DAIFMT_INV_MASK) { ··· 357 354 break; 358 355 359 356 default: 360 - return -EINVAL; 357 + ret = -EINVAL; 358 + break; 361 359 } 362 - 363 - return 0; 360 + out: 361 + pm_runtime_put_sync(mcasp->dev); 362 + return ret; 364 363 } 365 364 366 365 static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div) ··· 453 448 return 0; 454 449 } 455 450 456 - static int davinci_hw_common_param(struct davinci_mcasp *mcasp, int stream, 451 + static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream, 457 452 int channels) 458 453 { 459 454 int i; ··· 529 524 return 0; 530 525 } 531 526 532 - static void davinci_hw_param(struct davinci_mcasp *mcasp, int stream) 527 + static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream) 533 528 { 534 529 int i, active_slots; 535 530 u32 mask = 0; 536 531 u32 busel = 0; 532 + 533 + if ((mcasp->tdm_slots < 2) || (mcasp->tdm_slots > 32)) { 534 + dev_err(mcasp->dev, "tdm slot %d not supported\n", 535 + mcasp->tdm_slots); 536 + return -EINVAL; 537 + } 537 538 538 539 active_slots = (mcasp->tdm_slots > 31) ? 
32 : mcasp->tdm_slots; 539 540 for (i = 0; i < active_slots; i++) ··· 550 539 if (!mcasp->dat_port) 551 540 busel = TXSEL; 552 541 553 - if (stream == SNDRV_PCM_STREAM_PLAYBACK) { 554 - /* bit stream is MSB first with no delay */ 555 - /* DSP_B mode */ 556 - mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask); 557 - mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD); 542 + mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask); 543 + mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD); 544 + mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, 545 + FSXMOD(mcasp->tdm_slots), FSXMOD(0x1FF)); 558 546 559 - if ((mcasp->tdm_slots >= 2) && (mcasp->tdm_slots <= 32)) 560 - mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, 561 - FSXMOD(mcasp->tdm_slots), FSXMOD(0x1FF)); 562 - else 563 - printk(KERN_ERR "playback tdm slot %d not supported\n", 564 - mcasp->tdm_slots); 565 - } else { 566 - /* bit stream is MSB first with no delay */ 567 - /* DSP_B mode */ 568 - mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD); 569 - mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask); 547 + mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask); 548 + mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD); 549 + mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, 550 + FSRMOD(mcasp->tdm_slots), FSRMOD(0x1FF)); 570 551 571 - if ((mcasp->tdm_slots >= 2) && (mcasp->tdm_slots <= 32)) 572 - mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, 573 - FSRMOD(mcasp->tdm_slots), FSRMOD(0x1FF)); 574 - else 575 - printk(KERN_ERR "capture tdm slot %d not supported\n", 576 - mcasp->tdm_slots); 577 - } 552 + return 0; 578 553 } 579 554 580 555 /* S/PDIF */ 581 - static void davinci_hw_dit_param(struct davinci_mcasp *mcasp) 556 + static int mcasp_dit_hw_param(struct davinci_mcasp *mcasp) 582 557 { 583 558 /* Set the TX format : 24 bit right rotation, 32 bit slot, Pad 0 584 559 and LSB first */ ··· 586 589 587 590 /* Enable the DIT */ 588 591 mcasp_set_bits(mcasp, 
DAVINCI_MCASP_TXDITCTL_REG, DITEN); 592 + 593 + return 0; 589 594 } 590 595 591 596 static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream, ··· 604 605 u8 slots = mcasp->tdm_slots; 605 606 u8 active_serializers; 606 607 int channels; 608 + int ret; 607 609 struct snd_interval *pcm_channels = hw_param_interval(params, 608 610 SNDRV_PCM_HW_PARAM_CHANNELS); 609 611 channels = pcm_channels->min; 610 612 611 613 active_serializers = (channels + slots - 1) / slots; 612 614 613 - if (davinci_hw_common_param(mcasp, substream->stream, channels) == -EINVAL) 615 + if (mcasp_common_hw_param(mcasp, substream->stream, channels) == -EINVAL) 614 616 return -EINVAL; 615 617 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 616 618 fifo_level = mcasp->txnumevt * active_serializers; ··· 619 619 fifo_level = mcasp->rxnumevt * active_serializers; 620 620 621 621 if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE) 622 - davinci_hw_dit_param(mcasp); 622 + ret = mcasp_dit_hw_param(mcasp); 623 623 else 624 - davinci_hw_param(mcasp, substream->stream); 624 + ret = mcasp_i2s_hw_param(mcasp, substream->stream); 625 + 626 + if (ret) 627 + return ret; 625 628 626 629 switch (params_format(params)) { 627 630 case SNDRV_PCM_FORMAT_U8: ··· 681 678 case SNDRV_PCM_TRIGGER_RESUME: 682 679 case SNDRV_PCM_TRIGGER_START: 683 680 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 684 - ret = pm_runtime_get_sync(mcasp->dev); 685 - if (IS_ERR_VALUE(ret)) 686 - dev_err(mcasp->dev, "pm_runtime_get_sync() failed\n"); 687 681 davinci_mcasp_start(mcasp, substream->stream); 688 682 break; 689 - 690 683 case SNDRV_PCM_TRIGGER_SUSPEND: 691 - davinci_mcasp_stop(mcasp, substream->stream); 692 - ret = pm_runtime_put_sync(mcasp->dev); 693 - if (IS_ERR_VALUE(ret)) 694 - dev_err(mcasp->dev, "pm_runtime_put_sync() failed\n"); 695 - break; 696 - 697 684 case SNDRV_PCM_TRIGGER_STOP: 698 685 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 699 686 davinci_mcasp_stop(mcasp, substream->stream);
+2 -2
sound/soc/fsl/fsl_esai.c
··· 326 326 regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA, 327 327 ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask)); 328 328 regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB, 329 - ESAI_xSMA_xS_MASK, ESAI_xSMB_xS(tx_mask)); 329 + ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask)); 330 330 331 331 regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, 332 332 ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots)); ··· 334 334 regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA, 335 335 ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask)); 336 336 regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB, 337 - ESAI_xSMA_xS_MASK, ESAI_xSMB_xS(rx_mask)); 337 + ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask)); 338 338 339 339 esai_priv->slot_width = slot_width; 340 340
+1 -1
sound/soc/fsl/fsl_esai.h
··· 322 322 #define ESAI_xSMB_xS_SHIFT 0 323 323 #define ESAI_xSMB_xS_WIDTH 16 324 324 #define ESAI_xSMB_xS_MASK (((1 << ESAI_xSMB_xS_WIDTH) - 1) << ESAI_xSMB_xS_SHIFT) 325 - #define ESAI_xSMB_xS(v) (((v) >> ESAI_xSMA_xS_WIDTH) & ESAI_xSMA_xS_MASK) 325 + #define ESAI_xSMB_xS(v) (((v) >> ESAI_xSMA_xS_WIDTH) & ESAI_xSMB_xS_MASK) 326 326 327 327 /* Port C Direction Register -- REG_ESAI_PRRC 0xF8 */ 328 328 #define ESAI_PRRC_PDC_SHIFT 0
-1
sound/soc/fsl/imx-mc13783.c
··· 160 160 .driver = { 161 161 .name = "imx_mc13783", 162 162 .owner = THIS_MODULE, 163 - .pm = &snd_soc_pm_ops, 164 163 }, 165 164 .probe = imx_mc13783_probe, 166 165 .remove = imx_mc13783_remove
+6 -4
sound/soc/fsl/imx-sgtl5000.c
··· 33 33 34 34 static int imx_sgtl5000_dai_init(struct snd_soc_pcm_runtime *rtd) 35 35 { 36 - struct imx_sgtl5000_data *data = container_of(rtd->card, 37 - struct imx_sgtl5000_data, card); 36 + struct imx_sgtl5000_data *data = snd_soc_card_get_drvdata(rtd->card); 38 37 struct device *dev = rtd->card->dev; 39 38 int ret; 40 39 ··· 158 159 data->card.dapm_widgets = imx_sgtl5000_dapm_widgets; 159 160 data->card.num_dapm_widgets = ARRAY_SIZE(imx_sgtl5000_dapm_widgets); 160 161 162 + platform_set_drvdata(pdev, &data->card); 163 + snd_soc_card_set_drvdata(&data->card, data); 164 + 161 165 ret = devm_snd_soc_register_card(&pdev->dev, &data->card); 162 166 if (ret) { 163 167 dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret); 164 168 goto fail; 165 169 } 166 170 167 - platform_set_drvdata(pdev, data); 168 171 of_node_put(ssi_np); 169 172 of_node_put(codec_np); 170 173 ··· 185 184 186 185 static int imx_sgtl5000_remove(struct platform_device *pdev) 187 186 { 188 - struct imx_sgtl5000_data *data = platform_get_drvdata(pdev); 187 + struct snd_soc_card *card = platform_get_drvdata(pdev); 188 + struct imx_sgtl5000_data *data = snd_soc_card_get_drvdata(card); 189 189 190 190 clk_put(data->codec_clk); 191 191
+7 -4
sound/soc/fsl/imx-wm8962.c
··· 71 71 { 72 72 struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai; 73 73 struct imx_priv *priv = &card_priv; 74 - struct imx_wm8962_data *data = platform_get_drvdata(priv->pdev); 74 + struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card); 75 75 struct device *dev = &priv->pdev->dev; 76 76 unsigned int pll_out; 77 77 int ret; ··· 137 137 { 138 138 struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai; 139 139 struct imx_priv *priv = &card_priv; 140 - struct imx_wm8962_data *data = platform_get_drvdata(priv->pdev); 140 + struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card); 141 141 struct device *dev = &priv->pdev->dev; 142 142 int ret; 143 143 ··· 264 264 data->card.late_probe = imx_wm8962_late_probe; 265 265 data->card.set_bias_level = imx_wm8962_set_bias_level; 266 266 267 + platform_set_drvdata(pdev, &data->card); 268 + snd_soc_card_set_drvdata(&data->card, data); 269 + 267 270 ret = devm_snd_soc_register_card(&pdev->dev, &data->card); 268 271 if (ret) { 269 272 dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret); 270 273 goto clk_fail; 271 274 } 272 275 273 - platform_set_drvdata(pdev, data); 274 276 of_node_put(ssi_np); 275 277 of_node_put(codec_np); 276 278 ··· 291 289 292 290 static int imx_wm8962_remove(struct platform_device *pdev) 293 291 { 294 - struct imx_wm8962_data *data = platform_get_drvdata(pdev); 292 + struct snd_soc_card *card = platform_get_drvdata(pdev); 293 + struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card); 295 294 296 295 if (!IS_ERR(data->codec_clk)) 297 296 clk_disable_unprepare(data->codec_clk);
+3 -3
sound/soc/samsung/Kconfig
··· 59 59 select SND_SOC_WM8750 60 60 select SND_S3C2412_SOC_I2S 61 61 help 62 - Sat Y if you want to add support for SoC audio on the Jive. 62 + Say Y if you want to add support for SoC audio on the Jive. 63 63 64 64 config SND_SOC_SAMSUNG_SMDK_WM8580 65 65 tristate "SoC I2S Audio support for WM8580 on SMDK" ··· 145 145 146 146 config SND_SOC_SAMSUNG_SMDK_WM9713 147 147 tristate "SoC AC97 Audio support for SMDK with WM9713" 148 - depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110 || MACH_SMDKV310 || MACH_SMDKC210) 148 + depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110) 149 149 select SND_SOC_WM9713 150 150 select SND_SAMSUNG_AC97 151 151 help 152 - Sat Y if you want to add support for SoC audio on the SMDK. 152 + Say Y if you want to add support for SoC audio on the SMDK. 153 153 154 154 config SND_SOC_SMARTQ 155 155 tristate "SoC I2S Audio support for SmartQ board"
+5 -3
sound/soc/txx9/txx9aclc-ac97.c
··· 183 183 irq = platform_get_irq(pdev, 0); 184 184 if (irq < 0) 185 185 return irq; 186 + 187 + drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL); 188 + if (!drvdata) 189 + return -ENOMEM; 190 + 186 191 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 187 192 drvdata->base = devm_ioremap_resource(&pdev->dev, r); 188 193 if (IS_ERR(drvdata->base)) 189 194 return PTR_ERR(drvdata->base); 190 195 191 - drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL); 192 - if (!drvdata) 193 - return -ENOMEM; 194 196 platform_set_drvdata(pdev, drvdata); 195 197 drvdata->physbase = r->start; 196 198 if (sizeof(drvdata->physbase) > sizeof(r->start) &&
+9
sound/usb/mixer_maps.c
··· 328 328 {} 329 329 }; 330 330 331 + static const struct usbmix_name_map kef_x300a_map[] = { 332 + { 10, NULL }, /* firmware locks up (?) when we try to access this FU */ 333 + { 0 } 334 + }; 335 + 331 336 /* 332 337 * Control map entries 333 338 */ ··· 423 418 { 424 419 .id = USB_ID(0x200c, 0x1018), 425 420 .map = ebox44_map, 421 + }, 422 + { 423 + .id = USB_ID(0x27ac, 0x1000), 424 + .map = kef_x300a_map, 426 425 }, 427 426 { 0 } /* terminator */ 428 427 };
+22
tools/perf/builtin-trace.c
··· 37 37 # define MADV_UNMERGEABLE 13 38 38 #endif 39 39 40 + #ifndef EFD_SEMAPHORE 41 + # define EFD_SEMAPHORE 1 42 + #endif 43 + 40 44 struct tp_field { 41 45 int offset; 42 46 union { ··· 283 279 284 280 #define SCA_STRARRAY syscall_arg__scnprintf_strarray 285 281 282 + #if defined(__i386__) || defined(__x86_64__) 283 + /* 284 + * FIXME: Make this available to all arches as soon as the ioctl beautifier 285 + * gets rewritten to support all arches. 286 + */ 286 287 static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size, 287 288 struct syscall_arg *arg) 288 289 { ··· 295 286 } 296 287 297 288 #define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray 289 + #endif /* defined(__i386__) || defined(__x86_64__) */ 298 290 299 291 static size_t syscall_arg__scnprintf_fd(char *bf, size_t size, 300 292 struct syscall_arg *arg); ··· 849 839 850 840 #define SCA_SIGNUM syscall_arg__scnprintf_signum 851 841 842 + #if defined(__i386__) || defined(__x86_64__) 843 + /* 844 + * FIXME: Make this available to all arches. 845 + */ 852 846 #define TCGETS 0x5401 853 847 854 848 static const char *tioctls[] = { ··· 874 860 }; 875 861 876 862 static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401); 863 + #endif /* defined(__i386__) || defined(__x86_64__) */ 877 864 878 865 #define STRARRAY(arg, name, array) \ 879 866 .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \ ··· 956 941 { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), }, 957 942 { .name = "ioctl", .errmsg = true, 958 943 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 944 + #if defined(__i386__) || defined(__x86_64__) 945 + /* 946 + * FIXME: Make this available to all arches. 
947 + */ 959 948 [1] = SCA_STRHEXARRAY, /* cmd */ 960 949 [2] = SCA_HEX, /* arg */ }, 961 950 .arg_parm = { [1] = &strarray__tioctls, /* cmd */ }, }, 951 + #else 952 + [2] = SCA_HEX, /* arg */ }, }, 953 + #endif 962 954 { .name = "kill", .errmsg = true, 963 955 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, }, 964 956 { .name = "linkat", .errmsg = true,
+15 -2
tools/perf/util/parse-events.c
··· 1091 1091 static bool is_event_supported(u8 type, unsigned config) 1092 1092 { 1093 1093 bool ret = true; 1094 + int open_return; 1094 1095 struct perf_evsel *evsel; 1095 1096 struct perf_event_attr attr = { 1096 1097 .type = type, 1097 1098 .config = config, 1098 1099 .disabled = 1, 1099 - .exclude_kernel = 1, 1100 1100 }; 1101 1101 struct { 1102 1102 struct thread_map map; ··· 1108 1108 1109 1109 evsel = perf_evsel__new(&attr); 1110 1110 if (evsel) { 1111 - ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0; 1111 + open_return = perf_evsel__open(evsel, NULL, &tmap.map); 1112 + ret = open_return >= 0; 1113 + 1114 + if (open_return == -EACCES) { 1115 + /* 1116 + * This happens if the paranoid value 1117 + * /proc/sys/kernel/perf_event_paranoid is set to 2 1118 + * Re-run with exclude_kernel set; we don't do that 1119 + * by default as some ARM machines do not support it. 1120 + * 1121 + */ 1122 + evsel->attr.exclude_kernel = 1; 1123 + ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0; 1124 + } 1112 1125 perf_evsel__delete(evsel); 1113 1126 } 1114 1127
+1 -1
tools/perf/util/probe-event.c
··· 336 336 return ret; 337 337 338 338 for (i = 0; i < ntevs && ret >= 0; i++) { 339 + /* point.address is the addres of point.symbol + point.offset */ 339 340 offset = tevs[i].point.address - stext; 340 - offset += tevs[i].point.offset; 341 341 tevs[i].point.offset = 0; 342 342 zfree(&tevs[i].point.symbol); 343 343 ret = e_snprintf(buf, 32, "0x%lx", offset);
+6
tools/perf/util/session.c
··· 1008 1008 if (err == 0) 1009 1009 perf_session__set_id_hdr_size(session); 1010 1010 return err; 1011 + case PERF_RECORD_HEADER_EVENT_TYPE: 1012 + /* 1013 + * Depreceated, but we need to handle it for sake 1014 + * of old data files create in pipe mode. 1015 + */ 1016 + return 0; 1011 1017 case PERF_RECORD_HEADER_TRACING_DATA: 1012 1018 /* setup for reading amidst mmap */ 1013 1019 lseek(fd, file_offset, SEEK_SET);