Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'irq/for-gpio' into irq/core

Merge the request/release callbacks which are in a separate branch for
consumption by the gpio folks.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

+8260 -5027
+1 -2
Documentation/ABI/testing/sysfs-tty
··· 3 3 Contact: Kay Sievers <kay.sievers@vrfy.org> 4 4 Description: 5 5 Shows the list of currently configured 6 - tty devices used for the console, 7 - like 'tty1 ttyS0'. 6 + console devices, like 'tty1 ttyS0'. 8 7 The last entry in the file is the active 9 8 device connected to /dev/console. 10 9 The file supports poll() to detect virtual
+109 -10
Documentation/PCI/MSI-HOWTO.txt
··· 82 82 has to request that the PCI layer set up the MSI capability for this 83 83 device. 84 84 85 - 4.2.1 pci_enable_msi_range 85 + 4.2.1 pci_enable_msi 86 + 87 + int pci_enable_msi(struct pci_dev *dev) 88 + 89 + A successful call allocates ONE interrupt to the device, regardless 90 + of how many MSIs the device supports. The device is switched from 91 + pin-based interrupt mode to MSI mode. The dev->irq number is changed 92 + to a new number which represents the message signaled interrupt; 93 + consequently, this function should be called before the driver calls 94 + request_irq(), because an MSI is delivered via a vector that is 95 + different from the vector of a pin-based interrupt. 96 + 97 + 4.2.2 pci_enable_msi_range 86 98 87 99 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) 88 100 ··· 159 147 return pci_enable_msi_range(pdev, nvec, nvec); 160 148 } 161 149 150 + Note, unlike pci_enable_msi_exact() function, which could be also used to 151 + enable a particular number of MSI-X interrupts, pci_enable_msi_range() 152 + returns either a negative errno or 'nvec' (not negative errno or 0 - as 153 + pci_enable_msi_exact() does). 154 + 162 155 4.2.1.3 Single MSI mode 163 156 164 157 The most notorious example of the request type described above is ··· 175 158 return pci_enable_msi_range(pdev, 1, 1); 176 159 } 177 160 178 - 4.2.2 pci_disable_msi 161 + Note, unlike pci_enable_msi() function, which could be also used to 162 + enable the single MSI mode, pci_enable_msi_range() returns either a 163 + negative errno or 1 (not negative errno or 0 - as pci_enable_msi() 164 + does). 165 + 166 + 4.2.3 pci_enable_msi_exact 167 + 168 + int pci_enable_msi_exact(struct pci_dev *dev, int nvec) 169 + 170 + This variation on pci_enable_msi_range() call allows a device driver to 171 + request exactly 'nvec' MSIs. 
172 + 173 + If this function returns a negative number, it indicates an error and 174 + the driver should not attempt to request any more MSI interrupts for 175 + this device. 176 + 177 + By contrast with pci_enable_msi_range() function, pci_enable_msi_exact() 178 + returns zero in case of success, which indicates MSI interrupts have been 179 + successfully allocated. 180 + 181 + 4.2.4 pci_disable_msi 179 182 180 183 void pci_disable_msi(struct pci_dev *dev) 181 184 ··· 209 172 Failure to do so results in a BUG_ON(), leaving the device with 210 173 MSI enabled and thus leaking its vector. 211 174 212 - 4.2.3 pci_msi_vec_count 175 + 4.2.4 pci_msi_vec_count 213 176 214 177 int pci_msi_vec_count(struct pci_dev *dev) 215 178 ··· 294 257 295 258 static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec) 296 259 { 297 - return pci_enable_msi_range(adapter->pdev, adapter->msix_entries, 298 - 1, nvec); 260 + return pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 261 + 1, nvec); 299 262 } 300 263 301 264 Note the value of 'minvec' parameter is 1. 
As 'minvec' is inclusive, ··· 306 269 307 270 static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec) 308 271 { 309 - return pci_enable_msi_range(adapter->pdev, adapter->msix_entries, 310 - FOO_DRIVER_MINIMUM_NVEC, nvec); 272 + return pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 273 + FOO_DRIVER_MINIMUM_NVEC, nvec); 311 274 } 312 275 313 276 4.3.1.2 Exact number of MSI-X interrupts ··· 319 282 320 283 static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec) 321 284 { 322 - return pci_enable_msi_range(adapter->pdev, adapter->msix_entries, 323 - nvec, nvec); 285 + return pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 286 + nvec, nvec); 324 287 } 288 + 289 + Note, unlike pci_enable_msix_exact() function, which could be also used to 290 + enable a particular number of MSI-X interrupts, pci_enable_msix_range() 291 + returns either a negative errno or 'nvec' (not negative errno or 0 - as 292 + pci_enable_msix_exact() does). 325 293 326 294 4.3.1.3 Specific requirements to the number of MSI-X interrupts 327 295 ··· 374 332 any error code other than -ENOSPC indicates a fatal error and should not 375 333 be retried. 376 334 377 - 4.3.2 pci_disable_msix 335 + 4.3.2 pci_enable_msix_exact 336 + 337 + int pci_enable_msix_exact(struct pci_dev *dev, 338 + struct msix_entry *entries, int nvec) 339 + 340 + This variation on pci_enable_msix_range() call allows a device driver to 341 + request exactly 'nvec' MSI-Xs. 342 + 343 + If this function returns a negative number, it indicates an error and 344 + the driver should not attempt to allocate any more MSI-X interrupts for 345 + this device. 346 + 347 + By contrast with pci_enable_msix_range() function, pci_enable_msix_exact() 348 + returns zero in case of success, which indicates MSI-X interrupts have been 349 + successfully allocated. 
350 + 351 + Another version of a routine that enables MSI-X mode for a device with 352 + specific requirements described in chapter 4.3.1.3 might look like this: 353 + 354 + /* 355 + * Assume 'minvec' and 'maxvec' are non-zero 356 + */ 357 + static int foo_driver_enable_msix(struct foo_adapter *adapter, 358 + int minvec, int maxvec) 359 + { 360 + int rc; 361 + 362 + minvec = roundup_pow_of_two(minvec); 363 + maxvec = rounddown_pow_of_two(maxvec); 364 + 365 + if (minvec > maxvec) 366 + return -ERANGE; 367 + 368 + retry: 369 + rc = pci_enable_msix_exact(adapter->pdev, 370 + adapter->msix_entries, maxvec); 371 + 372 + /* 373 + * -ENOSPC is the only error code allowed to be analyzed 374 + */ 375 + if (rc == -ENOSPC) { 376 + if (maxvec == 1) 377 + return -ENOSPC; 378 + 379 + maxvec /= 2; 380 + 381 + if (minvec > maxvec) 382 + return -ENOSPC; 383 + 384 + goto retry; 385 + } else if (rc < 0) { 386 + return rc; 387 + } 388 + 389 + return maxvec; 390 + } 391 + 392 + 4.3.3 pci_disable_msix 378 393 379 394 void pci_disable_msix(struct pci_dev *dev) 380 395
+5 -6
Documentation/device-mapper/cache.txt
··· 124 124 Updating on-disk metadata 125 125 ------------------------- 126 126 127 - On-disk metadata is committed every time a REQ_SYNC or REQ_FUA bio is 128 - written. If no such requests are made then commits will occur every 129 - second. This means the cache behaves like a physical disk that has a 130 - write cache (the same is true of the thin-provisioning target). If 131 - power is lost you may lose some recent writes. The metadata should 132 - always be consistent in spite of any crash. 127 + On-disk metadata is committed every time a FLUSH or FUA bio is written. 128 + If no such requests are made then commits will occur every second. This 129 + means the cache behaves like a physical disk that has a volatile write 130 + cache. If power is lost you may lose some recent writes. The metadata 131 + should always be consistent in spite of any crash. 133 132 134 133 The 'dirty' state for a cache block changes far too frequently for us 135 134 to keep updating it on the fly. So we treat it as a hint. In normal
+31 -3
Documentation/device-mapper/thin-provisioning.txt
··· 116 116 userspace daemon can use this to detect a situation where a new table 117 117 already exceeds the threshold. 118 118 119 + A low water mark for the metadata device is maintained in the kernel and 120 + will trigger a dm event if free space on the metadata device drops below 121 + it. 122 + 123 + Updating on-disk metadata 124 + ------------------------- 125 + 126 + On-disk metadata is committed every time a FLUSH or FUA bio is written. 127 + If no such requests are made then commits will occur every second. This 128 + means the thin-provisioning target behaves like a physical disk that has 129 + a volatile write cache. If power is lost you may lose some recent 130 + writes. The metadata should always be consistent in spite of any crash. 131 + 132 + If data space is exhausted the pool will either error or queue IO 133 + according to the configuration (see: error_if_no_space). If metadata 134 + space is exhausted or a metadata operation fails: the pool will error IO 135 + until the pool is taken offline and repair is performed to 1) fix any 136 + potential inconsistencies and 2) clear the flag that imposes repair. 137 + Once the pool's metadata device is repaired it may be resized, which 138 + will allow the pool to return to normal operation. Note that if a pool 139 + is flagged as needing repair, the pool's data and metadata devices 140 + cannot be resized until repair is performed. It should also be noted 141 + that when the pool's metadata space is exhausted the current metadata 142 + transaction is aborted. Given that the pool will cache IO whose 143 + completion may have already been acknowledged to upper IO layers 144 + (e.g. filesystem) it is strongly suggested that consistency checks 145 + (e.g. fsck) be performed on those layers when repair of the pool is 146 + required. 147 + 119 148 Thin provisioning 120 149 ----------------- 121 150 ··· 287 258 should register for the event and then check the target's status. 
288 259 289 260 held metadata root: 290 - The location, in sectors, of the metadata root that has been 261 + The location, in blocks, of the metadata root that has been 291 262 'held' for userspace read access. '-' indicates there is no 292 - held root. This feature is not yet implemented so '-' is 293 - always returned. 263 + held root. 294 264 295 265 discard_passdown|no_discard_passdown 296 266 Whether or not discards are actually being passed down to the
+1 -1
Documentation/devicetree/bindings/arm/omap/omap.txt
··· 91 91 compatible = "ti,omap3-beagle", "ti,omap3" 92 92 93 93 - OMAP3 Tobi with Overo : Commercial expansion board with daughter board 94 - compatible = "ti,omap3-tobi", "ti,omap3-overo", "ti,omap3" 94 + compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3" 95 95 96 96 - OMAP4 SDP : Software Development Board 97 97 compatible = "ti,omap4-sdp", "ti,omap4430"
+2 -2
Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
··· 21 21 must appear in the same order as the output clocks. 22 22 - #clock-cells: Must be 1 23 23 - clock-output-names: The name of the clocks as free-form strings 24 - - renesas,indices: Indices of the gate clocks into the group (0 to 31) 24 + - renesas,clock-indices: Indices of the gate clocks into the group (0 to 31) 25 25 26 - The clocks, clock-output-names and renesas,indices properties contain one 26 + The clocks, clock-output-names and renesas,clock-indices properties contain one 27 27 entry per gate clock. The MSTP groups are sparsely populated. Unimplemented 28 28 gate clocks must not be declared. 29 29
+10 -6
Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
··· 1 1 * Freescale Smart Direct Memory Access (SDMA) Controller for i.MX 2 2 3 3 Required properties: 4 - - compatible : Should be "fsl,imx31-sdma", "fsl,imx31-to1-sdma", 5 - "fsl,imx31-to2-sdma", "fsl,imx35-sdma", "fsl,imx35-to1-sdma", 6 - "fsl,imx35-to2-sdma", "fsl,imx51-sdma", "fsl,imx53-sdma" or 7 - "fsl,imx6q-sdma". The -to variants should be preferred since they 8 - allow to determnine the correct ROM script addresses needed for 9 - the driver to work without additional firmware. 4 + - compatible : Should be one of 5 + "fsl,imx25-sdma" 6 + "fsl,imx31-sdma", "fsl,imx31-to1-sdma", "fsl,imx31-to2-sdma" 7 + "fsl,imx35-sdma", "fsl,imx35-to1-sdma", "fsl,imx35-to2-sdma" 8 + "fsl,imx51-sdma" 9 + "fsl,imx53-sdma" 10 + "fsl,imx6q-sdma" 11 + The -to variants should be preferred since they allow to determnine the 12 + correct ROM script addresses needed for the driver to work without additional 13 + firmware. 10 14 - reg : Should contain SDMA registers location and length 11 15 - interrupts : Should contain SDMA interrupt 12 16 - #dma-cells : Must be <3>.
+22
Documentation/devicetree/bindings/net/opencores-ethoc.txt
··· 1 + * OpenCores MAC 10/100 Mbps 2 + 3 + Required properties: 4 + - compatible: Should be "opencores,ethoc". 5 + - reg: two memory regions (address and length), 6 + first region is for the device registers and descriptor rings, 7 + second is for the device packet memory. 8 + - interrupts: interrupt for the device. 9 + 10 + Optional properties: 11 + - clocks: phandle to refer to the clk used as per 12 + Documentation/devicetree/bindings/clock/clock-bindings.txt 13 + 14 + Examples: 15 + 16 + enet0: ethoc@fd030000 { 17 + compatible = "opencores,ethoc"; 18 + reg = <0xfd030000 0x4000 0xfd800000 0x4000>; 19 + interrupts = <1>; 20 + local-mac-address = [00 50 c2 13 6f 00]; 21 + clocks = <&osc>; 22 + };
+58
Documentation/devicetree/bindings/net/sti-dwmac.txt
··· 1 + STMicroelectronics SoC DWMAC glue layer controller 2 + 3 + The device node has following properties. 4 + 5 + Required properties: 6 + - compatible : Can be "st,stih415-dwmac", "st,stih416-dwmac" or 7 + "st,stid127-dwmac". 8 + - reg : Offset of the glue configuration register map in system 9 + configuration regmap pointed by st,syscon property and size. 10 + 11 + - reg-names : Should be "sti-ethconf". 12 + 13 + - st,syscon : Should be phandle to system configuration node which 14 + encompases this glue registers. 15 + 16 + - st,tx-retime-src: On STi Parts for Giga bit speeds, 125Mhz clocks can be 17 + wired up in from different sources. One via TXCLK pin and other via CLK_125 18 + pin. This wiring is totally board dependent. However the retiming glue 19 + logic should be configured accordingly. Possible values for this property 20 + 21 + "txclk" - if 125Mhz clock is wired up via txclk line. 22 + "clk_125" - if 125Mhz clock is wired up via clk_125 line. 23 + 24 + This property is only valid for Giga bit setup( GMII, RGMII), and it is 25 + un-used for non-giga bit (MII and RMII) setups. Also note that internal 26 + clockgen can not generate stable 125Mhz clock. 27 + 28 + - st,ext-phyclk: This boolean property indicates who is generating the clock 29 + for tx and rx. This property is only valid for RMII case where the clock can 30 + be generated from the MAC or PHY. 31 + 32 + - clock-names: should be "sti-ethclk". 33 + - clocks: Should point to ethernet clockgen which can generate phyclk. 
34 + 35 + 36 + Example: 37 + 38 + ethernet0: dwmac@fe810000 { 39 + device_type = "network"; 40 + compatible = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710"; 41 + reg = <0xfe810000 0x8000>, <0x8bc 0x4>; 42 + reg-names = "stmmaceth", "sti-ethconf"; 43 + interrupts = <0 133 0>, <0 134 0>, <0 135 0>; 44 + interrupt-names = "macirq", "eth_wake_irq", "eth_lpi"; 45 + phy-mode = "mii"; 46 + 47 + st,syscon = <&syscfg_rear>; 48 + 49 + snps,pbl = <32>; 50 + snps,mixed-burst; 51 + 52 + resets = <&softreset STIH416_ETH0_SOFTRESET>; 53 + reset-names = "stmmaceth"; 54 + pinctrl-0 = <&pinctrl_mii0>; 55 + pinctrl-names = "default"; 56 + clocks = <&CLK_S_GMAC0_PHY>; 57 + clock-names = "stmmaceth"; 58 + };
+4 -4
Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt
··· 1 - Broadcom Capri Pin Controller 1 + Broadcom BCM281xx Pin Controller 2 2 3 3 This is a pin controller for the Broadcom BCM281xx SoC family, which includes 4 4 BCM11130, BCM11140, BCM11351, BCM28145, and BCM28155 SoCs. ··· 7 7 8 8 Required Properties: 9 9 10 - - compatible: Must be "brcm,capri-pinctrl". 10 + - compatible: Must be "brcm,bcm11351-pinctrl" 11 11 - reg: Base address of the PAD Controller register block and the size 12 12 of the block. 13 13 14 14 For example, the following is the bare minimum node: 15 15 16 16 pinctrl@35004800 { 17 - compatible = "brcm,capri-pinctrl"; 17 + compatible = "brcm,bcm11351-pinctrl"; 18 18 reg = <0x35004800 0x430>; 19 19 }; 20 20 ··· 119 119 Example: 120 120 // pin controller node 121 121 pinctrl@35004800 { 122 - compatible = "brcm,capri-pinctrl"; 122 + compatible = "brcmbcm11351-pinctrl"; 123 123 reg = <0x35004800 0x430>; 124 124 125 125 // pin configuration node
-45
Documentation/networking/3c505.txt
··· 1 - The 3Com Etherlink Plus (3c505) driver. 2 - 3 - This driver now uses DMA. There is currently no support for PIO operation. 4 - The default DMA channel is 6; this is _not_ autoprobed, so you must 5 - make sure you configure it correctly. If loading the driver as a 6 - module, you can do this with "modprobe 3c505 dma=n". If the driver is 7 - linked statically into the kernel, you must either use an "ether=" 8 - statement on the command line, or change the definition of ELP_DMA in 3c505.h. 9 - 10 - The driver will warn you if it has to fall back on the compiled in 11 - default DMA channel. 12 - 13 - If no base address is given at boot time, the driver will autoprobe 14 - ports 0x300, 0x280 and 0x310 (in that order). If no IRQ is given, the driver 15 - will try to probe for it. 16 - 17 - The driver can be used as a loadable module. 18 - 19 - Theoretically, one instance of the driver can now run multiple cards, 20 - in the standard way (when loading a module, say "modprobe 3c505 21 - io=0x300,0x340 irq=10,11 dma=6,7" or whatever). I have not tested 22 - this, though. 23 - 24 - The driver may now support revision 2 hardware; the dependency on 25 - being able to read the host control register has been removed. This 26 - is also untested, since I don't have a suitable card. 27 - 28 - Known problems: 29 - I still see "DMA upload timed out" messages from time to time. These 30 - seem to be fairly non-fatal though. 31 - The card is old and slow. 32 - 33 - To do: 34 - Improve probe/setup code 35 - Test multicast and promiscuous operation 36 - 37 - Authors: 38 - The driver is mainly written by Craig Southeren, email 39 - <craigs@ineluki.apana.org.au>. 40 - Parts of the driver (adapting the driver to 1.1.4+ kernels, 41 - IRQ/address detection, some changes) and this README by 42 - Juha Laiho <jlaiho@ichaos.nullnet.fi>. 
43 - DMA mode, more fixes, etc, by Philip Blundell <pjb27@cam.ac.uk> 44 - Multicard support, Software configurable DMA, etc., by 45 - Christopher Collins <ccollins@pcug.org.au>
-6
Documentation/networking/can.txt
··· 554 554 not specified in the struct can_frame and therefore it is only valid in 555 555 CANFD_MTU sized CAN FD frames. 556 556 557 - As long as the payload length is <=8 the received CAN frames from CAN FD 558 - capable CAN devices can be received and read by legacy sockets too. When 559 - user-generated CAN FD frames have a payload length <=8 these can be send 560 - by legacy CAN network interfaces too. Sending CAN FD frames with payload 561 - length > 8 to a legacy CAN network interface returns an -EMSGSIZE error. 562 - 563 557 Implementation hint for new CAN applications: 564 558 565 559 To build a CAN FD aware application use struct canfd_frame as basic CAN
+95 -57
MAINTAINERS
··· 73 73 L: Mailing list that is relevant to this area 74 74 W: Web-page with status/info 75 75 Q: Patchwork web based patch tracking system site 76 - T: SCM tree type and location. Type is one of: git, hg, quilt, stgit, topgit. 76 + T: SCM tree type and location. 77 + Type is one of: git, hg, quilt, stgit, topgit 77 78 S: Status, one of the following: 78 79 Supported: Someone is actually paid to look after this. 79 80 Maintained: Someone actually looks after it. ··· 474 473 475 474 AGPGART DRIVER 476 475 M: David Airlie <airlied@linux.ie> 477 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git 476 + T: git git://people.freedesktop.org/~airlied/linux (part of drm maint) 478 477 S: Maintained 479 478 F: drivers/char/agp/ 480 479 F: include/linux/agp* ··· 539 538 ALTERA UART/JTAG UART SERIAL DRIVERS 540 539 M: Tobias Klauser <tklauser@distanz.ch> 541 540 L: linux-serial@vger.kernel.org 542 - L: nios2-dev@sopc.et.ntust.edu.tw (moderated for non-subscribers) 541 + L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) 543 542 S: Maintained 544 543 F: drivers/tty/serial/altera_uart.c 545 544 F: drivers/tty/serial/altera_jtaguart.c ··· 1613 1612 F: drivers/net/wireless/atmel* 1614 1613 1615 1614 ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER 1616 - M: Bradley Grove <linuxdrivers@attotech.com> 1617 - L: linux-scsi@vger.kernel.org 1618 - W: http://www.attotech.com 1619 - S: Supported 1620 - F: drivers/scsi/esas2r 1615 + M: Bradley Grove <linuxdrivers@attotech.com> 1616 + L: linux-scsi@vger.kernel.org 1617 + W: http://www.attotech.com 1618 + S: Supported 1619 + F: drivers/scsi/esas2r 1621 1620 1622 1621 AUDIT SUBSYSTEM 1623 1622 M: Eric Paris <eparis@redhat.com> ··· 1861 1860 1862 1861 BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE 1863 1862 M: Christian Daudt <bcm@fixthebug.org> 1863 + M: Matt Porter <mporter@linaro.org> 1864 1864 L: bcm-kernel-feedback-list@broadcom.com 1865 1865 T: git git://git.github.com/broadcom/bcm11351 1866 1866 S: 
Maintained ··· 2160 2158 2161 2159 CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER 2162 2160 M: Peter Chen <Peter.Chen@freescale.com> 2163 - T: git://github.com/hzpeterchen/linux-usb.git 2161 + T: git git://github.com/hzpeterchen/linux-usb.git 2164 2162 L: linux-usb@vger.kernel.org 2165 2163 S: Maintained 2166 2164 F: drivers/usb/chipidea/ ··· 2180 2178 F: drivers/net/ethernet/cisco/enic/ 2181 2179 2182 2180 CISCO VIC LOW LATENCY NIC DRIVER 2183 - M: Upinder Malhi <umalhi@cisco.com> 2184 - S: Supported 2185 - F: drivers/infiniband/hw/usnic 2181 + M: Upinder Malhi <umalhi@cisco.com> 2182 + S: Supported 2183 + F: drivers/infiniband/hw/usnic 2186 2184 2187 2185 CIRRUS LOGIC EP93XX ETHERNET DRIVER 2188 2186 M: Hartley Sweeten <hsweeten@visionengravers.com> ··· 2379 2377 F: drivers/cpufreq/arm_big_little_dt.c 2380 2378 2381 2379 CPUIDLE DRIVER - ARM BIG LITTLE 2382 - M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> 2383 - M: Daniel Lezcano <daniel.lezcano@linaro.org> 2384 - L: linux-pm@vger.kernel.org 2385 - L: linux-arm-kernel@lists.infradead.org 2386 - T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git 2387 - S: Maintained 2388 - F: drivers/cpuidle/cpuidle-big_little.c 2380 + M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> 2381 + M: Daniel Lezcano <daniel.lezcano@linaro.org> 2382 + L: linux-pm@vger.kernel.org 2383 + L: linux-arm-kernel@lists.infradead.org 2384 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git 2385 + S: Maintained 2386 + F: drivers/cpuidle/cpuidle-big_little.c 2389 2387 2390 2388 CPUIDLE DRIVERS 2391 2389 M: Rafael J. 
Wysocki <rjw@rjwysocki.net> 2392 2390 M: Daniel Lezcano <daniel.lezcano@linaro.org> 2393 2391 L: linux-pm@vger.kernel.org 2394 2392 S: Maintained 2395 - T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git 2393 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git 2396 2394 F: drivers/cpuidle/* 2397 2395 F: include/linux/cpuidle.h 2398 2396 ··· 2410 2408 2411 2409 CPUSETS 2412 2410 M: Li Zefan <lizefan@huawei.com> 2411 + L: cgroups@vger.kernel.org 2413 2412 W: http://www.bullopensource.org/cpuset/ 2414 2413 W: http://oss.sgi.com/projects/cpusets/ 2414 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git 2415 2415 S: Maintained 2416 2416 F: Documentation/cgroups/cpusets.txt 2417 2417 F: include/linux/cpuset.h ··· 2459 2455 F: sound/pci/cs5535audio/ 2460 2456 2461 2457 CW1200 WLAN driver 2462 - M: Solomon Peachy <pizza@shaftnet.org> 2463 - S: Maintained 2464 - F: drivers/net/wireless/cw1200/ 2458 + M: Solomon Peachy <pizza@shaftnet.org> 2459 + S: Maintained 2460 + F: drivers/net/wireless/cw1200/ 2465 2461 2466 2462 CX18 VIDEO4LINUX DRIVER 2467 2463 M: Andy Walls <awalls@md.metrocast.net> ··· 2612 2608 M: Oliver Neukum <oliver@neukum.org> 2613 2609 M: Ali Akcaagac <aliakc@web.de> 2614 2610 M: Jamie Lenehan <lenehan@twibble.org> 2615 - W: http://twibble.org/dist/dc395x/ 2616 2611 L: dc395x@twibble.org 2617 - L: http://lists.twibble.org/mailman/listinfo/dc395x/ 2612 + W: http://twibble.org/dist/dc395x/ 2613 + W: http://lists.twibble.org/mailman/listinfo/dc395x/ 2618 2614 S: Maintained 2619 2615 F: Documentation/scsi/dc395x.txt 2620 2616 F: drivers/scsi/dc395x.* ··· 2849 2845 DRM DRIVERS 2850 2846 M: David Airlie <airlied@linux.ie> 2851 2847 L: dri-devel@lists.freedesktop.org 2852 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git 2848 + T: git git://people.freedesktop.org/~airlied/linux 2853 2849 S: Maintained 2854 2850 F: drivers/gpu/drm/ 2855 2851 F: include/drm/ 2856 2852 F: 
include/uapi/drm/ 2853 + 2854 + RADEON DRM DRIVERS 2855 + M: Alex Deucher <alexander.deucher@amd.com> 2856 + M: Christian König <christian.koenig@amd.com> 2857 + L: dri-devel@lists.freedesktop.org 2858 + T: git git://people.freedesktop.org/~agd5f/linux 2859 + S: Supported 2860 + F: drivers/gpu/drm/radeon/ 2861 + F: include/drm/radeon* 2862 + F: include/uapi/drm/radeon* 2857 2863 2858 2864 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) 2859 2865 M: Daniel Vetter <daniel.vetter@ffwll.ch> ··· 3096 3082 3097 3083 EDAC-CORE 3098 3084 M: Doug Thompson <dougthompson@xmission.com> 3085 + M: Borislav Petkov <bp@alien8.de> 3086 + M: Mauro Carvalho Chehab <m.chehab@samsung.com> 3099 3087 L: linux-edac@vger.kernel.org 3100 3088 W: bluesmoke.sourceforge.net 3101 3089 S: Supported ··· 3339 3323 S: Maintained 3340 3324 F: include/linux/netfilter_bridge/ 3341 3325 F: net/bridge/ 3326 + 3327 + ETHERNET PHY LIBRARY 3328 + M: Florian Fainelli <f.fainelli@gmail.com> 3329 + L: netdev@vger.kernel.org 3330 + S: Maintained 3331 + F: include/linux/phy.h 3332 + F: include/linux/phy_fixed.h 3333 + F: drivers/net/phy/ 3334 + F: Documentation/networking/phy.txt 3335 + F: drivers/of/of_mdio.c 3336 + F: drivers/of/of_net.c 3342 3337 3343 3338 EXT2 FILE SYSTEM 3344 3339 M: Jan Kara <jack@suse.cz> ··· 4561 4534 F: Documentation/networking/i40e.txt 4562 4535 F: Documentation/networking/i40evf.txt 4563 4536 F: drivers/net/ethernet/intel/ 4537 + F: drivers/net/ethernet/intel/*/ 4564 4538 4565 4539 INTEL-MID GPIO DRIVER 4566 4540 M: David Cohen <david.a.cohen@linux.intel.com> ··· 4918 4890 KCONFIG 4919 4891 M: "Yann E. 
MORIN" <yann.morin.1998@free.fr> 4920 4892 L: linux-kbuild@vger.kernel.org 4921 - T: git://gitorious.org/linux-kconfig/linux-kconfig 4893 + T: git git://gitorious.org/linux-kconfig/linux-kconfig 4922 4894 S: Maintained 4923 4895 F: Documentation/kbuild/kconfig-language.txt 4924 4896 F: scripts/kconfig/ ··· 5475 5447 F: drivers/media/tuners/m88ts2022* 5476 5448 5477 5449 MA901 MASTERKIT USB FM RADIO DRIVER 5478 - M: Alexey Klimov <klimov.linux@gmail.com> 5479 - L: linux-media@vger.kernel.org 5480 - T: git git://linuxtv.org/media_tree.git 5481 - S: Maintained 5482 - F: drivers/media/radio/radio-ma901.c 5450 + M: Alexey Klimov <klimov.linux@gmail.com> 5451 + L: linux-media@vger.kernel.org 5452 + T: git git://linuxtv.org/media_tree.git 5453 + S: Maintained 5454 + F: drivers/media/radio/radio-ma901.c 5483 5455 5484 5456 MAC80211 5485 5457 M: Johannes Berg <johannes@sipsolutions.net> ··· 5514 5486 W: http://www.kernel.org/doc/man-pages 5515 5487 L: linux-man@vger.kernel.org 5516 5488 S: Maintained 5489 + 5490 + MARVELL ARMADA DRM SUPPORT 5491 + M: Russell King <rmk+kernel@arm.linux.org.uk> 5492 + S: Maintained 5493 + F: drivers/gpu/drm/armada/ 5517 5494 5518 5495 MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2) 5519 5496 M: Mirko Lindner <mlindner@marvell.com> ··· 5640 5607 5641 5608 MELLANOX ETHERNET DRIVER (mlx4_en) 5642 5609 M: Amir Vadai <amirv@mellanox.com> 5643 - L: netdev@vger.kernel.org 5610 + L: netdev@vger.kernel.org 5644 5611 S: Supported 5645 5612 W: http://www.mellanox.com 5646 5613 Q: http://patchwork.ozlabs.org/project/netdev/list/ ··· 5681 5648 F: include/uapi/mtd/ 5682 5649 5683 5650 MEN A21 WATCHDOG DRIVER 5684 - M: Johannes Thumshirn <johannes.thumshirn@men.de> 5651 + M: Johannes Thumshirn <johannes.thumshirn@men.de> 5685 5652 L: linux-watchdog@vger.kernel.org 5686 5653 S: Supported 5687 5654 F: drivers/watchdog/mena21_wdt.c ··· 5737 5704 W: http://www.mellanox.com 5738 5705 Q: http://patchwork.ozlabs.org/project/netdev/list/ 5739 5706 Q: 
http://patchwork.kernel.org/project/linux-rdma/list/ 5740 - T: git://openfabrics.org/~eli/connect-ib.git 5707 + T: git git://openfabrics.org/~eli/connect-ib.git 5741 5708 S: Supported 5742 5709 F: drivers/net/ethernet/mellanox/mlx5/core/ 5743 5710 F: include/linux/mlx5/ 5744 5711 5745 5712 Mellanox MLX5 IB driver 5746 - M: Eli Cohen <eli@mellanox.com> 5747 - L: linux-rdma@vger.kernel.org 5748 - W: http://www.mellanox.com 5749 - Q: http://patchwork.kernel.org/project/linux-rdma/list/ 5750 - T: git://openfabrics.org/~eli/connect-ib.git 5751 - S: Supported 5752 - F: include/linux/mlx5/ 5753 - F: drivers/infiniband/hw/mlx5/ 5713 + M: Eli Cohen <eli@mellanox.com> 5714 + L: linux-rdma@vger.kernel.org 5715 + W: http://www.mellanox.com 5716 + Q: http://patchwork.kernel.org/project/linux-rdma/list/ 5717 + T: git git://openfabrics.org/~eli/connect-ib.git 5718 + S: Supported 5719 + F: include/linux/mlx5/ 5720 + F: drivers/infiniband/hw/mlx5/ 5754 5721 5755 5722 MODULE SUPPORT 5756 5723 M: Rusty Russell <rusty@rustcorp.com.au> ··· 6174 6141 S: Supported 6175 6142 F: drivers/block/nvme* 6176 6143 F: include/linux/nvme.h 6144 + 6145 + NXP TDA998X DRM DRIVER 6146 + M: Russell King <rmk+kernel@arm.linux.org.uk> 6147 + S: Supported 6148 + F: drivers/gpu/drm/i2c/tda998x_drv.c 6149 + F: include/drm/i2c/tda998x.h 6177 6150 6178 6151 OMAP SUPPORT 6179 6152 M: Tony Lindgren <tony@atomide.com> ··· 8468 8429 M: Nicholas A. 
Bellinger <nab@linux-iscsi.org> 8469 8430 L: linux-scsi@vger.kernel.org 8470 8431 L: target-devel@vger.kernel.org 8471 - L: http://groups.google.com/group/linux-iscsi-target-dev 8472 8432 W: http://www.linux-iscsi.org 8433 + W: http://groups.google.com/group/linux-iscsi-target-dev 8473 8434 T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master 8474 8435 S: Supported 8475 8436 F: drivers/target/ ··· 8710 8671 F: drivers/media/radio/radio-raremono.c 8711 8672 8712 8673 THERMAL 8713 - M: Zhang Rui <rui.zhang@intel.com> 8714 - M: Eduardo Valentin <eduardo.valentin@ti.com> 8715 - L: linux-pm@vger.kernel.org 8716 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git 8717 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git 8718 - Q: https://patchwork.kernel.org/project/linux-pm/list/ 8719 - S: Supported 8720 - F: drivers/thermal/ 8721 - F: include/linux/thermal.h 8722 - F: include/linux/cpu_cooling.h 8723 - F: Documentation/devicetree/bindings/thermal/ 8674 + M: Zhang Rui <rui.zhang@intel.com> 8675 + M: Eduardo Valentin <eduardo.valentin@ti.com> 8676 + L: linux-pm@vger.kernel.org 8677 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git 8678 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git 8679 + Q: https://patchwork.kernel.org/project/linux-pm/list/ 8680 + S: Supported 8681 + F: drivers/thermal/ 8682 + F: include/linux/thermal.h 8683 + F: include/linux/cpu_cooling.h 8684 + F: Documentation/devicetree/bindings/thermal/ 8724 8685 8725 8686 THINGM BLINK(1) USB RGB LED DRIVER 8726 8687 M: Vivien Didelot <vivien.didelot@savoirfairelinux.com> ··· 9754 9715 XFS FILESYSTEM 9755 9716 P: Silicon Graphics Inc 9756 9717 M: Dave Chinner <david@fromorbit.com> 9757 - M: Ben Myers <bpm@sgi.com> 9758 9718 M: xfs@oss.sgi.com 9759 9719 L: xfs@oss.sgi.com 9760 9720 W: http://oss.sgi.com/projects/xfs ··· 9822 9784 L: mjpeg-users@lists.sourceforge.net 9823 
9785 L: linux-media@vger.kernel.org 9824 9786 W: http://mjpeg.sourceforge.net/driver-zoran/ 9825 - T: Mercurial http://linuxtv.org/hg/v4l-dvb 9787 + T: hg http://linuxtv.org/hg/v4l-dvb 9826 9788 S: Odd Fixes 9827 9789 F: drivers/media/pci/zoran/ 9828 9790
+6 -4
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 14 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc3 4 + EXTRAVERSION = -rc6 5 5 NAME = Shuffling Zombie Juror 6 6 7 7 # *DOCUMENTATION* ··· 605 605 ifdef CONFIG_CC_STACKPROTECTOR_REGULAR 606 606 stackp-flag := -fstack-protector 607 607 ifeq ($(call cc-option, $(stackp-flag)),) 608 - $(warning Cannot use CONFIG_CC_STACKPROTECTOR: \ 609 - -fstack-protector not supported by compiler)) 608 + $(warning Cannot use CONFIG_CC_STACKPROTECTOR_REGULAR: \ 609 + -fstack-protector not supported by compiler) 610 610 endif 611 - else ifdef CONFIG_CC_STACKPROTECTOR_STRONG 611 + else 612 + ifdef CONFIG_CC_STACKPROTECTOR_STRONG 612 613 stackp-flag := -fstack-protector-strong 613 614 ifeq ($(call cc-option, $(stackp-flag)),) 614 615 $(warning Cannot use CONFIG_CC_STACKPROTECTOR_STRONG: \ ··· 618 617 else 619 618 # Force off for distro compilers that enable stack protector by default. 620 619 stackp-flag := $(call cc-option, -fno-stack-protector) 620 + endif 621 621 endif 622 622 KBUILD_CFLAGS += $(stackp-flag) 623 623
+2 -2
arch/arc/mm/cache_arc700.c
··· 282 282 #else 283 283 /* if V-P const for loop, PTAG can be written once outside loop */ 284 284 if (full_page_op) 285 - write_aux_reg(ARC_REG_DC_PTAG, paddr); 285 + write_aux_reg(aux_tag, paddr); 286 286 #endif 287 287 288 288 while (num_lines-- > 0) { ··· 296 296 write_aux_reg(aux_cmd, vaddr); 297 297 vaddr += L1_CACHE_BYTES; 298 298 #else 299 - write_aux_reg(aux, paddr); 299 + write_aux_reg(aux_cmd, paddr); 300 300 paddr += L1_CACHE_BYTES; 301 301 #endif 302 302 }
+3
arch/arm/Kconfig
··· 1578 1578 1579 1579 choice 1580 1580 prompt "Memory split" 1581 + depends on MMU 1581 1582 default VMSPLIT_3G 1582 1583 help 1583 1584 Select the desired split between kernel and user memory. ··· 1596 1595 1597 1596 config PAGE_OFFSET 1598 1597 hex 1598 + default PHYS_OFFSET if !MMU 1599 1599 default 0x40000000 if VMSPLIT_1G 1600 1600 default 0x80000000 if VMSPLIT_2G 1601 1601 default 0xC0000000 ··· 1905 1903 depends on ARM && AEABI && OF 1906 1904 depends on CPU_V7 && !CPU_V6 1907 1905 depends on !GENERIC_ATOMIC64 1906 + depends on MMU 1908 1907 select ARM_PSCI 1909 1908 select SWIOTLB_XEN 1910 1909 select ARCH_DMA_ADDR_T_64BIT
+1
arch/arm/boot/compressed/.gitignore
··· 1 1 ashldi3.S 2 + bswapsdi2.S 2 3 font.c 3 4 lib1funcs.S 4 5 hyp-stub.S
+2 -1
arch/arm/boot/dts/Makefile
··· 209 209 omap3-n900.dtb \ 210 210 omap3-n9.dtb \ 211 211 omap3-n950.dtb \ 212 - omap3-tobi.dtb \ 212 + omap3-overo-tobi.dtb \ 213 + omap3-overo-storm-tobi.dtb \ 213 214 omap3-gta04.dtb \ 214 215 omap3-igep0020.dtb \ 215 216 omap3-igep0030.dtb \
+10 -1
arch/arm/boot/dts/am335x-evmsk.dts
··· 121 121 ti,model = "AM335x-EVMSK"; 122 122 ti,audio-codec = <&tlv320aic3106>; 123 123 ti,mcasp-controller = <&mcasp1>; 124 - ti,codec-clock-rate = <24576000>; 124 + ti,codec-clock-rate = <24000000>; 125 125 ti,audio-routing = 126 126 "Headphone Jack", "HPLOUT", 127 127 "Headphone Jack", "HPROUT"; ··· 253 253 /* MDIO reset value */ 254 254 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7) 255 255 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7) 256 + >; 257 + }; 258 + 259 + mmc1_pins: pinmux_mmc1_pins { 260 + pinctrl-single,pins = < 261 + 0x160 (PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */ 256 262 >; 257 263 }; 258 264 ··· 462 456 status = "okay"; 463 457 vmmc-supply = <&vmmc_reg>; 464 458 bus-width = <4>; 459 + pinctrl-names = "default"; 460 + pinctrl-0 = <&mmc1_pins>; 461 + cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; 465 462 }; 466 463 467 464 &sham {
+2 -1
arch/arm/boot/dts/armada-xp-mv78260.dtsi
··· 23 23 gpio0 = &gpio0; 24 24 gpio1 = &gpio1; 25 25 gpio2 = &gpio2; 26 + eth3 = &eth3; 26 27 }; 27 28 28 29 cpus { ··· 292 291 interrupts = <91>; 293 292 }; 294 293 295 - ethernet@34000 { 294 + eth3: ethernet@34000 { 296 295 compatible = "marvell,armada-370-neta"; 297 296 reg = <0x34000 0x4000>; 298 297 interrupts = <14>;
+1 -1
arch/arm/boot/dts/bcm11351.dtsi
··· 147 147 }; 148 148 149 149 pinctrl@35004800 { 150 - compatible = "brcm,capri-pinctrl"; 150 + compatible = "brcm,bcm11351-pinctrl"; 151 151 reg = <0x35004800 0x430>; 152 152 }; 153 153
-11
arch/arm/boot/dts/dove.dtsi
··· 379 379 #clock-cells = <1>; 380 380 }; 381 381 382 - pmu_intc: pmu-interrupt-ctrl@d0050 { 383 - compatible = "marvell,dove-pmu-intc"; 384 - interrupt-controller; 385 - #interrupt-cells = <1>; 386 - reg = <0xd0050 0x8>; 387 - interrupts = <33>; 388 - marvell,#interrupts = <7>; 389 - }; 390 - 391 382 pinctrl: pin-ctrl@d0200 { 392 383 compatible = "marvell,dove-pinctrl"; 393 384 reg = <0xd0200 0x10>; ··· 601 610 rtc: real-time-clock@d8500 { 602 611 compatible = "marvell,orion-rtc"; 603 612 reg = <0xd8500 0x20>; 604 - interrupt-parent = <&pmu_intc>; 605 - interrupts = <5>; 606 613 }; 607 614 608 615 gpio2: gpio-ctrl@e8400 {
+3 -7
arch/arm/boot/dts/imx6dl-hummingboard.dts
··· 52 52 }; 53 53 }; 54 54 55 - codec: spdif-transmitter { 56 - compatible = "linux,spdif-dit"; 57 - pinctrl-names = "default"; 58 - pinctrl-0 = <&pinctrl_hummingboard_spdif>; 59 - }; 60 - 61 55 sound-spdif { 62 56 compatible = "fsl,imx-audio-spdif"; 63 57 model = "imx-spdif"; ··· 105 111 }; 106 112 107 113 pinctrl_hummingboard_spdif: hummingboard-spdif { 108 - fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>; 114 + fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>; 109 115 }; 110 116 111 117 pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus { ··· 136 142 }; 137 143 138 144 &spdif { 145 + pinctrl-names = "default"; 146 + pinctrl-0 = <&pinctrl_hummingboard_spdif>; 139 147 status = "okay"; 140 148 }; 141 149
+3 -7
arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
··· 46 46 }; 47 47 }; 48 48 49 - codec: spdif-transmitter { 50 - compatible = "linux,spdif-dit"; 51 - pinctrl-names = "default"; 52 - pinctrl-0 = <&pinctrl_cubox_i_spdif>; 53 - }; 54 - 55 49 sound-spdif { 56 50 compatible = "fsl,imx-audio-spdif"; 57 51 model = "imx-spdif"; ··· 83 89 }; 84 90 85 91 pinctrl_cubox_i_spdif: cubox-i-spdif { 86 - fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>; 92 + fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>; 87 93 }; 88 94 89 95 pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus { ··· 115 121 }; 116 122 117 123 &spdif { 124 + pinctrl-names = "default"; 125 + pinctrl-0 = <&pinctrl_cubox_i_spdif>; 118 126 status = "okay"; 119 127 }; 120 128
+1 -1
arch/arm/boot/dts/keystone-clocks.dtsi
··· 612 612 compatible = "ti,keystone,psc-clock"; 613 613 clocks = <&chipclk13>; 614 614 clock-output-names = "vcp-3"; 615 - reg = <0x0235000a8 0xb00>, <0x02350060 0x400>; 615 + reg = <0x023500a8 0xb00>, <0x02350060 0x400>; 616 616 reg-names = "control", "domain"; 617 617 domain-id = <24>; 618 618 };
+5 -3
arch/arm/boot/dts/omap3-gta04.dts
··· 13 13 14 14 / { 15 15 model = "OMAP3 GTA04"; 16 - compatible = "ti,omap3-gta04", "ti,omap3"; 16 + compatible = "ti,omap3-gta04", "ti,omap36xx", "ti,omap3"; 17 17 18 18 cpus { 19 19 cpu@0 { ··· 32 32 aux-button { 33 33 label = "aux"; 34 34 linux,code = <169>; 35 - gpios = <&gpio1 7 GPIO_ACTIVE_LOW>; 35 + gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>; 36 36 gpio-key,wakeup; 37 37 }; 38 38 }; ··· 92 92 bmp085@77 { 93 93 compatible = "bosch,bmp085"; 94 94 reg = <0x77>; 95 + interrupt-parent = <&gpio4>; 96 + interrupts = <17 IRQ_TYPE_EDGE_RISING>; 95 97 }; 96 98 97 99 /* leds */ ··· 143 141 pinctrl-names = "default"; 144 142 pinctrl-0 = <&mmc1_pins>; 145 143 vmmc-supply = <&vmmc1>; 146 - vmmc_aux-supply = <&vsim>; 147 144 bus-width = <4>; 145 + ti,non-removable; 148 146 }; 149 147 150 148 &mmc2 {
+1 -1
arch/arm/boot/dts/omap3-igep0020.dts
··· 14 14 15 15 / { 16 16 model = "IGEPv2 (TI OMAP AM/DM37x)"; 17 - compatible = "isee,omap3-igep0020", "ti,omap3"; 17 + compatible = "isee,omap3-igep0020", "ti,omap36xx", "ti,omap3"; 18 18 19 19 leds { 20 20 pinctrl-names = "default";
+1 -1
arch/arm/boot/dts/omap3-igep0030.dts
··· 13 13 14 14 / { 15 15 model = "IGEP COM MODULE (TI OMAP AM/DM37x)"; 16 - compatible = "isee,omap3-igep0030", "ti,omap3"; 16 + compatible = "isee,omap3-igep0030", "ti,omap36xx", "ti,omap3"; 17 17 18 18 leds { 19 19 pinctrl-names = "default";
+1 -1
arch/arm/boot/dts/omap3-n9.dts
··· 14 14 15 15 / { 16 16 model = "Nokia N9"; 17 - compatible = "nokia,omap3-n9", "ti,omap3"; 17 + compatible = "nokia,omap3-n9", "ti,omap36xx", "ti,omap3"; 18 18 };
+2 -2
arch/arm/boot/dts/omap3-n900.dts
··· 1 1 /* 2 2 * Copyright (C) 2013 Pavel Machek <pavel@ucw.cz> 3 - * Copyright 2013 Aaro Koskinen <aaro.koskinen@iki.fi> 3 + * Copyright (C) 2013-2014 Aaro Koskinen <aaro.koskinen@iki.fi> 4 4 * 5 5 * This program is free software; you can redistribute it and/or modify 6 6 * it under the terms of the GNU General Public License version 2 (or later) as ··· 13 13 14 14 / { 15 15 model = "Nokia N900"; 16 - compatible = "nokia,omap3-n900", "ti,omap3"; 16 + compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3"; 17 17 18 18 cpus { 19 19 cpu@0 {
+1 -1
arch/arm/boot/dts/omap3-n950.dts
··· 14 14 15 15 / { 16 16 model = "Nokia N950"; 17 - compatible = "nokia,omap3-n950", "ti,omap3"; 17 + compatible = "nokia,omap3-n950", "ti,omap36xx", "ti,omap3"; 18 18 };
+22
arch/arm/boot/dts/omap3-overo-storm-tobi.dts
··· 1 + /* 2 + * Copyright (C) 2012 Florian Vaussard, EPFL Mobots group 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + 9 + /* 10 + * Tobi expansion board is manufactured by Gumstix Inc. 11 + */ 12 + 13 + /dts-v1/; 14 + 15 + #include "omap36xx.dtsi" 16 + #include "omap3-overo-tobi-common.dtsi" 17 + 18 + / { 19 + model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Tobi"; 20 + compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3"; 21 + }; 22 +
+22
arch/arm/boot/dts/omap3-overo-tobi.dts
··· 1 + /* 2 + * Copyright (C) 2012 Florian Vaussard, EPFL Mobots group 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + 9 + /* 10 + * Tobi expansion board is manufactured by Gumstix Inc. 11 + */ 12 + 13 + /dts-v1/; 14 + 15 + #include "omap34xx.dtsi" 16 + #include "omap3-overo-tobi-common.dtsi" 17 + 18 + / { 19 + model = "OMAP35xx Gumstix Overo on Tobi"; 20 + compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3"; 21 + }; 22 +
-3
arch/arm/boot/dts/omap3-overo.dtsi
··· 9 9 /* 10 10 * The Gumstix Overo must be combined with an expansion board. 11 11 */ 12 - /dts-v1/; 13 - 14 - #include "omap34xx.dtsi" 15 12 16 13 / { 17 14 pwmleds {
-3
arch/arm/boot/dts/omap3-tobi.dts arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
··· 13 13 #include "omap3-overo.dtsi" 14 14 15 15 / { 16 - model = "TI OMAP3 Gumstix Overo on Tobi"; 17 - compatible = "ti,omap3-tobi", "ti,omap3-overo", "ti,omap3"; 18 - 19 16 leds { 20 17 compatible = "gpio-leds"; 21 18 heartbeat {
+1 -1
arch/arm/boot/dts/sun4i-a10.dtsi
··· 426 426 }; 427 427 428 428 rtp: rtp@01c25000 { 429 - compatible = "allwinner,sun4i-ts"; 429 + compatible = "allwinner,sun4i-a10-ts"; 430 430 reg = <0x01c25000 0x100>; 431 431 interrupts = <29>; 432 432 };
+1 -1
arch/arm/boot/dts/sun5i-a10s.dtsi
··· 383 383 }; 384 384 385 385 rtp: rtp@01c25000 { 386 - compatible = "allwinner,sun4i-ts"; 386 + compatible = "allwinner,sun4i-a10-ts"; 387 387 reg = <0x01c25000 0x100>; 388 388 interrupts = <29>; 389 389 };
+1 -1
arch/arm/boot/dts/sun5i-a13.dtsi
··· 346 346 }; 347 347 348 348 rtp: rtp@01c25000 { 349 - compatible = "allwinner,sun4i-ts"; 349 + compatible = "allwinner,sun4i-a10-ts"; 350 350 reg = <0x01c25000 0x100>; 351 351 interrupts = <29>; 352 352 };
+6 -6
arch/arm/boot/dts/sun7i-a20.dtsi
··· 454 454 rtc: rtc@01c20d00 { 455 455 compatible = "allwinner,sun7i-a20-rtc"; 456 456 reg = <0x01c20d00 0x20>; 457 - interrupts = <0 24 1>; 457 + interrupts = <0 24 4>; 458 458 }; 459 459 460 460 sid: eeprom@01c23800 { ··· 463 463 }; 464 464 465 465 rtp: rtp@01c25000 { 466 - compatible = "allwinner,sun4i-ts"; 466 + compatible = "allwinner,sun4i-a10-ts"; 467 467 reg = <0x01c25000 0x100>; 468 468 interrupts = <0 29 4>; 469 469 }; ··· 596 596 hstimer@01c60000 { 597 597 compatible = "allwinner,sun7i-a20-hstimer"; 598 598 reg = <0x01c60000 0x1000>; 599 - interrupts = <0 81 1>, 600 - <0 82 1>, 601 - <0 83 1>, 602 - <0 84 1>; 599 + interrupts = <0 81 4>, 600 + <0 82 4>, 601 + <0 83 4>, 602 + <0 84 4>; 603 603 clocks = <&ahb_gates 28>; 604 604 }; 605 605
+4
arch/arm/boot/dts/tegra114.dtsi
··· 57 57 resets = <&tegra_car 27>; 58 58 reset-names = "dc"; 59 59 60 + nvidia,head = <0>; 61 + 60 62 rgb { 61 63 status = "disabled"; 62 64 }; ··· 73 71 clock-names = "dc", "parent"; 74 72 resets = <&tegra_car 26>; 75 73 reset-names = "dc"; 74 + 75 + nvidia,head = <1>; 76 76 77 77 rgb { 78 78 status = "disabled";
+4
arch/arm/boot/dts/tegra20.dtsi
··· 94 94 resets = <&tegra_car 27>; 95 95 reset-names = "dc"; 96 96 97 + nvidia,head = <0>; 98 + 97 99 rgb { 98 100 status = "disabled"; 99 101 }; ··· 110 108 clock-names = "dc", "parent"; 111 109 resets = <&tegra_car 26>; 112 110 reset-names = "dc"; 111 + 112 + nvidia,head = <1>; 113 113 114 114 rgb { 115 115 status = "disabled";
+1 -1
arch/arm/boot/dts/tegra30-cardhu.dtsi
··· 28 28 compatible = "nvidia,cardhu", "nvidia,tegra30"; 29 29 30 30 aliases { 31 - rtc0 = "/i2c@7000d000/tps6586x@34"; 31 + rtc0 = "/i2c@7000d000/tps65911@2d"; 32 32 rtc1 = "/rtc@7000e000"; 33 33 }; 34 34
+4
arch/arm/boot/dts/tegra30.dtsi
··· 170 170 resets = <&tegra_car 27>; 171 171 reset-names = "dc"; 172 172 173 + nvidia,head = <0>; 174 + 173 175 rgb { 174 176 status = "disabled"; 175 177 }; ··· 186 184 clock-names = "dc", "parent"; 187 185 resets = <&tegra_car 26>; 188 186 reset-names = "dc"; 187 + 188 + nvidia,head = <1>; 189 189 190 190 rgb { 191 191 status = "disabled";
arch/arm/boot/dts/testcases/tests-interrupts.dtsi drivers/of/testcase-data/tests-interrupts.dtsi
arch/arm/boot/dts/testcases/tests-phandle.dtsi drivers/of/testcase-data/tests-phandle.dtsi
-2
arch/arm/boot/dts/testcases/tests.dtsi
··· 1 - /include/ "tests-phandle.dtsi" 2 - /include/ "tests-interrupts.dtsi"
+2 -2
arch/arm/boot/dts/versatile-pb.dts
··· 1 - /include/ "versatile-ab.dts" 1 + #include <versatile-ab.dts> 2 2 3 3 / { 4 4 model = "ARM Versatile PB"; ··· 47 47 }; 48 48 }; 49 49 50 - /include/ "testcases/tests.dtsi" 50 + #include <testcases.dtsi>
+3
arch/arm/configs/tegra_defconfig
··· 204 204 CONFIG_MMC_SDHCI=y 205 205 CONFIG_MMC_SDHCI_PLTFM=y 206 206 CONFIG_MMC_SDHCI_TEGRA=y 207 + CONFIG_NEW_LEDS=y 208 + CONFIG_LEDS_CLASS=y 207 209 CONFIG_LEDS_GPIO=y 210 + CONFIG_LEDS_TRIGGERS=y 208 211 CONFIG_LEDS_TRIGGER_TIMER=y 209 212 CONFIG_LEDS_TRIGGER_ONESHOT=y 210 213 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+1
arch/arm/include/asm/cacheflush.h
··· 212 212 static inline void __flush_icache_all(void) 213 213 { 214 214 __flush_icache_preferred(); 215 + dsb(); 215 216 } 216 217 217 218 /*
+3 -6
arch/arm/include/asm/memory.h
··· 30 30 */ 31 31 #define UL(x) _AC(x, UL) 32 32 33 + /* PAGE_OFFSET - the virtual address of the start of the kernel image */ 34 + #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) 35 + 33 36 #ifdef CONFIG_MMU 34 37 35 38 /* 36 - * PAGE_OFFSET - the virtual address of the start of the kernel image 37 39 * TASK_SIZE - the maximum size of a user space task. 38 40 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area 39 41 */ 40 - #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) 41 42 #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) 42 43 #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) 43 44 ··· 103 102 104 103 #ifndef END_MEM 105 104 #define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) 106 - #endif 107 - 108 - #ifndef PAGE_OFFSET 109 - #define PAGE_OFFSET PLAT_PHYS_OFFSET 110 105 #endif 111 106 112 107 /*
+9 -6
arch/arm/include/asm/pgtable-3level.h
··· 120 120 /* 121 121 * 2nd stage PTE definitions for LPAE. 122 122 */ 123 - #define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */ 124 - #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ 125 - #define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ 126 - #define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ 127 - #define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ 123 + #define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */ 124 + #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */ 125 + #define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */ 126 + #define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */ 127 + #define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2) 128 128 129 - #define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ 129 + #define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ 130 + #define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ 131 + 132 + #define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ 130 133 131 134 /* 132 135 * Hyp-mode PL2 PTE definitions for LPAE.
+3 -12
arch/arm/include/asm/spinlock.h
··· 37 37 38 38 static inline void dsb_sev(void) 39 39 { 40 - #if __LINUX_ARM_ARCH__ >= 7 41 - __asm__ __volatile__ ( 42 - "dsb ishst\n" 43 - SEV 44 - ); 45 - #else 46 - __asm__ __volatile__ ( 47 - "mcr p15, 0, %0, c7, c10, 4\n" 48 - SEV 49 - : : "r" (0) 50 - ); 51 - #endif 40 + 41 + dsb(ishst); 42 + __asm__(SEV); 52 43 } 53 44 54 45 /*
+12
arch/arm/kernel/head-common.S
··· 177 177 .long __proc_info_end 178 178 .size __lookup_processor_type_data, . - __lookup_processor_type_data 179 179 180 + __error_lpae: 181 + #ifdef CONFIG_DEBUG_LL 182 + adr r0, str_lpae 183 + bl printascii 184 + b __error 185 + str_lpae: .asciz "\nError: Kernel with LPAE support, but CPU does not support LPAE.\n" 186 + #else 187 + b __error 188 + #endif 189 + .align 190 + ENDPROC(__error_lpae) 191 + 180 192 __error_p: 181 193 #ifdef CONFIG_DEBUG_LL 182 194 adr r0, str_p1
+1 -1
arch/arm/kernel/head.S
··· 102 102 and r3, r3, #0xf @ extract VMSA support 103 103 cmp r3, #5 @ long-descriptor translation table format? 104 104 THUMB( it lo ) @ force fixup-able long branch encoding 105 - blo __error_p @ only classic page table format 105 + blo __error_lpae @ only classic page table format 106 106 #endif 107 107 108 108 #ifndef CONFIG_XIP_KERNEL
+1 -1
arch/arm/kernel/setup.c
··· 731 731 kernel_data.end = virt_to_phys(_end - 1); 732 732 733 733 for_each_memblock(memory, region) { 734 - res = memblock_virt_alloc_low(sizeof(*res), 0); 734 + res = memblock_virt_alloc(sizeof(*res), 0); 735 735 res->name = "System RAM"; 736 736 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); 737 737 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+2 -1
arch/arm/kvm/arm.c
··· 878 878 unsigned long cmd, 879 879 void *v) 880 880 { 881 - if (cmd == CPU_PM_EXIT) { 881 + if (cmd == CPU_PM_EXIT && 882 + __hyp_get_vectors() == hyp_default_vectors) { 882 883 cpu_init_hyp_mode(NULL); 883 884 return NOTIFY_OK; 884 885 }
+10 -1
arch/arm/kvm/interrupts.S
··· 220 220 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are 221 221 * passed in r0 and r1. 222 222 * 223 + * A function pointer with a value of 0xffffffff has a special meaning, 224 + * and is used to implement __hyp_get_vectors in the same way as in 225 + * arch/arm/kernel/hyp_stub.S. 226 + * 223 227 * The calling convention follows the standard AAPCS: 224 228 * r0 - r3: caller save 225 229 * r12: caller save ··· 367 363 host_switch_to_hyp: 368 364 pop {r0, r1, r2} 369 365 366 + /* Check for __hyp_get_vectors */ 367 + cmp r0, #-1 368 + mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR 369 + beq 1f 370 + 370 371 push {lr} 371 372 mrs lr, SPSR 372 373 push {lr} ··· 387 378 pop {lr} 388 379 msr SPSR_csxf, lr 389 380 pop {lr} 390 - eret 381 + 1: eret 391 382 392 383 guest_trap: 393 384 load_vcpu @ Load VCPU pointer to r0
-2
arch/arm/mach-imx/Makefile
··· 101 101 obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o 102 102 obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o 103 103 104 - ifeq ($(CONFIG_PM),y) 105 104 obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o 106 105 # i.MX6SL reuses i.MX6Q code 107 106 obj-$(CONFIG_SOC_IMX6SL) += pm-imx6q.o headsmp.o 108 - endif 109 107 110 108 # i.MX5 based machines 111 109 obj-$(CONFIG_MACH_MX51_BABBAGE) += mach-mx51_babbage.o
+1 -3
arch/arm/mach-imx/common.h
··· 144 144 void imx_cpu_die(unsigned int cpu); 145 145 int imx_cpu_kill(unsigned int cpu); 146 146 147 - #ifdef CONFIG_PM 148 147 void imx6q_pm_init(void); 149 148 void imx6q_pm_set_ccm_base(void __iomem *base); 149 + #ifdef CONFIG_PM 150 150 void imx5_pm_init(void); 151 151 #else 152 - static inline void imx6q_pm_init(void) {} 153 - static inline void imx6q_pm_set_ccm_base(void __iomem *base) {} 154 152 static inline void imx5_pm_init(void) {} 155 153 #endif 156 154
+1
arch/arm/mach-omap1/board-nokia770.c
··· 156 156 .register_dev = 1, 157 157 .hmc_mode = 16, 158 158 .pins[0] = 6, 159 + .extcon = "tahvo-usb", 159 160 }; 160 161 161 162 #if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+4 -4
arch/arm/mach-omap2/Kconfig
··· 50 50 bool "TI OMAP5" 51 51 depends on ARCH_MULTI_V7 52 52 select ARCH_OMAP2PLUS 53 + select ARCH_HAS_OPP 53 54 select ARM_CPU_SUSPEND if PM 54 55 select ARM_GIC 55 56 select CPU_V7 ··· 64 63 bool "TI AM33XX" 65 64 depends on ARCH_MULTI_V7 66 65 select ARCH_OMAP2PLUS 66 + select ARCH_HAS_OPP 67 67 select ARM_CPU_SUSPEND if PM 68 68 select CPU_V7 69 69 select MULTI_IRQ_HANDLER ··· 74 72 depends on ARCH_MULTI_V7 75 73 select CPU_V7 76 74 select ARCH_OMAP2PLUS 75 + select ARCH_HAS_OPP 77 76 select MULTI_IRQ_HANDLER 78 77 select ARM_GIC 79 78 select MACH_OMAP_GENERIC ··· 83 80 bool "TI DRA7XX" 84 81 depends on ARCH_MULTI_V7 85 82 select ARCH_OMAP2PLUS 83 + select ARCH_HAS_OPP 86 84 select ARM_CPU_SUSPEND if PM 87 85 select ARM_GIC 88 86 select CPU_V7 ··· 272 268 default y 273 269 select OMAP_PACKAGE_CBB 274 270 275 - config MACH_NOKIA_N800 276 - bool 277 - 278 271 config MACH_NOKIA_N810 279 272 bool 280 273 ··· 282 281 bool "Nokia N800/N810" 283 282 depends on SOC_OMAP2420 284 283 default y 285 - select MACH_NOKIA_N800 286 284 select MACH_NOKIA_N810 287 285 select MACH_NOKIA_N810_WIMAX 288 286 select OMAP_PACKAGE_ZAC
+2
arch/arm/mach-omap2/cclock3xxx_data.c
··· 433 433 .enable = &omap2_dflt_clk_enable, 434 434 .disable = &omap2_dflt_clk_disable, 435 435 .is_enabled = &omap2_dflt_clk_is_enabled, 436 + .set_rate = &omap3_clkoutx2_set_rate, 436 437 .recalc_rate = &omap3_clkoutx2_recalc, 438 + .round_rate = &omap3_clkoutx2_round_rate, 437 439 }; 438 440 439 441 static const struct clk_ops dpll4_m5x2_ck_3630_ops = {
+5 -3
arch/arm/mach-omap2/cpuidle44xx.c
··· 23 23 #include "prm.h" 24 24 #include "clockdomain.h" 25 25 26 + #define MAX_CPUS 2 27 + 26 28 /* Machine specific information */ 27 29 struct idle_statedata { 28 30 u32 cpu_state; ··· 50 48 }, 51 49 }; 52 50 53 - static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS]; 54 - static struct clockdomain *cpu_clkdm[NR_CPUS]; 51 + static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS]; 52 + static struct clockdomain *cpu_clkdm[MAX_CPUS]; 55 53 56 54 static atomic_t abort_barrier; 57 - static bool cpu_done[NR_CPUS]; 55 + static bool cpu_done[MAX_CPUS]; 58 56 static struct idle_statedata *state_ptr = &omap4_idle_data[0]; 59 57 60 58 /* Private functions */
+78 -16
arch/arm/mach-omap2/dpll3xxx.c
··· 623 623 624 624 /* Clock control for DPLL outputs */ 625 625 626 - /** 627 - * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate 628 - * @clk: DPLL output struct clk 629 - * 630 - * Using parent clock DPLL data, look up DPLL state. If locked, set our 631 - * rate to the dpll_clk * 2; otherwise, just use dpll_clk. 632 - */ 633 - unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, 634 - unsigned long parent_rate) 626 + /* Find the parent DPLL for the given clkoutx2 clock */ 627 + static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw) 635 628 { 636 - const struct dpll_data *dd; 637 - unsigned long rate; 638 - u32 v; 639 629 struct clk_hw_omap *pclk = NULL; 640 630 struct clk *parent; 641 - 642 - if (!parent_rate) 643 - return 0; 644 631 645 632 /* Walk up the parents of clk, looking for a DPLL */ 646 633 do { ··· 643 656 /* clk does not have a DPLL as a parent? error in the clock data */ 644 657 if (!pclk) { 645 658 WARN_ON(1); 646 - return 0; 659 + return NULL; 647 660 } 661 + 662 + return pclk; 663 + } 664 + 665 + /** 666 + * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate 667 + * @clk: DPLL output struct clk 668 + * 669 + * Using parent clock DPLL data, look up DPLL state. If locked, set our 670 + * rate to the dpll_clk * 2; otherwise, just use dpll_clk. 
671 + */ 672 + unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, 673 + unsigned long parent_rate) 674 + { 675 + const struct dpll_data *dd; 676 + unsigned long rate; 677 + u32 v; 678 + struct clk_hw_omap *pclk = NULL; 679 + 680 + if (!parent_rate) 681 + return 0; 682 + 683 + pclk = omap3_find_clkoutx2_dpll(hw); 684 + 685 + if (!pclk) 686 + return 0; 648 687 649 688 dd = pclk->dpll_data; 650 689 ··· 683 670 else 684 671 rate = parent_rate * 2; 685 672 return rate; 673 + } 674 + 675 + int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate, 676 + unsigned long parent_rate) 677 + { 678 + return 0; 679 + } 680 + 681 + long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate, 682 + unsigned long *prate) 683 + { 684 + const struct dpll_data *dd; 685 + u32 v; 686 + struct clk_hw_omap *pclk = NULL; 687 + 688 + if (!*prate) 689 + return 0; 690 + 691 + pclk = omap3_find_clkoutx2_dpll(hw); 692 + 693 + if (!pclk) 694 + return 0; 695 + 696 + dd = pclk->dpll_data; 697 + 698 + /* TYPE J does not have a clkoutx2 */ 699 + if (dd->flags & DPLL_J_TYPE) { 700 + *prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate); 701 + return *prate; 702 + } 703 + 704 + WARN_ON(!dd->enable_mask); 705 + 706 + v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask; 707 + v >>= __ffs(dd->enable_mask); 708 + 709 + /* If in bypass, the rate is fixed to the bypass rate*/ 710 + if (v != OMAP3XXX_EN_DPLL_LOCKED) 711 + return *prate; 712 + 713 + if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) { 714 + unsigned long best_parent; 715 + 716 + best_parent = (rate / 2); 717 + *prate = __clk_round_rate(__clk_get_parent(hw->clk), 718 + best_parent); 719 + } 720 + 721 + return *prate * 2; 686 722 } 687 723 688 724 /* OMAP3/4 non-CORE DPLL clkops */
+2 -2
arch/arm/mach-omap2/gpmc.c
··· 1339 1339 of_property_read_bool(np, "gpmc,time-para-granularity"); 1340 1340 } 1341 1341 1342 - #ifdef CONFIG_MTD_NAND 1342 + #if IS_ENABLED(CONFIG_MTD_NAND) 1343 1343 1344 1344 static const char * const nand_xfer_types[] = { 1345 1345 [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled", ··· 1429 1429 } 1430 1430 #endif 1431 1431 1432 - #ifdef CONFIG_MTD_ONENAND 1432 + #if IS_ENABLED(CONFIG_MTD_ONENAND) 1433 1433 static int gpmc_probe_onenand_child(struct platform_device *pdev, 1434 1434 struct device_node *child) 1435 1435 {
-9
arch/arm/mach-omap2/io.c
··· 179 179 .length = L4_EMU_34XX_SIZE, 180 180 .type = MT_DEVICE 181 181 }, 182 - #if defined(CONFIG_DEBUG_LL) && \ 183 - (defined(CONFIG_MACH_OMAP_ZOOM2) || defined(CONFIG_MACH_OMAP_ZOOM3)) 184 - { 185 - .virtual = ZOOM_UART_VIRT, 186 - .pfn = __phys_to_pfn(ZOOM_UART_BASE), 187 - .length = SZ_1M, 188 - .type = MT_DEVICE 189 - }, 190 - #endif 191 182 }; 192 183 #endif 193 184
+14 -12
arch/arm/mach-omap2/omap_hwmod.c
··· 1947 1947 goto dis_opt_clks; 1948 1948 1949 1949 _write_sysconfig(v, oh); 1950 + 1951 + if (oh->class->sysc->srst_udelay) 1952 + udelay(oh->class->sysc->srst_udelay); 1953 + 1954 + c = _wait_softreset_complete(oh); 1955 + if (c == MAX_MODULE_SOFTRESET_WAIT) { 1956 + pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n", 1957 + oh->name, MAX_MODULE_SOFTRESET_WAIT); 1958 + ret = -ETIMEDOUT; 1959 + goto dis_opt_clks; 1960 + } else { 1961 + pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c); 1962 + } 1963 + 1950 1964 ret = _clear_softreset(oh, &v); 1951 1965 if (ret) 1952 1966 goto dis_opt_clks; 1953 1967 1954 1968 _write_sysconfig(v, oh); 1955 1969 1956 - if (oh->class->sysc->srst_udelay) 1957 - udelay(oh->class->sysc->srst_udelay); 1958 - 1959 - c = _wait_softreset_complete(oh); 1960 - if (c == MAX_MODULE_SOFTRESET_WAIT) 1961 - pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n", 1962 - oh->name, MAX_MODULE_SOFTRESET_WAIT); 1963 - else 1964 - pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c); 1965 - 1966 1970 /* 1967 1971 * XXX add _HWMOD_STATE_WEDGED for modules that don't come back from 1968 1972 * _wait_target_ready() or _reset() 1969 1973 */ 1970 - 1971 - ret = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0; 1972 1974 1973 1975 dis_opt_clks: 1974 1976 if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
+4 -5
arch/arm/mach-omap2/omap_hwmod_7xx_data.c
··· 1365 1365 .rev_offs = 0x0000, 1366 1366 .sysc_offs = 0x0010, 1367 1367 .syss_offs = 0x0014, 1368 - .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | 1369 - SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | 1370 - SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), 1371 - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | 1372 - SIDLE_SMART_WKUP), 1368 + .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP | 1369 + SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | 1370 + SYSS_HAS_RESET_STATUS), 1371 + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), 1373 1372 .sysc_fields = &omap_hwmod_sysc_type1, 1374 1373 }; 1375 1374
+20 -1
arch/arm/mach-omap2/pdata-quirks.c
··· 22 22 #include "common-board-devices.h" 23 23 #include "dss-common.h" 24 24 #include "control.h" 25 + #include "omap-secure.h" 26 + #include "soc.h" 25 27 26 28 struct pdata_init { 27 29 const char *compatible; ··· 171 169 omap_ctrl_writel(v, AM35XX_CONTROL_IP_SW_RESET); 172 170 omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); /* OCP barrier */ 173 171 } 172 + 173 + static void __init nokia_n900_legacy_init(void) 174 + { 175 + hsmmc2_internal_input_clk(); 176 + 177 + if (omap_type() == OMAP2_DEVICE_TYPE_SEC) { 178 + if (IS_ENABLED(CONFIG_ARM_ERRATA_430973)) { 179 + pr_info("RX-51: Enabling ARM errata 430973 workaround\n"); 180 + /* set IBE to 1 */ 181 + rx51_secure_update_aux_cr(BIT(6), 0); 182 + } else { 183 + pr_warning("RX-51: Not enabling ARM errata 430973 workaround\n"); 184 + pr_warning("Thumb binaries may crash randomly without this workaround\n"); 185 + } 186 + } 187 + } 174 188 #endif /* CONFIG_ARCH_OMAP3 */ 175 189 176 190 #ifdef CONFIG_ARCH_OMAP4 ··· 257 239 #endif 258 240 #ifdef CONFIG_ARCH_OMAP3 259 241 OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002030, "48002030.pinmux", &pcs_pdata), 242 + OF_DEV_AUXDATA("ti,omap3-padconf", 0x480025a0, "480025a0.pinmux", &pcs_pdata), 260 243 OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002a00, "48002a00.pinmux", &pcs_pdata), 261 244 /* Only on am3517 */ 262 245 OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL), ··· 278 259 static struct pdata_init pdata_quirks[] __initdata = { 279 260 #ifdef CONFIG_ARCH_OMAP3 280 261 { "compulab,omap3-sbc-t3730", omap3_sbc_t3730_legacy_init, }, 281 - { "nokia,omap3-n900", hsmmc2_internal_input_clk, }, 262 + { "nokia,omap3-n900", nokia_n900_legacy_init, }, 282 263 { "nokia,omap3-n9", hsmmc2_internal_input_clk, }, 283 264 { "nokia,omap3-n950", hsmmc2_internal_input_clk, }, 284 265 { "isee,omap3-igep0020", omap3_igep0020_legacy_init, },
+2 -2
arch/arm/mach-omap2/prminst44xx.c
··· 183 183 OMAP4_PRM_RSTCTRL_OFFSET); 184 184 v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK; 185 185 omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION, 186 - OMAP4430_PRM_DEVICE_INST, 186 + dev_inst, 187 187 OMAP4_PRM_RSTCTRL_OFFSET); 188 188 189 189 /* OCP barrier */ 190 190 v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, 191 - OMAP4430_PRM_DEVICE_INST, 191 + dev_inst, 192 192 OMAP4_PRM_RSTCTRL_OFFSET); 193 193 }
+9
arch/arm/mach-pxa/mioa701.c
··· 38 38 #include <linux/mtd/physmap.h> 39 39 #include <linux/usb/gpio_vbus.h> 40 40 #include <linux/reboot.h> 41 + #include <linux/regulator/fixed.h> 41 42 #include <linux/regulator/max1586.h> 42 43 #include <linux/slab.h> 43 44 #include <linux/i2c/pxa-i2c.h> ··· 715 714 { GPIO56_MT9M111_nOE, GPIOF_OUT_INIT_LOW, "Camera nOE" }, 716 715 }; 717 716 717 + static struct regulator_consumer_supply fixed_5v0_consumers[] = { 718 + REGULATOR_SUPPLY("power", "pwm-backlight"), 719 + }; 720 + 718 721 static void __init mioa701_machine_init(void) 719 722 { 720 723 int rc; ··· 758 753 pxa_set_i2c_info(&i2c_pdata); 759 754 pxa27x_set_i2c_power_info(NULL); 760 755 pxa_set_camera_info(&mioa701_pxacamera_platform_data); 756 + 757 + regulator_register_always_on(0, "fixed-5.0V", fixed_5v0_consumers, 758 + ARRAY_SIZE(fixed_5v0_consumers), 759 + 5000000); 761 760 } 762 761 763 762 static void mioa701_machine_exit(void)
+2
arch/arm/mach-sa1100/include/mach/collie.h
··· 13 13 #ifndef __ASM_ARCH_COLLIE_H 14 14 #define __ASM_ARCH_COLLIE_H 15 15 16 + #include "hardware.h" /* Gives GPIO_MAX */ 17 + 16 18 extern void locomolcd_power(int on); 17 19 18 20 #define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1)
+1
arch/arm/mach-tegra/pm.c
··· 24 24 #include <linux/cpu_pm.h> 25 25 #include <linux/suspend.h> 26 26 #include <linux/err.h> 27 + #include <linux/slab.h> 27 28 #include <linux/clk/tegra.h> 28 29 29 30 #include <asm/smp_plat.h>
+10
arch/arm/mach-tegra/tegra.c
··· 73 73 static void __init tegra_init_cache(void) 74 74 { 75 75 #ifdef CONFIG_CACHE_L2X0 76 + static const struct of_device_id pl310_ids[] __initconst = { 77 + { .compatible = "arm,pl310-cache", }, 78 + {} 79 + }; 80 + 81 + struct device_node *np; 76 82 int ret; 77 83 void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000; 78 84 u32 aux_ctrl, cache_type; 85 + 86 + np = of_find_matching_node(NULL, pl310_ids); 87 + if (!np) 88 + return; 79 89 80 90 cache_type = readl(p + L2X0_CACHE_TYPE); 81 91 aux_ctrl = (cache_type & 0x700) << (17-8);
+1 -1
arch/arm/mm/dma-mapping.c
··· 1358 1358 *handle = DMA_ERROR_CODE; 1359 1359 size = PAGE_ALIGN(size); 1360 1360 1361 - if (gfp & GFP_ATOMIC) 1361 + if (!(gfp & __GFP_WAIT)) 1362 1362 return __iommu_alloc_atomic(dev, size, handle); 1363 1363 1364 1364 /*
+3
arch/arm/mm/dump.c
··· 264 264 note_page(st, addr, 3, pmd_val(*pmd)); 265 265 else 266 266 walk_pte(st, pmd, addr); 267 + 268 + if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) 269 + note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1])); 267 270 } 268 271 } 269 272
+1
arch/arm/mm/mm.h
··· 38 38 39 39 struct mem_type { 40 40 pteval_t prot_pte; 41 + pteval_t prot_pte_s2; 41 42 pmdval_t prot_l1; 42 43 pmdval_t prot_sect; 43 44 unsigned int domain;
+6 -1
arch/arm/mm/mmu.c
··· 232 232 #endif /* ifdef CONFIG_CPU_CP15 / else */ 233 233 234 234 #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN 235 + #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE 235 236 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE 236 237 237 238 static struct mem_type mem_types[] = { 238 239 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ 239 240 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | 241 + L_PTE_SHARED, 242 + .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) | 243 + s2_policy(L_PTE_S2_MT_DEV_SHARED) | 240 244 L_PTE_SHARED, 241 245 .prot_l1 = PMD_TYPE_TABLE, 242 246 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S, ··· 512 508 cp = &cache_policies[cachepolicy]; 513 509 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; 514 510 s2_pgprot = cp->pte_s2; 515 - hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte; 511 + hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte; 512 + s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2; 516 513 517 514 /* 518 515 * ARMv6 and above have extended page tables.
+2 -1
arch/arm/mm/proc-v6.S
··· 208 208 mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache 209 209 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 210 210 mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache 211 - mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 212 211 #ifdef CONFIG_MMU 213 212 mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs 214 213 mcr p15, 0, r0, c2, c0, 2 @ TTB control register ··· 217 218 ALT_UP(orr r8, r8, #TTB_FLAGS_UP) 218 219 mcr p15, 0, r8, c2, c0, 1 @ load TTB1 219 220 #endif /* CONFIG_MMU */ 221 + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and 222 + @ complete invalidations 220 223 adr r5, v6_crval 221 224 ldmia r5, {r5, r6} 222 225 ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
+1 -1
arch/arm/mm/proc-v7.S
··· 351 351 352 352 4: mov r10, #0 353 353 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate 354 - dsb 355 354 #ifdef CONFIG_MMU 356 355 mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs 357 356 v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup ··· 359 360 mcr p15, 0, r5, c10, c2, 0 @ write PRRR 360 361 mcr p15, 0, r6, c10, c2, 1 @ write NMRR 361 362 #endif 363 + dsb @ Complete invalidations 362 364 #ifndef CONFIG_ARM_THUMBEE 363 365 mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE 364 366 and r0, r0, #(0xf << 12) @ ThumbEE enabled field
+8
arch/arm64/include/asm/percpu.h
··· 16 16 #ifndef __ASM_PERCPU_H 17 17 #define __ASM_PERCPU_H 18 18 19 + #ifdef CONFIG_SMP 20 + 19 21 static inline void set_my_cpu_offset(unsigned long off) 20 22 { 21 23 asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory"); ··· 37 35 return off; 38 36 } 39 37 #define __my_cpu_offset __my_cpu_offset() 38 + 39 + #else /* !CONFIG_SMP */ 40 + 41 + #define set_my_cpu_offset(x) do { } while (0) 42 + 43 + #endif /* CONFIG_SMP */ 40 44 41 45 #include <asm-generic/percpu.h> 42 46
+5 -5
arch/arm64/include/asm/pgtable.h
··· 136 136 /* 137 137 * The following only work if pte_present(). Undefined behaviour otherwise. 138 138 */ 139 - #define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) 140 - #define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY) 141 - #define pte_young(pte) (pte_val(pte) & PTE_AF) 142 - #define pte_special(pte) (pte_val(pte) & PTE_SPECIAL) 143 - #define pte_write(pte) (pte_val(pte) & PTE_WRITE) 139 + #define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))) 140 + #define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY)) 141 + #define pte_young(pte) (!!(pte_val(pte) & PTE_AF)) 142 + #define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL)) 143 + #define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE)) 144 144 #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) 145 145 146 146 #define pte_valid_user(pte) \
+5 -1
arch/arm64/kernel/stacktrace.c
··· 48 48 49 49 frame->sp = fp + 0x10; 50 50 frame->fp = *(unsigned long *)(fp); 51 - frame->pc = *(unsigned long *)(fp + 8); 51 + /* 52 + * -4 here because we care about the PC at time of bl, 53 + * not where the return will go. 54 + */ 55 + frame->pc = *(unsigned long *)(fp + 8) - 4; 52 56 53 57 return 0; 54 58 }
+25 -2
arch/arm64/kvm/hyp.S
··· 694 694 695 695 .align 2 696 696 697 + /* 698 + * u64 kvm_call_hyp(void *hypfn, ...); 699 + * 700 + * This is not really a variadic function in the classic C-way and care must 701 + * be taken when calling this to ensure parameters are passed in registers 702 + * only, since the stack will change between the caller and the callee. 703 + * 704 + * Call the function with the first argument containing a pointer to the 705 + * function you wish to call in Hyp mode, and subsequent arguments will be 706 + * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the 707 + * function pointer can be passed). The function being called must be mapped 708 + * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are 709 + * passed in r0 and r1. 710 + * 711 + * A function pointer with a value of 0 has a special meaning, and is 712 + * used to implement __hyp_get_vectors in the same way as in 713 + * arch/arm64/kernel/hyp_stub.S. 714 + */ 697 715 ENTRY(kvm_call_hyp) 698 716 hvc #0 699 717 ret ··· 755 737 pop x2, x3 756 738 pop x0, x1 757 739 758 - push lr, xzr 740 + /* Check for __hyp_get_vectors */ 741 + cbnz x0, 1f 742 + mrs x0, vbar_el2 743 + b 2f 744 + 745 + 1: push lr, xzr 759 746 760 747 /* 761 748 * Compute the function address in EL2, and shuffle the parameters. ··· 773 750 blr lr 774 751 775 752 pop lr, xzr 776 - eret 753 + 2: eret 777 754 778 755 el1_trap: 779 756 /*
+1 -1
arch/avr32/Makefile
··· 11 11 12 12 KBUILD_DEFCONFIG := atstk1002_defconfig 13 13 14 - KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic 14 + KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__ 15 15 KBUILD_AFLAGS += -mrelax -mno-pic 16 16 KBUILD_CFLAGS_MODULE += -mno-relax 17 17 LDFLAGS_vmlinux += --relax
+1
arch/avr32/boards/mimc200/fram.c
··· 11 11 #define FRAM_VERSION "1.0" 12 12 13 13 #include <linux/miscdevice.h> 14 + #include <linux/module.h> 14 15 #include <linux/proc_fs.h> 15 16 #include <linux/mm.h> 16 17 #include <linux/io.h>
+1
arch/avr32/include/asm/Kbuild
··· 17 17 generic-y += sections.h 18 18 generic-y += topology.h 19 19 generic-y += trace_clock.h 20 + generic-y += vga.h 20 21 generic-y += xor.h 21 22 generic-y += hash.h
+2
arch/avr32/include/asm/io.h
··· 295 295 #define iounmap(addr) \ 296 296 __iounmap(addr) 297 297 298 + #define ioremap_wc ioremap_nocache 299 + 298 300 #define cached(addr) P1SEGADDR(addr) 299 301 #define uncached(addr) P2SEGADDR(addr) 300 302
+1
arch/c6x/include/asm/cache.h
··· 12 12 #define _ASM_C6X_CACHE_H 13 13 14 14 #include <linux/irqflags.h> 15 + #include <linux/init.h> 15 16 16 17 /* 17 18 * Cache line size
+3 -3
arch/m68k/include/asm/Kbuild
··· 1 - 1 + generic-y += barrier.h 2 2 generic-y += bitsperlong.h 3 3 generic-y += clkdev.h 4 4 generic-y += cputime.h ··· 6 6 generic-y += emergency-restart.h 7 7 generic-y += errno.h 8 8 generic-y += exec.h 9 + generic-y += hash.h 9 10 generic-y += hw_irq.h 10 11 generic-y += ioctl.h 11 12 generic-y += ipcbuf.h ··· 19 18 generic-y += mman.h 20 19 generic-y += mutex.h 21 20 generic-y += percpu.h 21 + generic-y += preempt.h 22 22 generic-y += resource.h 23 23 generic-y += scatterlist.h 24 24 generic-y += sections.h ··· 33 31 generic-y += types.h 34 32 generic-y += word-at-a-time.h 35 33 generic-y += xor.h 36 - generic-y += preempt.h 37 - generic-y += hash.h
-8
arch/m68k/include/asm/barrier.h
··· 1 - #ifndef _M68K_BARRIER_H 2 - #define _M68K_BARRIER_H 3 - 4 - #define nop() do { asm volatile ("nop"); barrier(); } while (0) 5 - 6 - #include <asm-generic/barrier.h> 7 - 8 - #endif /* _M68K_BARRIER_H */
+1 -1
arch/m68k/include/asm/unistd.h
··· 4 4 #include <uapi/asm/unistd.h> 5 5 6 6 7 - #define NR_syscalls 349 7 + #define NR_syscalls 351 8 8 9 9 #define __ARCH_WANT_OLD_READDIR 10 10 #define __ARCH_WANT_OLD_STAT
+2
arch/m68k/include/uapi/asm/unistd.h
··· 354 354 #define __NR_process_vm_writev 346 355 355 #define __NR_kcmp 347 356 356 #define __NR_finit_module 348 357 + #define __NR_sched_setattr 349 358 + #define __NR_sched_getattr 350 357 359 358 360 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
+2
arch/m68k/kernel/syscalltable.S
··· 369 369 .long sys_process_vm_writev 370 370 .long sys_kcmp 371 371 .long sys_finit_module 372 + .long sys_sched_setattr 373 + .long sys_sched_getattr /* 350 */ 372 374
+3 -2
arch/powerpc/include/asm/compat.h
··· 200 200 201 201 /* 202 202 * We can't access below the stack pointer in the 32bit ABI and 203 - * can access 288 bytes in the 64bit ABI 203 + * can access 288 bytes in the 64bit big-endian ABI, 204 + * or 512 bytes with the new ELFv2 little-endian ABI. 204 205 */ 205 206 if (!is_32bit_task()) 206 - usp -= 288; 207 + usp -= USER_REDZONE_SIZE; 207 208 208 209 return (void __user *) (usp - len); 209 210 }
+19 -2
arch/powerpc/include/asm/eeh.h
··· 172 172 }; 173 173 174 174 extern struct eeh_ops *eeh_ops; 175 - extern int eeh_subsystem_enabled; 175 + extern bool eeh_subsystem_enabled; 176 176 extern raw_spinlock_t confirm_error_lock; 177 177 extern int eeh_probe_mode; 178 + 179 + static inline bool eeh_enabled(void) 180 + { 181 + return eeh_subsystem_enabled; 182 + } 183 + 184 + static inline void eeh_set_enable(bool mode) 185 + { 186 + eeh_subsystem_enabled = mode; 187 + } 178 188 179 189 #define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */ 180 190 #define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */ ··· 256 246 * If this macro yields TRUE, the caller relays to eeh_check_failure() 257 247 * which does further tests out of line. 258 248 */ 259 - #define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled) 249 + #define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_enabled()) 260 250 261 251 /* 262 252 * Reads from a device which has been isolated by EEH will return ··· 266 256 #define EEH_IO_ERROR_VALUE(size) (~0U >> ((4 - (size)) * 8)) 267 257 268 258 #else /* !CONFIG_EEH */ 259 + 260 + static inline bool eeh_enabled(void) 261 + { 262 + return false; 263 + } 264 + 265 + static inline void eeh_set_enable(bool mode) { } 269 266 270 267 static inline int eeh_init(void) 271 268 {
+1 -1
arch/powerpc/include/asm/hugetlb.h
··· 127 127 unsigned long addr, pte_t *ptep) 128 128 { 129 129 #ifdef CONFIG_PPC64 130 - return __pte(pte_update(mm, addr, ptep, ~0UL, 1)); 130 + return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1)); 131 131 #else 132 132 return __pte(pte_update(ptep, ~0UL, 0)); 133 133 #endif
+2 -2
arch/powerpc/include/asm/opal.h
··· 816 816 int64_t opal_pci_poll(uint64_t phb_id); 817 817 int64_t opal_return_cpu(void); 818 818 819 - int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val); 820 - int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); 819 + int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val); 820 + int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val); 821 821 822 822 int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, 823 823 uint32_t addr, uint32_t data, uint32_t sz);
+15 -11
arch/powerpc/include/asm/pgtable-ppc64.h
··· 195 195 static inline unsigned long pte_update(struct mm_struct *mm, 196 196 unsigned long addr, 197 197 pte_t *ptep, unsigned long clr, 198 + unsigned long set, 198 199 int huge) 199 200 { 200 201 #ifdef PTE_ATOMIC_UPDATES ··· 206 205 andi. %1,%0,%6\n\ 207 206 bne- 1b \n\ 208 207 andc %1,%0,%4 \n\ 208 + or %1,%1,%7\n\ 209 209 stdcx. %1,0,%3 \n\ 210 210 bne- 1b" 211 211 : "=&r" (old), "=&r" (tmp), "=m" (*ptep) 212 - : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) 212 + : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set) 213 213 : "cc" ); 214 214 #else 215 215 unsigned long old = pte_val(*ptep); 216 - *ptep = __pte(old & ~clr); 216 + *ptep = __pte((old & ~clr) | set); 217 217 #endif 218 218 /* huge pages use the old page table lock */ 219 219 if (!huge) ··· 233 231 { 234 232 unsigned long old; 235 233 236 - if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) 234 + if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) 237 235 return 0; 238 - old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0); 236 + old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); 239 237 return (old & _PAGE_ACCESSED) != 0; 240 238 } 241 239 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG ··· 254 252 if ((pte_val(*ptep) & _PAGE_RW) == 0) 255 253 return; 256 254 257 - pte_update(mm, addr, ptep, _PAGE_RW, 0); 255 + pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); 258 256 } 259 257 260 258 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, ··· 263 261 if ((pte_val(*ptep) & _PAGE_RW) == 0) 264 262 return; 265 263 266 - pte_update(mm, addr, ptep, _PAGE_RW, 1); 264 + pte_update(mm, addr, ptep, _PAGE_RW, 0, 1); 267 265 } 268 266 269 267 /* ··· 286 284 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, 287 285 unsigned long addr, pte_t *ptep) 288 286 { 289 - unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0); 287 + unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0); 290 288 return __pte(old); 291 289 } 292 290 293 291 static 
inline void pte_clear(struct mm_struct *mm, unsigned long addr, 294 292 pte_t * ptep) 295 293 { 296 - pte_update(mm, addr, ptep, ~0UL, 0); 294 + pte_update(mm, addr, ptep, ~0UL, 0, 0); 297 295 } 298 296 299 297 ··· 508 506 509 507 extern unsigned long pmd_hugepage_update(struct mm_struct *mm, 510 508 unsigned long addr, 511 - pmd_t *pmdp, unsigned long clr); 509 + pmd_t *pmdp, 510 + unsigned long clr, 511 + unsigned long set); 512 512 513 513 static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, 514 514 unsigned long addr, pmd_t *pmdp) ··· 519 515 520 516 if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) 521 517 return 0; 522 - old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED); 518 + old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0); 523 519 return ((old & _PAGE_ACCESSED) != 0); 524 520 } 525 521 ··· 546 542 if ((pmd_val(*pmdp) & _PAGE_RW) == 0) 547 543 return; 548 544 549 - pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW); 545 + pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0); 550 546 } 551 547 552 548 #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+22
arch/powerpc/include/asm/pgtable.h
··· 75 75 return pte; 76 76 } 77 77 78 + #define ptep_set_numa ptep_set_numa 79 + static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, 80 + pte_t *ptep) 81 + { 82 + if ((pte_val(*ptep) & _PAGE_PRESENT) == 0) 83 + VM_BUG_ON(1); 84 + 85 + pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0); 86 + return; 87 + } 88 + 78 89 #define pmd_numa pmd_numa 79 90 static inline int pmd_numa(pmd_t pmd) 80 91 { 81 92 return pte_numa(pmd_pte(pmd)); 93 + } 94 + 95 + #define pmdp_set_numa pmdp_set_numa 96 + static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, 97 + pmd_t *pmdp) 98 + { 99 + if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0) 100 + VM_BUG_ON(1); 101 + 102 + pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA); 103 + return; 82 104 } 83 105 84 106 #define pmd_mknonnuma pmd_mknonnuma
+15 -1
arch/powerpc/include/asm/ptrace.h
··· 28 28 29 29 #ifdef __powerpc64__ 30 30 31 + /* 32 + * Size of redzone that userspace is allowed to use below the stack 33 + * pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in 34 + * the new ELFv2 little-endian ABI, so we allow the larger amount. 35 + * 36 + * For kernel code we allow a 288-byte redzone, in order to conserve 37 + * kernel stack space; gcc currently only uses 288 bytes, and will 38 + * hopefully allow explicit control of the redzone size in future. 39 + */ 40 + #define USER_REDZONE_SIZE 512 41 + #define KERNEL_REDZONE_SIZE 288 42 + 31 43 #define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */ 32 44 #define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */ 33 45 #define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265) 34 46 #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \ 35 - STACK_FRAME_OVERHEAD + 288) 47 + STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE) 36 48 #define STACK_FRAME_MARKER 12 37 49 38 50 /* Size of dummy stack frame allocated when calling signal handler. */ ··· 53 41 54 42 #else /* __powerpc64__ */ 55 43 44 + #define USER_REDZONE_SIZE 0 45 + #define KERNEL_REDZONE_SIZE 0 56 46 #define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */ 57 47 #define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */ 58 48 #define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773)
+3 -3
arch/powerpc/include/asm/vdso.h
··· 4 4 #ifdef __KERNEL__ 5 5 6 6 /* Default link addresses for the vDSOs */ 7 - #define VDSO32_LBASE 0x100000 8 - #define VDSO64_LBASE 0x100000 7 + #define VDSO32_LBASE 0x0 8 + #define VDSO64_LBASE 0x0 9 9 10 10 /* Default map addresses for 32bit vDSO */ 11 - #define VDSO32_MBASE VDSO32_LBASE 11 + #define VDSO32_MBASE 0x100000 12 12 13 13 #define VDSO_VERSION_STRING LINUX_2.6.15 14 14
+5 -3
arch/powerpc/kernel/crash_dump.c
··· 98 98 size_t csize, unsigned long offset, int userbuf) 99 99 { 100 100 void *vaddr; 101 + phys_addr_t paddr; 101 102 102 103 if (!csize) 103 104 return 0; 104 105 105 106 csize = min_t(size_t, csize, PAGE_SIZE); 107 + paddr = pfn << PAGE_SHIFT; 106 108 107 - if ((min_low_pfn < pfn) && (pfn < max_pfn)) { 108 - vaddr = __va(pfn << PAGE_SHIFT); 109 + if (memblock_is_region_memory(paddr, csize)) { 110 + vaddr = __va(paddr); 109 111 csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); 110 112 } else { 111 - vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0); 113 + vaddr = __ioremap(paddr, PAGE_SIZE, 0); 112 114 csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); 113 115 iounmap(vaddr); 114 116 }
+26 -6
arch/powerpc/kernel/eeh.c
··· 28 28 #include <linux/pci.h> 29 29 #include <linux/proc_fs.h> 30 30 #include <linux/rbtree.h> 31 + #include <linux/reboot.h> 31 32 #include <linux/seq_file.h> 32 33 #include <linux/spinlock.h> 33 34 #include <linux/export.h> ··· 90 89 /* Platform dependent EEH operations */ 91 90 struct eeh_ops *eeh_ops = NULL; 92 91 93 - int eeh_subsystem_enabled; 92 + bool eeh_subsystem_enabled = false; 94 93 EXPORT_SYMBOL(eeh_subsystem_enabled); 95 94 96 95 /* ··· 365 364 366 365 eeh_stats.total_mmio_ffs++; 367 366 368 - if (!eeh_subsystem_enabled) 367 + if (!eeh_enabled()) 369 368 return 0; 370 369 371 370 if (!edev) { ··· 748 747 return -EEXIST; 749 748 } 750 749 750 + static int eeh_reboot_notifier(struct notifier_block *nb, 751 + unsigned long action, void *unused) 752 + { 753 + eeh_set_enable(false); 754 + return NOTIFY_DONE; 755 + } 756 + 757 + static struct notifier_block eeh_reboot_nb = { 758 + .notifier_call = eeh_reboot_notifier, 759 + }; 760 + 751 761 /** 752 762 * eeh_init - EEH initialization 753 763 * ··· 789 777 */ 790 778 if (machine_is(powernv) && cnt++ <= 0) 791 779 return ret; 780 + 781 + /* Register reboot notifier */ 782 + ret = register_reboot_notifier(&eeh_reboot_nb); 783 + if (ret) { 784 + pr_warn("%s: Failed to register notifier (%d)\n", 785 + __func__, ret); 786 + return ret; 787 + } 792 788 793 789 /* call platform initialization function */ 794 790 if (!eeh_ops) { ··· 842 822 return ret; 843 823 } 844 824 845 - if (eeh_subsystem_enabled) 825 + if (eeh_enabled()) 846 826 pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n"); 847 827 else 848 828 pr_warning("EEH: No capable adapters found\n"); ··· 917 897 struct device_node *dn; 918 898 struct eeh_dev *edev; 919 899 920 - if (!dev || !eeh_subsystem_enabled) 900 + if (!dev || !eeh_enabled()) 921 901 return; 922 902 923 903 pr_debug("EEH: Adding device %s\n", pci_name(dev)); ··· 1025 1005 { 1026 1006 struct eeh_dev *edev; 1027 1007 1028 - if (!dev || !eeh_subsystem_enabled) 1008 + if (!dev || 
!eeh_enabled()) 1029 1009 return; 1030 1010 edev = pci_dev_to_eeh_dev(dev); 1031 1011 ··· 1065 1045 1066 1046 static int proc_eeh_show(struct seq_file *m, void *v) 1067 1047 { 1068 - if (0 == eeh_subsystem_enabled) { 1048 + if (!eeh_enabled()) { 1069 1049 seq_printf(m, "EEH Subsystem is globally disabled\n"); 1070 1050 seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs); 1071 1051 } else {
+1
arch/powerpc/kernel/ftrace.c
··· 74 74 */ 75 75 static int test_24bit_addr(unsigned long ip, unsigned long addr) 76 76 { 77 + addr = ppc_function_entry((void *)addr); 77 78 78 79 /* use the create_branch to verify that this offset can be branched */ 79 80 return create_branch((unsigned int *)ip, addr, 0);
+4 -1
arch/powerpc/kernel/misc_32.S
··· 57 57 mtlr r0 58 58 blr 59 59 60 + /* 61 + * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); 62 + */ 60 63 _GLOBAL(call_do_irq) 61 64 mflr r0 62 65 stw r0,4(r1) 63 66 lwz r10,THREAD+KSP_LIMIT(r2) 64 - addi r11,r3,THREAD_INFO_GAP 67 + addi r11,r4,THREAD_INFO_GAP 65 68 stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) 66 69 mr r1,r4 67 70 stw r10,8(r1)
+9
arch/powerpc/kernel/process.c
··· 1048 1048 flush_altivec_to_thread(src); 1049 1049 flush_vsx_to_thread(src); 1050 1050 flush_spe_to_thread(src); 1051 + /* 1052 + * Flush TM state out so we can copy it. __switch_to_tm() does this 1053 + * flush but it removes the checkpointed state from the current CPU and 1054 + * transitions the CPU out of TM mode. Hence we need to call 1055 + * tm_recheckpoint_new_task() (on the same task) to restore the 1056 + * checkpointed state back and the TM mode. 1057 + */ 1058 + __switch_to_tm(src); 1059 + tm_recheckpoint_new_task(src); 1051 1060 1052 1061 *dst = *src; 1053 1062
+1
arch/powerpc/kernel/reloc_64.S
··· 81 81 82 82 6: blr 83 83 84 + .balign 8 84 85 p_dyn: .llong __dynamic_start - 0b 85 86 p_rela: .llong __rela_dyn_start - 0b 86 87 p_st: .llong _stext - 0b
+2 -2
arch/powerpc/kernel/signal_64.c
··· 65 65 struct siginfo __user *pinfo; 66 66 void __user *puc; 67 67 struct siginfo info; 68 - /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ 69 - char abigap[288]; 68 + /* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */ 69 + char abigap[USER_REDZONE_SIZE]; 70 70 } __attribute__ ((aligned (16))); 71 71 72 72 static const char fmt32[] = KERN_INFO \
+1 -1
arch/powerpc/kernel/vdso32/vdso32_wrapper.S
··· 6 6 .globl vdso32_start, vdso32_end 7 7 .balign PAGE_SIZE 8 8 vdso32_start: 9 - .incbin "arch/powerpc/kernel/vdso32/vdso32.so" 9 + .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg" 10 10 .balign PAGE_SIZE 11 11 vdso32_end: 12 12
+1 -1
arch/powerpc/kernel/vdso64/vdso64_wrapper.S
··· 6 6 .globl vdso64_start, vdso64_end 7 7 .balign PAGE_SIZE 8 8 vdso64_start: 9 - .incbin "arch/powerpc/kernel/vdso64/vdso64.so" 9 + .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg" 10 10 .balign PAGE_SIZE 11 11 vdso64_end: 12 12
+7 -5
arch/powerpc/mm/pgtable_64.c
··· 510 510 } 511 511 512 512 unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, 513 - pmd_t *pmdp, unsigned long clr) 513 + pmd_t *pmdp, unsigned long clr, 514 + unsigned long set) 514 515 { 515 516 516 517 unsigned long old, tmp; ··· 527 526 andi. %1,%0,%6\n\ 528 527 bne- 1b \n\ 529 528 andc %1,%0,%4 \n\ 529 + or %1,%1,%7\n\ 530 530 stdcx. %1,0,%3 \n\ 531 531 bne- 1b" 532 532 : "=&r" (old), "=&r" (tmp), "=m" (*pmdp) 533 - : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY) 533 + : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set) 534 534 : "cc" ); 535 535 #else 536 536 old = pmd_val(*pmdp); 537 - *pmdp = __pmd(old & ~clr); 537 + *pmdp = __pmd((old & ~clr) | set); 538 538 #endif 539 539 if (old & _PAGE_HASHPTE) 540 540 hpte_do_hugepage_flush(mm, addr, pmdp); ··· 710 708 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 711 709 pmd_t *pmdp) 712 710 { 713 - pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT); 711 + pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); 714 712 } 715 713 716 714 /* ··· 837 835 unsigned long old; 838 836 pgtable_t *pgtable_slot; 839 837 840 - old = pmd_hugepage_update(mm, addr, pmdp, ~0UL); 838 + old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0); 841 839 old_pmd = __pmd(old); 842 840 /* 843 841 * We have pmd == none and we are holding page_table_lock.
+1 -1
arch/powerpc/mm/subpage-prot.c
··· 78 78 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 79 79 arch_enter_lazy_mmu_mode(); 80 80 for (; npages > 0; --npages) { 81 - pte_update(mm, addr, pte, 0, 0); 81 + pte_update(mm, addr, pte, 0, 0, 0); 82 82 addr += PAGE_SIZE; 83 83 ++pte; 84 84 }
+49 -79
arch/powerpc/platforms/powernv/eeh-ioda.c
··· 44 44 45 45 /* We simply send special EEH event */ 46 46 if ((changed_evts & OPAL_EVENT_PCI_ERROR) && 47 - (events & OPAL_EVENT_PCI_ERROR)) 47 + (events & OPAL_EVENT_PCI_ERROR) && 48 + eeh_enabled()) 48 49 eeh_send_failure_event(NULL); 49 50 50 51 return 0; ··· 113 112 DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get, 114 113 ioda_eeh_inbB_dbgfs_set, "0x%llx\n"); 115 114 #endif /* CONFIG_DEBUG_FS */ 115 + 116 116 117 117 /** 118 118 * ioda_eeh_post_init - Chip dependent post initialization ··· 222 220 return ret; 223 221 } 224 222 223 + static void ioda_eeh_phb_diag(struct pci_controller *hose) 224 + { 225 + struct pnv_phb *phb = hose->private_data; 226 + long rc; 227 + 228 + rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, 229 + PNV_PCI_DIAG_BUF_SIZE); 230 + if (rc != OPAL_SUCCESS) { 231 + pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", 232 + __func__, hose->global_number, rc); 233 + return; 234 + } 235 + 236 + pnv_pci_dump_phb_diag_data(hose, phb->diag.blob); 237 + } 238 + 225 239 /** 226 240 * ioda_eeh_get_state - Retrieve the state of PE 227 241 * @pe: EEH PE ··· 289 271 result |= EEH_STATE_DMA_ACTIVE; 290 272 result |= EEH_STATE_MMIO_ENABLED; 291 273 result |= EEH_STATE_DMA_ENABLED; 274 + } else if (!(pe->state & EEH_PE_ISOLATED)) { 275 + eeh_pe_state_mark(pe, EEH_PE_ISOLATED); 276 + ioda_eeh_phb_diag(hose); 292 277 } 293 278 294 279 return result; ··· 333 312 pr_warning("%s: Unexpected EEH status 0x%x " 334 313 "on PHB#%x-PE#%x\n", 335 314 __func__, fstate, hose->global_number, pe_no); 315 + } 316 + 317 + /* Dump PHB diag-data for frozen PE */ 318 + if (result != EEH_STATE_NOT_SUPPORT && 319 + (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) != 320 + (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) && 321 + !(pe->state & EEH_PE_ISOLATED)) { 322 + eeh_pe_state_mark(pe, EEH_PE_ISOLATED); 323 + ioda_eeh_phb_diag(hose); 336 324 } 337 325 338 326 return result; ··· 519 489 static int 
ioda_eeh_reset(struct eeh_pe *pe, int option) 520 490 { 521 491 struct pci_controller *hose = pe->phb; 522 - struct eeh_dev *edev; 523 - struct pci_dev *dev; 492 + struct pci_bus *bus; 524 493 int ret; 525 494 526 495 /* ··· 548 519 if (pe->type & EEH_PE_PHB) { 549 520 ret = ioda_eeh_phb_reset(hose, option); 550 521 } else { 551 - if (pe->type & EEH_PE_DEVICE) { 552 - /* 553 - * If it's device PE, we didn't refer to the parent 554 - * PCI bus yet. So we have to figure it out indirectly. 555 - */ 556 - edev = list_first_entry(&pe->edevs, 557 - struct eeh_dev, list); 558 - dev = eeh_dev_to_pci_dev(edev); 559 - dev = dev->bus->self; 560 - } else { 561 - /* 562 - * If it's bus PE, the parent PCI bus is already there 563 - * and just pick it up. 564 - */ 565 - dev = pe->bus->self; 566 - } 567 - 568 - /* 569 - * Do reset based on the fact that the direct upstream bridge 570 - * is root bridge (port) or not. 571 - */ 572 - if (dev->bus->number == 0) 522 + bus = eeh_pe_bus_get(pe); 523 + if (pci_is_root_bus(bus)) 573 524 ret = ioda_eeh_root_reset(hose, option); 574 525 else 575 - ret = ioda_eeh_bridge_reset(hose, dev, option); 526 + ret = ioda_eeh_bridge_reset(hose, bus->self, option); 576 527 } 577 528 578 529 return ret; 579 - } 580 - 581 - /** 582 - * ioda_eeh_get_log - Retrieve error log 583 - * @pe: EEH PE 584 - * @severity: Severity level of the log 585 - * @drv_log: buffer to store the log 586 - * @len: space of the log buffer 587 - * 588 - * The function is used to retrieve error log from P7IOC. 
589 - */ 590 - static int ioda_eeh_get_log(struct eeh_pe *pe, int severity, 591 - char *drv_log, unsigned long len) 592 - { 593 - s64 ret; 594 - unsigned long flags; 595 - struct pci_controller *hose = pe->phb; 596 - struct pnv_phb *phb = hose->private_data; 597 - 598 - spin_lock_irqsave(&phb->lock, flags); 599 - 600 - ret = opal_pci_get_phb_diag_data2(phb->opal_id, 601 - phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE); 602 - if (ret) { 603 - spin_unlock_irqrestore(&phb->lock, flags); 604 - pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n", 605 - __func__, hose->global_number, pe->addr, ret); 606 - return -EIO; 607 - } 608 - 609 - /* The PHB diag-data is always indicative */ 610 - pnv_pci_dump_phb_diag_data(hose, phb->diag.blob); 611 - 612 - spin_unlock_irqrestore(&phb->lock, flags); 613 - 614 - return 0; 615 530 } 616 531 617 532 /** ··· 637 664 pr_warning("%s: Invalid type of HUB#%llx diag-data (%d)\n", 638 665 __func__, phb->hub_id, data->type); 639 666 } 640 - } 641 - 642 - static void ioda_eeh_phb_diag(struct pci_controller *hose) 643 - { 644 - struct pnv_phb *phb = hose->private_data; 645 - long rc; 646 - 647 - rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, 648 - PNV_PCI_DIAG_BUF_SIZE); 649 - if (rc != OPAL_SUCCESS) { 650 - pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", 651 - __func__, hose->global_number, rc); 652 - return; 653 - } 654 - 655 - pnv_pci_dump_phb_diag_data(hose, phb->diag.blob); 656 667 } 657 668 658 669 static int ioda_eeh_get_phb_pe(struct pci_controller *hose, ··· 812 855 } 813 856 814 857 /* 858 + * EEH core will try recover from fenced PHB or 859 + * frozen PE. In the time for frozen PE, EEH core 860 + * enable IO path for that before collecting logs, 861 + * but it ruins the site. So we have to dump the 862 + * log in advance here. 
863 + */ 864 + if ((ret == EEH_NEXT_ERR_FROZEN_PE || 865 + ret == EEH_NEXT_ERR_FENCED_PHB) && 866 + !((*pe)->state & EEH_PE_ISOLATED)) { 867 + eeh_pe_state_mark(*pe, EEH_PE_ISOLATED); 868 + ioda_eeh_phb_diag(hose); 869 + } 870 + 871 + /* 815 872 * If we have no errors on the specific PHB or only 816 873 * informative error there, we continue poking it. 817 874 * Otherwise, we need actions to be taken by upper ··· 843 872 .set_option = ioda_eeh_set_option, 844 873 .get_state = ioda_eeh_get_state, 845 874 .reset = ioda_eeh_reset, 846 - .get_log = ioda_eeh_get_log, 847 875 .configure_bridge = ioda_eeh_configure_bridge, 848 876 .next_error = ioda_eeh_next_error 849 877 };
+1 -1
arch/powerpc/platforms/powernv/eeh-powernv.c
··· 145 145 * Enable EEH explicitly so that we will do EEH check 146 146 * while accessing I/O stuff 147 147 */ 148 - eeh_subsystem_enabled = 1; 148 + eeh_set_enable(true); 149 149 150 150 /* Save memory bars */ 151 151 eeh_save_bars(edev);
+12 -9
arch/powerpc/platforms/powernv/opal-xscom.c
··· 71 71 } 72 72 } 73 73 74 - static u64 opal_scom_unmangle(u64 reg) 74 + static u64 opal_scom_unmangle(u64 addr) 75 75 { 76 76 /* 77 77 * XSCOM indirect addresses have the top bit set. Additionally 78 - * the reset of the top 3 nibbles is always 0. 78 + * the rest of the top 3 nibbles is always 0. 79 79 * 80 80 * Because the debugfs interface uses signed offsets and shifts 81 81 * the address left by 3, we basically cannot use the top 4 bits ··· 86 86 * conversion here. To leave room for further xscom address 87 87 * expansion, we only clear out the top byte 88 88 * 89 + * For in-kernel use, we also support the real indirect bit, so 90 + * we test for any of the top 5 bits 91 + * 89 92 */ 90 - if (reg & (1ull << 59)) 91 - reg = (reg & ~(0xffull << 56)) | (1ull << 63); 92 - return reg; 93 + if (addr & (0x1full << 59)) 94 + addr = (addr & ~(0xffull << 56)) | (1ull << 63); 95 + return addr; 93 96 } 94 97 95 98 static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) ··· 101 98 int64_t rc; 102 99 __be64 v; 103 100 104 - reg = opal_scom_unmangle(reg); 105 - rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v)); 101 + reg = opal_scom_unmangle(m->addr + reg); 102 + rc = opal_xscom_read(m->chip, reg, (__be64 *)__pa(&v)); 106 103 *value = be64_to_cpu(v); 107 104 return opal_xscom_err_xlate(rc); 108 105 } ··· 112 109 struct opal_scom_map *m = map; 113 110 int64_t rc; 114 111 115 - reg = opal_scom_unmangle(reg); 116 - rc = opal_xscom_write(m->chip, m->addr + reg, value); 112 + reg = opal_scom_unmangle(m->addr + reg); 113 + rc = opal_xscom_write(m->chip, reg, value); 117 114 return opal_xscom_err_xlate(rc); 118 115 } 119 116
+125 -95
arch/powerpc/platforms/powernv/pci.c
··· 134 134 pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n", 135 135 hose->global_number, common->version); 136 136 137 - pr_info(" brdgCtl: %08x\n", data->brdgCtl); 138 - 139 - pr_info(" portStatusReg: %08x\n", data->portStatusReg); 140 - pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus); 141 - pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); 142 - 143 - pr_info(" deviceStatus: %08x\n", data->deviceStatus); 144 - pr_info(" slotStatus: %08x\n", data->slotStatus); 145 - pr_info(" linkStatus: %08x\n", data->linkStatus); 146 - pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); 147 - pr_info(" devSecStatus: %08x\n", data->devSecStatus); 148 - 149 - pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); 150 - pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); 151 - pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus); 152 - pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); 153 - pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); 154 - pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); 155 - pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); 156 - pr_info(" sourceId: %08x\n", data->sourceId); 157 - pr_info(" errorClass: %016llx\n", data->errorClass); 158 - pr_info(" correlator: %016llx\n", data->correlator); 159 - pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr); 160 - pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr); 161 - pr_info(" lemFir: %016llx\n", data->lemFir); 162 - pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); 163 - pr_info(" lemWOF: %016llx\n", data->lemWOF); 164 - pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); 165 - pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); 166 - pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); 167 - pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); 168 - pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus); 169 - pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); 170 - pr_info(" mmioErrorLog0: %016llx\n", 
data->mmioErrorLog0); 171 - pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); 172 - pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); 173 - pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); 174 - pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); 175 - pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); 176 - pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); 177 - pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus); 178 - pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); 179 - pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1); 137 + if (data->brdgCtl) 138 + pr_info(" brdgCtl: %08x\n", 139 + data->brdgCtl); 140 + if (data->portStatusReg || data->rootCmplxStatus || 141 + data->busAgentStatus) 142 + pr_info(" UtlSts: %08x %08x %08x\n", 143 + data->portStatusReg, data->rootCmplxStatus, 144 + data->busAgentStatus); 145 + if (data->deviceStatus || data->slotStatus || 146 + data->linkStatus || data->devCmdStatus || 147 + data->devSecStatus) 148 + pr_info(" RootSts: %08x %08x %08x %08x %08x\n", 149 + data->deviceStatus, data->slotStatus, 150 + data->linkStatus, data->devCmdStatus, 151 + data->devSecStatus); 152 + if (data->rootErrorStatus || data->uncorrErrorStatus || 153 + data->corrErrorStatus) 154 + pr_info(" RootErrSts: %08x %08x %08x\n", 155 + data->rootErrorStatus, data->uncorrErrorStatus, 156 + data->corrErrorStatus); 157 + if (data->tlpHdr1 || data->tlpHdr2 || 158 + data->tlpHdr3 || data->tlpHdr4) 159 + pr_info(" RootErrLog: %08x %08x %08x %08x\n", 160 + data->tlpHdr1, data->tlpHdr2, 161 + data->tlpHdr3, data->tlpHdr4); 162 + if (data->sourceId || data->errorClass || 163 + data->correlator) 164 + pr_info(" RootErrLog1: %08x %016llx %016llx\n", 165 + data->sourceId, data->errorClass, 166 + data->correlator); 167 + if (data->p7iocPlssr || data->p7iocCsr) 168 + pr_info(" PhbSts: %016llx %016llx\n", 169 + data->p7iocPlssr, data->p7iocCsr); 170 + if 
(data->lemFir || data->lemErrorMask || 171 + data->lemWOF) 172 + pr_info(" Lem: %016llx %016llx %016llx\n", 173 + data->lemFir, data->lemErrorMask, 174 + data->lemWOF); 175 + if (data->phbErrorStatus || data->phbFirstErrorStatus || 176 + data->phbErrorLog0 || data->phbErrorLog1) 177 + pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n", 178 + data->phbErrorStatus, data->phbFirstErrorStatus, 179 + data->phbErrorLog0, data->phbErrorLog1); 180 + if (data->mmioErrorStatus || data->mmioFirstErrorStatus || 181 + data->mmioErrorLog0 || data->mmioErrorLog1) 182 + pr_info(" OutErr: %016llx %016llx %016llx %016llx\n", 183 + data->mmioErrorStatus, data->mmioFirstErrorStatus, 184 + data->mmioErrorLog0, data->mmioErrorLog1); 185 + if (data->dma0ErrorStatus || data->dma0FirstErrorStatus || 186 + data->dma0ErrorLog0 || data->dma0ErrorLog1) 187 + pr_info(" InAErr: %016llx %016llx %016llx %016llx\n", 188 + data->dma0ErrorStatus, data->dma0FirstErrorStatus, 189 + data->dma0ErrorLog0, data->dma0ErrorLog1); 190 + if (data->dma1ErrorStatus || data->dma1FirstErrorStatus || 191 + data->dma1ErrorLog0 || data->dma1ErrorLog1) 192 + pr_info(" InBErr: %016llx %016llx %016llx %016llx\n", 193 + data->dma1ErrorStatus, data->dma1FirstErrorStatus, 194 + data->dma1ErrorLog0, data->dma1ErrorLog1); 180 195 181 196 for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { 182 197 if ((data->pestA[i] >> 63) == 0 && 183 198 (data->pestB[i] >> 63) == 0) 184 199 continue; 185 200 186 - pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); 187 - pr_info(" PESTB: %016llx\n", data->pestB[i]); 201 + pr_info(" PE[%3d] A/B: %016llx %016llx\n", 202 + i, data->pestA[i], data->pestB[i]); 188 203 } 189 204 } 190 205 ··· 212 197 data = (struct OpalIoPhb3ErrorData*)common; 213 198 pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n", 214 199 hose->global_number, common->version); 215 - 216 - pr_info(" brdgCtl: %08x\n", data->brdgCtl); 217 - 218 - pr_info(" portStatusReg: %08x\n", data->portStatusReg); 219 - pr_info(" 
rootCmplxStatus: %08x\n", data->rootCmplxStatus); 220 - pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); 221 - 222 - pr_info(" deviceStatus: %08x\n", data->deviceStatus); 223 - pr_info(" slotStatus: %08x\n", data->slotStatus); 224 - pr_info(" linkStatus: %08x\n", data->linkStatus); 225 - pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); 226 - pr_info(" devSecStatus: %08x\n", data->devSecStatus); 227 - 228 - pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); 229 - pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); 230 - pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus); 231 - pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); 232 - pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); 233 - pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); 234 - pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); 235 - pr_info(" sourceId: %08x\n", data->sourceId); 236 - pr_info(" errorClass: %016llx\n", data->errorClass); 237 - pr_info(" correlator: %016llx\n", data->correlator); 238 - 239 - pr_info(" nFir: %016llx\n", data->nFir); 240 - pr_info(" nFirMask: %016llx\n", data->nFirMask); 241 - pr_info(" nFirWOF: %016llx\n", data->nFirWOF); 242 - pr_info(" PhbPlssr: %016llx\n", data->phbPlssr); 243 - pr_info(" PhbCsr: %016llx\n", data->phbCsr); 244 - pr_info(" lemFir: %016llx\n", data->lemFir); 245 - pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); 246 - pr_info(" lemWOF: %016llx\n", data->lemWOF); 247 - pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); 248 - pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); 249 - pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); 250 - pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); 251 - pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus); 252 - pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); 253 - pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0); 254 - pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); 255 - pr_info(" 
dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); 256 - pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); 257 - pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); 258 - pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); 259 - pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); 260 - pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus); 261 - pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); 262 - pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1); 200 + if (data->brdgCtl) 201 + pr_info(" brdgCtl: %08x\n", 202 + data->brdgCtl); 203 + if (data->portStatusReg || data->rootCmplxStatus || 204 + data->busAgentStatus) 205 + pr_info(" UtlSts: %08x %08x %08x\n", 206 + data->portStatusReg, data->rootCmplxStatus, 207 + data->busAgentStatus); 208 + if (data->deviceStatus || data->slotStatus || 209 + data->linkStatus || data->devCmdStatus || 210 + data->devSecStatus) 211 + pr_info(" RootSts: %08x %08x %08x %08x %08x\n", 212 + data->deviceStatus, data->slotStatus, 213 + data->linkStatus, data->devCmdStatus, 214 + data->devSecStatus); 215 + if (data->rootErrorStatus || data->uncorrErrorStatus || 216 + data->corrErrorStatus) 217 + pr_info(" RootErrSts: %08x %08x %08x\n", 218 + data->rootErrorStatus, data->uncorrErrorStatus, 219 + data->corrErrorStatus); 220 + if (data->tlpHdr1 || data->tlpHdr2 || 221 + data->tlpHdr3 || data->tlpHdr4) 222 + pr_info(" RootErrLog: %08x %08x %08x %08x\n", 223 + data->tlpHdr1, data->tlpHdr2, 224 + data->tlpHdr3, data->tlpHdr4); 225 + if (data->sourceId || data->errorClass || 226 + data->correlator) 227 + pr_info(" RootErrLog1: %08x %016llx %016llx\n", 228 + data->sourceId, data->errorClass, 229 + data->correlator); 230 + if (data->nFir || data->nFirMask || 231 + data->nFirWOF) 232 + pr_info(" nFir: %016llx %016llx %016llx\n", 233 + data->nFir, data->nFirMask, 234 + data->nFirWOF); 235 + if (data->phbPlssr || data->phbCsr) 236 + pr_info(" PhbSts: %016llx %016llx\n", 
237 + data->phbPlssr, data->phbCsr); 238 + if (data->lemFir || data->lemErrorMask || 239 + data->lemWOF) 240 + pr_info(" Lem: %016llx %016llx %016llx\n", 241 + data->lemFir, data->lemErrorMask, 242 + data->lemWOF); 243 + if (data->phbErrorStatus || data->phbFirstErrorStatus || 244 + data->phbErrorLog0 || data->phbErrorLog1) 245 + pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n", 246 + data->phbErrorStatus, data->phbFirstErrorStatus, 247 + data->phbErrorLog0, data->phbErrorLog1); 248 + if (data->mmioErrorStatus || data->mmioFirstErrorStatus || 249 + data->mmioErrorLog0 || data->mmioErrorLog1) 250 + pr_info(" OutErr: %016llx %016llx %016llx %016llx\n", 251 + data->mmioErrorStatus, data->mmioFirstErrorStatus, 252 + data->mmioErrorLog0, data->mmioErrorLog1); 253 + if (data->dma0ErrorStatus || data->dma0FirstErrorStatus || 254 + data->dma0ErrorLog0 || data->dma0ErrorLog1) 255 + pr_info(" InAErr: %016llx %016llx %016llx %016llx\n", 256 + data->dma0ErrorStatus, data->dma0FirstErrorStatus, 257 + data->dma0ErrorLog0, data->dma0ErrorLog1); 258 + if (data->dma1ErrorStatus || data->dma1FirstErrorStatus || 259 + data->dma1ErrorLog0 || data->dma1ErrorLog1) 260 + pr_info(" InBErr: %016llx %016llx %016llx %016llx\n", 261 + data->dma1ErrorStatus, data->dma1FirstErrorStatus, 262 + data->dma1ErrorLog0, data->dma1ErrorLog1); 263 263 264 264 for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) { 265 265 if ((data->pestA[i] >> 63) == 0 && 266 266 (data->pestB[i] >> 63) == 0) 267 267 continue; 268 268 269 - pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); 270 - pr_info(" PESTB: %016llx\n", data->pestB[i]); 269 + pr_info(" PE[%3d] A/B: %016llx %016llx\n", 270 + i, data->pestA[i], data->pestB[i]); 271 271 } 272 272 } 273 273
+1 -1
arch/powerpc/platforms/pseries/eeh_pseries.c
··· 265 265 enable = 1; 266 266 267 267 if (enable) { 268 - eeh_subsystem_enabled = 1; 268 + eeh_set_enable(true); 269 269 eeh_add_to_parent_pe(edev); 270 270 271 271 pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
+11 -11
arch/powerpc/platforms/pseries/hotplug-cpu.c
··· 35 35 #include "offline_states.h" 36 36 37 37 /* This version can't take the spinlock, because it never returns */ 38 - static struct rtas_args rtas_stop_self_args = { 39 - .token = RTAS_UNKNOWN_SERVICE, 40 - .nargs = 0, 41 - .nret = 1, 42 - .rets = &rtas_stop_self_args.args[0], 43 - }; 38 + static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE; 44 39 45 40 static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) = 46 41 CPU_STATE_OFFLINE; ··· 88 93 89 94 static void rtas_stop_self(void) 90 95 { 91 - struct rtas_args *args = &rtas_stop_self_args; 96 + struct rtas_args args = { 97 + .token = cpu_to_be32(rtas_stop_self_token), 98 + .nargs = 0, 99 + .nret = 1, 100 + .rets = &args.args[0], 101 + }; 92 102 93 103 local_irq_disable(); 94 104 95 - BUG_ON(args->token == RTAS_UNKNOWN_SERVICE); 105 + BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE); 96 106 97 107 printk("cpu %u (hwid %u) Ready to die...\n", 98 108 smp_processor_id(), hard_smp_processor_id()); 99 - enter_rtas(__pa(args)); 109 + enter_rtas(__pa(&args)); 100 110 101 111 panic("Alas, I survived.\n"); 102 112 } ··· 392 392 } 393 393 } 394 394 395 - rtas_stop_self_args.token = rtas_token("stop-self"); 395 + rtas_stop_self_token = rtas_token("stop-self"); 396 396 qcss_tok = rtas_token("query-cpu-stopped-state"); 397 397 398 - if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE || 398 + if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE || 399 399 qcss_tok == RTAS_UNKNOWN_SERVICE) { 400 400 printk(KERN_INFO "CPU Hotplug not supported by firmware " 401 401 "- disabling.\n");
+15 -7
arch/powerpc/platforms/pseries/pci.c
··· 113 113 { 114 114 struct device_node *dn, *pdn; 115 115 struct pci_bus *bus; 116 - const __be32 *pcie_link_speed_stats; 116 + u32 pcie_link_speed_stats[2]; 117 + int rc; 117 118 118 119 bus = bridge->bus; 119 120 ··· 123 122 return 0; 124 123 125 124 for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { 126 - pcie_link_speed_stats = of_get_property(pdn, 127 - "ibm,pcie-link-speed-stats", NULL); 128 - if (pcie_link_speed_stats) 125 + rc = of_property_read_u32_array(pdn, 126 + "ibm,pcie-link-speed-stats", 127 + &pcie_link_speed_stats[0], 2); 128 + if (!rc) 129 129 break; 130 130 } 131 131 132 132 of_node_put(pdn); 133 133 134 - if (!pcie_link_speed_stats) { 134 + if (rc) { 135 135 pr_err("no ibm,pcie-link-speed-stats property\n"); 136 136 return 0; 137 137 } 138 138 139 - switch (be32_to_cpup(pcie_link_speed_stats)) { 139 + switch (pcie_link_speed_stats[0]) { 140 140 case 0x01: 141 141 bus->max_bus_speed = PCIE_SPEED_2_5GT; 142 142 break; 143 143 case 0x02: 144 144 bus->max_bus_speed = PCIE_SPEED_5_0GT; 145 145 break; 146 + case 0x04: 147 + bus->max_bus_speed = PCIE_SPEED_8_0GT; 148 + break; 146 149 default: 147 150 bus->max_bus_speed = PCI_SPEED_UNKNOWN; 148 151 break; 149 152 } 150 153 151 - switch (be32_to_cpup(pcie_link_speed_stats)) { 154 + switch (pcie_link_speed_stats[1]) { 152 155 case 0x01: 153 156 bus->cur_bus_speed = PCIE_SPEED_2_5GT; 154 157 break; 155 158 case 0x02: 156 159 bus->cur_bus_speed = PCIE_SPEED_5_0GT; 160 + break; 161 + case 0x04: 162 + bus->cur_bus_speed = PCIE_SPEED_8_0GT; 157 163 break; 158 164 default: 159 165 bus->cur_bus_speed = PCI_SPEED_UNKNOWN;
+1 -1
arch/s390/kernel/compat_wrapper.S
··· 1421 1421 ENTRY(sys_sched_getattr_wrapper) 1422 1422 lgfr %r2,%r2 # pid_t 1423 1423 llgtr %r3,%r3 # const char __user * 1424 - llgfr %r3,%r3 # unsigned int 1424 + llgfr %r4,%r4 # unsigned int 1425 1425 jg sys_sched_getattr
+5 -3
arch/s390/pci/pci_dma.c
··· 206 206 zdev->dma_table = NULL; 207 207 } 208 208 209 - static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start, 210 - int size) 209 + static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, 210 + unsigned long start, int size) 211 211 { 212 - unsigned long boundary_size = 0x1000000; 212 + unsigned long boundary_size; 213 213 214 + boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1, 215 + PAGE_SIZE) >> PAGE_SHIFT; 214 216 return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages, 215 217 start, size, 0, boundary_size, 0); 216 218 }
+1 -1
arch/sh/include/cpu-sh2/cpu/cache.h
··· 18 18 #define SH_CACHE_ASSOC 8 19 19 20 20 #if defined(CONFIG_CPU_SUBTYPE_SH7619) 21 - #define CCR 0xffffffec 21 + #define SH_CCR 0xffffffec 22 22 23 23 #define CCR_CACHE_CE 0x01 /* Cache enable */ 24 24 #define CCR_CACHE_WT 0x02 /* CCR[bit1=1,bit2=1] */
+2 -2
arch/sh/include/cpu-sh2a/cpu/cache.h
··· 17 17 #define SH_CACHE_COMBINED 4 18 18 #define SH_CACHE_ASSOC 8 19 19 20 - #define CCR 0xfffc1000 /* CCR1 */ 21 - #define CCR2 0xfffc1004 20 + #define SH_CCR 0xfffc1000 /* CCR1 */ 21 + #define SH_CCR2 0xfffc1004 22 22 23 23 /* 24 24 * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not
+1 -1
arch/sh/include/cpu-sh3/cpu/cache.h
··· 17 17 #define SH_CACHE_COMBINED 4 18 18 #define SH_CACHE_ASSOC 8 19 19 20 - #define CCR 0xffffffec /* Address of Cache Control Register */ 20 + #define SH_CCR 0xffffffec /* Address of Cache Control Register */ 21 21 22 22 #define CCR_CACHE_CE 0x01 /* Cache Enable */ 23 23 #define CCR_CACHE_WT 0x02 /* Write-Through (for P0,U0,P3) (else writeback) */
+1 -1
arch/sh/include/cpu-sh4/cpu/cache.h
··· 17 17 #define SH_CACHE_COMBINED 4 18 18 #define SH_CACHE_ASSOC 8 19 19 20 - #define CCR 0xff00001c /* Address of Cache Control Register */ 20 + #define SH_CCR 0xff00001c /* Address of Cache Control Register */ 21 21 #define CCR_CACHE_OCE 0x0001 /* Operand Cache Enable */ 22 22 #define CCR_CACHE_WT 0x0002 /* Write-Through (for P0,U0,P3) (else writeback)*/ 23 23 #define CCR_CACHE_CB 0x0004 /* Copy-Back (for P1) (else writethrough) */
+2 -2
arch/sh/kernel/cpu/init.c
··· 112 112 unsigned long ccr, flags; 113 113 114 114 jump_to_uncached(); 115 - ccr = __raw_readl(CCR); 115 + ccr = __raw_readl(SH_CCR); 116 116 117 117 /* 118 118 * At this point we don't know whether the cache is enabled or not - a ··· 189 189 190 190 l2_cache_init(); 191 191 192 - __raw_writel(flags, CCR); 192 + __raw_writel(flags, SH_CCR); 193 193 back_to_cached(); 194 194 } 195 195 #else
+1 -1
arch/sh/mm/cache-debugfs.c
··· 36 36 */ 37 37 jump_to_uncached(); 38 38 39 - ccr = __raw_readl(CCR); 39 + ccr = __raw_readl(SH_CCR); 40 40 if ((ccr & CCR_CACHE_ENABLE) == 0) { 41 41 back_to_cached(); 42 42
+2 -2
arch/sh/mm/cache-sh2.c
··· 63 63 local_irq_save(flags); 64 64 jump_to_uncached(); 65 65 66 - ccr = __raw_readl(CCR); 66 + ccr = __raw_readl(SH_CCR); 67 67 ccr |= CCR_CACHE_INVALIDATE; 68 - __raw_writel(ccr, CCR); 68 + __raw_writel(ccr, SH_CCR); 69 69 70 70 back_to_cached(); 71 71 local_irq_restore(flags);
+4 -2
arch/sh/mm/cache-sh2a.c
··· 134 134 135 135 /* If there are too many pages then just blow the cache */ 136 136 if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) { 137 - __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR); 137 + __raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE, 138 + SH_CCR); 138 139 } else { 139 140 for (v = begin; v < end; v += L1_CACHE_BYTES) 140 141 sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v); ··· 168 167 /* I-Cache invalidate */ 169 168 /* If there are too many pages then just blow the cache */ 170 169 if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { 171 - __raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR); 170 + __raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE, 171 + SH_CCR); 172 172 } else { 173 173 for (v = start; v < end; v += L1_CACHE_BYTES) 174 174 sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
+2 -2
arch/sh/mm/cache-sh4.c
··· 133 133 jump_to_uncached(); 134 134 135 135 /* Flush I-cache */ 136 - ccr = __raw_readl(CCR); 136 + ccr = __raw_readl(SH_CCR); 137 137 ccr |= CCR_CACHE_ICI; 138 - __raw_writel(ccr, CCR); 138 + __raw_writel(ccr, SH_CCR); 139 139 140 140 /* 141 141 * back_to_cached() will take care of the barrier for us, don't add
+2 -2
arch/sh/mm/cache-shx3.c
··· 19 19 { 20 20 unsigned int ccr; 21 21 22 - ccr = __raw_readl(CCR); 22 + ccr = __raw_readl(SH_CCR); 23 23 24 24 /* 25 25 * If we've got cache aliases, resolve them in hardware. ··· 40 40 ccr |= CCR_CACHE_IBE; 41 41 #endif 42 42 43 - writel_uncached(ccr, CCR); 43 + writel_uncached(ccr, SH_CCR); 44 44 }
+2 -2
arch/sh/mm/cache.c
··· 285 285 { 286 286 unsigned int cache_disabled = 0; 287 287 288 - #ifdef CCR 289 - cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE); 288 + #ifdef SH_CCR 289 + cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE); 290 290 #endif 291 291 292 292 compute_alias(&boot_cpu_data.icache);
+1 -1
arch/sparc/Kconfig
··· 27 27 select RTC_DRV_M48T59 28 28 select HAVE_DMA_ATTRS 29 29 select HAVE_DMA_API_DEBUG 30 - select HAVE_ARCH_JUMP_LABEL 30 + select HAVE_ARCH_JUMP_LABEL if SPARC64 31 31 select GENERIC_IRQ_SHOW 32 32 select ARCH_WANT_IPC_PARSE_VERSION 33 33 select GENERIC_PCI_IOMAP
+2
arch/sparc/mm/srmmu.c
··· 14 14 #include <linux/pagemap.h> 15 15 #include <linux/vmalloc.h> 16 16 #include <linux/kdebug.h> 17 + #include <linux/export.h> 17 18 #include <linux/kernel.h> 18 19 #include <linux/init.h> 19 20 #include <linux/log2.h> ··· 63 62 static pgd_t *srmmu_swapper_pg_dir; 64 63 65 64 const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops; 65 + EXPORT_SYMBOL(sparc32_cachetlb_ops); 66 66 67 67 #ifdef CONFIG_SMP 68 68 const struct sparc32_cachetlb_ops *local_ops;
+5 -4
arch/x86/boot/compressed/aslr.c
··· 111 111 }; 112 112 113 113 #define MEM_AVOID_MAX 5 114 - struct mem_vector mem_avoid[MEM_AVOID_MAX]; 114 + static struct mem_vector mem_avoid[MEM_AVOID_MAX]; 115 115 116 116 static bool mem_contains(struct mem_vector *region, struct mem_vector *item) 117 117 { ··· 180 180 } 181 181 182 182 /* Does this memory vector overlap a known avoided area? */ 183 - bool mem_avoid_overlap(struct mem_vector *img) 183 + static bool mem_avoid_overlap(struct mem_vector *img) 184 184 { 185 185 int i; 186 186 ··· 192 192 return false; 193 193 } 194 194 195 - unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN]; 196 - unsigned long slot_max = 0; 195 + static unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / 196 + CONFIG_PHYSICAL_ALIGN]; 197 + static unsigned long slot_max; 197 198 198 199 static void slots_append(unsigned long addr) 199 200 {
+1
arch/x86/include/asm/efi.h
··· 134 134 extern void __init old_map_region(efi_memory_desc_t *md); 135 135 extern void __init runtime_code_page_mkexec(void); 136 136 extern void __init efi_runtime_mkexec(void); 137 + extern void __init efi_apply_memmap_quirks(void); 137 138 138 139 struct efi_setup_data { 139 140 u64 fw_vendor;
+1 -1
arch/x86/include/asm/tsc.h
··· 66 66 extern void tsc_restore_sched_clock_state(void); 67 67 68 68 /* MSR based TSC calibration for Intel Atom SoC platforms */ 69 - int try_msr_calibrate_tsc(unsigned long *fast_calibrate); 69 + unsigned long try_msr_calibrate_tsc(void); 70 70 71 71 #endif /* _ASM_X86_TSC_H */
+9 -2
arch/x86/kernel/cpu/perf_event.c
··· 1192 1192 for (i = 0; i < cpuc->n_events; i++) { 1193 1193 if (event == cpuc->event_list[i]) { 1194 1194 1195 + if (i >= cpuc->n_events - cpuc->n_added) 1196 + --cpuc->n_added; 1197 + 1195 1198 if (x86_pmu.put_event_constraints) 1196 1199 x86_pmu.put_event_constraints(cpuc, event); 1197 1200 ··· 1524 1521 1525 1522 pr_cont("%s PMU driver.\n", x86_pmu.name); 1526 1523 1524 + x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ 1525 + 1527 1526 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) 1528 1527 quirk->func(); 1529 1528 ··· 1539 1534 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, 1540 1535 0, x86_pmu.num_counters, 0, 0); 1541 1536 1542 - x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ 1543 1537 x86_pmu_format_group.attrs = x86_pmu.format_attrs; 1544 1538 1545 1539 if (x86_pmu.event_attrs) ··· 1824 1820 if (ret) 1825 1821 return ret; 1826 1822 1823 + if (x86_pmu.attr_rdpmc_broken) 1824 + return -ENOTSUPP; 1825 + 1827 1826 if (!!val != !!x86_pmu.attr_rdpmc) { 1828 1827 x86_pmu.attr_rdpmc = !!val; 1829 - smp_call_function(change_rdpmc, (void *)val, 1); 1828 + on_each_cpu(change_rdpmc, (void *)val, 1); 1830 1829 } 1831 1830 1832 1831 return count;
+1
arch/x86/kernel/cpu/perf_event.h
··· 409 409 /* 410 410 * sysfs attrs 411 411 */ 412 + int attr_rdpmc_broken; 412 413 int attr_rdpmc; 413 414 struct attribute **format_attrs; 414 415 struct attribute **event_attrs;
+3 -8
arch/x86/kernel/cpu/perf_event_intel.c
··· 1361 1361 intel_pmu_disable_all(); 1362 1362 handled = intel_pmu_drain_bts_buffer(); 1363 1363 status = intel_pmu_get_status(); 1364 - if (!status) { 1365 - intel_pmu_enable_all(0); 1366 - return handled; 1367 - } 1364 + if (!status) 1365 + goto done; 1368 1366 1369 1367 loops = 0; 1370 1368 again: ··· 2308 2310 if (version > 1) 2309 2311 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); 2310 2312 2311 - /* 2312 - * v2 and above have a perf capabilities MSR 2313 - */ 2314 - if (version > 1) { 2313 + if (boot_cpu_has(X86_FEATURE_PDCM)) { 2315 2314 u64 capabilities; 2316 2315 2317 2316 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
+11 -3
arch/x86/kernel/cpu/perf_event_intel_uncore.c
··· 501 501 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, 502 502 SNBEP_CBO_PMON_CTL_TID_EN, 0x1), 503 503 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), 504 + SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6), 504 505 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), 506 + SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6), 505 507 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), 508 + SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6), 506 509 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), 507 510 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), 508 511 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), ··· 1181 1178 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, 1182 1179 SNBEP_CBO_PMON_CTL_TID_EN, 0x1), 1183 1180 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), 1184 - SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), 1185 - SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), 1186 - SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), 1181 + SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), 1187 1182 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), 1183 + SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), 1184 + SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), 1185 + SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc), 1186 + SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), 1187 + SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc), 1188 + SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), 1189 + SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc), 1188 1190 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10), 1189 1191 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10), 1190 1192 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
+33 -15
arch/x86/kernel/cpu/perf_event_p6.c
··· 231 231 232 232 }; 233 233 234 + static __init void p6_pmu_rdpmc_quirk(void) 235 + { 236 + if (boot_cpu_data.x86_mask < 9) { 237 + /* 238 + * PPro erratum 26; fixed in stepping 9 and above. 239 + */ 240 + pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n"); 241 + x86_pmu.attr_rdpmc_broken = 1; 242 + x86_pmu.attr_rdpmc = 0; 243 + } 244 + } 245 + 234 246 __init int p6_pmu_init(void) 235 247 { 248 + x86_pmu = p6_pmu; 249 + 236 250 switch (boot_cpu_data.x86_model) { 237 - case 1: 238 - case 3: /* Pentium Pro */ 239 - case 5: 240 - case 6: /* Pentium II */ 241 - case 7: 242 - case 8: 243 - case 11: /* Pentium III */ 244 - case 9: 245 - case 13: 246 - /* Pentium M */ 251 + case 1: /* Pentium Pro */ 252 + x86_add_quirk(p6_pmu_rdpmc_quirk); 247 253 break; 254 + 255 + case 3: /* Pentium II - Klamath */ 256 + case 5: /* Pentium II - Deschutes */ 257 + case 6: /* Pentium II - Mendocino */ 258 + break; 259 + 260 + case 7: /* Pentium III - Katmai */ 261 + case 8: /* Pentium III - Coppermine */ 262 + case 10: /* Pentium III Xeon */ 263 + case 11: /* Pentium III - Tualatin */ 264 + break; 265 + 266 + case 9: /* Pentium M - Banias */ 267 + case 13: /* Pentium M - Dothan */ 268 + break; 269 + 248 270 default: 249 - pr_cont("unsupported p6 CPU model %d ", 250 - boot_cpu_data.x86_model); 271 + pr_cont("unsupported p6 CPU model %d ", boot_cpu_data.x86_model); 251 272 return -ENODEV; 252 273 } 253 274 254 - x86_pmu = p6_pmu; 255 - 256 275 memcpy(hw_cache_event_ids, p6_hw_cache_event_ids, 257 276 sizeof(hw_cache_event_ids)); 258 - 259 277 260 278 return 0; 261 279 }
+6 -1
arch/x86/kernel/head_32.S
··· 544 544 /* This is global to keep gas from relaxing the jumps */ 545 545 ENTRY(early_idt_handler) 546 546 cld 547 + 548 + cmpl $2,(%esp) # X86_TRAP_NMI 549 + je is_nmi # Ignore NMI 550 + 547 551 cmpl $2,%ss:early_recursion_flag 548 552 je hlt_loop 549 553 incl %ss:early_recursion_flag ··· 598 594 pop %edx 599 595 pop %ecx 600 596 pop %eax 601 - addl $8,%esp /* drop vector number and error code */ 602 597 decl %ss:early_recursion_flag 598 + is_nmi: 599 + addl $8,%esp /* drop vector number and error code */ 603 600 iret 604 601 ENDPROC(early_idt_handler) 605 602
+5 -1
arch/x86/kernel/head_64.S
··· 343 343 ENTRY(early_idt_handler) 344 344 cld 345 345 346 + cmpl $2,(%rsp) # X86_TRAP_NMI 347 + je is_nmi # Ignore NMI 348 + 346 349 cmpl $2,early_recursion_flag(%rip) 347 350 jz 1f 348 351 incl early_recursion_flag(%rip) ··· 408 405 popq %rdx 409 406 popq %rcx 410 407 popq %rax 411 - addq $16,%rsp # drop vector number and error code 412 408 decl early_recursion_flag(%rip) 409 + is_nmi: 410 + addq $16,%rsp # drop vector number and error code 413 411 INTERRUPT_RETURN 414 412 ENDPROC(early_idt_handler) 415 413
+2
arch/x86/kernel/machine_kexec_64.c
··· 279 279 VMCOREINFO_SYMBOL(node_data); 280 280 VMCOREINFO_LENGTH(node_data, MAX_NUMNODES); 281 281 #endif 282 + vmcoreinfo_append_str("KERNELOFFSET=%lx\n", 283 + (unsigned long)&_text - __START_KERNEL); 282 284 } 283 285
+3 -1
arch/x86/kernel/pci-dma.c
··· 100 100 flag |= __GFP_ZERO; 101 101 again: 102 102 page = NULL; 103 - if (!(flag & GFP_ATOMIC)) 103 + /* CMA can be used only in the context which permits sleeping */ 104 + if (flag & __GFP_WAIT) 104 105 page = dma_alloc_from_contiguous(dev, count, get_order(size)); 106 + /* fallback */ 105 107 if (!page) 106 108 page = alloc_pages_node(dev_to_node(dev), flag, get_order(size)); 107 109 if (!page)
+2 -8
arch/x86/kernel/setup.c
··· 1239 1239 register_refined_jiffies(CLOCK_TICK_RATE); 1240 1240 1241 1241 #ifdef CONFIG_EFI 1242 - /* Once setup is done above, unmap the EFI memory map on 1243 - * mismatched firmware/kernel archtectures since there is no 1244 - * support for runtime services. 1245 - */ 1246 - if (efi_enabled(EFI_BOOT) && !efi_is_native()) { 1247 - pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); 1248 - efi_unmap_memmap(); 1249 - } 1242 + if (efi_enabled(EFI_BOOT)) 1243 + efi_apply_memmap_quirks(); 1250 1244 #endif 1251 1245 } 1252 1246
+2 -5
arch/x86/kernel/tsc.c
··· 653 653 654 654 /* Calibrate TSC using MSR for Intel Atom SoCs */ 655 655 local_irq_save(flags); 656 - i = try_msr_calibrate_tsc(&fast_calibrate); 656 + fast_calibrate = try_msr_calibrate_tsc(); 657 657 local_irq_restore(flags); 658 - if (i >= 0) { 659 - if (i == 0) 660 - pr_warn("Fast TSC calibration using MSR failed\n"); 658 + if (fast_calibrate) 661 659 return fast_calibrate; 662 - } 663 660 664 661 local_irq_save(flags); 665 662 fast_calibrate = quick_pit_calibrate();
+15 -15
arch/x86/kernel/tsc_msr.c
··· 53 53 /* TNG */ 54 54 { 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } }, 55 55 /* VLV2 */ 56 - { 6, 0x37, 1, { 0, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } }, 56 + { 6, 0x37, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } }, 57 57 /* ANN */ 58 58 { 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } }, 59 59 }; ··· 77 77 78 78 /* 79 79 * Do MSR calibration only for known/supported CPUs. 80 - * Return values: 81 - * -1: CPU is unknown/unsupported for MSR based calibration 82 - * 0: CPU is known/supported, but calibration failed 83 - * 1: CPU is known/supported, and calibration succeeded 80 + * 81 + * Returns the calibration value or 0 if MSR calibration failed. 84 82 */ 85 - int try_msr_calibrate_tsc(unsigned long *fast_calibrate) 83 + unsigned long try_msr_calibrate_tsc(void) 86 84 { 87 - int cpu_index; 88 85 u32 lo, hi, ratio, freq_id, freq; 86 + unsigned long res; 87 + int cpu_index; 89 88 90 89 cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model); 91 90 if (cpu_index < 0) 92 - return -1; 93 - 94 - *fast_calibrate = 0; 91 + return 0; 95 92 96 93 if (freq_desc_tables[cpu_index].msr_plat) { 97 94 rdmsr(MSR_PLATFORM_INFO, lo, hi); ··· 100 103 pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio); 101 104 102 105 if (!ratio) 103 - return 0; 106 + goto fail; 104 107 105 108 /* Get FSB FREQ ID */ 106 109 rdmsr(MSR_FSB_FREQ, lo, hi); ··· 109 112 pr_info("Resolved frequency ID: %u, frequency: %u KHz\n", 110 113 freq_id, freq); 111 114 if (!freq) 112 - return 0; 115 + goto fail; 113 116 114 117 /* TSC frequency = maximum resolved freq * maximum resolved bus ratio */ 115 - *fast_calibrate = freq * ratio; 116 - pr_info("TSC runs at %lu KHz\n", *fast_calibrate); 118 + res = freq * ratio; 119 + pr_info("TSC runs at %lu KHz\n", res); 117 120 118 121 #ifdef CONFIG_X86_LOCAL_APIC 119 122 lapic_timer_frequency = (freq * 1000) / HZ; 120 123 pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency); 121 124 #endif 125 
+ return res; 122 126 123 - return 1; 127 + fail: 128 + pr_warn("Fast TSC calibration using MSR failed\n"); 129 + return 0; 124 130 }
+1
arch/x86/kvm/mmu.c
··· 2672 2672 break; 2673 2673 } 2674 2674 2675 + drop_large_spte(vcpu, iterator.sptep); 2675 2676 if (!is_shadow_present_pte(*iterator.sptep)) { 2676 2677 u64 base_addr = iterator.addr; 2677 2678
+1 -1
arch/x86/kvm/vmx.c
··· 6688 6688 else if (is_page_fault(intr_info)) 6689 6689 return enable_ept; 6690 6690 else if (is_no_device(intr_info) && 6691 - !(nested_read_cr0(vmcs12) & X86_CR0_TS)) 6691 + !(vmcs12->guest_cr0 & X86_CR0_TS)) 6692 6692 return 0; 6693 6693 return vmcs12->exception_bitmap & 6694 6694 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
+1 -1
arch/x86/kvm/x86.c
··· 6186 6186 frag->len -= len; 6187 6187 } 6188 6188 6189 - if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) { 6189 + if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { 6190 6190 vcpu->mmio_needed = 0; 6191 6191 6192 6192 /* FIXME: return into emulator if single-stepping. */
+33 -14
arch/x86/mm/fault.c
··· 1020 1020 * This routine handles page faults. It determines the address, 1021 1021 * and the problem, and then passes it off to one of the appropriate 1022 1022 * routines. 1023 + * 1024 + * This function must have noinline because both callers 1025 + * {,trace_}do_page_fault() have notrace on. Having this an actual function 1026 + * guarantees there's a function trace entry. 1023 1027 */ 1024 - static void __kprobes 1025 - __do_page_fault(struct pt_regs *regs, unsigned long error_code) 1028 + static void __kprobes noinline 1029 + __do_page_fault(struct pt_regs *regs, unsigned long error_code, 1030 + unsigned long address) 1026 1031 { 1027 1032 struct vm_area_struct *vma; 1028 1033 struct task_struct *tsk; 1029 - unsigned long address; 1030 1034 struct mm_struct *mm; 1031 1035 int fault; 1032 1036 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 1033 1037 1034 1038 tsk = current; 1035 1039 mm = tsk->mm; 1036 - 1037 - /* Get the faulting address: */ 1038 - address = read_cr2(); 1039 1040 1040 1041 /* 1041 1042 * Detect and handle instructions that would cause a page fault for ··· 1249 1248 up_read(&mm->mmap_sem); 1250 1249 } 1251 1250 1252 - dotraplinkage void __kprobes 1251 + dotraplinkage void __kprobes notrace 1253 1252 do_page_fault(struct pt_regs *regs, unsigned long error_code) 1254 1253 { 1254 + unsigned long address = read_cr2(); /* Get the faulting address */ 1255 1255 enum ctx_state prev_state; 1256 1256 1257 + /* 1258 + * We must have this function tagged with __kprobes, notrace and call 1259 + * read_cr2() before calling anything else. To avoid calling any kind 1260 + * of tracing machinery before we've observed the CR2 value. 1261 + * 1262 + * exception_{enter,exit}() contain all sorts of tracepoints. 
1263 + */ 1264 + 1257 1265 prev_state = exception_enter(); 1258 - __do_page_fault(regs, error_code); 1266 + __do_page_fault(regs, error_code, address); 1259 1267 exception_exit(prev_state); 1260 1268 } 1261 1269 1262 - static void trace_page_fault_entries(struct pt_regs *regs, 1270 + #ifdef CONFIG_TRACING 1271 + static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs, 1263 1272 unsigned long error_code) 1264 1273 { 1265 1274 if (user_mode(regs)) 1266 - trace_page_fault_user(read_cr2(), regs, error_code); 1275 + trace_page_fault_user(address, regs, error_code); 1267 1276 else 1268 - trace_page_fault_kernel(read_cr2(), regs, error_code); 1277 + trace_page_fault_kernel(address, regs, error_code); 1269 1278 } 1270 1279 1271 - dotraplinkage void __kprobes 1280 + dotraplinkage void __kprobes notrace 1272 1281 trace_do_page_fault(struct pt_regs *regs, unsigned long error_code) 1273 1282 { 1283 + /* 1284 + * The exception_enter and tracepoint processing could 1285 + * trigger another page faults (user space callchain 1286 + * reading) and destroy the original cr2 value, so read 1287 + * the faulting address now. 1288 + */ 1289 + unsigned long address = read_cr2(); 1274 1290 enum ctx_state prev_state; 1275 1291 1276 1292 prev_state = exception_enter(); 1277 - trace_page_fault_entries(regs, error_code); 1278 - __do_page_fault(regs, error_code); 1293 + trace_page_fault_entries(address, regs, error_code); 1294 + __do_page_fault(regs, error_code, address); 1279 1295 exception_exit(prev_state); 1280 1296 } 1297 + #endif /* CONFIG_TRACING */
+20
arch/x86/platform/efi/efi.c
··· 52 52 #include <asm/tlbflush.h> 53 53 #include <asm/x86_init.h> 54 54 #include <asm/rtc.h> 55 + #include <asm/uv/uv.h> 55 56 56 57 #define EFI_DEBUG 57 58 ··· 1211 1210 return 0; 1212 1211 } 1213 1212 early_param("efi", parse_efi_cmdline); 1213 + 1214 + void __init efi_apply_memmap_quirks(void) 1215 + { 1216 + /* 1217 + * Once setup is done earlier, unmap the EFI memory map on mismatched 1218 + * firmware/kernel architectures since there is no support for runtime 1219 + * services. 1220 + */ 1221 + if (!efi_is_native()) { 1222 + pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); 1223 + efi_unmap_memmap(); 1224 + } 1225 + 1226 + /* 1227 + * UV doesn't support the new EFI pagetable mapping yet. 1228 + */ 1229 + if (is_uv_system()) 1230 + set_bit(EFI_OLD_MEMMAP, &x86_efi_facility); 1231 + }
+1 -2
arch/xtensa/Kconfig
··· 20 20 select HAVE_FUNCTION_TRACER 21 21 select HAVE_IRQ_TIME_ACCOUNTING 22 22 select HAVE_PERF_EVENTS 23 + select COMMON_CLK 23 24 help 24 25 Xtensa processors are 32-bit RISC machines designed by Tensilica 25 26 primarily for embedded systems. These processors are both ··· 81 80 config XTENSA_VARIANT_FSF 82 81 bool "fsf - default (not generic) configuration" 83 82 select MMU 84 - select HAVE_XTENSA_GPIO32 85 83 86 84 config XTENSA_VARIANT_DC232B 87 85 bool "dc232b - Diamond 232L Standard Core Rev.B (LE)" ··· 135 135 config SMP 136 136 bool "Enable Symmetric multi-processing support" 137 137 depends on HAVE_SMP 138 - select USE_GENERIC_SMP_HELPERS 139 138 select GENERIC_SMP_IDLE_THREAD 140 139 help 141 140 Enabled SMP Software; allows more than one CPU/CORE
+9 -3
arch/xtensa/boot/dts/xtfpga.dtsi
··· 35 35 interrupt-controller; 36 36 }; 37 37 38 + clocks { 39 + osc: main-oscillator { 40 + #clock-cells = <0>; 41 + compatible = "fixed-clock"; 42 + }; 43 + }; 44 + 38 45 serial0: serial@fd050020 { 39 46 device_type = "serial"; 40 47 compatible = "ns16550a"; ··· 49 42 reg = <0xfd050020 0x20>; 50 43 reg-shift = <2>; 51 44 interrupts = <0 1>; /* external irq 0 */ 52 - /* Filled in by platform_setup from FPGA register 53 - * clock-frequency = <100000000>; 54 - */ 45 + clocks = <&osc>; 55 46 }; 56 47 57 48 enet0: ethoc@fd030000 { ··· 57 52 reg = <0xfd030000 0x4000 0xfd800000 0x4000>; 58 53 interrupts = <1 1>; /* external irq 1 */ 59 54 local-mac-address = [00 50 c2 13 6f 00]; 55 + clocks = <&osc>; 60 56 }; 61 57 };
+1 -1
arch/xtensa/include/asm/io.h
··· 25 25 26 26 #ifdef CONFIG_MMU 27 27 28 - #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF 28 + #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) 29 29 extern unsigned long xtensa_kio_paddr; 30 30 31 31 static inline unsigned long xtensa_get_kio_paddr(void)
+29 -17
arch/xtensa/include/asm/traps.h
··· 23 23 24 24 static inline void spill_registers(void) 25 25 { 26 - 26 + #if XCHAL_NUM_AREGS > 16 27 27 __asm__ __volatile__ ( 28 - "movi a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t" 29 - "mov a12, a0\n\t" 30 - "rsr a13, sar\n\t" 31 - "xsr a14, ps\n\t" 32 - "movi a0, _spill_registers\n\t" 33 - "rsync\n\t" 34 - "callx0 a0\n\t" 35 - "mov a0, a12\n\t" 36 - "wsr a13, sar\n\t" 37 - "wsr a14, ps\n\t" 38 - : : 39 - #if defined(CONFIG_FRAME_POINTER) 40 - : "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15", 41 - #else 42 - : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", 28 + " call12 1f\n" 29 + " _j 2f\n" 30 + " retw\n" 31 + " .align 4\n" 32 + "1:\n" 33 + " _entry a1, 48\n" 34 + " addi a12, a0, 3\n" 35 + #if XCHAL_NUM_AREGS > 32 36 + " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n" 37 + " _entry a1, 48\n" 38 + " mov a12, a0\n" 39 + " .endr\n" 43 40 #endif 44 - "memory"); 41 + " _entry a1, 48\n" 42 + #if XCHAL_NUM_AREGS % 12 == 0 43 + " mov a8, a8\n" 44 + #elif XCHAL_NUM_AREGS % 12 == 4 45 + " mov a12, a12\n" 46 + #elif XCHAL_NUM_AREGS % 12 == 8 47 + " mov a4, a4\n" 48 + #endif 49 + " retw\n" 50 + "2:\n" 51 + : : : "a12", "a13", "memory"); 52 + #else 53 + __asm__ __volatile__ ( 54 + " mov a12, a12\n" 55 + : : : "memory"); 56 + #endif 45 57 } 46 58 47 59 #endif /* _XTENSA_TRAPS_H */
+1 -1
arch/xtensa/include/asm/vectors.h
··· 25 25 #define XCHAL_KIO_DEFAULT_PADDR 0xf0000000 26 26 #define XCHAL_KIO_SIZE 0x10000000 27 27 28 - #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF 28 + #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) 29 29 #define XCHAL_KIO_PADDR xtensa_get_kio_paddr() 30 30 #else 31 31 #define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
+6 -1
arch/xtensa/include/uapi/asm/unistd.h
··· 734 734 #define __NR_accept4 333 735 735 __SYSCALL(333, sys_accept4, 4) 736 736 737 - #define __NR_syscall_count 334 737 + #define __NR_sched_setattr 334 738 + __SYSCALL(334, sys_sched_setattr, 2) 739 + #define __NR_sched_getattr 335 740 + __SYSCALL(335, sys_sched_getattr, 3) 741 + 742 + #define __NR_syscall_count 336 738 743 739 744 /* 740 745 * sysxtensa syscall handler
+235 -222
arch/xtensa/kernel/entry.S
··· 1081 1081 1082 1082 rsr a0, sar 1083 1083 s32i a3, a2, PT_AREG3 1084 + s32i a0, a2, PT_SAR 1085 + 1086 + /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */ 1087 + 1084 1088 s32i a4, a2, PT_AREG4 1085 - s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5 1086 - 1087 - /* The spill routine might clobber a7, a11, and a15. */ 1088 - 1089 1089 s32i a7, a2, PT_AREG7 1090 + s32i a8, a2, PT_AREG8 1090 1091 s32i a11, a2, PT_AREG11 1092 + s32i a12, a2, PT_AREG12 1091 1093 s32i a15, a2, PT_AREG15 1092 1094 1093 - call0 _spill_registers # destroys a3, a4, and SAR 1095 + /* 1096 + * Rotate ws so that the current windowbase is at bit 0. 1097 + * Assume ws = xxxwww1yy (www1 current window frame). 1098 + * Rotate ws right so that a4 = yyxxxwww1. 1099 + */ 1100 + 1101 + rsr a0, windowbase 1102 + rsr a3, windowstart # a3 = xxxwww1yy 1103 + ssr a0 # holds WB 1104 + slli a0, a3, WSBITS 1105 + or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy 1106 + srl a3, a3 # a3 = 00xxxwww1yyxxxwww1 1107 + 1108 + /* We are done if there are no more than the current register frame. */ 1109 + 1110 + extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww 1111 + movi a0, (1 << (WSBITS-1)) 1112 + _beqz a3, .Lnospill # only one active frame? jump 1113 + 1114 + /* We want 1 at the top, so that we return to the current windowbase */ 1115 + 1116 + or a3, a3, a0 # 1yyxxxwww 1117 + 1118 + /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ 1119 + 1120 + wsr a3, windowstart # save shifted windowstart 1121 + neg a0, a3 1122 + and a3, a0, a3 # first bit set from right: 000010000 1123 + 1124 + ffs_ws a0, a3 # a0: shifts to skip empty frames 1125 + movi a3, WSBITS 1126 + sub a0, a3, a0 # WSBITS-a0:number of 0-bits from right 1127 + ssr a0 # save in SAR for later. 
1128 + 1129 + rsr a3, windowbase 1130 + add a3, a3, a0 1131 + wsr a3, windowbase 1132 + rsync 1133 + 1134 + rsr a3, windowstart 1135 + srl a3, a3 # shift windowstart 1136 + 1137 + /* WB is now just one frame below the oldest frame in the register 1138 + window. WS is shifted so the oldest frame is in bit 0, thus, WB 1139 + and WS differ by one 4-register frame. */ 1140 + 1141 + /* Save frames. Depending what call was used (call4, call8, call12), 1142 + * we have to save 4,8. or 12 registers. 1143 + */ 1144 + 1145 + 1146 + .Lloop: _bbsi.l a3, 1, .Lc4 1147 + _bbci.l a3, 2, .Lc12 1148 + 1149 + .Lc8: s32e a4, a13, -16 1150 + l32e a4, a5, -12 1151 + s32e a8, a4, -32 1152 + s32e a5, a13, -12 1153 + s32e a6, a13, -8 1154 + s32e a7, a13, -4 1155 + s32e a9, a4, -28 1156 + s32e a10, a4, -24 1157 + s32e a11, a4, -20 1158 + srli a11, a3, 2 # shift windowbase by 2 1159 + rotw 2 1160 + _bnei a3, 1, .Lloop 1161 + j .Lexit 1162 + 1163 + .Lc4: s32e a4, a9, -16 1164 + s32e a5, a9, -12 1165 + s32e a6, a9, -8 1166 + s32e a7, a9, -4 1167 + 1168 + srli a7, a3, 1 1169 + rotw 1 1170 + _bnei a3, 1, .Lloop 1171 + j .Lexit 1172 + 1173 + .Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 2 shouldn't be zero! 1174 + 1175 + /* 12-register frame (call12) */ 1176 + 1177 + l32e a0, a5, -12 1178 + s32e a8, a0, -48 1179 + mov a8, a0 1180 + 1181 + s32e a9, a8, -44 1182 + s32e a10, a8, -40 1183 + s32e a11, a8, -36 1184 + s32e a12, a8, -32 1185 + s32e a13, a8, -28 1186 + s32e a14, a8, -24 1187 + s32e a15, a8, -20 1188 + srli a15, a3, 3 1189 + 1190 + /* The stack pointer for a4..a7 is out of reach, so we rotate the 1191 + * window, grab the stackpointer, and rotate back. 1192 + * Alternatively, we could also use the following approach, but that 1193 + * makes the fixup routine much more complicated: 1194 + * rotw 1 1195 + * s32e a0, a13, -16 1196 + * ... 
1197 + * rotw 2 1198 + */ 1199 + 1200 + rotw 1 1201 + mov a4, a13 1202 + rotw -1 1203 + 1204 + s32e a4, a8, -16 1205 + s32e a5, a8, -12 1206 + s32e a6, a8, -8 1207 + s32e a7, a8, -4 1208 + 1209 + rotw 3 1210 + 1211 + _beqi a3, 1, .Lexit 1212 + j .Lloop 1213 + 1214 + .Lexit: 1215 + 1216 + /* Done. Do the final rotation and set WS */ 1217 + 1218 + rotw 1 1219 + rsr a3, windowbase 1220 + ssl a3 1221 + movi a3, 1 1222 + sll a3, a3 1223 + wsr a3, windowstart 1224 + .Lnospill: 1094 1225 1095 1226 /* Advance PC, restore registers and SAR, and return from exception. */ 1096 1227 1097 - l32i a3, a2, PT_AREG5 1098 - l32i a4, a2, PT_AREG4 1228 + l32i a3, a2, PT_SAR 1099 1229 l32i a0, a2, PT_AREG0 1100 1230 wsr a3, sar 1101 1231 l32i a3, a2, PT_AREG3 1102 1232 1103 1233 /* Restore clobbered registers. */ 1104 1234 1235 + l32i a4, a2, PT_AREG4 1105 1236 l32i a7, a2, PT_AREG7 1237 + l32i a8, a2, PT_AREG8 1106 1238 l32i a11, a2, PT_AREG11 1239 + l32i a12, a2, PT_AREG12 1107 1240 l32i a15, a2, PT_AREG15 1108 1241 1109 1242 movi a2, 0 1110 1243 rfe 1244 + 1245 + .Linvalid_mask: 1246 + 1247 + /* We get here because of an unrecoverable error in the window 1248 + * registers, so set up a dummy frame and kill the user application. 1249 + * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. 1250 + */ 1251 + 1252 + movi a0, 1 1253 + movi a1, 0 1254 + 1255 + wsr a0, windowstart 1256 + wsr a1, windowbase 1257 + rsync 1258 + 1259 + movi a0, 0 1260 + 1261 + rsr a3, excsave1 1262 + l32i a1, a3, EXC_TABLE_KSTK 1263 + 1264 + movi a4, (1 << PS_WOE_BIT) | LOCKLEVEL 1265 + wsr a4, ps 1266 + rsync 1267 + 1268 + movi a6, SIGSEGV 1269 + movi a4, do_exit 1270 + callx4 a4 1271 + 1272 + /* shouldn't return, so panic */ 1273 + 1274 + wsr a0, excsave1 1275 + movi a0, unrecoverable_exception 1276 + callx0 a0 # should not return 1277 + 1: j 1b 1278 + 1111 1279 1112 1280 ENDPROC(fast_syscall_spill_registers) 1113 1281 ··· 1284 1116 * We get here if the spill routine causes an exception, e.g. 
tlb miss. 1285 1117 * We basically restore WINDOWBASE and WINDOWSTART to the condition when 1286 1118 * we entered the spill routine and jump to the user exception handler. 1119 + * 1120 + * Note that we only need to restore the bits in windowstart that have not 1121 + * been spilled yet by the _spill_register routine. Luckily, a3 contains a 1122 + * rotated windowstart with only those bits set for frames that haven't been 1123 + * spilled yet. Because a3 is rotated such that bit 0 represents the register 1124 + * frame for the current windowbase - 1, we need to rotate a3 left by the 1125 + * value of the current windowbase + 1 and move it to windowstart. 1287 1126 * 1288 1127 * a0: value of depc, original value in depc 1289 1128 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE ··· 1306 1131 /* We need to make sure the current registers (a0-a3) are preserved. 1307 1132 * To do this, we simply set the bit for the current window frame 1308 1133 * in WS, so that the exception handlers save them to the task stack. 1134 + * 1135 + * Note: we use a3 to set the windowbase, so we take a special care 1136 + * of it, saving it in the original _spill_registers frame across 1137 + * the exception handler call. 1309 1138 */ 1310 1139 1311 1140 xsr a3, excsave1 # get spill-mask 1312 1141 slli a3, a3, 1 # shift left by one 1142 + addi a3, a3, 1 # set the bit for the current window frame 1313 1143 1314 1144 slli a2, a3, 32-WSBITS 1315 1145 src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy...... ··· 1399 1219 rfde 1400 1220 1401 1221 ENDPROC(fast_syscall_spill_registers_fixup_return) 1402 - 1403 - /* 1404 - * spill all registers. 1405 - * 1406 - * This is not a real function. The following conditions must be met: 1407 - * 1408 - * - must be called with call0. 1409 - * - uses a3, a4 and SAR. 1410 - * - the last 'valid' register of each frame are clobbered. 
1411 - * - the caller must have registered a fixup handler 1412 - * (or be inside a critical section) 1413 - * - PS_EXCM must be set (PS_WOE cleared?) 1414 - */ 1415 - 1416 - ENTRY(_spill_registers) 1417 - 1418 - /* 1419 - * Rotate ws so that the current windowbase is at bit 0. 1420 - * Assume ws = xxxwww1yy (www1 current window frame). 1421 - * Rotate ws right so that a4 = yyxxxwww1. 1422 - */ 1423 - 1424 - rsr a4, windowbase 1425 - rsr a3, windowstart # a3 = xxxwww1yy 1426 - ssr a4 # holds WB 1427 - slli a4, a3, WSBITS 1428 - or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy 1429 - srl a3, a3 # a3 = 00xxxwww1yyxxxwww1 1430 - 1431 - /* We are done if there are no more than the current register frame. */ 1432 - 1433 - extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww 1434 - movi a4, (1 << (WSBITS-1)) 1435 - _beqz a3, .Lnospill # only one active frame? jump 1436 - 1437 - /* We want 1 at the top, so that we return to the current windowbase */ 1438 - 1439 - or a3, a3, a4 # 1yyxxxwww 1440 - 1441 - /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ 1442 - 1443 - wsr a3, windowstart # save shifted windowstart 1444 - neg a4, a3 1445 - and a3, a4, a3 # first bit set from right: 000010000 1446 - 1447 - ffs_ws a4, a3 # a4: shifts to skip empty frames 1448 - movi a3, WSBITS 1449 - sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right 1450 - ssr a4 # save in SAR for later. 1451 - 1452 - rsr a3, windowbase 1453 - add a3, a3, a4 1454 - wsr a3, windowbase 1455 - rsync 1456 - 1457 - rsr a3, windowstart 1458 - srl a3, a3 # shift windowstart 1459 - 1460 - /* WB is now just one frame below the oldest frame in the register 1461 - window. WS is shifted so the oldest frame is in bit 0, thus, WB 1462 - and WS differ by one 4-register frame. */ 1463 - 1464 - /* Save frames. Depending what call was used (call4, call8, call12), 1465 - * we have to save 4,8. or 12 registers. 
1466 - */ 1467 - 1468 - _bbsi.l a3, 1, .Lc4 1469 - _bbsi.l a3, 2, .Lc8 1470 - 1471 - /* Special case: we have a call12-frame starting at a4. */ 1472 - 1473 - _bbci.l a3, 3, .Lc12 # bit 3 shouldn't be zero! (Jump to Lc12 first) 1474 - 1475 - s32e a4, a1, -16 # a1 is valid with an empty spill area 1476 - l32e a4, a5, -12 1477 - s32e a8, a4, -48 1478 - mov a8, a4 1479 - l32e a4, a1, -16 1480 - j .Lc12c 1481 - 1482 - .Lnospill: 1483 - ret 1484 - 1485 - .Lloop: _bbsi.l a3, 1, .Lc4 1486 - _bbci.l a3, 2, .Lc12 1487 - 1488 - .Lc8: s32e a4, a13, -16 1489 - l32e a4, a5, -12 1490 - s32e a8, a4, -32 1491 - s32e a5, a13, -12 1492 - s32e a6, a13, -8 1493 - s32e a7, a13, -4 1494 - s32e a9, a4, -28 1495 - s32e a10, a4, -24 1496 - s32e a11, a4, -20 1497 - 1498 - srli a11, a3, 2 # shift windowbase by 2 1499 - rotw 2 1500 - _bnei a3, 1, .Lloop 1501 - 1502 - .Lexit: /* Done. Do the final rotation, set WS, and return. */ 1503 - 1504 - rotw 1 1505 - rsr a3, windowbase 1506 - ssl a3 1507 - movi a3, 1 1508 - sll a3, a3 1509 - wsr a3, windowstart 1510 - ret 1511 - 1512 - .Lc4: s32e a4, a9, -16 1513 - s32e a5, a9, -12 1514 - s32e a6, a9, -8 1515 - s32e a7, a9, -4 1516 - 1517 - srli a7, a3, 1 1518 - rotw 1 1519 - _bnei a3, 1, .Lloop 1520 - j .Lexit 1521 - 1522 - .Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 2 shouldn't be zero! 1523 - 1524 - /* 12-register frame (call12) */ 1525 - 1526 - l32e a2, a5, -12 1527 - s32e a8, a2, -48 1528 - mov a8, a2 1529 - 1530 - .Lc12c: s32e a9, a8, -44 1531 - s32e a10, a8, -40 1532 - s32e a11, a8, -36 1533 - s32e a12, a8, -32 1534 - s32e a13, a8, -28 1535 - s32e a14, a8, -24 1536 - s32e a15, a8, -20 1537 - srli a15, a3, 3 1538 - 1539 - /* The stack pointer for a4..a7 is out of reach, so we rotate the 1540 - * window, grab the stackpointer, and rotate back. 1541 - * Alternatively, we could also use the following approach, but that 1542 - * makes the fixup routine much more complicated: 1543 - * rotw 1 1544 - * s32e a0, a13, -16 1545 - * ... 
1546 - * rotw 2 1547 - */ 1548 - 1549 - rotw 1 1550 - mov a5, a13 1551 - rotw -1 1552 - 1553 - s32e a4, a9, -16 1554 - s32e a5, a9, -12 1555 - s32e a6, a9, -8 1556 - s32e a7, a9, -4 1557 - 1558 - rotw 3 1559 - 1560 - _beqi a3, 1, .Lexit 1561 - j .Lloop 1562 - 1563 - .Linvalid_mask: 1564 - 1565 - /* We get here because of an unrecoverable error in the window 1566 - * registers. If we are in user space, we kill the application, 1567 - * however, this condition is unrecoverable in kernel space. 1568 - */ 1569 - 1570 - rsr a0, ps 1571 - _bbci.l a0, PS_UM_BIT, 1f 1572 - 1573 - /* User space: Setup a dummy frame and kill application. 1574 - * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. 1575 - */ 1576 - 1577 - movi a0, 1 1578 - movi a1, 0 1579 - 1580 - wsr a0, windowstart 1581 - wsr a1, windowbase 1582 - rsync 1583 - 1584 - movi a0, 0 1585 - 1586 - rsr a3, excsave1 1587 - l32i a1, a3, EXC_TABLE_KSTK 1588 - 1589 - movi a4, (1 << PS_WOE_BIT) | LOCKLEVEL 1590 - wsr a4, ps 1591 - rsync 1592 - 1593 - movi a6, SIGSEGV 1594 - movi a4, do_exit 1595 - callx4 a4 1596 - 1597 - 1: /* Kernel space: PANIC! */ 1598 - 1599 - wsr a0, excsave1 1600 - movi a0, unrecoverable_exception 1601 - callx0 a0 # should not return 1602 - 1: j 1b 1603 - 1604 - ENDPROC(_spill_registers) 1605 1222 1606 1223 #ifdef CONFIG_MMU 1607 1224 /* ··· 1771 1794 1772 1795 ENDPROC(system_call) 1773 1796 1797 + /* 1798 + * Spill live registers on the kernel stack macro. 
1799 + * 1800 + * Entry condition: ps.woe is set, ps.excm is cleared 1801 + * Exit condition: windowstart has single bit set 1802 + * May clobber: a12, a13 1803 + */ 1804 + .macro spill_registers_kernel 1805 + 1806 + #if XCHAL_NUM_AREGS > 16 1807 + call12 1f 1808 + _j 2f 1809 + retw 1810 + .align 4 1811 + 1: 1812 + _entry a1, 48 1813 + addi a12, a0, 3 1814 + #if XCHAL_NUM_AREGS > 32 1815 + .rept (XCHAL_NUM_AREGS - 32) / 12 1816 + _entry a1, 48 1817 + mov a12, a0 1818 + .endr 1819 + #endif 1820 + _entry a1, 48 1821 + #if XCHAL_NUM_AREGS % 12 == 0 1822 + mov a8, a8 1823 + #elif XCHAL_NUM_AREGS % 12 == 4 1824 + mov a12, a12 1825 + #elif XCHAL_NUM_AREGS % 12 == 8 1826 + mov a4, a4 1827 + #endif 1828 + retw 1829 + 2: 1830 + #else 1831 + mov a12, a12 1832 + #endif 1833 + .endm 1774 1834 1775 1835 /* 1776 1836 * Task switch. ··· 1820 1806 1821 1807 entry a1, 16 1822 1808 1823 - mov a12, a2 # preserve 'prev' (a2) 1824 - mov a13, a3 # and 'next' (a3) 1809 + mov a10, a2 # preserve 'prev' (a2) 1810 + mov a11, a3 # and 'next' (a3) 1825 1811 1826 1812 l32i a4, a2, TASK_THREAD_INFO 1827 1813 l32i a5, a3, TASK_THREAD_INFO 1828 1814 1829 - save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER 1815 + save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER 1830 1816 1831 - s32i a0, a12, THREAD_RA # save return address 1832 - s32i a1, a12, THREAD_SP # save stack pointer 1817 + s32i a0, a10, THREAD_RA # save return address 1818 + s32i a1, a10, THREAD_SP # save stack pointer 1833 1819 1834 1820 /* Disable ints while we manipulate the stack pointer. */ 1835 1821 1836 - movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL 1837 - xsr a14, ps 1822 + rsil a14, LOCKLEVEL 1838 1823 rsr a3, excsave1 1839 1824 rsync 1840 1825 s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */ ··· 1848 1835 1849 1836 /* Flush register file. 
*/ 1850 1837 1851 - call0 _spill_registers # destroys a3, a4, and SAR 1838 + spill_registers_kernel 1852 1839 1853 1840 /* Set kernel stack (and leave critical section) 1854 1841 * Note: It's save to set it here. The stack will not be overwritten ··· 1864 1851 1865 1852 /* restore context of the task 'next' */ 1866 1853 1867 - l32i a0, a13, THREAD_RA # restore return address 1868 - l32i a1, a13, THREAD_SP # restore stack pointer 1854 + l32i a0, a11, THREAD_RA # restore return address 1855 + l32i a1, a11, THREAD_SP # restore stack pointer 1869 1856 1870 - load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER 1857 + load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER 1871 1858 1872 1859 wsr a14, ps 1873 - mov a2, a12 # return 'prev' 1860 + mov a2, a10 # return 'prev' 1874 1861 rsync 1875 1862 1876 1863 retw
+2
arch/xtensa/kernel/setup.c
··· 22 22 #include <linux/bootmem.h> 23 23 #include <linux/kernel.h> 24 24 #include <linux/percpu.h> 25 + #include <linux/clk-provider.h> 25 26 #include <linux/cpu.h> 26 27 #include <linux/of_fdt.h> 27 28 #include <linux/of_platform.h> ··· 277 276 278 277 static int __init xtensa_device_probe(void) 279 278 { 279 + of_clk_init(NULL); 280 280 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 281 281 return 0; 282 282 }
+1
arch/xtensa/kernel/time.c
··· 30 30 #include <asm/platform.h> 31 31 32 32 unsigned long ccount_freq; /* ccount Hz */ 33 + EXPORT_SYMBOL(ccount_freq); 33 34 34 35 static cycle_t ccount_read(struct clocksource *cs) 35 36 {
+1 -1
arch/xtensa/kernel/vectors.S
··· 235 235 236 236 /* Check for overflow/underflow exception, jump if overflow. */ 237 237 238 - _bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow 238 + bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow 239 239 240 240 /* 241 241 * Restart window underflow exception.
-2
arch/xtensa/kernel/xtensa_ksyms.c
··· 122 122 EXPORT_SYMBOL(insl); 123 123 124 124 extern long common_exception_return; 125 - extern long _spill_registers; 126 125 EXPORT_SYMBOL(common_exception_return); 127 - EXPORT_SYMBOL(_spill_registers); 128 126 129 127 #ifdef CONFIG_FUNCTION_TRACER 130 128 EXPORT_SYMBOL(_mcount);
+9 -4
arch/xtensa/mm/init.c
··· 90 90 91 91 92 92 /* 93 - * Initialize the bootmem system and give it all the memory we have available. 93 + * Initialize the bootmem system and give it all low memory we have available. 94 94 */ 95 95 96 96 void __init bootmem_init(void) ··· 142 142 143 143 /* Add all remaining memory pieces into the bootmem map */ 144 144 145 - for (i=0; i<sysmem.nr_banks; i++) 146 - free_bootmem(sysmem.bank[i].start, 147 - sysmem.bank[i].end - sysmem.bank[i].start); 145 + for (i = 0; i < sysmem.nr_banks; i++) { 146 + if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) { 147 + unsigned long end = min(max_low_pfn << PAGE_SHIFT, 148 + sysmem.bank[i].end); 149 + free_bootmem(sysmem.bank[i].start, 150 + end - sysmem.bank[i].start); 151 + } 152 + } 148 153 149 154 } 150 155
+1 -1
arch/xtensa/mm/mmu.c
··· 39 39 set_itlbcfg_register(0); 40 40 set_dtlbcfg_register(0); 41 41 #endif 42 - #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF 42 + #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) 43 43 /* 44 44 * Update the IO area mapping in case xtensa_kio_paddr has changed 45 45 */
+4 -3
arch/xtensa/platforms/xtfpga/setup.c
··· 135 135 136 136 static int __init machine_setup(void) 137 137 { 138 - struct device_node *serial; 138 + struct device_node *clock; 139 139 struct device_node *eth = NULL; 140 140 141 - for_each_compatible_node(serial, NULL, "ns16550a") 142 - update_clock_frequency(serial); 141 + for_each_node_by_name(clock, "main-oscillator") 142 + update_clock_frequency(clock); 143 143 144 144 if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc"))) 145 145 update_local_mac(eth); ··· 290 290 * knows whether they set it correctly on the DIP switches. 291 291 */ 292 292 pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr); 293 + ethoc_pdata.eth_clkfreq = *(long *)XTFPGA_CLKFRQ_VADDR; 293 294 294 295 return 0; 295 296 }
+2 -7
arch/xtensa/variants/fsf/include/variant/tie.h
··· 18 18 #define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */ 19 19 #define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */ 20 20 21 - /* Basic parameters of each coprocessor: */ 22 - #define XCHAL_CP7_NAME "XTIOP" 23 - #define XCHAL_CP7_IDENT XTIOP 24 - #define XCHAL_CP7_SA_SIZE 0 /* size of state save area */ 25 - #define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */ 26 - #define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */ 27 - 28 21 /* Filler info for unassigned coprocessors, to simplify arrays etc: */ 29 22 #define XCHAL_NCP_SA_SIZE 0 30 23 #define XCHAL_NCP_SA_ALIGN 1 ··· 35 42 #define XCHAL_CP5_SA_ALIGN 1 36 43 #define XCHAL_CP6_SA_SIZE 0 37 44 #define XCHAL_CP6_SA_ALIGN 1 45 + #define XCHAL_CP7_SA_SIZE 0 46 + #define XCHAL_CP7_SA_ALIGN 1 38 47 39 48 /* Save area for non-coprocessor optional and custom (TIE) state: */ 40 49 #define XCHAL_NCP_SA_SIZE 0
+1 -1
block/blk-exec.c
··· 65 65 * be resued after dying flag is set 66 66 */ 67 67 if (q->mq_ops) { 68 - blk_mq_insert_request(q, rq, at_head, true); 68 + blk_mq_insert_request(rq, at_head, true, false); 69 69 return; 70 70 } 71 71
+2 -2
block/blk-flush.c
··· 137 137 rq = container_of(work, struct request, mq_flush_work); 138 138 139 139 memset(&rq->csd, 0, sizeof(rq->csd)); 140 - blk_mq_run_request(rq, true, false); 140 + blk_mq_insert_request(rq, false, true, false); 141 141 } 142 142 143 143 static bool blk_flush_queue_rq(struct request *rq) ··· 411 411 if ((policy & REQ_FSEQ_DATA) && 412 412 !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { 413 413 if (q->mq_ops) { 414 - blk_mq_run_request(rq, false, true); 414 + blk_mq_insert_request(rq, false, false, true); 415 415 } else 416 416 list_add_tail(&rq->queuelist, &q->queue_head); 417 417 return;
+7 -7
block/blk-mq-cpu.c
··· 11 11 #include "blk-mq.h" 12 12 13 13 static LIST_HEAD(blk_mq_cpu_notify_list); 14 - static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock); 14 + static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock); 15 15 16 16 static int blk_mq_main_cpu_notify(struct notifier_block *self, 17 17 unsigned long action, void *hcpu) ··· 19 19 unsigned int cpu = (unsigned long) hcpu; 20 20 struct blk_mq_cpu_notifier *notify; 21 21 22 - spin_lock(&blk_mq_cpu_notify_lock); 22 + raw_spin_lock(&blk_mq_cpu_notify_lock); 23 23 24 24 list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) 25 25 notify->notify(notify->data, action, cpu); 26 26 27 - spin_unlock(&blk_mq_cpu_notify_lock); 27 + raw_spin_unlock(&blk_mq_cpu_notify_lock); 28 28 return NOTIFY_OK; 29 29 } 30 30 ··· 32 32 { 33 33 BUG_ON(!notifier->notify); 34 34 35 - spin_lock(&blk_mq_cpu_notify_lock); 35 + raw_spin_lock(&blk_mq_cpu_notify_lock); 36 36 list_add_tail(&notifier->list, &blk_mq_cpu_notify_list); 37 - spin_unlock(&blk_mq_cpu_notify_lock); 37 + raw_spin_unlock(&blk_mq_cpu_notify_lock); 38 38 } 39 39 40 40 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier) 41 41 { 42 - spin_lock(&blk_mq_cpu_notify_lock); 42 + raw_spin_lock(&blk_mq_cpu_notify_lock); 43 43 list_del(&notifier->list); 44 - spin_unlock(&blk_mq_cpu_notify_lock); 44 + raw_spin_unlock(&blk_mq_cpu_notify_lock); 45 45 } 46 46 47 47 void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
+22 -86
block/blk-mq.c
··· 73 73 set_bit(ctx->index_hw, hctx->ctx_map); 74 74 } 75 75 76 - static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp, 77 - bool reserved) 76 + static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, 77 + gfp_t gfp, bool reserved) 78 78 { 79 79 struct request *rq; 80 80 unsigned int tag; ··· 193 193 ctx->rq_dispatched[rw_is_sync(rw_flags)]++; 194 194 } 195 195 196 - static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, 197 - gfp_t gfp, bool reserved) 198 - { 199 - return blk_mq_alloc_rq(hctx, gfp, reserved); 200 - } 201 - 202 196 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, 203 197 int rw, gfp_t gfp, 204 198 bool reserved) ··· 283 289 __blk_mq_free_request(hctx, ctx, rq); 284 290 } 285 291 286 - static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error) 292 + bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes) 287 293 { 288 - if (error) 289 - clear_bit(BIO_UPTODATE, &bio->bi_flags); 290 - else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 291 - error = -EIO; 292 - 293 - if (unlikely(rq->cmd_flags & REQ_QUIET)) 294 - set_bit(BIO_QUIET, &bio->bi_flags); 295 - 296 - /* don't actually finish bio if it's part of flush sequence */ 297 - if (!(rq->cmd_flags & REQ_FLUSH_SEQ)) 298 - bio_endio(bio, error); 299 - } 300 - 301 - void blk_mq_end_io(struct request *rq, int error) 302 - { 303 - struct bio *bio = rq->bio; 304 - unsigned int bytes = 0; 305 - 306 - trace_block_rq_complete(rq->q, rq); 307 - 308 - while (bio) { 309 - struct bio *next = bio->bi_next; 310 - 311 - bio->bi_next = NULL; 312 - bytes += bio->bi_iter.bi_size; 313 - blk_mq_bio_endio(rq, bio, error); 314 - bio = next; 315 - } 316 - 317 - blk_account_io_completion(rq, bytes); 294 + if (blk_update_request(rq, error, blk_rq_bytes(rq))) 295 + return true; 318 296 319 297 blk_account_io_done(rq); 320 298 ··· 294 328 rq->end_io(rq, error); 295 329 else 296 330 
blk_mq_free_request(rq); 331 + return false; 297 332 } 298 - EXPORT_SYMBOL(blk_mq_end_io); 333 + EXPORT_SYMBOL(blk_mq_end_io_partial); 299 334 300 335 static void __blk_mq_complete_request_remote(void *data) 301 336 { ··· 697 730 blk_mq_add_timer(rq); 698 731 } 699 732 700 - void blk_mq_insert_request(struct request_queue *q, struct request *rq, 701 - bool at_head, bool run_queue) 702 - { 703 - struct blk_mq_hw_ctx *hctx; 704 - struct blk_mq_ctx *ctx, *current_ctx; 705 - 706 - ctx = rq->mq_ctx; 707 - hctx = q->mq_ops->map_queue(q, ctx->cpu); 708 - 709 - if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) { 710 - blk_insert_flush(rq); 711 - } else { 712 - current_ctx = blk_mq_get_ctx(q); 713 - 714 - if (!cpu_online(ctx->cpu)) { 715 - ctx = current_ctx; 716 - hctx = q->mq_ops->map_queue(q, ctx->cpu); 717 - rq->mq_ctx = ctx; 718 - } 719 - spin_lock(&ctx->lock); 720 - __blk_mq_insert_request(hctx, rq, at_head); 721 - spin_unlock(&ctx->lock); 722 - 723 - blk_mq_put_ctx(current_ctx); 724 - } 725 - 726 - if (run_queue) 727 - __blk_mq_run_hw_queue(hctx); 728 - } 729 - EXPORT_SYMBOL(blk_mq_insert_request); 730 - 731 - /* 732 - * This is a special version of blk_mq_insert_request to bypass FLUSH request 733 - * check. Should only be used internally. 
734 - */ 735 - void blk_mq_run_request(struct request *rq, bool run_queue, bool async) 733 + void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, 734 + bool async) 736 735 { 737 736 struct request_queue *q = rq->q; 738 737 struct blk_mq_hw_ctx *hctx; 739 - struct blk_mq_ctx *ctx, *current_ctx; 738 + struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx; 740 739 741 740 current_ctx = blk_mq_get_ctx(q); 741 + if (!cpu_online(ctx->cpu)) 742 + rq->mq_ctx = ctx = current_ctx; 742 743 743 - ctx = rq->mq_ctx; 744 - if (!cpu_online(ctx->cpu)) { 745 - ctx = current_ctx; 746 - rq->mq_ctx = ctx; 747 - } 748 744 hctx = q->mq_ops->map_queue(q, ctx->cpu); 749 745 750 - /* ctx->cpu might be offline */ 751 - spin_lock(&ctx->lock); 752 - __blk_mq_insert_request(hctx, rq, false); 753 - spin_unlock(&ctx->lock); 746 + if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) && 747 + !(rq->cmd_flags & (REQ_FLUSH_SEQ))) { 748 + blk_insert_flush(rq); 749 + } else { 750 + spin_lock(&ctx->lock); 751 + __blk_mq_insert_request(hctx, rq, at_head); 752 + spin_unlock(&ctx->lock); 753 + } 754 754 755 755 blk_mq_put_ctx(current_ctx); 756 756 ··· 860 926 ctx = blk_mq_get_ctx(q); 861 927 hctx = q->mq_ops->map_queue(q, ctx->cpu); 862 928 929 + if (is_sync) 930 + rw |= REQ_SYNC; 863 931 trace_block_getrq(q, bio, rw); 864 932 rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false); 865 933 if (likely(rq))
-1
block/blk-mq.h
··· 23 23 }; 24 24 25 25 void __blk_mq_complete_request(struct request *rq); 26 - void blk_mq_run_request(struct request *rq, bool run_queue, bool async); 27 26 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); 28 27 void blk_mq_init_flush(struct request_queue *q); 29 28 void blk_mq_drain_queue(struct request_queue *q);
+2
drivers/acpi/ac.c
··· 243 243 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); 244 244 return 0; 245 245 } 246 + #else 247 + #define acpi_ac_resume NULL 246 248 #endif 247 249 static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume); 248 250
+2
drivers/acpi/battery.c
··· 841 841 acpi_battery_update(battery); 842 842 return 0; 843 843 } 844 + #else 845 + #define acpi_battery_resume NULL 844 846 #endif 845 847 846 848 static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
-58
drivers/acpi/blacklist.c
··· 260 260 }, 261 261 { 262 262 .callback = dmi_disable_osi_win8, 263 - .ident = "Dell Inspiron 15R SE", 264 - .matches = { 265 - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 266 - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"), 267 - }, 268 - }, 269 - { 270 - .callback = dmi_disable_osi_win8, 271 263 .ident = "ThinkPad Edge E530", 272 264 .matches = { 273 265 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ··· 312 320 .matches = { 313 321 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 314 322 DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"), 315 - }, 316 - }, 317 - { 318 - .callback = dmi_disable_osi_win8, 319 - .ident = "HP ProBook 2013 models", 320 - .matches = { 321 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 322 - DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "), 323 - DMI_MATCH(DMI_PRODUCT_NAME, " G1"), 324 - }, 325 - }, 326 - { 327 - .callback = dmi_disable_osi_win8, 328 - .ident = "HP EliteBook 2013 models", 329 - .matches = { 330 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 331 - DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "), 332 - DMI_MATCH(DMI_PRODUCT_NAME, " G1"), 333 - }, 334 - }, 335 - { 336 - .callback = dmi_disable_osi_win8, 337 - .ident = "HP ZBook 14", 338 - .matches = { 339 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 340 - DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"), 341 - }, 342 - }, 343 - { 344 - .callback = dmi_disable_osi_win8, 345 - .ident = "HP ZBook 15", 346 - .matches = { 347 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 348 - DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"), 349 - }, 350 - }, 351 - { 352 - .callback = dmi_disable_osi_win8, 353 - .ident = "HP ZBook 17", 354 - .matches = { 355 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 356 - DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"), 357 - }, 358 - }, 359 - { 360 - .callback = dmi_disable_osi_win8, 361 - .ident = "HP EliteBook 8780w", 362 - .matches = { 363 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 364 - DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"), 365 323 }, 366 324 }, 367 325
+2
drivers/acpi/button.c
··· 80 80 81 81 #ifdef CONFIG_PM_SLEEP 82 82 static int acpi_button_resume(struct device *dev); 83 + #else 84 + #define acpi_button_resume NULL 83 85 #endif 84 86 static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume); 85 87
+3 -5
drivers/acpi/dock.c
··· 713 713 static ssize_t show_docked(struct device *dev, 714 714 struct device_attribute *attr, char *buf) 715 715 { 716 - struct acpi_device *tmp; 717 - 718 716 struct dock_station *dock_station = dev->platform_data; 717 + struct acpi_device *adev = NULL; 719 718 720 - if (!acpi_bus_get_device(dock_station->handle, &tmp)) 721 - return snprintf(buf, PAGE_SIZE, "1\n"); 722 - return snprintf(buf, PAGE_SIZE, "0\n"); 719 + acpi_bus_get_device(dock_station->handle, &adev); 720 + return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev)); 723 721 } 724 722 static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL); 725 723
+64
drivers/acpi/ec.c
··· 67 67 #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */ 68 68 #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ 69 69 #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ 70 + #define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query 71 + * when trying to clear the EC */ 70 72 71 73 enum { 72 74 EC_FLAGS_QUERY_PENDING, /* Query is pending */ ··· 118 116 static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ 119 117 static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ 120 118 static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ 119 + static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ 121 120 122 121 /* -------------------------------------------------------------------------- 123 122 Transaction Management ··· 443 440 444 441 EXPORT_SYMBOL(ec_get_handle); 445 442 443 + static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data); 444 + 445 + /* 446 + * Clears stale _Q events that might have accumulated in the EC. 447 + * Run with locked ec mutex. 
448 + */ 449 + static void acpi_ec_clear(struct acpi_ec *ec) 450 + { 451 + int i, status; 452 + u8 value = 0; 453 + 454 + for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { 455 + status = acpi_ec_query_unlocked(ec, &value); 456 + if (status || !value) 457 + break; 458 + } 459 + 460 + if (unlikely(i == ACPI_EC_CLEAR_MAX)) 461 + pr_warn("Warning: Maximum of %d stale EC events cleared\n", i); 462 + else 463 + pr_info("%d stale EC events cleared\n", i); 464 + } 465 + 446 466 void acpi_ec_block_transactions(void) 447 467 { 448 468 struct acpi_ec *ec = first_ec; ··· 489 463 mutex_lock(&ec->mutex); 490 464 /* Allow transactions to be carried out again */ 491 465 clear_bit(EC_FLAGS_BLOCKED, &ec->flags); 466 + 467 + if (EC_FLAGS_CLEAR_ON_RESUME) 468 + acpi_ec_clear(ec); 469 + 492 470 mutex_unlock(&ec->mutex); 493 471 } 494 472 ··· 851 821 852 822 /* EC is fully operational, allow queries */ 853 823 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); 824 + 825 + /* Clear stale _Q events if hardware might require that */ 826 + if (EC_FLAGS_CLEAR_ON_RESUME) { 827 + mutex_lock(&ec->mutex); 828 + acpi_ec_clear(ec); 829 + mutex_unlock(&ec->mutex); 830 + } 854 831 return ret; 855 832 } 856 833 ··· 959 922 return 0; 960 923 } 961 924 925 + /* 926 + * On some hardware it is necessary to clear events accumulated by the EC during 927 + * sleep. These ECs stop reporting GPEs until they are manually polled, if too 928 + * many events are accumulated. (e.g. Samsung Series 5/9 notebooks) 929 + * 930 + * https://bugzilla.kernel.org/show_bug.cgi?id=44161 931 + * 932 + * Ideally, the EC should also be instructed NOT to accumulate events during 933 + * sleep (which Windows seems to do somehow), but the interface to control this 934 + * behaviour is not known at this time. 935 + * 936 + * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx, 937 + * however it is very likely that other Samsung models are affected. 
938 + * 939 + * On systems which don't accumulate _Q events during sleep, this extra check 940 + * should be harmless. 941 + */ 942 + static int ec_clear_on_resume(const struct dmi_system_id *id) 943 + { 944 + pr_debug("Detected system needing EC poll on resume.\n"); 945 + EC_FLAGS_CLEAR_ON_RESUME = 1; 946 + return 0; 947 + } 948 + 962 949 static struct dmi_system_id ec_dmi_table[] __initdata = { 963 950 { 964 951 ec_skip_dsdt_scan, "Compal JFL92", { ··· 1026 965 ec_validate_ecdt, "ASUS hardware", { 1027 966 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."), 1028 967 DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL}, 968 + { 969 + ec_clear_on_resume, "Samsung hardware", { 970 + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, 1029 971 {}, 1030 972 }; 1031 973
+3
drivers/acpi/fan.c
··· 55 55 #ifdef CONFIG_PM_SLEEP 56 56 static int acpi_fan_suspend(struct device *dev); 57 57 static int acpi_fan_resume(struct device *dev); 58 + #else 59 + #define acpi_fan_suspend NULL 60 + #define acpi_fan_resume NULL 58 61 #endif 59 62 static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume); 60 63
+1
drivers/acpi/pci_irq.c
··· 430 430 pin_name(pin)); 431 431 } 432 432 433 + kfree(entry); 433 434 return 0; 434 435 } 435 436
+32 -37
drivers/acpi/processor_throttling.c
··· 56 56 int target_state; /* target T-state */ 57 57 }; 58 58 59 + struct acpi_processor_throttling_arg { 60 + struct acpi_processor *pr; 61 + int target_state; 62 + bool force; 63 + }; 64 + 59 65 #define THROTTLING_PRECHANGE (1) 60 66 #define THROTTLING_POSTCHANGE (2) 61 67 ··· 1066 1060 return 0; 1067 1061 } 1068 1062 1063 + static long acpi_processor_throttling_fn(void *data) 1064 + { 1065 + struct acpi_processor_throttling_arg *arg = data; 1066 + struct acpi_processor *pr = arg->pr; 1067 + 1068 + return pr->throttling.acpi_processor_set_throttling(pr, 1069 + arg->target_state, arg->force); 1070 + } 1071 + 1069 1072 int acpi_processor_set_throttling(struct acpi_processor *pr, 1070 1073 int state, bool force) 1071 1074 { 1072 - cpumask_var_t saved_mask; 1073 1075 int ret = 0; 1074 1076 unsigned int i; 1075 1077 struct acpi_processor *match_pr; 1076 1078 struct acpi_processor_throttling *p_throttling; 1079 + struct acpi_processor_throttling_arg arg; 1077 1080 struct throttling_tstate t_state; 1078 - cpumask_var_t online_throttling_cpus; 1079 1081 1080 1082 if (!pr) 1081 1083 return -EINVAL; ··· 1094 1080 if ((state < 0) || (state > (pr->throttling.state_count - 1))) 1095 1081 return -EINVAL; 1096 1082 1097 - if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) 1098 - return -ENOMEM; 1099 - 1100 - if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) { 1101 - free_cpumask_var(saved_mask); 1102 - return -ENOMEM; 1103 - } 1104 - 1105 1083 if (cpu_is_offline(pr->id)) { 1106 1084 /* 1107 1085 * the cpu pointed by pr->id is offline. 
Unnecessary to change ··· 1102 1096 return -ENODEV; 1103 1097 } 1104 1098 1105 - cpumask_copy(saved_mask, &current->cpus_allowed); 1106 1099 t_state.target_state = state; 1107 1100 p_throttling = &(pr->throttling); 1108 - cpumask_and(online_throttling_cpus, cpu_online_mask, 1109 - p_throttling->shared_cpu_map); 1101 + 1110 1102 /* 1111 1103 * The throttling notifier will be called for every 1112 1104 * affected cpu in order to get one proper T-state. 1113 1105 * The notifier event is THROTTLING_PRECHANGE. 1114 1106 */ 1115 - for_each_cpu(i, online_throttling_cpus) { 1107 + for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) { 1116 1108 t_state.cpu = i; 1117 1109 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, 1118 1110 &t_state); ··· 1122 1118 * it can be called only for the cpu pointed by pr. 1123 1119 */ 1124 1120 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { 1125 - /* FIXME: use work_on_cpu() */ 1126 - if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { 1127 - /* Can't migrate to the pr->id CPU. Exit */ 1128 - ret = -ENODEV; 1129 - goto exit; 1130 - } 1131 - ret = p_throttling->acpi_processor_set_throttling(pr, 1132 - t_state.target_state, force); 1121 + arg.pr = pr; 1122 + arg.target_state = state; 1123 + arg.force = force; 1124 + ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg); 1133 1125 } else { 1134 1126 /* 1135 1127 * When the T-state coordination is SW_ALL or HW_ALL, 1136 1128 * it is necessary to set T-state for every affected 1137 1129 * cpus. 
1138 1130 */ 1139 - for_each_cpu(i, online_throttling_cpus) { 1131 + for_each_cpu_and(i, cpu_online_mask, 1132 + p_throttling->shared_cpu_map) { 1140 1133 match_pr = per_cpu(processors, i); 1141 1134 /* 1142 1135 * If the pointer is invalid, we will report the ··· 1154 1153 "on CPU %d\n", i)); 1155 1154 continue; 1156 1155 } 1157 - t_state.cpu = i; 1158 - /* FIXME: use work_on_cpu() */ 1159 - if (set_cpus_allowed_ptr(current, cpumask_of(i))) 1160 - continue; 1161 - ret = match_pr->throttling. 1162 - acpi_processor_set_throttling( 1163 - match_pr, t_state.target_state, force); 1156 + 1157 + arg.pr = match_pr; 1158 + arg.target_state = state; 1159 + arg.force = force; 1160 + ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, 1161 + &arg); 1164 1162 } 1165 1163 } 1166 1164 /* ··· 1168 1168 * affected cpu to update the T-states. 1169 1169 * The notifier event is THROTTLING_POSTCHANGE 1170 1170 */ 1171 - for_each_cpu(i, online_throttling_cpus) { 1171 + for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) { 1172 1172 t_state.cpu = i; 1173 1173 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, 1174 1174 &t_state); 1175 1175 } 1176 - /* restore the previous state */ 1177 - /* FIXME: use work_on_cpu() */ 1178 - set_cpus_allowed_ptr(current, saved_mask); 1179 - exit: 1180 - free_cpumask_var(online_throttling_cpus); 1181 - free_cpumask_var(saved_mask); 1176 + 1182 1177 return ret; 1183 1178 } 1184 1179
+10
drivers/acpi/resource.c
··· 77 77 switch (ares->type) { 78 78 case ACPI_RESOURCE_TYPE_MEMORY24: 79 79 memory24 = &ares->data.memory24; 80 + if (!memory24->address_length) 81 + return false; 80 82 acpi_dev_get_memresource(res, memory24->minimum, 81 83 memory24->address_length, 82 84 memory24->write_protect); 83 85 break; 84 86 case ACPI_RESOURCE_TYPE_MEMORY32: 85 87 memory32 = &ares->data.memory32; 88 + if (!memory32->address_length) 89 + return false; 86 90 acpi_dev_get_memresource(res, memory32->minimum, 87 91 memory32->address_length, 88 92 memory32->write_protect); 89 93 break; 90 94 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: 91 95 fixed_memory32 = &ares->data.fixed_memory32; 96 + if (!fixed_memory32->address_length) 97 + return false; 92 98 acpi_dev_get_memresource(res, fixed_memory32->address, 93 99 fixed_memory32->address_length, 94 100 fixed_memory32->write_protect); ··· 150 144 switch (ares->type) { 151 145 case ACPI_RESOURCE_TYPE_IO: 152 146 io = &ares->data.io; 147 + if (!io->address_length) 148 + return false; 153 149 acpi_dev_get_ioresource(res, io->minimum, 154 150 io->address_length, 155 151 io->io_decode); 156 152 break; 157 153 case ACPI_RESOURCE_TYPE_FIXED_IO: 158 154 fixed_io = &ares->data.fixed_io; 155 + if (!fixed_io->address_length) 156 + return false; 159 157 acpi_dev_get_ioresource(res, fixed_io->address, 160 158 fixed_io->address_length, 161 159 ACPI_DECODE_10);
+3 -1
drivers/acpi/sbs.c
··· 450 450 { 451 451 unsigned long x; 452 452 struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev)); 453 - if (sscanf(buf, "%ld\n", &x) == 1) 453 + if (sscanf(buf, "%lu\n", &x) == 1) 454 454 battery->alarm_capacity = x / 455 455 (1000 * acpi_battery_scale(battery)); 456 456 if (battery->present) ··· 668 668 acpi_sbs_callback(sbs); 669 669 return 0; 670 670 } 671 + #else 672 + #define acpi_sbs_resume NULL 671 673 #endif 672 674 673 675 static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
+2
drivers/acpi/thermal.c
··· 102 102 103 103 #ifdef CONFIG_PM_SLEEP 104 104 static int acpi_thermal_resume(struct device *dev); 105 + #else 106 + #define acpi_thermal_resume NULL 105 107 #endif 106 108 static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume); 107 109
+141 -6
drivers/acpi/video.c
··· 81 81 module_param(allow_duplicates, bool, 0644); 82 82 83 83 /* 84 - * For Windows 8 systems: if set ture and the GPU driver has 85 - * registered a backlight interface, skip registering ACPI video's. 84 + * For Windows 8 systems: used to decide if video module 85 + * should skip registering backlight interface of its own. 86 86 */ 87 - static bool use_native_backlight = false; 88 - module_param(use_native_backlight, bool, 0644); 87 + static int use_native_backlight_param = -1; 88 + module_param_named(use_native_backlight, use_native_backlight_param, int, 0444); 89 + static bool use_native_backlight_dmi = false; 89 90 90 91 static int register_count; 91 92 static struct mutex video_list_lock; ··· 232 231 static int acpi_video_switch_brightness(struct acpi_video_device *device, 233 232 int event); 234 233 234 + static bool acpi_video_use_native_backlight(void) 235 + { 236 + if (use_native_backlight_param != -1) 237 + return use_native_backlight_param; 238 + else 239 + return use_native_backlight_dmi; 240 + } 241 + 235 242 static bool acpi_video_verify_backlight_support(void) 236 243 { 237 - if (acpi_osi_is_win8() && use_native_backlight && 244 + if (acpi_osi_is_win8() && acpi_video_use_native_backlight() && 238 245 backlight_device_registered(BACKLIGHT_RAW)) 239 246 return false; 240 247 return acpi_video_backlight_support(); ··· 407 398 return 0; 408 399 } 409 400 401 + static int __init video_set_use_native_backlight(const struct dmi_system_id *d) 402 + { 403 + use_native_backlight_dmi = true; 404 + return 0; 405 + } 406 + 410 407 static struct dmi_system_id video_dmi_table[] __initdata = { 411 408 /* 412 409 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 ··· 455 440 .matches = { 456 441 DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), 457 442 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), 443 + }, 444 + }, 445 + { 446 + .callback = video_set_use_native_backlight, 447 + .ident = "ThinkPad T430s", 448 + .matches = { 449 + DMI_MATCH(DMI_SYS_VENDOR, 
"LENOVO"), 450 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"), 451 + }, 452 + }, 453 + { 454 + .callback = video_set_use_native_backlight, 455 + .ident = "ThinkPad X230", 456 + .matches = { 457 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 458 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"), 459 + }, 460 + }, 461 + { 462 + .callback = video_set_use_native_backlight, 463 + .ident = "ThinkPad X1 Carbon", 464 + .matches = { 465 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 466 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"), 467 + }, 468 + }, 469 + { 470 + .callback = video_set_use_native_backlight, 471 + .ident = "Lenovo Yoga 13", 472 + .matches = { 473 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 474 + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"), 475 + }, 476 + }, 477 + { 478 + .callback = video_set_use_native_backlight, 479 + .ident = "Dell Inspiron 7520", 480 + .matches = { 481 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 482 + DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"), 483 + }, 484 + }, 485 + { 486 + .callback = video_set_use_native_backlight, 487 + .ident = "Acer Aspire 5733Z", 488 + .matches = { 489 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 490 + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"), 491 + }, 492 + }, 493 + { 494 + .callback = video_set_use_native_backlight, 495 + .ident = "Acer Aspire V5-431", 496 + .matches = { 497 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 498 + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-431"), 499 + }, 500 + }, 501 + { 502 + .callback = video_set_use_native_backlight, 503 + .ident = "HP ProBook 4340s", 504 + .matches = { 505 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 506 + DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4340s"), 507 + }, 508 + }, 509 + { 510 + .callback = video_set_use_native_backlight, 511 + .ident = "HP ProBook 2013 models", 512 + .matches = { 513 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 514 + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "), 515 + DMI_MATCH(DMI_PRODUCT_NAME, " G1"), 516 + }, 517 + }, 518 + { 519 + 
.callback = video_set_use_native_backlight, 520 + .ident = "HP EliteBook 2013 models", 521 + .matches = { 522 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 523 + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "), 524 + DMI_MATCH(DMI_PRODUCT_NAME, " G1"), 525 + }, 526 + }, 527 + { 528 + .callback = video_set_use_native_backlight, 529 + .ident = "HP ZBook 14", 530 + .matches = { 531 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 532 + DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"), 533 + }, 534 + }, 535 + { 536 + .callback = video_set_use_native_backlight, 537 + .ident = "HP ZBook 15", 538 + .matches = { 539 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 540 + DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"), 541 + }, 542 + }, 543 + { 544 + .callback = video_set_use_native_backlight, 545 + .ident = "HP ZBook 17", 546 + .matches = { 547 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 548 + DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"), 549 + }, 550 + }, 551 + { 552 + .callback = video_set_use_native_backlight, 553 + .ident = "HP EliteBook 8780w", 554 + .matches = { 555 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 556 + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"), 458 557 }, 459 558 }, 460 559 {} ··· 814 685 union acpi_object *o; 815 686 struct acpi_video_device_brightness *br = NULL; 816 687 int result = -EINVAL; 688 + u32 value; 817 689 818 690 if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) { 819 691 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available " ··· 845 715 printk(KERN_ERR PREFIX "Invalid data\n"); 846 716 continue; 847 717 } 848 - br->levels[count] = (u32) o->integer.value; 718 + value = (u32) o->integer.value; 719 + /* Skip duplicate entries */ 720 + if (count > 2 && br->levels[count - 1] == value) 721 + continue; 722 + 723 + br->levels[count] = value; 849 724 850 725 if (br->levels[count] > max_level) 851 726 max_level = br->levels[count];
-16
drivers/acpi/video_detect.c
··· 168 168 DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), 169 169 }, 170 170 }, 171 - { 172 - .callback = video_detect_force_vendor, 173 - .ident = "HP EliteBook Revolve 810", 174 - .matches = { 175 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 176 - DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Revolve 810 G1"), 177 - }, 178 - }, 179 - { 180 - .callback = video_detect_force_vendor, 181 - .ident = "Lenovo Yoga 13", 182 - .matches = { 183 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 184 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"), 185 - }, 186 - }, 187 171 { }, 188 172 }; 189 173
+1
drivers/ata/Kconfig
··· 247 247 248 248 config SATA_MV 249 249 tristate "Marvell SATA support" 250 + select GENERIC_PHY 250 251 help 251 252 This option enables support for the Marvell Serial ATA family. 252 253 Currently supports 88SX[56]0[48][01] PCI(-X) chips,
+17 -1
drivers/ata/ahci.c
··· 61 61 /* board IDs by feature in alphabetical order */ 62 62 board_ahci, 63 63 board_ahci_ign_iferr, 64 + board_ahci_noncq, 64 65 board_ahci_nosntf, 65 66 board_ahci_yes_fbs, 66 67 ··· 117 116 }, 118 117 [board_ahci_ign_iferr] = { 119 118 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR), 119 + .flags = AHCI_FLAG_COMMON, 120 + .pio_mask = ATA_PIO4, 121 + .udma_mask = ATA_UDMA6, 122 + .port_ops = &ahci_ops, 123 + }, 124 + [board_ahci_noncq] = { 125 + AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ), 120 126 .flags = AHCI_FLAG_COMMON, 121 127 .pio_mask = ATA_PIO4, 122 128 .udma_mask = ATA_UDMA6, ··· 459 451 { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */ 460 452 { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ 461 453 { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ 454 + 455 + /* 456 + * Samsung SSDs found on some macbooks. NCQ times out. 457 + * https://bugzilla.kernel.org/show_bug.cgi?id=60731 458 + */ 459 + { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq }, 462 460 463 461 /* Enmotus */ 464 462 { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, ··· 1184 1170 1185 1171 nvec = rc; 1186 1172 rc = pci_enable_msi_block(pdev, nvec); 1187 - if (rc) 1173 + if (rc < 0) 1188 1174 goto intx; 1175 + else if (rc > 0) 1176 + goto single_msi; 1189 1177 1190 1178 return nvec; 1191 1179
+2
drivers/ata/libata-core.c
··· 4175 4175 4176 4176 /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ 4177 4177 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4178 + { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4178 4179 4179 4180 /* Blacklist entries taken from Silicon Image 3124/3132 4180 4181 Windows driver .inf file - also several Linux problem reports */ ··· 4226 4225 /* devices that don't properly handle queued TRIM commands */ 4227 4226 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, 4228 4227 { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, 4228 + { "Crucial_CT???M500SSD3", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, 4229 4229 4230 4230 /* 4231 4231 * Some WD SATA-I drives spin up and down erratically when the link
+5 -2
drivers/ata/libata-pmp.c
··· 447 447 * otherwise. Don't try hard to recover it. 448 448 */ 449 449 ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY; 450 - } else if (vendor == 0x197b && devid == 0x2352) { 451 - /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */ 450 + } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) { 451 + /* 452 + * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350? 453 + * 0x0325: jmicron JMB394. 454 + */ 452 455 ata_for_each_link(link, ap, EDGE) { 453 456 /* SRST breaks detection and disks get misclassified 454 457 * LPM disabled to avoid potential problems
+6 -2
drivers/ata/pata_imx.c
··· 119 119 return PTR_ERR(priv->clk); 120 120 } 121 121 122 - clk_prepare_enable(priv->clk); 122 + ret = clk_prepare_enable(priv->clk); 123 + if (ret) 124 + return ret; 123 125 124 126 host = ata_host_alloc(&pdev->dev, 1); 125 127 if (!host) { ··· 214 212 struct ata_host *host = dev_get_drvdata(dev); 215 213 struct pata_imx_priv *priv = host->private_data; 216 214 217 - clk_prepare_enable(priv->clk); 215 + int ret = clk_prepare_enable(priv->clk); 216 + if (ret) 217 + return ret; 218 218 219 219 __raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL); 220 220
+8 -4
drivers/ata/sata_mv.c
··· 4104 4104 if (!hpriv->port_phys) 4105 4105 return -ENOMEM; 4106 4106 host->private_data = hpriv; 4107 - hpriv->n_ports = n_ports; 4108 4107 hpriv->board_idx = chip_soc; 4109 4108 4110 4109 host->iomap = NULL; ··· 4131 4132 rc = PTR_ERR(hpriv->port_phys[port]); 4132 4133 hpriv->port_phys[port] = NULL; 4133 4134 if (rc != -EPROBE_DEFER) 4134 - dev_warn(&pdev->dev, "error getting phy %d", 4135 - rc); 4135 + dev_warn(&pdev->dev, "error getting phy %d", rc); 4136 + 4137 + /* Cleanup only the initialized ports */ 4138 + hpriv->n_ports = port; 4136 4139 goto err; 4137 4140 } else 4138 4141 phy_power_on(hpriv->port_phys[port]); 4139 4142 } 4143 + 4144 + /* All the ports have been initialized */ 4145 + hpriv->n_ports = n_ports; 4140 4146 4141 4147 /* 4142 4148 * (Re-)program MBUS remapping windows if we are asked to. ··· 4180 4176 clk_disable_unprepare(hpriv->clk); 4181 4177 clk_put(hpriv->clk); 4182 4178 } 4183 - for (port = 0; port < n_ports; port++) { 4179 + for (port = 0; port < hpriv->n_ports; port++) { 4184 4180 if (!IS_ERR(hpriv->port_clks[port])) { 4185 4181 clk_disable_unprepare(hpriv->port_clks[port]); 4186 4182 clk_put(hpriv->port_clks[port]);
+1
drivers/ata/sata_sil.c
··· 157 157 { "ST380011ASL", SIL_QUIRK_MOD15WRITE }, 158 158 { "ST3120022ASL", SIL_QUIRK_MOD15WRITE }, 159 159 { "ST3160021ASL", SIL_QUIRK_MOD15WRITE }, 160 + { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE }, 160 161 { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX }, 161 162 { } 162 163 };
+12 -13
drivers/base/dma-buf.c
··· 616 616 if (ret) 617 617 return ret; 618 618 619 - seq_printf(s, "\nDma-buf Objects:\n"); 620 - seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n"); 619 + seq_puts(s, "\nDma-buf Objects:\n"); 620 + seq_puts(s, "size\tflags\tmode\tcount\texp_name\n"); 621 621 622 622 list_for_each_entry(buf_obj, &db_list.head, list_node) { 623 623 ret = mutex_lock_interruptible(&buf_obj->lock); 624 624 625 625 if (ret) { 626 - seq_printf(s, 627 - "\tERROR locking buffer object: skipping\n"); 626 + seq_puts(s, 627 + "\tERROR locking buffer object: skipping\n"); 628 628 continue; 629 629 } 630 630 631 - seq_printf(s, "\t"); 632 - 633 - seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n", 634 - buf_obj->exp_name, buf_obj->size, 631 + seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n", 632 + buf_obj->size, 635 633 buf_obj->file->f_flags, buf_obj->file->f_mode, 636 - (long)(buf_obj->file->f_count.counter)); 634 + (long)(buf_obj->file->f_count.counter), 635 + buf_obj->exp_name); 637 636 638 - seq_printf(s, "\t\tAttached Devices:\n"); 637 + seq_puts(s, "\tAttached Devices:\n"); 639 638 attach_count = 0; 640 639 641 640 list_for_each_entry(attach_obj, &buf_obj->attachments, node) { 642 - seq_printf(s, "\t\t"); 641 + seq_puts(s, "\t"); 643 642 644 - seq_printf(s, "%s\n", attach_obj->dev->init_name); 643 + seq_printf(s, "%s\n", dev_name(attach_obj->dev)); 645 644 attach_count++; 646 645 } 647 646 648 - seq_printf(s, "\n\t\tTotal %d devices attached\n", 647 + seq_printf(s, "Total %d devices attached\n\n", 649 648 attach_count); 650 649 651 650 count++;
+1
drivers/base/firmware_class.c
··· 1580 1580 switch (mode) { 1581 1581 case PM_HIBERNATION_PREPARE: 1582 1582 case PM_SUSPEND_PREPARE: 1583 + case PM_RESTORE_PREPARE: 1583 1584 kill_requests_without_uevent(); 1584 1585 device_cache_fw_images(); 1585 1586 break;
+2 -2
drivers/block/aoe/aoecmd.c
··· 874 874 /* Non-zero page count for non-head members of 875 875 * compound pages is no longer allowed by the kernel. 876 876 */ 877 - page = compound_trans_head(bv.bv_page); 877 + page = compound_head(bv.bv_page); 878 878 atomic_inc(&page->_count); 879 879 } 880 880 } ··· 887 887 struct bvec_iter iter; 888 888 889 889 bio_for_each_segment(bv, bio, iter) { 890 - page = compound_trans_head(bv.bv_page); 890 + page = compound_head(bv.bv_page); 891 891 atomic_dec(&page->_count); 892 892 } 893 893 }
+1 -1
drivers/block/mtip32xx/mtip32xx.h
··· 53 53 #define MTIP_FTL_REBUILD_TIMEOUT_MS 2400000 54 54 55 55 /* unaligned IO handling */ 56 - #define MTIP_MAX_UNALIGNED_SLOTS 8 56 + #define MTIP_MAX_UNALIGNED_SLOTS 2 57 57 58 58 /* Macro to extract the tag bit number from a tag value. */ 59 59 #define MTIP_TAG_BIT(tag) (tag & 0x1F)
+2
drivers/block/zram/zram_drv.c
··· 612 612 613 613 disksize = PAGE_ALIGN(disksize); 614 614 meta = zram_meta_alloc(disksize); 615 + if (!meta) 616 + return -ENOMEM; 615 617 down_write(&zram->init_lock); 616 618 if (zram->init_done) { 617 619 up_write(&zram->init_lock);
+1 -1
drivers/clk/at91/clk-master.c
··· 242 242 243 243 irq = irq_of_parse_and_map(np, 0); 244 244 if (!irq) 245 - return; 245 + goto out_free_characteristics; 246 246 247 247 clk = at91_clk_register_master(pmc, irq, name, num_parents, 248 248 parent_names, layout,
+3
drivers/clk/clk-nomadik.c
··· 494 494 495 495 static int __init nomadik_src_clk_init_debugfs(void) 496 496 { 497 + /* Vital for multiplatform */ 498 + if (!src_base) 499 + return -ENODEV; 497 500 src_pcksr0_boot = readl(src_base + SRC_PCKSR0); 498 501 src_pcksr1_boot = readl(src_base + SRC_PCKSR1); 499 502 debugfs_create_file("nomadik-src-clk", S_IFREG | S_IRUGO,
+7 -6
drivers/clk/clk.c
··· 2226 2226 */ 2227 2227 int __clk_get(struct clk *clk) 2228 2228 { 2229 - if (clk && !try_module_get(clk->owner)) 2230 - return 0; 2229 + if (clk) { 2230 + if (!try_module_get(clk->owner)) 2231 + return 0; 2231 2232 2232 - kref_get(&clk->ref); 2233 + kref_get(&clk->ref); 2234 + } 2233 2235 return 1; 2234 2236 } 2235 2237 2236 2238 void __clk_put(struct clk *clk) 2237 2239 { 2238 - if (WARN_ON_ONCE(IS_ERR(clk))) 2240 + if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 2239 2241 return; 2240 2242 2241 2243 clk_prepare_lock(); 2242 2244 kref_put(&clk->ref, __clk_release); 2243 2245 clk_prepare_unlock(); 2244 2246 2245 - if (clk) 2246 - module_put(clk->owner); 2247 + module_put(clk->owner); 2247 2248 } 2248 2249 2249 2250 /*** clk rate change notifiers ***/
+1
drivers/clk/keystone/gate.c
··· 179 179 180 180 init.name = name; 181 181 init.ops = &clk_psc_ops; 182 + init.flags = 0; 182 183 init.parent_names = (parent_name ? &parent_name : NULL); 183 184 init.num_parents = (parent_name ? 1 : 0); 184 185
+10 -11
drivers/clk/mvebu/armada-370.c
··· 141 141 .num_ratios = ARRAY_SIZE(a370_coreclk_ratios), 142 142 }; 143 143 144 - static void __init a370_coreclk_init(struct device_node *np) 145 - { 146 - mvebu_coreclk_setup(np, &a370_coreclks); 147 - } 148 - CLK_OF_DECLARE(a370_core_clk, "marvell,armada-370-core-clock", 149 - a370_coreclk_init); 150 - 151 144 /* 152 145 * Clock Gating Control 153 146 */ ··· 161 168 { } 162 169 }; 163 170 164 - static void __init a370_clk_gating_init(struct device_node *np) 171 + static void __init a370_clk_init(struct device_node *np) 165 172 { 166 - mvebu_clk_gating_setup(np, a370_gating_desc); 173 + struct device_node *cgnp = 174 + of_find_compatible_node(NULL, NULL, "marvell,armada-370-gating-clock"); 175 + 176 + mvebu_coreclk_setup(np, &a370_coreclks); 177 + 178 + if (cgnp) 179 + mvebu_clk_gating_setup(cgnp, a370_gating_desc); 167 180 } 168 - CLK_OF_DECLARE(a370_clk_gating, "marvell,armada-370-gating-clock", 169 - a370_clk_gating_init); 181 + CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init); 182 +
+9 -11
drivers/clk/mvebu/armada-xp.c
··· 158 158 .num_ratios = ARRAY_SIZE(axp_coreclk_ratios), 159 159 }; 160 160 161 - static void __init axp_coreclk_init(struct device_node *np) 162 - { 163 - mvebu_coreclk_setup(np, &axp_coreclks); 164 - } 165 - CLK_OF_DECLARE(axp_core_clk, "marvell,armada-xp-core-clock", 166 - axp_coreclk_init); 167 - 168 161 /* 169 162 * Clock Gating Control 170 163 */ ··· 195 202 { } 196 203 }; 197 204 198 - static void __init axp_clk_gating_init(struct device_node *np) 205 + static void __init axp_clk_init(struct device_node *np) 199 206 { 200 - mvebu_clk_gating_setup(np, axp_gating_desc); 207 + struct device_node *cgnp = 208 + of_find_compatible_node(NULL, NULL, "marvell,armada-xp-gating-clock"); 209 + 210 + mvebu_coreclk_setup(np, &axp_coreclks); 211 + 212 + if (cgnp) 213 + mvebu_clk_gating_setup(cgnp, axp_gating_desc); 201 214 } 202 - CLK_OF_DECLARE(axp_clk_gating, "marvell,armada-xp-gating-clock", 203 - axp_clk_gating_init); 215 + CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init);
+9 -10
drivers/clk/mvebu/dove.c
··· 154 154 .num_ratios = ARRAY_SIZE(dove_coreclk_ratios), 155 155 }; 156 156 157 - static void __init dove_coreclk_init(struct device_node *np) 158 - { 159 - mvebu_coreclk_setup(np, &dove_coreclks); 160 - } 161 - CLK_OF_DECLARE(dove_core_clk, "marvell,dove-core-clock", dove_coreclk_init); 162 - 163 157 /* 164 158 * Clock Gating Control 165 159 */ ··· 180 186 { } 181 187 }; 182 188 183 - static void __init dove_clk_gating_init(struct device_node *np) 189 + static void __init dove_clk_init(struct device_node *np) 184 190 { 185 - mvebu_clk_gating_setup(np, dove_gating_desc); 191 + struct device_node *cgnp = 192 + of_find_compatible_node(NULL, NULL, "marvell,dove-gating-clock"); 193 + 194 + mvebu_coreclk_setup(np, &dove_coreclks); 195 + 196 + if (cgnp) 197 + mvebu_clk_gating_setup(cgnp, dove_gating_desc); 186 198 } 187 - CLK_OF_DECLARE(dove_clk_gating, "marvell,dove-gating-clock", 188 - dove_clk_gating_init); 199 + CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init);
+16 -18
drivers/clk/mvebu/kirkwood.c
··· 193 193 .num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios), 194 194 }; 195 195 196 - static void __init kirkwood_coreclk_init(struct device_node *np) 197 - { 198 - mvebu_coreclk_setup(np, &kirkwood_coreclks); 199 - } 200 - CLK_OF_DECLARE(kirkwood_core_clk, "marvell,kirkwood-core-clock", 201 - kirkwood_coreclk_init); 202 - 203 196 static const struct coreclk_soc_desc mv88f6180_coreclks = { 204 197 .get_tclk_freq = kirkwood_get_tclk_freq, 205 198 .get_cpu_freq = mv88f6180_get_cpu_freq, ··· 200 207 .ratios = kirkwood_coreclk_ratios, 201 208 .num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios), 202 209 }; 203 - 204 - static void __init mv88f6180_coreclk_init(struct device_node *np) 205 - { 206 - mvebu_coreclk_setup(np, &mv88f6180_coreclks); 207 - } 208 - CLK_OF_DECLARE(mv88f6180_core_clk, "marvell,mv88f6180-core-clock", 209 - mv88f6180_coreclk_init); 210 210 211 211 /* 212 212 * Clock Gating Control ··· 225 239 { } 226 240 }; 227 241 228 - static void __init kirkwood_clk_gating_init(struct device_node *np) 242 + static void __init kirkwood_clk_init(struct device_node *np) 229 243 { 230 - mvebu_clk_gating_setup(np, kirkwood_gating_desc); 244 + struct device_node *cgnp = 245 + of_find_compatible_node(NULL, NULL, "marvell,kirkwood-gating-clock"); 246 + 247 + 248 + if (of_device_is_compatible(np, "marvell,mv88f6180-core-clock")) 249 + mvebu_coreclk_setup(np, &mv88f6180_coreclks); 250 + else 251 + mvebu_coreclk_setup(np, &kirkwood_coreclks); 252 + 253 + if (cgnp) 254 + mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc); 231 255 } 232 - CLK_OF_DECLARE(kirkwood_clk_gating, "marvell,kirkwood-gating-clock", 233 - kirkwood_clk_gating_init); 256 + CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock", 257 + kirkwood_clk_init); 258 + CLK_OF_DECLARE(mv88f6180_clk, "marvell,mv88f6180-core-clock", 259 + kirkwood_clk_init);
+44 -4
drivers/clk/shmobile/clk-rcar-gen2.c
··· 26 26 void __iomem *reg; 27 27 }; 28 28 29 + #define CPG_FRQCRB 0x00000004 30 + #define CPG_FRQCRB_KICK BIT(31) 29 31 #define CPG_SDCKCR 0x00000074 30 32 #define CPG_PLL0CR 0x000000d8 31 33 #define CPG_FRQCRC 0x000000e0 ··· 47 45 struct cpg_z_clk { 48 46 struct clk_hw hw; 49 47 void __iomem *reg; 48 + void __iomem *kick_reg; 50 49 }; 51 50 52 51 #define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw) ··· 86 83 { 87 84 struct cpg_z_clk *zclk = to_z_clk(hw); 88 85 unsigned int mult; 89 - u32 val; 86 + u32 val, kick; 87 + unsigned int i; 90 88 91 89 mult = div_u64((u64)rate * 32, parent_rate); 92 90 mult = clamp(mult, 1U, 32U); 91 + 92 + if (clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK) 93 + return -EBUSY; 93 94 94 95 val = clk_readl(zclk->reg); 95 96 val &= ~CPG_FRQCRC_ZFC_MASK; 96 97 val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT; 97 98 clk_writel(val, zclk->reg); 98 99 99 - return 0; 100 + /* 101 + * Set KICK bit in FRQCRB to update hardware setting and wait for 102 + * clock change completion. 103 + */ 104 + kick = clk_readl(zclk->kick_reg); 105 + kick |= CPG_FRQCRB_KICK; 106 + clk_writel(kick, zclk->kick_reg); 107 + 108 + /* 109 + * Note: There is no HW information about the worst case latency. 110 + * 111 + * Using experimental measurements, it seems that no more than 112 + * ~10 iterations are needed, independently of the CPU rate. 113 + * Since this value might be dependant of external xtal rate, pll1 114 + * rate or even the other emulation clocks rate, use 1000 as a 115 + * "super" safe value. 
116 + */ 117 + for (i = 1000; i; i--) { 118 + if (!(clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK)) 119 + return 0; 120 + 121 + cpu_relax(); 122 + } 123 + 124 + return -ETIMEDOUT; 100 125 } 101 126 102 127 static const struct clk_ops cpg_z_clk_ops = { ··· 151 120 init.num_parents = 1; 152 121 153 122 zclk->reg = cpg->reg + CPG_FRQCRC; 123 + zclk->kick_reg = cpg->reg + CPG_FRQCRB; 154 124 zclk->hw.init = &init; 155 125 156 126 clk = clk_register(NULL, &zclk->hw); ··· 218 186 const char *name) 219 187 { 220 188 const struct clk_div_table *table = NULL; 221 - const char *parent_name = "main"; 189 + const char *parent_name; 222 190 unsigned int shift; 223 191 unsigned int mult = 1; 224 192 unsigned int div = 1; ··· 233 201 * the multiplier value. 234 202 */ 235 203 u32 value = clk_readl(cpg->reg + CPG_PLL0CR); 204 + parent_name = "main"; 236 205 mult = ((value >> 24) & ((1 << 7) - 1)) + 1; 237 206 } else if (!strcmp(name, "pll1")) { 207 + parent_name = "main"; 238 208 mult = config->pll1_mult / 2; 239 209 } else if (!strcmp(name, "pll3")) { 210 + parent_name = "main"; 240 211 mult = config->pll3_mult; 241 212 } else if (!strcmp(name, "lb")) { 213 + parent_name = "pll1_div2"; 242 214 div = cpg_mode & BIT(18) ? 36 : 24; 243 215 } else if (!strcmp(name, "qspi")) { 216 + parent_name = "pll1_div2"; 244 217 div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2) 245 - ? 16 : 20; 218 + ? 8 : 10; 246 219 } else if (!strcmp(name, "sdh")) { 220 + parent_name = "pll1_div2"; 247 221 table = cpg_sdh_div_table; 248 222 shift = 8; 249 223 } else if (!strcmp(name, "sd0")) { 224 + parent_name = "pll1_div2"; 250 225 table = cpg_sd01_div_table; 251 226 shift = 4; 252 227 } else if (!strcmp(name, "sd1")) { 228 + parent_name = "pll1_div2"; 253 229 table = cpg_sd01_div_table; 254 230 shift = 0; 255 231 } else if (!strcmp(name, "z")) {
+1 -1
drivers/clk/tegra/clk-divider.c
··· 59 59 return 0; 60 60 61 61 if (divider_ux1 > get_max_div(divider)) 62 - return -EINVAL; 62 + return get_max_div(divider); 63 63 64 64 return divider_ux1; 65 65 }
+4
drivers/clk/tegra/clk-id.h
··· 180 180 tegra_clk_sbc6_8, 181 181 tegra_clk_sclk, 182 182 tegra_clk_sdmmc1, 183 + tegra_clk_sdmmc1_8, 183 184 tegra_clk_sdmmc2, 185 + tegra_clk_sdmmc2_8, 184 186 tegra_clk_sdmmc3, 187 + tegra_clk_sdmmc3_8, 185 188 tegra_clk_sdmmc4, 189 + tegra_clk_sdmmc4_8, 186 190 tegra_clk_se, 187 191 tegra_clk_soc_therm, 188 192 tegra_clk_sor0,
+6 -4
drivers/clk/tegra/clk-tegra-periph.c
··· 371 371 static const char *mux_pllm_pllc_pllp_plla_pllc2_c3_clkm[] = { 372 372 "pll_m", "pll_c", "pll_p", "pll_a", "pll_c2", "pll_c3", "clk_m" 373 373 }; 374 - static u32 mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx[] = { 375 - [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6, 376 - }; 374 + #define mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx NULL 377 375 378 376 static const char *mux_pllm_pllc2_c_c3_pllp_plla_pllc4[] = { 379 377 "pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0", "pll_c4", ··· 463 465 MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1), 464 466 MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1), 465 467 MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2), 468 + MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8), 469 + MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8), 470 + MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8), 471 + MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8), 466 472 MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8), 467 473 MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8), 468 474 MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8), ··· 494 492 UART("uartb", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, tegra_clk_uartb), 495 493 UART("uartc", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, tegra_clk_uartc), 496 494 UART("uartd", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, tegra_clk_uartd), 497 - UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 65, tegra_clk_uarte), 495 + UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 
66, tegra_clk_uarte), 498 496 XUSB("xusb_host_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_host_src), 499 497 XUSB("xusb_falcon_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_falcon_src), 500 498 XUSB("xusb_fs_src", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_fs_src),
+1 -1
drivers/clk/tegra/clk-tegra-super-gen4.c
··· 120 120 ARRAY_SIZE(cclk_lp_parents), 121 121 CLK_SET_RATE_PARENT, 122 122 clk_base + CCLKLP_BURST_POLICY, 123 - 0, 4, 8, 9, NULL); 123 + TEGRA_DIVIDER_2, 4, 8, 9, NULL); 124 124 *dt_clk = clk; 125 125 } 126 126
+4 -4
drivers/clk/tegra/clk-tegra114.c
··· 682 682 [tegra_clk_timer] = { .dt_id = TEGRA114_CLK_TIMER, .present = true }, 683 683 [tegra_clk_uarta] = { .dt_id = TEGRA114_CLK_UARTA, .present = true }, 684 684 [tegra_clk_uartd] = { .dt_id = TEGRA114_CLK_UARTD, .present = true }, 685 - [tegra_clk_sdmmc2] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true }, 685 + [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true }, 686 686 [tegra_clk_i2s1] = { .dt_id = TEGRA114_CLK_I2S1, .present = true }, 687 687 [tegra_clk_i2c1] = { .dt_id = TEGRA114_CLK_I2C1, .present = true }, 688 688 [tegra_clk_ndflash] = { .dt_id = TEGRA114_CLK_NDFLASH, .present = true }, 689 - [tegra_clk_sdmmc1] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true }, 690 - [tegra_clk_sdmmc4] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true }, 689 + [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true }, 690 + [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true }, 691 691 [tegra_clk_pwm] = { .dt_id = TEGRA114_CLK_PWM, .present = true }, 692 692 [tegra_clk_i2s0] = { .dt_id = TEGRA114_CLK_I2S0, .present = true }, 693 693 [tegra_clk_i2s2] = { .dt_id = TEGRA114_CLK_I2S2, .present = true }, ··· 723 723 [tegra_clk_bsev] = { .dt_id = TEGRA114_CLK_BSEV, .present = true }, 724 724 [tegra_clk_i2c3] = { .dt_id = TEGRA114_CLK_I2C3, .present = true }, 725 725 [tegra_clk_sbc4_8] = { .dt_id = TEGRA114_CLK_SBC4, .present = true }, 726 - [tegra_clk_sdmmc3] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true }, 726 + [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true }, 727 727 [tegra_clk_owr] = { .dt_id = TEGRA114_CLK_OWR, .present = true }, 728 728 [tegra_clk_csite] = { .dt_id = TEGRA114_CLK_CSITE, .present = true }, 729 729 [tegra_clk_la] = { .dt_id = TEGRA114_CLK_LA, .present = true },
+27 -21
drivers/clk/tegra/clk-tegra124.c
··· 516 516 }; 517 517 518 518 static struct tegra_clk_pll_freq_table pll_p_freq_table[] = { 519 - {12000000, 216000000, 432, 12, 1, 8}, 520 - {13000000, 216000000, 432, 13, 1, 8}, 521 - {16800000, 216000000, 360, 14, 1, 8}, 522 - {19200000, 216000000, 360, 16, 1, 8}, 523 - {26000000, 216000000, 432, 26, 1, 8}, 519 + {12000000, 408000000, 408, 12, 0, 8}, 520 + {13000000, 408000000, 408, 13, 0, 8}, 521 + {16800000, 408000000, 340, 14, 0, 8}, 522 + {19200000, 408000000, 340, 16, 0, 8}, 523 + {26000000, 408000000, 408, 26, 0, 8}, 524 524 {0, 0, 0, 0, 0, 0}, 525 525 }; 526 526 ··· 570 570 .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK, 571 571 }; 572 572 573 + static struct div_nmp plld_nmp = { 574 + .divm_shift = 0, 575 + .divm_width = 5, 576 + .divn_shift = 8, 577 + .divn_width = 11, 578 + .divp_shift = 20, 579 + .divp_width = 3, 580 + }; 581 + 573 582 static struct tegra_clk_pll_freq_table pll_d_freq_table[] = { 574 583 {12000000, 216000000, 864, 12, 4, 12}, 575 584 {13000000, 216000000, 864, 13, 4, 12}, ··· 612 603 .lock_mask = PLL_BASE_LOCK, 613 604 .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE, 614 605 .lock_delay = 1000, 615 - .div_nmp = &pllp_nmp, 606 + .div_nmp = &plld_nmp, 616 607 .freq_table = pll_d_freq_table, 617 608 .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON | 618 609 TEGRA_PLL_USE_LOCK, 619 610 }; 620 611 621 612 static struct tegra_clk_pll_freq_table tegra124_pll_d2_freq_table[] = { 622 - { 12000000, 148500000, 99, 1, 8}, 623 - { 12000000, 594000000, 99, 1, 1}, 624 - { 13000000, 594000000, 91, 1, 1}, /* actual: 591.5 MHz */ 625 - { 16800000, 594000000, 71, 1, 1}, /* actual: 596.4 MHz */ 626 - { 19200000, 594000000, 62, 1, 1}, /* actual: 595.2 MHz */ 627 - { 26000000, 594000000, 91, 2, 1}, /* actual: 591.5 MHz */ 613 + { 12000000, 594000000, 99, 1, 2}, 614 + { 13000000, 594000000, 91, 1, 2}, /* actual: 591.5 MHz */ 615 + { 16800000, 594000000, 71, 1, 2}, /* actual: 596.4 MHz */ 616 + { 19200000, 594000000, 62, 1, 2}, /* actual: 595.2 MHz */ 
617 + { 26000000, 594000000, 91, 2, 2}, /* actual: 591.5 MHz */ 628 618 { 0, 0, 0, 0, 0, 0 }, 629 619 }; 630 620 ··· 761 753 [tegra_clk_rtc] = { .dt_id = TEGRA124_CLK_RTC, .present = true }, 762 754 [tegra_clk_timer] = { .dt_id = TEGRA124_CLK_TIMER, .present = true }, 763 755 [tegra_clk_uarta] = { .dt_id = TEGRA124_CLK_UARTA, .present = true }, 764 - [tegra_clk_sdmmc2] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true }, 756 + [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true }, 765 757 [tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true }, 766 758 [tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true }, 767 759 [tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true }, 768 - [tegra_clk_sdmmc1] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true }, 769 - [tegra_clk_sdmmc4] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true }, 760 + [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true }, 761 + [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true }, 770 762 [tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true }, 771 763 [tegra_clk_i2s2] = { .dt_id = TEGRA124_CLK_I2S2, .present = true }, 772 - [tegra_clk_gr2d] = { .dt_id = TEGRA124_CLK_GR_2D, .present = true }, 773 764 [tegra_clk_usbd] = { .dt_id = TEGRA124_CLK_USBD, .present = true }, 774 765 [tegra_clk_isp_8] = { .dt_id = TEGRA124_CLK_ISP, .present = true }, 775 - [tegra_clk_gr3d] = { .dt_id = TEGRA124_CLK_GR_3D, .present = true }, 776 766 [tegra_clk_disp2] = { .dt_id = TEGRA124_CLK_DISP2, .present = true }, 777 767 [tegra_clk_disp1] = { .dt_id = TEGRA124_CLK_DISP1, .present = true }, 778 - [tegra_clk_host1x] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true }, 768 + [tegra_clk_host1x_8] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true }, 779 769 [tegra_clk_vcp] = { .dt_id = TEGRA124_CLK_VCP, .present = true }, 780 770 [tegra_clk_i2s0] = { .dt_id = TEGRA124_CLK_I2S0, .present = true }, 781 771 [tegra_clk_apbdma] = 
{ .dt_id = TEGRA124_CLK_APBDMA, .present = true }, ··· 800 794 [tegra_clk_uartd] = { .dt_id = TEGRA124_CLK_UARTD, .present = true }, 801 795 [tegra_clk_i2c3] = { .dt_id = TEGRA124_CLK_I2C3, .present = true }, 802 796 [tegra_clk_sbc4] = { .dt_id = TEGRA124_CLK_SBC4, .present = true }, 803 - [tegra_clk_sdmmc3] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true }, 797 + [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true }, 804 798 [tegra_clk_pcie] = { .dt_id = TEGRA124_CLK_PCIE, .present = true }, 805 799 [tegra_clk_owr] = { .dt_id = TEGRA124_CLK_OWR, .present = true }, 806 800 [tegra_clk_afi] = { .dt_id = TEGRA124_CLK_AFI, .present = true }, ··· 1292 1286 clk_register_clkdev(clk, "pll_d2", NULL); 1293 1287 clks[TEGRA124_CLK_PLL_D2] = clk; 1294 1288 1295 - /* PLLD2_OUT0 ?? */ 1289 + /* PLLD2_OUT0 */ 1296 1290 clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2", 1297 - CLK_SET_RATE_PARENT, 1, 2); 1291 + CLK_SET_RATE_PARENT, 1, 1); 1298 1292 clk_register_clkdev(clk, "pll_d2_out0", NULL); 1299 1293 clks[TEGRA124_CLK_PLL_D2_OUT0] = clk; 1300 1294
+2
drivers/clk/tegra/clk-tegra20.c
··· 574 574 [tegra_clk_tvdac] = { .dt_id = TEGRA20_CLK_TVDAC, .present = true }, 575 575 [tegra_clk_vi_sensor] = { .dt_id = TEGRA20_CLK_VI_SENSOR, .present = true }, 576 576 [tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true }, 577 + [tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true }, 578 + [tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true }, 577 579 }; 578 580 579 581 static unsigned long tegra20_clk_measure_input_freq(void)
+24 -30
drivers/cpufreq/cpufreq.c
··· 1109 1109 goto err_set_policy_cpu; 1110 1110 } 1111 1111 1112 + /* related cpus should atleast have policy->cpus */ 1113 + cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 1114 + 1115 + /* 1116 + * affected cpus must always be the one, which are online. We aren't 1117 + * managing offline cpus here. 1118 + */ 1119 + cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 1120 + 1121 + if (!frozen) { 1122 + policy->user_policy.min = policy->min; 1123 + policy->user_policy.max = policy->max; 1124 + } 1125 + 1126 + down_write(&policy->rwsem); 1112 1127 write_lock_irqsave(&cpufreq_driver_lock, flags); 1113 1128 for_each_cpu(j, policy->cpus) 1114 1129 per_cpu(cpufreq_cpu_data, j) = policy; ··· 1177 1162 } 1178 1163 } 1179 1164 1180 - /* related cpus should atleast have policy->cpus */ 1181 - cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 1182 - 1183 - /* 1184 - * affected cpus must always be the one, which are online. We aren't 1185 - * managing offline cpus here. 
1186 - */ 1187 - cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 1188 - 1189 - if (!frozen) { 1190 - policy->user_policy.min = policy->min; 1191 - policy->user_policy.max = policy->max; 1192 - } 1193 - 1194 1165 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1195 1166 CPUFREQ_START, policy); 1196 1167 ··· 1207 1206 policy->user_policy.policy = policy->policy; 1208 1207 policy->user_policy.governor = policy->governor; 1209 1208 } 1209 + up_write(&policy->rwsem); 1210 1210 1211 1211 kobject_uevent(&policy->kobj, KOBJ_ADD); 1212 1212 up_read(&cpufreq_rwsem); ··· 1325 1323 up_read(&policy->rwsem); 1326 1324 1327 1325 if (cpu != policy->cpu) { 1328 - if (!frozen) 1329 - sysfs_remove_link(&dev->kobj, "cpufreq"); 1326 + sysfs_remove_link(&dev->kobj, "cpufreq"); 1330 1327 } else if (cpus > 1) { 1331 1328 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu); 1332 1329 if (new_cpu >= 0) { ··· 1548 1547 */ 1549 1548 unsigned int cpufreq_get(unsigned int cpu) 1550 1549 { 1551 - struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 1550 + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 1552 1551 unsigned int ret_freq = 0; 1553 1552 1554 - if (cpufreq_disabled() || !cpufreq_driver) 1555 - return -ENOENT; 1553 + if (policy) { 1554 + down_read(&policy->rwsem); 1555 + ret_freq = __cpufreq_get(cpu); 1556 + up_read(&policy->rwsem); 1556 1557 1557 - BUG_ON(!policy); 1558 - 1559 - if (!down_read_trylock(&cpufreq_rwsem)) 1560 - return 0; 1561 - 1562 - down_read(&policy->rwsem); 1563 - 1564 - ret_freq = __cpufreq_get(cpu); 1565 - 1566 - up_read(&policy->rwsem); 1567 - up_read(&cpufreq_rwsem); 1558 + cpufreq_cpu_put(policy); 1559 + } 1568 1560 1569 1561 return ret_freq; 1570 1562 }
+31 -14
drivers/cpufreq/intel_pstate.c
··· 34 34 35 35 #define SAMPLE_COUNT 3 36 36 37 - #define BYT_RATIOS 0x66a 38 - #define BYT_VIDS 0x66b 37 + #define BYT_RATIOS 0x66a 38 + #define BYT_VIDS 0x66b 39 + #define BYT_TURBO_RATIOS 0x66c 39 40 40 - #define FRAC_BITS 8 41 + 42 + #define FRAC_BITS 6 41 43 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 42 44 #define fp_toint(X) ((X) >> FRAC_BITS) 45 + #define FP_ROUNDUP(X) ((X) += 1 << FRAC_BITS) 43 46 44 47 static inline int32_t mul_fp(int32_t x, int32_t y) 45 48 { ··· 360 357 { 361 358 u64 value; 362 359 rdmsrl(BYT_RATIOS, value); 363 - return value & 0xFF; 360 + return (value >> 8) & 0xFF; 364 361 } 365 362 366 363 static int byt_get_max_pstate(void) ··· 368 365 u64 value; 369 366 rdmsrl(BYT_RATIOS, value); 370 367 return (value >> 16) & 0xFF; 368 + } 369 + 370 + static int byt_get_turbo_pstate(void) 371 + { 372 + u64 value; 373 + rdmsrl(BYT_TURBO_RATIOS, value); 374 + return value & 0x3F; 371 375 } 372 376 373 377 static void byt_set_pstate(struct cpudata *cpudata, int pstate) ··· 479 469 .funcs = { 480 470 .get_max = byt_get_max_pstate, 481 471 .get_min = byt_get_min_pstate, 482 - .get_turbo = byt_get_max_pstate, 472 + .get_turbo = byt_get_turbo_pstate, 483 473 .set = byt_set_pstate, 484 474 .get_vid = byt_get_vid, 485 475 }, ··· 557 547 static inline void intel_pstate_calc_busy(struct cpudata *cpu, 558 548 struct sample *sample) 559 549 { 560 - u64 core_pct; 561 - u64 c0_pct; 550 + int32_t core_pct; 551 + int32_t c0_pct; 562 552 563 - core_pct = div64_u64(sample->aperf * 100, sample->mperf); 553 + core_pct = div_fp(int_tofp((sample->aperf)), 554 + int_tofp((sample->mperf))); 555 + core_pct = mul_fp(core_pct, int_tofp(100)); 556 + FP_ROUNDUP(core_pct); 564 557 565 - c0_pct = div64_u64(sample->mperf * 100, sample->tsc); 558 + c0_pct = div_fp(int_tofp(sample->mperf), int_tofp(sample->tsc)); 559 + 566 560 sample->freq = fp_toint( 567 - mul_fp(int_tofp(cpu->pstate.max_pstate), 568 - int_tofp(core_pct * 1000))); 561 + mul_fp(int_tofp(cpu->pstate.max_pstate 
* 1000), core_pct)); 569 562 570 - sample->core_pct_busy = mul_fp(int_tofp(core_pct), 571 - div_fp(int_tofp(c0_pct + 1), int_tofp(100))); 563 + sample->core_pct_busy = mul_fp(core_pct, c0_pct); 572 564 } 573 565 574 566 static inline void intel_pstate_sample(struct cpudata *cpu) ··· 581 569 rdmsrl(MSR_IA32_APERF, aperf); 582 570 rdmsrl(MSR_IA32_MPERF, mperf); 583 571 tsc = native_read_tsc(); 572 + 573 + aperf = aperf >> FRAC_BITS; 574 + mperf = mperf >> FRAC_BITS; 575 + tsc = tsc >> FRAC_BITS; 584 576 585 577 cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; 586 578 cpu->samples[cpu->sample_ptr].aperf = aperf; ··· 617 601 core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy; 618 602 max_pstate = int_tofp(cpu->pstate.max_pstate); 619 603 current_pstate = int_tofp(cpu->pstate.current_pstate); 620 - return mul_fp(core_busy, div_fp(max_pstate, current_pstate)); 604 + core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); 605 + return FP_ROUNDUP(core_busy); 621 606 } 622 607 623 608 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
+7 -3
drivers/cpufreq/powernow-k8.c
··· 1076 1076 { 1077 1077 struct powernow_k8_data *data; 1078 1078 struct init_on_cpu init_on_cpu; 1079 - int rc; 1079 + int rc, cpu; 1080 1080 1081 1081 smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1); 1082 1082 if (rc) ··· 1140 1140 pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", 1141 1141 data->currfid, data->currvid); 1142 1142 1143 - per_cpu(powernow_data, pol->cpu) = data; 1143 + /* Point all the CPUs in this policy to the same data */ 1144 + for_each_cpu(cpu, pol->cpus) 1145 + per_cpu(powernow_data, cpu) = data; 1144 1146 1145 1147 return 0; 1146 1148 ··· 1157 1155 static int powernowk8_cpu_exit(struct cpufreq_policy *pol) 1158 1156 { 1159 1157 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); 1158 + int cpu; 1160 1159 1161 1160 if (!data) 1162 1161 return -EINVAL; ··· 1168 1165 1169 1166 kfree(data->powernow_table); 1170 1167 kfree(data); 1171 - per_cpu(powernow_data, pol->cpu) = NULL; 1168 + for_each_cpu(cpu, pol->cpus) 1169 + per_cpu(powernow_data, cpu) = NULL; 1172 1170 1173 1171 return 0; 1174 1172 }
+1
drivers/dma/imx-sdma.c
··· 449 449 { .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, }, 450 450 { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, }, 451 451 { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, }, 452 + { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, }, 452 453 { /* sentinel */ } 453 454 }; 454 455 MODULE_DEVICE_TABLE(of, sdma_dt_ids);
+45 -7
drivers/dma/ioat/dma.c
··· 77 77 attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); 78 78 for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) { 79 79 chan = ioat_chan_by_index(instance, bit); 80 - tasklet_schedule(&chan->cleanup_task); 80 + if (test_bit(IOAT_RUN, &chan->state)) 81 + tasklet_schedule(&chan->cleanup_task); 81 82 } 82 83 83 84 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); ··· 94 93 { 95 94 struct ioat_chan_common *chan = data; 96 95 97 - tasklet_schedule(&chan->cleanup_task); 96 + if (test_bit(IOAT_RUN, &chan->state)) 97 + tasklet_schedule(&chan->cleanup_task); 98 98 99 99 return IRQ_HANDLED; 100 100 } ··· 118 116 chan->timer.function = device->timer_fn; 119 117 chan->timer.data = data; 120 118 tasklet_init(&chan->cleanup_task, device->cleanup_fn, data); 121 - tasklet_disable(&chan->cleanup_task); 122 119 } 123 120 124 121 /** ··· 355 354 writel(((u64) chan->completion_dma) >> 32, 356 355 chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); 357 356 358 - tasklet_enable(&chan->cleanup_task); 357 + set_bit(IOAT_RUN, &chan->state); 359 358 ioat1_dma_start_null_desc(ioat); /* give chain to dma device */ 360 359 dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", 361 360 __func__, ioat->desccount); 362 361 return ioat->desccount; 362 + } 363 + 364 + void ioat_stop(struct ioat_chan_common *chan) 365 + { 366 + struct ioatdma_device *device = chan->device; 367 + struct pci_dev *pdev = device->pdev; 368 + int chan_id = chan_num(chan); 369 + struct msix_entry *msix; 370 + 371 + /* 1/ stop irq from firing tasklets 372 + * 2/ stop the tasklet from re-arming irqs 373 + */ 374 + clear_bit(IOAT_RUN, &chan->state); 375 + 376 + /* flush inflight interrupts */ 377 + switch (device->irq_mode) { 378 + case IOAT_MSIX: 379 + msix = &device->msix_entries[chan_id]; 380 + synchronize_irq(msix->vector); 381 + break; 382 + case IOAT_MSI: 383 + case IOAT_INTX: 384 + synchronize_irq(pdev->irq); 385 + break; 386 + default: 387 + break; 388 + } 389 + 390 + /* flush inflight timers 
*/ 391 + del_timer_sync(&chan->timer); 392 + 393 + /* flush inflight tasklet runs */ 394 + tasklet_kill(&chan->cleanup_task); 395 + 396 + /* final cleanup now that everything is quiesced and can't re-arm */ 397 + device->cleanup_fn((unsigned long) &chan->common); 363 398 } 364 399 365 400 /** ··· 416 379 if (ioat->desccount == 0) 417 380 return; 418 381 419 - tasklet_disable(&chan->cleanup_task); 420 - del_timer_sync(&chan->timer); 421 - ioat1_cleanup(ioat); 382 + ioat_stop(chan); 422 383 423 384 /* Delay 100ms after reset to allow internal DMA logic to quiesce 424 385 * before removing DMA descriptor resources. ··· 561 526 static void ioat1_cleanup_event(unsigned long data) 562 527 { 563 528 struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); 529 + struct ioat_chan_common *chan = &ioat->base; 564 530 565 531 ioat1_cleanup(ioat); 532 + if (!test_bit(IOAT_RUN, &chan->state)) 533 + return; 566 534 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); 567 535 } 568 536
+1
drivers/dma/ioat/dma.h
··· 356 356 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); 357 357 void ioat_kobject_del(struct ioatdma_device *device); 358 358 int ioat_dma_setup_interrupts(struct ioatdma_device *device); 359 + void ioat_stop(struct ioat_chan_common *chan); 359 360 extern const struct sysfs_ops ioat_sysfs_ops; 360 361 extern struct ioat_sysfs_entry ioat_version_attr; 361 362 extern struct ioat_sysfs_entry ioat_cap_attr;
+5 -6
drivers/dma/ioat/dma_v2.c
··· 190 190 void ioat2_cleanup_event(unsigned long data) 191 191 { 192 192 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); 193 + struct ioat_chan_common *chan = &ioat->base; 193 194 194 195 ioat2_cleanup(ioat); 196 + if (!test_bit(IOAT_RUN, &chan->state)) 197 + return; 195 198 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); 196 199 } 197 200 ··· 556 553 ioat->issued = 0; 557 554 ioat->tail = 0; 558 555 ioat->alloc_order = order; 556 + set_bit(IOAT_RUN, &chan->state); 559 557 spin_unlock_bh(&ioat->prep_lock); 560 558 spin_unlock_bh(&chan->cleanup_lock); 561 559 562 - tasklet_enable(&chan->cleanup_task); 563 560 ioat2_start_null_desc(ioat); 564 561 565 562 /* check that we got off the ground */ ··· 569 566 } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status)); 570 567 571 568 if (is_ioat_active(status) || is_ioat_idle(status)) { 572 - set_bit(IOAT_RUN, &chan->state); 573 569 return 1 << ioat->alloc_order; 574 570 } else { 575 571 u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); ··· 811 809 if (!ioat->ring) 812 810 return; 813 811 814 - tasklet_disable(&chan->cleanup_task); 815 - del_timer_sync(&chan->timer); 816 - device->cleanup_fn((unsigned long) c); 812 + ioat_stop(chan); 817 813 device->reset_hw(chan); 818 - clear_bit(IOAT_RUN, &chan->state); 819 814 820 815 spin_lock_bh(&chan->cleanup_lock); 821 816 spin_lock_bh(&ioat->prep_lock);
+3
drivers/dma/ioat/dma_v3.c
··· 464 464 static void ioat3_cleanup_event(unsigned long data) 465 465 { 466 466 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); 467 + struct ioat_chan_common *chan = &ioat->base; 467 468 468 469 ioat3_cleanup(ioat); 470 + if (!test_bit(IOAT_RUN, &chan->state)) 471 + return; 469 472 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); 470 473 } 471 474
+3 -1
drivers/dma/ste_dma40.c
··· 1641 1641 struct d40_chan *d40c = (struct d40_chan *) data; 1642 1642 struct d40_desc *d40d; 1643 1643 unsigned long flags; 1644 + bool callback_active; 1644 1645 dma_async_tx_callback callback; 1645 1646 void *callback_param; 1646 1647 ··· 1669 1668 } 1670 1669 1671 1670 /* Callback to client */ 1671 + callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT); 1672 1672 callback = d40d->txd.callback; 1673 1673 callback_param = d40d->txd.callback_param; 1674 1674 ··· 1692 1690 1693 1691 spin_unlock_irqrestore(&d40c->lock, flags); 1694 1692 1695 - if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT)) 1693 + if (callback_active && callback) 1696 1694 callback(callback_param); 1697 1695 1698 1696 return;
+20 -18
drivers/edac/i7300_edac.c
··· 943 943 944 944 /* Attempt to 'get' the MCH register we want */ 945 945 pdev = NULL; 946 - while (!pvt->pci_dev_16_1_fsb_addr_map || 947 - !pvt->pci_dev_16_2_fsb_err_regs) { 948 - pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 949 - PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev); 950 - if (!pdev) { 951 - /* End of list, leave */ 952 - i7300_printk(KERN_ERR, 953 - "'system address,Process Bus' " 954 - "device not found:" 955 - "vendor 0x%x device 0x%x ERR funcs " 956 - "(broken BIOS?)\n", 957 - PCI_VENDOR_ID_INTEL, 958 - PCI_DEVICE_ID_INTEL_I7300_MCH_ERR); 959 - goto error; 960 - } 961 - 946 + while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 947 + PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, 948 + pdev))) { 962 949 /* Store device 16 funcs 1 and 2 */ 963 950 switch (PCI_FUNC(pdev->devfn)) { 964 951 case 1: 965 - pvt->pci_dev_16_1_fsb_addr_map = pdev; 952 + if (!pvt->pci_dev_16_1_fsb_addr_map) 953 + pvt->pci_dev_16_1_fsb_addr_map = 954 + pci_dev_get(pdev); 966 955 break; 967 956 case 2: 968 - pvt->pci_dev_16_2_fsb_err_regs = pdev; 957 + if (!pvt->pci_dev_16_2_fsb_err_regs) 958 + pvt->pci_dev_16_2_fsb_err_regs = 959 + pci_dev_get(pdev); 969 960 break; 970 961 } 962 + } 963 + 964 + if (!pvt->pci_dev_16_1_fsb_addr_map || 965 + !pvt->pci_dev_16_2_fsb_err_regs) { 966 + /* At least one device was not found */ 967 + i7300_printk(KERN_ERR, 968 + "'system address,Process Bus' device not found:" 969 + "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n", 970 + PCI_VENDOR_ID_INTEL, 971 + PCI_DEVICE_ID_INTEL_I7300_MCH_ERR); 972 + goto error; 971 973 } 972 974 973 975 edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
+7 -2
drivers/edac/i7core_edac.c
··· 1334 1334 * is at addr 8086:2c40, instead of 8086:2c41. So, we need 1335 1335 * to probe for the alternate address in case of failure 1336 1336 */ 1337 - if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) 1337 + if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) { 1338 + pci_dev_get(*prev); /* pci_get_device will put it */ 1338 1339 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 1339 1340 PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev); 1341 + } 1340 1342 1341 - if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev) 1343 + if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && 1344 + !pdev) { 1345 + pci_dev_get(*prev); /* pci_get_device will put it */ 1342 1346 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 1343 1347 PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT, 1344 1348 *prev); 1349 + } 1345 1350 1346 1351 if (!pdev) { 1347 1352 if (*prev) {
-12
drivers/extcon/extcon-arizona.c
··· 222 222 struct snd_soc_dapm_context *dapm = arizona->dapm; 223 223 int ret; 224 224 225 - mutex_lock(&dapm->card->dapm_mutex); 226 - 227 225 ret = snd_soc_dapm_force_enable_pin(dapm, widget); 228 226 if (ret != 0) 229 227 dev_warn(arizona->dev, "Failed to enable %s: %d\n", 230 228 widget, ret); 231 229 232 - mutex_unlock(&dapm->card->dapm_mutex); 233 - 234 230 snd_soc_dapm_sync(dapm); 235 231 236 232 if (!arizona->pdata.micd_force_micbias) { 237 - mutex_lock(&dapm->card->dapm_mutex); 238 - 239 233 ret = snd_soc_dapm_disable_pin(arizona->dapm, widget); 240 234 if (ret != 0) 241 235 dev_warn(arizona->dev, "Failed to disable %s: %d\n", 242 236 widget, ret); 243 - 244 - mutex_unlock(&dapm->card->dapm_mutex); 245 237 246 238 snd_soc_dapm_sync(dapm); 247 239 } ··· 296 304 ARIZONA_MICD_ENA, 0, 297 305 &change); 298 306 299 - mutex_lock(&dapm->card->dapm_mutex); 300 - 301 307 ret = snd_soc_dapm_disable_pin(dapm, widget); 302 308 if (ret != 0) 303 309 dev_warn(arizona->dev, 304 310 "Failed to disable %s: %d\n", 305 311 widget, ret); 306 - 307 - mutex_unlock(&dapm->card->dapm_mutex); 308 312 309 313 snd_soc_dapm_sync(dapm); 310 314
+15 -7
drivers/firewire/core-device.c
··· 916 916 old->config_rom_retries = 0; 917 917 fw_notice(card, "rediscovered device %s\n", dev_name(dev)); 918 918 919 - PREPARE_DELAYED_WORK(&old->work, fw_device_update); 919 + old->workfn = fw_device_update; 920 920 fw_schedule_device_work(old, 0); 921 921 922 922 if (current_node == card->root_node) ··· 1075 1075 if (atomic_cmpxchg(&device->state, 1076 1076 FW_DEVICE_INITIALIZING, 1077 1077 FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { 1078 - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); 1078 + device->workfn = fw_device_shutdown; 1079 1079 fw_schedule_device_work(device, SHUTDOWN_DELAY); 1080 1080 } else { 1081 1081 fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n", ··· 1196 1196 dev_name(&device->device), fw_rcode_string(ret)); 1197 1197 gone: 1198 1198 atomic_set(&device->state, FW_DEVICE_GONE); 1199 - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); 1199 + device->workfn = fw_device_shutdown; 1200 1200 fw_schedule_device_work(device, SHUTDOWN_DELAY); 1201 1201 out: 1202 1202 if (node_id == card->root_node->node_id) 1203 1203 fw_schedule_bm_work(card, 0); 1204 + } 1205 + 1206 + static void fw_device_workfn(struct work_struct *work) 1207 + { 1208 + struct fw_device *device = container_of(to_delayed_work(work), 1209 + struct fw_device, work); 1210 + device->workfn(work); 1204 1211 } 1205 1212 1206 1213 void fw_node_event(struct fw_card *card, struct fw_node *node, int event) ··· 1259 1252 * power-up after getting plugged in. We schedule the 1260 1253 * first config rom scan half a second after bus reset. 
1261 1254 */ 1262 - INIT_DELAYED_WORK(&device->work, fw_device_init); 1255 + device->workfn = fw_device_init; 1256 + INIT_DELAYED_WORK(&device->work, fw_device_workfn); 1263 1257 fw_schedule_device_work(device, INITIAL_DELAY); 1264 1258 break; 1265 1259 ··· 1276 1268 if (atomic_cmpxchg(&device->state, 1277 1269 FW_DEVICE_RUNNING, 1278 1270 FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) { 1279 - PREPARE_DELAYED_WORK(&device->work, fw_device_refresh); 1271 + device->workfn = fw_device_refresh; 1280 1272 fw_schedule_device_work(device, 1281 1273 device->is_local ? 0 : INITIAL_DELAY); 1282 1274 } ··· 1291 1283 smp_wmb(); /* update node_id before generation */ 1292 1284 device->generation = card->generation; 1293 1285 if (atomic_read(&device->state) == FW_DEVICE_RUNNING) { 1294 - PREPARE_DELAYED_WORK(&device->work, fw_device_update); 1286 + device->workfn = fw_device_update; 1295 1287 fw_schedule_device_work(device, 0); 1296 1288 } 1297 1289 break; ··· 1316 1308 device = node->data; 1317 1309 if (atomic_xchg(&device->state, 1318 1310 FW_DEVICE_GONE) == FW_DEVICE_RUNNING) { 1319 - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); 1311 + device->workfn = fw_device_shutdown; 1320 1312 fw_schedule_device_work(device, 1321 1313 list_empty(&card->link) ? 0 : SHUTDOWN_DELAY); 1322 1314 }
+3 -3
drivers/firewire/net.c
··· 929 929 if (rcode == RCODE_COMPLETE) { 930 930 fwnet_transmit_packet_done(ptask); 931 931 } else { 932 - fwnet_transmit_packet_failed(ptask); 933 - 934 932 if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { 935 933 dev_err(&ptask->dev->netdev->dev, 936 934 "fwnet_write_complete failed: %x (skipped %d)\n", ··· 936 938 937 939 errors_skipped = 0; 938 940 last_rcode = rcode; 939 - } else 941 + } else { 940 942 errors_skipped++; 943 + } 944 + fwnet_transmit_packet_failed(ptask); 941 945 } 942 946 } 943 947
+2 -13
drivers/firewire/ohci.c
··· 290 290 #define QUIRK_NO_MSI 0x10 291 291 #define QUIRK_TI_SLLZ059 0x20 292 292 #define QUIRK_IR_WAKE 0x40 293 - #define QUIRK_PHY_LCTRL_TIMEOUT 0x80 294 293 295 294 /* In case of multiple matches in ohci_quirks[], only the first one is used. */ 296 295 static const struct { ··· 302 303 QUIRK_BE_HEADERS}, 303 304 304 305 {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, 305 - QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI}, 306 - 307 - {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID, 308 - QUIRK_PHY_LCTRL_TIMEOUT}, 306 + QUIRK_NO_MSI}, 309 307 310 308 {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID, 311 309 QUIRK_RESET_PACKET}, ··· 349 353 ", disable MSI = " __stringify(QUIRK_NO_MSI) 350 354 ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059) 351 355 ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE) 352 - ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT) 353 356 ")"); 354 357 355 358 #define OHCI_PARAM_DEBUG_AT_AR 1 ··· 2294 2299 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but 2295 2300 * cannot actually use the phy at that time. These need tens of 2296 2301 * millisecods pause between LPS write and first phy access too. 2297 - * 2298 - * But do not wait for 50msec on Agere/LSI cards. Their phy 2299 - * arbitration state machine may time out during such a long wait. 2300 2302 */ 2301 2303 2302 2304 reg_write(ohci, OHCI1394_HCControlSet, ··· 2301 2309 OHCI1394_HCControl_postedWriteEnable); 2302 2310 flush_writes(ohci); 2303 2311 2304 - if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT)) 2312 + for (lps = 0, i = 0; !lps && i < 3; i++) { 2305 2313 msleep(50); 2306 - 2307 - for (lps = 0, i = 0; !lps && i < 150; i++) { 2308 - msleep(1); 2309 2314 lps = reg_read(ohci, OHCI1394_HCControlSet) & 2310 2315 OHCI1394_HCControl_LPS; 2311 2316 }
+13 -4
drivers/firewire/sbp2.c
··· 146 146 */ 147 147 int generation; 148 148 int retries; 149 + work_func_t workfn; 149 150 struct delayed_work work; 150 151 bool has_sdev; 151 152 bool blocked; ··· 865 864 /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ 866 865 sbp2_set_busy_timeout(lu); 867 866 868 - PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); 867 + lu->workfn = sbp2_reconnect; 869 868 sbp2_agent_reset(lu); 870 869 871 870 /* This was a re-login. */ ··· 919 918 * If a bus reset happened, sbp2_update will have requeued 920 919 * lu->work already. Reset the work from reconnect to login. 921 920 */ 922 - PREPARE_DELAYED_WORK(&lu->work, sbp2_login); 921 + lu->workfn = sbp2_login; 923 922 } 924 923 925 924 static void sbp2_reconnect(struct work_struct *work) ··· 953 952 lu->retries++ >= 5) { 954 953 dev_err(tgt_dev(tgt), "failed to reconnect\n"); 955 954 lu->retries = 0; 956 - PREPARE_DELAYED_WORK(&lu->work, sbp2_login); 955 + lu->workfn = sbp2_login; 957 956 } 958 957 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); 959 958 ··· 971 970 sbp2_agent_reset(lu); 972 971 sbp2_cancel_orbs(lu); 973 972 sbp2_conditionally_unblock(lu); 973 + } 974 + 975 + static void sbp2_lu_workfn(struct work_struct *work) 976 + { 977 + struct sbp2_logical_unit *lu = container_of(to_delayed_work(work), 978 + struct sbp2_logical_unit, work); 979 + lu->workfn(work); 974 980 } 975 981 976 982 static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) ··· 1006 998 lu->blocked = false; 1007 999 ++tgt->dont_block; 1008 1000 INIT_LIST_HEAD(&lu->orb_list); 1009 - INIT_DELAYED_WORK(&lu->work, sbp2_login); 1001 + lu->workfn = sbp2_login; 1002 + INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn); 1010 1003 1011 1004 list_add_tail(&lu->link, &tgt->lu_list); 1012 1005 return 0;
+1 -1
drivers/fmc/fmc-write-eeprom.c
··· 27 27 /* The "file=" is like the generic "gateware=" used elsewhere */ 28 28 static char *fwe_file[FMC_MAX_CARDS]; 29 29 static int fwe_file_n; 30 - module_param_array_named(file, fwe_file, charp, &fwe_file_n, 444); 30 + module_param_array_named(file, fwe_file, charp, &fwe_file_n, 0444); 31 31 32 32 static int fwe_run_tlv(struct fmc_device *fmc, const struct firmware *fw, 33 33 int write)
+1 -9
drivers/gpu/drm/armada/armada_drv.c
··· 68 68 { 69 69 struct armada_private *priv = dev->dev_private; 70 70 71 - /* 72 - * Yes, we really must jump through these hoops just to store a 73 - * _pointer_ to something into the kfifo. This is utterly insane 74 - * and idiotic, because it kfifo requires the _data_ pointed to by 75 - * the pointer const, not the pointer itself. Not only that, but 76 - * you have to pass a pointer _to_ the pointer you want stored. 77 - */ 78 - const struct drm_framebuffer *silly_api_alert = fb; 79 - WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert)); 71 + WARN_ON(!kfifo_put(&priv->fb_unref, fb)); 80 72 schedule_work(&priv->fb_unref_work); 81 73 } 82 74
+1
drivers/gpu/drm/bochs/Kconfig
··· 2 2 tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" 3 3 depends on DRM && PCI 4 4 select DRM_KMS_HELPER 5 + select DRM_KMS_FB_HELPER 5 6 select FB_SYS_FILLRECT 6 7 select FB_SYS_COPYAREA 7 8 select FB_SYS_IMAGEBLIT
+12
drivers/gpu/drm/drm_ioctl.c
··· 296 296 case DRM_CAP_ASYNC_PAGE_FLIP: 297 297 req->value = dev->mode_config.async_page_flip; 298 298 break; 299 + case DRM_CAP_CURSOR_WIDTH: 300 + if (dev->mode_config.cursor_width) 301 + req->value = dev->mode_config.cursor_width; 302 + else 303 + req->value = 64; 304 + break; 305 + case DRM_CAP_CURSOR_HEIGHT: 306 + if (dev->mode_config.cursor_height) 307 + req->value = dev->mode_config.cursor_height; 308 + else 309 + req->value = 64; 310 + break; 299 311 default: 300 312 return -EINVAL; 301 313 }
+3 -1
drivers/gpu/drm/i2c/tda998x_drv.c
··· 1151 1151 1152 1152 priv->current_page = 0xff; 1153 1153 priv->cec = i2c_new_dummy(client->adapter, 0x34); 1154 - if (!priv->cec) 1154 + if (!priv->cec) { 1155 + kfree(priv); 1155 1156 return -ENODEV; 1157 + } 1156 1158 priv->dpms = DRM_MODE_DPMS_OFF; 1157 1159 1158 1160 encoder_slave->slave_priv = priv;
+9 -14
drivers/gpu/drm/i915/i915_drv.c
··· 403 403 void intel_detect_pch(struct drm_device *dev) 404 404 { 405 405 struct drm_i915_private *dev_priv = dev->dev_private; 406 - struct pci_dev *pch; 406 + struct pci_dev *pch = NULL; 407 407 408 408 /* In all current cases, num_pipes is equivalent to the PCH_NOP setting 409 409 * (which really amounts to a PCH but no South Display). ··· 424 424 * all the ISA bridge devices and check for the first match, instead 425 425 * of only checking the first one. 426 426 */ 427 - pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); 428 - while (pch) { 429 - struct pci_dev *curr = pch; 427 + while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) { 430 428 if (pch->vendor == PCI_VENDOR_ID_INTEL) { 431 - unsigned short id; 432 - id = pch->device & INTEL_PCH_DEVICE_ID_MASK; 429 + unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK; 433 430 dev_priv->pch_id = id; 434 431 435 432 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { ··· 458 461 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 459 462 WARN_ON(!IS_HASWELL(dev)); 460 463 WARN_ON(!IS_ULT(dev)); 461 - } else { 462 - goto check_next; 463 - } 464 - pci_dev_put(pch); 464 + } else 465 + continue; 466 + 465 467 break; 466 468 } 467 - check_next: 468 - pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr); 469 - pci_dev_put(curr); 470 469 } 471 470 if (!pch) 472 - DRM_DEBUG_KMS("No PCH found?\n"); 471 + DRM_DEBUG_KMS("No PCH found.\n"); 472 + 473 + pci_dev_put(pch); 473 474 } 474 475 475 476 bool i915_semaphore_is_enabled(struct drm_device *dev)
+16 -3
drivers/gpu/drm/i915/i915_gem_stolen.c
··· 82 82 r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size, 83 83 "Graphics Stolen Memory"); 84 84 if (r == NULL) { 85 - DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", 86 - base, base + (uint32_t)dev_priv->gtt.stolen_size); 87 - base = 0; 85 + /* 86 + * One more attempt but this time requesting region from 87 + * base + 1, as we have seen that this resolves the region 88 + * conflict with the PCI Bus. 89 + * This is a BIOS w/a: Some BIOS wrap stolen in the root 90 + * PCI bus, but have an off-by-one error. Hence retry the 91 + * reservation starting from 1 instead of 0. 92 + */ 93 + r = devm_request_mem_region(dev->dev, base + 1, 94 + dev_priv->gtt.stolen_size - 1, 95 + "Graphics Stolen Memory"); 96 + if (r == NULL) { 97 + DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", 98 + base, base + (uint32_t)dev_priv->gtt.stolen_size); 99 + base = 0; 100 + } 88 101 } 89 102 90 103 return base;
+18 -4
drivers/gpu/drm/i915/intel_display.c
··· 1092 1092 struct drm_device *dev = dev_priv->dev; 1093 1093 bool cur_state; 1094 1094 1095 - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 1096 - cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE; 1097 - else if (IS_845G(dev) || IS_I865G(dev)) 1095 + if (IS_845G(dev) || IS_I865G(dev)) 1098 1096 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 1099 - else 1097 + else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) 1100 1098 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 1099 + else 1100 + cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE; 1101 1101 1102 1102 WARN(cur_state != state, 1103 1103 "cursor on pipe %c assertion failure (expected %s, current %s)\n", ··· 8585 8585 len = 4; 8586 8586 if (ring->id == RCS) 8587 8587 len += 6; 8588 + 8589 + /* 8590 + * BSpec MI_DISPLAY_FLIP for IVB: 8591 + * "The full packet must be contained within the same cache line." 8592 + * 8593 + * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same 8594 + * cacheline, if we ever start emitting more commands before 8595 + * the MI_DISPLAY_FLIP we may need to first emit everything else, 8596 + * then do the cacheline alignment, and finally emit the 8597 + * MI_DISPLAY_FLIP. 8598 + */ 8599 + ret = intel_ring_cacheline_align(ring); 8600 + if (ret) 8601 + goto err_unpin; 8588 8602 8589 8603 ret = intel_ring_begin(ring, len); 8590 8604 if (ret)
+13 -6
drivers/gpu/drm/i915/intel_dp.c
··· 537 537 uint8_t msg[20]; 538 538 int msg_bytes; 539 539 uint8_t ack; 540 + int retry; 540 541 541 542 if (WARN_ON(send_bytes > 16)) 542 543 return -E2BIG; ··· 549 548 msg[3] = send_bytes - 1; 550 549 memcpy(&msg[4], send, send_bytes); 551 550 msg_bytes = send_bytes + 4; 552 - for (;;) { 551 + for (retry = 0; retry < 7; retry++) { 553 552 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); 554 553 if (ret < 0) 555 554 return ret; 556 555 ack >>= 4; 557 556 if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) 558 - break; 557 + return send_bytes; 559 558 else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) 560 - udelay(100); 559 + usleep_range(400, 500); 561 560 else 562 561 return -EIO; 563 562 } 564 - return send_bytes; 563 + 564 + DRM_ERROR("too many retries, giving up\n"); 565 + return -EIO; 565 566 } 566 567 567 568 /* Write a single byte to the aux channel in native mode */ ··· 585 582 int reply_bytes; 586 583 uint8_t ack; 587 584 int ret; 585 + int retry; 588 586 589 587 if (WARN_ON(recv_bytes > 19)) 590 588 return -E2BIG; ··· 599 595 msg_bytes = 4; 600 596 reply_bytes = recv_bytes + 1; 601 597 602 - for (;;) { 598 + for (retry = 0; retry < 7; retry++) { 603 599 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, 604 600 reply, reply_bytes); 605 601 if (ret == 0) ··· 612 608 return ret - 1; 613 609 } 614 610 else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) 615 - udelay(100); 611 + usleep_range(400, 500); 616 612 else 617 613 return -EIO; 618 614 } 615 + 616 + DRM_ERROR("too many retries, giving up\n"); 617 + return -EIO; 619 618 } 620 619 621 620 static int
+3 -3
drivers/gpu/drm/i915/intel_hdmi.c
··· 845 845 { 846 846 struct drm_device *dev = intel_hdmi_to_dev(hdmi); 847 847 848 - if (IS_G4X(dev)) 848 + if (!hdmi->has_hdmi_sink || IS_G4X(dev)) 849 849 return 165000; 850 850 else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) 851 851 return 300000; ··· 899 899 * outputs. We also need to check that the higher clock still fits 900 900 * within limits. 901 901 */ 902 - if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit 903 - && HAS_PCH_SPLIT(dev)) { 902 + if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink && 903 + clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) { 904 904 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); 905 905 desired_bpp = 12*3; 906 906
+2 -2
drivers/gpu/drm/i915/intel_panel.c
··· 698 698 freq /= 0xff; 699 699 700 700 ctl = freq << 17; 701 - if (IS_GEN2(dev) && panel->backlight.combination_mode) 701 + if (panel->backlight.combination_mode) 702 702 ctl |= BLM_LEGACY_MODE; 703 703 if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm) 704 704 ctl |= BLM_POLARITY_PNV; ··· 979 979 980 980 ctl = I915_READ(BLC_PWM_CTL); 981 981 982 - if (IS_GEN2(dev)) 982 + if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev)) 983 983 panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; 984 984 985 985 if (IS_PINEVIEW(dev))
+4 -2
drivers/gpu/drm/i915/intel_pm.c
··· 3493 3493 u32 pcbr; 3494 3494 int pctx_size = 24*1024; 3495 3495 3496 + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 3497 + 3496 3498 pcbr = I915_READ(VLV_PCBR); 3497 3499 if (pcbr) { 3498 3500 /* BIOS set it up already, grab the pre-alloc'd space */ ··· 3543 3541 gtfifodbg); 3544 3542 I915_WRITE(GTFIFODBG, gtfifodbg); 3545 3543 } 3546 - 3547 - valleyview_setup_pctx(dev); 3548 3544 3549 3545 /* If VLV, Forcewake all wells, else re-direct to regular path */ 3550 3546 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); ··· 4395 4395 ironlake_enable_rc6(dev); 4396 4396 intel_init_emon(dev); 4397 4397 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 4398 + if (IS_VALLEYVIEW(dev)) 4399 + valleyview_setup_pctx(dev); 4398 4400 /* 4399 4401 * PCU communication is slow and this doesn't need to be 4400 4402 * done at any specific time, so do this out of our fast path
+21
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 1653 1653 return 0; 1654 1654 } 1655 1655 1656 + /* Align the ring tail to a cacheline boundary */ 1657 + int intel_ring_cacheline_align(struct intel_ring_buffer *ring) 1658 + { 1659 + int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t); 1660 + int ret; 1661 + 1662 + if (num_dwords == 0) 1663 + return 0; 1664 + 1665 + ret = intel_ring_begin(ring, num_dwords); 1666 + if (ret) 1667 + return ret; 1668 + 1669 + while (num_dwords--) 1670 + intel_ring_emit(ring, MI_NOOP); 1671 + 1672 + intel_ring_advance(ring); 1673 + 1674 + return 0; 1675 + } 1676 + 1656 1677 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) 1657 1678 { 1658 1679 struct drm_i915_private *dev_priv = ring->dev->dev_private;
+1
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 233 233 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); 234 234 235 235 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); 236 + int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring); 236 237 static inline void intel_ring_emit(struct intel_ring_buffer *ring, 237 238 u32 data) 238 239 {
+1
drivers/gpu/drm/nouveau/Makefile
··· 141 141 nouveau-y += core/subdev/mc/nv04.o 142 142 nouveau-y += core/subdev/mc/nv40.o 143 143 nouveau-y += core/subdev/mc/nv44.o 144 + nouveau-y += core/subdev/mc/nv4c.o 144 145 nouveau-y += core/subdev/mc/nv50.o 145 146 nouveau-y += core/subdev/mc/nv94.o 146 147 nouveau-y += core/subdev/mc/nv98.o
+5 -5
drivers/gpu/drm/nouveau/core/engine/device/nv40.c
··· 311 311 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 312 312 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 313 313 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; 314 - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; 314 + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; 315 315 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; 316 316 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 317 317 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; ··· 334 334 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 335 335 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 336 336 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; 337 - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; 337 + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; 338 338 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; 339 339 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 340 340 device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass; ··· 357 357 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 358 358 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 359 359 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; 360 - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; 360 + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; 361 361 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; 362 362 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 363 363 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; ··· 380 380 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 381 381 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 382 382 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; 383 - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; 383 + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; 384 384 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; 385 385 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 386 386 device->oclass[NVDEV_SUBDEV_FB ] = 
nv46_fb_oclass; ··· 403 403 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; 404 404 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; 405 405 device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; 406 - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; 406 + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; 407 407 device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; 408 408 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 409 409 device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
+1 -1
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
··· 1142 1142 if (conf != ~0) { 1143 1143 if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) { 1144 1144 u32 soff = (ffs(outp.or) - 1) * 0x08; 1145 - u32 ctrl = nv_rd32(priv, 0x610798 + soff); 1145 + u32 ctrl = nv_rd32(priv, 0x610794 + soff); 1146 1146 u32 datarate; 1147 1147 1148 1148 switch ((ctrl & 0x000f0000) >> 16) {
+1 -1
drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
··· 112 112 113 113 nv_wr32(priv, 0x002270, cur->addr >> 12); 114 114 nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); 115 - if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000)) 115 + if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000)) 116 116 nv_error(priv, "runlist %d update timeout\n", engine); 117 117 mutex_unlock(&nv_subdev(priv)->mutex); 118 118 }
+1 -1
drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
··· 539 539 ustatus &= ~0x04030000; 540 540 } 541 541 if (ustatus && display) { 542 - nv_error("%s - TP%d:", name, i); 542 + nv_error(priv, "%s - TP%d:", name, i); 543 543 nouveau_bitfield_print(nv50_mpc_traps, ustatus); 544 544 pr_cont("\n"); 545 545 ustatus = 0;
+1
drivers/gpu/drm/nouveau/core/include/subdev/mc.h
··· 47 47 extern struct nouveau_oclass *nv04_mc_oclass; 48 48 extern struct nouveau_oclass *nv40_mc_oclass; 49 49 extern struct nouveau_oclass *nv44_mc_oclass; 50 + extern struct nouveau_oclass *nv4c_mc_oclass; 50 51 extern struct nouveau_oclass *nv50_mc_oclass; 51 52 extern struct nouveau_oclass *nv94_mc_oclass; 52 53 extern struct nouveau_oclass *nv98_mc_oclass;
+4
drivers/gpu/drm/nouveau/core/subdev/bios/base.c
··· 130 130 u16 pcir; 131 131 int i; 132 132 133 + /* there is no prom on nv4x IGP's */ 134 + if (device->card_type == NV_40 && device->chipset >= 0x4c) 135 + return; 136 + 133 137 /* enable access to rom */ 134 138 if (device->card_type >= NV_50) 135 139 pcireg = 0x088050;
+1 -1
drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
··· 36 36 .fini = _nouveau_fb_fini, 37 37 }, 38 38 .base.memtype = nv04_fb_memtype_valid, 39 - .base.ram = &nv10_ram_oclass, 39 + .base.ram = &nv1a_ram_oclass, 40 40 .tile.regions = 8, 41 41 .tile.init = nv10_fb_tile_init, 42 42 .tile.fini = nv10_fb_tile_fini,
+1
drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
··· 14 14 extern const struct nouveau_mc_intr nv04_mc_intr[]; 15 15 int nv04_mc_init(struct nouveau_object *); 16 16 void nv40_mc_msi_rearm(struct nouveau_mc *); 17 + int nv44_mc_init(struct nouveau_object *object); 17 18 int nv50_mc_init(struct nouveau_object *); 18 19 extern const struct nouveau_mc_intr nv50_mc_intr[]; 19 20 extern const struct nouveau_mc_intr nvc0_mc_intr[];
+1 -1
drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
··· 24 24 25 25 #include "nv04.h" 26 26 27 - static int 27 + int 28 28 nv44_mc_init(struct nouveau_object *object) 29 29 { 30 30 struct nv04_mc_priv *priv = (void *)object;
+45
drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
··· 1 + /* 2 + * Copyright 2014 Ilia Mirkin 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + * Authors: Ilia Mirkin 23 + */ 24 + 25 + #include "nv04.h" 26 + 27 + static void 28 + nv4c_mc_msi_rearm(struct nouveau_mc *pmc) 29 + { 30 + struct nv04_mc_priv *priv = (void *)pmc; 31 + nv_wr08(priv, 0x088050, 0xff); 32 + } 33 + 34 + struct nouveau_oclass * 35 + nv4c_mc_oclass = &(struct nouveau_mc_oclass) { 36 + .base.handle = NV_SUBDEV(MC, 0x4c), 37 + .base.ofuncs = &(struct nouveau_ofuncs) { 38 + .ctor = nv04_mc_ctor, 39 + .dtor = _nouveau_mc_dtor, 40 + .init = nv44_mc_init, 41 + .fini = _nouveau_mc_fini, 42 + }, 43 + .intr = nv04_mc_intr, 44 + .msi_rearm = nv4c_mc_msi_rearm, 45 + }.base;
+24 -2
drivers/gpu/drm/nouveau/nouveau_acpi.c
··· 106 106 return 0; 107 107 } 108 108 109 + /* 110 + * On some platforms, _DSM(nouveau_op_dsm_muid, func0) has special 111 + * requirements on the fourth parameter, so a private implementation 112 + * instead of using acpi_check_dsm(). 113 + */ 114 + static int nouveau_check_optimus_dsm(acpi_handle handle) 115 + { 116 + int result; 117 + 118 + /* 119 + * Function 0 returns a Buffer containing available functions. 120 + * The args parameter is ignored for function 0, so just put 0 in it 121 + */ 122 + if (nouveau_optimus_dsm(handle, 0, 0, &result)) 123 + return 0; 124 + 125 + /* 126 + * ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. 127 + * If the n-th bit is enabled, function n is supported 128 + */ 129 + return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS); 130 + } 131 + 109 132 static int nouveau_dsm(acpi_handle handle, int func, int arg) 110 133 { 111 134 int ret = 0; ··· 230 207 1 << NOUVEAU_DSM_POWER)) 231 208 retval |= NOUVEAU_DSM_HAS_MUX; 232 209 233 - if (acpi_check_dsm(dhandle, nouveau_op_dsm_muid, 0x00000100, 234 - 1 << NOUVEAU_DSM_OPTIMUS_CAPS)) 210 + if (nouveau_check_optimus_dsm(dhandle)) 235 211 retval |= NOUVEAU_DSM_HAS_OPT; 236 212 237 213 if (retval & NOUVEAU_DSM_HAS_OPT) {
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 1249 1249 mem->bus.is_iomem = !dev->agp->cant_use_aperture; 1250 1250 } 1251 1251 #endif 1252 - if (!node->memtype) 1252 + if (nv_device(drm->device)->card_type < NV_50 || !node->memtype) 1253 1253 /* untiled */ 1254 1254 break; 1255 1255 /* fallthrough, tiled memory */
+3
drivers/gpu/drm/nouveau/nouveau_drm.c
··· 376 376 if (ret) 377 377 goto fail_device; 378 378 379 + dev->irq_enabled = true; 380 + 379 381 /* workaround an odd issue on nvc1 by disabling the device's 380 382 * nosnoop capability. hopefully won't cause issues until a 381 383 * better fix is found - assuming there is one... ··· 477 475 struct nouveau_drm *drm = nouveau_drm(dev); 478 476 struct nouveau_object *device; 479 477 478 + dev->irq_enabled = false; 480 479 device = drm->client.base.device; 481 480 drm_put_dev(dev); 482 481
+3 -1
drivers/gpu/drm/nouveau/nouveau_vga.c
··· 14 14 { 15 15 struct nouveau_device *device = nouveau_dev(priv); 16 16 17 - if (device->chipset >= 0x40) 17 + if (device->card_type == NV_40 && device->chipset >= 0x4c) 18 + nv_wr32(device, 0x088060, state); 19 + else if (device->chipset >= 0x40) 18 20 nv_wr32(device, 0x088054, state); 19 21 else 20 22 nv_wr32(device, 0x001854, state);
+23 -8
drivers/gpu/drm/radeon/atombios_crtc.c
··· 559 559 u32 adjusted_clock = mode->clock; 560 560 int encoder_mode = atombios_get_encoder_mode(encoder); 561 561 u32 dp_clock = mode->clock; 562 - int bpc = radeon_get_monitor_bpc(connector); 562 + int bpc = radeon_crtc->bpc; 563 563 bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); 564 564 565 565 /* reset the pll flags */ ··· 1176 1176 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); 1177 1177 1178 1178 /* Set NUM_BANKS. */ 1179 - if (rdev->family >= CHIP_BONAIRE) { 1179 + if (rdev->family >= CHIP_TAHITI) { 1180 1180 unsigned tileb, index, num_banks, tile_split_bytes; 1181 1181 1182 1182 /* Calculate the macrotile mode index. */ ··· 1194 1194 return -EINVAL; 1195 1195 } 1196 1196 1197 - num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; 1197 + if (rdev->family >= CHIP_BONAIRE) 1198 + num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; 1199 + else 1200 + num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; 1198 1201 fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); 1199 1202 } else { 1200 - /* SI and older. */ 1201 - if (rdev->family >= CHIP_TAHITI) 1202 - tmp = rdev->config.si.tile_config; 1203 - else if (rdev->family >= CHIP_CAYMAN) 1203 + /* NI and older. 
*/ 1204 + if (rdev->family >= CHIP_CAYMAN) 1204 1205 tmp = rdev->config.cayman.tile_config; 1205 1206 else 1206 1207 tmp = rdev->config.evergreen.tile_config; ··· 1774 1773 return ATOM_PPLL1; 1775 1774 DRM_ERROR("unable to allocate a PPLL\n"); 1776 1775 return ATOM_PPLL_INVALID; 1776 + } else if (ASIC_IS_DCE41(rdev)) { 1777 + /* Don't share PLLs on DCE4.1 chips */ 1778 + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { 1779 + if (rdev->clock.dp_extclk) 1780 + /* skip PPLL programming if using ext clock */ 1781 + return ATOM_PPLL_INVALID; 1782 + } 1783 + pll_in_use = radeon_get_pll_use_mask(crtc); 1784 + if (!(pll_in_use & (1 << ATOM_PPLL1))) 1785 + return ATOM_PPLL1; 1786 + if (!(pll_in_use & (1 << ATOM_PPLL2))) 1787 + return ATOM_PPLL2; 1788 + DRM_ERROR("unable to allocate a PPLL\n"); 1789 + return ATOM_PPLL_INVALID; 1777 1790 } else if (ASIC_IS_DCE4(rdev)) { 1778 1791 /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock, 1779 1792 * depending on the asic: ··· 1815 1800 if (pll != ATOM_PPLL_INVALID) 1816 1801 return pll; 1817 1802 } 1818 - } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */ 1803 + } else { 1819 1804 /* use the same PPLL for all monitors with the same clock */ 1820 1805 pll = radeon_get_shared_nondp_ppll(crtc); 1821 1806 if (pll != ATOM_PPLL_INVALID)
+5 -4
drivers/gpu/drm/radeon/atombios_encoders.c
··· 464 464 465 465 static u8 radeon_atom_get_bpc(struct drm_encoder *encoder) 466 466 { 467 - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 468 467 int bpc = 8; 469 468 470 - if (connector) 471 - bpc = radeon_get_monitor_bpc(connector); 469 + if (encoder->crtc) { 470 + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); 471 + bpc = radeon_crtc->bpc; 472 + } 472 473 473 474 switch (bpc) { 474 475 case 0: ··· 1314 1313 } 1315 1314 if (is_dp) 1316 1315 args.v5.ucLaneNum = dp_lane_count; 1317 - else if (radeon_encoder->pixel_clock > 165000) 1316 + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) 1318 1317 args.v5.ucLaneNum = 8; 1319 1318 else 1320 1319 args.v5.ucLaneNum = 4;
+3 -2
drivers/gpu/drm/radeon/cik.c
··· 3046 3046 } 3047 3047 3048 3048 /** 3049 - * cik_select_se_sh - select which SE, SH to address 3049 + * cik_get_rb_disabled - computes the mask of disabled RBs 3050 3050 * 3051 3051 * @rdev: radeon_device pointer 3052 3052 * @max_rb_num: max RBs (render backends) for the asic ··· 7902 7902 /* init golden registers */ 7903 7903 cik_init_golden_registers(rdev); 7904 7904 7905 - radeon_pm_resume(rdev); 7905 + if (rdev->pm.pm_method == PM_METHOD_DPM) 7906 + radeon_pm_resume(rdev); 7906 7907 7907 7908 rdev->accel_working = true; 7908 7909 r = cik_startup(rdev);
+9 -6
drivers/gpu/drm/radeon/dce6_afmt.c
··· 278 278 return !ASIC_IS_NODCE(rdev); 279 279 } 280 280 281 - static void dce6_audio_enable(struct radeon_device *rdev, 282 - struct r600_audio_pin *pin, 283 - bool enable) 281 + void dce6_audio_enable(struct radeon_device *rdev, 282 + struct r600_audio_pin *pin, 283 + bool enable) 284 284 { 285 + if (!pin) 286 + return; 287 + 285 288 WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL, 286 - AUDIO_ENABLED); 287 - DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id); 289 + enable ? AUDIO_ENABLED : 0); 288 290 } 289 291 290 292 static const u32 pin_offsets[7] = ··· 325 323 rdev->audio.pin[i].connected = false; 326 324 rdev->audio.pin[i].offset = pin_offsets[i]; 327 325 rdev->audio.pin[i].id = i; 328 - dce6_audio_enable(rdev, &rdev->audio.pin[i], true); 326 + /* disable audio. it will be set up later */ 327 + dce6_audio_enable(rdev, &rdev->audio.pin[i], false); 329 328 } 330 329 331 330 return 0;
+4 -3
drivers/gpu/drm/radeon/evergreen.c
··· 1680 1680 case RADEON_HPD_6: 1681 1681 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) 1682 1682 connected = true; 1683 - break; 1683 + break; 1684 1684 default: 1685 1685 break; 1686 1686 } ··· 5299 5299 /* init golden registers */ 5300 5300 evergreen_init_golden_registers(rdev); 5301 5301 5302 - radeon_pm_resume(rdev); 5302 + if (rdev->pm.pm_method == PM_METHOD_DPM) 5303 + radeon_pm_resume(rdev); 5303 5304 5304 5305 rdev->accel_working = true; 5305 5306 r = evergreen_startup(rdev); ··· 5476 5475 radeon_wb_fini(rdev); 5477 5476 radeon_ib_pool_fini(rdev); 5478 5477 radeon_irq_kms_fini(rdev); 5479 - evergreen_pcie_gart_fini(rdev); 5480 5478 uvd_v1_0_fini(rdev); 5481 5479 radeon_uvd_fini(rdev); 5480 + evergreen_pcie_gart_fini(rdev); 5482 5481 r600_vram_scratch_fini(rdev); 5483 5482 radeon_gem_fini(rdev); 5484 5483 radeon_fence_driver_fini(rdev);
+15 -11
drivers/gpu/drm/radeon/evergreen_hdmi.c
··· 306 306 return; 307 307 offset = dig->afmt->offset; 308 308 309 + /* disable audio prior to setting up hw */ 310 + if (ASIC_IS_DCE6(rdev)) { 311 + dig->afmt->pin = dce6_audio_get_pin(rdev); 312 + dce6_audio_enable(rdev, dig->afmt->pin, false); 313 + } else { 314 + dig->afmt->pin = r600_audio_get_pin(rdev); 315 + r600_audio_enable(rdev, dig->afmt->pin, false); 316 + } 317 + 309 318 evergreen_audio_set_dto(encoder, mode->clock); 310 319 311 320 WREG32(HDMI_VBI_PACKET_CONTROL + offset, ··· 418 409 WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF); 419 410 WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001); 420 411 WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001); 412 + 413 + /* enable audio after to setting up hw */ 414 + if (ASIC_IS_DCE6(rdev)) 415 + dce6_audio_enable(rdev, dig->afmt->pin, true); 416 + else 417 + r600_audio_enable(rdev, dig->afmt->pin, true); 421 418 } 422 419 423 420 void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) 424 421 { 425 - struct drm_device *dev = encoder->dev; 426 - struct radeon_device *rdev = dev->dev_private; 427 422 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 428 423 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 429 424 ··· 439 426 return; 440 427 if (!enable && !dig->afmt->enabled) 441 428 return; 442 - 443 - if (enable) { 444 - if (ASIC_IS_DCE6(rdev)) 445 - dig->afmt->pin = dce6_audio_get_pin(rdev); 446 - else 447 - dig->afmt->pin = r600_audio_get_pin(rdev); 448 - } else { 449 - dig->afmt->pin = NULL; 450 - } 451 429 452 430 dig->afmt->enabled = enable; 453 431
+1 -1
drivers/gpu/drm/radeon/evergreen_smc.h
··· 57 57 58 58 #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100 59 59 60 - #define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0 60 + #define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8 61 61 #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC 62 62 #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20 63 63
+2 -1
drivers/gpu/drm/radeon/ni.c
··· 2105 2105 /* init golden registers */ 2106 2106 ni_init_golden_registers(rdev); 2107 2107 2108 - radeon_pm_resume(rdev); 2108 + if (rdev->pm.pm_method == PM_METHOD_DPM) 2109 + radeon_pm_resume(rdev); 2109 2110 2110 2111 rdev->accel_working = true; 2111 2112 r = cayman_startup(rdev);
+1 -1
drivers/gpu/drm/radeon/ni_dpm.c
··· 2588 2588 if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) 2589 2589 enable_sq_ramping = false; 2590 2590 2591 - if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) 2591 + if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) 2592 2592 enable_sq_ramping = false; 2593 2593 2594 2594 for (i = 0; i < state->performance_level_count; i++) {
-2
drivers/gpu/drm/radeon/r100.c
··· 3942 3942 /* Initialize surface registers */ 3943 3943 radeon_surface_init(rdev); 3944 3944 3945 - radeon_pm_resume(rdev); 3946 - 3947 3945 rdev->accel_working = true; 3948 3946 r = r100_startup(rdev); 3949 3947 if (r) {
-2
drivers/gpu/drm/radeon/r300.c
··· 1430 1430 /* Initialize surface registers */ 1431 1431 radeon_surface_init(rdev); 1432 1432 1433 - radeon_pm_resume(rdev); 1434 - 1435 1433 rdev->accel_working = true; 1436 1434 r = r300_startup(rdev); 1437 1435 if (r) {
-2
drivers/gpu/drm/radeon/r420.c
··· 325 325 /* Initialize surface registers */ 326 326 radeon_surface_init(rdev); 327 327 328 - radeon_pm_resume(rdev); 329 - 330 328 rdev->accel_working = true; 331 329 r = r420_startup(rdev); 332 330 if (r) {
-2
drivers/gpu/drm/radeon/r520.c
··· 240 240 /* Initialize surface registers */ 241 241 radeon_surface_init(rdev); 242 242 243 - radeon_pm_resume(rdev); 244 - 245 243 rdev->accel_working = true; 246 244 r = r520_startup(rdev); 247 245 if (r) {
+2 -1
drivers/gpu/drm/radeon/r600.c
··· 2968 2968 /* post card */ 2969 2969 atom_asic_init(rdev->mode_info.atom_context); 2970 2970 2971 - radeon_pm_resume(rdev); 2971 + if (rdev->pm.pm_method == PM_METHOD_DPM) 2972 + radeon_pm_resume(rdev); 2972 2973 2973 2974 rdev->accel_working = true; 2974 2975 r = r600_startup(rdev);
+8 -6
drivers/gpu/drm/radeon/r600_audio.c
··· 142 142 } 143 143 144 144 /* enable the audio stream */ 145 - static void r600_audio_enable(struct radeon_device *rdev, 146 - struct r600_audio_pin *pin, 147 - bool enable) 145 + void r600_audio_enable(struct radeon_device *rdev, 146 + struct r600_audio_pin *pin, 147 + bool enable) 148 148 { 149 149 u32 value = 0; 150 + 151 + if (!pin) 152 + return; 150 153 151 154 if (ASIC_IS_DCE4(rdev)) { 152 155 if (enable) { ··· 161 158 WREG32_P(R600_AUDIO_ENABLE, 162 159 enable ? 0x81000000 : 0x0, ~0x81000000); 163 160 } 164 - DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id); 165 161 } 166 162 167 163 /* ··· 180 178 rdev->audio.pin[0].status_bits = 0; 181 179 rdev->audio.pin[0].category_code = 0; 182 180 rdev->audio.pin[0].id = 0; 183 - 184 - r600_audio_enable(rdev, &rdev->audio.pin[0], true); 181 + /* disable audio. it will be set up later */ 182 + r600_audio_enable(rdev, &rdev->audio.pin[0], false); 185 183 186 184 return 0; 187 185 }
+7 -8
drivers/gpu/drm/radeon/r600_hdmi.c
··· 329 329 u8 *sadb; 330 330 int sad_count; 331 331 332 - /* XXX: setting this register causes hangs on some asics */ 333 - return; 334 - 335 332 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 336 333 if (connector->encoder == encoder) { 337 334 radeon_connector = to_radeon_connector(connector); ··· 457 460 return; 458 461 offset = dig->afmt->offset; 459 462 463 + /* disable audio prior to setting up hw */ 464 + dig->afmt->pin = r600_audio_get_pin(rdev); 465 + r600_audio_enable(rdev, dig->afmt->pin, false); 466 + 460 467 r600_audio_set_dto(encoder, mode->clock); 461 468 462 469 WREG32(HDMI0_VBI_PACKET_CONTROL + offset, ··· 532 531 WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001); 533 532 534 533 r600_hdmi_audio_workaround(encoder); 534 + 535 + /* enable audio after to setting up hw */ 536 + r600_audio_enable(rdev, dig->afmt->pin, true); 535 537 } 536 538 537 539 /* ··· 654 650 return; 655 651 if (!enable && !dig->afmt->enabled) 656 652 return; 657 - 658 - if (enable) 659 - dig->afmt->pin = r600_audio_get_pin(rdev); 660 - else 661 - dig->afmt->pin = NULL; 662 653 663 654 /* Older chipsets require setting HDMI and routing manually */ 664 655 if (!ASIC_IS_DCE3(rdev)) {
+9 -1
drivers/gpu/drm/radeon/radeon.h
··· 135 135 /* R600+ */ 136 136 #define R600_RING_TYPE_UVD_INDEX 5 137 137 138 + /* number of hw syncs before falling back on blocking */ 139 + #define RADEON_NUM_SYNCS 4 140 + 138 141 /* hardcode those limit for now */ 139 142 #define RADEON_VA_IB_OFFSET (1 << 20) 140 143 #define RADEON_VA_RESERVED_SIZE (8 << 20) ··· 557 554 /* 558 555 * Semaphores. 559 556 */ 560 - /* everything here is constant */ 561 557 struct radeon_semaphore { 562 558 struct radeon_sa_bo *sa_bo; 563 559 signed waiters; ··· 2747 2745 void r600_audio_update_hdmi(struct work_struct *work); 2748 2746 struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev); 2749 2747 struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev); 2748 + void r600_audio_enable(struct radeon_device *rdev, 2749 + struct r600_audio_pin *pin, 2750 + bool enable); 2751 + void dce6_audio_enable(struct radeon_device *rdev, 2752 + struct r600_audio_pin *pin, 2753 + bool enable); 2750 2754 2751 2755 /* 2752 2756 * R600 vram scratch functions
+2 -1
drivers/gpu/drm/radeon/radeon_atpx_handler.c
··· 219 219 memcpy(&output, info->buffer.pointer, size); 220 220 221 221 /* TODO: check version? */ 222 - printk("ATPX version %u\n", output.version); 222 + printk("ATPX version %u, functions 0x%08x\n", 223 + output.version, output.function_bits); 223 224 224 225 radeon_atpx_parse_functions(&atpx->functions, output.function_bits); 225 226
+4 -1
drivers/gpu/drm/radeon/radeon_device.c
··· 1521 1521 if (r) 1522 1522 DRM_ERROR("ib ring test failed (%d).\n", r); 1523 1523 1524 - if (rdev->pm.dpm_enabled) { 1524 + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { 1525 1525 /* do dpm late init */ 1526 1526 r = radeon_pm_late_init(rdev); 1527 1527 if (r) { 1528 1528 rdev->pm.dpm_enabled = false; 1529 1529 DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); 1530 1530 } 1531 + } else { 1532 + /* resume old pm late */ 1533 + radeon_pm_resume(rdev); 1531 1534 } 1532 1535 1533 1536 radeon_restore_bios_scratch_regs(rdev);
+2
drivers/gpu/drm/radeon/radeon_display.c
··· 571 571 radeon_crtc->max_cursor_width = CURSOR_WIDTH; 572 572 radeon_crtc->max_cursor_height = CURSOR_HEIGHT; 573 573 } 574 + dev->mode_config.cursor_width = radeon_crtc->max_cursor_width; 575 + dev->mode_config.cursor_height = radeon_crtc->max_cursor_height; 574 576 575 577 #if 0 576 578 radeon_crtc->mode_set.crtc = &radeon_crtc->base;
+6
drivers/gpu/drm/radeon/radeon_kms.c
··· 537 537 538 538 radeon_vm_init(rdev, &fpriv->vm); 539 539 540 + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 541 + if (r) 542 + return r; 543 + 540 544 /* map the ib pool buffer read only into 541 545 * virtual address space */ 542 546 bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, ··· 548 544 r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, 549 545 RADEON_VM_PAGE_READABLE | 550 546 RADEON_VM_PAGE_SNOOPED); 547 + 548 + radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 551 549 if (r) { 552 550 radeon_vm_fini(rdev, &fpriv->vm); 553 551 kfree(fpriv);
+1 -1
drivers/gpu/drm/radeon/radeon_ring.c
··· 139 139 } 140 140 141 141 /* 64 dwords should be enough for fence too */ 142 - r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8); 142 + r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8); 143 143 if (r) { 144 144 dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); 145 145 return r;
+16 -3
drivers/gpu/drm/radeon/radeon_semaphore.c
··· 34 34 int radeon_semaphore_create(struct radeon_device *rdev, 35 35 struct radeon_semaphore **semaphore) 36 36 { 37 + uint32_t *cpu_addr; 37 38 int i, r; 38 39 39 40 *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); 40 41 if (*semaphore == NULL) { 41 42 return -ENOMEM; 42 43 } 43 - r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, 44 - &(*semaphore)->sa_bo, 8, 8, true); 44 + r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo, 45 + 8 * RADEON_NUM_SYNCS, 8, true); 45 46 if (r) { 46 47 kfree(*semaphore); 47 48 *semaphore = NULL; ··· 50 49 } 51 50 (*semaphore)->waiters = 0; 52 51 (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo); 53 - *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; 52 + 53 + cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo); 54 + for (i = 0; i < RADEON_NUM_SYNCS; ++i) 55 + cpu_addr[i] = 0; 54 56 55 57 for (i = 0; i < RADEON_NUM_RINGS; ++i) 56 58 (*semaphore)->sync_to[i] = NULL; ··· 129 125 struct radeon_semaphore *semaphore, 130 126 int ring) 131 127 { 128 + unsigned count = 0; 132 129 int i, r; 133 130 134 131 for (i = 0; i < RADEON_NUM_RINGS; ++i) { ··· 143 138 if (!rdev->ring[i].ready) { 144 139 dev_err(rdev->dev, "Syncing to a disabled ring!"); 145 140 return -EINVAL; 141 + } 142 + 143 + if (++count > RADEON_NUM_SYNCS) { 144 + /* not enough room, wait manually */ 145 + radeon_fence_wait_locked(fence); 146 + continue; 146 147 } 147 148 148 149 /* allocate enough space for sync command */ ··· 175 164 176 165 radeon_ring_commit(rdev, &rdev->ring[i]); 177 166 radeon_fence_note_sync(fence, ring); 167 + 168 + semaphore->gpu_addr += 8; 178 169 } 179 170 180 171 return 0;
+4 -1
drivers/gpu/drm/radeon/radeon_ttm.c
··· 714 714 DRM_ERROR("Failed initializing VRAM heap.\n"); 715 715 return r; 716 716 } 717 + /* Change the size here instead of the init above so only lpfn is affected */ 718 + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 719 + 717 720 r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, 718 721 RADEON_GEM_DOMAIN_VRAM, 719 722 NULL, &rdev->stollen_vga_memory); ··· 938 935 while (size) { 939 936 loff_t p = *pos / PAGE_SIZE; 940 937 unsigned off = *pos & ~PAGE_MASK; 941 - ssize_t cur_size = min(size, PAGE_SIZE - off); 938 + size_t cur_size = min_t(size_t, size, PAGE_SIZE - off); 942 939 struct page *page; 943 940 void *ptr; 944 941
+2
drivers/gpu/drm/radeon/radeon_uvd.c
··· 171 171 172 172 radeon_bo_unref(&rdev->uvd.vcpu_bo); 173 173 174 + radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]); 175 + 174 176 release_firmware(rdev->uvd_fw); 175 177 } 176 178
-2
drivers/gpu/drm/radeon/rs400.c
··· 474 474 /* Initialize surface registers */ 475 475 radeon_surface_init(rdev); 476 476 477 - radeon_pm_resume(rdev); 478 - 479 477 rdev->accel_working = true; 480 478 r = rs400_startup(rdev); 481 479 if (r) {
-2
drivers/gpu/drm/radeon/rs600.c
··· 1048 1048 /* Initialize surface registers */ 1049 1049 radeon_surface_init(rdev); 1050 1050 1051 - radeon_pm_resume(rdev); 1052 - 1053 1051 rdev->accel_working = true; 1054 1052 r = rs600_startup(rdev); 1055 1053 if (r) {
-2
drivers/gpu/drm/radeon/rs690.c
··· 756 756 /* Initialize surface registers */ 757 757 radeon_surface_init(rdev); 758 758 759 - radeon_pm_resume(rdev); 760 - 761 759 rdev->accel_working = true; 762 760 r = rs690_startup(rdev); 763 761 if (r) {
-2
drivers/gpu/drm/radeon/rv515.c
··· 586 586 /* Initialize surface registers */ 587 587 radeon_surface_init(rdev); 588 588 589 - radeon_pm_resume(rdev); 590 - 591 589 rdev->accel_working = true; 592 590 r = rv515_startup(rdev); 593 591 if (r) {
+3 -2
drivers/gpu/drm/radeon/rv770.c
··· 1811 1811 /* init golden registers */ 1812 1812 rv770_init_golden_registers(rdev); 1813 1813 1814 - radeon_pm_resume(rdev); 1814 + if (rdev->pm.pm_method == PM_METHOD_DPM) 1815 + radeon_pm_resume(rdev); 1815 1816 1816 1817 rdev->accel_working = true; 1817 1818 r = rv770_startup(rdev); ··· 1956 1955 radeon_wb_fini(rdev); 1957 1956 radeon_ib_pool_fini(rdev); 1958 1957 radeon_irq_kms_fini(rdev); 1959 - rv770_pcie_gart_fini(rdev); 1960 1958 uvd_v1_0_fini(rdev); 1961 1959 radeon_uvd_fini(rdev); 1960 + rv770_pcie_gart_fini(rdev); 1962 1961 r600_vram_scratch_fini(rdev); 1963 1962 radeon_gem_fini(rdev); 1964 1963 radeon_fence_driver_fini(rdev);
+1 -8
drivers/gpu/drm/radeon/rv770_dpm.c
··· 2526 2526 bool rv770_dpm_vblank_too_short(struct radeon_device *rdev) 2527 2527 { 2528 2528 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 2529 - u32 switch_limit = 300; 2530 - 2531 - /* quirks */ 2532 - /* ASUS K70AF */ 2533 - if ((rdev->pdev->device == 0x9553) && 2534 - (rdev->pdev->subsystem_vendor == 0x1043) && 2535 - (rdev->pdev->subsystem_device == 0x1c42)) 2536 - switch_limit = 200; 2529 + u32 switch_limit = 200; /* 300 */ 2537 2530 2538 2531 /* RV770 */ 2539 2532 /* mclk switching doesn't seem to work reliably on desktop RV770s */
+2 -1
drivers/gpu/drm/radeon/si.c
··· 6618 6618 /* init golden registers */ 6619 6619 si_init_golden_registers(rdev); 6620 6620 6621 - radeon_pm_resume(rdev); 6621 + if (rdev->pm.pm_method == PM_METHOD_DPM) 6622 + radeon_pm_resume(rdev); 6622 6623 6623 6624 rdev->accel_working = true; 6624 6625 r = si_startup(rdev);
+1 -1
drivers/gpu/drm/radeon/si_dpm.c
··· 2395 2395 if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) 2396 2396 enable_sq_ramping = false; 2397 2397 2398 - if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) 2398 + if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) 2399 2399 enable_sq_ramping = false; 2400 2400 2401 2401 for (i = 0; i < state->performance_level_count; i++) {
+1 -1
drivers/gpu/drm/tegra/drm.c
··· 104 104 105 105 static void tegra_drm_lastclose(struct drm_device *drm) 106 106 { 107 - #ifdef CONFIG_TEGRA_DRM_FBDEV 107 + #ifdef CONFIG_DRM_TEGRA_FBDEV 108 108 struct tegra_drm *tegra = drm->dev_private; 109 109 110 110 tegra_fbdev_restore_mode(tegra->fbdev);
+11
drivers/gpu/drm/tegra/rgb.c
··· 15 15 struct tegra_rgb { 16 16 struct tegra_output output; 17 17 struct tegra_dc *dc; 18 + bool enabled; 18 19 19 20 struct clk *clk_parent; 20 21 struct clk *clk; ··· 90 89 struct tegra_rgb *rgb = to_rgb(output); 91 90 unsigned long value; 92 91 92 + if (rgb->enabled) 93 + return 0; 94 + 93 95 tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable)); 94 96 95 97 value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL; ··· 126 122 tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); 127 123 tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); 128 124 125 + rgb->enabled = true; 126 + 129 127 return 0; 130 128 } 131 129 ··· 135 129 { 136 130 struct tegra_rgb *rgb = to_rgb(output); 137 131 unsigned long value; 132 + 133 + if (!rgb->enabled) 134 + return 0; 138 135 139 136 value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL); 140 137 value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | ··· 152 143 tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); 153 144 154 145 tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable)); 146 + 147 + rgb->enabled = false; 155 148 156 149 return 0; 157 150 }
+1
drivers/gpu/drm/ttm/ttm_agp_backend.c
··· 126 126 agp_be->ttm.func = &ttm_agp_func; 127 127 128 128 if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) { 129 + kfree(agp_be); 129 130 return NULL; 130 131 } 131 132
+73 -56
drivers/gpu/drm/vmwgfx/svga3d_reg.h
··· 261 261 /* Planar video formats. */ 262 262 SVGA3D_YV12 = 121, 263 263 264 - /* Shader constant formats. */ 265 - SVGA3D_SURFACE_SHADERCONST_FLOAT = 122, 266 - SVGA3D_SURFACE_SHADERCONST_INT = 123, 267 - SVGA3D_SURFACE_SHADERCONST_BOOL = 124, 268 - 269 - SVGA3D_FORMAT_MAX = 125, 264 + SVGA3D_FORMAT_MAX = 122, 270 265 } SVGA3dSurfaceFormat; 271 266 272 267 typedef uint32 SVGA3dColor; /* a, r, g, b */ ··· 1218 1223 #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 1219 1224 1220 1225 #define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 1221 - 1226 + #define SVGA_3D_CMD_GB_SCREEN_DMA 1131 1227 + #define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132 1228 + #define SVGA_3D_CMD_GB_MOB_FENCE 1133 1229 + #define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134 1222 1230 #define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 1223 1231 #define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 1232 + #define SVGA_3D_CMD_NOP_ERROR 1137 1233 + 1234 + #define SVGA_3D_CMD_RESERVED1 1138 1235 + #define SVGA_3D_CMD_RESERVED2 1139 1236 + #define SVGA_3D_CMD_RESERVED3 1140 1237 + #define SVGA_3D_CMD_RESERVED4 1141 1238 + #define SVGA_3D_CMD_RESERVED5 1142 1224 1239 1225 1240 #define SVGA_3D_CMD_MAX 1142 1226 1241 #define SVGA_3D_CMD_FUTURE_MAX 3000 ··· 1978 1973 uint32 sizeInBytes; 1979 1974 uint32 validSizeInBytes; 1980 1975 SVGAMobFormat ptDepth; 1981 - } 1982 - __attribute__((__packed__)) 1976 + } __packed 1983 1977 SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ 1984 1978 1985 1979 typedef ··· 1988 1984 uint32 sizeInBytes; 1989 1985 uint32 validSizeInBytes; 1990 1986 SVGAMobFormat ptDepth; 1991 - } 1992 - __attribute__((__packed__)) 1987 + } __packed 1993 1988 SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ 1994 1989 1995 1990 typedef 1996 1991 struct { 1997 1992 SVGAOTableType type; 1998 - } 1999 - __attribute__((__packed__)) 1993 + } __packed 2000 1994 SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ 2001 1995 2002 1996 /* ··· 2007 2005 SVGAMobFormat ptDepth; 2008 2006 PPN base; 
2009 2007 uint32 sizeInBytes; 2010 - } 2011 - __attribute__((__packed__)) 2008 + } __packed 2012 2009 SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ 2013 2010 2014 2011 ··· 2018 2017 typedef 2019 2018 struct SVGA3dCmdDestroyGBMob { 2020 2019 SVGAMobId mobid; 2021 - } 2022 - __attribute__((__packed__)) 2020 + } __packed 2023 2021 SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ 2024 2022 2025 2023 /* ··· 2031 2031 SVGAMobFormat ptDepth; 2032 2032 PPN base; 2033 2033 uint32 sizeInBytes; 2034 - } 2035 - __attribute__((__packed__)) 2034 + } __packed 2036 2035 SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ 2037 2036 2038 2037 /* ··· 2044 2045 SVGAMobFormat ptDepth; 2045 2046 PPN64 base; 2046 2047 uint32 sizeInBytes; 2047 - } 2048 - __attribute__((__packed__)) 2048 + } __packed 2049 2049 SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ 2050 2050 2051 2051 /* ··· 2057 2059 SVGAMobFormat ptDepth; 2058 2060 PPN64 base; 2059 2061 uint32 sizeInBytes; 2060 - } 2061 - __attribute__((__packed__)) 2062 + } __packed 2062 2063 SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ 2063 2064 2064 2065 /* ··· 2067 2070 typedef 2068 2071 struct SVGA3dCmdUpdateGBMobMapping { 2069 2072 SVGAMobId mobid; 2070 - } 2071 - __attribute__((__packed__)) 2073 + } __packed 2072 2074 SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ 2073 2075 2074 2076 /* ··· 2083 2087 uint32 multisampleCount; 2084 2088 SVGA3dTextureFilter autogenFilter; 2085 2089 SVGA3dSize size; 2086 - } SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ 2090 + } __packed 2091 + SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ 2087 2092 2088 2093 /* 2089 2094 * Destroy a guest-backed surface. 
··· 2093 2096 typedef 2094 2097 struct SVGA3dCmdDestroyGBSurface { 2095 2098 uint32 sid; 2096 - } SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ 2099 + } __packed 2100 + SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ 2097 2101 2098 2102 /* 2099 2103 * Bind a guest-backed surface to an object. ··· 2104 2106 struct SVGA3dCmdBindGBSurface { 2105 2107 uint32 sid; 2106 2108 SVGAMobId mobid; 2107 - } SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ 2109 + } __packed 2110 + SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ 2108 2111 2109 2112 /* 2110 2113 * Conditionally bind a mob to a guest backed surface if testMobid ··· 2122 2123 SVGAMobId testMobid; 2123 2124 SVGAMobId mobid; 2124 2125 uint32 flags; 2125 - } 2126 + } __packed 2126 2127 SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ 2127 2128 2128 2129 /* ··· 2134 2135 struct SVGA3dCmdUpdateGBImage { 2135 2136 SVGA3dSurfaceImageId image; 2136 2137 SVGA3dBox box; 2137 - } SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ 2138 + } __packed 2139 + SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ 2138 2140 2139 2141 /* 2140 2142 * Update an entire guest-backed surface. ··· 2145 2145 typedef 2146 2146 struct SVGA3dCmdUpdateGBSurface { 2147 2147 uint32 sid; 2148 - } SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ 2148 + } __packed 2149 + SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ 2149 2150 2150 2151 /* 2151 2152 * Readback an image in a guest-backed surface. ··· 2156 2155 typedef 2157 2156 struct SVGA3dCmdReadbackGBImage { 2158 2157 SVGA3dSurfaceImageId image; 2159 - } SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ 2158 + } __packed 2159 + SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ 2160 2160 2161 2161 /* 2162 2162 * Readback an entire guest-backed surface. 
··· 2167 2165 typedef 2168 2166 struct SVGA3dCmdReadbackGBSurface { 2169 2167 uint32 sid; 2170 - } SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ 2168 + } __packed 2169 + SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ 2171 2170 2172 2171 /* 2173 2172 * Readback a sub rect of an image in a guest-backed surface. After ··· 2182 2179 SVGA3dSurfaceImageId image; 2183 2180 SVGA3dBox box; 2184 2181 uint32 invertBox; 2185 - } 2182 + } __packed 2186 2183 SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ 2187 2184 2188 2185 /* ··· 2193 2190 typedef 2194 2191 struct SVGA3dCmdInvalidateGBImage { 2195 2192 SVGA3dSurfaceImageId image; 2196 - } SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ 2193 + } __packed 2194 + SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ 2197 2195 2198 2196 /* 2199 2197 * Invalidate an entire guest-backed surface. ··· 2204 2200 typedef 2205 2201 struct SVGA3dCmdInvalidateGBSurface { 2206 2202 uint32 sid; 2207 - } SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ 2203 + } __packed 2204 + SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ 2208 2205 2209 2206 /* 2210 2207 * Invalidate a sub rect of an image in a guest-backed surface. After ··· 2219 2214 SVGA3dSurfaceImageId image; 2220 2215 SVGA3dBox box; 2221 2216 uint32 invertBox; 2222 - } 2217 + } __packed 2223 2218 SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ 2224 2219 2225 2220 /* ··· 2229 2224 typedef 2230 2225 struct SVGA3dCmdDefineGBContext { 2231 2226 uint32 cid; 2232 - } SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ 2227 + } __packed 2228 + SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ 2233 2229 2234 2230 /* 2235 2231 * Destroy a guest-backed context. 
··· 2239 2233 typedef 2240 2234 struct SVGA3dCmdDestroyGBContext { 2241 2235 uint32 cid; 2242 - } SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ 2236 + } __packed 2237 + SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ 2243 2238 2244 2239 /* 2245 2240 * Bind a guest-backed context. ··· 2259 2252 uint32 cid; 2260 2253 SVGAMobId mobid; 2261 2254 uint32 validContents; 2262 - } SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ 2255 + } __packed 2256 + SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ 2263 2257 2264 2258 /* 2265 2259 * Readback a guest-backed context. ··· 2270 2262 typedef 2271 2263 struct SVGA3dCmdReadbackGBContext { 2272 2264 uint32 cid; 2273 - } SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ 2265 + } __packed 2266 + SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ 2274 2267 2275 2268 /* 2276 2269 * Invalidate a guest-backed context. ··· 2279 2270 typedef 2280 2271 struct SVGA3dCmdInvalidateGBContext { 2281 2272 uint32 cid; 2282 - } SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ 2273 + } __packed 2274 + SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ 2283 2275 2284 2276 /* 2285 2277 * Define a guest-backed shader. ··· 2291 2281 uint32 shid; 2292 2282 SVGA3dShaderType type; 2293 2283 uint32 sizeInBytes; 2294 - } SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ 2284 + } __packed 2285 + SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ 2295 2286 2296 2287 /* 2297 2288 * Bind a guest-backed shader. ··· 2302 2291 uint32 shid; 2303 2292 SVGAMobId mobid; 2304 2293 uint32 offsetInBytes; 2305 - } SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ 2294 + } __packed 2295 + SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ 2306 2296 2307 2297 /* 2308 2298 * Destroy a guest-backed shader. 
··· 2311 2299 2312 2300 typedef struct SVGA3dCmdDestroyGBShader { 2313 2301 uint32 shid; 2314 - } SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ 2302 + } __packed 2303 + SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ 2315 2304 2316 2305 typedef 2317 2306 struct { ··· 2327 2314 * Note that FLOAT and INT constants are 4-dwords in length, while 2328 2315 * BOOL constants are 1-dword in length. 2329 2316 */ 2330 - } SVGA3dCmdSetGBShaderConstInline; 2317 + } __packed 2318 + SVGA3dCmdSetGBShaderConstInline; 2331 2319 /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ 2332 2320 2333 2321 typedef 2334 2322 struct { 2335 2323 uint32 cid; 2336 2324 SVGA3dQueryType type; 2337 - } SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ 2325 + } __packed 2326 + SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ 2338 2327 2339 2328 typedef 2340 2329 struct { ··· 2344 2329 SVGA3dQueryType type; 2345 2330 SVGAMobId mobid; 2346 2331 uint32 offset; 2347 - } SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ 2332 + } __packed 2333 + SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ 2348 2334 2349 2335 2350 2336 /* ··· 2362 2346 SVGA3dQueryType type; 2363 2347 SVGAMobId mobid; 2364 2348 uint32 offset; 2365 - } SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ 2349 + } __packed 2350 + SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ 2366 2351 2367 2352 typedef 2368 2353 struct { 2369 2354 SVGAMobId mobid; 2370 2355 uint32 fbOffset; 2371 2356 uint32 initalized; 2372 - } 2357 + } __packed 2373 2358 SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ 2374 2359 2375 2360 typedef 2376 2361 struct { 2377 2362 SVGAMobId mobid; 2378 2363 uint32 gartOffset; 2379 - } 2364 + } __packed 2380 2365 SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ 2381 2366 2382 2367 ··· 2385 2368 struct { 2386 2369 uint32 gartOffset; 2387 2370 uint32 numPages; 2388 - } 2371 + } __packed 2389 2372 SVGA3dCmdUnmapGartRange; /* 
SVGA_3D_CMD_UNMAP_GART_RANGE */ 2390 2373 2391 2374 ··· 2402 2385 int32 xRoot; 2403 2386 int32 yRoot; 2404 2387 uint32 flags; 2405 - } 2388 + } __packed 2406 2389 SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ 2407 2390 2408 2391 typedef 2409 2392 struct { 2410 2393 uint32 stid; 2411 - } 2394 + } __packed 2412 2395 SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ 2413 2396 2414 2397 typedef 2415 2398 struct { 2416 2399 uint32 stid; 2417 2400 SVGA3dSurfaceImageId image; 2418 - } 2401 + } __packed 2419 2402 SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ 2420 2403 2421 2404 typedef 2422 2405 struct { 2423 2406 uint32 stid; 2424 2407 SVGA3dBox box; 2425 - } 2408 + } __packed 2426 2409 SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ 2427 2410 2428 2411 /*
+7 -4
drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
··· 38 38 39 39 #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) 40 40 #define max_t(type, x, y) ((x) > (y) ? (x) : (y)) 41 + #define min_t(type, x, y) ((x) < (y) ? (x) : (y)) 41 42 #define surf_size_struct SVGA3dSize 42 43 #define u32 uint32 44 + #define u64 uint64_t 45 + #define U32_MAX ((u32)~0U) 43 46 44 47 #endif /* __KERNEL__ */ 45 48 ··· 707 704 708 705 static inline u32 clamped_umul32(u32 a, u32 b) 709 706 { 710 - uint64_t tmp = (uint64_t) a*b; 711 - return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp; 707 + u64 tmp = (u64) a*b; 708 + return (tmp > (u64) U32_MAX) ? U32_MAX : tmp; 712 709 } 713 710 714 711 static inline const struct svga3d_surface_desc * ··· 837 834 bool cubemap) 838 835 { 839 836 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); 840 - u32 total_size = 0; 837 + u64 total_size = 0; 841 838 u32 mip; 842 839 843 840 for (mip = 0; mip < num_mip_levels; mip++) { ··· 850 847 if (cubemap) 851 848 total_size *= SVGA3D_MAX_SURFACE_FACES; 852 849 853 - return total_size; 850 + return (u32) min_t(u64, total_size, (u64) U32_MAX); 854 851 } 855 852 856 853
+8 -1
drivers/gpu/drm/vmwgfx/svga_reg.h
··· 169 169 SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ 170 170 SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ 171 171 SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ 172 + SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */ 173 + SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */ 172 174 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ 173 175 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ 174 176 SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ 175 - SVGA_REG_TOP = 53, /* Must be 1 more than the last register */ 177 + SVGA_REG_CMD_PREPEND_LOW = 53, 178 + SVGA_REG_CMD_PREPEND_HIGH = 54, 179 + SVGA_REG_SCREENTARGET_MAX_WIDTH = 55, 180 + SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56, 181 + SVGA_REG_MOB_MAX_SIZE = 57, 182 + SVGA_REG_TOP = 58, /* Must be 1 more than the last register */ 176 183 177 184 SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ 178 185 /* Next 768 (== 256*3) registers exist for colormap */
+3 -6
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
··· 551 551 cmd->header.size = sizeof(cmd->body); 552 552 cmd->body.cid = bi->ctx->id; 553 553 cmd->body.type = bi->i1.shader_type; 554 - cmd->body.shid = 555 - cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 554 + cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 556 555 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 557 556 558 557 return 0; ··· 584 585 cmd->header.size = sizeof(cmd->body); 585 586 cmd->body.cid = bi->ctx->id; 586 587 cmd->body.type = bi->i1.rt_type; 587 - cmd->body.target.sid = 588 - cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 588 + cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 589 589 cmd->body.target.face = 0; 590 590 cmd->body.target.mipmap = 0; 591 591 vmw_fifo_commit(dev_priv, sizeof(*cmd)); ··· 626 628 cmd->body.c.cid = bi->ctx->id; 627 629 cmd->body.s1.stage = bi->i1.texture_stage; 628 630 cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; 629 - cmd->body.s1.value = 630 - cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 631 + cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 631 632 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 632 633 633 634 return 0;
+3
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 667 667 dev_priv->memory_size = 512*1024*1024; 668 668 } 669 669 dev_priv->max_mob_pages = 0; 670 + dev_priv->max_mob_size = 0; 670 671 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 671 672 uint64_t mem_size = 672 673 vmw_read(dev_priv, ··· 677 676 dev_priv->prim_bb_mem = 678 677 vmw_read(dev_priv, 679 678 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); 679 + dev_priv->max_mob_size = 680 + vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); 680 681 } else 681 682 dev_priv->prim_bb_mem = dev_priv->vram_size; 682 683
+2 -1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 40 40 #include <drm/ttm/ttm_module.h> 41 41 #include "vmwgfx_fence.h" 42 42 43 - #define VMWGFX_DRIVER_DATE "20121114" 43 + #define VMWGFX_DRIVER_DATE "20140228" 44 44 #define VMWGFX_DRIVER_MAJOR 2 45 45 #define VMWGFX_DRIVER_MINOR 5 46 46 #define VMWGFX_DRIVER_PATCHLEVEL 0 ··· 386 386 uint32_t max_gmr_ids; 387 387 uint32_t max_gmr_pages; 388 388 uint32_t max_mob_pages; 389 + uint32_t max_mob_size; 389 390 uint32_t memory_size; 390 391 bool has_gmr; 391 392 bool has_mob;
+6 -3
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 602 602 { 603 603 struct vmw_cid_cmd { 604 604 SVGA3dCmdHeader header; 605 - __le32 cid; 605 + uint32_t cid; 606 606 } *cmd; 607 607 608 608 cmd = container_of(header, struct vmw_cid_cmd, header); ··· 1835 1835 return 0; 1836 1836 } 1837 1837 1838 - static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = { 1838 + static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { 1839 1839 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, 1840 1840 false, false, false), 1841 1841 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, ··· 2032 2032 goto out_invalid; 2033 2033 2034 2034 entry = &vmw_cmd_entries[cmd_id]; 2035 + if (unlikely(!entry->func)) 2036 + goto out_invalid; 2037 + 2035 2038 if (unlikely(!entry->user_allow && !sw_context->kernel)) 2036 2039 goto out_privileged; 2037 2040 ··· 2472 2469 if (dev_priv->has_mob) { 2473 2470 ret = vmw_rebind_contexts(sw_context); 2474 2471 if (unlikely(ret != 0)) 2475 - goto out_err; 2472 + goto out_unlock_binding; 2476 2473 } 2477 2474 2478 2475 cmd = vmw_fifo_reserve(dev_priv, command_size);
+3
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
··· 102 102 vmw_fp->gb_aware = true; 103 103 param->value = dev_priv->max_mob_pages * PAGE_SIZE; 104 104 break; 105 + case DRM_VMW_PARAM_MAX_MOB_SIZE: 106 + param->value = dev_priv->max_mob_size; 107 + break; 105 108 default: 106 109 DRM_ERROR("Illegal vmwgfx get param request: %d\n", 107 110 param->param);
+19 -16
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
··· 188 188 189 189 bo = otable->page_table->pt_bo; 190 190 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 191 - if (unlikely(cmd == NULL)) 192 - DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); 193 - 194 - memset(cmd, 0, sizeof(*cmd)); 195 - cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; 196 - cmd->header.size = sizeof(cmd->body); 197 - cmd->body.type = type; 198 - cmd->body.baseAddress = 0; 199 - cmd->body.sizeInBytes = 0; 200 - cmd->body.validSizeInBytes = 0; 201 - cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; 202 - vmw_fifo_commit(dev_priv, sizeof(*cmd)); 191 + if (unlikely(cmd == NULL)) { 192 + DRM_ERROR("Failed reserving FIFO space for OTable " 193 + "takedown.\n"); 194 + } else { 195 + memset(cmd, 0, sizeof(*cmd)); 196 + cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; 197 + cmd->header.size = sizeof(cmd->body); 198 + cmd->body.type = type; 199 + cmd->body.baseAddress = 0; 200 + cmd->body.sizeInBytes = 0; 201 + cmd->body.validSizeInBytes = 0; 202 + cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; 203 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 204 + } 203 205 204 206 if (bo) { 205 207 int ret; ··· 564 562 if (unlikely(cmd == NULL)) { 565 563 DRM_ERROR("Failed reserving FIFO space for Memory " 566 564 "Object unbinding.\n"); 565 + } else { 566 + cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB; 567 + cmd->header.size = sizeof(cmd->body); 568 + cmd->body.mobid = mob->id; 569 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 567 570 } 568 - cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB; 569 - cmd->header.size = sizeof(cmd->body); 570 - cmd->body.mobid = mob->id; 571 - vmw_fifo_commit(dev_priv, sizeof(*cmd)); 572 571 if (bo) { 573 572 vmw_fence_single_bo(bo, NULL); 574 573 ttm_bo_unreserve(bo);
+1 -2
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 427 427 INIT_LIST_HEAD(&vmw_bo->res_list); 428 428 429 429 ret = ttm_bo_init(bdev, &vmw_bo->base, size, 430 - (user) ? ttm_bo_type_device : 431 - ttm_bo_type_kernel, placement, 430 + ttm_bo_type_device, placement, 432 431 0, interruptible, 433 432 NULL, acc_size, NULL, bo_free); 434 433 return ret;
+9 -7
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
··· 371 371 TTM_REF_USAGE); 372 372 } 373 373 374 - int vmw_shader_alloc(struct vmw_private *dev_priv, 375 - struct vmw_dma_buffer *buffer, 376 - size_t shader_size, 377 - size_t offset, 378 - SVGA3dShaderType shader_type, 379 - struct ttm_object_file *tfile, 380 - u32 *handle) 374 + static int vmw_shader_alloc(struct vmw_private *dev_priv, 375 + struct vmw_dma_buffer *buffer, 376 + size_t shader_size, 377 + size_t offset, 378 + SVGA3dShaderType shader_type, 379 + struct ttm_object_file *tfile, 380 + u32 *handle) 381 381 { 382 382 struct vmw_user_shader *ushader; 383 383 struct vmw_resource *res, *tmp; ··· 779 779 int ret; 780 780 781 781 man = kzalloc(sizeof(*man), GFP_KERNEL); 782 + if (man == NULL) 783 + return ERR_PTR(-ENOMEM); 782 784 783 785 man->dev_priv = dev_priv; 784 786 INIT_LIST_HEAD(&man->list);
+1 -1
drivers/gpu/host1x/job.c
··· 538 538 539 539 g->base = job->gather_addr_phys[i]; 540 540 541 - for (j = 0; j < job->num_gathers; j++) 541 + for (j = i + 1; j < job->num_gathers; j++) 542 542 if (job->gathers[j].bo == g->bo) 543 543 job->gathers[j].handled = true; 544 544
+3
drivers/hid/hid-apple.c
··· 469 469 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 470 470 USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), 471 471 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 472 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 473 + USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS), 474 + .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 472 475 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS), 473 476 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 474 477 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
+3
drivers/hid/hid-core.c
··· 1679 1679 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1680 1680 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) }, 1681 1681 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) }, 1682 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) }, 1682 1683 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 1683 1684 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1684 1685 { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, ··· 1780 1779 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) }, 1781 1780 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, 1782 1781 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, 1782 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2) }, 1783 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2) }, 1783 1784 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, 1784 1785 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, 1785 1786 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
+8 -3
drivers/hid/hid-hyperv.c
··· 157 157 u32 report_desc_size; 158 158 struct hv_input_dev_info hid_dev_info; 159 159 struct hid_device *hid_device; 160 + u8 input_buf[HID_MAX_BUFFER_SIZE]; 160 161 }; 161 162 162 163 ··· 257 256 struct synthhid_msg *hid_msg; 258 257 struct mousevsc_dev *input_dev = hv_get_drvdata(device); 259 258 struct synthhid_input_report *input_report; 259 + size_t len; 260 260 261 261 pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet + 262 262 (packet->offset8 << 3)); ··· 302 300 (struct synthhid_input_report *)pipe_msg->data; 303 301 if (!input_dev->init_complete) 304 302 break; 305 - hid_input_report(input_dev->hid_device, 306 - HID_INPUT_REPORT, input_report->buffer, 307 - input_report->header.size, 1); 303 + 304 + len = min(input_report->header.size, 305 + (u32)sizeof(input_dev->input_buf)); 306 + memcpy(input_dev->input_buf, input_report->buffer, len); 307 + hid_input_report(input_dev->hid_device, HID_INPUT_REPORT, 308 + input_dev->input_buf, len, 1); 308 309 break; 309 310 default: 310 311 pr_err("unsupported hid msg type - type %d len %d",
+8
drivers/hid/hid-ids.h
··· 135 135 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b 136 136 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255 137 137 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 138 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257 138 139 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 139 140 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 140 141 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 ··· 241 240 242 241 #define USB_VENDOR_ID_CYGNAL 0x10c4 243 242 #define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a 243 + #define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH 0x81b9 244 244 245 245 #define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244 246 246 ··· 453 451 #define USB_VENDOR_ID_INTEL_1 0x8087 454 452 #define USB_DEVICE_ID_INTEL_HID_SENSOR 0x09fa 455 453 454 + #define USB_VENDOR_ID_STM_0 0x0483 455 + #define USB_DEVICE_ID_STM_HID_SENSOR 0x91d1 456 + 456 457 #define USB_VENDOR_ID_ION 0x15e4 457 458 #define USB_DEVICE_ID_ICADE 0x0132 458 459 ··· 624 619 #define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 625 620 #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 626 621 #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c 622 + #define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7 623 + #define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9 627 624 628 625 #define USB_VENDOR_ID_MOJO 0x8282 629 626 #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 ··· 651 644 652 645 #define USB_VENDOR_ID_NEXIO 0x1870 653 646 #define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d 647 + #define USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750 0x0110 654 648 655 649 #define USB_VENDOR_ID_NEXTWINDOW 0x1926 656 650 #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
+1 -1
drivers/hid/hid-input.c
··· 1178 1178 1179 1179 /* fall back to generic raw-output-report */ 1180 1180 len = ((report->size - 1) >> 3) + 1 + (report->id > 0); 1181 - buf = kmalloc(len, GFP_KERNEL); 1181 + buf = hid_alloc_report_buf(report, GFP_KERNEL); 1182 1182 if (!buf) 1183 1183 return; 1184 1184
+4
drivers/hid/hid-microsoft.c
··· 208 208 .driver_data = MS_NOGET }, 209 209 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), 210 210 .driver_data = MS_DUPLICATE_USAGES }, 211 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), 212 + .driver_data = 0 }, 213 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), 214 + .driver_data = 0 }, 211 215 212 216 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT), 213 217 .driver_data = MS_PRESENTER },
+5
drivers/hid/hid-multitouch.c
··· 1166 1166 MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG, 1167 1167 USB_DEVICE_ID_MULTITOUCH_3200) }, 1168 1168 1169 + /* FocalTech Panels */ 1170 + { .driver_data = MT_CLS_SERIAL, 1171 + MT_USB_DEVICE(USB_VENDOR_ID_CYGNAL, 1172 + USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH) }, 1173 + 1169 1174 /* GeneralTouch panel */ 1170 1175 { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS, 1171 1176 MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+3
drivers/hid/hid-sensor-hub.c
··· 665 665 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1, 666 666 USB_DEVICE_ID_INTEL_HID_SENSOR), 667 667 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, 668 + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0, 669 + USB_DEVICE_ID_STM_HID_SENSOR), 670 + .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, 668 671 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID, 669 672 HID_ANY_ID) }, 670 673 { }
+1 -1
drivers/hid/i2c-hid/i2c-hid.c
··· 582 582 int ret; 583 583 int len = i2c_hid_get_report_length(rep) - 2; 584 584 585 - buf = kzalloc(len, GFP_KERNEL); 585 + buf = hid_alloc_report_buf(rep, GFP_KERNEL); 586 586 if (!buf) 587 587 return; 588 588
+1
drivers/hid/usbhid/hid-quirks.c
··· 74 74 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 75 75 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 76 76 { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, 77 + { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS }, 77 78 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, 78 79 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, 79 80 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
+1 -1
drivers/hwmon/max1668.c
··· 243 243 data->temp_min[index] = clamp_val(temp/1000, -128, 127); 244 244 if (i2c_smbus_write_byte_data(client, 245 245 MAX1668_REG_LIML_WR(index), 246 - data->temp_max[index])) 246 + data->temp_min[index])) 247 247 count = -EIO; 248 248 mutex_unlock(&data->update_lock); 249 249
+1 -1
drivers/iio/gyro/Kconfig
··· 70 70 select IIO_TRIGGERED_BUFFER if (IIO_BUFFER) 71 71 help 72 72 Say yes here to build support for STMicroelectronics gyroscopes: 73 - L3G4200D, LSM330DL, L3GD20, L3GD20H, LSM330DLC, L3G4IS, LSM330. 73 + L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330. 74 74 75 75 This driver can also be built as a module. If so, these modules 76 76 will be created:
-1
drivers/iio/gyro/st_gyro.h
··· 19 19 #define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro" 20 20 #define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro" 21 21 #define L3GD20_GYRO_DEV_NAME "l3gd20" 22 - #define L3GD20H_GYRO_DEV_NAME "l3gd20h" 23 22 #define L3G4IS_GYRO_DEV_NAME "l3g4is_ui" 24 23 #define LSM330_GYRO_DEV_NAME "lsm330_gyro" 25 24
+4 -5
drivers/iio/gyro/st_gyro_core.c
··· 167 167 .wai = ST_GYRO_2_WAI_EXP, 168 168 .sensors_supported = { 169 169 [0] = L3GD20_GYRO_DEV_NAME, 170 - [1] = L3GD20H_GYRO_DEV_NAME, 171 - [2] = LSM330D_GYRO_DEV_NAME, 172 - [3] = LSM330DLC_GYRO_DEV_NAME, 173 - [4] = L3G4IS_GYRO_DEV_NAME, 174 - [5] = LSM330_GYRO_DEV_NAME, 170 + [1] = LSM330D_GYRO_DEV_NAME, 171 + [2] = LSM330DLC_GYRO_DEV_NAME, 172 + [3] = L3G4IS_GYRO_DEV_NAME, 173 + [4] = LSM330_GYRO_DEV_NAME, 175 174 }, 176 175 .ch = (struct iio_chan_spec *)st_gyro_16bit_channels, 177 176 .odr = {
-1
drivers/iio/gyro/st_gyro_i2c.c
··· 55 55 { LSM330DL_GYRO_DEV_NAME }, 56 56 { LSM330DLC_GYRO_DEV_NAME }, 57 57 { L3GD20_GYRO_DEV_NAME }, 58 - { L3GD20H_GYRO_DEV_NAME }, 59 58 { L3G4IS_GYRO_DEV_NAME }, 60 59 { LSM330_GYRO_DEV_NAME }, 61 60 {},
-1
drivers/iio/gyro/st_gyro_spi.c
··· 54 54 { LSM330DL_GYRO_DEV_NAME }, 55 55 { LSM330DLC_GYRO_DEV_NAME }, 56 56 { L3GD20_GYRO_DEV_NAME }, 57 - { L3GD20H_GYRO_DEV_NAME }, 58 57 { L3G4IS_GYRO_DEV_NAME }, 59 58 { LSM330_GYRO_DEV_NAME }, 60 59 {},
+8 -8
drivers/iio/light/cm32181.c
··· 103 103 /** 104 104 * cm32181_read_als_it() - Get sensor integration time (ms) 105 105 * @cm32181: pointer of struct cm32181 106 - * @val: pointer of int to load the als_it value. 106 + * @val2: pointer of int to load the als_it value. 107 107 * 108 108 * Report the current integartion time by millisecond. 109 109 * 110 - * Return: IIO_VAL_INT for success, otherwise -EINVAL. 110 + * Return: IIO_VAL_INT_PLUS_MICRO for success, otherwise -EINVAL. 111 111 */ 112 - static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val) 112 + static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val2) 113 113 { 114 114 u16 als_it; 115 115 int i; ··· 119 119 als_it >>= CM32181_CMD_ALS_IT_SHIFT; 120 120 for (i = 0; i < ARRAY_SIZE(als_it_bits); i++) { 121 121 if (als_it == als_it_bits[i]) { 122 - *val = als_it_value[i]; 123 - return IIO_VAL_INT; 122 + *val2 = als_it_value[i]; 123 + return IIO_VAL_INT_PLUS_MICRO; 124 124 } 125 125 } 126 126 ··· 221 221 *val = cm32181->calibscale; 222 222 return IIO_VAL_INT; 223 223 case IIO_CHAN_INFO_INT_TIME: 224 - ret = cm32181_read_als_it(cm32181, val); 224 + ret = cm32181_read_als_it(cm32181, val2); 225 225 return ret; 226 226 } 227 227 ··· 240 240 cm32181->calibscale = val; 241 241 return val; 242 242 case IIO_CHAN_INFO_INT_TIME: 243 - ret = cm32181_write_als_it(cm32181, val); 243 + ret = cm32181_write_als_it(cm32181, val2); 244 244 return ret; 245 245 } 246 246 ··· 264 264 265 265 n = ARRAY_SIZE(als_it_value); 266 266 for (i = 0, len = 0; i < n; i++) 267 - len += sprintf(buf + len, "%d ", als_it_value[i]); 267 + len += sprintf(buf + len, "0.%06u ", als_it_value[i]); 268 268 return len + sprintf(buf + len, "\n"); 269 269 } 270 270
+23 -22
drivers/iio/light/cm36651.c
··· 50 50 #define CM36651_CS_CONF2_DEFAULT_BIT 0x08 51 51 52 52 /* CS_CONF3 channel integration time */ 53 - #define CM36651_CS_IT1 0x00 /* Integration time 80000 usec */ 54 - #define CM36651_CS_IT2 0x40 /* Integration time 160000 usec */ 55 - #define CM36651_CS_IT3 0x80 /* Integration time 320000 usec */ 56 - #define CM36651_CS_IT4 0xC0 /* Integration time 640000 usec */ 53 + #define CM36651_CS_IT1 0x00 /* Integration time 80 msec */ 54 + #define CM36651_CS_IT2 0x40 /* Integration time 160 msec */ 55 + #define CM36651_CS_IT3 0x80 /* Integration time 320 msec */ 56 + #define CM36651_CS_IT4 0xC0 /* Integration time 640 msec */ 57 57 58 58 /* PS_CONF1 command code */ 59 59 #define CM36651_PS_ENABLE 0x00 ··· 64 64 #define CM36651_PS_PERS4 0x0C 65 65 66 66 /* PS_CONF1 command code: integration time */ 67 - #define CM36651_PS_IT1 0x00 /* Integration time 320 usec */ 68 - #define CM36651_PS_IT2 0x10 /* Integration time 420 usec */ 69 - #define CM36651_PS_IT3 0x20 /* Integration time 520 usec */ 70 - #define CM36651_PS_IT4 0x30 /* Integration time 640 usec */ 67 + #define CM36651_PS_IT1 0x00 /* Integration time 0.32 msec */ 68 + #define CM36651_PS_IT2 0x10 /* Integration time 0.42 msec */ 69 + #define CM36651_PS_IT3 0x20 /* Integration time 0.52 msec */ 70 + #define CM36651_PS_IT4 0x30 /* Integration time 0.64 msec */ 71 71 72 72 /* PS_CONF1 command code: duty ratio */ 73 73 #define CM36651_PS_DR1 0x00 /* Duty ratio 1/80 */ ··· 93 93 #define CM36651_CLOSE_PROXIMITY 0x32 94 94 #define CM36651_FAR_PROXIMITY 0x33 95 95 96 - #define CM36651_CS_INT_TIME_AVAIL "80000 160000 320000 640000" 97 - #define CM36651_PS_INT_TIME_AVAIL "320 420 520 640" 96 + #define CM36651_CS_INT_TIME_AVAIL "0.08 0.16 0.32 0.64" 97 + #define CM36651_PS_INT_TIME_AVAIL "0.000320 0.000420 0.000520 0.000640" 98 98 99 99 enum cm36651_operation_mode { 100 100 CM36651_LIGHT_EN, ··· 356 356 } 357 357 358 358 static int cm36651_read_int_time(struct cm36651_data *cm36651, 359 - struct iio_chan_spec const *chan, 
int *val) 359 + struct iio_chan_spec const *chan, int *val2) 360 360 { 361 361 switch (chan->type) { 362 362 case IIO_LIGHT: 363 363 if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT1) 364 - *val = 80000; 364 + *val2 = 80000; 365 365 else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT2) 366 - *val = 160000; 366 + *val2 = 160000; 367 367 else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT3) 368 - *val = 320000; 368 + *val2 = 320000; 369 369 else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT4) 370 - *val = 640000; 370 + *val2 = 640000; 371 371 else 372 372 return -EINVAL; 373 373 break; 374 374 case IIO_PROXIMITY: 375 375 if (cm36651->ps_int_time == CM36651_PS_IT1) 376 - *val = 320; 376 + *val2 = 320; 377 377 else if (cm36651->ps_int_time == CM36651_PS_IT2) 378 - *val = 420; 378 + *val2 = 420; 379 379 else if (cm36651->ps_int_time == CM36651_PS_IT3) 380 - *val = 520; 380 + *val2 = 520; 381 381 else if (cm36651->ps_int_time == CM36651_PS_IT4) 382 - *val = 640; 382 + *val2 = 640; 383 383 else 384 384 return -EINVAL; 385 385 break; ··· 387 387 return -EINVAL; 388 388 } 389 389 390 - return IIO_VAL_INT; 390 + return IIO_VAL_INT_PLUS_MICRO; 391 391 } 392 392 393 393 static int cm36651_write_int_time(struct cm36651_data *cm36651, ··· 459 459 ret = cm36651_read_channel(cm36651, chan, val); 460 460 break; 461 461 case IIO_CHAN_INFO_INT_TIME: 462 - ret = cm36651_read_int_time(cm36651, chan, val); 462 + *val = 0; 463 + ret = cm36651_read_int_time(cm36651, chan, val2); 463 464 break; 464 465 default: 465 466 ret = -EINVAL; ··· 480 479 int ret = -EINVAL; 481 480 482 481 if (mask == IIO_CHAN_INFO_INT_TIME) { 483 - ret = cm36651_write_int_time(cm36651, chan, val); 482 + ret = cm36651_write_int_time(cm36651, chan, val2); 484 483 if (ret < 0) 485 484 dev_err(&client->dev, "Integration time write failed\n"); 486 485 }
+2 -2
drivers/infiniband/hw/mlx4/main.c
··· 53 53 #include "user.h" 54 54 55 55 #define DRV_NAME MLX4_IB_DRV_NAME 56 - #define DRV_VERSION "1.0" 57 - #define DRV_RELDATE "April 4, 2008" 56 + #define DRV_VERSION "2.2-1" 57 + #define DRV_RELDATE "Feb 2014" 58 58 59 59 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF 60 60 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
+2 -2
drivers/infiniband/hw/mlx5/main.c
··· 46 46 #include "mlx5_ib.h" 47 47 48 48 #define DRIVER_NAME "mlx5_ib" 49 - #define DRIVER_VERSION "1.0" 50 - #define DRIVER_RELDATE "June 2013" 49 + #define DRIVER_VERSION "2.2-1" 50 + #define DRIVER_RELDATE "Feb 2014" 51 51 52 52 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); 53 53 MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
+109 -73
drivers/infiniband/ulp/isert/ib_isert.c
··· 492 492 isert_conn->state = ISER_CONN_INIT; 493 493 INIT_LIST_HEAD(&isert_conn->conn_accept_node); 494 494 init_completion(&isert_conn->conn_login_comp); 495 - init_waitqueue_head(&isert_conn->conn_wait); 496 - init_waitqueue_head(&isert_conn->conn_wait_comp_err); 495 + init_completion(&isert_conn->conn_wait); 496 + init_completion(&isert_conn->conn_wait_comp_err); 497 497 kref_init(&isert_conn->conn_kref); 498 498 kref_get(&isert_conn->conn_kref); 499 499 mutex_init(&isert_conn->conn_mutex); 500 - mutex_init(&isert_conn->conn_comp_mutex); 501 500 spin_lock_init(&isert_conn->conn_lock); 502 501 503 502 cma_id->context = isert_conn; ··· 687 688 688 689 pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 689 690 mutex_lock(&isert_conn->conn_mutex); 690 - isert_conn->state = ISER_CONN_DOWN; 691 + if (isert_conn->state == ISER_CONN_UP) 692 + isert_conn->state = ISER_CONN_TERMINATING; 691 693 692 694 if (isert_conn->post_recv_buf_count == 0 && 693 695 atomic_read(&isert_conn->post_send_buf_count) == 0) { 694 - pr_debug("Calling wake_up(&isert_conn->conn_wait);\n"); 695 696 mutex_unlock(&isert_conn->conn_mutex); 696 697 goto wake_up; 697 698 } ··· 711 712 mutex_unlock(&isert_conn->conn_mutex); 712 713 713 714 wake_up: 714 - wake_up(&isert_conn->conn_wait); 715 + complete(&isert_conn->conn_wait); 715 716 isert_put_conn(isert_conn); 716 717 } 717 718 ··· 887 888 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED 888 889 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. 
889 890 */ 890 - mutex_lock(&isert_conn->conn_comp_mutex); 891 - if (coalesce && 891 + mutex_lock(&isert_conn->conn_mutex); 892 + if (coalesce && isert_conn->state == ISER_CONN_UP && 892 893 ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { 894 + tx_desc->llnode_active = true; 893 895 llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); 894 - mutex_unlock(&isert_conn->conn_comp_mutex); 896 + mutex_unlock(&isert_conn->conn_mutex); 895 897 return; 896 898 } 897 899 isert_conn->conn_comp_batch = 0; 898 900 tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist); 899 - mutex_unlock(&isert_conn->conn_comp_mutex); 901 + mutex_unlock(&isert_conn->conn_mutex); 900 902 901 903 send_wr->send_flags = IB_SEND_SIGNALED; 902 904 } ··· 1464 1464 case ISCSI_OP_SCSI_CMD: 1465 1465 spin_lock_bh(&conn->cmd_lock); 1466 1466 if (!list_empty(&cmd->i_conn_node)) 1467 - list_del(&cmd->i_conn_node); 1467 + list_del_init(&cmd->i_conn_node); 1468 1468 spin_unlock_bh(&conn->cmd_lock); 1469 1469 1470 1470 if (cmd->data_direction == DMA_TO_DEVICE) ··· 1476 1476 case ISCSI_OP_SCSI_TMFUNC: 1477 1477 spin_lock_bh(&conn->cmd_lock); 1478 1478 if (!list_empty(&cmd->i_conn_node)) 1479 - list_del(&cmd->i_conn_node); 1479 + list_del_init(&cmd->i_conn_node); 1480 1480 spin_unlock_bh(&conn->cmd_lock); 1481 1481 1482 1482 transport_generic_free_cmd(&cmd->se_cmd, 0); ··· 1486 1486 case ISCSI_OP_TEXT: 1487 1487 spin_lock_bh(&conn->cmd_lock); 1488 1488 if (!list_empty(&cmd->i_conn_node)) 1489 - list_del(&cmd->i_conn_node); 1489 + list_del_init(&cmd->i_conn_node); 1490 1490 spin_unlock_bh(&conn->cmd_lock); 1491 1491 1492 1492 /* ··· 1549 1549 iscsit_stop_dataout_timer(cmd); 1550 1550 device->unreg_rdma_mem(isert_cmd, isert_conn); 1551 1551 cmd->write_data_done = wr->cur_rdma_length; 1552 + wr->send_wr_num = 0; 1552 1553 1553 1554 pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1554 1555 spin_lock_bh(&cmd->istate_lock); ··· 1590 1589 pr_debug("Calling 
iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); 1591 1590 /* 1592 1591 * Call atomic_dec(&isert_conn->post_send_buf_count) 1593 - * from isert_free_conn() 1592 + * from isert_wait_conn() 1594 1593 */ 1595 1594 isert_conn->logout_posted = true; 1596 1595 iscsit_logout_post_handler(cmd, cmd->conn); ··· 1614 1613 struct ib_device *ib_dev) 1615 1614 { 1616 1615 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1616 + struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1617 1617 1618 1618 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || 1619 1619 cmd->i_state == ISTATE_SEND_LOGOUTRSP || ··· 1626 1624 queue_work(isert_comp_wq, &isert_cmd->comp_work); 1627 1625 return; 1628 1626 } 1629 - atomic_dec(&isert_conn->post_send_buf_count); 1627 + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); 1630 1628 1631 1629 cmd->i_state = ISTATE_SENT_STATUS; 1632 1630 isert_completion_put(tx_desc, isert_cmd, ib_dev); ··· 1664 1662 case ISER_IB_RDMA_READ: 1665 1663 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); 1666 1664 1667 - atomic_dec(&isert_conn->post_send_buf_count); 1665 + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); 1668 1666 isert_completion_rdma_read(tx_desc, isert_cmd); 1669 1667 break; 1670 1668 default: ··· 1693 1691 } 1694 1692 1695 1693 static void 1696 - isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) 1694 + isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev) 1695 + { 1696 + struct llist_node *llnode; 1697 + struct isert_rdma_wr *wr; 1698 + struct iser_tx_desc *t; 1699 + 1700 + mutex_lock(&isert_conn->conn_mutex); 1701 + llnode = llist_del_all(&isert_conn->conn_comp_llist); 1702 + isert_conn->conn_comp_batch = 0; 1703 + mutex_unlock(&isert_conn->conn_mutex); 1704 + 1705 + while (llnode) { 1706 + t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); 1707 + llnode = llist_next(llnode); 1708 + wr = &t->isert_cmd->rdma_wr; 1709 + 1710 + atomic_sub(wr->send_wr_num + 1, 
&isert_conn->post_send_buf_count); 1711 + isert_completion_put(t, t->isert_cmd, ib_dev); 1712 + } 1713 + } 1714 + 1715 + static void 1716 + isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) 1697 1717 { 1698 1718 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1719 + struct isert_cmd *isert_cmd = tx_desc->isert_cmd; 1720 + struct llist_node *llnode = tx_desc->comp_llnode_batch; 1721 + struct isert_rdma_wr *wr; 1722 + struct iser_tx_desc *t; 1699 1723 1700 - if (tx_desc) { 1701 - struct isert_cmd *isert_cmd = tx_desc->isert_cmd; 1724 + while (llnode) { 1725 + t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); 1726 + llnode = llist_next(llnode); 1727 + wr = &t->isert_cmd->rdma_wr; 1702 1728 1703 - if (!isert_cmd) 1704 - isert_unmap_tx_desc(tx_desc, ib_dev); 1705 - else 1706 - isert_completion_put(tx_desc, isert_cmd, ib_dev); 1729 + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); 1730 + isert_completion_put(t, t->isert_cmd, ib_dev); 1731 + } 1732 + tx_desc->comp_llnode_batch = NULL; 1733 + 1734 + if (!isert_cmd) 1735 + isert_unmap_tx_desc(tx_desc, ib_dev); 1736 + else 1737 + isert_completion_put(tx_desc, isert_cmd, ib_dev); 1738 + } 1739 + 1740 + static void 1741 + isert_cq_rx_comp_err(struct isert_conn *isert_conn) 1742 + { 1743 + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1744 + struct iscsi_conn *conn = isert_conn->conn; 1745 + 1746 + if (isert_conn->post_recv_buf_count) 1747 + return; 1748 + 1749 + isert_cq_drain_comp_llist(isert_conn, ib_dev); 1750 + 1751 + if (conn->sess) { 1752 + target_sess_cmd_list_set_waiting(conn->sess->se_sess); 1753 + target_wait_for_sess_cmds(conn->sess->se_sess); 1707 1754 } 1708 1755 1709 - if (isert_conn->post_recv_buf_count == 0 && 1710 - atomic_read(&isert_conn->post_send_buf_count) == 0) { 1711 - pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 1712 - pr_debug("Calling wake_up from isert_cq_comp_err\n"); 1756 + while 
(atomic_read(&isert_conn->post_send_buf_count)) 1757 + msleep(3000); 1713 1758 1714 - mutex_lock(&isert_conn->conn_mutex); 1715 - if (isert_conn->state != ISER_CONN_DOWN) 1716 - isert_conn->state = ISER_CONN_TERMINATING; 1717 - mutex_unlock(&isert_conn->conn_mutex); 1759 + mutex_lock(&isert_conn->conn_mutex); 1760 + isert_conn->state = ISER_CONN_DOWN; 1761 + mutex_unlock(&isert_conn->conn_mutex); 1718 1762 1719 - wake_up(&isert_conn->conn_wait_comp_err); 1720 - } 1763 + complete(&isert_conn->conn_wait_comp_err); 1721 1764 } 1722 1765 1723 1766 static void ··· 1787 1740 pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); 1788 1741 pr_debug("TX wc.status: 0x%08x\n", wc.status); 1789 1742 pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); 1790 - atomic_dec(&isert_conn->post_send_buf_count); 1791 - isert_cq_comp_err(tx_desc, isert_conn); 1743 + 1744 + if (wc.wr_id != ISER_FASTREG_LI_WRID) { 1745 + if (tx_desc->llnode_active) 1746 + continue; 1747 + 1748 + atomic_dec(&isert_conn->post_send_buf_count); 1749 + isert_cq_tx_comp_err(tx_desc, isert_conn); 1750 + } 1792 1751 } 1793 1752 } 1794 1753 ··· 1837 1784 wc.vendor_err); 1838 1785 } 1839 1786 isert_conn->post_recv_buf_count--; 1840 - isert_cq_comp_err(NULL, isert_conn); 1787 + isert_cq_rx_comp_err(isert_conn); 1841 1788 } 1842 1789 } 1843 1790 ··· 2255 2202 2256 2203 if (!fr_desc->valid) { 2257 2204 memset(&inv_wr, 0, sizeof(inv_wr)); 2205 + inv_wr.wr_id = ISER_FASTREG_LI_WRID; 2258 2206 inv_wr.opcode = IB_WR_LOCAL_INV; 2259 2207 inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey; 2260 2208 wr = &inv_wr; ··· 2266 2212 2267 2213 /* Prepare FASTREG WR */ 2268 2214 memset(&fr_wr, 0, sizeof(fr_wr)); 2215 + fr_wr.wr_id = ISER_FASTREG_LI_WRID; 2269 2216 fr_wr.opcode = IB_WR_FAST_REG_MR; 2270 2217 fr_wr.wr.fast_reg.iova_start = 2271 2218 fr_desc->data_frpl->page_list[0] + page_off; ··· 2432 2377 isert_init_send_wr(isert_conn, isert_cmd, 2433 2378 &isert_cmd->tx_desc.send_wr, true); 2434 2379 2435 - 
atomic_inc(&isert_conn->post_send_buf_count); 2380 + atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); 2436 2381 2437 2382 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); 2438 2383 if (rc) { 2439 2384 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); 2440 - atomic_dec(&isert_conn->post_send_buf_count); 2385 + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); 2441 2386 } 2442 2387 pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n", 2443 2388 isert_cmd); ··· 2465 2410 return rc; 2466 2411 } 2467 2412 2468 - atomic_inc(&isert_conn->post_send_buf_count); 2413 + atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count); 2469 2414 2470 2415 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); 2471 2416 if (rc) { 2472 2417 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); 2473 - atomic_dec(&isert_conn->post_send_buf_count); 2418 + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); 2474 2419 } 2475 2420 pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", 2476 2421 isert_cmd); ··· 2757 2702 kfree(isert_np); 2758 2703 } 2759 2704 2760 - static int isert_check_state(struct isert_conn *isert_conn, int state) 2761 - { 2762 - int ret; 2763 - 2764 - mutex_lock(&isert_conn->conn_mutex); 2765 - ret = (isert_conn->state == state); 2766 - mutex_unlock(&isert_conn->conn_mutex); 2767 - 2768 - return ret; 2769 - } 2770 - 2771 - static void isert_free_conn(struct iscsi_conn *conn) 2705 + static void isert_wait_conn(struct iscsi_conn *conn) 2772 2706 { 2773 2707 struct isert_conn *isert_conn = conn->context; 2774 2708 2775 - pr_debug("isert_free_conn: Starting \n"); 2709 + pr_debug("isert_wait_conn: Starting \n"); 2776 2710 /* 2777 2711 * Decrement post_send_buf_count for special case when called 2778 2712 * from isert_do_control_comp() -> iscsit_logout_post_handler() ··· 2771 2727 atomic_dec(&isert_conn->post_send_buf_count); 2772 2728 2773 2729 if 
(isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) { 2774 - pr_debug("Calling rdma_disconnect from isert_free_conn\n"); 2730 + pr_debug("Calling rdma_disconnect from isert_wait_conn\n"); 2775 2731 rdma_disconnect(isert_conn->conn_cm_id); 2776 2732 } 2777 2733 /* 2778 2734 * Only wait for conn_wait_comp_err if the isert_conn made it 2779 2735 * into full feature phase.. 2780 2736 */ 2781 - if (isert_conn->state == ISER_CONN_UP) { 2782 - pr_debug("isert_free_conn: Before wait_event comp_err %d\n", 2783 - isert_conn->state); 2784 - mutex_unlock(&isert_conn->conn_mutex); 2785 - 2786 - wait_event(isert_conn->conn_wait_comp_err, 2787 - (isert_check_state(isert_conn, ISER_CONN_TERMINATING))); 2788 - 2789 - wait_event(isert_conn->conn_wait, 2790 - (isert_check_state(isert_conn, ISER_CONN_DOWN))); 2791 - 2792 - isert_put_conn(isert_conn); 2793 - return; 2794 - } 2795 2737 if (isert_conn->state == ISER_CONN_INIT) { 2796 2738 mutex_unlock(&isert_conn->conn_mutex); 2797 - isert_put_conn(isert_conn); 2798 2739 return; 2799 2740 } 2800 - pr_debug("isert_free_conn: wait_event conn_wait %d\n", 2801 - isert_conn->state); 2741 + if (isert_conn->state == ISER_CONN_UP) 2742 + isert_conn->state = ISER_CONN_TERMINATING; 2802 2743 mutex_unlock(&isert_conn->conn_mutex); 2803 2744 2804 - wait_event(isert_conn->conn_wait, 2805 - (isert_check_state(isert_conn, ISER_CONN_DOWN))); 2745 + wait_for_completion(&isert_conn->conn_wait_comp_err); 2746 + 2747 + wait_for_completion(&isert_conn->conn_wait); 2748 + } 2749 + 2750 + static void isert_free_conn(struct iscsi_conn *conn) 2751 + { 2752 + struct isert_conn *isert_conn = conn->context; 2806 2753 2807 2754 isert_put_conn(isert_conn); 2808 2755 } ··· 2806 2771 .iscsit_setup_np = isert_setup_np, 2807 2772 .iscsit_accept_np = isert_accept_np, 2808 2773 .iscsit_free_np = isert_free_np, 2774 + .iscsit_wait_conn = isert_wait_conn, 2809 2775 .iscsit_free_conn = isert_free_conn, 2810 2776 .iscsit_get_login_rx = isert_get_login_rx, 2811 2777 
.iscsit_put_login_tx = isert_put_login_tx,
+4 -3
drivers/infiniband/ulp/isert/ib_isert.h
··· 6 6 7 7 #define ISERT_RDMA_LISTEN_BACKLOG 10 8 8 #define ISCSI_ISER_SG_TABLESIZE 256 9 + #define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL 9 10 10 11 enum isert_desc_type { 11 12 ISCSI_TX_CONTROL, ··· 46 45 struct isert_cmd *isert_cmd; 47 46 struct llist_node *comp_llnode_batch; 48 47 struct llist_node comp_llnode; 48 + bool llnode_active; 49 49 struct ib_send_wr send_wr; 50 50 } __packed; 51 51 ··· 118 116 struct isert_device *conn_device; 119 117 struct work_struct conn_logout_work; 120 118 struct mutex conn_mutex; 121 - wait_queue_head_t conn_wait; 122 - wait_queue_head_t conn_wait_comp_err; 119 + struct completion conn_wait; 120 + struct completion conn_wait_comp_err; 123 121 struct kref conn_kref; 124 122 struct list_head conn_fr_pool; 125 123 int conn_fr_pool_size; ··· 128 126 #define ISERT_COMP_BATCH_COUNT 8 129 127 int conn_comp_batch; 130 128 struct llist_head conn_comp_llist; 131 - struct mutex conn_comp_mutex; 132 129 }; 133 130 134 131 #define ISERT_MAX_CQ 64
-19
drivers/input/misc/arizona-haptics.c
··· 37 37 struct arizona_haptics, 38 38 work); 39 39 struct arizona *arizona = haptics->arizona; 40 - struct mutex *dapm_mutex = &arizona->dapm->card->dapm_mutex; 41 40 int ret; 42 41 43 42 if (!haptics->arizona->dapm) { ··· 66 67 return; 67 68 } 68 69 69 - mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); 70 - 71 70 ret = snd_soc_dapm_enable_pin(arizona->dapm, "HAPTICS"); 72 71 if (ret != 0) { 73 72 dev_err(arizona->dev, "Failed to start HAPTICS: %d\n", 74 73 ret); 75 - mutex_unlock(dapm_mutex); 76 74 return; 77 75 } 78 76 ··· 77 81 if (ret != 0) { 78 82 dev_err(arizona->dev, "Failed to sync DAPM: %d\n", 79 83 ret); 80 - mutex_unlock(dapm_mutex); 81 84 return; 82 85 } 83 - 84 - mutex_unlock(dapm_mutex); 85 - 86 86 } else { 87 87 /* This disable sequence will be a noop if already enabled */ 88 - mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); 89 - 90 88 ret = snd_soc_dapm_disable_pin(arizona->dapm, "HAPTICS"); 91 89 if (ret != 0) { 92 90 dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n", 93 91 ret); 94 - mutex_unlock(dapm_mutex); 95 92 return; 96 93 } 97 94 ··· 92 103 if (ret != 0) { 93 104 dev_err(arizona->dev, "Failed to sync DAPM: %d\n", 94 105 ret); 95 - mutex_unlock(dapm_mutex); 96 106 return; 97 107 } 98 - 99 - mutex_unlock(dapm_mutex); 100 108 101 109 ret = regmap_update_bits(arizona->regmap, 102 110 ARIZONA_HAPTICS_CONTROL_1, ··· 141 155 static void arizona_haptics_close(struct input_dev *input) 142 156 { 143 157 struct arizona_haptics *haptics = input_get_drvdata(input); 144 - struct mutex *dapm_mutex = &haptics->arizona->dapm->card->dapm_mutex; 145 158 146 159 cancel_work_sync(&haptics->work); 147 160 148 - mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); 149 - 150 161 if (haptics->arizona->dapm) 151 162 snd_soc_dapm_disable_pin(haptics->arizona->dapm, "HAPTICS"); 152 - 153 - mutex_unlock(dapm_mutex); 154 163 } 155 164 156 165 static int arizona_haptics_probe(struct platform_device *pdev)
+63 -42
drivers/iommu/arm-smmu.c
··· 79 79 80 80 #define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES) 81 81 #define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1)) 82 - #define ARM_SMMU_PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(pte_t)) 83 82 84 83 /* Stage-1 PTE */ 85 84 #define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6) ··· 190 191 #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) 191 192 #define CBAR_VMID_SHIFT 0 192 193 #define CBAR_VMID_MASK 0xff 194 + #define CBAR_S1_BPSHCFG_SHIFT 8 195 + #define CBAR_S1_BPSHCFG_MASK 3 196 + #define CBAR_S1_BPSHCFG_NSH 3 193 197 #define CBAR_S1_MEMATTR_SHIFT 12 194 198 #define CBAR_S1_MEMATTR_MASK 0xf 195 199 #define CBAR_S1_MEMATTR_WB 0xf ··· 395 393 struct arm_smmu_cfg root_cfg; 396 394 phys_addr_t output_mask; 397 395 398 - struct mutex lock; 396 + spinlock_t lock; 399 397 }; 400 398 401 399 static DEFINE_SPINLOCK(arm_smmu_devices_lock); ··· 634 632 return IRQ_HANDLED; 635 633 } 636 634 635 + static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr, 636 + size_t size) 637 + { 638 + unsigned long offset = (unsigned long)addr & ~PAGE_MASK; 639 + 640 + 641 + /* Ensure new page tables are visible to the hardware walker */ 642 + if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { 643 + dsb(); 644 + } else { 645 + /* 646 + * If the SMMU can't walk tables in the CPU caches, treat them 647 + * like non-coherent DMA since we need to flush the new entries 648 + * all the way out to memory. There's no possibility of 649 + * recursion here as the SMMU table walker will not be wired 650 + * through another SMMU. 
651 + */ 652 + dma_map_page(smmu->dev, virt_to_page(addr), offset, size, 653 + DMA_TO_DEVICE); 654 + } 655 + } 656 + 637 657 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) 638 658 { 639 659 u32 reg; ··· 674 650 if (smmu->version == 1) 675 651 reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT; 676 652 677 - /* Use the weakest memory type, so it is overridden by the pte */ 678 - if (stage1) 679 - reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); 680 - else 653 + /* 654 + * Use the weakest shareability/memory types, so they are 655 + * overridden by the ttbcr/pte. 656 + */ 657 + if (stage1) { 658 + reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) | 659 + (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); 660 + } else { 681 661 reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT; 662 + } 682 663 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx)); 683 664 684 665 if (smmu->version > 1) { ··· 744 715 } 745 716 746 717 /* TTBR0 */ 718 + arm_smmu_flush_pgtable(smmu, root_cfg->pgd, 719 + PTRS_PER_PGD * sizeof(pgd_t)); 747 720 reg = __pa(root_cfg->pgd); 748 721 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); 749 722 reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32; ··· 932 901 goto out_free_domain; 933 902 smmu_domain->root_cfg.pgd = pgd; 934 903 935 - mutex_init(&smmu_domain->lock); 904 + spin_lock_init(&smmu_domain->lock); 936 905 domain->priv = smmu_domain; 937 906 return 0; 938 907 ··· 1159 1128 struct arm_smmu_domain *smmu_domain = domain->priv; 1160 1129 struct arm_smmu_device *device_smmu = dev->archdata.iommu; 1161 1130 struct arm_smmu_master *master; 1131 + unsigned long flags; 1162 1132 1163 1133 if (!device_smmu) { 1164 1134 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); ··· 1170 1138 * Sanity check the domain. We don't currently support domains 1171 1139 * that cross between different SMMU chains. 
1172 1140 */ 1173 - mutex_lock(&smmu_domain->lock); 1141 + spin_lock_irqsave(&smmu_domain->lock, flags); 1174 1142 if (!smmu_domain->leaf_smmu) { 1175 1143 /* Now that we have a master, we can finalise the domain */ 1176 1144 ret = arm_smmu_init_domain_context(domain, dev); ··· 1185 1153 dev_name(device_smmu->dev)); 1186 1154 goto err_unlock; 1187 1155 } 1188 - mutex_unlock(&smmu_domain->lock); 1156 + spin_unlock_irqrestore(&smmu_domain->lock, flags); 1189 1157 1190 1158 /* Looks ok, so add the device to the domain */ 1191 1159 master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); ··· 1195 1163 return arm_smmu_domain_add_master(smmu_domain, master); 1196 1164 1197 1165 err_unlock: 1198 - mutex_unlock(&smmu_domain->lock); 1166 + spin_unlock_irqrestore(&smmu_domain->lock, flags); 1199 1167 return ret; 1200 1168 } 1201 1169 ··· 1207 1175 master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); 1208 1176 if (master) 1209 1177 arm_smmu_domain_remove_master(smmu_domain, master); 1210 - } 1211 - 1212 - static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr, 1213 - size_t size) 1214 - { 1215 - unsigned long offset = (unsigned long)addr & ~PAGE_MASK; 1216 - 1217 - /* 1218 - * If the SMMU can't walk tables in the CPU caches, treat them 1219 - * like non-coherent DMA since we need to flush the new entries 1220 - * all the way out to memory. There's no possibility of recursion 1221 - * here as the SMMU table walker will not be wired through another 1222 - * SMMU. 
1223 - */ 1224 - if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)) 1225 - dma_map_page(smmu->dev, virt_to_page(addr), offset, size, 1226 - DMA_TO_DEVICE); 1227 1178 } 1228 1179 1229 1180 static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, ··· 1225 1210 1226 1211 if (pmd_none(*pmd)) { 1227 1212 /* Allocate a new set of tables */ 1228 - pgtable_t table = alloc_page(PGALLOC_GFP); 1213 + pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO); 1229 1214 if (!table) 1230 1215 return -ENOMEM; 1231 1216 1232 - arm_smmu_flush_pgtable(smmu, page_address(table), 1233 - ARM_SMMU_PTE_HWTABLE_SIZE); 1217 + arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE); 1234 1218 if (!pgtable_page_ctor(table)) { 1235 1219 __free_page(table); 1236 1220 return -ENOMEM; ··· 1331 1317 1332 1318 #ifndef __PAGETABLE_PMD_FOLDED 1333 1319 if (pud_none(*pud)) { 1334 - pmd = pmd_alloc_one(NULL, addr); 1320 + pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); 1335 1321 if (!pmd) 1336 1322 return -ENOMEM; 1323 + 1324 + arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE); 1325 + pud_populate(NULL, pud, pmd); 1326 + arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud)); 1327 + 1328 + pmd += pmd_index(addr); 1337 1329 } else 1338 1330 #endif 1339 1331 pmd = pmd_offset(pud, addr); ··· 1348 1328 next = pmd_addr_end(addr, end); 1349 1329 ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn, 1350 1330 flags, stage); 1351 - pud_populate(NULL, pud, pmd); 1352 - arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud)); 1353 1331 phys += next - addr; 1354 1332 } while (pmd++, addr = next, addr < end); 1355 1333 ··· 1364 1346 1365 1347 #ifndef __PAGETABLE_PUD_FOLDED 1366 1348 if (pgd_none(*pgd)) { 1367 - pud = pud_alloc_one(NULL, addr); 1349 + pud = (pud_t *)get_zeroed_page(GFP_ATOMIC); 1368 1350 if (!pud) 1369 1351 return -ENOMEM; 1352 + 1353 + arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE); 1354 + pgd_populate(NULL, pgd, pud); 1355 + arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd)); 1356 + 1357 + pud += 
pud_index(addr); 1370 1358 } else 1371 1359 #endif 1372 1360 pud = pud_offset(pgd, addr); ··· 1381 1357 next = pud_addr_end(addr, end); 1382 1358 ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys, 1383 1359 flags, stage); 1384 - pgd_populate(NULL, pud, pgd); 1385 - arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd)); 1386 1360 phys += next - addr; 1387 1361 } while (pud++, addr = next, addr < end); 1388 1362 ··· 1397 1375 struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; 1398 1376 pgd_t *pgd = root_cfg->pgd; 1399 1377 struct arm_smmu_device *smmu = root_cfg->smmu; 1378 + unsigned long irqflags; 1400 1379 1401 1380 if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) { 1402 1381 stage = 2; ··· 1420 1397 if (paddr & ~output_mask) 1421 1398 return -ERANGE; 1422 1399 1423 - mutex_lock(&smmu_domain->lock); 1400 + spin_lock_irqsave(&smmu_domain->lock, irqflags); 1424 1401 pgd += pgd_index(iova); 1425 1402 end = iova + size; 1426 1403 do { ··· 1436 1413 } while (pgd++, iova != end); 1437 1414 1438 1415 out_unlock: 1439 - mutex_unlock(&smmu_domain->lock); 1440 - 1441 - /* Ensure new page tables are visible to the hardware walker */ 1442 - if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) 1443 - dsb(); 1416 + spin_unlock_irqrestore(&smmu_domain->lock, irqflags); 1444 1417 1445 1418 return ret; 1446 1419 } ··· 2006 1987 if (!iommu_present(&platform_bus_type)) 2007 1988 bus_set_iommu(&platform_bus_type, &arm_smmu_ops); 2008 1989 1990 + #ifdef CONFIG_ARM_AMBA 2009 1991 if (!iommu_present(&amba_bustype)) 2010 1992 bus_set_iommu(&amba_bustype, &arm_smmu_ops); 1993 + #endif 2011 1994 2012 1995 return 0; 2013 1996 }
+2 -2
drivers/iommu/omap-iommu-debug.c
··· 354 354 return -ENOMEM; \ 355 355 } 356 356 357 - #define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 600) 358 - #define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 400) 357 + #define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600) 358 + #define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400) 359 359 360 360 static int iommu_debug_register(struct device *dev, void *data) 361 361 {
+1 -1
drivers/irqchip/irq-metag-ext.c
··· 515 515 * one cpu (the interrupt code doesn't support it), so we just 516 516 * pick the first cpu we find in 'cpumask'. 517 517 */ 518 - cpu = cpumask_any(cpumask); 518 + cpu = cpumask_any_and(cpumask, cpu_online_mask); 519 519 thread = cpu_2_hwthread_id[cpu]; 520 520 521 521 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
+1 -1
drivers/irqchip/irq-metag.c
··· 201 201 * one cpu (the interrupt code doesn't support it), so we just 202 202 * pick the first cpu we find in 'cpumask'. 203 203 */ 204 - cpu = cpumask_any(cpumask); 204 + cpu = cpumask_any_and(cpumask, cpu_online_mask); 205 205 thread = cpu_2_hwthread_id[cpu]; 206 206 207 207 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)),
+19 -3
drivers/irqchip/irq-orion.c
··· 111 111 static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc) 112 112 { 113 113 struct irq_domain *d = irq_get_handler_data(irq); 114 - struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, irq); 114 + 115 + struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0); 115 116 u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) & 116 117 gc->mask_cache; 117 118 ··· 122 121 generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq)); 123 122 stat &= ~(1 << hwirq); 124 123 } 124 + } 125 + 126 + /* 127 + * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register. 128 + * To avoid interrupt events on stale irqs, we clear them before unmask. 129 + */ 130 + static unsigned int orion_bridge_irq_startup(struct irq_data *d) 131 + { 132 + struct irq_chip_type *ct = irq_data_get_chip_type(d); 133 + 134 + ct->chip.irq_ack(d); 135 + ct->chip.irq_unmask(d); 136 + return 0; 125 137 } 126 138 127 139 static int __init orion_bridge_irq_init(struct device_node *np, ··· 157 143 } 158 144 159 145 ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name, 160 - handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); 146 + handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); 161 147 if (ret) { 162 148 pr_err("%s: unable to alloc irq domain gc\n", np->name); 163 149 return ret; ··· 190 176 191 177 gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE; 192 178 gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK; 179 + gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup; 193 180 gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit; 194 181 gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; 195 182 gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; 196 183 197 - /* mask all interrupts */ 184 + /* mask and clear all interrupts */ 198 185 writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK); 186 + writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE); 199 187 200 188 irq_set_handler_data(irq, domain); 201 189 
irq_set_chained_handler(irq, orion_bridge_irq_handler);
-10
drivers/md/Kconfig
··· 254 254 ---help--- 255 255 Provides thin provisioning and snapshots that share a data store. 256 256 257 - config DM_DEBUG_BLOCK_STACK_TRACING 258 - boolean "Keep stack trace of persistent data block lock holders" 259 - depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA 260 - select STACKTRACE 261 - ---help--- 262 - Enable this for messages that may help debug problems with the 263 - block manager locking used by thin provisioning and caching. 264 - 265 - If unsure, say N. 266 - 267 257 config DM_CACHE 268 258 tristate "Cache target (EXPERIMENTAL)" 269 259 depends on BLK_DEV_DM
+2 -2
drivers/md/dm-cache-policy-mq.c
··· 872 872 { 873 873 struct mq_policy *mq = to_mq_policy(p); 874 874 875 - kfree(mq->table); 875 + vfree(mq->table); 876 876 epool_exit(&mq->cache_pool); 877 877 epool_exit(&mq->pre_cache_pool); 878 878 kfree(mq); ··· 1245 1245 1246 1246 mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16); 1247 1247 mq->hash_bits = ffs(mq->nr_buckets) - 1; 1248 - mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL); 1248 + mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets); 1249 1249 if (!mq->table) 1250 1250 goto bad_alloc_table; 1251 1251
+8 -5
drivers/md/dm-cache-target.c
··· 289 289 bool tick:1; 290 290 unsigned req_nr:2; 291 291 struct dm_deferred_entry *all_io_entry; 292 + struct dm_hook_info hook_info; 292 293 293 294 /* 294 295 * writethrough fields. These MUST remain at the end of this ··· 298 297 */ 299 298 struct cache *cache; 300 299 dm_cblock_t cblock; 301 - struct dm_hook_info hook_info; 302 300 struct dm_bio_details bio_details; 303 301 }; 304 302 ··· 671 671 dm_cblock_t cblock) 672 672 { 673 673 sector_t bi_sector = bio->bi_iter.bi_sector; 674 + sector_t block = from_cblock(cblock); 674 675 675 676 bio->bi_bdev = cache->cache_dev->bdev; 676 677 if (!block_size_is_power_of_two(cache)) 677 678 bio->bi_iter.bi_sector = 678 - (from_cblock(cblock) * cache->sectors_per_block) + 679 + (block * cache->sectors_per_block) + 679 680 sector_div(bi_sector, cache->sectors_per_block); 680 681 else 681 682 bio->bi_iter.bi_sector = 682 - (from_cblock(cblock) << cache->sectors_per_block_shift) | 683 + (block << cache->sectors_per_block_shift) | 683 684 (bi_sector & (cache->sectors_per_block - 1)); 684 685 } 685 686 ··· 1011 1010 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 1012 1011 unsigned long flags; 1013 1012 1013 + dm_unhook_bio(&pb->hook_info, bio); 1014 + 1014 1015 if (err) 1015 1016 mg->err = true; 1016 1017 1018 + mg->requeue_holder = false; 1019 + 1017 1020 spin_lock_irqsave(&cache->lock, flags); 1018 1021 list_add_tail(&mg->list, &cache->completed_migrations); 1019 - dm_unhook_bio(&pb->hook_info, bio); 1020 - mg->requeue_holder = false; 1021 1022 spin_unlock_irqrestore(&cache->lock, flags); 1022 1023 1023 1024 wake_worker(cache);
+11 -12
drivers/md/dm-io.c
··· 201 201 /* 202 202 * Functions for getting the pages from a bvec. 203 203 */ 204 - static void bio_get_page(struct dpages *dp, 205 - struct page **p, unsigned long *len, unsigned *offset) 204 + static void bio_get_page(struct dpages *dp, struct page **p, 205 + unsigned long *len, unsigned *offset) 206 206 { 207 - struct bio *bio = dp->context_ptr; 208 - struct bio_vec bvec = bio_iovec(bio); 209 - *p = bvec.bv_page; 210 - *len = bvec.bv_len; 211 - *offset = bvec.bv_offset; 207 + struct bio_vec *bvec = dp->context_ptr; 208 + *p = bvec->bv_page; 209 + *len = bvec->bv_len - dp->context_u; 210 + *offset = bvec->bv_offset + dp->context_u; 212 211 } 213 212 214 213 static void bio_next_page(struct dpages *dp) 215 214 { 216 - struct bio *bio = dp->context_ptr; 217 - struct bio_vec bvec = bio_iovec(bio); 218 - 219 - bio_advance(bio, bvec.bv_len); 215 + struct bio_vec *bvec = dp->context_ptr; 216 + dp->context_ptr = bvec + 1; 217 + dp->context_u = 0; 220 218 } 221 219 222 220 static void bio_dp_init(struct dpages *dp, struct bio *bio) 223 221 { 224 222 dp->get_page = bio_get_page; 225 223 dp->next_page = bio_next_page; 226 - dp->context_ptr = bio; 224 + dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); 225 + dp->context_u = bio->bi_iter.bi_bvec_done; 227 226 } 228 227 229 228 /*
+5 -2
drivers/md/dm-mpath.c
··· 1626 1626 /* 1627 1627 * Only pass ioctls through if the device sizes match exactly. 1628 1628 */ 1629 - if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) 1630 - r = scsi_verify_blk_ioctl(NULL, cmd); 1629 + if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) { 1630 + int err = scsi_verify_blk_ioctl(NULL, cmd); 1631 + if (err) 1632 + r = err; 1633 + } 1631 1634 1632 1635 if (r == -ENOTCONN && !fatal_signal_pending(current)) 1633 1636 queue_work(kmultipathd, &m->process_queued_ios);
+3
drivers/md/dm-raid1.c
··· 1244 1244 1245 1245 dm_bio_restore(bd, bio); 1246 1246 bio_record->details.bi_bdev = NULL; 1247 + 1248 + atomic_inc(&bio->bi_remaining); 1249 + 1247 1250 queue_bio(ms, bio, rw); 1248 1251 return DM_ENDIO_INCOMPLETE; 1249 1252 }
+3
drivers/md/dm-snap-persistent.c
··· 546 546 r = insert_exceptions(ps, area, callback, callback_context, 547 547 &full); 548 548 549 + if (!full) 550 + memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT); 551 + 549 552 dm_bufio_release(bp); 550 553 551 554 dm_bufio_forget(client, chunk);
+55 -3
drivers/md/dm-thin-metadata.c
··· 76 76 77 77 #define THIN_SUPERBLOCK_MAGIC 27022010 78 78 #define THIN_SUPERBLOCK_LOCATION 0 79 - #define THIN_VERSION 1 79 + #define THIN_VERSION 2 80 80 #define THIN_METADATA_CACHE_SIZE 64 81 81 #define SECTOR_TO_BLOCK_SHIFT 3 82 82 ··· 483 483 484 484 disk_super->data_mapping_root = cpu_to_le64(pmd->root); 485 485 disk_super->device_details_root = cpu_to_le64(pmd->details_root); 486 - disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); 486 + disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE); 487 487 disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT); 488 488 disk_super->data_block_size = cpu_to_le32(pmd->data_block_size); 489 489 ··· 651 651 { 652 652 int r; 653 653 654 - pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE, 654 + pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT, 655 655 THIN_METADATA_CACHE_SIZE, 656 656 THIN_MAX_CONCURRENT_LOCKS); 657 657 if (IS_ERR(pmd->bm)) { ··· 1489 1489 return r; 1490 1490 } 1491 1491 1492 + bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd) 1493 + { 1494 + bool r = false; 1495 + struct dm_thin_device *td, *tmp; 1496 + 1497 + down_read(&pmd->root_lock); 1498 + list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) { 1499 + if (td->changed) { 1500 + r = td->changed; 1501 + break; 1502 + } 1503 + } 1504 + up_read(&pmd->root_lock); 1505 + 1506 + return r; 1507 + } 1508 + 1492 1509 bool dm_thin_aborted_changes(struct dm_thin_device *td) 1493 1510 { 1494 1511 bool r; ··· 1754 1737 up_write(&pmd->root_lock); 1755 1738 1756 1739 return r; 1740 + } 1741 + 1742 + int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd) 1743 + { 1744 + int r; 1745 + struct dm_block *sblock; 1746 + struct thin_disk_superblock *disk_super; 1747 + 1748 + down_write(&pmd->root_lock); 1749 + pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG; 1750 + 1751 + r = superblock_lock(pmd, 
&sblock); 1752 + if (r) { 1753 + DMERR("couldn't read superblock"); 1754 + goto out; 1755 + } 1756 + 1757 + disk_super = dm_block_data(sblock); 1758 + disk_super->flags = cpu_to_le32(pmd->flags); 1759 + 1760 + dm_bm_unlock(sblock); 1761 + out: 1762 + up_write(&pmd->root_lock); 1763 + return r; 1764 + } 1765 + 1766 + bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd) 1767 + { 1768 + bool needs_check; 1769 + 1770 + down_read(&pmd->root_lock); 1771 + needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG; 1772 + up_read(&pmd->root_lock); 1773 + 1774 + return needs_check; 1757 1775 }
+16 -5
drivers/md/dm-thin-metadata.h
··· 9 9 10 10 #include "persistent-data/dm-block-manager.h" 11 11 #include "persistent-data/dm-space-map.h" 12 + #include "persistent-data/dm-space-map-metadata.h" 12 13 13 - #define THIN_METADATA_BLOCK_SIZE 4096 14 + #define THIN_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE 14 15 15 16 /* 16 17 * The metadata device is currently limited in size. 17 - * 18 - * We have one block of index, which can hold 255 index entries. Each 19 - * index entry contains allocation info about 16k metadata blocks. 20 18 */ 21 - #define THIN_METADATA_MAX_SECTORS (255 * (1 << 14) * (THIN_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT))) 19 + #define THIN_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS 22 20 23 21 /* 24 22 * A metadata device larger than 16GB triggers a warning. ··· 24 26 #define THIN_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT)) 25 27 26 28 /*----------------------------------------------------------------*/ 29 + 30 + /* 31 + * Thin metadata superblock flags. 32 + */ 33 + #define THIN_METADATA_NEEDS_CHECK_FLAG (1 << 0) 27 34 28 35 struct dm_pool_metadata; 29 36 struct dm_thin_device; ··· 164 161 */ 165 162 bool dm_thin_changed_this_transaction(struct dm_thin_device *td); 166 163 164 + bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd); 165 + 167 166 bool dm_thin_aborted_changes(struct dm_thin_device *td); 168 167 169 168 int dm_thin_get_highest_mapped_block(struct dm_thin_device *td, ··· 206 201 dm_block_t threshold, 207 202 dm_sm_threshold_fn fn, 208 203 void *context); 204 + 205 + /* 206 + * Updates the superblock immediately. 207 + */ 208 + int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd); 209 + bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd); 209 210 210 211 /*----------------------------------------------------------------*/ 211 212
+260 -83
drivers/md/dm-thin.c
··· 130 130 struct dm_thin_new_mapping; 131 131 132 132 /* 133 - * The pool runs in 3 modes. Ordered in degraded order for comparisons. 133 + * The pool runs in 4 modes. Ordered in degraded order for comparisons. 134 134 */ 135 135 enum pool_mode { 136 136 PM_WRITE, /* metadata may be changed */ 137 + PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */ 137 138 PM_READ_ONLY, /* metadata may not be changed */ 138 139 PM_FAIL, /* all I/O fails */ 139 140 }; ··· 199 198 }; 200 199 201 200 static enum pool_mode get_pool_mode(struct pool *pool); 202 - static void out_of_data_space(struct pool *pool); 203 201 static void metadata_operation_failed(struct pool *pool, const char *op, int r); 204 202 205 203 /* ··· 226 226 227 227 struct pool *pool; 228 228 struct dm_thin_device *td; 229 + bool requeue_mode:1; 229 230 }; 230 231 231 232 /*----------------------------------------------------------------*/ ··· 370 369 struct dm_thin_new_mapping *overwrite_mapping; 371 370 }; 372 371 373 - static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) 372 + static void requeue_bio_list(struct thin_c *tc, struct bio_list *master) 374 373 { 375 374 struct bio *bio; 376 375 struct bio_list bios; 376 + unsigned long flags; 377 377 378 378 bio_list_init(&bios); 379 + 380 + spin_lock_irqsave(&tc->pool->lock, flags); 379 381 bio_list_merge(&bios, master); 380 382 bio_list_init(master); 383 + spin_unlock_irqrestore(&tc->pool->lock, flags); 381 384 382 385 while ((bio = bio_list_pop(&bios))) { 383 386 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); ··· 396 391 static void requeue_io(struct thin_c *tc) 397 392 { 398 393 struct pool *pool = tc->pool; 394 + 395 + requeue_bio_list(tc, &pool->deferred_bios); 396 + requeue_bio_list(tc, &pool->retry_on_resume_list); 397 + } 398 + 399 + static void error_retry_list(struct pool *pool) 400 + { 401 + struct bio *bio; 399 402 unsigned long flags; 403 + struct 
bio_list bios; 404 + 405 + bio_list_init(&bios); 400 406 401 407 spin_lock_irqsave(&pool->lock, flags); 402 - __requeue_bio_list(tc, &pool->deferred_bios); 403 - __requeue_bio_list(tc, &pool->retry_on_resume_list); 408 + bio_list_merge(&bios, &pool->retry_on_resume_list); 409 + bio_list_init(&pool->retry_on_resume_list); 404 410 spin_unlock_irqrestore(&pool->lock, flags); 411 + 412 + while ((bio = bio_list_pop(&bios))) 413 + bio_io_error(bio); 405 414 } 406 415 407 416 /* ··· 944 925 } 945 926 } 946 927 928 + static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); 929 + 947 930 static int alloc_data_block(struct thin_c *tc, dm_block_t *result) 948 931 { 949 932 int r; 950 933 dm_block_t free_blocks; 951 934 struct pool *pool = tc->pool; 952 935 953 - if (get_pool_mode(pool) != PM_WRITE) 936 + if (WARN_ON(get_pool_mode(pool) != PM_WRITE)) 954 937 return -EINVAL; 955 938 956 939 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); ··· 979 958 } 980 959 981 960 if (!free_blocks) { 982 - out_of_data_space(pool); 961 + set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); 983 962 return -ENOSPC; 984 963 } 985 964 } ··· 1009 988 spin_unlock_irqrestore(&pool->lock, flags); 1010 989 } 1011 990 991 + static bool should_error_unserviceable_bio(struct pool *pool) 992 + { 993 + enum pool_mode m = get_pool_mode(pool); 994 + 995 + switch (m) { 996 + case PM_WRITE: 997 + /* Shouldn't get here */ 998 + DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode"); 999 + return true; 1000 + 1001 + case PM_OUT_OF_DATA_SPACE: 1002 + return pool->pf.error_if_no_space; 1003 + 1004 + case PM_READ_ONLY: 1005 + case PM_FAIL: 1006 + return true; 1007 + default: 1008 + /* Shouldn't get here */ 1009 + DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode"); 1010 + return true; 1011 + } 1012 + } 1013 + 1012 1014 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) 1013 1015 { 1014 - /* 1015 - * When pool is read-only, no cell locking is needed because 
1016 - * nothing is changing. 1017 - */ 1018 - WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY); 1019 - 1020 - if (pool->pf.error_if_no_space) 1016 + if (should_error_unserviceable_bio(pool)) 1021 1017 bio_io_error(bio); 1022 1018 else 1023 1019 retry_on_resume(bio); ··· 1045 1007 struct bio *bio; 1046 1008 struct bio_list bios; 1047 1009 1010 + if (should_error_unserviceable_bio(pool)) { 1011 + cell_error(pool, cell); 1012 + return; 1013 + } 1014 + 1048 1015 bio_list_init(&bios); 1049 1016 cell_release(pool, cell, &bios); 1050 1017 1051 - while ((bio = bio_list_pop(&bios))) 1052 - handle_unserviceable_bio(pool, bio); 1018 + if (should_error_unserviceable_bio(pool)) 1019 + while ((bio = bio_list_pop(&bios))) 1020 + bio_io_error(bio); 1021 + else 1022 + while ((bio = bio_list_pop(&bios))) 1023 + retry_on_resume(bio); 1053 1024 } 1054 1025 1055 1026 static void process_discard(struct thin_c *tc, struct bio *bio) ··· 1343 1296 } 1344 1297 } 1345 1298 1299 + static void process_bio_success(struct thin_c *tc, struct bio *bio) 1300 + { 1301 + bio_endio(bio, 0); 1302 + } 1303 + 1346 1304 static void process_bio_fail(struct thin_c *tc, struct bio *bio) 1347 1305 { 1348 1306 bio_io_error(bio); ··· 1380 1328 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1381 1329 struct thin_c *tc = h->tc; 1382 1330 1331 + if (tc->requeue_mode) { 1332 + bio_endio(bio, DM_ENDIO_REQUEUE); 1333 + continue; 1334 + } 1335 + 1383 1336 /* 1384 1337 * If we've got no free new_mapping structs, and processing 1385 1338 * this bio might require one, we pause until there are some ··· 1414 1357 bio_list_init(&pool->deferred_flush_bios); 1415 1358 spin_unlock_irqrestore(&pool->lock, flags); 1416 1359 1417 - if (bio_list_empty(&bios) && !need_commit_due_to_time(pool)) 1360 + if (bio_list_empty(&bios) && 1361 + !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) 1418 1362 return; 1419 1363 1420 1364 if (commit(pool)) { ··· 1451 1393 
1452 1394 /*----------------------------------------------------------------*/ 1453 1395 1396 + struct noflush_work { 1397 + struct work_struct worker; 1398 + struct thin_c *tc; 1399 + 1400 + atomic_t complete; 1401 + wait_queue_head_t wait; 1402 + }; 1403 + 1404 + static void complete_noflush_work(struct noflush_work *w) 1405 + { 1406 + atomic_set(&w->complete, 1); 1407 + wake_up(&w->wait); 1408 + } 1409 + 1410 + static void do_noflush_start(struct work_struct *ws) 1411 + { 1412 + struct noflush_work *w = container_of(ws, struct noflush_work, worker); 1413 + w->tc->requeue_mode = true; 1414 + requeue_io(w->tc); 1415 + complete_noflush_work(w); 1416 + } 1417 + 1418 + static void do_noflush_stop(struct work_struct *ws) 1419 + { 1420 + struct noflush_work *w = container_of(ws, struct noflush_work, worker); 1421 + w->tc->requeue_mode = false; 1422 + complete_noflush_work(w); 1423 + } 1424 + 1425 + static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) 1426 + { 1427 + struct noflush_work w; 1428 + 1429 + INIT_WORK(&w.worker, fn); 1430 + w.tc = tc; 1431 + atomic_set(&w.complete, 0); 1432 + init_waitqueue_head(&w.wait); 1433 + 1434 + queue_work(tc->pool->wq, &w.worker); 1435 + 1436 + wait_event(w.wait, atomic_read(&w.complete)); 1437 + } 1438 + 1439 + /*----------------------------------------------------------------*/ 1440 + 1454 1441 static enum pool_mode get_pool_mode(struct pool *pool) 1455 1442 { 1456 1443 return pool->pf.mode; 1457 1444 } 1458 1445 1446 + static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode) 1447 + { 1448 + dm_table_event(pool->ti->table); 1449 + DMINFO("%s: switching pool to %s mode", 1450 + dm_device_name(pool->pool_md), new_mode); 1451 + } 1452 + 1459 1453 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) 1460 1454 { 1461 - int r; 1462 - enum pool_mode old_mode = pool->pf.mode; 1455 + struct pool_c *pt = pool->ti->private; 1456 + bool needs_check = 
dm_pool_metadata_needs_check(pool->pmd); 1457 + enum pool_mode old_mode = get_pool_mode(pool); 1458 + 1459 + /* 1460 + * Never allow the pool to transition to PM_WRITE mode if user 1461 + * intervention is required to verify metadata and data consistency. 1462 + */ 1463 + if (new_mode == PM_WRITE && needs_check) { 1464 + DMERR("%s: unable to switch pool to write mode until repaired.", 1465 + dm_device_name(pool->pool_md)); 1466 + if (old_mode != new_mode) 1467 + new_mode = old_mode; 1468 + else 1469 + new_mode = PM_READ_ONLY; 1470 + } 1471 + /* 1472 + * If we were in PM_FAIL mode, rollback of metadata failed. We're 1473 + * not going to recover without a thin_repair. So we never let the 1474 + * pool move out of the old mode. 1475 + */ 1476 + if (old_mode == PM_FAIL) 1477 + new_mode = old_mode; 1463 1478 1464 1479 switch (new_mode) { 1465 1480 case PM_FAIL: 1466 1481 if (old_mode != new_mode) 1467 - DMERR("%s: switching pool to failure mode", 1468 - dm_device_name(pool->pool_md)); 1482 + notify_of_pool_mode_change(pool, "failure"); 1469 1483 dm_pool_metadata_read_only(pool->pmd); 1470 1484 pool->process_bio = process_bio_fail; 1471 1485 pool->process_discard = process_bio_fail; 1472 1486 pool->process_prepared_mapping = process_prepared_mapping_fail; 1473 1487 pool->process_prepared_discard = process_prepared_discard_fail; 1488 + 1489 + error_retry_list(pool); 1474 1490 break; 1475 1491 1476 1492 case PM_READ_ONLY: 1477 1493 if (old_mode != new_mode) 1478 - DMERR("%s: switching pool to read-only mode", 1479 - dm_device_name(pool->pool_md)); 1480 - r = dm_pool_abort_metadata(pool->pmd); 1481 - if (r) { 1482 - DMERR("%s: aborting transaction failed", 1483 - dm_device_name(pool->pool_md)); 1484 - new_mode = PM_FAIL; 1485 - set_pool_mode(pool, new_mode); 1486 - } else { 1487 - dm_pool_metadata_read_only(pool->pmd); 1488 - pool->process_bio = process_bio_read_only; 1489 - pool->process_discard = process_discard; 1490 - pool->process_prepared_mapping = 
process_prepared_mapping_fail; 1491 - pool->process_prepared_discard = process_prepared_discard_passdown; 1492 - } 1494 + notify_of_pool_mode_change(pool, "read-only"); 1495 + dm_pool_metadata_read_only(pool->pmd); 1496 + pool->process_bio = process_bio_read_only; 1497 + pool->process_discard = process_bio_success; 1498 + pool->process_prepared_mapping = process_prepared_mapping_fail; 1499 + pool->process_prepared_discard = process_prepared_discard_passdown; 1500 + 1501 + error_retry_list(pool); 1502 + break; 1503 + 1504 + case PM_OUT_OF_DATA_SPACE: 1505 + /* 1506 + * Ideally we'd never hit this state; the low water mark 1507 + * would trigger userland to extend the pool before we 1508 + * completely run out of data space. However, many small 1509 + * IOs to unprovisioned space can consume data space at an 1510 + * alarming rate. Adjust your low water mark if you're 1511 + * frequently seeing this mode. 1512 + */ 1513 + if (old_mode != new_mode) 1514 + notify_of_pool_mode_change(pool, "out-of-data-space"); 1515 + pool->process_bio = process_bio_read_only; 1516 + pool->process_discard = process_discard; 1517 + pool->process_prepared_mapping = process_prepared_mapping; 1518 + pool->process_prepared_discard = process_prepared_discard_passdown; 1493 1519 break; 1494 1520 1495 1521 case PM_WRITE: 1496 1522 if (old_mode != new_mode) 1497 - DMINFO("%s: switching pool to write mode", 1498 - dm_device_name(pool->pool_md)); 1523 + notify_of_pool_mode_change(pool, "write"); 1499 1524 dm_pool_metadata_read_write(pool->pmd); 1500 1525 pool->process_bio = process_bio; 1501 1526 pool->process_discard = process_discard; ··· 1588 1447 } 1589 1448 1590 1449 pool->pf.mode = new_mode; 1450 + /* 1451 + * The pool mode may have changed, sync it so bind_control_target() 1452 + * doesn't cause an unexpected mode transition on resume. 
1453 + */ 1454 + pt->adjusted_pf.mode = new_mode; 1591 1455 } 1592 1456 1593 - /* 1594 - * Rather than calling set_pool_mode directly, use these which describe the 1595 - * reason for mode degradation. 1596 - */ 1597 - static void out_of_data_space(struct pool *pool) 1457 + static void abort_transaction(struct pool *pool) 1598 1458 { 1599 - DMERR_LIMIT("%s: no free data space available.", 1600 - dm_device_name(pool->pool_md)); 1601 - set_pool_mode(pool, PM_READ_ONLY); 1459 + const char *dev_name = dm_device_name(pool->pool_md); 1460 + 1461 + DMERR_LIMIT("%s: aborting current metadata transaction", dev_name); 1462 + if (dm_pool_abort_metadata(pool->pmd)) { 1463 + DMERR("%s: failed to abort metadata transaction", dev_name); 1464 + set_pool_mode(pool, PM_FAIL); 1465 + } 1466 + 1467 + if (dm_pool_metadata_set_needs_check(pool->pmd)) { 1468 + DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); 1469 + set_pool_mode(pool, PM_FAIL); 1470 + } 1602 1471 } 1603 1472 1604 1473 static void metadata_operation_failed(struct pool *pool, const char *op, int r) 1605 1474 { 1606 - dm_block_t free_blocks; 1607 - 1608 1475 DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d", 1609 1476 dm_device_name(pool->pool_md), op, r); 1610 1477 1611 - if (r == -ENOSPC && 1612 - !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) && 1613 - !free_blocks) 1614 - DMERR_LIMIT("%s: no free metadata space available.", 1615 - dm_device_name(pool->pool_md)); 1616 - 1478 + abort_transaction(pool); 1617 1479 set_pool_mode(pool, PM_READ_ONLY); 1618 1480 } 1619 1481 ··· 1666 1522 struct dm_cell_key key; 1667 1523 1668 1524 thin_hook_bio(tc, bio); 1525 + 1526 + if (tc->requeue_mode) { 1527 + bio_endio(bio, DM_ENDIO_REQUEUE); 1528 + return DM_MAPIO_SUBMITTED; 1529 + } 1669 1530 1670 1531 if (get_pool_mode(tc->pool) == PM_FAIL) { 1671 1532 bio_io_error(bio); ··· 1835 1686 /* 1836 1687 * We want to make sure that a pool in PM_FAIL mode is never upgraded. 
1837 1688 */ 1838 - enum pool_mode old_mode = pool->pf.mode; 1689 + enum pool_mode old_mode = get_pool_mode(pool); 1839 1690 enum pool_mode new_mode = pt->adjusted_pf.mode; 1840 1691 1841 1692 /* ··· 1848 1699 pool->ti = ti; 1849 1700 pool->pf = pt->adjusted_pf; 1850 1701 pool->low_water_blocks = pt->low_water_blocks; 1851 - 1852 - /* 1853 - * If we were in PM_FAIL mode, rollback of metadata failed. We're 1854 - * not going to recover without a thin_repair. So we never let the 1855 - * pool move out of the old mode. On the other hand a PM_READ_ONLY 1856 - * may have been due to a lack of metadata or data space, and may 1857 - * now work (ie. if the underlying devices have been resized). 1858 - */ 1859 - if (old_mode == PM_FAIL) 1860 - new_mode = old_mode; 1861 1702 1862 1703 set_pool_mode(pool, new_mode); 1863 1704 ··· 2138 1999 dm_table_event(pool->ti->table); 2139 2000 } 2140 2001 2141 - static sector_t get_metadata_dev_size(struct block_device *bdev) 2002 + static sector_t get_dev_size(struct block_device *bdev) 2142 2003 { 2143 - sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; 2004 + return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; 2005 + } 2006 + 2007 + static void warn_if_metadata_device_too_big(struct block_device *bdev) 2008 + { 2009 + sector_t metadata_dev_size = get_dev_size(bdev); 2144 2010 char buffer[BDEVNAME_SIZE]; 2145 2011 2146 - if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) { 2012 + if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) 2147 2013 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.", 2148 2014 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS); 2149 - metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING; 2150 - } 2015 + } 2016 + 2017 + static sector_t get_metadata_dev_size(struct block_device *bdev) 2018 + { 2019 + sector_t metadata_dev_size = get_dev_size(bdev); 2020 + 2021 + if (metadata_dev_size > THIN_METADATA_MAX_SECTORS) 2022 + metadata_dev_size = 
THIN_METADATA_MAX_SECTORS; 2151 2023 2152 2024 return metadata_dev_size; 2153 2025 } ··· 2167 2017 { 2168 2018 sector_t metadata_dev_size = get_metadata_dev_size(bdev); 2169 2019 2170 - sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); 2020 + sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE); 2171 2021 2172 2022 return metadata_dev_size; 2173 2023 } ··· 2245 2095 ti->error = "Error opening metadata block device"; 2246 2096 goto out_unlock; 2247 2097 } 2248 - 2249 - /* 2250 - * Run for the side-effect of possibly issuing a warning if the 2251 - * device is too big. 2252 - */ 2253 - (void) get_metadata_dev_size(metadata_dev->bdev); 2098 + warn_if_metadata_device_too_big(metadata_dev->bdev); 2254 2099 2255 2100 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev); 2256 2101 if (r) { ··· 2391 2246 return -EINVAL; 2392 2247 2393 2248 } else if (data_size > sb_data_size) { 2249 + if (dm_pool_metadata_needs_check(pool->pmd)) { 2250 + DMERR("%s: unable to grow the data device until repaired.", 2251 + dm_device_name(pool->pool_md)); 2252 + return 0; 2253 + } 2254 + 2394 2255 if (sb_data_size) 2395 2256 DMINFO("%s: growing the data device from %llu to %llu blocks", 2396 2257 dm_device_name(pool->pool_md), ··· 2438 2287 return -EINVAL; 2439 2288 2440 2289 } else if (metadata_dev_size > sb_metadata_dev_size) { 2290 + if (dm_pool_metadata_needs_check(pool->pmd)) { 2291 + DMERR("%s: unable to grow the metadata device until repaired.", 2292 + dm_device_name(pool->pool_md)); 2293 + return 0; 2294 + } 2295 + 2296 + warn_if_metadata_device_too_big(pool->md_dev); 2441 2297 DMINFO("%s: growing the metadata device from %llu to %llu blocks", 2442 2298 dm_device_name(pool->pool_md), 2443 2299 sb_metadata_dev_size, metadata_dev_size); ··· 2831 2673 else 2832 2674 DMEMIT("- "); 2833 2675 2834 - if (pool->pf.mode == PM_READ_ONLY) 2676 + if (pool->pf.mode == PM_OUT_OF_DATA_SPACE) 2677 + DMEMIT("out_of_data_space "); 2678 + else if (pool->pf.mode == 
PM_READ_ONLY) 2835 2679 DMEMIT("ro "); 2836 2680 else 2837 2681 DMEMIT("rw "); ··· 2947 2787 .name = "thin-pool", 2948 2788 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | 2949 2789 DM_TARGET_IMMUTABLE, 2950 - .version = {1, 10, 0}, 2790 + .version = {1, 11, 0}, 2951 2791 .module = THIS_MODULE, 2952 2792 .ctr = pool_ctr, 2953 2793 .dtr = pool_dtr, ··· 3054 2894 3055 2895 if (get_pool_mode(tc->pool) == PM_FAIL) { 3056 2896 ti->error = "Couldn't open thin device, Pool is in fail mode"; 2897 + r = -EINVAL; 3057 2898 goto bad_thin_open; 3058 2899 } 3059 2900 ··· 3066 2905 3067 2906 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block); 3068 2907 if (r) 3069 - goto bad_thin_open; 2908 + goto bad_target_max_io_len; 3070 2909 3071 2910 ti->num_flush_bios = 1; 3072 2911 ti->flush_supported = true; ··· 3087 2926 3088 2927 return 0; 3089 2928 2929 + bad_target_max_io_len: 2930 + dm_pool_close_thin_device(tc->td); 3090 2931 bad_thin_open: 3091 2932 __pool_dec(tc->pool); 3092 2933 bad_pool_lookup: ··· 3149 2986 return 0; 3150 2987 } 3151 2988 2989 + static void thin_presuspend(struct dm_target *ti) 2990 + { 2991 + struct thin_c *tc = ti->private; 2992 + 2993 + if (dm_noflush_suspending(ti)) 2994 + noflush_work(tc, do_noflush_start); 2995 + } 2996 + 3152 2997 static void thin_postsuspend(struct dm_target *ti) 3153 2998 { 3154 - if (dm_noflush_suspending(ti)) 3155 - requeue_io((struct thin_c *)ti->private); 2999 + struct thin_c *tc = ti->private; 3000 + 3001 + /* 3002 + * The dm_noflush_suspending flag has been cleared by now, so 3003 + * unfortunately we must always run this. 
3004 + */ 3005 + noflush_work(tc, do_noflush_stop); 3156 3006 } 3157 3007 3158 3008 /* ··· 3250 3074 3251 3075 static struct target_type thin_target = { 3252 3076 .name = "thin", 3253 - .version = {1, 10, 0}, 3077 + .version = {1, 11, 0}, 3254 3078 .module = THIS_MODULE, 3255 3079 .ctr = thin_ctr, 3256 3080 .dtr = thin_dtr, 3257 3081 .map = thin_map, 3258 3082 .end_io = thin_endio, 3083 + .presuspend = thin_presuspend, 3259 3084 .postsuspend = thin_postsuspend, 3260 3085 .status = thin_status, 3261 3086 .iterate_devices = thin_iterate_devices,
+10
drivers/md/persistent-data/Kconfig
··· 6 6 ---help--- 7 7 Library providing immutable on-disk data structure support for 8 8 device-mapper targets such as the thin provisioning target. 9 + 10 + config DM_DEBUG_BLOCK_STACK_TRACING 11 + boolean "Keep stack trace of persistent data block lock holders" 12 + depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA 13 + select STACKTRACE 14 + ---help--- 15 + Enable this for messages that may help debug problems with the 16 + block manager locking used by thin provisioning and caching. 17 + 18 + If unsure, say N.
+94 -21
drivers/md/persistent-data/dm-space-map-metadata.c
··· 91 91 dm_block_t block; 92 92 }; 93 93 94 + struct bop_ring_buffer { 95 + unsigned begin; 96 + unsigned end; 97 + struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1]; 98 + }; 99 + 100 + static void brb_init(struct bop_ring_buffer *brb) 101 + { 102 + brb->begin = 0; 103 + brb->end = 0; 104 + } 105 + 106 + static bool brb_empty(struct bop_ring_buffer *brb) 107 + { 108 + return brb->begin == brb->end; 109 + } 110 + 111 + static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old) 112 + { 113 + unsigned r = old + 1; 114 + return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r; 115 + } 116 + 117 + static int brb_push(struct bop_ring_buffer *brb, 118 + enum block_op_type type, dm_block_t b) 119 + { 120 + struct block_op *bop; 121 + unsigned next = brb_next(brb, brb->end); 122 + 123 + /* 124 + * We don't allow the last bop to be filled, this way we can 125 + * differentiate between full and empty. 126 + */ 127 + if (next == brb->begin) 128 + return -ENOMEM; 129 + 130 + bop = brb->bops + brb->end; 131 + bop->type = type; 132 + bop->block = b; 133 + 134 + brb->end = next; 135 + 136 + return 0; 137 + } 138 + 139 + static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result) 140 + { 141 + struct block_op *bop; 142 + 143 + if (brb_empty(brb)) 144 + return -ENODATA; 145 + 146 + bop = brb->bops + brb->begin; 147 + result->type = bop->type; 148 + result->block = bop->block; 149 + 150 + brb->begin = brb_next(brb, brb->begin); 151 + 152 + return 0; 153 + } 154 + 155 + /*----------------------------------------------------------------*/ 156 + 94 157 struct sm_metadata { 95 158 struct dm_space_map sm; 96 159 ··· 164 101 165 102 unsigned recursion_count; 166 103 unsigned allocated_this_transaction; 167 - unsigned nr_uncommitted; 168 - struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS]; 104 + struct bop_ring_buffer uncommitted; 169 105 170 106 struct threshold threshold; 171 107 }; 172 108 173 109 static int add_bop(struct sm_metadata *smm, enum 
block_op_type type, dm_block_t b) 174 110 { 175 - struct block_op *op; 111 + int r = brb_push(&smm->uncommitted, type, b); 176 112 177 - if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) { 113 + if (r) { 178 114 DMERR("too many recursive allocations"); 179 115 return -ENOMEM; 180 116 } 181 - 182 - op = smm->uncommitted + smm->nr_uncommitted++; 183 - op->type = type; 184 - op->block = b; 185 117 186 118 return 0; 187 119 } ··· 216 158 return -ENOMEM; 217 159 } 218 160 219 - if (smm->recursion_count == 1 && smm->nr_uncommitted) { 220 - while (smm->nr_uncommitted && !r) { 221 - smm->nr_uncommitted--; 222 - r = commit_bop(smm, smm->uncommitted + 223 - smm->nr_uncommitted); 161 + if (smm->recursion_count == 1) { 162 + while (!brb_empty(&smm->uncommitted)) { 163 + struct block_op bop; 164 + 165 + r = brb_pop(&smm->uncommitted, &bop); 166 + if (r) { 167 + DMERR("bug in bop ring buffer"); 168 + break; 169 + } 170 + 171 + r = commit_bop(smm, &bop); 224 172 if (r) 225 173 break; 226 174 } ··· 281 217 static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b, 282 218 uint32_t *result) 283 219 { 284 - int r, i; 220 + int r; 221 + unsigned i; 285 222 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 286 223 unsigned adjustment = 0; 287 224 ··· 290 225 * We may have some uncommitted adjustments to add. This list 291 226 * should always be really short. 
292 227 */ 293 - for (i = 0; i < smm->nr_uncommitted; i++) { 294 - struct block_op *op = smm->uncommitted + i; 228 + for (i = smm->uncommitted.begin; 229 + i != smm->uncommitted.end; 230 + i = brb_next(&smm->uncommitted, i)) { 231 + struct block_op *op = smm->uncommitted.bops + i; 295 232 296 233 if (op->block != b) 297 234 continue; ··· 321 254 static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm, 322 255 dm_block_t b, int *result) 323 256 { 324 - int r, i, adjustment = 0; 257 + int r, adjustment = 0; 258 + unsigned i; 325 259 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 326 260 uint32_t rc; 327 261 ··· 330 262 * We may have some uncommitted adjustments to add. This list 331 263 * should always be really short. 332 264 */ 333 - for (i = 0; i < smm->nr_uncommitted; i++) { 334 - struct block_op *op = smm->uncommitted + i; 265 + for (i = smm->uncommitted.begin; 266 + i != smm->uncommitted.end; 267 + i = brb_next(&smm->uncommitted, i)) { 268 + 269 + struct block_op *op = smm->uncommitted.bops + i; 335 270 336 271 if (op->block != b) 337 272 continue; ··· 742 671 smm->begin = superblock + 1; 743 672 smm->recursion_count = 0; 744 673 smm->allocated_this_transaction = 0; 745 - smm->nr_uncommitted = 0; 674 + brb_init(&smm->uncommitted); 746 675 threshold_init(&smm->threshold); 747 676 748 677 memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); ··· 751 680 if (r) 752 681 return r; 753 682 683 + if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS) 684 + nr_blocks = DM_SM_METADATA_MAX_BLOCKS; 754 685 r = sm_ll_extend(&smm->ll, nr_blocks); 755 686 if (r) 756 687 return r; ··· 786 713 smm->begin = 0; 787 714 smm->recursion_count = 0; 788 715 smm->allocated_this_transaction = 0; 789 - smm->nr_uncommitted = 0; 716 + brb_init(&smm->uncommitted); 790 717 threshold_init(&smm->threshold); 791 718 792 719 memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
+11
drivers/md/persistent-data/dm-space-map-metadata.h
··· 9 9 10 10 #include "dm-transaction-manager.h" 11 11 12 + #define DM_SM_METADATA_BLOCK_SIZE (4096 >> SECTOR_SHIFT) 13 + 14 + /* 15 + * The metadata device is currently limited in size. 16 + * 17 + * We have one block of index, which can hold 255 index entries. Each 18 + * index entry contains allocation info about ~16k metadata blocks. 19 + */ 20 + #define DM_SM_METADATA_MAX_BLOCKS (255 * ((1 << 14) - 64)) 21 + #define DM_SM_METADATA_MAX_SECTORS (DM_SM_METADATA_MAX_BLOCKS * DM_SM_METADATA_BLOCK_SIZE) 22 + 12 23 /* 13 24 * Unfortunately we have to use two-phase construction due to the cycle 14 25 * between the tm and sm.
+10 -2
drivers/mfd/da9055-i2c.c
··· 53 53 return 0; 54 54 } 55 55 56 + /* 57 + * DO NOT change the device Ids. The naming is intentionally specific as both 58 + * the PMIC and CODEC parts of this chip are instantiated separately as I2C 59 + * devices (both have configurable I2C addresses, and are to all intents and 60 + * purposes separate). As a result there are specific DA9055 ids for PMIC 61 + * and CODEC, which must be different to operate together. 62 + */ 56 63 static struct i2c_device_id da9055_i2c_id[] = { 57 - {"da9055", 0}, 64 + {"da9055-pmic", 0}, 58 65 { } 59 66 }; 67 + MODULE_DEVICE_TABLE(i2c, da9055_i2c_id); 60 68 61 69 static struct i2c_driver da9055_i2c_driver = { 62 70 .probe = da9055_i2c_probe, 63 71 .remove = da9055_i2c_remove, 64 72 .id_table = da9055_i2c_id, 65 73 .driver = { 66 - .name = "da9055", 74 + .name = "da9055-pmic", 67 75 .owner = THIS_MODULE, 68 76 }, 69 77 };
+2
drivers/mfd/max14577.c
··· 173 173 }; 174 174 MODULE_DEVICE_TABLE(i2c, max14577_i2c_id); 175 175 176 + #ifdef CONFIG_PM_SLEEP 176 177 static int max14577_suspend(struct device *dev) 177 178 { 178 179 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); ··· 209 208 210 209 return 0; 211 210 } 211 + #endif /* CONFIG_PM_SLEEP */ 212 212 213 213 static struct of_device_id max14577_dt_match[] = { 214 214 { .compatible = "maxim,max14577", },
+3 -3
drivers/mfd/max8997.c
··· 164 164 return pd; 165 165 } 166 166 167 - static inline int max8997_i2c_get_driver_data(struct i2c_client *i2c, 167 + static inline unsigned long max8997_i2c_get_driver_data(struct i2c_client *i2c, 168 168 const struct i2c_device_id *id) 169 169 { 170 170 if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) { 171 171 const struct of_device_id *match; 172 172 match = of_match_node(max8997_pmic_dt_match, i2c->dev.of_node); 173 - return (int)match->data; 173 + return (unsigned long)match->data; 174 174 } 175 - return (int)id->driver_data; 175 + return id->driver_data; 176 176 } 177 177 178 178 static int max8997_i2c_probe(struct i2c_client *i2c,
+3 -3
drivers/mfd/max8998.c
··· 169 169 return pd; 170 170 } 171 171 172 - static inline int max8998_i2c_get_driver_data(struct i2c_client *i2c, 172 + static inline unsigned long max8998_i2c_get_driver_data(struct i2c_client *i2c, 173 173 const struct i2c_device_id *id) 174 174 { 175 175 if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) { 176 176 const struct of_device_id *match; 177 177 match = of_match_node(max8998_dt_match, i2c->dev.of_node); 178 - return (int)(long)match->data; 178 + return (unsigned long)match->data; 179 179 } 180 180 181 - return (int)id->driver_data; 181 + return id->driver_data; 182 182 } 183 183 184 184 static int max8998_i2c_probe(struct i2c_client *i2c,
+2
drivers/mfd/sec-core.c
··· 315 315 return 0; 316 316 } 317 317 318 + #ifdef CONFIG_PM_SLEEP 318 319 static int sec_pmic_suspend(struct device *dev) 319 320 { 320 321 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); ··· 350 349 351 350 return 0; 352 351 } 352 + #endif /* CONFIG_PM_SLEEP */ 353 353 354 354 static SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops, sec_pmic_suspend, sec_pmic_resume); 355 355
+2 -2
drivers/mfd/tps65217.c
··· 158 158 { 159 159 struct tps65217 *tps; 160 160 unsigned int version; 161 - unsigned int chip_id = ids->driver_data; 161 + unsigned long chip_id = ids->driver_data; 162 162 const struct of_device_id *match; 163 163 bool status_off = false; 164 164 int ret; ··· 170 170 "Failed to find matching dt id\n"); 171 171 return -EINVAL; 172 172 } 173 - chip_id = (unsigned int)(unsigned long)match->data; 173 + chip_id = (unsigned long)match->data; 174 174 status_off = of_property_read_bool(client->dev.of_node, 175 175 "ti,pmic-shutdown-controller"); 176 176 }
+1 -1
drivers/mfd/wm8994-core.c
··· 636 636 if (i2c->dev.of_node) { 637 637 of_id = of_match_device(wm8994_of_match, &i2c->dev); 638 638 if (of_id) 639 - wm8994->type = (int)of_id->data; 639 + wm8994->type = (enum wm8994_type)of_id->data; 640 640 } else { 641 641 wm8994->type = id->driver_data; 642 642 }
+3 -1
drivers/misc/mei/client.c
··· 666 666 goto err; 667 667 668 668 cb->fop_type = MEI_FOP_READ; 669 - cl->read_cb = cb; 670 669 if (dev->hbuf_is_ready) { 671 670 dev->hbuf_is_ready = false; 672 671 if (mei_hbm_cl_flow_control_req(dev, cl)) { ··· 677 678 } else { 678 679 list_add_tail(&cb->list, &dev->ctrl_wr_list.list); 679 680 } 681 + 682 + cl->read_cb = cb; 683 + 680 684 return rets; 681 685 err: 682 686 mei_io_cb_free(cb);
+1 -1
drivers/mmc/card/queue.c
··· 197 197 struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; 198 198 199 199 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) 200 - limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; 200 + limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; 201 201 202 202 mq->card = card; 203 203 mq->queue = blk_init_queue(mmc_request_fn, lock);
+1 -1
drivers/mtd/nand/nand_base.c
··· 1584 1584 } 1585 1585 1586 1586 if (mtd->ecc_stats.failed - ecc_failures) { 1587 - if (retry_mode + 1 <= chip->read_retries) { 1587 + if (retry_mode + 1 < chip->read_retries) { 1588 1588 retry_mode++; 1589 1589 ret = nand_setup_read_retry(mtd, 1590 1590 retry_mode);
+40 -21
drivers/mtd/nand/omap2.c
··· 1633 1633 int i; 1634 1634 dma_cap_mask_t mask; 1635 1635 unsigned sig; 1636 + unsigned oob_index; 1636 1637 struct resource *res; 1637 1638 struct mtd_part_parser_data ppdata = {}; 1638 1639 ··· 1827 1826 (mtd->writesize / 1828 1827 nand_chip->ecc.size); 1829 1828 if (nand_chip->options & NAND_BUSWIDTH_16) 1830 - ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH; 1829 + oob_index = BADBLOCK_MARKER_LENGTH; 1831 1830 else 1832 - ecclayout->eccpos[0] = 1; 1833 - ecclayout->oobfree->offset = ecclayout->eccpos[0] + 1834 - ecclayout->eccbytes; 1831 + oob_index = 1; 1832 + for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) 1833 + ecclayout->eccpos[i] = oob_index; 1834 + /* no reserved-marker in ecclayout for this ecc-scheme */ 1835 + ecclayout->oobfree->offset = 1836 + ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; 1835 1837 break; 1836 1838 1837 1839 case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: ··· 1851 1847 ecclayout->eccbytes = nand_chip->ecc.bytes * 1852 1848 (mtd->writesize / 1853 1849 nand_chip->ecc.size); 1854 - ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH; 1855 - ecclayout->oobfree->offset = ecclayout->eccpos[0] + 1856 - ecclayout->eccbytes; 1850 + oob_index = BADBLOCK_MARKER_LENGTH; 1851 + for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) { 1852 + ecclayout->eccpos[i] = oob_index; 1853 + if (((i + 1) % nand_chip->ecc.bytes) == 0) 1854 + oob_index++; 1855 + } 1856 + /* include reserved-marker in ecclayout->oobfree calculation */ 1857 + ecclayout->oobfree->offset = 1 + 1858 + ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; 1857 1859 /* software bch library is used for locating errors */ 1858 1860 nand_chip->ecc.priv = nand_bch_init(mtd, 1859 1861 nand_chip->ecc.size, ··· 1893 1883 ecclayout->eccbytes = nand_chip->ecc.bytes * 1894 1884 (mtd->writesize / 1895 1885 nand_chip->ecc.size); 1896 - ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH; 1897 - ecclayout->oobfree->offset = ecclayout->eccpos[0] + 1898 - ecclayout->eccbytes; 1886 + oob_index = 
BADBLOCK_MARKER_LENGTH; 1887 + for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) 1888 + ecclayout->eccpos[i] = oob_index; 1889 + /* reserved marker already included in ecclayout->eccbytes */ 1890 + ecclayout->oobfree->offset = 1891 + ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; 1899 1892 /* This ECC scheme requires ELM H/W block */ 1900 1893 if (is_elm_present(info, pdata->elm_of_node, BCH4_ECC) < 0) { 1901 1894 pr_err("nand: error: could not initialize ELM\n"); ··· 1926 1913 ecclayout->eccbytes = nand_chip->ecc.bytes * 1927 1914 (mtd->writesize / 1928 1915 nand_chip->ecc.size); 1929 - ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH; 1930 - ecclayout->oobfree->offset = ecclayout->eccpos[0] + 1931 - ecclayout->eccbytes; 1916 + oob_index = BADBLOCK_MARKER_LENGTH; 1917 + for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) { 1918 + ecclayout->eccpos[i] = oob_index; 1919 + if (((i + 1) % nand_chip->ecc.bytes) == 0) 1920 + oob_index++; 1921 + } 1922 + /* include reserved-marker in ecclayout->oobfree calculation */ 1923 + ecclayout->oobfree->offset = 1 + 1924 + ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; 1932 1925 /* software bch library is used for locating errors */ 1933 1926 nand_chip->ecc.priv = nand_bch_init(mtd, 1934 1927 nand_chip->ecc.size, ··· 1975 1956 ecclayout->eccbytes = nand_chip->ecc.bytes * 1976 1957 (mtd->writesize / 1977 1958 nand_chip->ecc.size); 1978 - ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH; 1979 - ecclayout->oobfree->offset = ecclayout->eccpos[0] + 1980 - ecclayout->eccbytes; 1959 + oob_index = BADBLOCK_MARKER_LENGTH; 1960 + for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) 1961 + ecclayout->eccpos[i] = oob_index; 1962 + /* reserved marker already included in ecclayout->eccbytes */ 1963 + ecclayout->oobfree->offset = 1964 + ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; 1981 1965 break; 1982 1966 #else 1983 1967 pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n"); ··· 1994 1972 goto return_error; 1995 1973 } 1996 
1974 1997 - /* populate remaining ECC layout data */ 1998 - ecclayout->oobfree->length = mtd->oobsize - (BADBLOCK_MARKER_LENGTH + 1999 - ecclayout->eccbytes); 2000 - for (i = 1; i < ecclayout->eccbytes; i++) 2001 - ecclayout->eccpos[i] = ecclayout->eccpos[0] + i; 1975 + /* all OOB bytes from oobfree->offset till end off OOB are free */ 1976 + ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset; 2002 1977 /* check if NAND device's OOB is enough to store ECC signatures */ 2003 1978 if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) { 2004 1979 pr_err("not enough OOB bytes required = %d, available=%d\n",
+4 -4
drivers/mtd/ubi/fastmap.c
··· 463 463 } 464 464 } 465 465 if (found_orphan) { 466 - kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); 467 466 list_del(&tmp_aeb->u.list); 467 + kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); 468 468 } 469 469 470 470 new_aeb = kmem_cache_alloc(ai->aeb_slab_cache, ··· 846 846 ret = UBI_BAD_FASTMAP; 847 847 fail: 848 848 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) { 849 - kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); 850 849 list_del(&tmp_aeb->u.list); 850 + kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); 851 851 } 852 852 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) { 853 - kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); 854 853 list_del(&tmp_aeb->u.list); 854 + kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); 855 855 } 856 856 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) { 857 - kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); 858 857 list_del(&tmp_aeb->u.list); 858 + kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); 859 859 } 860 860 861 861 return ret;
+1 -1
drivers/net/Kconfig
··· 139 139 This adds a specialized tap character device driver that is based 140 140 on the MAC-VLAN network interface, called macvtap. A macvtap device 141 141 can be added in the same way as a macvlan device, using 'type 142 - macvlan', and then be accessed through the tap user space interface. 142 + macvtap', and then be accessed through the tap user space interface. 143 143 144 144 To compile this driver as a module, choose M here: the module 145 145 will be called macvtap.
+16 -6
drivers/net/bonding/bond_3ad.c
··· 181 181 */ 182 182 static inline void __disable_port(struct port *port) 183 183 { 184 - bond_set_slave_inactive_flags(port->slave); 184 + bond_set_slave_inactive_flags(port->slave, BOND_SLAVE_NOTIFY_LATER); 185 185 } 186 186 187 187 /** ··· 193 193 struct slave *slave = port->slave; 194 194 195 195 if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev)) 196 - bond_set_slave_active_flags(slave); 196 + bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER); 197 197 } 198 198 199 199 /** ··· 1796 1796 BOND_AD_INFO(bond).agg_select_timer = timeout; 1797 1797 } 1798 1798 1799 - static u16 aggregator_identifier; 1800 - 1801 1799 /** 1802 1800 * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures 1803 1801 * @bond: bonding struct to work on ··· 1809 1811 if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr), 1810 1812 bond->dev->dev_addr)) { 1811 1813 1812 - aggregator_identifier = 0; 1814 + BOND_AD_INFO(bond).aggregator_identifier = 0; 1813 1815 1814 1816 BOND_AD_INFO(bond).system.sys_priority = 0xFFFF; 1815 1817 BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr); ··· 1878 1880 ad_initialize_agg(aggregator); 1879 1881 1880 1882 aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr); 1881 - aggregator->aggregator_identifier = (++aggregator_identifier); 1883 + aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier; 1882 1884 aggregator->slave = slave; 1883 1885 aggregator->is_active = 0; 1884 1886 aggregator->num_of_ports = 0; ··· 2062 2064 struct list_head *iter; 2063 2065 struct slave *slave; 2064 2066 struct port *port; 2067 + bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER; 2065 2068 2066 2069 read_lock(&bond->lock); 2067 2070 rcu_read_lock(); ··· 2120 2121 } 2121 2122 2122 2123 re_arm: 2124 + bond_for_each_slave_rcu(bond, slave, iter) { 2125 + if (slave->should_notify) { 2126 + should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW; 2127 + break; 2128 + } 
2129 + } 2123 2130 rcu_read_unlock(); 2124 2131 read_unlock(&bond->lock); 2132 + 2133 + if (should_notify_rtnl && rtnl_trylock()) { 2134 + bond_slave_state_notify(bond); 2135 + rtnl_unlock(); 2136 + } 2125 2137 queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); 2126 2138 } 2127 2139
+1
drivers/net/bonding/bond_3ad.h
··· 253 253 struct ad_bond_info { 254 254 struct ad_system system; /* 802.3ad system structure */ 255 255 u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes 256 + u16 aggregator_identifier; 256 257 }; 257 258 258 259 struct ad_slave_info {
+79 -56
drivers/net/bonding/bond_main.c
··· 829 829 if (bond_is_lb(bond)) { 830 830 bond_alb_handle_active_change(bond, new_active); 831 831 if (old_active) 832 - bond_set_slave_inactive_flags(old_active); 832 + bond_set_slave_inactive_flags(old_active, 833 + BOND_SLAVE_NOTIFY_NOW); 833 834 if (new_active) 834 - bond_set_slave_active_flags(new_active); 835 + bond_set_slave_active_flags(new_active, 836 + BOND_SLAVE_NOTIFY_NOW); 835 837 } else { 836 838 rcu_assign_pointer(bond->curr_active_slave, new_active); 837 839 } 838 840 839 841 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) { 840 842 if (old_active) 841 - bond_set_slave_inactive_flags(old_active); 843 + bond_set_slave_inactive_flags(old_active, 844 + BOND_SLAVE_NOTIFY_NOW); 842 845 843 846 if (new_active) { 844 847 bool should_notify_peers = false; 845 848 846 - bond_set_slave_active_flags(new_active); 849 + bond_set_slave_active_flags(new_active, 850 + BOND_SLAVE_NOTIFY_NOW); 847 851 848 852 if (bond->params.fail_over_mac) 849 853 bond_do_fail_over_mac(bond, new_active, ··· 1197 1193 return -EBUSY; 1198 1194 } 1199 1195 1196 + if (bond_dev == slave_dev) { 1197 + pr_err("%s: cannot enslave bond to itself.\n", bond_dev->name); 1198 + return -EPERM; 1199 + } 1200 + 1200 1201 /* vlan challenged mutual exclusion */ 1201 1202 /* no need to lock since we're protected by rtnl_lock */ 1202 1203 if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { ··· 1472 1463 1473 1464 switch (bond->params.mode) { 1474 1465 case BOND_MODE_ACTIVEBACKUP: 1475 - bond_set_slave_inactive_flags(new_slave); 1466 + bond_set_slave_inactive_flags(new_slave, 1467 + BOND_SLAVE_NOTIFY_NOW); 1476 1468 break; 1477 1469 case BOND_MODE_8023AD: 1478 1470 /* in 802.3ad mode, the internal mechanism 1479 1471 * will activate the slaves in the selected 1480 1472 * aggregator 1481 1473 */ 1482 - bond_set_slave_inactive_flags(new_slave); 1474 + bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW); 1483 1475 /* if this is the first slave */ 1484 1476 if (!prev_slave) { 1485 1477 
SLAVE_AD_INFO(new_slave).id = 1; ··· 1498 1488 case BOND_MODE_TLB: 1499 1489 case BOND_MODE_ALB: 1500 1490 bond_set_active_slave(new_slave); 1501 - bond_set_slave_inactive_flags(new_slave); 1491 + bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW); 1502 1492 break; 1503 1493 default: 1504 1494 pr_debug("This slave is always active in trunk mode\n"); ··· 1553 1543 bond_set_carrier(bond); 1554 1544 1555 1545 if (USES_PRIMARY(bond->params.mode)) { 1546 + block_netpoll_tx(); 1556 1547 write_lock_bh(&bond->curr_slave_lock); 1557 1548 bond_select_active_slave(bond); 1558 1549 write_unlock_bh(&bond->curr_slave_lock); 1550 + unblock_netpoll_tx(); 1559 1551 } 1560 1552 1561 1553 pr_info("%s: enslaving %s as a%s interface with a%s link.\n", ··· 1583 1571 if (bond->primary_slave == new_slave) 1584 1572 bond->primary_slave = NULL; 1585 1573 if (bond->curr_active_slave == new_slave) { 1574 + block_netpoll_tx(); 1586 1575 write_lock_bh(&bond->curr_slave_lock); 1587 1576 bond_change_active_slave(bond, NULL); 1588 1577 bond_select_active_slave(bond); 1589 1578 write_unlock_bh(&bond->curr_slave_lock); 1579 + unblock_netpoll_tx(); 1590 1580 } 1591 1581 slave_disable_netpoll(new_slave); 1592 1582 ··· 1663 1649 unblock_netpoll_tx(); 1664 1650 return -EINVAL; 1665 1651 } 1666 - 1667 - /* release the slave from its bond */ 1668 - bond->slave_cnt--; 1669 1652 1670 1653 bond_sysfs_slave_del(slave); 1671 1654 ··· 1745 1734 1746 1735 unblock_netpoll_tx(); 1747 1736 synchronize_rcu(); 1737 + bond->slave_cnt--; 1748 1738 1749 1739 if (!bond_has_slaves(bond)) { 1750 1740 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); ··· 2023 2011 2024 2012 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP || 2025 2013 bond->params.mode == BOND_MODE_8023AD) 2026 - bond_set_slave_inactive_flags(slave); 2014 + bond_set_slave_inactive_flags(slave, 2015 + BOND_SLAVE_NOTIFY_NOW); 2027 2016 2028 2017 pr_info("%s: link status definitely down for interface %s, disabling it\n", 2029 2018 
bond->dev->name, slave->dev->name); ··· 2571 2558 slave->link = BOND_LINK_UP; 2572 2559 if (bond->current_arp_slave) { 2573 2560 bond_set_slave_inactive_flags( 2574 - bond->current_arp_slave); 2561 + bond->current_arp_slave, 2562 + BOND_SLAVE_NOTIFY_NOW); 2575 2563 bond->current_arp_slave = NULL; 2576 2564 } 2577 2565 ··· 2592 2578 slave->link_failure_count++; 2593 2579 2594 2580 slave->link = BOND_LINK_DOWN; 2595 - bond_set_slave_inactive_flags(slave); 2581 + bond_set_slave_inactive_flags(slave, 2582 + BOND_SLAVE_NOTIFY_NOW); 2596 2583 2597 2584 pr_info("%s: link status definitely down for interface %s, disabling it\n", 2598 2585 bond->dev->name, slave->dev->name); ··· 2626 2611 2627 2612 /* 2628 2613 * Send ARP probes for active-backup mode ARP monitor. 2614 + * 2615 + * Called with rcu_read_lock hold. 2629 2616 */ 2630 2617 static bool bond_ab_arp_probe(struct bonding *bond) 2631 2618 { 2632 2619 struct slave *slave, *before = NULL, *new_slave = NULL, 2633 - *curr_arp_slave, *curr_active_slave; 2620 + *curr_arp_slave = rcu_dereference(bond->current_arp_slave), 2621 + *curr_active_slave = rcu_dereference(bond->curr_active_slave); 2634 2622 struct list_head *iter; 2635 2623 bool found = false; 2636 - 2637 - rcu_read_lock(); 2638 - curr_arp_slave = rcu_dereference(bond->current_arp_slave); 2639 - curr_active_slave = rcu_dereference(bond->curr_active_slave); 2624 + bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER; 2640 2625 2641 2626 if (curr_arp_slave && curr_active_slave) 2642 2627 pr_info("PROBE: c_arp %s && cas %s BAD\n", ··· 2645 2630 2646 2631 if (curr_active_slave) { 2647 2632 bond_arp_send_all(bond, curr_active_slave); 2648 - rcu_read_unlock(); 2649 - return true; 2633 + return should_notify_rtnl; 2650 2634 } 2651 - rcu_read_unlock(); 2652 2635 2653 2636 /* if we don't have a curr_active_slave, search for the next available 2654 2637 * backup slave from the current_arp_slave and make it the candidate 2655 2638 * for becoming the curr_active_slave 2656 2639 
*/ 2657 2640 2658 - if (!rtnl_trylock()) 2659 - return false; 2660 - /* curr_arp_slave might have gone away */ 2661 - curr_arp_slave = ACCESS_ONCE(bond->current_arp_slave); 2662 - 2663 2641 if (!curr_arp_slave) { 2664 - curr_arp_slave = bond_first_slave(bond); 2665 - if (!curr_arp_slave) { 2666 - rtnl_unlock(); 2667 - return true; 2668 - } 2642 + curr_arp_slave = bond_first_slave_rcu(bond); 2643 + if (!curr_arp_slave) 2644 + return should_notify_rtnl; 2669 2645 } 2670 2646 2671 - bond_set_slave_inactive_flags(curr_arp_slave); 2647 + bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER); 2672 2648 2673 - bond_for_each_slave(bond, slave, iter) { 2649 + bond_for_each_slave_rcu(bond, slave, iter) { 2674 2650 if (!found && !before && IS_UP(slave->dev)) 2675 2651 before = slave; 2676 2652 ··· 2679 2673 if (slave->link_failure_count < UINT_MAX) 2680 2674 slave->link_failure_count++; 2681 2675 2682 - bond_set_slave_inactive_flags(slave); 2676 + bond_set_slave_inactive_flags(slave, 2677 + BOND_SLAVE_NOTIFY_LATER); 2683 2678 2684 2679 pr_info("%s: backup interface %s is now down.\n", 2685 2680 bond->dev->name, slave->dev->name); ··· 2692 2685 if (!new_slave && before) 2693 2686 new_slave = before; 2694 2687 2695 - if (!new_slave) { 2696 - rtnl_unlock(); 2697 - return true; 2698 - } 2688 + if (!new_slave) 2689 + goto check_state; 2699 2690 2700 2691 new_slave->link = BOND_LINK_BACK; 2701 - bond_set_slave_active_flags(new_slave); 2692 + bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER); 2702 2693 bond_arp_send_all(bond, new_slave); 2703 2694 new_slave->jiffies = jiffies; 2704 2695 rcu_assign_pointer(bond->current_arp_slave, new_slave); 2705 - rtnl_unlock(); 2706 2696 2707 - return true; 2697 + check_state: 2698 + bond_for_each_slave_rcu(bond, slave, iter) { 2699 + if (slave->should_notify) { 2700 + should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW; 2701 + break; 2702 + } 2703 + } 2704 + return should_notify_rtnl; 2708 2705 } 2709 2706 2710 2707 static 
void bond_activebackup_arp_mon(struct work_struct *work) 2711 2708 { 2712 2709 struct bonding *bond = container_of(work, struct bonding, 2713 2710 arp_work.work); 2714 - bool should_notify_peers = false, should_commit = false; 2711 + bool should_notify_peers = false; 2712 + bool should_notify_rtnl = false; 2715 2713 int delta_in_ticks; 2716 2714 2717 2715 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); ··· 2725 2713 goto re_arm; 2726 2714 2727 2715 rcu_read_lock(); 2728 - should_notify_peers = bond_should_notify_peers(bond); 2729 - should_commit = bond_ab_arp_inspect(bond); 2730 - rcu_read_unlock(); 2731 2716 2732 - if (should_commit) { 2717 + should_notify_peers = bond_should_notify_peers(bond); 2718 + 2719 + if (bond_ab_arp_inspect(bond)) { 2720 + rcu_read_unlock(); 2721 + 2733 2722 /* Race avoidance with bond_close flush of workqueue */ 2734 2723 if (!rtnl_trylock()) { 2735 2724 delta_in_ticks = 1; ··· 2739 2726 } 2740 2727 2741 2728 bond_ab_arp_commit(bond); 2729 + 2742 2730 rtnl_unlock(); 2731 + rcu_read_lock(); 2743 2732 } 2744 2733 2745 - if (!bond_ab_arp_probe(bond)) { 2746 - /* rtnl locking failed, re-arm */ 2747 - delta_in_ticks = 1; 2748 - should_notify_peers = false; 2749 - } 2734 + should_notify_rtnl = bond_ab_arp_probe(bond); 2735 + rcu_read_unlock(); 2750 2736 2751 2737 re_arm: 2752 2738 if (bond->params.arp_interval) 2753 2739 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); 2754 2740 2755 - if (should_notify_peers) { 2741 + if (should_notify_peers || should_notify_rtnl) { 2756 2742 if (!rtnl_trylock()) 2757 2743 return; 2758 - call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); 2744 + 2745 + if (should_notify_peers) 2746 + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, 2747 + bond->dev); 2748 + if (should_notify_rtnl) 2749 + bond_slave_state_notify(bond); 2750 + 2759 2751 rtnl_unlock(); 2760 2752 } 2761 2753 } ··· 2882 2864 pr_info("%s: Primary slave changed to %s, reselecting active slave.\n", 2883 2865 
bond->dev->name, bond->primary_slave ? slave_dev->name : 2884 2866 "none"); 2867 + 2868 + block_netpoll_tx(); 2885 2869 write_lock_bh(&bond->curr_slave_lock); 2886 2870 bond_select_active_slave(bond); 2887 2871 write_unlock_bh(&bond->curr_slave_lock); 2872 + unblock_netpoll_tx(); 2888 2873 break; 2889 2874 case NETDEV_FEAT_CHANGE: 2890 2875 bond_compute_features(bond); ··· 3060 3039 bond_for_each_slave(bond, slave, iter) { 3061 3040 if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) 3062 3041 && (slave != bond->curr_active_slave)) { 3063 - bond_set_slave_inactive_flags(slave); 3042 + bond_set_slave_inactive_flags(slave, 3043 + BOND_SLAVE_NOTIFY_NOW); 3064 3044 } else { 3065 - bond_set_slave_active_flags(slave); 3045 + bond_set_slave_active_flags(slave, 3046 + BOND_SLAVE_NOTIFY_NOW); 3066 3047 } 3067 3048 } 3068 3049 read_unlock(&bond->curr_slave_lock); ··· 3723 3700 3724 3701 3725 3702 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, 3726 - void *accel_priv) 3703 + void *accel_priv, select_queue_fallback_t fallback) 3727 3704 { 3728 3705 /* 3729 3706 * This helper function exists to help dev_pick_tx get the correct
+1 -1
drivers/net/bonding/bond_options.c
··· 14 14 #include <linux/errno.h> 15 15 #include <linux/if.h> 16 16 #include <linux/netdevice.h> 17 - #include <linux/rwlock.h> 17 + #include <linux/spinlock.h> 18 18 #include <linux/rcupdate.h> 19 19 #include <linux/ctype.h> 20 20 #include <linux/inet.h>
+42 -5
drivers/net/bonding/bonding.h
··· 195 195 s8 new_link; 196 196 u8 backup:1, /* indicates backup slave. Value corresponds with 197 197 BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ 198 - inactive:1; /* indicates inactive slave */ 198 + inactive:1, /* indicates inactive slave */ 199 + should_notify:1; /* indicateds whether the state changed */ 199 200 u8 duplex; 200 201 u32 original_mtu; 201 202 u32 link_failure_count; ··· 304 303 } 305 304 } 306 305 306 + static inline void bond_set_slave_state(struct slave *slave, 307 + int slave_state, bool notify) 308 + { 309 + if (slave->backup == slave_state) 310 + return; 311 + 312 + slave->backup = slave_state; 313 + if (notify) { 314 + rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL); 315 + slave->should_notify = 0; 316 + } else { 317 + if (slave->should_notify) 318 + slave->should_notify = 0; 319 + else 320 + slave->should_notify = 1; 321 + } 322 + } 323 + 307 324 static inline void bond_slave_state_change(struct bonding *bond) 308 325 { 309 326 struct list_head *iter; ··· 332 313 bond_set_active_slave(tmp); 333 314 else if (tmp->link == BOND_LINK_DOWN) 334 315 bond_set_backup_slave(tmp); 316 + } 317 + } 318 + 319 + static inline void bond_slave_state_notify(struct bonding *bond) 320 + { 321 + struct list_head *iter; 322 + struct slave *tmp; 323 + 324 + bond_for_each_slave(bond, tmp, iter) { 325 + if (tmp->should_notify) { 326 + rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_KERNEL); 327 + tmp->should_notify = 0; 328 + } 335 329 } 336 330 } 337 331 ··· 374 342 #define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP) 375 343 #define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \ 376 344 BOND_ARP_VALIDATE_BACKUP) 345 + 346 + #define BOND_SLAVE_NOTIFY_NOW true 347 + #define BOND_SLAVE_NOTIFY_LATER false 377 348 378 349 static inline int slave_do_arp_validate(struct bonding *bond, 379 350 struct slave *slave) ··· 429 394 } 430 395 #endif 431 396 432 - static inline void bond_set_slave_inactive_flags(struct slave *slave) 397 + static inline void 
bond_set_slave_inactive_flags(struct slave *slave, 398 + bool notify) 433 399 { 434 400 if (!bond_is_lb(slave->bond)) 435 - bond_set_backup_slave(slave); 401 + bond_set_slave_state(slave, BOND_STATE_BACKUP, notify); 436 402 if (!slave->bond->params.all_slaves_active) 437 403 slave->inactive = 1; 438 404 } 439 405 440 - static inline void bond_set_slave_active_flags(struct slave *slave) 406 + static inline void bond_set_slave_active_flags(struct slave *slave, 407 + bool notify) 441 408 { 442 - bond_set_active_slave(slave); 409 + bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify); 443 410 slave->inactive = 0; 444 411 } 445 412
+131 -41
drivers/net/can/flexcan.c
··· 144 144 145 145 #define FLEXCAN_MB_CODE_MASK (0xf0ffffff) 146 146 147 + #define FLEXCAN_TIMEOUT_US (50) 148 + 147 149 /* 148 150 * FLEXCAN hardware feature flags 149 151 * ··· 264 262 } 265 263 #endif 266 264 265 + static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) 266 + { 267 + if (!priv->reg_xceiver) 268 + return 0; 269 + 270 + return regulator_enable(priv->reg_xceiver); 271 + } 272 + 273 + static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv) 274 + { 275 + if (!priv->reg_xceiver) 276 + return 0; 277 + 278 + return regulator_disable(priv->reg_xceiver); 279 + } 280 + 267 281 static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, 268 282 u32 reg_esr) 269 283 { ··· 287 269 (reg_esr & FLEXCAN_ESR_ERR_BUS); 288 270 } 289 271 290 - static inline void flexcan_chip_enable(struct flexcan_priv *priv) 272 + static int flexcan_chip_enable(struct flexcan_priv *priv) 291 273 { 292 274 struct flexcan_regs __iomem *regs = priv->base; 275 + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; 293 276 u32 reg; 294 277 295 278 reg = flexcan_read(&regs->mcr); 296 279 reg &= ~FLEXCAN_MCR_MDIS; 297 280 flexcan_write(reg, &regs->mcr); 298 281 299 - udelay(10); 282 + while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) 283 + usleep_range(10, 20); 284 + 285 + if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK) 286 + return -ETIMEDOUT; 287 + 288 + return 0; 300 289 } 301 290 302 - static inline void flexcan_chip_disable(struct flexcan_priv *priv) 291 + static int flexcan_chip_disable(struct flexcan_priv *priv) 303 292 { 304 293 struct flexcan_regs __iomem *regs = priv->base; 294 + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; 305 295 u32 reg; 306 296 307 297 reg = flexcan_read(&regs->mcr); 308 298 reg |= FLEXCAN_MCR_MDIS; 309 299 flexcan_write(reg, &regs->mcr); 300 + 301 + while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) 302 + usleep_range(10, 20); 303 + 304 + if 
(!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) 305 + return -ETIMEDOUT; 306 + 307 + return 0; 308 + } 309 + 310 + static int flexcan_chip_freeze(struct flexcan_priv *priv) 311 + { 312 + struct flexcan_regs __iomem *regs = priv->base; 313 + unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate; 314 + u32 reg; 315 + 316 + reg = flexcan_read(&regs->mcr); 317 + reg |= FLEXCAN_MCR_HALT; 318 + flexcan_write(reg, &regs->mcr); 319 + 320 + while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) 321 + usleep_range(100, 200); 322 + 323 + if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) 324 + return -ETIMEDOUT; 325 + 326 + return 0; 327 + } 328 + 329 + static int flexcan_chip_unfreeze(struct flexcan_priv *priv) 330 + { 331 + struct flexcan_regs __iomem *regs = priv->base; 332 + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; 333 + u32 reg; 334 + 335 + reg = flexcan_read(&regs->mcr); 336 + reg &= ~FLEXCAN_MCR_HALT; 337 + flexcan_write(reg, &regs->mcr); 338 + 339 + while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) 340 + usleep_range(10, 20); 341 + 342 + if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK) 343 + return -ETIMEDOUT; 344 + 345 + return 0; 346 + } 347 + 348 + static int flexcan_chip_softreset(struct flexcan_priv *priv) 349 + { 350 + struct flexcan_regs __iomem *regs = priv->base; 351 + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; 352 + 353 + flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr); 354 + while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)) 355 + usleep_range(10, 20); 356 + 357 + if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST) 358 + return -ETIMEDOUT; 359 + 360 + return 0; 310 361 } 311 362 312 363 static int flexcan_get_berr_counter(const struct net_device *dev, ··· 796 709 u32 reg_mcr, reg_ctrl; 797 710 798 711 /* enable module */ 799 - flexcan_chip_enable(priv); 712 + err = flexcan_chip_enable(priv); 713 + if (err) 714 + return err; 800 715 801 716 /* soft reset */ 802 - 
flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr); 803 - udelay(10); 804 - 805 - reg_mcr = flexcan_read(&regs->mcr); 806 - if (reg_mcr & FLEXCAN_MCR_SOFTRST) { 807 - netdev_err(dev, "Failed to softreset can module (mcr=0x%08x)\n", 808 - reg_mcr); 809 - err = -ENODEV; 810 - goto out; 811 - } 717 + err = flexcan_chip_softreset(priv); 718 + if (err) 719 + goto out_chip_disable; 812 720 813 721 flexcan_set_bittiming(dev); 814 722 ··· 870 788 if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES) 871 789 flexcan_write(0x0, &regs->rxfgmask); 872 790 873 - if (priv->reg_xceiver) { 874 - err = regulator_enable(priv->reg_xceiver); 875 - if (err) 876 - goto out; 877 - } 791 + err = flexcan_transceiver_enable(priv); 792 + if (err) 793 + goto out_chip_disable; 878 794 879 795 /* synchronize with the can bus */ 880 - reg_mcr = flexcan_read(&regs->mcr); 881 - reg_mcr &= ~FLEXCAN_MCR_HALT; 882 - flexcan_write(reg_mcr, &regs->mcr); 796 + err = flexcan_chip_unfreeze(priv); 797 + if (err) 798 + goto out_transceiver_disable; 883 799 884 800 priv->can.state = CAN_STATE_ERROR_ACTIVE; 885 801 ··· 890 810 891 811 return 0; 892 812 893 - out: 813 + out_transceiver_disable: 814 + flexcan_transceiver_disable(priv); 815 + out_chip_disable: 894 816 flexcan_chip_disable(priv); 895 817 return err; 896 818 } ··· 907 825 { 908 826 struct flexcan_priv *priv = netdev_priv(dev); 909 827 struct flexcan_regs __iomem *regs = priv->base; 910 - u32 reg; 828 + 829 + /* freeze + disable module */ 830 + flexcan_chip_freeze(priv); 831 + flexcan_chip_disable(priv); 911 832 912 833 /* Disable all interrupts */ 913 834 flexcan_write(0, &regs->imask1); 835 + flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, 836 + &regs->ctrl); 914 837 915 - /* Disable + halt module */ 916 - reg = flexcan_read(&regs->mcr); 917 - reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT; 918 - flexcan_write(reg, &regs->mcr); 919 - 920 - if (priv->reg_xceiver) 921 - regulator_disable(priv->reg_xceiver); 838 + 
flexcan_transceiver_disable(priv); 922 839 priv->can.state = CAN_STATE_STOPPED; 923 840 924 841 return; ··· 947 866 /* start chip and queuing */ 948 867 err = flexcan_chip_start(dev); 949 868 if (err) 950 - goto out_close; 869 + goto out_free_irq; 951 870 952 871 can_led_event(dev, CAN_LED_EVENT_OPEN); 953 872 ··· 956 875 957 876 return 0; 958 877 878 + out_free_irq: 879 + free_irq(dev->irq, dev); 959 880 out_close: 960 881 close_candev(dev); 961 882 out_disable_per: ··· 1028 945 goto out_disable_ipg; 1029 946 1030 947 /* select "bus clock", chip must be disabled */ 1031 - flexcan_chip_disable(priv); 948 + err = flexcan_chip_disable(priv); 949 + if (err) 950 + goto out_disable_per; 1032 951 reg = flexcan_read(&regs->ctrl); 1033 952 reg |= FLEXCAN_CTRL_CLK_SRC; 1034 953 flexcan_write(reg, &regs->ctrl); 1035 954 1036 - flexcan_chip_enable(priv); 955 + err = flexcan_chip_enable(priv); 956 + if (err) 957 + goto out_chip_disable; 1037 958 1038 959 /* set freeze, halt and activate FIFO, restrict register access */ 1039 960 reg = flexcan_read(&regs->mcr); ··· 1054 967 if (!(reg & FLEXCAN_MCR_FEN)) { 1055 968 netdev_err(dev, "Could not enable RX FIFO, unsupported core\n"); 1056 969 err = -ENODEV; 1057 - goto out_disable_per; 970 + goto out_chip_disable; 1058 971 } 1059 972 1060 973 err = register_candev(dev); 1061 974 1062 - out_disable_per: 1063 975 /* disable core and turn off clocks */ 976 + out_chip_disable: 1064 977 flexcan_chip_disable(priv); 978 + out_disable_per: 1065 979 clk_disable_unprepare(priv->clk_per); 1066 980 out_disable_ipg: 1067 981 clk_disable_unprepare(priv->clk_ipg); ··· 1192 1104 static int flexcan_remove(struct platform_device *pdev) 1193 1105 { 1194 1106 struct net_device *dev = platform_get_drvdata(pdev); 1107 + struct flexcan_priv *priv = netdev_priv(dev); 1195 1108 1196 1109 unregister_flexcandev(dev); 1197 - 1110 + netif_napi_del(&priv->napi); 1198 1111 free_candev(dev); 1199 1112 1200 1113 return 0; ··· 1206 1117 { 1207 1118 struct net_device 
*dev = dev_get_drvdata(device); 1208 1119 struct flexcan_priv *priv = netdev_priv(dev); 1120 + int err; 1209 1121 1210 - flexcan_chip_disable(priv); 1122 + err = flexcan_chip_disable(priv); 1123 + if (err) 1124 + return err; 1211 1125 1212 1126 if (netif_running(dev)) { 1213 1127 netif_stop_queue(dev); ··· 1231 1139 netif_device_attach(dev); 1232 1140 netif_start_queue(dev); 1233 1141 } 1234 - flexcan_chip_enable(priv); 1235 - 1236 - return 0; 1142 + return flexcan_chip_enable(priv); 1237 1143 } 1238 1144 #endif /* CONFIG_PM_SLEEP */ 1239 1145
+2
drivers/net/can/usb/kvaser_usb.c
··· 473 473 return err; 474 474 475 475 dev->nchannels = msg.u.cardinfo.nchannels; 476 + if (dev->nchannels > MAX_NET_DEVICES) 477 + return -EINVAL; 476 478 477 479 return 0; 478 480 }
+13 -1
drivers/net/ethernet/broadcom/b44.c
··· 1484 1484 add_timer(&bp->timer); 1485 1485 1486 1486 b44_enable_ints(bp); 1487 + 1488 + if (bp->flags & B44_FLAG_EXTERNAL_PHY) 1489 + phy_start(bp->phydev); 1490 + 1487 1491 netif_start_queue(dev); 1488 1492 out: 1489 1493 return err; ··· 1649 1645 struct b44 *bp = netdev_priv(dev); 1650 1646 1651 1647 netif_stop_queue(dev); 1648 + 1649 + if (bp->flags & B44_FLAG_EXTERNAL_PHY) 1650 + phy_stop(bp->phydev); 1652 1651 1653 1652 napi_disable(&bp->napi); 1654 1653 ··· 2229 2222 } 2230 2223 2231 2224 if (status_changed) { 2232 - b44_check_phy(bp); 2225 + u32 val = br32(bp, B44_TX_CTRL); 2226 + if (bp->flags & B44_FLAG_FULL_DUPLEX) 2227 + val |= TX_CTRL_DUPLEX; 2228 + else 2229 + val &= ~TX_CTRL_DUPLEX; 2230 + bw32(bp, B44_TX_CTRL, val); 2233 2231 phy_print_status(phydev); 2234 2232 } 2235 2233 }
+10 -3
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 1873 1873 } 1874 1874 1875 1875 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, 1876 - void *accel_priv) 1876 + void *accel_priv, select_queue_fallback_t fallback) 1877 1877 { 1878 1878 struct bnx2x *bp = netdev_priv(dev); 1879 1879 ··· 1895 1895 } 1896 1896 1897 1897 /* select a non-FCoE queue */ 1898 - return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); 1898 + return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); 1899 1899 } 1900 1900 1901 1901 void bnx2x_set_num_queues(struct bnx2x *bp) ··· 3875 3875 xmit_type); 3876 3876 } 3877 3877 3878 - /* Add the macs to the parsing BD this is a vf */ 3878 + /* Add the macs to the parsing BD if this is a vf or if 3879 + * Tx Switching is enabled. 3880 + */ 3879 3881 if (IS_VF(bp)) { 3880 3882 /* override GRE parameters in BD */ 3881 3883 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, ··· 3885 3883 &pbd_e2->data.mac_addr.src_lo, 3886 3884 eth->h_source); 3887 3885 3886 + bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, 3887 + &pbd_e2->data.mac_addr.dst_mid, 3888 + &pbd_e2->data.mac_addr.dst_lo, 3889 + eth->h_dest); 3890 + } else if (bp->flags & TX_SWITCHING) { 3888 3891 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, 3889 3892 &pbd_e2->data.mac_addr.dst_mid, 3890 3893 &pbd_e2->data.mac_addr.dst_lo,
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
··· 496 496 497 497 /* select_queue callback */ 498 498 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, 499 - void *accel_priv); 499 + void *accel_priv, select_queue_fallback_t fallback); 500 500 501 501 static inline void bnx2x_update_rx_prod(struct bnx2x *bp, 502 502 struct bnx2x_fastpath *fp,
+1 -2
drivers/net/ethernet/broadcom/tg3.c
··· 6843 6843 6844 6844 work_mask |= opaque_key; 6845 6845 6846 - if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 6847 - (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { 6846 + if (desc->err_vlan & RXD_ERR_MASK) { 6848 6847 drop_it: 6849 6848 tg3_recycle_rx(tnapi, tpr, opaque_key, 6850 6849 desc_idx, *post_ptr);
+5 -1
drivers/net/ethernet/broadcom/tg3.h
··· 2608 2608 #define RXD_ERR_TOO_SMALL 0x00400000 2609 2609 #define RXD_ERR_NO_RESOURCES 0x00800000 2610 2610 #define RXD_ERR_HUGE_FRAME 0x01000000 2611 - #define RXD_ERR_MASK 0xffff0000 2611 + 2612 + #define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \ 2613 + RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \ 2614 + RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \ 2615 + RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME) 2612 2616 2613 2617 u32 reserved; 2614 2618 u32 opaque;
+30 -10
drivers/net/ethernet/brocade/bna/bnad.c
··· 707 707 else 708 708 skb_checksum_none_assert(skb); 709 709 710 - if (flags & BNA_CQ_EF_VLAN) 710 + if ((flags & BNA_CQ_EF_VLAN) && 711 + (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) 711 712 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); 712 713 713 714 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) ··· 2095 2094 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE; 2096 2095 } 2097 2096 2098 - rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED; 2097 + rx_config->vlan_strip_status = 2098 + (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ? 2099 + BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; 2099 2100 } 2100 2101 2101 2102 static void ··· 3248 3245 BNA_RXMODE_ALLMULTI; 3249 3246 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL); 3250 3247 3251 - if (bnad->cfg_flags & BNAD_CF_PROMISC) 3252 - bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); 3253 - else 3254 - bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); 3255 - 3256 3248 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3257 3249 } 3258 3250 ··· 3372 3374 return 0; 3373 3375 } 3374 3376 3377 + static int bnad_set_features(struct net_device *dev, netdev_features_t features) 3378 + { 3379 + struct bnad *bnad = netdev_priv(dev); 3380 + netdev_features_t changed = features ^ dev->features; 3381 + 3382 + if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) { 3383 + unsigned long flags; 3384 + 3385 + spin_lock_irqsave(&bnad->bna_lock, flags); 3386 + 3387 + if (features & NETIF_F_HW_VLAN_CTAG_RX) 3388 + bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); 3389 + else 3390 + bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); 3391 + 3392 + spin_unlock_irqrestore(&bnad->bna_lock, flags); 3393 + } 3394 + 3395 + return 0; 3396 + } 3397 + 3375 3398 #ifdef CONFIG_NET_POLL_CONTROLLER 3376 3399 static void 3377 3400 bnad_netpoll(struct net_device *netdev) ··· 3440 3421 .ndo_change_mtu = bnad_change_mtu, 3441 3422 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid, 3442 3423 .ndo_vlan_rx_kill_vid = 
bnad_vlan_rx_kill_vid, 3424 + .ndo_set_features = bnad_set_features, 3443 3425 #ifdef CONFIG_NET_POLL_CONTROLLER 3444 3426 .ndo_poll_controller = bnad_netpoll 3445 3427 #endif ··· 3453 3433 3454 3434 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | 3455 3435 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 3456 - NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX; 3436 + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX | 3437 + NETIF_F_HW_VLAN_CTAG_RX; 3457 3438 3458 3439 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | 3459 3440 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 3460 3441 NETIF_F_TSO | NETIF_F_TSO6; 3461 3442 3462 - netdev->features |= netdev->hw_features | 3463 - NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; 3443 + netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; 3464 3444 3465 3445 if (using_dac) 3466 3446 netdev->features |= NETIF_F_HIGHDMA;
+1
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 6179 6179 .id_table = cxgb4_pci_tbl, 6180 6180 .probe = init_one, 6181 6181 .remove = remove_one, 6182 + .shutdown = remove_one, 6182 6183 .err_handler = &cxgb4_eeh, 6183 6184 }; 6184 6185
+1
drivers/net/ethernet/dec/tulip/tulip_core.c
··· 1939 1939 pci_iounmap(pdev, tp->base_addr); 1940 1940 free_netdev (dev); 1941 1941 pci_release_regions (pdev); 1942 + pci_disable_device(pdev); 1942 1943 1943 1944 /* pci_power_off (pdev, -1); */ 1944 1945 }
+3 -1
drivers/net/ethernet/emulex/benet/be.h
··· 350 350 u32 roce_drops_crc; 351 351 }; 352 352 353 + /* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */ 354 + #define BE_RESET_VLAN_TAG_ID 0xFFFF 355 + 353 356 struct be_vf_cfg { 354 357 unsigned char mac_addr[ETH_ALEN]; 355 358 int if_handle; 356 359 int pmac_id; 357 - u16 def_vid; 358 360 u16 vlan_tag; 359 361 u32 tx_rate; 360 362 };
+48 -38
drivers/net/ethernet/emulex/benet/be_main.c
··· 913 913 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); 914 914 } 915 915 916 - static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, 917 - struct sk_buff *skb, 918 - bool *skip_hw_vlan) 916 + static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, 917 + struct sk_buff *skb, 918 + bool *skip_hw_vlan) 919 919 { 920 920 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 921 921 unsigned int eth_hdr_len; 922 922 struct iphdr *ip; 923 - 924 - /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less 925 - * may cause a transmit stall on that port. So the work-around is to 926 - * pad short packets (<= 32 bytes) to a 36-byte length. 927 - */ 928 - if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { 929 - if (skb_padto(skb, 36)) 930 - goto tx_drop; 931 - skb->len = 36; 932 - } 933 923 934 924 /* For padded packets, BE HW modifies tot_len field in IP header 935 925 * incorrecly when VLAN tag is inserted by HW. ··· 949 959 vlan_tx_tag_present(skb)) { 950 960 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); 951 961 if (unlikely(!skb)) 952 - goto tx_drop; 962 + goto err; 953 963 } 954 964 955 965 /* HW may lockup when VLAN HW tagging is requested on ··· 971 981 be_vlan_tag_tx_chk(adapter, skb)) { 972 982 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); 973 983 if (unlikely(!skb)) 974 - goto tx_drop; 984 + goto err; 975 985 } 976 986 977 987 return skb; 978 988 tx_drop: 979 989 dev_kfree_skb_any(skb); 990 + err: 980 991 return NULL; 992 + } 993 + 994 + static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, 995 + struct sk_buff *skb, 996 + bool *skip_hw_vlan) 997 + { 998 + /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or 999 + * less may cause a transmit stall on that port. So the work-around is 1000 + * to pad short packets (<= 32 bytes) to a 36-byte length. 
1001 + */ 1002 + if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { 1003 + if (skb_padto(skb, 36)) 1004 + return NULL; 1005 + skb->len = 36; 1006 + } 1007 + 1008 + if (BEx_chip(adapter) || lancer_chip(adapter)) { 1009 + skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan); 1010 + if (!skb) 1011 + return NULL; 1012 + } 1013 + 1014 + return skb; 981 1015 } 982 1016 983 1017 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) ··· 1171 1157 return status; 1172 1158 } 1173 1159 1160 + static void be_clear_promisc(struct be_adapter *adapter) 1161 + { 1162 + adapter->promiscuous = false; 1163 + adapter->flags &= ~BE_FLAGS_VLAN_PROMISC; 1164 + 1165 + be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); 1166 + } 1167 + 1174 1168 static void be_set_rx_mode(struct net_device *netdev) 1175 1169 { 1176 1170 struct be_adapter *adapter = netdev_priv(netdev); ··· 1192 1170 1193 1171 /* BE was previously in promiscuous mode; disable it */ 1194 1172 if (adapter->promiscuous) { 1195 - adapter->promiscuous = false; 1196 - be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); 1197 - 1173 + be_clear_promisc(adapter); 1198 1174 if (adapter->vlans_added) 1199 1175 be_vid_config(adapter); 1200 1176 } ··· 1307 1287 1308 1288 if (vlan || qos) { 1309 1289 vlan |= qos << VLAN_PRIO_SHIFT; 1310 - if (vf_cfg->vlan_tag != vlan) { 1311 - /* If this is new value, program it. Else skip. */ 1312 - vf_cfg->vlan_tag = vlan; 1290 + if (vf_cfg->vlan_tag != vlan) 1313 1291 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, 1314 1292 vf_cfg->if_handle, 0); 1315 - } 1316 1293 } else { 1317 1294 /* Reset Transparent Vlan Tagging. 
*/ 1318 - vf_cfg->vlan_tag = 0; 1319 - vlan = vf_cfg->def_vid; 1320 - status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, 1321 - vf_cfg->if_handle, 0); 1295 + status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, 1296 + vf + 1, vf_cfg->if_handle, 0); 1322 1297 } 1323 1298 1324 - 1325 - if (status) 1299 + if (!status) 1300 + vf_cfg->vlan_tag = vlan; 1301 + else 1326 1302 dev_info(&adapter->pdev->dev, 1327 - "VLAN %d config on VF %d failed\n", vlan, vf); 1303 + "VLAN %d config on VF %d failed\n", vlan, vf); 1328 1304 return status; 1329 1305 } 1330 1306 ··· 3029 3013 3030 3014 static int be_vf_setup(struct be_adapter *adapter) 3031 3015 { 3032 - struct be_vf_cfg *vf_cfg; 3033 - u16 def_vlan, lnk_speed; 3034 - int status, old_vfs, vf; 3035 3016 struct device *dev = &adapter->pdev->dev; 3017 + struct be_vf_cfg *vf_cfg; 3018 + int status, old_vfs, vf; 3036 3019 u32 privileges; 3020 + u16 lnk_speed; 3037 3021 3038 3022 old_vfs = pci_num_vf(adapter->pdev); 3039 3023 if (old_vfs) { ··· 3099 3083 NULL, vf + 1); 3100 3084 if (!status) 3101 3085 vf_cfg->tx_rate = lnk_speed; 3102 - 3103 - status = be_cmd_get_hsw_config(adapter, &def_vlan, 3104 - vf + 1, vf_cfg->if_handle, NULL); 3105 - if (status) 3106 - goto err; 3107 - vf_cfg->def_vid = def_vlan; 3108 3086 3109 3087 if (!old_vfs) 3110 3088 be_cmd_enable_vf(adapter, vf + 1);
+9 -8
drivers/net/ethernet/freescale/fec_main.c
··· 389 389 netdev_err(ndev, "Tx DMA memory map failed\n"); 390 390 return NETDEV_TX_OK; 391 391 } 392 - /* Send it on its way. Tell FEC it's ready, interrupt when done, 393 - * it's the last BD of the frame, and to put the CRC on the end. 394 - */ 395 - status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR 396 - | BD_ENET_TX_LAST | BD_ENET_TX_TC); 397 - bdp->cbd_sc = status; 398 392 399 393 if (fep->bufdesc_ex) { 400 394 ··· 409 415 ebdp->cbd_esc |= BD_ENET_TX_PINS; 410 416 } 411 417 } 418 + 419 + /* Send it on its way. Tell FEC it's ready, interrupt when done, 420 + * it's the last BD of the frame, and to put the CRC on the end. 421 + */ 422 + status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR 423 + | BD_ENET_TX_LAST | BD_ENET_TX_TC); 424 + bdp->cbd_sc = status; 412 425 413 426 bdp_pre = fec_enet_get_prevdesc(bdp, fep); 414 427 if ((id_entry->driver_data & FEC_QUIRK_ERR006358) && ··· 1779 1778 struct fec_enet_private *fep = netdev_priv(ndev); 1780 1779 int ret; 1781 1780 1782 - napi_enable(&fep->napi); 1783 - 1784 1781 /* I should reset the ring buffers here, but I don't yet know 1785 1782 * a simple way to do that. 1786 1783 */ ··· 1793 1794 fec_enet_free_buffers(ndev); 1794 1795 return ret; 1795 1796 } 1797 + 1798 + napi_enable(&fep->napi); 1796 1799 phy_start(fep->phy_dev); 1797 1800 netif_start_queue(ndev); 1798 1801 fep->opened = 1;
+3 -3
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 6881 6881 } 6882 6882 6883 6883 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, 6884 - void *accel_priv) 6884 + void *accel_priv, select_queue_fallback_t fallback) 6885 6885 { 6886 6886 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; 6887 6887 #ifdef IXGBE_FCOE ··· 6907 6907 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 6908 6908 break; 6909 6909 default: 6910 - return __netdev_pick_tx(dev, skb); 6910 + return fallback(dev, skb); 6911 6911 } 6912 6912 6913 6913 f = &adapter->ring_feature[RING_F_FCOE]; ··· 6920 6920 6921 6921 return txq + f->offset; 6922 6922 #else 6923 - return __netdev_pick_tx(dev, skb); 6923 + return fallback(dev, skb); 6924 6924 #endif 6925 6925 } 6926 6926
+1 -1
drivers/net/ethernet/lantiq_etop.c
··· 619 619 620 620 static u16 621 621 ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, 622 - void *accel_priv) 622 + void *accel_priv, select_queue_fallback_t fallback) 623 623 { 624 624 /* we are currently only using the first queue */ 625 625 return 0;
+3 -3
drivers/net/ethernet/marvell/Kconfig
··· 43 43 This driver is used by the MV643XX_ETH and MVNETA drivers. 44 44 45 45 config MVNETA 46 - tristate "Marvell Armada 370/XP network interface support" 47 - depends on MACH_ARMADA_370_XP 46 + tristate "Marvell Armada 370/38x/XP network interface support" 47 + depends on PLAT_ORION 48 48 select MVMDIO 49 49 ---help--- 50 50 This driver supports the network interface units in the 51 - Marvell ARMADA XP and ARMADA 370 SoC family. 51 + Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family. 52 52 53 53 Note that this driver is distinct from the mv643xx_eth 54 54 driver, which should be used for the older Marvell SoCs
+2 -2
drivers/net/ethernet/mellanox/mlx4/en_tx.c
··· 629 629 } 630 630 631 631 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, 632 - void *accel_priv) 632 + void *accel_priv, select_queue_fallback_t fallback) 633 633 { 634 634 struct mlx4_en_priv *priv = netdev_priv(dev); 635 635 u16 rings_p_up = priv->num_tx_rings_p_up; ··· 641 641 if (vlan_tx_tag_present(skb)) 642 642 up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; 643 643 644 - return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up; 644 + return fallback(dev, skb) % rings_p_up + up * rings_p_up; 645 645 } 646 646 647 647 static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
+2 -2
drivers/net/ethernet/mellanox/mlx4/mlx4.h
··· 51 51 52 52 #define DRV_NAME "mlx4_core" 53 53 #define PFX DRV_NAME ": " 54 - #define DRV_VERSION "1.1" 55 - #define DRV_RELDATE "Dec, 2011" 54 + #define DRV_VERSION "2.2-1" 55 + #define DRV_RELDATE "Feb, 2014" 56 56 57 57 #define MLX4_FS_UDP_UC_EN (1 << 1) 58 58 #define MLX4_FS_TCP_UC_EN (1 << 2)
+3 -3
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
··· 57 57 #include "en_port.h" 58 58 59 59 #define DRV_NAME "mlx4_en" 60 - #define DRV_VERSION "2.0" 61 - #define DRV_RELDATE "Dec 2011" 60 + #define DRV_VERSION "2.2-1" 61 + #define DRV_RELDATE "Feb 2014" 62 62 63 63 #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) 64 64 ··· 723 723 724 724 void mlx4_en_tx_irq(struct mlx4_cq *mcq); 725 725 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, 726 - void *accel_priv); 726 + void *accel_priv, select_queue_fallback_t fallback); 727 727 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 728 728 729 729 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 46 46 #include "mlx5_core.h" 47 47 48 48 #define DRIVER_NAME "mlx5_core" 49 - #define DRIVER_VERSION "1.0" 50 - #define DRIVER_RELDATE "June 2013" 49 + #define DRIVER_VERSION "2.2-1" 50 + #define DRIVER_RELDATE "Feb 2014" 51 51 52 52 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); 53 53 MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library");
+1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
··· 340 340 if (qlcnic_sriov_vf_check(adapter)) 341 341 return -EINVAL; 342 342 num_msix = 1; 343 + adapter->drv_sds_rings = QLCNIC_SINGLE_RING; 343 344 adapter->drv_tx_rings = QLCNIC_SINGLE_RING; 344 345 } 345 346 }
+2 -2
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
··· 807 807 !type->tc_param_valid) 808 808 return; 809 809 810 - if (tc < 0 || (tc > QLC_DCB_MAX_TC)) 810 + if (tc < 0 || (tc >= QLC_DCB_MAX_TC)) 811 811 return; 812 812 813 813 tc_cfg = &type->tc_cfg[tc]; ··· 843 843 !type->tc_param_valid) 844 844 return; 845 845 846 - if (pgid < 0 || pgid > QLC_DCB_MAX_PG) 846 + if (pgid < 0 || pgid >= QLC_DCB_MAX_PG) 847 847 return; 848 848 849 849 pgcfg = &type->pg_cfg[pgid];
+3 -2
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
··· 816 816 817 817 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { 818 818 qlcnic_disable_multi_tx(adapter); 819 + adapter->drv_sds_rings = QLCNIC_SINGLE_RING; 819 820 820 821 err = qlcnic_enable_msi_legacy(adapter); 821 - if (!err) 822 + if (err) 822 823 return err; 823 824 } 824 825 } ··· 3864 3863 strcpy(buf, "Tx"); 3865 3864 } 3866 3865 3867 - if (!qlcnic_use_msi_x && !qlcnic_use_msi) { 3866 + if (!QLCNIC_IS_MSI_FAMILY(adapter)) { 3868 3867 netdev_err(netdev, "No RSS/TSS support in INT-x mode\n"); 3869 3868 return -EINVAL; 3870 3869 }
-9
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
··· 13 13 #define QLC_VF_MIN_TX_RATE 100 14 14 #define QLC_VF_MAX_TX_RATE 9999 15 15 #define QLC_MAC_OPCODE_MASK 0x7 16 - #define QLC_MAC_STAR_ADD 6 17 - #define QLC_MAC_STAR_DEL 7 18 16 #define QLC_VF_FLOOD_BIT BIT_16 19 17 #define QLC_FLOOD_MODE 0x5 20 18 ··· 1203 1205 struct qlcnic_macvlan_mbx *macvlan; 1204 1206 struct qlcnic_vport *vp = vf->vp; 1205 1207 u8 op, new_op; 1206 - 1207 - if (((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_ADD) || 1208 - ((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_DEL)) { 1209 - netdev_err(adapter->netdev, "MAC + any VLAN filter not allowed from VF %d\n", 1210 - vf->pci_func); 1211 - return -EINVAL; 1212 - } 1213 1208 1214 1209 if (!(cmd->req.arg[1] & BIT_8)) 1215 1210 return -EINVAL;
+2
drivers/net/ethernet/realtek/r8169.c
··· 7118 7118 } 7119 7119 7120 7120 mutex_init(&tp->wk.mutex); 7121 + u64_stats_init(&tp->rx_stats.syncp); 7122 + u64_stats_init(&tp->tx_stats.syncp); 7121 7123 7122 7124 /* Get MAC address */ 7123 7125 for (i = 0; i < ETH_ALEN; i++)
+7
drivers/net/ethernet/sfc/ptp.c
··· 1668 1668 struct efx_ptp_data *ptp = efx->ptp_data; 1669 1669 int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); 1670 1670 1671 + if (!ptp) { 1672 + if (net_ratelimit()) 1673 + netif_warn(efx, drv, efx->net_dev, 1674 + "Received PTP event but PTP not set up\n"); 1675 + return; 1676 + } 1677 + 1671 1678 if (!ptp->enabled) 1672 1679 return; 1673 1680
+11
drivers/net/ethernet/stmicro/stmmac/Kconfig
··· 37 37 stmmac device driver. This driver is used for A20/A31 38 38 GMAC ethernet controller. 39 39 40 + config DWMAC_STI 41 + bool "STi GMAC support" 42 + depends on STMMAC_PLATFORM && ARCH_STI 43 + default y 44 + ---help--- 45 + Support for ethernet controller on STi SOCs. 46 + 47 + This selects STi SoC glue layer support for the stmmac 48 + device driver. This driver is used for the STi series 49 + SOCs GMAC ethernet controller. 50 + 51 + config STMMAC_PCI 52 + bool "STMMAC PCI bus support" 53 + depends on STMMAC_ETH && PCI
+1
drivers/net/ethernet/stmicro/stmmac/Makefile
··· 2 2 stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o 3 3 stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o 4 4 stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o 5 + stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o 5 6 stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ 6 7 chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ 7 8 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
+330
drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
··· 1 + /** 2 + * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer 3 + * 4 + * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited 5 + * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com> 6 + * 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + */ 13 + 14 + #include <linux/kernel.h> 15 + #include <linux/slab.h> 16 + #include <linux/platform_device.h> 17 + #include <linux/stmmac.h> 18 + #include <linux/phy.h> 19 + #include <linux/mfd/syscon.h> 20 + #include <linux/regmap.h> 21 + #include <linux/clk.h> 22 + #include <linux/of.h> 23 + #include <linux/of_net.h> 24 + 25 + /** 26 + * STi GMAC glue logic. 27 + * -------------------- 28 + * 29 + * _ 30 + * | \ 31 + * --------|0 \ ETH_SEL_INTERNAL_NOTEXT_PHYCLK 32 + * phyclk | |___________________________________________ 33 + * | | | (phyclk-in) 34 + * --------|1 / | 35 + * int-clk |_ / | 36 + * | _ 37 + * | | \ 38 + * |_______|1 \ ETH_SEL_TX_RETIME_CLK 39 + * | |___________________________ 40 + * | | (tx-retime-clk) 41 + * _______|0 / 42 + * | |_ / 43 + * _ | 44 + * | \ | 45 + * --------|0 \ | 46 + * clk_125 | |__| 47 + * | | ETH_SEL_TXCLK_NOT_CLK125 48 + * --------|1 / 49 + * txclk |_ / 50 + * 51 + * 52 + * ETH_SEL_INTERNAL_NOTEXT_PHYCLK is valid only for RMII where PHY can 53 + * generate 50MHz clock or MAC can generate it. 54 + * This bit is configured by "st,ext-phyclk" property. 55 + * 56 + * ETH_SEL_TXCLK_NOT_CLK125 is only valid for gigabit modes, where the 125Mhz 57 + * clock either comes from clk-125 pin or txclk pin. This configuration is 58 + * totally driven by the board wiring. This bit is configured by 59 + * "st,tx-retime-src" property. 
60 + * 61 + * TXCLK configuration is different for different phy interface modes 62 + * and changes according to link speed in modes like RGMII. 63 + * 64 + * Below table summarizes the clock requirement and clock sources for 65 + * supported phy interface modes with link speeds. 66 + * ________________________________________________ 67 + *| PHY_MODE | 1000 Mbit Link | 100 Mbit Link | 68 + * ------------------------------------------------ 69 + *| MII | n/a | 25Mhz | 70 + *| | | txclk | 71 + * ------------------------------------------------ 72 + *| GMII | 125Mhz | 25Mhz | 73 + *| | clk-125/txclk | txclk | 74 + * ------------------------------------------------ 75 + *| RGMII | 125Mhz | 25Mhz | 76 + *| | clk-125/txclk | clkgen | 77 + * ------------------------------------------------ 78 + *| RMII | n/a | 25Mhz | 79 + *| | |clkgen/phyclk-in | 80 + * ------------------------------------------------ 81 + * 82 + * TX lines are always retimed with a clk, which can vary depending 83 + * on the board configuration. Below is the table of these bits 84 + * in eth configuration register depending on source of retime clk. 
85 + * 86 + *--------------------------------------------------------------- 87 + * src | tx_rt_clk | int_not_ext_phyclk | txclk_n_clk125| 88 + *--------------------------------------------------------------- 89 + * txclk | 0 | n/a | 1 | 90 + *--------------------------------------------------------------- 91 + * ck_125| 0 | n/a | 0 | 92 + *--------------------------------------------------------------- 93 + * phyclk| 1 | 0 | n/a | 94 + *--------------------------------------------------------------- 95 + * clkgen| 1 | 1 | n/a | 96 + *--------------------------------------------------------------- 97 + */ 98 + 99 + /* Register definition */ 100 + 101 + /* 3 bits [8:6] 102 + * [6:6] ETH_SEL_TXCLK_NOT_CLK125 103 + * [7:7] ETH_SEL_INTERNAL_NOTEXT_PHYCLK 104 + * [8:8] ETH_SEL_TX_RETIME_CLK 105 + * 106 + */ 107 + 108 + #define TX_RETIME_SRC_MASK GENMASK(8, 6) 109 + #define ETH_SEL_TX_RETIME_CLK BIT(8) 110 + #define ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7) 111 + #define ETH_SEL_TXCLK_NOT_CLK125 BIT(6) 112 + 113 + #define ENMII_MASK GENMASK(5, 5) 114 + #define ENMII BIT(5) 115 + 116 + /** 117 + * 3 bits [4:2] 118 + * 000-GMII/MII 119 + * 001-RGMII 120 + * 010-SGMII 121 + * 100-RMII 122 + */ 123 + #define MII_PHY_SEL_MASK GENMASK(4, 2) 124 + #define ETH_PHY_SEL_RMII BIT(4) 125 + #define ETH_PHY_SEL_SGMII BIT(3) 126 + #define ETH_PHY_SEL_RGMII BIT(2) 127 + #define ETH_PHY_SEL_GMII 0x0 128 + #define ETH_PHY_SEL_MII 0x0 129 + 130 + #define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \ 131 + iface == PHY_INTERFACE_MODE_RGMII_ID || \ 132 + iface == PHY_INTERFACE_MODE_RGMII_RXID || \ 133 + iface == PHY_INTERFACE_MODE_RGMII_TXID) 134 + 135 + #define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \ 136 + iface == PHY_INTERFACE_MODE_GMII) 137 + 138 + struct sti_dwmac { 139 + int interface; 140 + bool ext_phyclk; 141 + bool is_tx_retime_src_clk_125; 142 + struct clk *clk; 143 + int reg; 144 + struct device *dev; 145 + struct regmap *regmap; 146 + }; 
147 + 148 + static u32 phy_intf_sels[] = { 149 + [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII, 150 + [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII, 151 + [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII, 152 + [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII, 153 + [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII, 154 + [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII, 155 + }; 156 + 157 + enum { 158 + TX_RETIME_SRC_NA = 0, 159 + TX_RETIME_SRC_TXCLK = 1, 160 + TX_RETIME_SRC_CLK_125, 161 + TX_RETIME_SRC_PHYCLK, 162 + TX_RETIME_SRC_CLKGEN, 163 + }; 164 + 165 + static const char *const tx_retime_srcs[] = { 166 + [TX_RETIME_SRC_NA] = "", 167 + [TX_RETIME_SRC_TXCLK] = "txclk", 168 + [TX_RETIME_SRC_CLK_125] = "clk_125", 169 + [TX_RETIME_SRC_PHYCLK] = "phyclk", 170 + [TX_RETIME_SRC_CLKGEN] = "clkgen", 171 + }; 172 + 173 + static u32 tx_retime_val[] = { 174 + [TX_RETIME_SRC_TXCLK] = ETH_SEL_TXCLK_NOT_CLK125, 175 + [TX_RETIME_SRC_CLK_125] = 0x0, 176 + [TX_RETIME_SRC_PHYCLK] = ETH_SEL_TX_RETIME_CLK, 177 + [TX_RETIME_SRC_CLKGEN] = ETH_SEL_TX_RETIME_CLK | 178 + ETH_SEL_INTERNAL_NOTEXT_PHYCLK, 179 + }; 180 + 181 + static void setup_retime_src(struct sti_dwmac *dwmac, u32 spd) 182 + { 183 + u32 src = 0, freq = 0; 184 + 185 + if (spd == SPEED_100) { 186 + if (dwmac->interface == PHY_INTERFACE_MODE_MII || 187 + dwmac->interface == PHY_INTERFACE_MODE_GMII) { 188 + src = TX_RETIME_SRC_TXCLK; 189 + } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) { 190 + if (dwmac->ext_phyclk) { 191 + src = TX_RETIME_SRC_PHYCLK; 192 + } else { 193 + src = TX_RETIME_SRC_CLKGEN; 194 + freq = 50000000; 195 + } 196 + 197 + } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) { 198 + src = TX_RETIME_SRC_CLKGEN; 199 + freq = 25000000; 200 + } 201 + 202 + if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk) 203 + clk_set_rate(dwmac->clk, freq); 204 + 205 + } else if (spd == SPEED_1000) { 206 + if (dwmac->is_tx_retime_src_clk_125) 207 + src = TX_RETIME_SRC_CLK_125; 208 + else 209 + src = TX_RETIME_SRC_TXCLK; 210 + } 
211 + 212 + regmap_update_bits(dwmac->regmap, dwmac->reg, 213 + TX_RETIME_SRC_MASK, tx_retime_val[src]); 214 + } 215 + 216 + static void sti_dwmac_exit(struct platform_device *pdev, void *priv) 217 + { 218 + struct sti_dwmac *dwmac = priv; 219 + 220 + if (dwmac->clk) 221 + clk_disable_unprepare(dwmac->clk); 222 + } 223 + 224 + static void sti_fix_mac_speed(void *priv, unsigned int spd) 225 + { 226 + struct sti_dwmac *dwmac = priv; 227 + 228 + setup_retime_src(dwmac, spd); 229 + 230 + return; 231 + } 232 + 233 + static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, 234 + struct platform_device *pdev) 235 + { 236 + struct resource *res; 237 + struct device *dev = &pdev->dev; 238 + struct device_node *np = dev->of_node; 239 + struct regmap *regmap; 240 + int err; 241 + 242 + if (!np) 243 + return -EINVAL; 244 + 245 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf"); 246 + if (!res) 247 + return -ENODATA; 248 + 249 + regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon"); 250 + if (IS_ERR(regmap)) 251 + return PTR_ERR(regmap); 252 + 253 + dwmac->dev = dev; 254 + dwmac->interface = of_get_phy_mode(np); 255 + dwmac->regmap = regmap; 256 + dwmac->reg = res->start; 257 + dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk"); 258 + dwmac->is_tx_retime_src_clk_125 = false; 259 + 260 + if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { 261 + const char *rs; 262 + 263 + err = of_property_read_string(np, "st,tx-retime-src", &rs); 264 + if (err < 0) { 265 + dev_err(dev, "st,tx-retime-src not specified\n"); 266 + return err; 267 + } 268 + 269 + if (!strcasecmp(rs, "clk_125")) 270 + dwmac->is_tx_retime_src_clk_125 = true; 271 + } 272 + 273 + dwmac->clk = devm_clk_get(dev, "sti-ethclk"); 274 + 275 + if (IS_ERR(dwmac->clk)) 276 + dwmac->clk = NULL; 277 + 278 + return 0; 279 + } 280 + 281 + static int sti_dwmac_init(struct platform_device *pdev, void *priv) 282 + { 283 + struct sti_dwmac *dwmac = priv; 284 + struct regmap *regmap = dwmac->regmap; 285 + 
int iface = dwmac->interface; 286 + u32 reg = dwmac->reg; 287 + u32 val, spd; 288 + 289 + if (dwmac->clk) 290 + clk_prepare_enable(dwmac->clk); 291 + 292 + regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]); 293 + 294 + val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII; 295 + regmap_update_bits(regmap, reg, ENMII_MASK, val); 296 + 297 + if (IS_PHY_IF_MODE_GBIT(iface)) 298 + spd = SPEED_1000; 299 + else 300 + spd = SPEED_100; 301 + 302 + setup_retime_src(dwmac, spd); 303 + 304 + return 0; 305 + } 306 + 307 + static void *sti_dwmac_setup(struct platform_device *pdev) 308 + { 309 + struct sti_dwmac *dwmac; 310 + int ret; 311 + 312 + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); 313 + if (!dwmac) 314 + return ERR_PTR(-ENOMEM); 315 + 316 + ret = sti_dwmac_parse_data(dwmac, pdev); 317 + if (ret) { 318 + dev_err(&pdev->dev, "Unable to parse OF data\n"); 319 + return ERR_PTR(ret); 320 + } 321 + 322 + return dwmac; 323 + } 324 + 325 + const struct stmmac_of_data sti_gmac_data = { 326 + .fix_mac_speed = sti_fix_mac_speed, 327 + .setup = sti_dwmac_setup, 328 + .init = sti_dwmac_init, 329 + .exit = sti_dwmac_exit, 330 + };
+3
drivers/net/ethernet/stmicro/stmmac/stmmac.h
··· 133 133 #ifdef CONFIG_DWMAC_SUNXI 134 134 extern const struct stmmac_of_data sun7i_gmac_data; 135 135 #endif 136 + #ifdef CONFIG_DWMAC_STI 137 + extern const struct stmmac_of_data sti_gmac_data; 138 + #endif 136 139 extern struct platform_driver stmmac_pltfr_driver; 137 140 static inline int stmmac_register_platform(void) 138 141 {
+1 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 1705 1705 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); 1706 1706 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); 1707 1707 1708 - alloc_dma_desc_resources(priv); 1708 + ret = alloc_dma_desc_resources(priv); 1709 1709 if (ret < 0) { 1710 1710 pr_err("%s: DMA descriptors allocation failed\n", __func__); 1711 1711 goto dma_desc_error;
+5
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
··· 33 33 #ifdef CONFIG_DWMAC_SUNXI 34 34 { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, 35 35 #endif 36 + #ifdef CONFIG_DWMAC_STI 37 + { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data}, 38 + { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, 39 + { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data}, 40 + #endif 36 41 /* SoC specific glue layers should come before generic bindings */ 37 42 { .compatible = "st,spear600-gmac"}, 38 43 { .compatible = "snps,dwmac-3.610"},
+14 -3
drivers/net/ethernet/ti/cpsw.c
··· 554 554 * common for both the interface as the interface shares 555 555 * the same hardware resource. 556 556 */ 557 - for (i = 0; i <= priv->data.slaves; i++) 557 + for (i = 0; i < priv->data.slaves; i++) 558 558 if (priv->slaves[i].ndev->flags & IFF_PROMISC) 559 559 flag = true; 560 560 ··· 578 578 unsigned long timeout = jiffies + HZ; 579 579 580 580 /* Disable Learn for all ports */ 581 - for (i = 0; i <= priv->data.slaves; i++) { 581 + for (i = 0; i < priv->data.slaves; i++) { 582 582 cpsw_ale_control_set(ale, i, 583 583 ALE_PORT_NOLEARN, 1); 584 584 cpsw_ale_control_set(ale, i, ··· 606 606 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); 607 607 608 608 /* Enable Learn for all ports */ 609 - for (i = 0; i <= priv->data.slaves; i++) { 609 + for (i = 0; i < priv->data.slaves; i++) { 610 610 cpsw_ale_control_set(ale, i, 611 611 ALE_PORT_NOLEARN, 0); 612 612 cpsw_ale_control_set(ale, i, ··· 1164 1164 1165 1165 static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) 1166 1166 { 1167 + u32 slave_port; 1168 + 1169 + slave_port = cpsw_get_slave_port(priv, slave->slave_num); 1170 + 1167 1171 if (!slave->phy) 1168 1172 return; 1169 1173 phy_stop(slave->phy); 1170 1174 phy_disconnect(slave->phy); 1171 1175 slave->phy = NULL; 1176 + cpsw_ale_control_set(priv->ale, slave_port, 1177 + ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); 1172 1178 } 1173 1179 1174 1180 static int cpsw_ndo_open(struct net_device *ndev) ··· 1902 1896 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); 1903 1897 1904 1898 slave_data->phy_if = of_get_phy_mode(slave_node); 1899 + if (slave_data->phy_if < 0) { 1900 + pr_err("Missing or malformed slave[%d] phy-mode property\n", 1901 + i); 1902 + return slave_data->phy_if; 1903 + } 1905 1904 1906 1905 if (data->dual_emac) { 1907 1906 if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
+1 -1
drivers/net/ethernet/tile/tilegx.c
··· 2071 2071 2072 2072 /* Return subqueue id on this core (one per core). */ 2073 2073 static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, 2074 - void *accel_priv) 2074 + void *accel_priv, select_queue_fallback_t fallback) 2075 2075 { 2076 2076 return smp_processor_id(); 2077 2077 }
+9 -4
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 26 26 #include <linux/netdevice.h> 27 27 #include <linux/of_mdio.h> 28 28 #include <linux/of_platform.h> 29 + #include <linux/of_irq.h> 29 30 #include <linux/of_address.h> 30 31 #include <linux/skbuff.h> 31 32 #include <linux/spinlock.h> ··· 601 600 size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; 602 601 packets++; 603 602 604 - lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM; 603 + ++lp->tx_bd_ci; 604 + lp->tx_bd_ci %= TX_BD_NUM; 605 605 cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; 606 606 status = cur_p->status; 607 607 } ··· 688 686 skb_headlen(skb), DMA_TO_DEVICE); 689 687 690 688 for (ii = 0; ii < num_frag; ii++) { 691 - lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; 689 + ++lp->tx_bd_tail; 690 + lp->tx_bd_tail %= TX_BD_NUM; 692 691 cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; 693 692 frag = &skb_shinfo(skb)->frags[ii]; 694 693 cur_p->phys = dma_map_single(ndev->dev.parent, ··· 705 702 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; 706 703 /* Start the transfer */ 707 704 axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); 708 - lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; 705 + ++lp->tx_bd_tail; 706 + lp->tx_bd_tail %= TX_BD_NUM; 709 707 710 708 return NETDEV_TX_OK; 711 709 } ··· 778 774 cur_p->status = 0; 779 775 cur_p->sw_id_offset = (u32) new_skb; 780 776 781 - lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM; 777 + ++lp->rx_bd_ci; 778 + lp->rx_bd_ci %= RX_BD_NUM; 782 779 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 783 780 } 784 781
+38 -15
drivers/net/hyperv/netvsc_drv.c
··· 88 88 { 89 89 struct net_device_context *net_device_ctx = netdev_priv(net); 90 90 struct hv_device *device_obj = net_device_ctx->device_ctx; 91 + struct netvsc_device *nvdev; 92 + struct rndis_device *rdev; 91 93 int ret = 0; 94 + 95 + netif_carrier_off(net); 92 96 93 97 /* Open up the device */ 94 98 ret = rndis_filter_open(device_obj); ··· 102 98 } 103 99 104 100 netif_start_queue(net); 101 + 102 + nvdev = hv_get_drvdata(device_obj); 103 + rdev = nvdev->extension; 104 + if (!rdev->link_state) 105 + netif_carrier_on(net); 105 106 106 107 return ret; 107 108 } ··· 238 229 struct net_device *net; 239 230 struct net_device_context *ndev_ctx; 240 231 struct netvsc_device *net_device; 232 + struct rndis_device *rdev; 241 233 242 234 net_device = hv_get_drvdata(device_obj); 235 + rdev = net_device->extension; 236 + 237 + rdev->link_state = status != 1; 238 + 243 239 net = net_device->ndev; 244 240 245 - if (!net) { 246 - netdev_err(net, "got link status but net device " 247 - "not initialized yet\n"); 241 + if (!net || net->reg_state != NETREG_REGISTERED) 248 242 return; 249 - } 250 243 244 + ndev_ctx = netdev_priv(net); 251 245 if (status == 1) { 252 - netif_carrier_on(net); 253 - ndev_ctx = netdev_priv(net); 254 246 schedule_delayed_work(&ndev_ctx->dwork, 0); 255 247 schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); 256 248 } else { 257 - netif_carrier_off(net); 249 + schedule_delayed_work(&ndev_ctx->dwork, 0); 258 250 } 259 251 } 260 252 ··· 398 388 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add 399 389 * another netif_notify_peers() into a delayed work, otherwise GARP packet 400 390 * will not be sent after quick migration, and cause network disconnection. 391 + * Also, we update the carrier status here. 
401 392 */ 402 - static void netvsc_send_garp(struct work_struct *w) 393 + static void netvsc_link_change(struct work_struct *w) 403 394 { 404 395 struct net_device_context *ndev_ctx; 405 396 struct net_device *net; 406 397 struct netvsc_device *net_device; 398 + struct rndis_device *rdev; 399 + bool notify; 400 + 401 + rtnl_lock(); 407 402 408 403 ndev_ctx = container_of(w, struct net_device_context, dwork.work); 409 404 net_device = hv_get_drvdata(ndev_ctx->device_ctx); 405 + rdev = net_device->extension; 410 406 net = net_device->ndev; 411 - netdev_notify_peers(net); 407 + 408 + if (rdev->link_state) { 409 + netif_carrier_off(net); 410 + notify = false; 411 + } else { 412 + netif_carrier_on(net); 413 + notify = true; 414 + } 415 + 416 + rtnl_unlock(); 417 + 418 + if (notify) 419 + netdev_notify_peers(net); 412 420 } 413 421 414 422 ··· 442 414 if (!net) 443 415 return -ENOMEM; 444 416 445 - /* Set initial state */ 446 - netif_carrier_off(net); 447 - 448 417 net_device_ctx = netdev_priv(net); 449 418 net_device_ctx->device_ctx = dev; 450 419 hv_set_drvdata(dev, net); 451 - INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp); 420 + INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); 452 421 INIT_WORK(&net_device_ctx->work, do_set_multicast); 453 422 454 423 net->netdev_ops = &device_ops; ··· 467 442 return ret; 468 443 } 469 444 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); 470 - 471 - netif_carrier_on(net); 472 445 473 446 ret = register_netdev(net); 474 447 if (ret != 0) {
-1
drivers/net/irda/irtty-sir.c
··· 522 522 sirdev_put_instance(priv->dev); 523 523 524 524 /* Stop tty */ 525 - irtty_stop_receiver(tty, TRUE); 526 525 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 527 526 if (tty->ops->stop) 528 527 tty->ops->stop(tty);
+8 -4
drivers/net/macvlan.c
··· 506 506 static struct lock_class_key macvlan_netdev_xmit_lock_key; 507 507 static struct lock_class_key macvlan_netdev_addr_lock_key; 508 508 509 + #define ALWAYS_ON_FEATURES \ 510 + (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX) 511 + 509 512 #define MACVLAN_FEATURES \ 510 513 (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ 511 514 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ ··· 542 539 dev->state = (dev->state & ~MACVLAN_STATE_MASK) | 543 540 (lowerdev->state & MACVLAN_STATE_MASK); 544 541 dev->features = lowerdev->features & MACVLAN_FEATURES; 545 - dev->features |= NETIF_F_LLTX; 542 + dev->features |= ALWAYS_ON_FEATURES; 546 543 dev->gso_max_size = lowerdev->gso_max_size; 547 544 dev->iflink = lowerdev->ifindex; 548 545 dev->hard_header_len = lowerdev->hard_header_len; ··· 702 699 features = netdev_increment_features(vlan->lowerdev->features, 703 700 features, 704 701 mask); 705 - features |= NETIF_F_LLTX; 702 + features |= ALWAYS_ON_FEATURES; 706 703 707 704 return features; 708 705 } ··· 882 879 dev->priv_flags |= IFF_MACVLAN; 883 880 err = netdev_upper_dev_link(lowerdev, dev); 884 881 if (err) 885 - goto destroy_port; 886 - 882 + goto unregister_netdev; 887 883 888 884 list_add_tail_rcu(&vlan->list, &port->vlans); 889 885 netif_stacked_transfer_operstate(lowerdev, dev); 890 886 891 887 return 0; 892 888 889 + unregister_netdev: 890 + unregister_netdevice(dev); 893 891 destroy_port: 894 892 port->count -= 1; 895 893 if (!port->count)
+8 -5
drivers/net/phy/dp83640.c
··· 1006 1006 } else 1007 1007 list_add_tail(&dp83640->list, &clock->phylist); 1008 1008 1009 - if (clock->chosen && !list_empty(&clock->phylist)) 1010 - recalibrate(clock); 1011 - else 1012 - enable_broadcast(dp83640->phydev, clock->page, 1); 1013 - 1014 1009 dp83640_clock_put(clock); 1015 1010 return 0; 1016 1011 ··· 1058 1063 1059 1064 static int dp83640_config_init(struct phy_device *phydev) 1060 1065 { 1066 + struct dp83640_private *dp83640 = phydev->priv; 1067 + struct dp83640_clock *clock = dp83640->clock; 1068 + 1069 + if (clock->chosen && !list_empty(&clock->phylist)) 1070 + recalibrate(clock); 1071 + else 1072 + enable_broadcast(phydev, clock->page, 1); 1073 + 1061 1074 enable_status_frames(phydev, true); 1062 1075 ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE); 1063 1076 return 0;
+9 -7
drivers/net/phy/phy_device.c
··· 916 916 int err; 917 917 int lpa; 918 918 int lpagb = 0; 919 + int common_adv; 920 + int common_adv_gb = 0; 919 921 920 922 /* Update the link, but return if there was an error */ 921 923 err = genphy_update_link(phydev); ··· 939 937 940 938 phydev->lp_advertising = 941 939 mii_stat1000_to_ethtool_lpa_t(lpagb); 942 - lpagb &= adv << 2; 940 + common_adv_gb = lpagb & adv << 2; 943 941 } 944 942 945 943 lpa = phy_read(phydev, MII_LPA); ··· 952 950 if (adv < 0) 953 951 return adv; 954 952 955 - lpa &= adv; 953 + common_adv = lpa & adv; 956 954 957 955 phydev->speed = SPEED_10; 958 956 phydev->duplex = DUPLEX_HALF; 959 957 phydev->pause = 0; 960 958 phydev->asym_pause = 0; 961 959 962 - if (lpagb & (LPA_1000FULL | LPA_1000HALF)) { 960 + if (common_adv_gb & (LPA_1000FULL | LPA_1000HALF)) { 963 961 phydev->speed = SPEED_1000; 964 962 965 - if (lpagb & LPA_1000FULL) 963 + if (common_adv_gb & LPA_1000FULL) 966 964 phydev->duplex = DUPLEX_FULL; 967 - } else if (lpa & (LPA_100FULL | LPA_100HALF)) { 965 + } else if (common_adv & (LPA_100FULL | LPA_100HALF)) { 968 966 phydev->speed = SPEED_100; 969 967 970 - if (lpa & LPA_100FULL) 968 + if (common_adv & LPA_100FULL) 971 969 phydev->duplex = DUPLEX_FULL; 972 970 } else 973 - if (lpa & LPA_10FULL) 971 + if (common_adv & LPA_10FULL) 974 972 phydev->duplex = DUPLEX_FULL; 975 973 976 974 if (phydev->duplex == DUPLEX_FULL) {
+1 -1
drivers/net/team/team.c
··· 1648 1648 } 1649 1649 1650 1650 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, 1651 - void *accel_priv) 1651 + void *accel_priv, select_queue_fallback_t fallback) 1652 1652 { 1653 1653 /* 1654 1654 * This helper function exists to help dev_pick_tx get the correct
+4 -2
drivers/net/tun.c
··· 366 366 * hope the rxq no. may help here. 367 367 */ 368 368 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, 369 - void *accel_priv) 369 + void *accel_priv, select_queue_fallback_t fallback) 370 370 { 371 371 struct tun_struct *tun = netdev_priv(dev); 372 372 struct tun_flow_entry *e; ··· 1686 1686 TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | 1687 1687 NETIF_F_HW_VLAN_STAG_TX; 1688 1688 dev->features = dev->hw_features; 1689 - dev->vlan_features = dev->features; 1689 + dev->vlan_features = dev->features & 1690 + ~(NETIF_F_HW_VLAN_CTAG_TX | 1691 + NETIF_F_HW_VLAN_STAG_TX); 1690 1692 1691 1693 INIT_LIST_HEAD(&tun->disabled); 1692 1694 err = tun_attach(tun, file, false);
-1
drivers/net/usb/Kconfig
··· 296 296 tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices" 297 297 depends on USB_USBNET 298 298 select CRC32 299 - default y 300 299 ---help--- 301 300 Say Y if you want to use one of the following 100Mbps USB Ethernet 302 301 device based on the CoreChip-sz SR9800 chip.
+2 -1
drivers/net/usb/asix_devices.c
··· 917 917 .status = asix_status, 918 918 .link_reset = ax88178_link_reset, 919 919 .reset = ax88178_reset, 920 - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR, 920 + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | 921 + FLAG_MULTI_PACKET, 921 922 .rx_fixup = asix_rx_fixup_common, 922 923 .tx_fixup = asix_tx_fixup, 923 924 };
+38 -8
drivers/net/usb/ax88179_178a.c
··· 1029 1029 dev->mii.phy_id = 0x03; 1030 1030 dev->mii.supports_gmii = 1; 1031 1031 1032 - if (usb_device_no_sg_constraint(dev->udev)) 1033 - dev->can_dma_sg = 1; 1034 - 1035 1032 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1036 1033 NETIF_F_RXCSUM; 1037 1034 1038 1035 dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1039 1036 NETIF_F_RXCSUM; 1040 - 1041 - if (dev->can_dma_sg) { 1042 - dev->net->features |= NETIF_F_SG | NETIF_F_TSO; 1043 - dev->net->hw_features |= NETIF_F_SG | NETIF_F_TSO; 1044 - } 1045 1037 1046 1038 /* Enable checksum offload */ 1047 1039 *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | ··· 1109 1117 u32 rx_hdr; 1110 1118 u16 hdr_off; 1111 1119 u32 *pkt_hdr; 1120 + 1121 + /* This check is no longer done by usbnet */ 1122 + if (skb->len < dev->net->hard_header_len) 1123 + return 0; 1112 1124 1113 1125 skb_trim(skb, skb->len - 4); 1114 1126 memcpy(&rx_hdr, skb_tail_pointer(skb), 4); ··· 1387 1391 .tx_fixup = ax88179_tx_fixup, 1388 1392 }; 1389 1393 1394 + static const struct driver_info dlink_dub1312_info = { 1395 + .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter", 1396 + .bind = ax88179_bind, 1397 + .unbind = ax88179_unbind, 1398 + .status = ax88179_status, 1399 + .link_reset = ax88179_link_reset, 1400 + .reset = ax88179_reset, 1401 + .stop = ax88179_stop, 1402 + .flags = FLAG_ETHER | FLAG_FRAMING_AX, 1403 + .rx_fixup = ax88179_rx_fixup, 1404 + .tx_fixup = ax88179_tx_fixup, 1405 + }; 1406 + 1390 1407 static const struct driver_info sitecom_info = { 1391 1408 .description = "Sitecom USB 3.0 to Gigabit Adapter", 1392 1409 .bind = ax88179_bind, ··· 1426 1417 .tx_fixup = ax88179_tx_fixup, 1427 1418 }; 1428 1419 1420 + static const struct driver_info lenovo_info = { 1421 + .description = "Lenovo OneLinkDock Gigabit LAN", 1422 + .bind = ax88179_bind, 1423 + .unbind = ax88179_unbind, 1424 + .status = ax88179_status, 1425 + .link_reset = ax88179_link_reset, 1426 + .reset = ax88179_reset, 1427 + .stop = 
ax88179_stop, 1428 + .flags = FLAG_ETHER | FLAG_FRAMING_AX, 1429 + .rx_fixup = ax88179_rx_fixup, 1430 + .tx_fixup = ax88179_tx_fixup, 1431 + }; 1432 + 1429 1433 static const struct usb_device_id products[] = { 1430 1434 { 1431 1435 /* ASIX AX88179 10/100/1000 */ ··· 1449 1427 USB_DEVICE(0x0b95, 0x178a), 1450 1428 .driver_info = (unsigned long)&ax88178a_info, 1451 1429 }, { 1430 + /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ 1431 + USB_DEVICE(0x2001, 0x4a00), 1432 + .driver_info = (unsigned long)&dlink_dub1312_info, 1433 + }, { 1452 1434 /* Sitecom USB 3.0 to Gigabit Adapter */ 1453 1435 USB_DEVICE(0x0df6, 0x0072), 1454 1436 .driver_info = (unsigned long)&sitecom_info, ··· 1460 1434 /* Samsung USB Ethernet Adapter */ 1461 1435 USB_DEVICE(0x04e8, 0xa100), 1462 1436 .driver_info = (unsigned long)&samsung_info, 1437 + }, { 1438 + /* Lenovo OneLinkDock Gigabit LAN */ 1439 + USB_DEVICE(0x17ef, 0x304b), 1440 + .driver_info = (unsigned long)&lenovo_info, 1463 1441 }, 1464 1442 { }, 1465 1443 };
+4
drivers/net/usb/gl620a.c
··· 84 84 u32 size; 85 85 u32 count; 86 86 87 + /* This check is no longer done by usbnet */ 88 + if (skb->len < dev->net->hard_header_len) 89 + return 0; 90 + 87 91 header = (struct gl_header *) skb->data; 88 92 89 93 // get the packet count of the received skb
+3 -2
drivers/net/usb/mcs7830.c
··· 526 526 { 527 527 u8 status; 528 528 529 - if (skb->len == 0) { 530 - dev_err(&dev->udev->dev, "unexpected empty rx frame\n"); 529 + /* This check is no longer done by usbnet */ 530 + if (skb->len < dev->net->hard_header_len) { 531 + dev_err(&dev->udev->dev, "unexpected tiny rx frame\n"); 531 532 return 0; 532 533 } 533 534
+4
drivers/net/usb/net1080.c
··· 364 364 struct nc_trailer *trailer; 365 365 u16 hdr_len, packet_len; 366 366 367 + /* This check is no longer done by usbnet */ 368 + if (skb->len < dev->net->hard_header_len) 369 + return 0; 370 + 367 371 if (!(skb->len & 0x01)) { 368 372 netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n", 369 373 skb->len, dev->net->hard_header_len, dev->hard_mtu,
+5 -4
drivers/net/usb/qmi_wwan.c
··· 80 80 { 81 81 __be16 proto; 82 82 83 - /* usbnet rx_complete guarantees that skb->len is at least 84 - * hard_header_len, so we can inspect the dest address without 85 - * checking skb->len 86 - */ 83 + /* This check is no longer done by usbnet */ 84 + if (skb->len < dev->net->hard_header_len) 85 + return 0; 86 + 87 87 switch (skb->data[0] & 0xf0) { 88 88 case 0x40: 89 89 proto = htons(ETH_P_IP); ··· 732 732 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 733 733 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ 734 734 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ 735 + {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ 735 736 736 737 /* 4. Gobi 1000 devices */ 737 738 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+4
drivers/net/usb/rndis_host.c
··· 492 492 */ 493 493 int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 494 494 { 495 + /* This check is no longer done by usbnet */ 496 + if (skb->len < dev->net->hard_header_len) 497 + return 0; 498 + 495 499 /* peripheral may have batched packets to us... */ 496 500 while (likely(skb->len)) { 497 501 struct rndis_data_hdr *hdr = (void *)skb->data;
+4
drivers/net/usb/smsc75xx.c
··· 2106 2106 2107 2107 static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 2108 2108 { 2109 + /* This check is no longer done by usbnet */ 2110 + if (skb->len < dev->net->hard_header_len) 2111 + return 0; 2112 + 2109 2113 while (skb->len > 0) { 2110 2114 u32 rx_cmd_a, rx_cmd_b, align_count, size; 2111 2115 struct sk_buff *ax_skb;
+4
drivers/net/usb/smsc95xx.c
··· 1723 1723 1724 1724 static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 1725 1725 { 1726 + /* This check is no longer done by usbnet */ 1727 + if (skb->len < dev->net->hard_header_len) 1728 + return 0; 1729 + 1726 1730 while (skb->len > 0) { 1727 1731 u32 header, align_count; 1728 1732 struct sk_buff *ax_skb;
+5 -1
drivers/net/usb/sr9800.c
··· 63 63 { 64 64 int offset = 0; 65 65 66 + /* This check is no longer done by usbnet */ 67 + if (skb->len < dev->net->hard_header_len) 68 + return 0; 69 + 66 70 while (offset + sizeof(u32) < skb->len) { 67 71 struct sk_buff *sr_skb; 68 72 u16 size; ··· 827 823 dev->rx_urb_size = 828 824 SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size; 829 825 } 830 - netdev_dbg(dev->net, "%s : setting rx_urb_size with : %ld\n", __func__, 826 + netdev_dbg(dev->net, "%s : setting rx_urb_size with : %zu\n", __func__, 831 827 dev->rx_urb_size); 832 828 return 0; 833 829
+10 -15
drivers/net/usb/usbnet.c
··· 542 542 } 543 543 // else network stack removes extra byte if we forced a short packet 544 544 545 - if (skb->len) { 546 - /* all data was already cloned from skb inside the driver */ 547 - if (dev->driver_info->flags & FLAG_MULTI_PACKET) 548 - dev_kfree_skb_any(skb); 549 - else 550 - usbnet_skb_return(dev, skb); 545 + /* all data was already cloned from skb inside the driver */ 546 + if (dev->driver_info->flags & FLAG_MULTI_PACKET) 547 + goto done; 548 + 549 + if (skb->len < ETH_HLEN) { 550 + dev->net->stats.rx_errors++; 551 + dev->net->stats.rx_length_errors++; 552 + netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len); 553 + } else { 554 + usbnet_skb_return(dev, skb); 551 555 return; 552 556 } 553 557 554 - netif_dbg(dev, rx_err, dev->net, "drop\n"); 555 - dev->net->stats.rx_errors++; 556 558 done: 557 559 skb_queue_tail(&dev->done, skb); 558 560 } ··· 576 574 switch (urb_status) { 577 575 /* success */ 578 576 case 0: 579 - if (skb->len < dev->net->hard_header_len) { 580 - state = rx_cleanup; 581 - dev->net->stats.rx_errors++; 582 - dev->net->stats.rx_length_errors++; 583 - netif_dbg(dev, rx_err, dev->net, 584 - "rx length %d\n", skb->len); 585 - } 586 577 break; 587 578 588 579 /* stalls need manual reset. this is rare ... except that
+2 -1
drivers/net/veth.c
··· 285 285 dev->ethtool_ops = &veth_ethtool_ops; 286 286 dev->features |= NETIF_F_LLTX; 287 287 dev->features |= VETH_FEATURES; 288 - dev->vlan_features = dev->features; 288 + dev->vlan_features = dev->features & 289 + ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); 289 290 dev->destructor = veth_dev_free; 290 291 291 292 dev->hw_features = VETH_FEATURES;
+2 -1
drivers/net/virtio_net.c
··· 1711 1711 /* If we can receive ANY GSO packets, we must allocate large ones. */ 1712 1712 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 1713 1713 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || 1714 - virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) 1714 + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || 1715 + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) 1715 1716 vi->big_packets = true; 1716 1717 1717 1718 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+1 -1
drivers/net/wireless/ath/ath5k/phy.c
··· 110 110 ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20)); 111 111 112 112 if (ah->ah_version == AR5K_AR5210) { 113 - srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf; 113 + srev = (ath5k_hw_reg_read(ah, AR5K_PHY(256)) >> 28) & 0xf; 114 114 ret = (u16)ath5k_hw_bitswap(srev, 4) + 1; 115 115 } else { 116 116 srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
+2 -2
drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
··· 57 57 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e}, 58 58 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 59 59 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 60 - {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 60 + {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5}, 61 61 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 62 62 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, 63 63 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, ··· 96 96 {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000}, 97 97 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 98 98 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, 99 - {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce}, 99 + {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa}, 100 100 {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550}, 101 101 }; 102 102
+7 -1
drivers/net/wireless/ath/ath9k/hw.c
··· 1534 1534 bool ath9k_hw_check_alive(struct ath_hw *ah) 1535 1535 { 1536 1536 int count = 50; 1537 - u32 reg; 1537 + u32 reg, last_val; 1538 1538 1539 1539 if (AR_SREV_9300(ah)) 1540 1540 return !ath9k_hw_detect_mac_hang(ah); ··· 1542 1542 if (AR_SREV_9285_12_OR_LATER(ah)) 1543 1543 return true; 1544 1544 1545 + last_val = REG_READ(ah, AR_OBS_BUS_1); 1545 1546 do { 1546 1547 reg = REG_READ(ah, AR_OBS_BUS_1); 1548 + if (reg != last_val) 1549 + return true; 1547 1550 1551 + last_val = reg; 1548 1552 if ((reg & 0x7E7FFFEF) == 0x00702400) 1549 1553 continue; 1550 1554 ··· 1560 1556 default: 1561 1557 return true; 1562 1558 } 1559 + 1560 + udelay(1); 1563 1561 } while (count-- > 0); 1564 1562 1565 1563 return false;
+35 -35
drivers/net/wireless/ath/ath9k/recv.c
··· 732 732 return NULL; 733 733 734 734 /* 735 - * mark descriptor as zero-length and set the 'more' 736 - * flag to ensure that both buffers get discarded 735 + * Re-check previous descriptor, in case it has been filled 736 + * in the mean time. 737 737 */ 738 - rs->rs_datalen = 0; 739 - rs->rs_more = true; 738 + ret = ath9k_hw_rxprocdesc(ah, ds, rs); 739 + if (ret == -EINPROGRESS) { 740 + /* 741 + * mark descriptor as zero-length and set the 'more' 742 + * flag to ensure that both buffers get discarded 743 + */ 744 + rs->rs_datalen = 0; 745 + rs->rs_more = true; 746 + } 740 747 } 741 748 742 749 list_del(&bf->list); ··· 992 985 struct ath_common *common = ath9k_hw_common(ah); 993 986 struct ieee80211_hdr *hdr; 994 987 bool discard_current = sc->rx.discard_next; 995 - int ret = 0; 996 988 997 989 /* 998 990 * Discard corrupt descriptors which are marked in 999 991 * ath_get_next_rx_buf(). 1000 992 */ 1001 - sc->rx.discard_next = rx_stats->rs_more; 1002 993 if (discard_current) 1003 - return -EINVAL; 994 + goto corrupt; 995 + 996 + sc->rx.discard_next = false; 1004 997 1005 998 /* 1006 999 * Discard zero-length packets. 1007 1000 */ 1008 1001 if (!rx_stats->rs_datalen) { 1009 1002 RX_STAT_INC(rx_len_err); 1010 - return -EINVAL; 1003 + goto corrupt; 1011 1004 } 1012 1005 1013 - /* 1014 - * rs_status follows rs_datalen so if rs_datalen is too large 1015 - * we can take a hint that hardware corrupted it, so ignore 1016 - * those frames. 1017 - */ 1006 + /* 1007 + * rs_status follows rs_datalen so if rs_datalen is too large 1008 + * we can take a hint that hardware corrupted it, so ignore 1009 + * those frames. 1010 + */ 1018 1011 if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) { 1019 1012 RX_STAT_INC(rx_len_err); 1020 - return -EINVAL; 1013 + goto corrupt; 1021 1014 } 1022 1015 1023 1016 /* Only use status info from the last fragment */ ··· 1031 1024 * This is different from the other corrupt descriptor 1032 1025 * condition handled above. 
1033 1026 */ 1034 - if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) { 1035 - ret = -EINVAL; 1036 - goto exit; 1037 - } 1027 + if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) 1028 + goto corrupt; 1038 1029 1039 1030 hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len); 1040 1031 ··· 1048 1043 if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime)) 1049 1044 RX_STAT_INC(rx_spectral); 1050 1045 1051 - ret = -EINVAL; 1052 - goto exit; 1046 + return -EINVAL; 1053 1047 } 1054 1048 1055 1049 /* 1056 1050 * everything but the rate is checked here, the rate check is done 1057 1051 * separately to avoid doing two lookups for a rate for each frame. 1058 1052 */ 1059 - if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) { 1060 - ret = -EINVAL; 1061 - goto exit; 1062 - } 1053 + if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) 1054 + return -EINVAL; 1063 1055 1064 1056 if (ath_is_mybeacon(common, hdr)) { 1065 1057 RX_STAT_INC(rx_beacons); ··· 1066 1064 /* 1067 1065 * This shouldn't happen, but have a safety check anyway. 1068 1066 */ 1069 - if (WARN_ON(!ah->curchan)) { 1070 - ret = -EINVAL; 1071 - goto exit; 1072 - } 1067 + if (WARN_ON(!ah->curchan)) 1068 + return -EINVAL; 1073 1069 1074 - if (ath9k_process_rate(common, hw, rx_stats, rx_status)) { 1075 - ret =-EINVAL; 1076 - goto exit; 1077 - } 1070 + if (ath9k_process_rate(common, hw, rx_stats, rx_status)) 1071 + return -EINVAL; 1078 1072 1079 1073 ath9k_process_rssi(common, hw, rx_stats, rx_status); 1080 1074 ··· 1085 1087 sc->rx.num_pkts++; 1086 1088 #endif 1087 1089 1088 - exit: 1089 - sc->rx.discard_next = false; 1090 - return ret; 1090 + return 0; 1091 + 1092 + corrupt: 1093 + sc->rx.discard_next = rx_stats->rs_more; 1094 + return -EINVAL; 1091 1095 } 1092 1096 1093 1097 static void ath9k_rx_skb_postprocess(struct ath_common *common,
+8 -5
drivers/net/wireless/ath/ath9k/xmit.c
··· 1444 1444 for (tidno = 0, tid = &an->tid[tidno]; 1445 1445 tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { 1446 1446 1447 - if (!tid->sched) 1448 - continue; 1449 - 1450 1447 ac = tid->ac; 1451 1448 txq = ac->txq; 1452 1449 1453 1450 ath_txq_lock(sc, txq); 1451 + 1452 + if (!tid->sched) { 1453 + ath_txq_unlock(sc, txq); 1454 + continue; 1455 + } 1454 1456 1455 1457 buffered = ath_tid_has_buffered(tid); 1456 1458 ··· 2186 2184 txq->stopped = true; 2187 2185 } 2188 2186 2187 + if (txctl->an) 2188 + tid = ath_get_skb_tid(sc, txctl->an, skb); 2189 + 2189 2190 if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) { 2190 2191 ath_txq_unlock(sc, txq); 2191 2192 txq = sc->tx.uapsdq; 2192 2193 ath_txq_lock(sc, txq); 2193 2194 } else if (txctl->an && 2194 2195 ieee80211_is_data_present(hdr->frame_control)) { 2195 - tid = ath_get_skb_tid(sc, txctl->an, skb); 2196 - 2197 2196 WARN_ON(tid->ac->txq != txctl->txq); 2198 2197 2199 2198 if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
+4 -10
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
··· 457 457 458 458 u8 tx_hdrlen; /* sdio bus header length for tx packet */ 459 459 bool txglom; /* host tx glomming enable flag */ 460 - struct sk_buff *txglom_sgpad; /* scatter-gather padding buffer */ 461 460 u16 head_align; /* buffer pointer alignment */ 462 461 u16 sgentry_align; /* scatter-gather buffer alignment */ 463 462 }; ··· 1943 1944 if (lastfrm && chain_pad) 1944 1945 tail_pad += blksize - chain_pad; 1945 1946 if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) { 1946 - pkt_pad = bus->txglom_sgpad; 1947 - if (pkt_pad == NULL) 1948 - brcmu_pkt_buf_get_skb(tail_pad + tail_chop); 1947 + pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop + 1948 + bus->head_align); 1949 1949 if (pkt_pad == NULL) 1950 1950 return -ENOMEM; 1951 1951 ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad); ··· 1955 1957 tail_chop); 1956 1958 *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop; 1957 1959 skb_trim(pkt, pkt->len - tail_chop); 1960 + skb_trim(pkt_pad, tail_pad + tail_chop); 1958 1961 __skb_queue_after(pktq, pkt, pkt_pad); 1959 1962 } else { 1960 1963 ntail = pkt->data_len + tail_pad - ··· 2010 2011 return ret; 2011 2012 head_pad = (u16)ret; 2012 2013 if (head_pad) 2013 - memset(pkt_next->data, 0, head_pad + bus->tx_hdrlen); 2014 + memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad); 2014 2015 2015 2016 total_len += pkt_next->len; 2016 2017 ··· 3485 3486 bus->txglom = false; 3486 3487 value = 1; 3487 3488 pad_size = bus->sdiodev->func[2]->cur_blksize << 1; 3488 - bus->txglom_sgpad = brcmu_pkt_buf_get_skb(pad_size); 3489 - if (!bus->txglom_sgpad) 3490 - brcmf_err("allocating txglom padding skb failed, reduced performance\n"); 3491 - 3492 3489 err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom", 3493 3490 &value, sizeof(u32)); 3494 3491 if (err < 0) { ··· 4048 4053 brcmf_sdio_chip_detach(&bus->ci); 4049 4054 } 4050 4055 4051 - brcmu_pkt_buf_free_skb(bus->txglom_sgpad); 4052 4056 kfree(bus->rxbuf); 4053 4057 kfree(bus->hdrbuf); 4054 4058 kfree(bus);
+1 -1
drivers/net/wireless/hostap/hostap_ap.c
··· 147 147 148 148 if (!sta->ap && sta->u.sta.challenge) 149 149 kfree(sta->u.sta.challenge); 150 - del_timer(&sta->timer); 150 + del_timer_sync(&sta->timer); 151 151 #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ 152 152 153 153 kfree(sta);
+1 -1
drivers/net/wireless/hostap/hostap_proc.c
··· 496 496 497 497 void hostap_remove_proc(local_info_t *local) 498 498 { 499 - remove_proc_subtree(local->ddev->name, hostap_proc); 499 + proc_remove(local->proc); 500 500 } 501 501 502 502
+20 -2
drivers/net/wireless/iwlwifi/dvm/mac80211.c
··· 696 696 return ret; 697 697 } 698 698 699 + static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) 700 + { 701 + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) 702 + return false; 703 + return true; 704 + } 705 + 706 + static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) 707 + { 708 + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) 709 + return false; 710 + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) 711 + return true; 712 + 713 + /* disabled by default */ 714 + return false; 715 + } 716 + 699 717 static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, 700 718 struct ieee80211_vif *vif, 701 719 enum ieee80211_ampdu_mlme_action action, ··· 735 717 736 718 switch (action) { 737 719 case IEEE80211_AMPDU_RX_START: 738 - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) 720 + if (!iwl_enable_rx_ampdu(priv->cfg)) 739 721 break; 740 722 IWL_DEBUG_HT(priv, "start Rx\n"); 741 723 ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); ··· 747 729 case IEEE80211_AMPDU_TX_START: 748 730 if (!priv->trans->ops->txq_enable) 749 731 break; 750 - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) 732 + if (!iwl_enable_tx_ampdu(priv->cfg)) 751 733 break; 752 734 IWL_DEBUG_HT(priv, "start Tx\n"); 753 735 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
+1
drivers/net/wireless/iwlwifi/dvm/sta.c
··· 590 590 sizeof(priv->tid_data[sta_id][tid])); 591 591 592 592 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; 593 + priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; 593 594 594 595 priv->num_stations--; 595 596
+9 -5
drivers/net/wireless/iwlwifi/dvm/tx.c
··· 1291 1291 struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data; 1292 1292 struct iwl_ht_agg *agg; 1293 1293 struct sk_buff_head reclaimed_skbs; 1294 - struct ieee80211_tx_info *info; 1295 - struct ieee80211_hdr *hdr; 1296 1294 struct sk_buff *skb; 1297 1295 int sta_id; 1298 1296 int tid; ··· 1377 1379 freed = 0; 1378 1380 1379 1381 skb_queue_walk(&reclaimed_skbs, skb) { 1380 - hdr = (struct ieee80211_hdr *)skb->data; 1382 + struct ieee80211_hdr *hdr = (void *)skb->data; 1383 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1381 1384 1382 1385 if (ieee80211_is_data_qos(hdr->frame_control)) 1383 1386 freed++; 1384 1387 else 1385 1388 WARN_ON_ONCE(1); 1386 1389 1387 - info = IEEE80211_SKB_CB(skb); 1388 1390 iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); 1391 + 1392 + memset(&info->status, 0, sizeof(info->status)); 1393 + /* Packet was transmitted successfully, failures come as single 1394 + * frames because before failing a frame the firmware transmits 1395 + * it without aggregation at least once. 1396 + */ 1397 + info->flags |= IEEE80211_TX_STAT_ACK; 1389 1398 1390 1399 if (freed == 1) { 1391 1400 /* this is the first skb we deliver in this batch */ 1392 1401 /* put the rate scaling data there */ 1393 1402 info = IEEE80211_SKB_CB(skb); 1394 1403 memset(&info->status, 0, sizeof(info->status)); 1395 - info->flags |= IEEE80211_TX_STAT_ACK; 1396 1404 info->flags |= IEEE80211_TX_STAT_AMPDU; 1397 1405 info->status.ampdu_ack_len = ba_resp->txed_2_done; 1398 1406 info->status.ampdu_len = ba_resp->txed;
+1 -1
drivers/net/wireless/iwlwifi/iwl-drv.c
··· 1286 1286 MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); 1287 1287 module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO); 1288 1288 MODULE_PARM_DESC(11n_disable, 1289 - "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); 1289 + "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX"); 1290 1290 module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, 1291 1291 int, S_IRUGO); 1292 1292 MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
+7 -4
drivers/net/wireless/iwlwifi/iwl-modparams.h
··· 79 79 IWL_POWER_NUM 80 80 }; 81 81 82 - #define IWL_DISABLE_HT_ALL BIT(0) 83 - #define IWL_DISABLE_HT_TXAGG BIT(1) 84 - #define IWL_DISABLE_HT_RXAGG BIT(2) 82 + enum iwl_disable_11n { 83 + IWL_DISABLE_HT_ALL = BIT(0), 84 + IWL_DISABLE_HT_TXAGG = BIT(1), 85 + IWL_DISABLE_HT_RXAGG = BIT(2), 86 + IWL_ENABLE_HT_TXAGG = BIT(3), 87 + }; 85 88 86 89 /** 87 90 * struct iwl_mod_params ··· 93 90 * 94 91 * @sw_crypto: using hardware encryption, default = 0 95 92 * @disable_11n: disable 11n capabilities, default = 0, 96 - * use IWL_DISABLE_HT_* constants 93 + * use IWL_[DIS,EN]ABLE_HT_* constants 97 94 * @amsdu_size_8K: enable 8K amsdu size, default = 0 98 95 * @restart_fw: restart firmware, default = 1 99 96 * @wd_disable: enable stuck queue check, default = 0
+20 -2
drivers/net/wireless/iwlwifi/mvm/mac80211.c
··· 328 328 ieee80211_free_txskb(hw, skb); 329 329 } 330 330 331 + static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) 332 + { 333 + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) 334 + return false; 335 + return true; 336 + } 337 + 338 + static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) 339 + { 340 + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) 341 + return false; 342 + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) 343 + return true; 344 + 345 + /* enabled by default */ 346 + return true; 347 + } 348 + 331 349 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, 332 350 struct ieee80211_vif *vif, 333 351 enum ieee80211_ampdu_mlme_action action, ··· 365 347 366 348 switch (action) { 367 349 case IEEE80211_AMPDU_RX_START: 368 - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) { 350 + if (!iwl_enable_rx_ampdu(mvm->cfg)) { 369 351 ret = -EINVAL; 370 352 break; 371 353 } ··· 375 357 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); 376 358 break; 377 359 case IEEE80211_AMPDU_TX_START: 378 - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) { 360 + if (!iwl_enable_tx_ampdu(mvm->cfg)) { 379 361 ret = -EINVAL; 380 362 break; 381 363 }
+1 -1
drivers/net/wireless/iwlwifi/mvm/mvm.h
··· 152 152 IWL_POWER_SCHEME_LP 153 153 }; 154 154 155 - #define IWL_CONN_MAX_LISTEN_INTERVAL 70 155 + #define IWL_CONN_MAX_LISTEN_INTERVAL 10 156 156 #define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ 157 157 IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\ 158 158 IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
+9 -9
drivers/net/wireless/iwlwifi/mvm/tx.c
··· 822 822 struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data; 823 823 struct sk_buff_head reclaimed_skbs; 824 824 struct iwl_mvm_tid_data *tid_data; 825 - struct ieee80211_tx_info *info; 826 825 struct ieee80211_sta *sta; 827 826 struct iwl_mvm_sta *mvmsta; 828 - struct ieee80211_hdr *hdr; 829 827 struct sk_buff *skb; 830 828 int sta_id, tid, freed; 831 - 832 829 /* "flow" corresponds to Tx queue */ 833 830 u16 scd_flow = le16_to_cpu(ba_notif->scd_flow); 834 - 835 831 /* "ssn" is start of block-ack Tx window, corresponds to index 836 832 * (in Tx queue's circular buffer) of first TFD/frame in window */ 837 833 u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn); ··· 884 888 freed = 0; 885 889 886 890 skb_queue_walk(&reclaimed_skbs, skb) { 887 - hdr = (struct ieee80211_hdr *)skb->data; 891 + struct ieee80211_hdr *hdr = (void *)skb->data; 892 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 888 893 889 894 if (ieee80211_is_data_qos(hdr->frame_control)) 890 895 freed++; 891 896 else 892 897 WARN_ON_ONCE(1); 893 898 894 - info = IEEE80211_SKB_CB(skb); 895 899 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); 900 + 901 + memset(&info->status, 0, sizeof(info->status)); 902 + /* Packet was transmitted successfully, failures come as single 903 + * frames because before failing a frame the firmware transmits 904 + * it without aggregation at least once. 905 + */ 906 + info->flags |= IEEE80211_TX_STAT_ACK; 896 907 897 908 if (freed == 1) { 898 909 /* this is the first skb we deliver in this batch */ 899 910 /* put the rate scaling data there */ 900 - info = IEEE80211_SKB_CB(skb); 901 - memset(&info->status, 0, sizeof(info->status)); 902 - info->flags |= IEEE80211_TX_STAT_ACK; 903 911 info->flags |= IEEE80211_TX_STAT_AMPDU; 904 912 info->status.ampdu_ack_len = ba_notif->txed_2_done; 905 913 info->status.ampdu_len = ba_notif->txed;
+1 -1
drivers/net/wireless/libertas/cfg.c
··· 621 621 id = *pos++; 622 622 elen = *pos++; 623 623 left -= 2; 624 - if (elen > left || elen == 0) { 624 + if (elen > left) { 625 625 lbs_deb_scan("scan response: invalid IE fmt\n"); 626 626 goto done; 627 627 }
+1 -1
drivers/net/wireless/mwifiex/main.c
··· 748 748 749 749 static u16 750 750 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, 751 - void *accel_priv) 751 + void *accel_priv, select_queue_fallback_t fallback) 752 752 { 753 753 skb->priority = cfg80211_classify8021d(skb, NULL); 754 754 return mwifiex_1d_to_wmm_queue[skb->priority];
+17 -17
drivers/net/wireless/mwifiex/pcie.c
··· 1211 1211 rd_index = card->rxbd_rdptr & reg->rx_mask; 1212 1212 skb_data = card->rx_buf_list[rd_index]; 1213 1213 1214 + /* If skb allocation was failed earlier for Rx packet, 1215 + * rx_buf_list[rd_index] would have been left with a NULL. 1216 + */ 1217 + if (!skb_data) 1218 + return -ENOMEM; 1219 + 1214 1220 MWIFIEX_SKB_PACB(skb_data, &buf_pa); 1215 1221 pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE, 1216 1222 PCI_DMA_FROMDEVICE); ··· 1531 1525 if (adapter->ps_state == PS_STATE_SLEEP_CFM) { 1532 1526 mwifiex_process_sleep_confirm_resp(adapter, skb->data, 1533 1527 skb->len); 1528 + mwifiex_pcie_enable_host_int(adapter); 1529 + if (mwifiex_write_reg(adapter, 1530 + PCIE_CPU_INT_EVENT, 1531 + CPU_INTR_SLEEP_CFM_DONE)) { 1532 + dev_warn(adapter->dev, 1533 + "Write register failed\n"); 1534 + return -1; 1535 + } 1534 1536 while (reg->sleep_cookie && (count++ < 10) && 1535 1537 mwifiex_pcie_ok_to_access_hw(adapter)) 1536 1538 usleep_range(50, 60); ··· 2007 1993 adapter->int_status |= pcie_ireg; 2008 1994 spin_unlock_irqrestore(&adapter->int_lock, flags); 2009 1995 2010 - if (pcie_ireg & HOST_INTR_CMD_DONE) { 2011 - if ((adapter->ps_state == PS_STATE_SLEEP_CFM) || 2012 - (adapter->ps_state == PS_STATE_SLEEP)) { 2013 - mwifiex_pcie_enable_host_int(adapter); 2014 - if (mwifiex_write_reg(adapter, 2015 - PCIE_CPU_INT_EVENT, 2016 - CPU_INTR_SLEEP_CFM_DONE) 2017 - ) { 2018 - dev_warn(adapter->dev, 2019 - "Write register failed\n"); 2020 - return; 2021 - 2022 - } 2023 - } 2024 - } else if (!adapter->pps_uapsd_mode && 2025 - adapter->ps_state == PS_STATE_SLEEP && 2026 - mwifiex_pcie_ok_to_access_hw(adapter)) { 1996 + if (!adapter->pps_uapsd_mode && 1997 + adapter->ps_state == PS_STATE_SLEEP && 1998 + mwifiex_pcie_ok_to_access_hw(adapter)) { 2027 1999 /* Potentially for PCIe we could get other 2028 2000 * interrupts like shared. Don't change power 2029 2001 * state until cookie is set */
+1 -11
drivers/net/wireless/mwifiex/usb.c
··· 22 22 23 23 #define USB_VERSION "1.0" 24 24 25 - static const char usbdriver_name[] = "usb8xxx"; 26 - 27 25 static struct mwifiex_if_ops usb_ops; 28 26 static struct semaphore add_remove_card_sem; 29 27 static struct usb_card_rec *usb_card; ··· 525 527 MWIFIEX_BSS_ROLE_ANY), 526 528 MWIFIEX_ASYNC_CMD); 527 529 528 - #ifdef CONFIG_PM 529 - /* Resume handler may be called due to remote wakeup, 530 - * force to exit suspend anyway 531 - */ 532 - usb_disable_autosuspend(card->udev); 533 - #endif /* CONFIG_PM */ 534 - 535 530 return 0; 536 531 } 537 532 ··· 558 567 } 559 568 560 569 static struct usb_driver mwifiex_usb_driver = { 561 - .name = usbdriver_name, 570 + .name = "mwifiex_usb", 562 571 .probe = mwifiex_usb_probe, 563 572 .disconnect = mwifiex_usb_disconnect, 564 573 .id_table = mwifiex_usb_table, 565 574 .suspend = mwifiex_usb_suspend, 566 575 .resume = mwifiex_usb_resume, 567 - .supports_autosuspend = 1, 568 576 }; 569 577 570 578 static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
+2 -1
drivers/net/wireless/mwifiex/wmm.c
··· 559 559 mwifiex_wmm_delete_all_ralist(priv); 560 560 memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid)); 561 561 562 - if (priv->adapter->if_ops.clean_pcie_ring) 562 + if (priv->adapter->if_ops.clean_pcie_ring && 563 + !priv->adapter->surprise_removed) 563 564 priv->adapter->if_ops.clean_pcie_ring(priv->adapter); 564 565 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); 565 566 }
+8 -2
drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
··· 15 15 #ifndef RTL8187_H 16 16 #define RTL8187_H 17 17 18 + #include <linux/cache.h> 19 + 18 20 #include "rtl818x.h" 19 21 #include "leds.h" 20 22 ··· 141 139 u8 aifsn[4]; 142 140 u8 rfkill_mask; 143 141 struct { 144 - __le64 buf; 142 + union { 143 + __le64 buf; 144 + u8 dummy1[L1_CACHE_BYTES]; 145 + } ____cacheline_aligned; 145 146 struct sk_buff_head queue; 146 147 } b_tx_status; /* This queue is used by both -b and non-b devices */ 147 148 struct mutex io_mutex; ··· 152 147 u8 bits8; 153 148 __le16 bits16; 154 149 __le32 bits32; 155 - } *io_dmabuf; 150 + u8 dummy2[L1_CACHE_BYTES]; 151 + } *io_dmabuf ____cacheline_aligned; 156 152 bool rfkill_off; 157 153 u16 seqno; 158 154 };
+1 -1
drivers/net/wireless/rtlwifi/ps.c
··· 48 48 49 49 /*<2> Enable Adapter */ 50 50 if (rtlpriv->cfg->ops->hw_init(hw)) 51 - return 1; 51 + return false; 52 52 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); 53 53 54 54 /*<3> Enable Interrupt */
+16 -2
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
··· 937 937 bool is92c; 938 938 int err; 939 939 u8 tmp_u1b; 940 + unsigned long flags; 940 941 941 942 rtlpci->being_init_adapter = true; 943 + 944 + /* Since this function can take a very long time (up to 350 ms) 945 + * and can be called with irqs disabled, reenable the irqs 946 + * to let the other devices continue being serviced. 947 + * 948 + * It is safe doing so since our own interrupts will only be enabled 949 + * in a subsequent step. 950 + */ 951 + local_save_flags(flags); 952 + local_irq_enable(); 953 + 942 954 rtlpriv->intf_ops->disable_aspm(hw); 943 955 rtstatus = _rtl92ce_init_mac(hw); 944 956 if (!rtstatus) { 945 957 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); 946 958 err = 1; 947 - return err; 959 + goto exit; 948 960 } 949 961 950 962 err = rtl92c_download_fw(hw); ··· 964 952 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 965 953 "Failed to download FW. Init HW without FW now..\n"); 966 954 err = 1; 967 - return err; 955 + goto exit; 968 956 } 969 957 970 958 rtlhal->last_hmeboxnum = 0; ··· 1044 1032 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n"); 1045 1033 } 1046 1034 rtl92c_dm_init(hw); 1035 + exit: 1036 + local_irq_restore(flags); 1047 1037 rtlpci->being_init_adapter = false; 1048 1038 return err; 1049 1039 }
+1
drivers/net/xen-netfront.c
··· 907 907 908 908 /* Ethernet work: Delayed to here as it peeks the header. */ 909 909 skb->protocol = eth_type_trans(skb, dev); 910 + skb_reset_network_header(skb); 910 911 911 912 if (checksum_setup(dev, skb)) { 912 913 kfree_skb(skb);
+74 -76
drivers/of/base.c
··· 342 342 } 343 343 EXPORT_SYMBOL(of_get_cpu_node); 344 344 345 - /** Checks if the given "compat" string matches one of the strings in 346 - * the device's "compatible" property 345 + /** 346 + * __of_device_is_compatible() - Check if the node matches given constraints 347 + * @device: pointer to node 348 + * @compat: required compatible string, NULL or "" for any match 349 + * @type: required device_type value, NULL or "" for any match 350 + * @name: required node name, NULL or "" for any match 351 + * 352 + * Checks if the given @compat, @type and @name strings match the 353 + * properties of the given @device. A constraints can be skipped by 354 + * passing NULL or an empty string as the constraint. 355 + * 356 + * Returns 0 for no match, and a positive integer on match. The return 357 + * value is a relative score with larger values indicating better 358 + * matches. The score is weighted for the most specific compatible value 359 + * to get the highest score. Matching type is next, followed by matching 360 + * name. Practically speaking, this results in the following priority 361 + * order for matches: 362 + * 363 + * 1. specific compatible && type && name 364 + * 2. specific compatible && type 365 + * 3. specific compatible && name 366 + * 4. specific compatible 367 + * 5. general compatible && type && name 368 + * 6. general compatible && type 369 + * 7. general compatible && name 370 + * 8. general compatible 371 + * 9. type && name 372 + * 10. type 373 + * 11. 
name 347 374 */ 348 375 static int __of_device_is_compatible(const struct device_node *device, 349 - const char *compat) 376 + const char *compat, const char *type, const char *name) 350 377 { 351 - const char* cp; 352 - int cplen, l; 378 + struct property *prop; 379 + const char *cp; 380 + int index = 0, score = 0; 353 381 354 - cp = __of_get_property(device, "compatible", &cplen); 355 - if (cp == NULL) 356 - return 0; 357 - while (cplen > 0) { 358 - if (of_compat_cmp(cp, compat, strlen(compat)) == 0) 359 - return 1; 360 - l = strlen(cp) + 1; 361 - cp += l; 362 - cplen -= l; 382 + /* Compatible match has highest priority */ 383 + if (compat && compat[0]) { 384 + prop = __of_find_property(device, "compatible", NULL); 385 + for (cp = of_prop_next_string(prop, NULL); cp; 386 + cp = of_prop_next_string(prop, cp), index++) { 387 + if (of_compat_cmp(cp, compat, strlen(compat)) == 0) { 388 + score = INT_MAX/2 - (index << 2); 389 + break; 390 + } 391 + } 392 + if (!score) 393 + return 0; 363 394 } 364 395 365 - return 0; 396 + /* Matching type is better than matching name */ 397 + if (type && type[0]) { 398 + if (!device->type || of_node_cmp(type, device->type)) 399 + return 0; 400 + score += 2; 401 + } 402 + 403 + /* Matching name is a bit better than not */ 404 + if (name && name[0]) { 405 + if (!device->name || of_node_cmp(name, device->name)) 406 + return 0; 407 + score++; 408 + } 409 + 410 + return score; 366 411 } 367 412 368 413 /** Checks if the given "compat" string matches one of the strings in ··· 420 375 int res; 421 376 422 377 raw_spin_lock_irqsave(&devtree_lock, flags); 423 - res = __of_device_is_compatible(device, compat); 378 + res = __of_device_is_compatible(device, compat, NULL, NULL); 424 379 raw_spin_unlock_irqrestore(&devtree_lock, flags); 425 380 return res; 426 381 } ··· 726 681 raw_spin_lock_irqsave(&devtree_lock, flags); 727 682 np = from ? 
from->allnext : of_allnodes; 728 683 for (; np; np = np->allnext) { 729 - if (type 730 - && !(np->type && (of_node_cmp(np->type, type) == 0))) 731 - continue; 732 - if (__of_device_is_compatible(np, compatible) && 684 + if (__of_device_is_compatible(np, compatible, type, NULL) && 733 685 of_node_get(np)) 734 686 break; 735 687 } ··· 772 730 } 773 731 EXPORT_SYMBOL(of_find_node_with_property); 774 732 775 - static const struct of_device_id * 776 - of_match_compatible(const struct of_device_id *matches, 777 - const struct device_node *node) 778 - { 779 - const char *cp; 780 - int cplen, l; 781 - const struct of_device_id *m; 782 - 783 - cp = __of_get_property(node, "compatible", &cplen); 784 - while (cp && (cplen > 0)) { 785 - m = matches; 786 - while (m->name[0] || m->type[0] || m->compatible[0]) { 787 - /* Only match for the entries without type and name */ 788 - if (m->name[0] || m->type[0] || 789 - of_compat_cmp(m->compatible, cp, 790 - strlen(m->compatible))) 791 - m++; 792 - else 793 - return m; 794 - } 795 - 796 - /* Get node's next compatible string */ 797 - l = strlen(cp) + 1; 798 - cp += l; 799 - cplen -= l; 800 - } 801 - 802 - return NULL; 803 - } 804 - 805 733 static 806 734 const struct of_device_id *__of_match_node(const struct of_device_id *matches, 807 735 const struct device_node *node) 808 736 { 809 - const struct of_device_id *m; 737 + const struct of_device_id *best_match = NULL; 738 + int score, best_score = 0; 810 739 811 740 if (!matches) 812 741 return NULL; 813 742 814 - m = of_match_compatible(matches, node); 815 - if (m) 816 - return m; 817 - 818 - while (matches->name[0] || matches->type[0] || matches->compatible[0]) { 819 - int match = 1; 820 - if (matches->name[0]) 821 - match &= node->name 822 - && !strcmp(matches->name, node->name); 823 - if (matches->type[0]) 824 - match &= node->type 825 - && !strcmp(matches->type, node->type); 826 - if (matches->compatible[0]) 827 - match &= __of_device_is_compatible(node, 828 - 
matches->compatible); 829 - if (match) 830 - return matches; 831 - matches++; 743 + for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) { 744 + score = __of_device_is_compatible(node, matches->compatible, 745 + matches->type, matches->name); 746 + if (score > best_score) { 747 + best_match = matches; 748 + best_score = score; 749 + } 832 750 } 833 - return NULL; 751 + 752 + return best_match; 834 753 } 835 754 836 755 /** ··· 799 796 * @matches: array of of device match structures to search in 800 797 * @node: the of device structure to match against 801 798 * 802 - * Low level utility function used by device matching. We have two ways 803 - * of matching: 804 - * - Try to find the best compatible match by comparing each compatible 805 - * string of device node with all the given matches respectively. 806 - * - If the above method failed, then try to match the compatible by using 807 - * __of_device_is_compatible() besides the match in type and name. 799 + * Low level utility function used by device matching. 808 800 */ 809 801 const struct of_device_id *of_match_node(const struct of_device_id *matches, 810 802 const struct device_node *node)
+14 -8
drivers/of/of_mdio.c
··· 24 24 25 25 static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed) 26 26 { 27 - phydev->supported |= PHY_DEFAULT_FEATURES; 27 + /* The default values for phydev->supported are provided by the PHY 28 + * driver "features" member, we want to reset to sane defaults fist 29 + * before supporting higher speeds. 30 + */ 31 + phydev->supported &= PHY_DEFAULT_FEATURES; 28 32 29 33 switch (max_speed) { 30 34 default: ··· 48 44 { 49 45 struct phy_device *phy; 50 46 bool is_c45; 51 - int rc, prev_irq; 47 + int rc; 52 48 u32 max_speed = 0; 53 49 54 50 is_c45 = of_device_is_compatible(child, ··· 58 54 if (!phy || IS_ERR(phy)) 59 55 return 1; 60 56 61 - if (mdio->irq) { 62 - prev_irq = mdio->irq[addr]; 63 - mdio->irq[addr] = 64 - irq_of_parse_and_map(child, 0); 65 - if (!mdio->irq[addr]) 66 - mdio->irq[addr] = prev_irq; 57 + rc = irq_of_parse_and_map(child, 0); 58 + if (rc > 0) { 59 + phy->irq = rc; 60 + if (mdio->irq) 61 + mdio->irq[addr] = rc; 62 + } else { 63 + if (mdio->irq) 64 + phy->irq = mdio->irq[addr]; 67 65 } 68 66 69 67 /* Associate the OF node with the device structure so it
+67
drivers/of/selftest.c
··· 300 300 of_node_put(np); 301 301 } 302 302 303 + static struct of_device_id match_node_table[] = { 304 + { .data = "A", .name = "name0", }, /* Name alone is lowest priority */ 305 + { .data = "B", .type = "type1", }, /* followed by type alone */ 306 + 307 + { .data = "Ca", .name = "name2", .type = "type1", }, /* followed by both together */ 308 + { .data = "Cb", .name = "name2", }, /* Only match when type doesn't match */ 309 + { .data = "Cc", .name = "name2", .type = "type2", }, 310 + 311 + { .data = "E", .compatible = "compat3" }, 312 + { .data = "G", .compatible = "compat2", }, 313 + { .data = "H", .compatible = "compat2", .name = "name5", }, 314 + { .data = "I", .compatible = "compat2", .type = "type1", }, 315 + { .data = "J", .compatible = "compat2", .type = "type1", .name = "name8", }, 316 + { .data = "K", .compatible = "compat2", .name = "name9", }, 317 + {} 318 + }; 319 + 320 + static struct { 321 + const char *path; 322 + const char *data; 323 + } match_node_tests[] = { 324 + { .path = "/testcase-data/match-node/name0", .data = "A", }, 325 + { .path = "/testcase-data/match-node/name1", .data = "B", }, 326 + { .path = "/testcase-data/match-node/a/name2", .data = "Ca", }, 327 + { .path = "/testcase-data/match-node/b/name2", .data = "Cb", }, 328 + { .path = "/testcase-data/match-node/c/name2", .data = "Cc", }, 329 + { .path = "/testcase-data/match-node/name3", .data = "E", }, 330 + { .path = "/testcase-data/match-node/name4", .data = "G", }, 331 + { .path = "/testcase-data/match-node/name5", .data = "H", }, 332 + { .path = "/testcase-data/match-node/name6", .data = "G", }, 333 + { .path = "/testcase-data/match-node/name7", .data = "I", }, 334 + { .path = "/testcase-data/match-node/name8", .data = "J", }, 335 + { .path = "/testcase-data/match-node/name9", .data = "K", }, 336 + }; 337 + 338 + static void __init of_selftest_match_node(void) 339 + { 340 + struct device_node *np; 341 + const struct of_device_id *match; 342 + int i; 343 + 344 + for (i = 0; i < 
ARRAY_SIZE(match_node_tests); i++) { 345 + np = of_find_node_by_path(match_node_tests[i].path); 346 + if (!np) { 347 + selftest(0, "missing testcase node %s\n", 348 + match_node_tests[i].path); 349 + continue; 350 + } 351 + 352 + match = of_match_node(match_node_table, np); 353 + if (!match) { 354 + selftest(0, "%s didn't match anything\n", 355 + match_node_tests[i].path); 356 + continue; 357 + } 358 + 359 + if (strcmp(match->data, match_node_tests[i].data) != 0) { 360 + selftest(0, "%s got wrong match. expected %s, got %s\n", 361 + match_node_tests[i].path, match_node_tests[i].data, 362 + (const char *)match->data); 363 + continue; 364 + } 365 + selftest(1, "passed"); 366 + } 367 + } 368 + 303 369 static int __init of_selftest(void) 304 370 { 305 371 struct device_node *np; ··· 382 316 of_selftest_property_match_string(); 383 317 of_selftest_parse_interrupts(); 384 318 of_selftest_parse_interrupts_extended(); 319 + of_selftest_match_node(); 385 320 pr_info("end of selftest - %i passed, %i failed\n", 386 321 selftest_results.passed, selftest_results.failed); 387 322 return 0;
+3
drivers/of/testcase-data/testcases.dtsi
··· 1 + #include "tests-phandle.dtsi" 2 + #include "tests-interrupts.dtsi" 3 + #include "tests-match.dtsi"
+19
drivers/of/testcase-data/tests-match.dtsi
··· 1 + 2 + / { 3 + testcase-data { 4 + match-node { 5 + name0 { }; 6 + name1 { device_type = "type1"; }; 7 + a { name2 { device_type = "type1"; }; }; 8 + b { name2 { }; }; 9 + c { name2 { device_type = "type2"; }; }; 10 + name3 { compatible = "compat3"; }; 11 + name4 { compatible = "compat2", "compat3"; }; 12 + name5 { compatible = "compat2", "compat3"; }; 13 + name6 { compatible = "compat1", "compat2", "compat3"; }; 14 + name7 { compatible = "compat2"; device_type = "type1"; }; 15 + name8 { compatible = "compat2"; device_type = "type1"; }; 16 + name9 { compatible = "compat2"; }; 17 + }; 18 + }; 19 + };
+2 -9
drivers/pci/host/pci-mvebu.c
··· 60 60 #define PCIE_DEBUG_CTRL 0x1a60 61 61 #define PCIE_DEBUG_SOFT_RESET BIT(20) 62 62 63 - /* 64 - * This product ID is registered by Marvell, and used when the Marvell 65 - * SoC is not the root complex, but an endpoint on the PCIe bus. It is 66 - * therefore safe to re-use this PCI ID for our emulated PCI-to-PCI 67 - * bridge. 68 - */ 69 - #define MARVELL_EMULATED_PCI_PCI_BRIDGE_ID 0x7846 70 - 71 63 /* PCI configuration space of a PCI-to-PCI bridge */ 72 64 struct mvebu_sw_pci_bridge { 73 65 u16 vendor; ··· 380 388 381 389 bridge->class = PCI_CLASS_BRIDGE_PCI; 382 390 bridge->vendor = PCI_VENDOR_ID_MARVELL; 383 - bridge->device = MARVELL_EMULATED_PCI_PCI_BRIDGE_ID; 391 + bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16; 392 + bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff; 384 393 bridge->header_type = PCI_HEADER_TYPE_BRIDGE; 385 394 bridge->cache_line_size = 0x10; 386 395
+9 -3
drivers/pci/msi.c
··· 545 545 return -ENOMEM; 546 546 list_for_each_entry(entry, &pdev->msi_list, list) { 547 547 char *name = kmalloc(20, GFP_KERNEL); 548 - msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL); 549 - if (!msi_dev_attr) 548 + if (!name) 550 549 goto error_attrs; 550 + 551 + msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL); 552 + if (!msi_dev_attr) { 553 + kfree(name); 554 + goto error_attrs; 555 + } 556 + 551 557 sprintf(name, "%d", entry->irq); 552 558 sysfs_attr_init(&msi_dev_attr->attr); 553 559 msi_dev_attr->attr.name = name; ··· 595 589 ++count; 596 590 msi_attr = msi_attrs[count]; 597 591 } 592 + kfree(msi_attrs); 598 593 return ret; 599 594 } 600 595 ··· 966 959 /** 967 960 * pci_msix_vec_count - return the number of device's MSI-X table entries 968 961 * @dev: pointer to the pci_dev data structure of MSI-X device function 969 - 970 962 * This function returns the number of device's MSI-X table entries and 971 963 * therefore the number of MSI-X vectors device is capable of sending. 972 964 * It returns a negative errno if the device is not capable of sending MSI-X
+10
drivers/pci/pci.c
··· 1181 1181 static int do_pci_enable_device(struct pci_dev *dev, int bars) 1182 1182 { 1183 1183 int err; 1184 + u16 cmd; 1185 + u8 pin; 1184 1186 1185 1187 err = pci_set_power_state(dev, PCI_D0); 1186 1188 if (err < 0 && err != -EIO) ··· 1191 1189 if (err < 0) 1192 1190 return err; 1193 1191 pci_fixup_device(pci_fixup_enable, dev); 1192 + 1193 + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); 1194 + if (pin) { 1195 + pci_read_config_word(dev, PCI_COMMAND, &cmd); 1196 + if (cmd & PCI_COMMAND_INTX_DISABLE) 1197 + pci_write_config_word(dev, PCI_COMMAND, 1198 + cmd & ~PCI_COMMAND_INTX_DISABLE); 1199 + } 1194 1200 1195 1201 return 0; 1196 1202 }
+2 -1
drivers/phy/Kconfig
··· 5 5 menu "PHY Subsystem" 6 6 7 7 config GENERIC_PHY 8 - tristate "PHY Core" 8 + bool "PHY Core" 9 9 help 10 10 Generic PHY support. 11 11 ··· 61 61 config BCM_KONA_USB2_PHY 62 62 tristate "Broadcom Kona USB2 PHY Driver" 63 63 depends on GENERIC_PHY 64 + depends on HAS_IOMEM 64 65 help 65 66 Enable this to support the Broadcom Kona USB 2.0 PHY. 66 67
+6 -8
drivers/phy/phy-core.c
··· 176 176 dev_err(&phy->dev, "phy init failed --> %d\n", ret); 177 177 goto out; 178 178 } 179 + } else { 180 + ret = 0; /* Override possible ret == -ENOTSUPP */ 179 181 } 180 182 ++phy->init_count; 181 183 ··· 234 232 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); 235 233 goto out; 236 234 } 235 + } else { 236 + ret = 0; /* Override possible ret == -ENOTSUPP */ 237 237 } 238 238 ++phy->power_count; 239 239 mutex_unlock(&phy->mutex); ··· 408 404 index = of_property_match_string(dev->of_node, "phy-names", 409 405 string); 410 406 phy = of_phy_get(dev, index); 411 - if (IS_ERR(phy)) { 412 - dev_err(dev, "unable to find phy\n"); 413 - return phy; 414 - } 415 407 } else { 416 408 phy = phy_lookup(dev, string); 417 - if (IS_ERR(phy)) { 418 - dev_err(dev, "unable to find phy\n"); 419 - return phy; 420 - } 421 409 } 410 + if (IS_ERR(phy)) 411 + return phy; 422 412 423 413 if (!try_module_get(phy->ops->owner)) 424 414 return ERR_PTR(-EPROBE_DEFER);
+4 -4
drivers/phy/phy-exynos-dp-video.c
··· 76 76 if (IS_ERR(state->regs)) 77 77 return PTR_ERR(state->regs); 78 78 79 - phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); 80 - if (IS_ERR(phy_provider)) 81 - return PTR_ERR(phy_provider); 82 - 83 79 phy = devm_phy_create(dev, &exynos_dp_video_phy_ops, NULL); 84 80 if (IS_ERR(phy)) { 85 81 dev_err(dev, "failed to create Display Port PHY\n"); 86 82 return PTR_ERR(phy); 87 83 } 88 84 phy_set_drvdata(phy, state); 85 + 86 + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); 87 + if (IS_ERR(phy_provider)) 88 + return PTR_ERR(phy_provider); 89 89 90 90 return 0; 91 91 }
+5 -5
drivers/phy/phy-exynos-mipi-video.c
··· 134 134 dev_set_drvdata(dev, state); 135 135 spin_lock_init(&state->slock); 136 136 137 - phy_provider = devm_of_phy_provider_register(dev, 138 - exynos_mipi_video_phy_xlate); 139 - if (IS_ERR(phy_provider)) 140 - return PTR_ERR(phy_provider); 141 - 142 137 for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) { 143 138 struct phy *phy = devm_phy_create(dev, 144 139 &exynos_mipi_video_phy_ops, NULL); ··· 146 151 state->phys[i].index = i; 147 152 phy_set_drvdata(phy, &state->phys[i]); 148 153 } 154 + 155 + phy_provider = devm_of_phy_provider_register(dev, 156 + exynos_mipi_video_phy_xlate); 157 + if (IS_ERR(phy_provider)) 158 + return PTR_ERR(phy_provider); 149 159 150 160 return 0; 151 161 }
+5 -5
drivers/phy/phy-mvebu-sata.c
··· 99 99 if (IS_ERR(priv->clk)) 100 100 return PTR_ERR(priv->clk); 101 101 102 - phy_provider = devm_of_phy_provider_register(&pdev->dev, 103 - of_phy_simple_xlate); 104 - if (IS_ERR(phy_provider)) 105 - return PTR_ERR(phy_provider); 106 - 107 102 phy = devm_phy_create(&pdev->dev, &phy_mvebu_sata_ops, NULL); 108 103 if (IS_ERR(phy)) 109 104 return PTR_ERR(phy); 110 105 111 106 phy_set_drvdata(phy, priv); 107 + 108 + phy_provider = devm_of_phy_provider_register(&pdev->dev, 109 + of_phy_simple_xlate); 110 + if (IS_ERR(phy_provider)) 111 + return PTR_ERR(phy_provider); 112 112 113 113 /* The boot loader may of left it on. Turn it off. */ 114 114 phy_mvebu_sata_power_off(phy);
+5 -5
drivers/phy/phy-omap-usb2.c
··· 177 177 phy->phy.otg = otg; 178 178 phy->phy.type = USB_PHY_TYPE_USB2; 179 179 180 - phy_provider = devm_of_phy_provider_register(phy->dev, 181 - of_phy_simple_xlate); 182 - if (IS_ERR(phy_provider)) 183 - return PTR_ERR(phy_provider); 184 - 185 180 control_node = of_parse_phandle(node, "ctrl-module", 0); 186 181 if (!control_node) { 187 182 dev_err(&pdev->dev, "Failed to get control device phandle\n"); ··· 208 213 return PTR_ERR(generic_phy); 209 214 210 215 phy_set_drvdata(generic_phy, phy); 216 + 217 + phy_provider = devm_of_phy_provider_register(phy->dev, 218 + of_phy_simple_xlate); 219 + if (IS_ERR(phy_provider)) 220 + return PTR_ERR(phy_provider); 211 221 212 222 phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k"); 213 223 if (IS_ERR(phy->wkupclk)) {
+5 -5
drivers/phy/phy-twl4030-usb.c
··· 695 695 otg->set_host = twl4030_set_host; 696 696 otg->set_peripheral = twl4030_set_peripheral; 697 697 698 - phy_provider = devm_of_phy_provider_register(twl->dev, 699 - of_phy_simple_xlate); 700 - if (IS_ERR(phy_provider)) 701 - return PTR_ERR(phy_provider); 702 - 703 698 phy = devm_phy_create(twl->dev, &ops, init_data); 704 699 if (IS_ERR(phy)) { 705 700 dev_dbg(&pdev->dev, "Failed to create PHY\n"); ··· 702 707 } 703 708 704 709 phy_set_drvdata(phy, twl); 710 + 711 + phy_provider = devm_of_phy_provider_register(twl->dev, 712 + of_phy_simple_xlate); 713 + if (IS_ERR(phy_provider)) 714 + return PTR_ERR(phy_provider); 705 715 706 716 /* init spinlock for workqueue */ 707 717 spin_lock_init(&twl->lock);
+1 -1
drivers/pinctrl/Kconfig
··· 217 217 select PINCTRL_MXS 218 218 219 219 config PINCTRL_MSM 220 - tristate 220 + bool 221 221 select PINMUX 222 222 select PINCONF 223 223 select GENERIC_PINCONF
+1 -1
drivers/pinctrl/pinctrl-capri.c
··· 1435 1435 } 1436 1436 1437 1437 static struct of_device_id capri_pinctrl_of_match[] = { 1438 - { .compatible = "brcm,capri-pinctrl", }, 1438 + { .compatible = "brcm,bcm11351-pinctrl", }, 1439 1439 { }, 1440 1440 }; 1441 1441
+5 -1
drivers/pinctrl/pinctrl-sunxi.c
··· 14 14 #include <linux/clk.h> 15 15 #include <linux/gpio.h> 16 16 #include <linux/irqdomain.h> 17 + #include <linux/irqchip/chained_irq.h> 17 18 #include <linux/module.h> 18 19 #include <linux/of.h> 19 20 #include <linux/of_address.h> ··· 585 584 spin_lock_irqsave(&pctl->lock, flags); 586 585 587 586 regval = readl(pctl->membase + reg); 588 - regval &= ~IRQ_CFG_IRQ_MASK; 587 + regval &= ~(IRQ_CFG_IRQ_MASK << index); 589 588 writel(regval | (mode << index), pctl->membase + reg); 590 589 591 590 spin_unlock_irqrestore(&pctl->lock, flags); ··· 666 665 667 666 static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc) 668 667 { 668 + struct irq_chip *chip = irq_get_chip(irq); 669 669 struct sunxi_pinctrl *pctl = irq_get_handler_data(irq); 670 670 const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG); 671 671 ··· 676 674 if (reg) { 677 675 int irqoffset; 678 676 677 + chained_irq_enter(chip, desc); 679 678 for_each_set_bit(irqoffset, &reg, SUNXI_IRQ_NUMBER) { 680 679 int pin_irq = irq_find_mapping(pctl->domain, irqoffset); 681 680 generic_handle_irq(pin_irq); 682 681 } 682 + chained_irq_exit(chip, desc); 683 683 } 684 684 } 685 685
+3 -3
drivers/pinctrl/pinctrl-sunxi.h
··· 511 511 512 512 static inline u32 sunxi_irq_cfg_reg(u16 irq) 513 513 { 514 - u8 reg = irq / IRQ_CFG_IRQ_PER_REG; 514 + u8 reg = irq / IRQ_CFG_IRQ_PER_REG * 0x04; 515 515 return reg + IRQ_CFG_REG; 516 516 } 517 517 ··· 523 523 524 524 static inline u32 sunxi_irq_ctrl_reg(u16 irq) 525 525 { 526 - u8 reg = irq / IRQ_CTRL_IRQ_PER_REG; 526 + u8 reg = irq / IRQ_CTRL_IRQ_PER_REG * 0x04; 527 527 return reg + IRQ_CTRL_REG; 528 528 } 529 529 ··· 535 535 536 536 static inline u32 sunxi_irq_status_reg(u16 irq) 537 537 { 538 - u8 reg = irq / IRQ_STATUS_IRQ_PER_REG; 538 + u8 reg = irq / IRQ_STATUS_IRQ_PER_REG * 0x04; 539 539 return reg + IRQ_STATUS_REG; 540 540 } 541 541
+4 -2
drivers/pinctrl/sh-pfc/pfc-r8a7791.c
··· 89 89 90 90 /* GPSR6 */ 91 91 FN_IP13_10, FN_IP13_11, FN_IP13_12, FN_IP13_13, FN_IP13_14, 92 - FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19, FN_IP13_22, FN_IP13_24_23, 92 + FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19, 93 + FN_IP13_22, FN_IP13_24_23, FN_SD1_CLK, 93 94 FN_IP13_25, FN_IP13_26, FN_IP13_27, FN_IP13_30_28, FN_IP14_1_0, 94 95 FN_IP14_2, FN_IP14_3, FN_IP14_4, FN_IP14_5, FN_IP14_6, FN_IP14_7, 95 96 FN_IP14_10_8, FN_IP14_13_11, FN_IP14_16_14, FN_IP14_19_17, ··· 789 788 PINMUX_DATA(USB1_PWEN_MARK, FN_USB1_PWEN), 790 789 PINMUX_DATA(USB1_OVC_MARK, FN_USB1_OVC), 791 790 PINMUX_DATA(DU0_DOTCLKIN_MARK, FN_DU0_DOTCLKIN), 791 + PINMUX_DATA(SD1_CLK_MARK, FN_SD1_CLK), 792 792 793 793 /* IPSR0 */ 794 794 PINMUX_IPSR_DATA(IP0_0, D0), ··· 3827 3825 GP_6_11_FN, FN_IP13_25, 3828 3826 GP_6_10_FN, FN_IP13_24_23, 3829 3827 GP_6_9_FN, FN_IP13_22, 3830 - 0, 0, 3828 + GP_6_8_FN, FN_SD1_CLK, 3831 3829 GP_6_7_FN, FN_IP13_21_19, 3832 3830 GP_6_6_FN, FN_IP13_18_16, 3833 3831 GP_6_5_FN, FN_IP13_15,
+2 -2
drivers/pinctrl/sirf/pinctrl-sirf.c
··· 598 598 { 599 599 struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); 600 600 601 - if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq)) 601 + if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE)) 602 602 dev_err(bank->chip.gc.dev, 603 603 "unable to lock HW IRQ %lu for IRQ\n", 604 604 d->hwirq); ··· 611 611 struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); 612 612 613 613 sirfsoc_gpio_irq_mask(d); 614 - gpio_unlock_as_irq(&bank->chip.gc, d->hwirq); 614 + gpio_unlock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE); 615 615 } 616 616 617 617 static struct irq_chip sirfsoc_irq_chip = {
+3 -1
drivers/pwm/pwm-lp3943.c
··· 52 52 offset = pwm_map->output[i]; 53 53 54 54 /* Return an error if the pin is already assigned */ 55 - if (test_and_set_bit(offset, &lp3943->pin_used)) 55 + if (test_and_set_bit(offset, &lp3943->pin_used)) { 56 + kfree(pwm_map); 56 57 return ERR_PTR(-EBUSY); 58 + } 57 59 } 58 60 59 61 return pwm_map;
+1
drivers/rapidio/devices/tsi721.h
··· 678 678 struct list_head free_list; 679 679 dma_cookie_t completed_cookie; 680 680 struct tasklet_struct tasklet; 681 + bool active; 681 682 }; 682 683 683 684 #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+18 -9
drivers/rapidio/devices/tsi721_dma.c
··· 206 206 { 207 207 /* Disable BDMA channel interrupts */ 208 208 iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); 209 - 210 - tasklet_schedule(&bdma_chan->tasklet); 209 + if (bdma_chan->active) 210 + tasklet_schedule(&bdma_chan->tasklet); 211 211 } 212 212 213 213 #ifdef CONFIG_PCI_MSI ··· 562 562 } 563 563 #endif /* CONFIG_PCI_MSI */ 564 564 565 - tasklet_enable(&bdma_chan->tasklet); 565 + bdma_chan->active = true; 566 566 tsi721_bdma_interrupt_enable(bdma_chan, 1); 567 567 568 568 return bdma_chan->bd_num - 1; ··· 576 576 static void tsi721_free_chan_resources(struct dma_chan *dchan) 577 577 { 578 578 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); 579 - #ifdef CONFIG_PCI_MSI 580 579 struct tsi721_device *priv = to_tsi721(dchan->device); 581 - #endif 582 580 LIST_HEAD(list); 583 581 584 582 dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); ··· 587 589 BUG_ON(!list_empty(&bdma_chan->active_list)); 588 590 BUG_ON(!list_empty(&bdma_chan->queue)); 589 591 590 - tasklet_disable(&bdma_chan->tasklet); 592 + tsi721_bdma_interrupt_enable(bdma_chan, 0); 593 + bdma_chan->active = false; 594 + 595 + #ifdef CONFIG_PCI_MSI 596 + if (priv->flags & TSI721_USING_MSIX) { 597 + synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE + 598 + bdma_chan->id].vector); 599 + synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT + 600 + bdma_chan->id].vector); 601 + } else 602 + #endif 603 + synchronize_irq(priv->pdev->irq); 604 + 605 + tasklet_kill(&bdma_chan->tasklet); 591 606 592 607 spin_lock_bh(&bdma_chan->lock); 593 608 list_splice_init(&bdma_chan->free_list, &list); 594 609 spin_unlock_bh(&bdma_chan->lock); 595 - 596 - tsi721_bdma_interrupt_enable(bdma_chan, 0); 597 610 598 611 #ifdef CONFIG_PCI_MSI 599 612 if (priv->flags & TSI721_USING_MSIX) { ··· 799 790 bdma_chan->dchan.cookie = 1; 800 791 bdma_chan->dchan.chan_id = i; 801 792 bdma_chan->id = i; 793 + bdma_chan->active = false; 802 794 803 795 spin_lock_init(&bdma_chan->lock); 804 796 ··· 809 799 810 800 
tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet, 811 801 (unsigned long)bdma_chan); 812 - tasklet_disable(&bdma_chan->tasklet); 813 802 list_add_tail(&bdma_chan->dchan.device_node, 814 803 &mport->dma.channels); 815 804 }
+21 -29
drivers/regulator/core.c
··· 953 953 return 0; 954 954 } 955 955 956 + static int _regulator_do_enable(struct regulator_dev *rdev); 957 + 956 958 /** 957 959 * set_machine_constraints - sets regulator constraints 958 960 * @rdev: regulator source ··· 1015 1013 /* If the constraints say the regulator should be on at this point 1016 1014 * and we have control then make sure it is enabled. 1017 1015 */ 1018 - if ((rdev->constraints->always_on || rdev->constraints->boot_on) && 1019 - ops->enable) { 1020 - ret = ops->enable(rdev); 1021 - if (ret < 0) { 1016 + if (rdev->constraints->always_on || rdev->constraints->boot_on) { 1017 + ret = _regulator_do_enable(rdev); 1018 + if (ret < 0 && ret != -EINVAL) { 1022 1019 rdev_err(rdev, "failed to enable\n"); 1023 1020 goto out; 1024 1021 } ··· 1360 1359 goto found; 1361 1360 /* Don't log an error when called from regulator_get_optional() */ 1362 1361 } else if (!have_full_constraints() || exclusive) { 1363 - dev_err(dev, "dummy supplies not allowed\n"); 1362 + dev_warn(dev, "dummy supplies not allowed\n"); 1364 1363 } 1365 1364 1366 1365 mutex_unlock(&regulator_list_mutex); ··· 1908 1907 1909 1908 trace_regulator_disable_complete(rdev_get_name(rdev)); 1910 1909 1911 - _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, 1912 - NULL); 1913 1910 return 0; 1914 1911 } 1915 1912 ··· 1931 1932 rdev_err(rdev, "failed to disable\n"); 1932 1933 return ret; 1933 1934 } 1935 + _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, 1936 + NULL); 1934 1937 } 1935 1938 1936 1939 rdev->use_count = 0; ··· 1985 1984 { 1986 1985 int ret = 0; 1987 1986 1988 - /* force disable */ 1989 - if (rdev->desc->ops->disable) { 1990 - /* ah well, who wants to live forever... 
*/ 1991 - ret = rdev->desc->ops->disable(rdev); 1992 - if (ret < 0) { 1993 - rdev_err(rdev, "failed to force disable\n"); 1994 - return ret; 1995 - } 1996 - /* notify other consumers that power has been forced off */ 1997 - _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | 1998 - REGULATOR_EVENT_DISABLE, NULL); 1987 + ret = _regulator_do_disable(rdev); 1988 + if (ret < 0) { 1989 + rdev_err(rdev, "failed to force disable\n"); 1990 + return ret; 1999 1991 } 2000 1992 2001 - return ret; 1993 + _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | 1994 + REGULATOR_EVENT_DISABLE, NULL); 1995 + 1996 + return 0; 2002 1997 } 2003 1998 2004 1999 /** ··· 3627 3630 3628 3631 mutex_lock(&regulator_list_mutex); 3629 3632 list_for_each_entry(rdev, &regulator_list, list) { 3630 - struct regulator_ops *ops = rdev->desc->ops; 3631 - 3632 3633 mutex_lock(&rdev->mutex); 3633 - if ((rdev->use_count > 0 || rdev->constraints->always_on) && 3634 - ops->enable) { 3635 - error = ops->enable(rdev); 3634 + if (rdev->use_count > 0 || rdev->constraints->always_on) { 3635 + error = _regulator_do_enable(rdev); 3636 3636 if (error) 3637 3637 ret = error; 3638 3638 } else { 3639 3639 if (!have_full_constraints()) 3640 3640 goto unlock; 3641 - if (!ops->disable) 3642 - goto unlock; 3643 3641 if (!_regulator_is_enabled(rdev)) 3644 3642 goto unlock; 3645 3643 3646 - error = ops->disable(rdev); 3644 + error = _regulator_do_disable(rdev); 3647 3645 if (error) 3648 3646 ret = error; 3649 3647 } ··· 3812 3820 ops = rdev->desc->ops; 3813 3821 c = rdev->constraints; 3814 3822 3815 - if (!ops->disable || (c && c->always_on)) 3823 + if (c && c->always_on) 3816 3824 continue; 3817 3825 3818 3826 mutex_lock(&rdev->mutex); ··· 3833 3841 /* We log since this may kill the system if it 3834 3842 * goes wrong. 
*/ 3835 3843 rdev_info(rdev, "disabling\n"); 3836 - ret = ops->disable(rdev); 3844 + ret = _regulator_do_disable(rdev); 3837 3845 if (ret != 0) 3838 3846 rdev_err(rdev, "couldn't disable: %d\n", ret); 3839 3847 } else {
+3 -1
drivers/regulator/da9063-regulator.c
··· 1 + 1 2 /* 2 3 * Regulator driver for DA9063 PMIC series 3 4 * ··· 61 60 .desc.ops = &da9063_ldo_ops, \ 62 61 .desc.min_uV = (min_mV) * 1000, \ 63 62 .desc.uV_step = (step_mV) * 1000, \ 64 - .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1), \ 63 + .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1 \ 64 + + (DA9063_V##regl_name##_BIAS)), \ 65 65 .desc.enable_reg = DA9063_REG_##regl_name##_CONT, \ 66 66 .desc.enable_mask = DA9063_LDO_EN, \ 67 67 .desc.vsel_reg = DA9063_REG_V##regl_name##_A, \
+3 -2
drivers/regulator/max14577.c
··· 166 166 167 167 ret = of_regulator_match(&pdev->dev, np, max14577_regulator_matches, 168 168 MAX14577_REG_MAX); 169 - if (ret < 0) { 169 + if (ret < 0) 170 170 dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); 171 - } 171 + else 172 + ret = 0; 172 173 173 174 of_node_put(np); 174 175
+3 -1
drivers/regulator/s5m8767.c
··· 535 535 return -ENODEV; 536 536 } 537 537 538 - regulators_np = of_find_node_by_name(pmic_np, "regulators"); 538 + regulators_np = of_get_child_by_name(pmic_np, "regulators"); 539 539 if (!regulators_np) { 540 540 dev_err(iodev->dev, "could not find regulators sub-node\n"); 541 541 return -EINVAL; ··· 590 590 } 591 591 rmode++; 592 592 } 593 + 594 + of_node_put(regulators_np); 593 595 594 596 if (of_get_property(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs", NULL)) { 595 597 pdata->buck2_gpiodvs = true;
+12 -5
drivers/rtc/rtc-s3c.c
··· 580 580 581 581 clk_enable(rtc_clk); 582 582 /* save TICNT for anyone using periodic interrupts */ 583 - ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT); 584 583 if (s3c_rtc_cpu_type == TYPE_S3C64XX) { 585 584 ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON); 586 585 ticnt_en_save &= S3C64XX_RTCCON_TICEN; 586 + ticnt_save = readl(s3c_rtc_base + S3C2410_TICNT); 587 + } else { 588 + ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT); 587 589 } 588 590 s3c_rtc_enable(pdev, 0); 589 591 ··· 607 605 608 606 clk_enable(rtc_clk); 609 607 s3c_rtc_enable(pdev, 1); 610 - writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT); 611 - if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) { 612 - tmp = readw(s3c_rtc_base + S3C2410_RTCCON); 613 - writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); 608 + if (s3c_rtc_cpu_type == TYPE_S3C64XX) { 609 + writel(ticnt_save, s3c_rtc_base + S3C2410_TICNT); 610 + if (ticnt_en_save) { 611 + tmp = readw(s3c_rtc_base + S3C2410_RTCCON); 612 + writew(tmp | ticnt_en_save, 613 + s3c_rtc_base + S3C2410_RTCCON); 614 + } 615 + } else { 616 + writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT); 614 617 } 615 618 616 619 if (device_may_wakeup(dev) && wake_en) {
+1
drivers/s390/cio/chsc.c
··· 610 610 css_wait_for_slow_path(); 611 611 for_each_subchannel_staged(__s390_process_res_acc, NULL, 612 612 &link); 613 + css_schedule_reprobe(); 613 614 } 614 615 } 615 616
+23 -1
drivers/s390/crypto/zcrypt_msgtype6.c
··· 311 311 } __packed * msg = ap_msg->message; 312 312 313 313 int rcblen = CEIL4(xcRB->request_control_blk_length); 314 - int replylen; 314 + int replylen, req_sumlen, resp_sumlen; 315 315 char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen; 316 316 char *function_code; 317 317 ··· 321 321 xcRB->request_data_length; 322 322 if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE) 323 323 return -EINVAL; 324 + 325 + /* Overflow check 326 + sum must be greater (or equal) than the largest operand */ 327 + req_sumlen = CEIL4(xcRB->request_control_blk_length) + 328 + xcRB->request_data_length; 329 + if ((CEIL4(xcRB->request_control_blk_length) <= 330 + xcRB->request_data_length) ? 331 + (req_sumlen < xcRB->request_data_length) : 332 + (req_sumlen < CEIL4(xcRB->request_control_blk_length))) { 333 + return -EINVAL; 334 + } 335 + 324 336 replylen = sizeof(struct type86_fmt2_msg) + 325 337 CEIL4(xcRB->reply_control_blk_length) + 326 338 xcRB->reply_data_length; 327 339 if (replylen > MSGTYPE06_MAX_MSG_SIZE) 328 340 return -EINVAL; 341 + 342 + /* Overflow check 343 + sum must be greater (or equal) than the largest operand */ 344 + resp_sumlen = CEIL4(xcRB->reply_control_blk_length) + 345 + xcRB->reply_data_length; 346 + if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ? 347 + (resp_sumlen < xcRB->reply_data_length) : 348 + (resp_sumlen < CEIL4(xcRB->reply_control_blk_length))) { 349 + return -EINVAL; 350 + } 329 351 330 352 /* prepare type6 header */ 331 353 msg->hdr = static_type6_hdrX;
+3 -2
drivers/s390/net/qeth_core_main.c
··· 1660 1660 QDIO_FLAG_CLEANUP_USING_CLEAR); 1661 1661 if (rc) 1662 1662 QETH_CARD_TEXT_(card, 3, "1err%d", rc); 1663 - qdio_free(CARD_DDEV(card)); 1664 1663 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 1665 1664 break; 1666 1665 case QETH_QDIO_CLEANING: ··· 2604 2605 return 0; 2605 2606 out_qdio: 2606 2607 qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); 2608 + qdio_free(CARD_DDEV(card)); 2607 2609 return rc; 2608 2610 } 2609 2611 ··· 4906 4906 if (retries < 3) 4907 4907 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", 4908 4908 dev_name(&card->gdev->dev)); 4909 + rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); 4909 4910 ccw_device_set_offline(CARD_DDEV(card)); 4910 4911 ccw_device_set_offline(CARD_WDEV(card)); 4911 4912 ccw_device_set_offline(CARD_RDEV(card)); 4913 + qdio_free(CARD_DDEV(card)); 4912 4914 rc = ccw_device_set_online(CARD_RDEV(card)); 4913 4915 if (rc) 4914 4916 goto retriable; ··· 4920 4918 rc = ccw_device_set_online(CARD_DDEV(card)); 4921 4919 if (rc) 4922 4920 goto retriable; 4923 - rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); 4924 4921 retriable: 4925 4922 if (rc == -ERESTARTSYS) { 4926 4923 QETH_DBF_TEXT(SETUP, 2, "break1");
+3
drivers/s390/net/qeth_l2_main.c
··· 1091 1091 ccw_device_set_offline(CARD_DDEV(card)); 1092 1092 ccw_device_set_offline(CARD_WDEV(card)); 1093 1093 ccw_device_set_offline(CARD_RDEV(card)); 1094 + qdio_free(CARD_DDEV(card)); 1094 1095 if (recover_flag == CARD_STATE_RECOVER) 1095 1096 card->state = CARD_STATE_RECOVER; 1096 1097 else ··· 1133 1132 rc = (rc2) ? rc2 : rc3; 1134 1133 if (rc) 1135 1134 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 1135 + qdio_free(CARD_DDEV(card)); 1136 1136 if (recover_flag == CARD_STATE_UP) 1137 1137 card->state = CARD_STATE_RECOVER; 1138 1138 /* let user_space know that device is offline */ ··· 1196 1194 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 1197 1195 qeth_qdio_clear_card(card, 0); 1198 1196 qeth_clear_qdio_buffers(card); 1197 + qdio_free(CARD_DDEV(card)); 1199 1198 } 1200 1199 1201 1200 static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
+3
drivers/s390/net/qeth_l3_main.c
··· 3447 3447 ccw_device_set_offline(CARD_DDEV(card)); 3448 3448 ccw_device_set_offline(CARD_WDEV(card)); 3449 3449 ccw_device_set_offline(CARD_RDEV(card)); 3450 + qdio_free(CARD_DDEV(card)); 3450 3451 if (recover_flag == CARD_STATE_RECOVER) 3451 3452 card->state = CARD_STATE_RECOVER; 3452 3453 else ··· 3494 3493 rc = (rc2) ? rc2 : rc3; 3495 3494 if (rc) 3496 3495 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 3496 + qdio_free(CARD_DDEV(card)); 3497 3497 if (recover_flag == CARD_STATE_UP) 3498 3498 card->state = CARD_STATE_RECOVER; 3499 3499 /* let user_space know that device is offline */ ··· 3547 3545 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 3548 3546 qeth_qdio_clear_card(card, 0); 3549 3547 qeth_clear_qdio_buffers(card); 3548 + qdio_free(CARD_DDEV(card)); 3550 3549 } 3551 3550 3552 3551 static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
-1
drivers/sbus/char/jsflash.c
··· 507 507 } 508 508 509 509 /* Let us be really paranoid for modifications to probing code. */ 510 - /* extern enum sparc_cpu sparc_cpu_model; */ /* in <asm/system.h> */ 511 510 if (sparc_cpu_model != sun4m) { 512 511 /* We must be on sun4m because we use MMU Bypass ASI. */ 513 512 return -ENXIO;
+36 -6
drivers/scsi/qla2xxx/qla_target.c
··· 790 790 } 791 791 792 792 /* Called by tcm_qla2xxx configfs code */ 793 - void qlt_stop_phase1(struct qla_tgt *tgt) 793 + int qlt_stop_phase1(struct qla_tgt *tgt) 794 794 { 795 795 struct scsi_qla_host *vha = tgt->vha; 796 796 struct qla_hw_data *ha = tgt->ha; 797 797 unsigned long flags; 798 798 799 + mutex_lock(&qla_tgt_mutex); 800 + if (!vha->fc_vport) { 801 + struct Scsi_Host *sh = vha->host; 802 + struct fc_host_attrs *fc_host = shost_to_fc_host(sh); 803 + bool npiv_vports; 804 + 805 + spin_lock_irqsave(sh->host_lock, flags); 806 + npiv_vports = (fc_host->npiv_vports_inuse); 807 + spin_unlock_irqrestore(sh->host_lock, flags); 808 + 809 + if (npiv_vports) { 810 + mutex_unlock(&qla_tgt_mutex); 811 + return -EPERM; 812 + } 813 + } 799 814 if (tgt->tgt_stop || tgt->tgt_stopped) { 800 815 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e, 801 816 "Already in tgt->tgt_stop or tgt_stopped state\n"); 802 - dump_stack(); 803 - return; 817 + mutex_unlock(&qla_tgt_mutex); 818 + return -EPERM; 804 819 } 805 820 806 821 ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n", ··· 830 815 qlt_clear_tgt_db(tgt, true); 831 816 spin_unlock_irqrestore(&ha->hardware_lock, flags); 832 817 mutex_unlock(&vha->vha_tgt.tgt_mutex); 818 + mutex_unlock(&qla_tgt_mutex); 833 819 834 820 flush_delayed_work(&tgt->sess_del_work); 835 821 ··· 857 841 858 842 /* Wait for sessions to clear out (just in case) */ 859 843 wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); 844 + return 0; 860 845 } 861 846 EXPORT_SYMBOL(qlt_stop_phase1); 862 847 ··· 3202 3185 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, 3203 3186 "SRR cmd %p (se_cmd %p, tag %d, op %x), " 3204 3187 "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag, 3205 - se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset); 3188 + se_cmd->t_task_cdb ? 
se_cmd->t_task_cdb[0] : 0, 3189 + cmd->sg_cnt, cmd->offset); 3206 3190 3207 3191 qlt_handle_srr(vha, sctio, imm); 3208 3192 ··· 4199 4181 tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; 4200 4182 tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; 4201 4183 4184 + if (base_vha->fc_vport) 4185 + return 0; 4186 + 4202 4187 mutex_lock(&qla_tgt_mutex); 4203 4188 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); 4204 4189 mutex_unlock(&qla_tgt_mutex); ··· 4215 4194 if (!vha->vha_tgt.qla_tgt) 4216 4195 return 0; 4217 4196 4197 + if (vha->fc_vport) { 4198 + qlt_release(vha->vha_tgt.qla_tgt); 4199 + return 0; 4200 + } 4218 4201 mutex_lock(&qla_tgt_mutex); 4219 4202 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry); 4220 4203 mutex_unlock(&qla_tgt_mutex); ··· 4290 4265 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4291 4266 continue; 4292 4267 } 4268 + if (tgt->tgt_stop) { 4269 + pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n", 4270 + host->host_no); 4271 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 4272 + continue; 4273 + } 4293 4274 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4294 4275 4295 4276 if (!scsi_host_get(host)) { ··· 4310 4279 scsi_host_put(host); 4311 4280 continue; 4312 4281 } 4313 - mutex_unlock(&qla_tgt_mutex); 4314 - 4315 4282 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn); 4316 4283 if (rc != 0) 4317 4284 scsi_host_put(host); 4318 4285 4286 + mutex_unlock(&qla_tgt_mutex); 4319 4287 return rc; 4320 4288 } 4321 4289 mutex_unlock(&qla_tgt_mutex);
+1 -1
drivers/scsi/qla2xxx/qla_target.h
··· 1001 1001 extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *); 1002 1002 extern int qlt_mem_alloc(struct qla_hw_data *); 1003 1003 extern void qlt_mem_free(struct qla_hw_data *); 1004 - extern void qlt_stop_phase1(struct qla_tgt *); 1004 + extern int qlt_stop_phase1(struct qla_tgt *); 1005 1005 extern void qlt_stop_phase2(struct qla_tgt *); 1006 1006 extern irqreturn_t qla83xx_msix_atio_q(int, void *); 1007 1007 extern void qlt_83xx_iospace_config(struct qla_hw_data *);
+115 -43
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 182 182 return 0; 183 183 } 184 184 185 - static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len, 186 - u64 wwpn, u64 wwnn) 187 - { 188 - u8 b[8], b2[8]; 189 - 190 - put_unaligned_be64(wwpn, b); 191 - put_unaligned_be64(wwnn, b2); 192 - return snprintf(buf, len, 193 - "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x," 194 - "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", 195 - b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7], 196 - b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]); 197 - } 198 - 199 185 static char *tcm_qla2xxx_npiv_get_fabric_name(void) 200 186 { 201 187 return "qla2xxx_npiv"; ··· 211 225 struct tcm_qla2xxx_lport *lport = tpg->lport; 212 226 213 227 return lport->lport_naa_name; 214 - } 215 - 216 - static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg) 217 - { 218 - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 219 - struct tcm_qla2xxx_tpg, se_tpg); 220 - struct tcm_qla2xxx_lport *lport = tpg->lport; 221 - 222 - return &lport->lport_npiv_name[0]; 223 228 } 224 229 225 230 static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg) ··· 918 941 atomic_read(&tpg->lport_tpg_enabled)); 919 942 } 920 943 944 + static void tcm_qla2xxx_depend_tpg(struct work_struct *work) 945 + { 946 + struct tcm_qla2xxx_tpg *base_tpg = container_of(work, 947 + struct tcm_qla2xxx_tpg, tpg_base_work); 948 + struct se_portal_group *se_tpg = &base_tpg->se_tpg; 949 + struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha; 950 + 951 + if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, 952 + &se_tpg->tpg_group.cg_item)) { 953 + atomic_set(&base_tpg->lport_tpg_enabled, 1); 954 + qlt_enable_vha(base_vha); 955 + } 956 + complete(&base_tpg->tpg_base_comp); 957 + } 958 + 959 + static void tcm_qla2xxx_undepend_tpg(struct work_struct *work) 960 + { 961 + struct tcm_qla2xxx_tpg *base_tpg = container_of(work, 962 + struct tcm_qla2xxx_tpg, tpg_base_work); 963 + struct se_portal_group *se_tpg = &base_tpg->se_tpg; 964 + struct 
scsi_qla_host *base_vha = base_tpg->lport->qla_vha; 965 + 966 + if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) { 967 + atomic_set(&base_tpg->lport_tpg_enabled, 0); 968 + configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, 969 + &se_tpg->tpg_group.cg_item); 970 + } 971 + complete(&base_tpg->tpg_base_comp); 972 + } 973 + 921 974 static ssize_t tcm_qla2xxx_tpg_store_enable( 922 975 struct se_portal_group *se_tpg, 923 976 const char *page, 924 977 size_t count) 925 978 { 926 - struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; 927 - struct tcm_qla2xxx_lport *lport = container_of(se_wwn, 928 - struct tcm_qla2xxx_lport, lport_wwn); 929 - struct scsi_qla_host *vha = lport->qla_vha; 930 979 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 931 980 struct tcm_qla2xxx_tpg, se_tpg); 932 981 unsigned long op; ··· 967 964 pr_err("Illegal value for tpg_enable: %lu\n", op); 968 965 return -EINVAL; 969 966 } 967 + if (op) { 968 + if (atomic_read(&tpg->lport_tpg_enabled)) 969 + return -EEXIST; 970 + 971 + INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg); 972 + } else { 973 + if (!atomic_read(&tpg->lport_tpg_enabled)) 974 + return count; 975 + 976 + INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg); 977 + } 978 + init_completion(&tpg->tpg_base_comp); 979 + schedule_work(&tpg->tpg_base_work); 980 + wait_for_completion(&tpg->tpg_base_comp); 970 981 971 982 if (op) { 972 - atomic_set(&tpg->lport_tpg_enabled, 1); 973 - qlt_enable_vha(vha); 974 - } else { 975 - if (!vha->vha_tgt.qla_tgt) { 976 - pr_err("struct qla_hw_data *vha->vha_tgt.qla_tgt is NULL\n"); 983 + if (!atomic_read(&tpg->lport_tpg_enabled)) 977 984 return -ENODEV; 978 - } 979 - atomic_set(&tpg->lport_tpg_enabled, 0); 980 - qlt_stop_phase1(vha->vha_tgt.qla_tgt); 985 + } else { 986 + if (atomic_read(&tpg->lport_tpg_enabled)) 987 + return -EPERM; 981 988 } 982 - 983 989 return count; 984 990 } 985 991 ··· 1065 1053 /* 1066 1054 * Clear local TPG=1 pointer for non NPIV mode. 
1067 1055 */ 1068 - lport->tpg_1 = NULL; 1069 - 1056 + lport->tpg_1 = NULL; 1070 1057 kfree(tpg); 1071 1058 } 1059 + 1060 + static ssize_t tcm_qla2xxx_npiv_tpg_show_enable( 1061 + struct se_portal_group *se_tpg, 1062 + char *page) 1063 + { 1064 + return tcm_qla2xxx_tpg_show_enable(se_tpg, page); 1065 + } 1066 + 1067 + static ssize_t tcm_qla2xxx_npiv_tpg_store_enable( 1068 + struct se_portal_group *se_tpg, 1069 + const char *page, 1070 + size_t count) 1071 + { 1072 + struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; 1073 + struct tcm_qla2xxx_lport *lport = container_of(se_wwn, 1074 + struct tcm_qla2xxx_lport, lport_wwn); 1075 + struct scsi_qla_host *vha = lport->qla_vha; 1076 + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 1077 + struct tcm_qla2xxx_tpg, se_tpg); 1078 + unsigned long op; 1079 + int rc; 1080 + 1081 + rc = kstrtoul(page, 0, &op); 1082 + if (rc < 0) { 1083 + pr_err("kstrtoul() returned %d\n", rc); 1084 + return -EINVAL; 1085 + } 1086 + if ((op != 1) && (op != 0)) { 1087 + pr_err("Illegal value for tpg_enable: %lu\n", op); 1088 + return -EINVAL; 1089 + } 1090 + if (op) { 1091 + if (atomic_read(&tpg->lport_tpg_enabled)) 1092 + return -EEXIST; 1093 + 1094 + atomic_set(&tpg->lport_tpg_enabled, 1); 1095 + qlt_enable_vha(vha); 1096 + } else { 1097 + if (!atomic_read(&tpg->lport_tpg_enabled)) 1098 + return count; 1099 + 1100 + atomic_set(&tpg->lport_tpg_enabled, 0); 1101 + qlt_stop_phase1(vha->vha_tgt.qla_tgt); 1102 + } 1103 + 1104 + return count; 1105 + } 1106 + 1107 + TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR); 1108 + 1109 + static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = { 1110 + &tcm_qla2xxx_npiv_tpg_enable.attr, 1111 + NULL, 1112 + }; 1072 1113 1073 1114 static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( 1074 1115 struct se_wwn *wwn, ··· 1715 1650 struct scsi_qla_host *npiv_vha; 1716 1651 struct tcm_qla2xxx_lport *lport = 1717 1652 (struct tcm_qla2xxx_lport *)target_lport_ptr; 1653 + struct tcm_qla2xxx_lport 
*base_lport = 1654 + (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr; 1655 + struct tcm_qla2xxx_tpg *base_tpg; 1718 1656 struct fc_vport_identifiers vport_id; 1719 1657 1720 1658 if (!qla_tgt_mode_enabled(base_vha)) { 1721 1659 pr_err("qla2xxx base_vha not enabled for target mode\n"); 1722 1660 return -EPERM; 1723 1661 } 1662 + 1663 + if (!base_lport || !base_lport->tpg_1 || 1664 + !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) { 1665 + pr_err("qla2xxx base_lport or tpg_1 not available\n"); 1666 + return -EPERM; 1667 + } 1668 + base_tpg = base_lport->tpg_1; 1724 1669 1725 1670 memset(&vport_id, 0, sizeof(vport_id)); 1726 1671 vport_id.port_name = npiv_wwpn; ··· 1750 1675 npiv_vha = (struct scsi_qla_host *)vport->dd_data; 1751 1676 npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr; 1752 1677 lport->qla_vha = npiv_vha; 1753 - 1754 1678 scsi_host_get(npiv_vha->host); 1755 1679 return 0; 1756 1680 } ··· 1788 1714 } 1789 1715 lport->lport_npiv_wwpn = npiv_wwpn; 1790 1716 lport->lport_npiv_wwnn = npiv_wwnn; 1791 - tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0], 1792 - TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn); 1793 1717 sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn); 1794 1718 1795 1719 ret = tcm_qla2xxx_init_lport(lport); ··· 1896 1824 static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { 1897 1825 .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name, 1898 1826 .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, 1899 - .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn, 1827 + .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, 1900 1828 .tpg_get_tag = tcm_qla2xxx_get_tag, 1901 1829 .tpg_get_default_depth = tcm_qla2xxx_get_default_depth, 1902 1830 .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, ··· 2007 1935 */ 2008 1936 npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; 2009 1937 npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = 2010 - tcm_qla2xxx_tpg_attrs; 1938 + 
tcm_qla2xxx_npiv_tpg_attrs; 2011 1939 npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; 2012 1940 npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; 2013 1941 npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+3 -4
drivers/scsi/qla2xxx/tcm_qla2xxx.h
··· 4 4 #define TCM_QLA2XXX_VERSION "v0.1" 5 5 /* length of ASCII WWPNs including pad */ 6 6 #define TCM_QLA2XXX_NAMELEN 32 7 - /* lenth of ASCII NPIV 'WWPN+WWNN' including pad */ 8 - #define TCM_QLA2XXX_NPIV_NAMELEN 66 9 7 10 8 #include "qla_target.h" 11 9 ··· 41 43 struct tcm_qla2xxx_tpg_attrib tpg_attrib; 42 44 /* Returned by tcm_qla2xxx_make_tpg() */ 43 45 struct se_portal_group se_tpg; 46 + /* Items for dealing with configfs_depend_item */ 47 + struct completion tpg_base_comp; 48 + struct work_struct tpg_base_work; 44 49 }; 45 50 46 51 struct tcm_qla2xxx_fc_loopid { ··· 63 62 char lport_name[TCM_QLA2XXX_NAMELEN]; 64 63 /* ASCII formatted naa WWPN for VPD page 83 etc */ 65 64 char lport_naa_name[TCM_QLA2XXX_NAMELEN]; 66 - /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */ 67 - char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN]; 68 65 /* map for fc_port pointers in 24-bit FC Port ID space */ 69 66 struct btree_head32 lport_fcport_map; 70 67 /* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
+1 -1
drivers/scsi/scsi_lib.c
··· 1684 1684 1685 1685 host_dev = scsi_get_device(shost); 1686 1686 if (host_dev && host_dev->dma_mask) 1687 - bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT; 1687 + bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT; 1688 1688 1689 1689 return bounce_limit; 1690 1690 }
+2 -2
drivers/spi/spi-ath79.c
··· 132 132 133 133 flags = GPIOF_DIR_OUT; 134 134 if (spi->mode & SPI_CS_HIGH) 135 - flags |= GPIOF_INIT_HIGH; 136 - else 137 135 flags |= GPIOF_INIT_LOW; 136 + else 137 + flags |= GPIOF_INIT_HIGH; 138 138 139 139 status = gpio_request_one(cdata->gpio, flags, 140 140 dev_name(&spi->dev));
+16 -1
drivers/spi/spi-atmel.c
··· 1455 1455 { 1456 1456 struct spi_master *master = dev_get_drvdata(dev); 1457 1457 struct atmel_spi *as = spi_master_get_devdata(master); 1458 + int ret; 1459 + 1460 + /* Stop the queue running */ 1461 + ret = spi_master_suspend(master); 1462 + if (ret) { 1463 + dev_warn(dev, "cannot suspend master\n"); 1464 + return ret; 1465 + } 1458 1466 1459 1467 clk_disable_unprepare(as->clk); 1460 1468 return 0; ··· 1472 1464 { 1473 1465 struct spi_master *master = dev_get_drvdata(dev); 1474 1466 struct atmel_spi *as = spi_master_get_devdata(master); 1467 + int ret; 1475 1468 1476 1469 clk_prepare_enable(as->clk); 1477 - return 0; 1470 + 1471 + /* Start the queue running */ 1472 + ret = spi_master_resume(master); 1473 + if (ret) 1474 + dev_err(dev, "problem starting queue (%d)\n", ret); 1475 + 1476 + return ret; 1478 1477 } 1479 1478 1480 1479 static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume);
+4 -2
drivers/spi/spi-coldfire-qspi.c
··· 514 514 #ifdef CONFIG_PM_RUNTIME 515 515 static int mcfqspi_runtime_suspend(struct device *dev) 516 516 { 517 - struct mcfqspi *mcfqspi = dev_get_drvdata(dev); 517 + struct spi_master *master = dev_get_drvdata(dev); 518 + struct mcfqspi *mcfqspi = spi_master_get_devdata(master); 518 519 519 520 clk_disable(mcfqspi->clk); 520 521 ··· 524 523 525 524 static int mcfqspi_runtime_resume(struct device *dev) 526 525 { 527 - struct mcfqspi *mcfqspi = dev_get_drvdata(dev); 526 + struct spi_master *master = dev_get_drvdata(dev); 527 + struct mcfqspi *mcfqspi = spi_master_get_devdata(master); 528 528 529 529 clk_enable(mcfqspi->clk); 530 530
+3 -3
drivers/spi/spi-fsl-dspi.c
··· 420 420 421 421 static int dspi_resume(struct device *dev) 422 422 { 423 - 424 423 struct spi_master *master = dev_get_drvdata(dev); 425 424 struct fsl_dspi *dspi = spi_master_get_devdata(master); 426 425 ··· 503 504 clk_prepare_enable(dspi->clk); 504 505 505 506 init_waitqueue_head(&dspi->waitq); 506 - platform_set_drvdata(pdev, dspi); 507 + platform_set_drvdata(pdev, master); 507 508 508 509 ret = spi_bitbang_start(&dspi->bitbang); 509 510 if (ret != 0) { ··· 524 525 525 526 static int dspi_remove(struct platform_device *pdev) 526 527 { 527 - struct fsl_dspi *dspi = platform_get_drvdata(pdev); 528 + struct spi_master *master = platform_get_drvdata(pdev); 529 + struct fsl_dspi *dspi = spi_master_get_devdata(master); 528 530 529 531 /* Disconnect from the SPI framework */ 530 532 spi_bitbang_stop(&dspi->bitbang);
+2 -2
drivers/spi/spi-imx.c
··· 948 948 spi_bitbang_stop(&spi_imx->bitbang); 949 949 950 950 writel(0, spi_imx->base + MXC_CSPICTRL); 951 - clk_disable_unprepare(spi_imx->clk_ipg); 952 - clk_disable_unprepare(spi_imx->clk_per); 951 + clk_unprepare(spi_imx->clk_ipg); 952 + clk_unprepare(spi_imx->clk_per); 953 953 spi_master_put(master); 954 954 955 955 return 0;
+8 -7
drivers/spi/spi-topcliff-pch.c
··· 915 915 /* Set Tx DMA */ 916 916 param = &dma->param_tx; 917 917 param->dma_dev = &dma_dev->dev; 918 - param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */ 918 + param->chan_id = data->ch * 2; /* Tx = 0, 2 */; 919 919 param->tx_reg = data->io_base_addr + PCH_SPDWR; 920 920 param->width = width; 921 921 chan = dma_request_channel(mask, pch_spi_filter, param); ··· 930 930 /* Set Rx DMA */ 931 931 param = &dma->param_rx; 932 932 param->dma_dev = &dma_dev->dev; 933 - param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */ 933 + param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */; 934 934 param->rx_reg = data->io_base_addr + PCH_SPDRR; 935 935 param->width = width; 936 936 chan = dma_request_channel(mask, pch_spi_filter, param); ··· 1452 1452 1453 1453 pch_spi_set_master_mode(master); 1454 1454 1455 + if (use_dma) { 1456 + dev_info(&plat_dev->dev, "Use DMA for data transfers\n"); 1457 + pch_alloc_dma_buf(board_dat, data); 1458 + } 1459 + 1455 1460 ret = spi_register_master(master); 1456 1461 if (ret != 0) { 1457 1462 dev_err(&plat_dev->dev, ··· 1464 1459 goto err_spi_register_master; 1465 1460 } 1466 1461 1467 - if (use_dma) { 1468 - dev_info(&plat_dev->dev, "Use DMA for data transfers\n"); 1469 - pch_alloc_dma_buf(board_dat, data); 1470 - } 1471 - 1472 1462 return 0; 1473 1463 1474 1464 err_spi_register_master: 1465 + pch_free_dma_buf(board_dat, data); 1475 1466 free_irq(board_dat->pdev->irq, data); 1476 1467 err_request_irq: 1477 1468 pch_spi_free_resources(board_dat, data);
+1 -2
drivers/staging/android/binder.c
··· 2904 2904 refs++; 2905 2905 2906 2906 if (!ref->death) 2907 - goto out; 2907 + continue; 2908 2908 2909 2909 death++; 2910 2910 ··· 2917 2917 BUG(); 2918 2918 } 2919 2919 2920 - out: 2921 2920 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2922 2921 "node %d now dead, refs %d, death %d\n", 2923 2922 node->debug_id, refs, death);
+1 -1
drivers/staging/bcm/Bcmnet.c
··· 40 40 } 41 41 42 42 static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb, 43 - void *accel_priv) 43 + void *accel_priv, select_queue_fallback_t fallback) 44 44 { 45 45 return ClassifyPacket(netdev_priv(dev), skb); 46 46 }
+2
drivers/staging/cxt1e1/linux.c
··· 866 866 _IOC_SIZE (iocmd)); 867 867 #endif 868 868 iolen = _IOC_SIZE (iocmd); 869 + if (iolen > sizeof(arg)) 870 + return -EFAULT; 869 871 data = ifr->ifr_data + sizeof (iocmd); 870 872 if (copy_from_user (&arg, data, iolen)) 871 873 return -EFAULT;
+1
drivers/staging/iio/adc/mxs-lradc.c
··· 757 757 } 758 758 759 759 /* if it is released, wait for the next touch via IRQ */ 760 + lradc->cur_plate = LRADC_TOUCH; 760 761 mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1); 761 762 mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); 762 763 }
+1 -1
drivers/staging/netlogic/xlr_net.c
··· 307 307 } 308 308 309 309 static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb, 310 - void *accel_priv) 310 + void *accel_priv, select_queue_fallback_t fallback) 311 311 { 312 312 return (u16)smp_processor_id(); 313 313 }
+1 -1
drivers/staging/rtl8188eu/os_dep/os_intfs.c
··· 653 653 } 654 654 655 655 static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, 656 - void *accel_priv) 656 + void *accel_priv, select_queue_fallback_t fallback) 657 657 { 658 658 struct adapter *padapter = rtw_netdev_priv(dev); 659 659 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+1
drivers/staging/rtl8188eu/os_dep/usb_intf.c
··· 55 55 /****** 8188EUS ********/ 56 56 {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ 57 57 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ 58 + {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ 58 59 {} /* Terminating entry */ 59 60 }; 60 61
+7 -3
drivers/target/iscsi/iscsi_target.c
··· 785 785 spin_unlock_bh(&conn->cmd_lock); 786 786 787 787 list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) { 788 - list_del(&cmd->i_conn_node); 788 + list_del_init(&cmd->i_conn_node); 789 789 iscsit_free_cmd(cmd, false); 790 790 } 791 791 } ··· 3708 3708 break; 3709 3709 case ISTATE_REMOVE: 3710 3710 spin_lock_bh(&conn->cmd_lock); 3711 - list_del(&cmd->i_conn_node); 3711 + list_del_init(&cmd->i_conn_node); 3712 3712 spin_unlock_bh(&conn->cmd_lock); 3713 3713 3714 3714 iscsit_free_cmd(cmd, false); ··· 4151 4151 spin_lock_bh(&conn->cmd_lock); 4152 4152 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { 4153 4153 4154 - list_del(&cmd->i_conn_node); 4154 + list_del_init(&cmd->i_conn_node); 4155 4155 spin_unlock_bh(&conn->cmd_lock); 4156 4156 4157 4157 iscsit_increment_maxcmdsn(cmd, sess); ··· 4196 4196 iscsit_stop_timers_for_cmds(conn); 4197 4197 iscsit_stop_nopin_response_timer(conn); 4198 4198 iscsit_stop_nopin_timer(conn); 4199 + 4200 + if (conn->conn_transport->iscsit_wait_conn) 4201 + conn->conn_transport->iscsit_wait_conn(conn); 4202 + 4199 4203 iscsit_free_queue_reqs_for_conn(conn); 4200 4204 4201 4205 /*
+8 -8
drivers/target/iscsi/iscsi_target_erl2.c
··· 138 138 list_for_each_entry_safe(cmd, cmd_tmp, 139 139 &cr->conn_recovery_cmd_list, i_conn_node) { 140 140 141 - list_del(&cmd->i_conn_node); 141 + list_del_init(&cmd->i_conn_node); 142 142 cmd->conn = NULL; 143 143 spin_unlock(&cr->conn_recovery_cmd_lock); 144 144 iscsit_free_cmd(cmd, true); ··· 160 160 list_for_each_entry_safe(cmd, cmd_tmp, 161 161 &cr->conn_recovery_cmd_list, i_conn_node) { 162 162 163 - list_del(&cmd->i_conn_node); 163 + list_del_init(&cmd->i_conn_node); 164 164 cmd->conn = NULL; 165 165 spin_unlock(&cr->conn_recovery_cmd_lock); 166 166 iscsit_free_cmd(cmd, true); ··· 216 216 } 217 217 cr = cmd->cr; 218 218 219 - list_del(&cmd->i_conn_node); 219 + list_del_init(&cmd->i_conn_node); 220 220 return --cr->cmd_count; 221 221 } 222 222 ··· 297 297 if (!(cmd->cmd_flags & ICF_OOO_CMDSN)) 298 298 continue; 299 299 300 - list_del(&cmd->i_conn_node); 300 + list_del_init(&cmd->i_conn_node); 301 301 302 302 spin_unlock_bh(&conn->cmd_lock); 303 303 iscsit_free_cmd(cmd, true); ··· 335 335 /* 336 336 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or 337 337 * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call 338 - * list_del(&cmd->i_conn_node); to release the command to the 338 + * list_del_init(&cmd->i_conn_node); to release the command to the 339 339 * session pool and remove it from the connection's list. 
340 340 * 341 341 * Also stop the DataOUT timer, which will be restarted after ··· 351 351 " CID: %hu\n", cmd->iscsi_opcode, 352 352 cmd->init_task_tag, cmd->cmd_sn, conn->cid); 353 353 354 - list_del(&cmd->i_conn_node); 354 + list_del_init(&cmd->i_conn_node); 355 355 spin_unlock_bh(&conn->cmd_lock); 356 356 iscsit_free_cmd(cmd, true); 357 357 spin_lock_bh(&conn->cmd_lock); ··· 371 371 */ 372 372 if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd && 373 373 iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { 374 - list_del(&cmd->i_conn_node); 374 + list_del_init(&cmd->i_conn_node); 375 375 spin_unlock_bh(&conn->cmd_lock); 376 376 iscsit_free_cmd(cmd, true); 377 377 spin_lock_bh(&conn->cmd_lock); ··· 393 393 394 394 cmd->sess = conn->sess; 395 395 396 - list_del(&cmd->i_conn_node); 396 + list_del_init(&cmd->i_conn_node); 397 397 spin_unlock_bh(&conn->cmd_lock); 398 398 399 399 iscsit_free_all_datain_reqs(cmd);
+1 -1
drivers/target/iscsi/iscsi_target_tpg.c
··· 137 137 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { 138 138 139 139 spin_lock(&tpg->tpg_state_lock); 140 - if (tpg->tpg_state == TPG_STATE_FREE) { 140 + if (tpg->tpg_state != TPG_STATE_ACTIVE) { 141 141 spin_unlock(&tpg->tpg_state_lock); 142 142 continue; 143 143 }
+26 -21
drivers/target/target_core_sbc.c
··· 1074 1074 struct scatterlist *psg; 1075 1075 void *paddr, *addr; 1076 1076 unsigned int i, len, left; 1077 - unsigned int offset = 0; 1077 + unsigned int offset = sg_off; 1078 1078 1079 1079 left = sectors * dev->prot_length; 1080 1080 1081 1081 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { 1082 - 1083 - len = min(psg->length, left); 1084 - if (offset >= sg->length) { 1085 - sg = sg_next(sg); 1086 - offset = 0; 1087 - sg_off = sg->offset; 1088 - } 1082 + unsigned int psg_len, copied = 0; 1089 1083 1090 1084 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1091 - addr = kmap_atomic(sg_page(sg)) + sg_off; 1085 + psg_len = min(left, psg->length); 1086 + while (psg_len) { 1087 + len = min(psg_len, sg->length - offset); 1088 + addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; 1092 1089 1093 - if (read) 1094 - memcpy(paddr, addr, len); 1095 - else 1096 - memcpy(addr, paddr, len); 1090 + if (read) 1091 + memcpy(paddr + copied, addr, len); 1092 + else 1093 + memcpy(addr, paddr + copied, len); 1097 1094 1098 - left -= len; 1099 - offset += len; 1095 + left -= len; 1096 + offset += len; 1097 + copied += len; 1098 + psg_len -= len; 1099 + 1100 + if (offset >= sg->length) { 1101 + sg = sg_next(sg); 1102 + offset = 0; 1103 + } 1104 + kunmap_atomic(addr); 1105 + } 1100 1106 kunmap_atomic(paddr); 1101 - kunmap_atomic(addr); 1102 1107 } 1103 1108 } 1104 1109 ··· 1168 1163 { 1169 1164 struct se_device *dev = cmd->se_dev; 1170 1165 struct se_dif_v1_tuple *sdt; 1171 - struct scatterlist *dsg; 1166 + struct scatterlist *dsg, *psg = sg; 1172 1167 sector_t sector = start; 1173 1168 void *daddr, *paddr; 1174 1169 int i, j, offset = sg_off; ··· 1176 1171 1177 1172 for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { 1178 1173 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1179 - paddr = kmap_atomic(sg_page(sg)) + sg->offset; 1174 + paddr = kmap_atomic(sg_page(psg)) + sg->offset; 1180 1175 1181 1176 for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { 
1182 1177 1183 - if (offset >= sg->length) { 1178 + if (offset >= psg->length) { 1184 1179 kunmap_atomic(paddr); 1185 - sg = sg_next(sg); 1186 - paddr = kmap_atomic(sg_page(sg)) + sg->offset; 1180 + psg = sg_next(psg); 1181 + paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1187 1182 offset = 0; 1188 1183 } 1189 1184
+3
drivers/target/target_core_transport.c
··· 1601 1601 case TCM_CHECK_CONDITION_ABORT_CMD: 1602 1602 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1603 1603 case TCM_CHECK_CONDITION_NOT_READY: 1604 + case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 1605 + case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 1606 + case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 1604 1607 break; 1605 1608 case TCM_OUT_OF_RESOURCES: 1606 1609 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+11 -2
drivers/thermal/Kconfig
··· 136 136 config RCAR_THERMAL 137 137 tristate "Renesas R-Car thermal driver" 138 138 depends on ARCH_SHMOBILE || COMPILE_TEST 139 + depends on HAS_IOMEM 139 140 help 140 141 Enable this to plug the R-Car thermal sensor driver into the Linux 141 142 thermal framework. ··· 211 210 tristate "ACPI INT3403 thermal driver" 212 211 depends on X86 && ACPI 213 212 help 214 - This driver uses ACPI INT3403 device objects. If present, it will 215 - register each INT3403 thermal sensor as a thermal zone. 213 + Newer laptops and tablets that use ACPI may have thermal sensors 214 + outside the core CPU/SOC for thermal safety reasons. These 215 + temperature sensors are also exposed for the OS to use via the so 216 + called INT3403 ACPI object. This driver will, on devices that have 217 + such sensors, expose the temperature information from these sensors 218 + to userspace via the normal thermal framework. This means that a wide 219 + range of applications and GUI widgets can show this information to 220 + the user or use this information for making decisions. For example, 221 + the Intel Thermal Daemon can use this information to allow the user 222 + to select his laptop to run without turning on the fans. 216 223 217 224 menu "Texas Instruments thermal drivers" 218 225 source "drivers/thermal/ti-soc-thermal/Kconfig"
+19 -8
drivers/thermal/thermal_core.c
··· 56 56 static DEFINE_MUTEX(thermal_list_lock); 57 57 static DEFINE_MUTEX(thermal_governor_lock); 58 58 59 + static struct thermal_governor *def_governor; 60 + 59 61 static struct thermal_governor *__find_governor(const char *name) 60 62 { 61 63 struct thermal_governor *pos; 64 + 65 + if (!name || !name[0]) 66 + return def_governor; 62 67 63 68 list_for_each_entry(pos, &thermal_governor_list, governor_list) 64 69 if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH)) ··· 87 82 if (__find_governor(governor->name) == NULL) { 88 83 err = 0; 89 84 list_add(&governor->governor_list, &thermal_governor_list); 85 + if (!def_governor && !strncmp(governor->name, 86 + DEFAULT_THERMAL_GOVERNOR, THERMAL_NAME_LENGTH)) 87 + def_governor = governor; 90 88 } 91 89 92 90 mutex_lock(&thermal_list_lock); 93 91 94 92 list_for_each_entry(pos, &thermal_tz_list, node) { 93 + /* 94 + * only thermal zones with specified tz->tzp->governor_name 95 + * may run with tz->govenor unset 96 + */ 95 97 if (pos->governor) 96 98 continue; 97 - if (pos->tzp) 98 - name = pos->tzp->governor_name; 99 - else 100 - name = DEFAULT_THERMAL_GOVERNOR; 99 + 100 + name = pos->tzp->governor_name; 101 + 101 102 if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH)) 102 103 pos->governor = governor; 103 104 } ··· 353 342 static void handle_non_critical_trips(struct thermal_zone_device *tz, 354 343 int trip, enum thermal_trip_type trip_type) 355 344 { 356 - if (tz->governor) 357 - tz->governor->throttle(tz, trip); 345 + tz->governor ? 
tz->governor->throttle(tz, trip) : 346 + def_governor->throttle(tz, trip); 358 347 } 359 348 360 349 static void handle_critical_trips(struct thermal_zone_device *tz, ··· 1118 1107 INIT_LIST_HEAD(&cdev->thermal_instances); 1119 1108 cdev->np = np; 1120 1109 cdev->ops = ops; 1121 - cdev->updated = true; 1110 + cdev->updated = false; 1122 1111 cdev->device.class = &thermal_class; 1123 1112 cdev->devdata = devdata; 1124 1113 dev_set_name(&cdev->device, "cooling_device%d", cdev->id); ··· 1544 1533 if (tz->tzp) 1545 1534 tz->governor = __find_governor(tz->tzp->governor_name); 1546 1535 else 1547 - tz->governor = __find_governor(DEFAULT_THERMAL_GOVERNOR); 1536 + tz->governor = def_governor; 1548 1537 1549 1538 mutex_unlock(&thermal_governor_lock); 1550 1539
+6 -5
drivers/thermal/x86_pkg_temp_thermal.c
··· 68 68 struct thermal_zone_device *tzone; 69 69 }; 70 70 71 + static const struct thermal_zone_params pkg_temp_tz_params = { 72 + .no_hwmon = true, 73 + }; 74 + 71 75 /* List maintaining number of package instances */ 72 76 static LIST_HEAD(phy_dev_list); 73 77 static DEFINE_MUTEX(phy_dev_list_mutex); ··· 398 394 int err; 399 395 u32 tj_max; 400 396 struct phy_dev_entry *phy_dev_entry; 401 - char buffer[30]; 402 397 int thres_count; 403 398 u32 eax, ebx, ecx, edx; 404 399 u8 *temp; ··· 443 440 phy_dev_entry->first_cpu = cpu; 444 441 phy_dev_entry->tj_max = tj_max; 445 442 phy_dev_entry->ref_cnt = 1; 446 - snprintf(buffer, sizeof(buffer), "pkg-temp-%d\n", 447 - phy_dev_entry->phys_proc_id); 448 - phy_dev_entry->tzone = thermal_zone_device_register(buffer, 443 + phy_dev_entry->tzone = thermal_zone_device_register("x86_pkg_temp", 449 444 thres_count, 450 445 (thres_count == MAX_NUMBER_OF_TRIPS) ? 451 446 0x03 : 0x01, 452 - phy_dev_entry, &tzone_ops, NULL, 0, 0); 447 + phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0); 453 448 if (IS_ERR(phy_dev_entry->tzone)) { 454 449 err = PTR_ERR(phy_dev_entry->tzone); 455 450 goto err_ret_free;
+7 -18
drivers/tty/tty_io.c
··· 1267 1267 * @p: output buffer of at least 7 bytes 1268 1268 * 1269 1269 * Generate a name from a driver reference and write it to the output 1270 - * buffer. Return the number of bytes written. 1270 + * buffer. 1271 1271 * 1272 1272 * Locking: None 1273 1273 */ 1274 - static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p) 1274 + static void tty_line_name(struct tty_driver *driver, int index, char *p) 1275 1275 { 1276 1276 if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE) 1277 - return sprintf(p, "%s", driver->name); 1277 + strcpy(p, driver->name); 1278 1278 else 1279 - return sprintf(p, "%s%d", driver->name, 1280 - index + driver->name_base); 1279 + sprintf(p, "%s%d", driver->name, index + driver->name_base); 1281 1280 } 1282 1281 1283 1282 /** ··· 3545 3546 if (i >= ARRAY_SIZE(cs)) 3546 3547 break; 3547 3548 } 3548 - while (i--) { 3549 - struct tty_driver *driver; 3550 - const char *name = cs[i]->name; 3551 - int index = cs[i]->index; 3552 - 3553 - driver = cs[i]->device(cs[i], &index); 3554 - if (driver) { 3555 - count += tty_line_name(driver, index, buf + count); 3556 - count += sprintf(buf + count, "%c", i ? ' ':'\n'); 3557 - } else 3558 - count += sprintf(buf + count, "%s%d%c", 3559 - name, index, i ? ' ':'\n'); 3560 - } 3549 + while (i--) 3550 + count += sprintf(buf + count, "%s%d%c", 3551 + cs[i]->name, cs[i]->index, i ? ' ':'\n'); 3561 3552 console_unlock(); 3562 3553 3563 3554 return count;
+2 -2
drivers/usb/chipidea/udc.c
··· 105 105 106 106 do { 107 107 /* flush any pending transfer */ 108 - hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n)); 108 + hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n)); 109 109 while (hw_read(ci, OP_ENDPTFLUSH, BIT(n))) 110 110 cpu_relax(); 111 111 } while (hw_read(ci, OP_ENDPTSTAT, BIT(n))); ··· 205 205 if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num))) 206 206 return -EAGAIN; 207 207 208 - hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n)); 208 + hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n)); 209 209 210 210 while (hw_read(ci, OP_ENDPTPRIME, BIT(n))) 211 211 cpu_relax();
+4
drivers/usb/core/config.c
··· 717 717 result = -ENOMEM; 718 718 goto err; 719 719 } 720 + 721 + if (dev->quirks & USB_QUIRK_DELAY_INIT) 722 + msleep(100); 723 + 720 724 result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, 721 725 bigbuffer, length); 722 726 if (result < 0) {
+4
drivers/usb/core/quirks.c
··· 47 47 /* Microsoft LifeCam-VX700 v2.0 */ 48 48 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, 49 49 50 + /* Logitech HD Pro Webcams C920 and C930e */ 51 + { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, 52 + { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, 53 + 50 54 /* Logitech Quickcam Fusion */ 51 55 { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, 52 56
+32 -26
drivers/usb/gadget/bcm63xx_udc.c
··· 360 360 bcm_writel(val, udc->iudma_regs + off); 361 361 } 362 362 363 - static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off) 363 + static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan) 364 364 { 365 - return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off); 365 + return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off + 366 + (ENETDMA_CHAN_WIDTH * chan)); 366 367 } 367 368 368 - static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off) 369 + static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off, 370 + int chan) 369 371 { 370 - bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off); 372 + bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off + 373 + (ENETDMA_CHAN_WIDTH * chan)); 371 374 } 372 375 373 - static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off) 376 + static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan) 374 377 { 375 - return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off); 378 + return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off + 379 + (ENETDMA_CHAN_WIDTH * chan)); 376 380 } 377 381 378 - static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off) 382 + static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off, 383 + int chan) 379 384 { 380 - bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off); 385 + bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off + 386 + (ENETDMA_CHAN_WIDTH * chan)); 381 387 } 382 388 383 389 static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled) ··· 644 638 } while (!last_bd); 645 639 646 640 usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK, 647 - ENETDMAC_CHANCFG_REG(iudma->ch_idx)); 641 + ENETDMAC_CHANCFG_REG, iudma->ch_idx); 648 642 } 649 643 650 644 /** ··· 700 694 bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num)); 701 695 702 696 /* stop DMA, then wait for the hardware to wrap up */ 703 - usb_dmac_writel(udc, 0, 
ENETDMAC_CHANCFG_REG(ch_idx)); 697 + usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx); 704 698 705 - while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) & 699 + while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) & 706 700 ENETDMAC_CHANCFG_EN_MASK) { 707 701 udelay(1); 708 702 ··· 719 713 dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n", 720 714 ch_idx); 721 715 usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK, 722 - ENETDMAC_CHANCFG_REG(ch_idx)); 716 + ENETDMAC_CHANCFG_REG, ch_idx); 723 717 } 724 718 } 725 - usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx)); 719 + usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx); 726 720 727 721 /* don't leave "live" HW-owned entries for the next guy to step on */ 728 722 for (d = iudma->bd_ring; d <= iudma->end_bd; d++) ··· 734 728 735 729 /* set up IRQs, UBUS burst size, and BD base for this channel */ 736 730 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK, 737 - ENETDMAC_IRMASK_REG(ch_idx)); 738 - usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx)); 731 + ENETDMAC_IRMASK_REG, ch_idx); 732 + usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx); 739 733 740 - usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx)); 741 - usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx)); 734 + usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx); 735 + usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx); 742 736 } 743 737 744 738 /** ··· 2041 2035 spin_lock(&udc->lock); 2042 2036 2043 2037 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK, 2044 - ENETDMAC_IR_REG(iudma->ch_idx)); 2038 + ENETDMAC_IR_REG, iudma->ch_idx); 2045 2039 bep = iudma->bep; 2046 2040 rc = iudma_read(udc, iudma); 2047 2041 ··· 2181 2175 seq_printf(s, " [ep%d]:\n", 2182 2176 max_t(int, iudma_defaults[ch_idx].ep_num, 0)); 2183 2177 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n", 2184 - usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)), 2185 - usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)), 2186 
- usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)), 2187 - usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx))); 2178 + usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx), 2179 + usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx), 2180 + usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx), 2181 + usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx)); 2188 2182 2189 - sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx)); 2190 - sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx)); 2183 + sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx); 2184 + sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx); 2191 2185 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n", 2192 - usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)), 2186 + usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx), 2193 2187 sram2 >> 16, sram2 & 0xffff, 2194 2188 sram3 >> 16, sram3 & 0xffff, 2195 - usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx))); 2189 + usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx)); 2196 2190 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used, 2197 2191 iudma->n_bds); 2198 2192
+6 -1
drivers/usb/gadget/f_fs.c
··· 585 585 char __user *buf, size_t len, int read) 586 586 { 587 587 struct ffs_epfile *epfile = file->private_data; 588 - struct usb_gadget *gadget = epfile->ffs->gadget; 589 588 struct ffs_ep *ep; 590 589 char *data = NULL; 591 590 ssize_t ret, data_len; ··· 620 621 621 622 /* Allocate & copy */ 622 623 if (!halt) { 624 + /* 625 + * if we _do_ wait above, the epfile->ffs->gadget might be NULL 626 + * before the waiting completes, so do not assign to 'gadget' earlier 627 + */ 628 + struct usb_gadget *gadget = epfile->ffs->gadget; 629 + 623 630 /* 624 631 * Controller may require buffer size to be aligned to 625 632 * maxpacketsize of an out endpoint.
+1 -1
drivers/usb/gadget/printer.c
··· 1157 1157 1158 1158 usb_gadget_set_selfpowered(gadget); 1159 1159 1160 - if (gadget->is_otg) { 1160 + if (gadget_is_otg(gadget)) { 1161 1161 otg_descriptor.bmAttributes |= USB_OTG_HNP; 1162 1162 printer_cfg_driver.descriptors = otg_desc; 1163 1163 printer_cfg_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+1 -1
drivers/usb/gadget/s3c2410_udc.c
··· 1629 1629 ep->ep.desc = NULL; 1630 1630 ep->halted = 0; 1631 1631 INIT_LIST_HEAD(&ep->queue); 1632 - usb_ep_set_maxpacket_limit(&ep->ep, &ep->ep.maxpacket); 1632 + usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket); 1633 1633 } 1634 1634 } 1635 1635
+10 -3
drivers/usb/host/ehci-hcd.c
··· 685 685 struct ehci_hcd *ehci = hcd_to_ehci (hcd); 686 686 u32 status, masked_status, pcd_status = 0, cmd; 687 687 int bh; 688 + unsigned long flags; 688 689 689 - spin_lock (&ehci->lock); 690 + /* 691 + * For threadirqs option we use spin_lock_irqsave() variant to prevent 692 + * deadlock with ehci hrtimer callback, because hrtimer callbacks run 693 + * in interrupt context even when threadirqs is specified. We can go 694 + * back to spin_lock() variant when hrtimer callbacks become threaded. 695 + */ 696 + spin_lock_irqsave(&ehci->lock, flags); 690 697 691 698 status = ehci_readl(ehci, &ehci->regs->status); 692 699 ··· 711 704 712 705 /* Shared IRQ? */ 713 706 if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) { 714 - spin_unlock(&ehci->lock); 707 + spin_unlock_irqrestore(&ehci->lock, flags); 715 708 return IRQ_NONE; 716 709 } 717 710 ··· 822 815 823 816 if (bh) 824 817 ehci_work (ehci); 825 - spin_unlock (&ehci->lock); 818 + spin_unlock_irqrestore(&ehci->lock, flags); 826 819 if (pcd_status) 827 820 usb_hcd_poll_rh_status(hcd); 828 821 return IRQ_HANDLED;
+22 -4
drivers/usb/host/ehci-hub.c
··· 238 238 int port; 239 239 int mask; 240 240 int changed; 241 + bool fs_idle_delay; 241 242 242 243 ehci_dbg(ehci, "suspend root hub\n"); 243 244 ··· 273 272 ehci->bus_suspended = 0; 274 273 ehci->owned_ports = 0; 275 274 changed = 0; 275 + fs_idle_delay = false; 276 276 port = HCS_N_PORTS(ehci->hcs_params); 277 277 while (port--) { 278 278 u32 __iomem *reg = &ehci->regs->port_status [port]; ··· 302 300 } 303 301 304 302 if (t1 != t2) { 303 + /* 304 + * On some controllers, Wake-On-Disconnect will 305 + * generate false wakeup signals until the bus 306 + * switches over to full-speed idle. For their 307 + * sake, add a delay if we need one. 308 + */ 309 + if ((t2 & PORT_WKDISC_E) && 310 + ehci_port_speed(ehci, t2) == 311 + USB_PORT_STAT_HIGH_SPEED) 312 + fs_idle_delay = true; 305 313 ehci_writel(ehci, t2, reg); 306 314 changed = 1; 307 315 } 308 316 } 317 + spin_unlock_irq(&ehci->lock); 318 + 319 + if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) { 320 + /* 321 + * Wait for HCD to enter low-power mode or for the bus 322 + * to switch to full-speed idle. 323 + */ 324 + usleep_range(5000, 5500); 325 + } 309 326 310 327 if (changed && ehci->has_tdi_phy_lpm) { 311 - spin_unlock_irq(&ehci->lock); 312 - msleep(5); /* 5 ms for HCD to enter low-power mode */ 313 328 spin_lock_irq(&ehci->lock); 314 - 315 329 port = HCS_N_PORTS(ehci->hcs_params); 316 330 while (port--) { 317 331 u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port]; ··· 340 322 port, (t3 & HOSTPC_PHCD) ? 341 323 "succeeded" : "failed"); 342 324 } 325 + spin_unlock_irq(&ehci->lock); 343 326 } 344 - spin_unlock_irq(&ehci->lock); 345 327 346 328 /* Apparently some devices need a >= 1-uframe delay here */ 347 329 if (ehci->bus_suspended)
+3 -11
drivers/usb/host/xhci.c
··· 4733 4733 /* Accept arbitrarily long scatter-gather lists */ 4734 4734 hcd->self.sg_tablesize = ~0; 4735 4735 4736 + /* support to build packet from discontinuous buffers */ 4737 + hcd->self.no_sg_constraint = 1; 4738 + 4736 4739 /* XHCI controllers don't stop the ep queue on short packets :| */ 4737 4740 hcd->self.no_stop_on_short = 1; 4738 4741 ··· 4760 4757 /* xHCI private pointer was set in xhci_pci_probe for the second 4761 4758 * registered roothub. 4762 4759 */ 4763 - xhci = hcd_to_xhci(hcd); 4764 - /* 4765 - * Support arbitrarily aligned sg-list entries on hosts without 4766 - * TD fragment rules (which are currently unsupported). 4767 - */ 4768 - if (xhci->hci_version < 0x100) 4769 - hcd->self.no_sg_constraint = 1; 4770 - 4771 4760 return 0; 4772 4761 } 4773 4762 ··· 4787 4792 */ 4788 4793 if (xhci->hci_version > 0x96) 4789 4794 xhci->quirks |= XHCI_SPURIOUS_SUCCESS; 4790 - 4791 - if (xhci->hci_version < 0x100) 4792 - hcd->self.no_sg_constraint = 1; 4793 4795 4794 4796 /* Make sure the HC is halted. */ 4795 4797 retval = xhci_halt(xhci);
+13 -2
drivers/usb/musb/musb_core.c
··· 477 477 musb->port1_status |= 478 478 (USB_PORT_STAT_C_SUSPEND << 16) 479 479 | MUSB_PORT_STAT_RESUME; 480 + musb->rh_timer = jiffies 481 + + msecs_to_jiffies(20); 480 482 schedule_delayed_work( 481 - &musb->finish_resume_work, 20); 483 + &musb->finish_resume_work, 484 + msecs_to_jiffies(20)); 482 485 483 486 musb->xceiv->state = OTG_STATE_A_HOST; 484 487 musb->is_active = 1; ··· 2160 2157 void __iomem *musb_base = musb->mregs; 2161 2158 void __iomem *ep_target_regs; 2162 2159 void __iomem *epio; 2160 + u8 power; 2163 2161 2164 2162 musb_writew(musb_base, MUSB_FRAME, musb->context.frame); 2165 2163 musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode); 2166 2164 musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl); 2167 - musb_writeb(musb_base, MUSB_POWER, musb->context.power); 2165 + 2166 + /* Don't affect SUSPENDM/RESUME bits in POWER reg */ 2167 + power = musb_readb(musb_base, MUSB_POWER); 2168 + power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME; 2169 + musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME); 2170 + power |= musb->context.power; 2171 + musb_writeb(musb_base, MUSB_POWER, power); 2172 + 2168 2173 musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe); 2169 2174 musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe); 2170 2175 musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
+3
drivers/usb/musb/musb_host.c
··· 1183 1183 csr = MUSB_CSR0_H_STATUSPKT 1184 1184 | MUSB_CSR0_TXPKTRDY; 1185 1185 1186 + /* disable ping token in status phase */ 1187 + csr |= MUSB_CSR0_H_DIS_PING; 1188 + 1186 1189 /* flag status stage */ 1187 1190 musb->ep0_stage = MUSB_EP0_STATUS; 1188 1191
+19 -7
drivers/usb/musb/musb_virthub.c
··· 135 135 136 136 /* later, GetPortStatus will stop RESUME signaling */ 137 137 musb->port1_status |= MUSB_PORT_STAT_RESUME; 138 - schedule_delayed_work(&musb->finish_resume_work, 20); 138 + schedule_delayed_work(&musb->finish_resume_work, 139 + msecs_to_jiffies(20)); 139 140 } 140 141 } 141 142 ··· 159 158 */ 160 159 power = musb_readb(mbase, MUSB_POWER); 161 160 if (do_reset) { 162 - 163 161 /* 164 162 * If RESUME is set, we must make sure it stays minimum 20 ms. 165 163 * Then we must clear RESUME and wait a bit to let musb start ··· 167 167 * detected". 168 168 */ 169 169 if (power & MUSB_POWER_RESUME) { 170 - while (time_before(jiffies, musb->rh_timer)) 171 - msleep(1); 170 + long remain = (unsigned long) musb->rh_timer - jiffies; 171 + 172 + if (musb->rh_timer > 0 && remain > 0) { 173 + /* take into account the minimum delay after resume */ 174 + schedule_delayed_work( 175 + &musb->deassert_reset_work, remain); 176 + return; 177 + } 178 + 172 179 musb_writeb(mbase, MUSB_POWER, 173 - power & ~MUSB_POWER_RESUME); 174 - msleep(1); 180 + power & ~MUSB_POWER_RESUME); 181 + 182 + /* Give the core 1 ms to clear MUSB_POWER_RESUME */ 183 + schedule_delayed_work(&musb->deassert_reset_work, 184 + msecs_to_jiffies(1)); 185 + return; 175 186 } 176 187 177 188 power &= 0xf0; ··· 191 180 192 181 musb->port1_status |= USB_PORT_STAT_RESET; 193 182 musb->port1_status &= ~USB_PORT_STAT_ENABLE; 194 - schedule_delayed_work(&musb->deassert_reset_work, 50); 183 + schedule_delayed_work(&musb->deassert_reset_work, 184 + msecs_to_jiffies(50)); 195 185 } else { 196 186 dev_dbg(musb->controller, "root port reset stopped\n"); 197 187 musb_writeb(mbase, MUSB_POWER,
-2
drivers/usb/musb/omap2430.c
··· 659 659 OTG_INTERFSEL); 660 660 661 661 omap2430_low_level_exit(musb); 662 - phy_power_off(musb->phy); 663 662 } 664 663 665 664 return 0; ··· 673 674 omap2430_low_level_init(musb); 674 675 musb_writel(musb->mregs, OTG_INTERFSEL, 675 676 musb->context.otg_interfsel); 676 - phy_power_on(musb->phy); 677 677 } 678 678 679 679 return 0;
+26 -31
drivers/usb/phy/phy-msm-usb.c
··· 159 159 return rc; 160 160 } 161 161 162 - #ifdef CONFIG_PM_SLEEP 163 - #define USB_PHY_SUSP_DIG_VOL 500000 164 - static int msm_hsusb_config_vddcx(int high) 165 - { 166 - int max_vol = USB_PHY_VDD_DIG_VOL_MAX; 167 - int min_vol; 168 - int ret; 169 - 170 - if (high) 171 - min_vol = USB_PHY_VDD_DIG_VOL_MIN; 172 - else 173 - min_vol = USB_PHY_SUSP_DIG_VOL; 174 - 175 - ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol); 176 - if (ret) { 177 - pr_err("%s: unable to set the voltage for regulator " 178 - "HSUSB_VDDCX\n", __func__); 179 - return ret; 180 - } 181 - 182 - pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol); 183 - 184 - return ret; 185 - } 186 - #endif 187 - 188 162 static int msm_hsusb_ldo_set_mode(int on) 189 163 { 190 164 int ret = 0; ··· 414 440 #define PHY_SUSPEND_TIMEOUT_USEC (500 * 1000) 415 441 #define PHY_RESUME_TIMEOUT_USEC (100 * 1000) 416 442 417 - #ifdef CONFIG_PM_SLEEP 443 + #ifdef CONFIG_PM 444 + 445 + #define USB_PHY_SUSP_DIG_VOL 500000 446 + static int msm_hsusb_config_vddcx(int high) 447 + { 448 + int max_vol = USB_PHY_VDD_DIG_VOL_MAX; 449 + int min_vol; 450 + int ret; 451 + 452 + if (high) 453 + min_vol = USB_PHY_VDD_DIG_VOL_MIN; 454 + else 455 + min_vol = USB_PHY_SUSP_DIG_VOL; 456 + 457 + ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol); 458 + if (ret) { 459 + pr_err("%s: unable to set the voltage for regulator " 460 + "HSUSB_VDDCX\n", __func__); 461 + return ret; 462 + } 463 + 464 + pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol); 465 + 466 + return ret; 467 + } 468 + 418 469 static int msm_otg_suspend(struct msm_otg *motg) 419 470 { 420 471 struct usb_phy *phy = &motg->phy; ··· 1732 1733 } 1733 1734 #endif 1734 1735 1735 - #ifdef CONFIG_PM 1736 1736 static const struct dev_pm_ops msm_otg_dev_pm_ops = { 1737 1737 SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume) 1738 1738 SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume, 1739 1739 
msm_otg_runtime_idle) 1740 1740 }; 1741 - #endif 1742 1741 1743 1742 static struct platform_driver msm_otg_driver = { 1744 1743 .remove = msm_otg_remove, 1745 1744 .driver = { 1746 1745 .name = DRIVER_NAME, 1747 1746 .owner = THIS_MODULE, 1748 - #ifdef CONFIG_PM 1749 1747 .pm = &msm_otg_dev_pm_ops, 1750 - #endif 1751 1748 }, 1752 1749 }; 1753 1750
+2
drivers/usb/serial/ftdi_sio.c
··· 907 907 /* Crucible Devices */ 908 908 { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) }, 909 909 { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) }, 910 + /* Cressi Devices */ 911 + { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) }, 910 912 { } /* Terminating entry */ 911 913 }; 912 914
+6
drivers/usb/serial/ftdi_sio_ids.h
··· 1320 1320 * Manufacturer: Smart GSM Team 1321 1321 */ 1322 1322 #define FTDI_Z3X_PID 0x0011 1323 + 1324 + /* 1325 + * Product: Cressi PC Interface 1326 + * Manufacturer: Cressi 1327 + */ 1328 + #define FTDI_CRESSI_PID 0x87d0
+2 -1
drivers/usb/serial/option.c
··· 1526 1526 /* Cinterion */ 1527 1527 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, 1528 1528 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, 1529 - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, 1529 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8), 1530 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1530 1531 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) }, 1531 1532 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), 1532 1533 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+2 -2
drivers/vfio/vfio_iommu_type1.c
··· 186 186 if (pfn_valid(pfn)) { 187 187 bool reserved; 188 188 struct page *tail = pfn_to_page(pfn); 189 - struct page *head = compound_trans_head(tail); 189 + struct page *head = compound_head(tail); 190 190 reserved = !!(PageReserved(head)); 191 191 if (head != tail) { 192 192 /* 193 193 * "head" is not a dangling pointer 194 - * (compound_trans_head takes care of that) 194 + * (compound_head takes care of that) 195 195 * but the hugepage may have been split 196 196 * from under us (and we may not hold a 197 197 * reference count on the head page so it can
+26 -21
drivers/vhost/net.c
··· 70 70 }; 71 71 72 72 struct vhost_net_ubuf_ref { 73 - struct kref kref; 73 + /* refcount follows semantics similar to kref: 74 + * 0: object is released 75 + * 1: no outstanding ubufs 76 + * >1: outstanding ubufs 77 + */ 78 + atomic_t refcount; 74 79 wait_queue_head_t wait; 75 80 struct vhost_virtqueue *vq; 76 81 }; ··· 121 116 vhost_net_zcopy_mask |= 0x1 << vq; 122 117 } 123 118 124 - static void vhost_net_zerocopy_done_signal(struct kref *kref) 125 - { 126 - struct vhost_net_ubuf_ref *ubufs; 127 - 128 - ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref); 129 - wake_up(&ubufs->wait); 130 - } 131 - 132 119 static struct vhost_net_ubuf_ref * 133 120 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) 134 121 { ··· 131 134 ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL); 132 135 if (!ubufs) 133 136 return ERR_PTR(-ENOMEM); 134 - kref_init(&ubufs->kref); 137 + atomic_set(&ubufs->refcount, 1); 135 138 init_waitqueue_head(&ubufs->wait); 136 139 ubufs->vq = vq; 137 140 return ubufs; 138 141 } 139 142 140 - static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) 143 + static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) 141 144 { 142 - kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal); 145 + int r = atomic_sub_return(1, &ubufs->refcount); 146 + if (unlikely(!r)) 147 + wake_up(&ubufs->wait); 148 + return r; 143 149 } 144 150 145 151 static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs) 146 152 { 147 - kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal); 148 - wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount)); 153 + vhost_net_ubuf_put(ubufs); 154 + wait_event(ubufs->wait, !atomic_read(&ubufs->refcount)); 149 155 } 150 156 151 157 static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) ··· 306 306 { 307 307 struct vhost_net_ubuf_ref *ubufs = ubuf->ctx; 308 308 struct vhost_virtqueue *vq = ubufs->vq; 309 - int cnt = atomic_read(&ubufs->kref.refcount); 309 + int cnt; 310 + 311 + 
rcu_read_lock_bh(); 310 312 311 313 /* set len to mark this desc buffers done DMA */ 312 314 vq->heads[ubuf->desc].len = success ? 313 315 VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN; 314 - vhost_net_ubuf_put(ubufs); 316 + cnt = vhost_net_ubuf_put(ubufs); 315 317 316 318 /* 317 319 * Trigger polling thread if guest stopped submitting new buffers: 318 - * in this case, the refcount after decrement will eventually reach 1 319 - * so here it is 2. 320 + * in this case, the refcount after decrement will eventually reach 1. 320 321 * We also trigger polling periodically after each 16 packets 321 322 * (the value 16 here is more or less arbitrary, it's tuned to trigger 322 323 * less than 10% of times). 323 324 */ 324 - if (cnt <= 2 || !(cnt % 16)) 325 + if (cnt <= 1 || !(cnt % 16)) 325 326 vhost_poll_queue(&vq->poll); 327 + 328 + rcu_read_unlock_bh(); 326 329 } 327 330 328 331 /* Expects to be always run from workqueue - which acts as ··· 423 420 msg.msg_control = ubuf; 424 421 msg.msg_controllen = sizeof(ubuf); 425 422 ubufs = nvq->ubufs; 426 - kref_get(&ubufs->kref); 423 + atomic_inc(&ubufs->refcount); 427 424 nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; 428 425 } else { 429 426 msg.msg_control = NULL; ··· 783 780 vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); 784 781 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); 785 782 n->tx_flush = false; 786 - kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref); 783 + atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); 787 784 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); 788 785 } 789 786 } ··· 803 800 fput(tx_sock->file); 804 801 if (rx_sock) 805 802 fput(rx_sock->file); 803 + /* Make sure no callbacks are outstanding */ 804 + synchronize_rcu_bh(); 806 805 /* We do an extra flush before freeing memory, 807 806 * since jobs can re-queue themselves. */ 808 807 vhost_net_flush(n);
+6
drivers/vhost/scsi.c
··· 1001 1001 break; 1002 1002 } 1003 1003 1004 + /* virtio-scsi spec requires byte 0 of the lun to be 1 */ 1005 + if (unlikely(v_req.lun[0] != 1)) { 1006 + vhost_scsi_send_bad_target(vs, vq, head, out); 1007 + continue; 1008 + } 1009 + 1004 1010 /* Extract the tpgt */ 1005 1011 target = v_req.lun[1]; 1006 1012 tpg = ACCESS_ONCE(vs_tpg[target]);
+1 -1
drivers/watchdog/w83697hf_wdt.c
··· 402 402 403 403 if (!found) { 404 404 pr_err("No W83697HF/HG could be found\n"); 405 - ret = -EIO; 405 + ret = -ENODEV; 406 406 goto out; 407 407 } 408 408
+1 -4
fs/bio-integrity.c
··· 458 458 struct blk_integrity_exchg bix; 459 459 struct bio_vec *bv; 460 460 sector_t sector = bio->bi_integrity->bip_iter.bi_sector; 461 - unsigned int sectors, total, ret; 461 + unsigned int sectors, ret = 0; 462 462 void *prot_buf = bio->bi_integrity->bip_buf; 463 463 int i; 464 464 465 - ret = total = 0; 466 465 bix.disk_name = bio->bi_bdev->bd_disk->disk_name; 467 466 bix.sector_size = bi->sector_size; 468 467 ··· 483 484 sectors = bv->bv_len / bi->sector_size; 484 485 sector += sectors; 485 486 prot_buf += sectors * bi->tuple_size; 486 - total += sectors * bi->tuple_size; 487 - BUG_ON(total > bio->bi_integrity->bip_iter.bi_size); 488 487 489 488 kunmap_atomic(kaddr); 490 489 }
+1 -10
fs/ceph/acl.c
··· 54 54 return acl; 55 55 } 56 56 57 - void ceph_forget_all_cached_acls(struct inode *inode) 58 - { 59 - forget_all_cached_acls(inode); 60 - } 61 - 62 57 struct posix_acl *ceph_get_acl(struct inode *inode, int type) 63 58 { 64 59 int size; ··· 155 160 goto out_dput; 156 161 } 157 162 158 - if (value) 159 - ret = __ceph_setxattr(dentry, name, value, size, 0); 160 - else 161 - ret = __ceph_removexattr(dentry, name); 162 - 163 + ret = __ceph_setxattr(dentry, name, value, size, 0); 163 164 if (ret) { 164 165 if (new_mode != old_mode) { 165 166 newattrs.ia_mode = old_mode;
+17 -6
fs/ceph/dir.c
··· 100 100 return p & 0xffffffff; 101 101 } 102 102 103 + static int fpos_cmp(loff_t l, loff_t r) 104 + { 105 + int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r)); 106 + if (v) 107 + return v; 108 + return (int)(fpos_off(l) - fpos_off(r)); 109 + } 110 + 103 111 /* 104 112 * When possible, we try to satisfy a readdir by peeking at the 105 113 * dcache. We make this work by carefully ordering dentries on ··· 164 156 if (!d_unhashed(dentry) && dentry->d_inode && 165 157 ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && 166 158 ceph_ino(dentry->d_inode) != CEPH_INO_CEPH && 167 - ctx->pos <= di->offset) 159 + fpos_cmp(ctx->pos, di->offset) <= 0) 168 160 break; 169 161 dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry, 170 162 dentry->d_name.len, dentry->d_name.name, di->offset, ··· 703 695 ceph_mdsc_put_request(req); 704 696 705 697 if (!err) 706 - err = ceph_init_acl(dentry, dentry->d_inode, dir); 707 - 708 - if (err) 698 + ceph_init_acl(dentry, dentry->d_inode, dir); 699 + else 709 700 d_drop(dentry); 710 701 return err; 711 702 } ··· 742 735 if (!err && !req->r_reply_info.head->is_dentry) 743 736 err = ceph_handle_notrace_create(dir, dentry); 744 737 ceph_mdsc_put_request(req); 745 - if (err) 738 + if (!err) 739 + ceph_init_acl(dentry, dentry->d_inode, dir); 740 + else 746 741 d_drop(dentry); 747 742 return err; 748 743 } ··· 785 776 err = ceph_handle_notrace_create(dir, dentry); 786 777 ceph_mdsc_put_request(req); 787 778 out: 788 - if (err < 0) 779 + if (!err) 780 + ceph_init_acl(dentry, dentry->d_inode, dir); 781 + else 789 782 d_drop(dentry); 790 783 return err; 791 784 }
+1
fs/ceph/file.c
··· 286 286 } else { 287 287 dout("atomic_open finish_open on dn %p\n", dn); 288 288 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { 289 + ceph_init_acl(dentry, dentry->d_inode, dir); 289 290 *opened |= FILE_CREATED; 290 291 } 291 292 err = finish_open(file, dentry, ceph_open, opened);
+28 -4
fs/ceph/super.c
··· 144 144 Opt_ino32, 145 145 Opt_noino32, 146 146 Opt_fscache, 147 - Opt_nofscache 147 + Opt_nofscache, 148 + #ifdef CONFIG_CEPH_FS_POSIX_ACL 149 + Opt_acl, 150 + #endif 151 + Opt_noacl 148 152 }; 149 153 150 154 static match_table_t fsopt_tokens = { ··· 176 172 {Opt_noino32, "noino32"}, 177 173 {Opt_fscache, "fsc"}, 178 174 {Opt_nofscache, "nofsc"}, 175 + #ifdef CONFIG_CEPH_FS_POSIX_ACL 176 + {Opt_acl, "acl"}, 177 + #endif 178 + {Opt_noacl, "noacl"}, 179 179 {-1, NULL} 180 180 }; 181 181 ··· 278 270 break; 279 271 case Opt_nofscache: 280 272 fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE; 273 + break; 274 + #ifdef CONFIG_CEPH_FS_POSIX_ACL 275 + case Opt_acl: 276 + fsopt->sb_flags |= MS_POSIXACL; 277 + break; 278 + #endif 279 + case Opt_noacl: 280 + fsopt->sb_flags &= ~MS_POSIXACL; 281 281 break; 282 282 default: 283 283 BUG_ON(token); ··· 453 437 seq_puts(m, ",fsc"); 454 438 else 455 439 seq_puts(m, ",nofsc"); 440 + 441 + #ifdef CONFIG_CEPH_FS_POSIX_ACL 442 + if (fsopt->sb_flags & MS_POSIXACL) 443 + seq_puts(m, ",acl"); 444 + else 445 + seq_puts(m, ",noacl"); 446 + #endif 456 447 457 448 if (fsopt->wsize) 458 449 seq_printf(m, ",wsize=%d", fsopt->wsize); ··· 842 819 843 820 s->s_flags = fsc->mount_options->sb_flags; 844 821 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */ 845 - #ifdef CONFIG_CEPH_FS_POSIX_ACL 846 - s->s_flags |= MS_POSIXACL; 847 - #endif 848 822 849 823 s->s_xattr = ceph_xattr_handlers; 850 824 s->s_fs_info = fsc; ··· 931 911 struct ceph_options *opt = NULL; 932 912 933 913 dout("ceph_mount\n"); 914 + 915 + #ifdef CONFIG_CEPH_FS_POSIX_ACL 916 + flags |= MS_POSIXACL; 917 + #endif 934 918 err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path); 935 919 if (err < 0) { 936 920 res = ERR_PTR(err);
+6 -1
fs/ceph/super.h
··· 13 13 #include <linux/wait.h> 14 14 #include <linux/writeback.h> 15 15 #include <linux/slab.h> 16 + #include <linux/posix_acl.h> 16 17 17 18 #include <linux/ceph/libceph.h> 18 19 ··· 744 743 struct posix_acl *ceph_get_acl(struct inode *, int); 745 744 int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type); 746 745 int ceph_init_acl(struct dentry *, struct inode *, struct inode *); 747 - void ceph_forget_all_cached_acls(struct inode *inode); 746 + 747 + static inline void ceph_forget_all_cached_acls(struct inode *inode) 748 + { 749 + forget_all_cached_acls(inode); 750 + } 748 751 749 752 #else 750 753
+40 -14
fs/ceph/xattr.c
··· 12 12 #define XATTR_CEPH_PREFIX "ceph." 13 13 #define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1) 14 14 15 + static int __remove_xattr(struct ceph_inode_info *ci, 16 + struct ceph_inode_xattr *xattr); 17 + 15 18 /* 16 19 * List of handlers for synthetic system.* attributes. Other 17 20 * attributes are handled directly. ··· 322 319 static int __set_xattr(struct ceph_inode_info *ci, 323 320 const char *name, int name_len, 324 321 const char *val, int val_len, 325 - int dirty, 326 - int should_free_name, int should_free_val, 322 + int flags, int update_xattr, 327 323 struct ceph_inode_xattr **newxattr) 328 324 { 329 325 struct rb_node **p; ··· 351 349 xattr = NULL; 352 350 } 353 351 352 + if (update_xattr) { 353 + int err = 0; 354 + if (xattr && (flags & XATTR_CREATE)) 355 + err = -EEXIST; 356 + else if (!xattr && (flags & XATTR_REPLACE)) 357 + err = -ENODATA; 358 + if (err) { 359 + kfree(name); 360 + kfree(val); 361 + return err; 362 + } 363 + if (update_xattr < 0) { 364 + if (xattr) 365 + __remove_xattr(ci, xattr); 366 + kfree(name); 367 + return 0; 368 + } 369 + } 370 + 354 371 if (!xattr) { 355 372 new = 1; 356 373 xattr = *newxattr; 357 374 xattr->name = name; 358 375 xattr->name_len = name_len; 359 - xattr->should_free_name = should_free_name; 376 + xattr->should_free_name = update_xattr; 360 377 361 378 ci->i_xattrs.count++; 362 379 dout("__set_xattr count=%d\n", ci->i_xattrs.count); ··· 385 364 if (xattr->should_free_val) 386 365 kfree((void *)xattr->val); 387 366 388 - if (should_free_name) { 367 + if (update_xattr) { 389 368 kfree((void *)name); 390 369 name = xattr->name; 391 370 } ··· 400 379 xattr->val = ""; 401 380 402 381 xattr->val_len = val_len; 403 - xattr->dirty = dirty; 404 - xattr->should_free_val = (val && should_free_val); 382 + xattr->dirty = update_xattr; 383 + xattr->should_free_val = (val && update_xattr); 405 384 406 385 if (new) { 407 386 rb_link_node(&xattr->node, parent, p); ··· 463 442 struct ceph_inode_xattr *xattr) 464 
443 { 465 444 if (!xattr) 466 - return -EOPNOTSUPP; 445 + return -ENODATA; 467 446 468 447 rb_erase(&xattr->node, &ci->i_xattrs.index); 469 448 ··· 609 588 p += len; 610 589 611 590 err = __set_xattr(ci, name, namelen, val, len, 612 - 0, 0, 0, &xattrs[numattr]); 591 + 0, 0, &xattrs[numattr]); 613 592 614 593 if (err < 0) 615 594 goto bad; ··· 871 850 872 851 dout("setxattr value=%.*s\n", (int)size, value); 873 852 853 + if (!value) 854 + flags |= CEPH_XATTR_REMOVE; 855 + 874 856 /* do request */ 875 857 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR, 876 858 USE_AUTH_MDS); ··· 916 892 struct ceph_inode_info *ci = ceph_inode(inode); 917 893 int issued; 918 894 int err; 919 - int dirty; 895 + int dirty = 0; 920 896 int name_len = strlen(name); 921 897 int val_len = size; 922 898 char *newname = NULL; ··· 977 953 goto retry; 978 954 } 979 955 980 - err = __set_xattr(ci, newname, name_len, newval, 981 - val_len, 1, 1, 1, &xattr); 956 + err = __set_xattr(ci, newname, name_len, newval, val_len, 957 + flags, value ? 1 : -1, &xattr); 982 958 983 - dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); 984 - ci->i_xattrs.dirty = true; 985 - inode->i_ctime = CURRENT_TIME; 959 + if (!err) { 960 + dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); 961 + ci->i_xattrs.dirty = true; 962 + inode->i_ctime = CURRENT_TIME; 963 + } 986 964 987 965 spin_unlock(&ci->i_ceph_lock); 988 966 if (dirty)
+24 -9
fs/cifs/cifsacl.c
··· 865 865 return rc; 866 866 } 867 867 868 - static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, 869 - __u16 fid, u32 *pacllen) 868 + struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, 869 + const struct cifs_fid *cifsfid, u32 *pacllen) 870 870 { 871 871 struct cifs_ntsd *pntsd = NULL; 872 872 unsigned int xid; ··· 877 877 return ERR_CAST(tlink); 878 878 879 879 xid = get_xid(); 880 - rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen); 880 + rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd, 881 + pacllen); 881 882 free_xid(xid); 882 883 883 884 cifs_put_tlink(tlink); ··· 947 946 if (!open_file) 948 947 return get_cifs_acl_by_path(cifs_sb, path, pacllen); 949 948 950 - pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->fid.netfid, pacllen); 949 + pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen); 951 950 cifsFileInfo_put(open_file); 952 951 return pntsd; 953 952 } ··· 1007 1006 /* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */ 1008 1007 int 1009 1008 cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, 1010 - struct inode *inode, const char *path, const __u16 *pfid) 1009 + struct inode *inode, const char *path, 1010 + const struct cifs_fid *pfid) 1011 1011 { 1012 1012 struct cifs_ntsd *pntsd = NULL; 1013 1013 u32 acllen = 0; 1014 1014 int rc = 0; 1015 + struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); 1016 + struct cifs_tcon *tcon; 1015 1017 1016 1018 cifs_dbg(NOISY, "converting ACL to mode for %s\n", path); 1017 1019 1018 - if (pfid) 1019 - pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen); 1020 - else 1021 - pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen); 1020 + if (IS_ERR(tlink)) 1021 + return PTR_ERR(tlink); 1022 + tcon = tlink_tcon(tlink); 1022 1023 1024 + if (pfid && (tcon->ses->server->ops->get_acl_by_fid)) 1025 + pntsd = tcon->ses->server->ops->get_acl_by_fid(cifs_sb, pfid, 1026 + &acllen); 1027 + else if 
(tcon->ses->server->ops->get_acl) 1028 + pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path, 1029 + &acllen); 1030 + else { 1031 + cifs_put_tlink(tlink); 1032 + return -EOPNOTSUPP; 1033 + } 1023 1034 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */ 1024 1035 if (IS_ERR(pntsd)) { 1025 1036 rc = PTR_ERR(pntsd); ··· 1042 1029 if (rc) 1043 1030 cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc); 1044 1031 } 1032 + 1033 + cifs_put_tlink(tlink); 1045 1034 1046 1035 return rc; 1047 1036 }
+2
fs/cifs/cifsglob.h
··· 398 398 const struct nls_table *, int); 399 399 struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *, 400 400 const char *, u32 *); 401 + struct cifs_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *, 402 + const struct cifs_fid *, u32 *); 401 403 int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *, 402 404 int); 403 405 };
+4 -2
fs/cifs/cifsproto.h
··· 151 151 152 152 extern int cifs_get_inode_info(struct inode **inode, const char *full_path, 153 153 FILE_ALL_INFO *data, struct super_block *sb, 154 - int xid, const __u16 *fid); 154 + int xid, const struct cifs_fid *fid); 155 155 extern int cifs_get_inode_info_unix(struct inode **pinode, 156 156 const unsigned char *search_path, 157 157 struct super_block *sb, unsigned int xid); ··· 162 162 const unsigned int xid); 163 163 extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, 164 164 struct cifs_fattr *fattr, struct inode *inode, 165 - const char *path, const __u16 *pfid); 165 + const char *path, const struct cifs_fid *pfid); 166 166 extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64, 167 167 kuid_t, kgid_t); 168 168 extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *, 169 169 const char *, u32 *); 170 + extern struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *, 171 + const struct cifs_fid *, u32 *); 170 172 extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *, 171 173 const char *, int); 172 174
+1 -1
fs/cifs/dir.c
··· 378 378 xid); 379 379 else { 380 380 rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, 381 - xid, &fid->netfid); 381 + xid, fid); 382 382 if (newinode) { 383 383 if (server->ops->set_lease_key) 384 384 server->ops->set_lease_key(newinode, fid);
+35 -4
fs/cifs/file.c
··· 244 244 xid); 245 245 else 246 246 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, 247 - xid, &fid->netfid); 247 + xid, fid); 248 248 249 249 out: 250 250 kfree(buf); ··· 2389 2389 unsigned long nr_segs, loff_t *poffset) 2390 2390 { 2391 2391 unsigned long nr_pages, i; 2392 - size_t copied, len, cur_len; 2392 + size_t bytes, copied, len, cur_len; 2393 2393 ssize_t total_written = 0; 2394 2394 loff_t offset; 2395 2395 struct iov_iter it; ··· 2444 2444 2445 2445 save_len = cur_len; 2446 2446 for (i = 0; i < nr_pages; i++) { 2447 - copied = min_t(const size_t, cur_len, PAGE_SIZE); 2447 + bytes = min_t(const size_t, cur_len, PAGE_SIZE); 2448 2448 copied = iov_iter_copy_from_user(wdata->pages[i], &it, 2449 - 0, copied); 2449 + 0, bytes); 2450 2450 cur_len -= copied; 2451 2451 iov_iter_advance(&it, copied); 2452 + /* 2453 + * If we didn't copy as much as we expected, then that 2454 + * may mean we trod into an unmapped area. Stop copying 2455 + * at that point. On the next pass through the big 2456 + * loop, we'll likely end up getting a zero-length 2457 + * write and bailing out of it. 2458 + */ 2459 + if (copied < bytes) 2460 + break; 2452 2461 } 2453 2462 cur_len = save_len - cur_len; 2463 + 2464 + /* 2465 + * If we have no data to send, then that probably means that 2466 + * the copy above failed altogether. That's most likely because 2467 + * the address in the iovec was bogus. Set the rc to -EFAULT, 2468 + * free anything we allocated and bail out. 2469 + */ 2470 + if (!cur_len) { 2471 + for (i = 0; i < nr_pages; i++) 2472 + put_page(wdata->pages[i]); 2473 + kfree(wdata); 2474 + rc = -EFAULT; 2475 + break; 2476 + } 2477 + 2478 + /* 2479 + * i + 1 now represents the number of pages we actually used in 2480 + * the copy phase above. Bring nr_pages down to that, and free 2481 + * any pages that we didn't use. 
2482 + */ 2483 + for ( ; nr_pages > i + 1; nr_pages--) 2484 + put_page(wdata->pages[nr_pages - 1]); 2454 2485 2455 2486 wdata->sync_mode = WB_SYNC_ALL; 2456 2487 wdata->nr_pages = nr_pages;
+1 -1
fs/cifs/inode.c
··· 677 677 int 678 678 cifs_get_inode_info(struct inode **inode, const char *full_path, 679 679 FILE_ALL_INFO *data, struct super_block *sb, int xid, 680 - const __u16 *fid) 680 + const struct cifs_fid *fid) 681 681 { 682 682 bool validinum = false; 683 683 __u16 srchflgs;
+1
fs/cifs/smb1ops.c
··· 1073 1073 #endif /* CIFS_XATTR */ 1074 1074 #ifdef CONFIG_CIFS_ACL 1075 1075 .get_acl = get_cifs_acl, 1076 + .get_acl_by_fid = get_cifs_acl_by_fid, 1076 1077 .set_acl = set_cifs_acl, 1077 1078 #endif /* CIFS_ACL */ 1078 1079 };
+3
fs/cifs/smb2glob.h
··· 57 57 #define SMB2_CMACAES_SIZE (16) 58 58 #define SMB3_SIGNKEY_SIZE (16) 59 59 60 + /* Maximum buffer size value we can send with 1 credit */ 61 + #define SMB2_MAX_BUFFER_SIZE 65536 62 + 60 63 #endif /* _SMB2_GLOB_H */
+4 -10
fs/cifs/smb2ops.c
··· 182 182 /* start with specified wsize, or default */ 183 183 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE; 184 184 wsize = min_t(unsigned int, wsize, server->max_write); 185 - /* 186 - * limit write size to 2 ** 16, because we don't support multicredit 187 - * requests now. 188 - */ 189 - wsize = min_t(unsigned int, wsize, 2 << 15); 185 + /* set it to the maximum buffer size value we can send with 1 credit */ 186 + wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); 190 187 191 188 return wsize; 192 189 } ··· 197 200 /* start with specified rsize, or default */ 198 201 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE; 199 202 rsize = min_t(unsigned int, rsize, server->max_read); 200 - /* 201 - * limit write size to 2 ** 16, because we don't support multicredit 202 - * requests now. 203 - */ 204 - rsize = min_t(unsigned int, rsize, 2 << 15); 203 + /* set it to the maximum buffer size value we can send with 1 credit */ 204 + rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE); 205 205 206 206 return rsize; 207 207 }
+3 -1
fs/cifs/smb2pdu.c
··· 413 413 414 414 /* SMB2 only has an extended negflavor */ 415 415 server->negflavor = CIFS_NEGFLAVOR_EXTENDED; 416 - server->maxBuf = le32_to_cpu(rsp->MaxTransactSize); 416 + /* set it to the maximum buffer size value we can send with 1 credit */ 417 + server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize), 418 + SMB2_MAX_BUFFER_SIZE); 417 419 server->max_read = le32_to_cpu(rsp->MaxReadSize); 418 420 server->max_write = le32_to_cpu(rsp->MaxWriteSize); 419 421 /* BB Do we need to validate the SecurityMode? */
+2
fs/ext4/ext4.h
··· 771 771 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \ 772 772 (einode)->xtime.tv_sec = \ 773 773 (signed)le32_to_cpu((raw_inode)->xtime); \ 774 + else \ 775 + (einode)->xtime.tv_sec = 0; \ 774 776 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \ 775 777 ext4_decode_extra_time(&(einode)->xtime, \ 776 778 raw_inode->xtime ## _extra); \
+1
fs/ext4/extents.c
··· 3906 3906 } else 3907 3907 err = ret; 3908 3908 map->m_flags |= EXT4_MAP_MAPPED; 3909 + map->m_pblk = newblock; 3909 3910 if (allocated > map->m_len) 3910 3911 allocated = map->m_len; 3911 3912 map->m_len = allocated;
+2 -1
fs/ext4/ioctl.c
··· 140 140 handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2); 141 141 if (IS_ERR(handle)) { 142 142 err = -EINVAL; 143 - goto swap_boot_out; 143 + goto journal_err_out; 144 144 } 145 145 146 146 /* Protect extent tree against block allocations via delalloc */ ··· 198 198 199 199 ext4_double_up_write_data_sem(inode, inode_bl); 200 200 201 + journal_err_out: 201 202 ext4_inode_resume_unlocked_dio(inode); 202 203 ext4_inode_resume_unlocked_dio(inode_bl); 203 204
+21 -13
fs/ext4/resize.c
··· 243 243 ext4_group_t group; 244 244 ext4_group_t last_group; 245 245 unsigned overhead; 246 + __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0; 246 247 247 248 BUG_ON(flex_gd->count == 0 || group_data == NULL); 248 249 ··· 267 266 src_group++; 268 267 for (; src_group <= last_group; src_group++) { 269 268 overhead = ext4_group_overhead_blocks(sb, src_group); 270 - if (overhead != 0) 269 + if (overhead == 0) 271 270 last_blk += group_data[src_group - group].blocks_count; 272 271 else 273 272 break; ··· 281 280 group = ext4_get_group_number(sb, start_blk - 1); 282 281 group -= group_data[0].group; 283 282 group_data[group].free_blocks_count--; 284 - if (flexbg_size > 1) 285 - flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT; 283 + flex_gd->bg_flags[group] &= uninit_mask; 286 284 } 287 285 288 286 /* Allocate inode bitmaps */ ··· 292 292 group = ext4_get_group_number(sb, start_blk - 1); 293 293 group -= group_data[0].group; 294 294 group_data[group].free_blocks_count--; 295 - if (flexbg_size > 1) 296 - flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT; 295 + flex_gd->bg_flags[group] &= uninit_mask; 297 296 } 298 297 299 298 /* Allocate inode tables */ 300 299 for (; it_index < flex_gd->count; it_index++) { 301 - if (start_blk + EXT4_SB(sb)->s_itb_per_group > last_blk) 300 + unsigned int itb = EXT4_SB(sb)->s_itb_per_group; 301 + ext4_fsblk_t next_group_start; 302 + 303 + if (start_blk + itb > last_blk) 302 304 goto next_group; 303 305 group_data[it_index].inode_table = start_blk; 304 - group = ext4_get_group_number(sb, start_blk - 1); 306 + group = ext4_get_group_number(sb, start_blk); 307 + next_group_start = ext4_group_first_block_no(sb, group + 1); 305 308 group -= group_data[0].group; 306 - group_data[group].free_blocks_count -= 307 - EXT4_SB(sb)->s_itb_per_group; 308 - if (flexbg_size > 1) 309 - flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT; 310 309 310 + if (start_blk + itb > next_group_start) { 311 + flex_gd->bg_flags[group + 1] &= 
uninit_mask; 312 + overhead = start_blk + itb - next_group_start; 313 + group_data[group + 1].free_blocks_count -= overhead; 314 + itb -= overhead; 315 + } 316 + 317 + group_data[group].free_blocks_count -= itb; 318 + flex_gd->bg_flags[group] &= uninit_mask; 311 319 start_blk += EXT4_SB(sb)->s_itb_per_group; 312 320 } 313 321 ··· 409 401 start = ext4_group_first_block_no(sb, group); 410 402 group -= flex_gd->groups[0].group; 411 403 412 - count2 = sb->s_blocksize * 8 - (block - start); 404 + count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start); 413 405 if (count2 > count) 414 406 count2 = count; 415 407 ··· 628 620 if (err) 629 621 goto out; 630 622 count = group_table_count[j]; 631 - start = group_data[i].block_bitmap; 623 + start = (&group_data[i].block_bitmap)[j]; 632 624 block = start; 633 625 } 634 626
+13 -7
fs/ext4/super.c
··· 3695 3695 for (i = 0; i < 4; i++) 3696 3696 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); 3697 3697 sbi->s_def_hash_version = es->s_def_hash_version; 3698 - i = le32_to_cpu(es->s_flags); 3699 - if (i & EXT2_FLAGS_UNSIGNED_HASH) 3700 - sbi->s_hash_unsigned = 3; 3701 - else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { 3698 + if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) { 3699 + i = le32_to_cpu(es->s_flags); 3700 + if (i & EXT2_FLAGS_UNSIGNED_HASH) 3701 + sbi->s_hash_unsigned = 3; 3702 + else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { 3702 3703 #ifdef __CHAR_UNSIGNED__ 3703 - es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); 3704 - sbi->s_hash_unsigned = 3; 3704 + if (!(sb->s_flags & MS_RDONLY)) 3705 + es->s_flags |= 3706 + cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); 3707 + sbi->s_hash_unsigned = 3; 3705 3708 #else 3706 - es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); 3709 + if (!(sb->s_flags & MS_RDONLY)) 3710 + es->s_flags |= 3711 + cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); 3707 3712 #endif 3713 + } 3708 3714 } 3709 3715 3710 3716 /* Handle clustersize */
+11 -22
fs/fs-writeback.c
··· 40 40 struct wb_writeback_work { 41 41 long nr_pages; 42 42 struct super_block *sb; 43 - /* 44 - * Write only inodes dirtied before this time. Don't forget to set 45 - * older_than_this_is_set when you set this. 46 - */ 47 - unsigned long older_than_this; 43 + unsigned long *older_than_this; 48 44 enum writeback_sync_modes sync_mode; 49 45 unsigned int tagged_writepages:1; 50 46 unsigned int for_kupdate:1; 51 47 unsigned int range_cyclic:1; 52 48 unsigned int for_background:1; 53 49 unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ 54 - unsigned int older_than_this_is_set:1; 55 50 enum wb_reason reason; /* why was writeback initiated? */ 56 51 57 52 struct list_head list; /* pending work list */ ··· 247 252 int do_sb_sort = 0; 248 253 int moved = 0; 249 254 250 - WARN_ON_ONCE(!work->older_than_this_is_set); 251 255 while (!list_empty(delaying_queue)) { 252 256 inode = wb_inode(delaying_queue->prev); 253 - if (inode_dirtied_after(inode, work->older_than_this)) 257 + if (work->older_than_this && 258 + inode_dirtied_after(inode, *work->older_than_this)) 254 259 break; 255 260 list_move(&inode->i_wb_list, &tmp); 256 261 moved++; ··· 737 742 .sync_mode = WB_SYNC_NONE, 738 743 .range_cyclic = 1, 739 744 .reason = reason, 740 - .older_than_this = jiffies, 741 - .older_than_this_is_set = 1, 742 745 }; 743 746 744 747 spin_lock(&wb->list_lock); ··· 795 802 { 796 803 unsigned long wb_start = jiffies; 797 804 long nr_pages = work->nr_pages; 805 + unsigned long oldest_jif; 798 806 struct inode *inode; 799 807 long progress; 800 808 801 - if (!work->older_than_this_is_set) { 802 - work->older_than_this = jiffies; 803 - work->older_than_this_is_set = 1; 804 - } 809 + oldest_jif = jiffies; 810 + work->older_than_this = &oldest_jif; 805 811 806 812 spin_lock(&wb->list_lock); 807 813 for (;;) { ··· 834 842 * safe. 
835 843 */ 836 844 if (work->for_kupdate) { 837 - work->older_than_this = jiffies - 845 + oldest_jif = jiffies - 838 846 msecs_to_jiffies(dirty_expire_interval * 10); 839 847 } else if (work->for_background) 840 - work->older_than_this = jiffies; 848 + oldest_jif = jiffies; 841 849 842 850 trace_writeback_start(wb->bdi, work); 843 851 if (list_empty(&wb->b_io)) ··· 1349 1357 1350 1358 /** 1351 1359 * sync_inodes_sb - sync sb inode pages 1352 - * @sb: the superblock 1353 - * @older_than_this: timestamp 1360 + * @sb: the superblock 1354 1361 * 1355 1362 * This function writes and waits on any dirty inode belonging to this 1356 - * superblock that has been dirtied before given timestamp. 1363 + * super_block. 1357 1364 */ 1358 - void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this) 1365 + void sync_inodes_sb(struct super_block *sb) 1359 1366 { 1360 1367 DECLARE_COMPLETION_ONSTACK(done); 1361 1368 struct wb_writeback_work work = { 1362 1369 .sb = sb, 1363 1370 .sync_mode = WB_SYNC_ALL, 1364 1371 .nr_pages = LONG_MAX, 1365 - .older_than_this = older_than_this, 1366 - .older_than_this_is_set = 1, 1367 1372 .range_cyclic = 0, 1368 1373 .done = &done, 1369 1374 .reason = WB_REASON_SYNC,
+5
fs/fscache/object-list.c
··· 50 50 struct fscache_object *xobj; 51 51 struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL; 52 52 53 + ASSERT(RB_EMPTY_NODE(&obj->objlist_link)); 54 + 53 55 write_lock(&fscache_object_list_lock); 54 56 55 57 while (*p) { ··· 77 75 */ 78 76 void fscache_objlist_remove(struct fscache_object *obj) 79 77 { 78 + if (RB_EMPTY_NODE(&obj->objlist_link)) 79 + return; 80 + 80 81 write_lock(&fscache_object_list_lock); 81 82 82 83 BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
+3
fs/fscache/object.c
··· 314 314 object->cache = cache; 315 315 object->cookie = cookie; 316 316 object->parent = NULL; 317 + #ifdef CONFIG_FSCACHE_OBJECT_LIST 318 + RB_CLEAR_NODE(&object->objlist_link); 319 + #endif 317 320 318 321 object->oob_event_mask = 0; 319 322 for (t = object->oob_table; t->events; t++)
+1 -1
fs/hfsplus/options.c
··· 75 75 int token; 76 76 77 77 if (!input) 78 - return 0; 78 + return 1; 79 79 80 80 while ((p = strsep(&input, ",")) != NULL) { 81 81 if (!*p)
+4 -2
fs/jbd2/transaction.c
··· 514 514 * similarly constrained call sites 515 515 */ 516 516 ret = start_this_handle(journal, handle, GFP_NOFS); 517 - if (ret < 0) 517 + if (ret < 0) { 518 518 jbd2_journal_free_reserved(handle); 519 + return ret; 520 + } 519 521 handle->h_type = type; 520 522 handle->h_line_no = line_no; 521 - return ret; 523 + return 0; 522 524 } 523 525 EXPORT_SYMBOL(jbd2_journal_start_reserved); 524 526
+2
fs/jfs/acl.c
··· 86 86 rc = posix_acl_equiv_mode(acl, &inode->i_mode); 87 87 if (rc < 0) 88 88 return rc; 89 + inode->i_ctime = CURRENT_TIME; 90 + mark_inode_dirty(inode); 89 91 if (rc == 0) 90 92 acl = NULL; 91 93 break;
+7 -1
fs/kernfs/mount.c
··· 94 94 * @fs_type: file_system_type of the fs being mounted 95 95 * @flags: mount flags specified for the mount 96 96 * @root: kernfs_root of the hierarchy being mounted 97 + * @new_sb_created: tell the caller if we allocated a new superblock 97 98 * @ns: optional namespace tag of the mount 98 99 * 99 100 * This is to be called from each kernfs user's file_system_type->mount() ··· 105 104 * The return value can be passed to the vfs layer verbatim. 106 105 */ 107 106 struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags, 108 - struct kernfs_root *root, const void *ns) 107 + struct kernfs_root *root, bool *new_sb_created, 108 + const void *ns) 109 109 { 110 110 struct super_block *sb; 111 111 struct kernfs_super_info *info; ··· 124 122 kfree(info); 125 123 if (IS_ERR(sb)) 126 124 return ERR_CAST(sb); 125 + 126 + if (new_sb_created) 127 + *new_sb_created = !sb->s_root; 128 + 127 129 if (!sb->s_root) { 128 130 error = kernfs_fill_super(sb); 129 131 if (error) {
+7 -4
fs/nfs/delegation.c
··· 659 659 660 660 rcu_read_lock(); 661 661 delegation = rcu_dereference(NFS_I(inode)->delegation); 662 + if (delegation == NULL) 663 + goto out_enoent; 662 664 663 - if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) { 664 - rcu_read_unlock(); 665 - return -ENOENT; 666 - } 665 + if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) 666 + goto out_enoent; 667 667 nfs_mark_return_delegation(server, delegation); 668 668 rcu_read_unlock(); 669 669 670 670 nfs_delegation_run_state_manager(clp); 671 671 return 0; 672 + out_enoent: 673 + rcu_read_unlock(); 674 + return -ENOENT; 672 675 } 673 676 674 677 static struct inode *
+10 -4
fs/nfs/inode.c
··· 164 164 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) { 165 165 nfs_fscache_invalidate(inode); 166 166 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 167 - | NFS_INO_INVALID_LABEL 168 167 | NFS_INO_INVALID_DATA 169 168 | NFS_INO_INVALID_ACCESS 170 169 | NFS_INO_INVALID_ACL 171 170 | NFS_INO_REVAL_PAGECACHE; 172 171 } else 173 172 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 174 - | NFS_INO_INVALID_LABEL 175 173 | NFS_INO_INVALID_ACCESS 176 174 | NFS_INO_INVALID_ACL 177 175 | NFS_INO_REVAL_PAGECACHE; 176 + nfs_zap_label_cache_locked(nfsi); 178 177 } 179 178 180 179 void nfs_zap_caches(struct inode *inode) ··· 265 266 } 266 267 267 268 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 269 + static void nfs_clear_label_invalid(struct inode *inode) 270 + { 271 + spin_lock(&inode->i_lock); 272 + NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_LABEL; 273 + spin_unlock(&inode->i_lock); 274 + } 275 + 268 276 void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, 269 277 struct nfs4_label *label) 270 278 { ··· 289 283 __func__, 290 284 (char *)label->label, 291 285 label->len, error); 286 + nfs_clear_label_invalid(inode); 292 287 } 293 288 } 294 289 ··· 1655 1648 inode->i_blocks = fattr->du.nfs2.blocks; 1656 1649 1657 1650 /* Update attrtimeo value if we're out of the unstable period */ 1658 - if (invalid & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL)) { 1651 + if (invalid & NFS_INO_INVALID_ATTR) { 1659 1652 nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE); 1660 1653 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); 1661 1654 nfsi->attrtimeo_timestamp = now; ··· 1668 1661 } 1669 1662 } 1670 1663 invalid &= ~NFS_INO_INVALID_ATTR; 1671 - invalid &= ~NFS_INO_INVALID_LABEL; 1672 1664 /* Don't invalidate the data if we were to blame */ 1673 1665 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) 1674 1666 || S_ISLNK(inode->i_mode)))
+11 -1
fs/nfs/internal.h
··· 176 176 extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *, 177 177 struct nfs_fh *); 178 178 extern int nfs4_update_server(struct nfs_server *server, const char *hostname, 179 - struct sockaddr *sap, size_t salen); 179 + struct sockaddr *sap, size_t salen, 180 + struct net *net); 180 181 extern void nfs_free_server(struct nfs_server *server); 181 182 extern struct nfs_server *nfs_clone_server(struct nfs_server *, 182 183 struct nfs_fh *, ··· 280 279 } 281 280 return; 282 281 } 282 + 283 + static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi) 284 + { 285 + if (nfs_server_capable(&nfsi->vfs_inode, NFS_CAP_SECURITY_LABEL)) 286 + nfsi->cache_validity |= NFS_INO_INVALID_LABEL; 287 + } 283 288 #else 284 289 static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; } 285 290 static inline void nfs4_label_free(void *label) {} 291 + static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi) 292 + { 293 + } 286 294 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 287 295 288 296 /* proc.c */
+1
fs/nfs/nfs3proc.c
··· 18 18 #include <linux/lockd/bind.h> 19 19 #include <linux/nfs_mount.h> 20 20 #include <linux/freezer.h> 21 + #include <linux/xattr.h> 21 22 22 23 #include "iostat.h" 23 24 #include "internal.h"
+4 -3
fs/nfs/nfs4client.c
··· 1135 1135 * @hostname: new end-point's hostname 1136 1136 * @sap: new end-point's socket address 1137 1137 * @salen: size of "sap" 1138 + * @net: net namespace 1138 1139 * 1139 1140 * The nfs_server must be quiescent before this function is invoked. 1140 1141 * Either its session is drained (NFSv4.1+), or its transport is ··· 1144 1143 * Returns zero on success, or a negative errno value. 1145 1144 */ 1146 1145 int nfs4_update_server(struct nfs_server *server, const char *hostname, 1147 - struct sockaddr *sap, size_t salen) 1146 + struct sockaddr *sap, size_t salen, struct net *net) 1148 1147 { 1149 1148 struct nfs_client *clp = server->nfs_client; 1150 1149 struct rpc_clnt *clnt = server->client; 1151 1150 struct xprt_create xargs = { 1152 1151 .ident = clp->cl_proto, 1153 - .net = &init_net, 1152 + .net = net, 1154 1153 .dstaddr = sap, 1155 1154 .addrlen = salen, 1156 1155 .servername = hostname, ··· 1190 1189 error = nfs4_set_client(server, hostname, sap, salen, buf, 1191 1190 clp->cl_rpcclient->cl_auth->au_flavor, 1192 1191 clp->cl_proto, clnt->cl_timeout, 1193 - clp->cl_minorversion, clp->cl_net); 1192 + clp->cl_minorversion, net); 1194 1193 nfs_put_client(clp); 1195 1194 if (error != 0) { 1196 1195 nfs_server_insert_lists(server);
+6 -4
fs/nfs/nfs4filelayout.c
··· 324 324 &rdata->res.seq_res, 325 325 task)) 326 326 return; 327 - nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context, 328 - rdata->args.lock_context, FMODE_READ); 327 + if (nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context, 328 + rdata->args.lock_context, FMODE_READ) == -EIO) 329 + rpc_exit(task, -EIO); /* lost lock, terminate I/O */ 329 330 } 330 331 331 332 static void filelayout_read_call_done(struct rpc_task *task, void *data) ··· 436 435 &wdata->res.seq_res, 437 436 task)) 438 437 return; 439 - nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context, 440 - wdata->args.lock_context, FMODE_WRITE); 438 + if (nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context, 439 + wdata->args.lock_context, FMODE_WRITE) == -EIO) 440 + rpc_exit(task, -EIO); /* lost lock, terminate I/O */ 441 441 } 442 442 443 443 static void filelayout_write_call_done(struct rpc_task *task, void *data)
+6 -6
fs/nfs/nfs4namespace.c
··· 121 121 } 122 122 123 123 static size_t nfs_parse_server_name(char *string, size_t len, 124 - struct sockaddr *sa, size_t salen, struct nfs_server *server) 124 + struct sockaddr *sa, size_t salen, struct net *net) 125 125 { 126 - struct net *net = rpc_net_ns(server->client); 127 126 ssize_t ret; 128 127 129 128 ret = rpc_pton(net, string, len, sa, salen); ··· 222 223 const struct nfs4_fs_location *location) 223 224 { 224 225 const size_t addr_bufsize = sizeof(struct sockaddr_storage); 226 + struct net *net = rpc_net_ns(NFS_SB(mountdata->sb)->client); 225 227 struct vfsmount *mnt = ERR_PTR(-ENOENT); 226 228 char *mnt_path; 227 229 unsigned int maxbuflen; ··· 248 248 continue; 249 249 250 250 mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len, 251 - mountdata->addr, addr_bufsize, 252 - NFS_SB(mountdata->sb)); 251 + mountdata->addr, addr_bufsize, net); 253 252 if (mountdata->addrlen == 0) 254 253 continue; 255 254 ··· 418 419 const struct nfs4_fs_location *location) 419 420 { 420 421 const size_t addr_bufsize = sizeof(struct sockaddr_storage); 422 + struct net *net = rpc_net_ns(server->client); 421 423 struct sockaddr *sap; 422 424 unsigned int s; 423 425 size_t salen; ··· 440 440 continue; 441 441 442 442 salen = nfs_parse_server_name(buf->data, buf->len, 443 - sap, addr_bufsize, server); 443 + sap, addr_bufsize, net); 444 444 if (salen == 0) 445 445 continue; 446 446 rpc_set_port(sap, NFS_PORT); ··· 450 450 if (hostname == NULL) 451 451 break; 452 452 453 - error = nfs4_update_server(server, hostname, sap, salen); 453 + error = nfs4_update_server(server, hostname, sap, salen, net); 454 454 kfree(hostname); 455 455 if (error == 0) 456 456 break;
+14 -10
fs/nfs/nfs4proc.c
··· 2398 2398 2399 2399 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) { 2400 2400 /* Use that stateid */ 2401 - } else if (truncate && state != NULL && nfs4_valid_open_stateid(state)) { 2401 + } else if (truncate && state != NULL) { 2402 2402 struct nfs_lockowner lockowner = { 2403 2403 .l_owner = current->files, 2404 2404 .l_pid = current->tgid, 2405 2405 }; 2406 - nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, 2407 - &lockowner); 2406 + if (!nfs4_valid_open_stateid(state)) 2407 + return -EBADF; 2408 + if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, 2409 + &lockowner) == -EIO) 2410 + return -EBADF; 2408 2411 } else 2409 2412 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 2410 2413 ··· 4014 4011 { 4015 4012 nfs4_stateid current_stateid; 4016 4013 4017 - if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode)) 4018 - return false; 4014 + /* If the current stateid represents a lost lock, then exit */ 4015 + if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO) 4016 + return true; 4019 4017 return nfs4_stateid_match(stateid, &current_stateid); 4020 4018 } 4021 4019 ··· 5832 5828 struct nfs4_lock_state *lsp; 5833 5829 struct nfs_server *server; 5834 5830 struct nfs_release_lockowner_args args; 5835 - struct nfs4_sequence_args seq_args; 5836 - struct nfs4_sequence_res seq_res; 5831 + struct nfs_release_lockowner_res res; 5837 5832 unsigned long timestamp; 5838 5833 }; 5839 5834 ··· 5840 5837 { 5841 5838 struct nfs_release_lockowner_data *data = calldata; 5842 5839 nfs40_setup_sequence(data->server, 5843 - &data->seq_args, &data->seq_res, task); 5840 + &data->args.seq_args, &data->res.seq_res, task); 5844 5841 data->timestamp = jiffies; 5845 5842 } 5846 5843 ··· 5849 5846 struct nfs_release_lockowner_data *data = calldata; 5850 5847 struct nfs_server *server = data->server; 5851 5848 5852 - nfs40_sequence_done(task, &data->seq_res); 5849 + nfs40_sequence_done(task, &data->res.seq_res); 5853 5850 5854 5851 
switch (task->tk_status) { 5855 5852 case 0: ··· 5890 5887 data = kmalloc(sizeof(*data), GFP_NOFS); 5891 5888 if (!data) 5892 5889 return -ENOMEM; 5893 - nfs4_init_sequence(&data->seq_args, &data->seq_res, 0); 5894 5890 data->lsp = lsp; 5895 5891 data->server = server; 5896 5892 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; ··· 5897 5895 data->args.lock_owner.s_dev = server->s_dev; 5898 5896 5899 5897 msg.rpc_argp = &data->args; 5898 + msg.rpc_resp = &data->res; 5899 + nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 5900 5900 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 5901 5901 return 0; 5902 5902 }
+7 -12
fs/nfs/nfs4state.c
··· 974 974 else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) { 975 975 nfs4_stateid_copy(dst, &lsp->ls_stateid); 976 976 ret = 0; 977 - smp_rmb(); 978 - if (!list_empty(&lsp->ls_seqid.list)) 979 - ret = -EWOULDBLOCK; 980 977 } 981 978 spin_unlock(&state->state_lock); 982 979 nfs4_put_lock_state(lsp); ··· 981 984 return ret; 982 985 } 983 986 984 - static int nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state) 987 + static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state) 985 988 { 986 989 const nfs4_stateid *src; 987 - int ret; 988 990 int seq; 989 991 990 992 do { ··· 992 996 if (test_bit(NFS_OPEN_STATE, &state->flags)) 993 997 src = &state->open_stateid; 994 998 nfs4_stateid_copy(dst, src); 995 - ret = 0; 996 - smp_rmb(); 997 - if (!list_empty(&state->owner->so_seqid.list)) 998 - ret = -EWOULDBLOCK; 999 999 } while (read_seqretry(&state->seqlock, seq)); 1000 - return ret; 1001 1000 } 1002 1001 1003 1002 /* ··· 1006 1015 if (ret == -EIO) 1007 1016 /* A lost lock - don't even consider delegations */ 1008 1017 goto out; 1009 - if (nfs4_copy_delegation_stateid(dst, state->inode, fmode)) 1018 + /* returns true if delegation stateid found and copied */ 1019 + if (nfs4_copy_delegation_stateid(dst, state->inode, fmode)) { 1020 + ret = 0; 1010 1021 goto out; 1022 + } 1011 1023 if (ret != -ENOENT) 1012 1024 /* nfs4_copy_delegation_stateid() didn't over-write 1013 1025 * dst, so it still has the lock stateid which we now 1014 1026 * choose to use. 1015 1027 */ 1016 1028 goto out; 1017 - ret = nfs4_copy_open_stateid(dst, state); 1029 + nfs4_copy_open_stateid(dst, state); 1030 + ret = 0; 1018 1031 out: 1019 1032 if (nfs_server_capable(state->inode, NFS_CAP_STATEID_NFSV41)) 1020 1033 dst->seqid = 0;
+1 -1
fs/notify/dnotify/dnotify.c
··· 86 86 struct fsnotify_mark *inode_mark, 87 87 struct fsnotify_mark *vfsmount_mark, 88 88 u32 mask, void *data, int data_type, 89 - const unsigned char *file_name) 89 + const unsigned char *file_name, u32 cookie) 90 90 { 91 91 struct dnotify_mark *dn_mark; 92 92 struct dnotify_struct *dn;
+5 -3
fs/notify/fanotify/fanotify.c
··· 147 147 struct fsnotify_mark *inode_mark, 148 148 struct fsnotify_mark *fanotify_mark, 149 149 u32 mask, void *data, int data_type, 150 - const unsigned char *file_name) 150 + const unsigned char *file_name, u32 cookie) 151 151 { 152 152 int ret = 0; 153 153 struct fanotify_event_info *event; ··· 192 192 193 193 ret = fsnotify_add_notify_event(group, fsn_event, fanotify_merge); 194 194 if (ret) { 195 - BUG_ON(mask & FAN_ALL_PERM_EVENTS); 195 + /* Permission events shouldn't be merged */ 196 + BUG_ON(ret == 1 && mask & FAN_ALL_PERM_EVENTS); 196 197 /* Our event wasn't used in the end. Free it. */ 197 198 fsnotify_destroy_event(group, fsn_event); 198 - ret = 0; 199 + 200 + return 0; 199 201 } 200 202 201 203 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+13
fs/notify/fanotify/fanotify_user.c
··· 698 698 struct fsnotify_group *group; 699 699 int f_flags, fd; 700 700 struct user_struct *user; 701 + struct fanotify_event_info *oevent; 701 702 702 703 pr_debug("%s: flags=%d event_f_flags=%d\n", 703 704 __func__, flags, event_f_flags); ··· 731 730 group->fanotify_data.user = user; 732 731 atomic_inc(&user->fanotify_listeners); 733 732 733 + oevent = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL); 734 + if (unlikely(!oevent)) { 735 + fd = -ENOMEM; 736 + goto out_destroy_group; 737 + } 738 + group->overflow_event = &oevent->fse; 739 + fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW); 740 + oevent->tgid = get_pid(task_tgid(current)); 741 + oevent->path.mnt = NULL; 742 + oevent->path.dentry = NULL; 743 + 734 744 group->fanotify_data.f_flags = event_f_flags; 735 745 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS 746 + oevent->response = 0; 736 747 mutex_init(&group->fanotify_data.access_mutex); 737 748 init_waitqueue_head(&group->fanotify_data.access_waitq); 738 749 INIT_LIST_HEAD(&group->fanotify_data.access_list);
+1 -1
fs/notify/fsnotify.c
··· 179 179 180 180 return group->ops->handle_event(group, to_tell, inode_mark, 181 181 vfsmount_mark, mask, data, data_is, 182 - file_name); 182 + file_name, cookie); 183 183 } 184 184 185 185 /*
+7 -1
fs/notify/group.c
··· 55 55 /* clear the notification queue of all events */ 56 56 fsnotify_flush_notify(group); 57 57 58 + /* 59 + * Destroy overflow event (we cannot use fsnotify_destroy_event() as 60 + * that deliberately ignores overflow events. 61 + */ 62 + if (group->overflow_event) 63 + group->ops->free_event(group->overflow_event); 64 + 58 65 fsnotify_put_group(group); 59 66 } 60 67 ··· 106 99 INIT_LIST_HEAD(&group->marks_list); 107 100 108 101 group->ops = ops; 109 - fsnotify_init_event(&group->overflow_event, NULL, FS_Q_OVERFLOW); 110 102 111 103 return group; 112 104 }
+1 -1
fs/notify/inotify/inotify.h
··· 27 27 struct fsnotify_mark *inode_mark, 28 28 struct fsnotify_mark *vfsmount_mark, 29 29 u32 mask, void *data, int data_type, 30 - const unsigned char *file_name); 30 + const unsigned char *file_name, u32 cookie); 31 31 32 32 extern const struct fsnotify_ops inotify_fsnotify_ops;
+2 -1
fs/notify/inotify/inotify_fsnotify.c
··· 67 67 struct fsnotify_mark *inode_mark, 68 68 struct fsnotify_mark *vfsmount_mark, 69 69 u32 mask, void *data, int data_type, 70 - const unsigned char *file_name) 70 + const unsigned char *file_name, u32 cookie) 71 71 { 72 72 struct inotify_inode_mark *i_mark; 73 73 struct inotify_event_info *event; ··· 103 103 fsn_event = &event->fse; 104 104 fsnotify_init_event(fsn_event, inode, mask); 105 105 event->wd = i_mark->wd; 106 + event->sync_cookie = cookie; 106 107 event->name_len = len; 107 108 if (len) 108 109 strcpy(event->name, file_name);
+13 -1
fs/notify/inotify/inotify_user.c
··· 495 495 496 496 /* Queue ignore event for the watch */ 497 497 inotify_handle_event(group, NULL, fsn_mark, NULL, FS_IN_IGNORED, 498 - NULL, FSNOTIFY_EVENT_NONE, NULL); 498 + NULL, FSNOTIFY_EVENT_NONE, NULL, 0); 499 499 500 500 i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); 501 501 /* remove this mark from the idr */ ··· 633 633 static struct fsnotify_group *inotify_new_group(unsigned int max_events) 634 634 { 635 635 struct fsnotify_group *group; 636 + struct inotify_event_info *oevent; 636 637 637 638 group = fsnotify_alloc_group(&inotify_fsnotify_ops); 638 639 if (IS_ERR(group)) 639 640 return group; 641 + 642 + oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL); 643 + if (unlikely(!oevent)) { 644 + fsnotify_destroy_group(group); 645 + return ERR_PTR(-ENOMEM); 646 + } 647 + group->overflow_event = &oevent->fse; 648 + fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW); 649 + oevent->wd = -1; 650 + oevent->sync_cookie = 0; 651 + oevent->name_len = 0; 640 652 641 653 group->max_events = max_events; 642 654
+15 -5
fs/notify/notification.c
··· 80 80 /* 81 81 * Add an event to the group notification queue. The group can later pull this 82 82 * event off the queue to deal with. The function returns 0 if the event was 83 - * added to the queue, 1 if the event was merged with some other queued event. 83 + * added to the queue, 1 if the event was merged with some other queued event, 84 + * 2 if the queue of events has overflown. 84 85 */ 85 86 int fsnotify_add_notify_event(struct fsnotify_group *group, 86 87 struct fsnotify_event *event, ··· 96 95 mutex_lock(&group->notification_mutex); 97 96 98 97 if (group->q_len >= group->max_events) { 98 + ret = 2; 99 99 /* Queue overflow event only if it isn't already queued */ 100 - if (list_empty(&group->overflow_event.list)) 101 - event = &group->overflow_event; 102 - ret = 1; 100 + if (!list_empty(&group->overflow_event->list)) { 101 + mutex_unlock(&group->notification_mutex); 102 + return ret; 103 + } 104 + event = group->overflow_event; 105 + goto queue; 103 106 } 104 107 105 108 if (!list_empty(list) && merge) { ··· 114 109 } 115 110 } 116 111 112 + queue: 117 113 group->q_len++; 118 114 list_add_tail(&event->list, list); 119 115 mutex_unlock(&group->notification_mutex); ··· 138 132 139 133 event = list_first_entry(&group->notification_list, 140 134 struct fsnotify_event, list); 141 - list_del(&event->list); 135 + /* 136 + * We need to init list head for the case of overflow event so that 137 + * check in fsnotify_add_notify_events() works 138 + */ 139 + list_del_init(&event->list); 142 140 group->q_len--; 143 141 144 142 return event;
+17 -10
fs/ocfs2/quota_global.c
··· 717 717 */ 718 718 if (status < 0) 719 719 mlog_errno(status); 720 + /* 721 + * Clear dq_off so that we search for the structure in quota file next 722 + * time we acquire it. The structure might be deleted and reallocated 723 + * elsewhere by another node while our dquot structure is on freelist. 724 + */ 725 + dquot->dq_off = 0; 720 726 clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); 721 727 out_trans: 722 728 ocfs2_commit_trans(osb, handle); ··· 762 756 status = ocfs2_lock_global_qf(info, 1); 763 757 if (status < 0) 764 758 goto out; 765 - if (!test_bit(DQ_READ_B, &dquot->dq_flags)) { 766 - status = ocfs2_qinfo_lock(info, 0); 767 - if (status < 0) 768 - goto out_dq; 769 - status = qtree_read_dquot(&info->dqi_gi, dquot); 770 - ocfs2_qinfo_unlock(info, 0); 771 - if (status < 0) 772 - goto out_dq; 773 - } 774 - set_bit(DQ_READ_B, &dquot->dq_flags); 759 + status = ocfs2_qinfo_lock(info, 0); 760 + if (status < 0) 761 + goto out_dq; 762 + /* 763 + * We always want to read dquot structure from disk because we don't 764 + * know what happened with it while it was on freelist. 765 + */ 766 + status = qtree_read_dquot(&info->dqi_gi, dquot); 767 + ocfs2_qinfo_unlock(info, 0); 768 + if (status < 0) 769 + goto out_dq; 775 770 776 771 OCFS2_DQUOT(dquot)->dq_use_count++; 777 772 OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
-4
fs/ocfs2/quota_local.c
··· 1303 1303 ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh); 1304 1304 1305 1305 out: 1306 - /* Clear the read bit so that next time someone uses this 1307 - * dquot he reads fresh info from disk and allocates local 1308 - * dquot structure */ 1309 - clear_bit(DQ_READ_B, &dquot->dq_flags); 1310 1306 return status; 1311 1307 } 1312 1308
+2 -3
fs/proc/page.c
··· 121 121 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon 122 122 * to make sure a given page is a thp, not a non-huge compound page. 123 123 */ 124 - else if (PageTransCompound(page) && 125 - (PageLRU(compound_trans_head(page)) || 126 - PageAnon(compound_trans_head(page)))) 124 + else if (PageTransCompound(page) && (PageLRU(compound_head(page)) || 125 + PageAnon(compound_head(page)))) 127 126 u |= 1 << KPF_THP; 128 127 129 128 /*
+11 -3
fs/quota/dquot.c
··· 581 581 dqstats_inc(DQST_LOOKUPS); 582 582 dqput(old_dquot); 583 583 old_dquot = dquot; 584 - ret = fn(dquot, priv); 585 - if (ret < 0) 586 - goto out; 584 + /* 585 + * ->release_dquot() can be racing with us. Our reference 586 + * protects us from new calls to it so just wait for any 587 + * outstanding call and recheck the DQ_ACTIVE_B after that. 588 + */ 589 + wait_on_dquot(dquot); 590 + if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { 591 + ret = fn(dquot, priv); 592 + if (ret < 0) 593 + goto out; 594 + } 587 595 spin_lock(&dq_list_lock); 588 596 /* We are safe to continue now because our dquot could not 589 597 * be moved out of the inuse list while we hold the reference */
+190 -695
fs/reiserfs/do_balan.c
··· 324 324 switch (flag) { 325 325 case M_INSERT: /* insert item into L[0] */ 326 326 327 - if (item_pos == tb->lnum[0] - 1 328 - && tb->lbytes != -1) { 327 + if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) { 329 328 /* part of new item falls into L[0] */ 330 329 int new_item_len; 331 330 int version; 332 331 333 - ret_val = 334 - leaf_shift_left(tb, tb->lnum[0] - 1, 335 - -1); 332 + ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, -1); 336 333 337 334 /* Calculate item length to insert to S[0] */ 338 - new_item_len = 339 - ih_item_len(ih) - tb->lbytes; 335 + new_item_len = ih_item_len(ih) - tb->lbytes; 340 336 /* Calculate and check item length to insert to L[0] */ 341 - put_ih_item_len(ih, 342 - ih_item_len(ih) - 343 - new_item_len); 337 + put_ih_item_len(ih, ih_item_len(ih) - new_item_len); 344 338 345 339 RFALSE(ih_item_len(ih) <= 0, 346 340 "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d", ··· 343 349 /* Insert new item into L[0] */ 344 350 buffer_info_init_left(tb, &bi); 345 351 leaf_insert_into_buf(&bi, 346 - n + item_pos - 347 - ret_val, ih, body, 348 - zeros_num > 349 - ih_item_len(ih) ? 350 - ih_item_len(ih) : 351 - zeros_num); 352 + n + item_pos - ret_val, ih, body, 353 + zeros_num > ih_item_len(ih) ? ih_item_len(ih) : zeros_num); 352 354 353 355 version = ih_version(ih); 354 356 355 357 /* Calculate key component, item length and body to insert into S[0] */ 356 - set_le_ih_k_offset(ih, 357 - le_ih_k_offset(ih) + 358 - (tb-> 359 - lbytes << 360 - (is_indirect_le_ih 361 - (ih) ? tb->tb_sb-> 362 - s_blocksize_bits - 363 - UNFM_P_SHIFT : 364 - 0))); 358 + set_le_ih_k_offset(ih, le_ih_k_offset(ih) + 359 + (tb-> lbytes << (is_indirect_le_ih(ih) ? 
tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0))); 365 360 366 361 put_ih_item_len(ih, new_item_len); 367 362 if (tb->lbytes > zeros_num) { 368 - body += 369 - (tb->lbytes - zeros_num); 363 + body += (tb->lbytes - zeros_num); 370 364 zeros_num = 0; 371 365 } else 372 366 zeros_num -= tb->lbytes; ··· 365 383 } else { 366 384 /* new item in whole falls into L[0] */ 367 385 /* Shift lnum[0]-1 items to L[0] */ 368 - ret_val = 369 - leaf_shift_left(tb, tb->lnum[0] - 1, 370 - tb->lbytes); 386 + ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes); 371 387 /* Insert new item into L[0] */ 372 388 buffer_info_init_left(tb, &bi); 373 - leaf_insert_into_buf(&bi, 374 - n + item_pos - 375 - ret_val, ih, body, 376 - zeros_num); 389 + leaf_insert_into_buf(&bi, n + item_pos - ret_val, ih, body, zeros_num); 377 390 tb->insert_size[0] = 0; 378 391 zeros_num = 0; 379 392 } ··· 376 399 377 400 case M_PASTE: /* append item in L[0] */ 378 401 379 - if (item_pos == tb->lnum[0] - 1 380 - && tb->lbytes != -1) { 402 + if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) { 381 403 /* we must shift the part of the appended item */ 382 - if (is_direntry_le_ih 383 - (B_N_PITEM_HEAD(tbS0, item_pos))) { 404 + if (is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) { 384 405 385 406 RFALSE(zeros_num, 386 407 "PAP-12090: invalid parameter in case of a directory"); 387 408 /* directory item */ 388 409 if (tb->lbytes > pos_in_item) { 389 410 /* new directory entry falls into L[0] */ 390 - struct item_head 391 - *pasted; 392 - int l_pos_in_item = 393 - pos_in_item; 411 + struct item_head *pasted; 412 + int l_pos_in_item = pos_in_item; 394 413 395 414 /* Shift lnum[0] - 1 items in whole. 
Shift lbytes - 1 entries from given directory item */ 396 - ret_val = 397 - leaf_shift_left(tb, 398 - tb-> 399 - lnum 400 - [0], 401 - tb-> 402 - lbytes 403 - - 404 - 1); 405 - if (ret_val 406 - && !item_pos) { 407 - pasted = 408 - B_N_PITEM_HEAD 409 - (tb->L[0], 410 - B_NR_ITEMS 411 - (tb-> 412 - L[0]) - 413 - 1); 414 - l_pos_in_item += 415 - I_ENTRY_COUNT 416 - (pasted) - 417 - (tb-> 418 - lbytes - 419 - 1); 415 + ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes-1); 416 + if (ret_val && !item_pos) { 417 + pasted = B_N_PITEM_HEAD(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1); 418 + l_pos_in_item += I_ENTRY_COUNT(pasted) - (tb->lbytes -1); 420 419 } 421 420 422 421 /* Append given directory entry to directory item */ 423 422 buffer_info_init_left(tb, &bi); 424 - leaf_paste_in_buffer 425 - (&bi, 426 - n + item_pos - 427 - ret_val, 428 - l_pos_in_item, 429 - tb->insert_size[0], 430 - body, zeros_num); 423 + leaf_paste_in_buffer(&bi, n + item_pos - ret_val, l_pos_in_item, tb->insert_size[0], body, zeros_num); 431 424 432 425 /* previous string prepared space for pasting new entry, following string pastes this entry */ 433 426 434 427 /* when we have merge directory item, pos_in_item has been changed too */ 435 428 436 429 /* paste new directory entry. 1 is entry number */ 437 - leaf_paste_entries(&bi, 438 - n + 439 - item_pos 440 - - 441 - ret_val, 442 - l_pos_in_item, 443 - 1, 444 - (struct 445 - reiserfs_de_head 446 - *) 447 - body, 448 - body 449 - + 450 - DEH_SIZE, 451 - tb-> 452 - insert_size 453 - [0] 454 - ); 430 + leaf_paste_entries(&bi, n + item_pos - ret_val, l_pos_in_item, 431 + 1, (struct reiserfs_de_head *) body, 432 + body + DEH_SIZE, tb->insert_size[0]); 455 433 tb->insert_size[0] = 0; 456 434 } else { 457 435 /* new directory item doesn't fall into L[0] */ 458 436 /* Shift lnum[0]-1 items in whole. 
Shift lbytes directory entries from directory item number lnum[0] */ 459 - leaf_shift_left(tb, 460 - tb-> 461 - lnum[0], 462 - tb-> 463 - lbytes); 437 + leaf_shift_left(tb, tb->lnum[0], tb->lbytes); 464 438 } 465 439 /* Calculate new position to append in item body */ 466 440 pos_in_item -= tb->lbytes; 467 441 } else { 468 442 /* regular object */ 469 - RFALSE(tb->lbytes <= 0, 470 - "PAP-12095: there is nothing to shift to L[0]. lbytes=%d", 471 - tb->lbytes); 472 - RFALSE(pos_in_item != 473 - ih_item_len 474 - (B_N_PITEM_HEAD 475 - (tbS0, item_pos)), 443 + RFALSE(tb->lbytes <= 0, "PAP-12095: there is nothing to shift to L[0]. lbytes=%d", tb->lbytes); 444 + RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)), 476 445 "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d", 477 - ih_item_len 478 - (B_N_PITEM_HEAD 479 - (tbS0, item_pos)), 480 - pos_in_item); 446 + ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),pos_in_item); 481 447 482 448 if (tb->lbytes >= pos_in_item) { 483 449 /* appended item will be in L[0] in whole */ 484 450 int l_n; 485 451 486 452 /* this bytes number must be appended to the last item of L[h] */ 487 - l_n = 488 - tb->lbytes - 489 - pos_in_item; 453 + l_n = tb->lbytes - pos_in_item; 490 454 491 455 /* Calculate new insert_size[0] */ 492 - tb->insert_size[0] -= 493 - l_n; 456 + tb->insert_size[0] -= l_n; 494 457 495 - RFALSE(tb-> 496 - insert_size[0] <= 497 - 0, 458 + RFALSE(tb->insert_size[0] <= 0, 498 459 "PAP-12105: there is nothing to paste into L[0]. 
insert_size=%d", 499 - tb-> 500 - insert_size[0]); 501 - ret_val = 502 - leaf_shift_left(tb, 503 - tb-> 504 - lnum 505 - [0], 506 - ih_item_len 507 - (B_N_PITEM_HEAD 508 - (tbS0, 509 - item_pos))); 460 + tb->insert_size[0]); 461 + ret_val = leaf_shift_left(tb, tb->lnum[0], ih_item_len 462 + (B_N_PITEM_HEAD(tbS0, item_pos))); 510 463 /* Append to body of item in L[0] */ 511 464 buffer_info_init_left(tb, &bi); 512 465 leaf_paste_in_buffer 513 - (&bi, 514 - n + item_pos - 515 - ret_val, 516 - ih_item_len 517 - (B_N_PITEM_HEAD 518 - (tb->L[0], 519 - n + item_pos - 520 - ret_val)), l_n, 521 - body, 522 - zeros_num > 523 - l_n ? l_n : 524 - zeros_num); 466 + (&bi, n + item_pos - ret_val, ih_item_len 467 + (B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val)), 468 + l_n, body, 469 + zeros_num > l_n ? l_n : zeros_num); 525 470 /* 0-th item in S0 can be only of DIRECT type when l_n != 0 */ 526 471 { 527 472 int version; 528 - int temp_l = 529 - l_n; 473 + int temp_l = l_n; 530 474 531 - RFALSE 532 - (ih_item_len 533 - (B_N_PITEM_HEAD 534 - (tbS0, 535 - 0)), 475 + RFALSE(ih_item_len(B_N_PITEM_HEAD(tbS0, 0)), 536 476 "PAP-12106: item length must be 0"); 537 - RFALSE 538 - (comp_short_le_keys 539 - (B_N_PKEY 540 - (tbS0, 0), 541 - B_N_PKEY 542 - (tb->L[0], 543 - n + 544 - item_pos 545 - - 546 - ret_val)), 477 + RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY 478 + (tb->L[0], n + item_pos - ret_val)), 547 479 "PAP-12107: items must be of the same file"); 548 480 if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val))) { 549 - temp_l = 550 - l_n 551 - << 552 - (tb-> 553 - tb_sb-> 554 - s_blocksize_bits 555 - - 556 - UNFM_P_SHIFT); 481 + temp_l = l_n << (tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT); 557 482 } 558 483 /* update key of first item in S0 */ 559 - version = 560 - ih_version 561 - (B_N_PITEM_HEAD 562 - (tbS0, 0)); 563 - set_le_key_k_offset 564 - (version, 565 - B_N_PKEY 566 - (tbS0, 0), 567 - le_key_k_offset 568 - (version, 569 - B_N_PKEY 570 - 
(tbS0, 571 - 0)) + 572 - temp_l); 484 + version = ih_version(B_N_PITEM_HEAD(tbS0, 0)); 485 + set_le_key_k_offset(version, B_N_PKEY(tbS0, 0), 486 + le_key_k_offset(version,B_N_PKEY(tbS0, 0)) + temp_l); 573 487 /* update left delimiting key */ 574 - set_le_key_k_offset 575 - (version, 576 - B_N_PDELIM_KEY 577 - (tb-> 578 - CFL[0], 579 - tb-> 580 - lkey[0]), 581 - le_key_k_offset 582 - (version, 583 - B_N_PDELIM_KEY 584 - (tb-> 585 - CFL[0], 586 - tb-> 587 - lkey[0])) 588 - + temp_l); 488 + set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), 489 + le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0])) + temp_l); 589 490 } 590 491 591 492 /* Calculate new body, position in item and insert_size[0] */ 592 493 if (l_n > zeros_num) { 593 - body += 594 - (l_n - 595 - zeros_num); 494 + body += (l_n - zeros_num); 596 495 zeros_num = 0; 597 496 } else 598 - zeros_num -= 599 - l_n; 497 + zeros_num -= l_n; 600 498 pos_in_item = 0; 601 499 602 - RFALSE 603 - (comp_short_le_keys 604 - (B_N_PKEY(tbS0, 0), 605 - B_N_PKEY(tb->L[0], 606 - B_NR_ITEMS 607 - (tb-> 608 - L[0]) - 609 - 1)) 610 - || 611 - !op_is_left_mergeable 612 - (B_N_PKEY(tbS0, 0), 613 - tbS0->b_size) 614 - || 615 - !op_is_left_mergeable 616 - (B_N_PDELIM_KEY 617 - (tb->CFL[0], 618 - tb->lkey[0]), 619 - tbS0->b_size), 500 + RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1)) 501 + || !op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size) 502 + || !op_is_left_mergeable(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), tbS0->b_size), 620 503 "PAP-12120: item must be merge-able with left neighboring item"); 621 504 } else { /* only part of the appended item will be in L[0] */ 622 505 623 506 /* Calculate position in item for append in S[0] */ 624 - pos_in_item -= 625 - tb->lbytes; 507 + pos_in_item -= tb->lbytes; 626 508 627 - RFALSE(pos_in_item <= 0, 628 - "PAP-12125: no place for paste. 
pos_in_item=%d", 629 - pos_in_item); 509 + RFALSE(pos_in_item <= 0, "PAP-12125: no place for paste. pos_in_item=%d", pos_in_item); 630 510 631 511 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ 632 - leaf_shift_left(tb, 633 - tb-> 634 - lnum[0], 635 - tb-> 636 - lbytes); 512 + leaf_shift_left(tb, tb->lnum[0], tb->lbytes); 637 513 } 638 514 } 639 515 } else { /* appended item will be in L[0] in whole */ ··· 495 665 496 666 if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) { /* if we paste into first item of S[0] and it is left mergable */ 497 667 /* then increment pos_in_item by the size of the last item in L[0] */ 498 - pasted = 499 - B_N_PITEM_HEAD(tb->L[0], 500 - n - 1); 668 + pasted = B_N_PITEM_HEAD(tb->L[0], n - 1); 501 669 if (is_direntry_le_ih(pasted)) 502 - pos_in_item += 503 - ih_entry_count 504 - (pasted); 670 + pos_in_item += ih_entry_count(pasted); 505 671 else 506 - pos_in_item += 507 - ih_item_len(pasted); 672 + pos_in_item += ih_item_len(pasted); 508 673 } 509 674 510 675 /* Shift lnum[0] - 1 items in whole. 
Shift lbytes - 1 byte from item number lnum[0] */ 511 - ret_val = 512 - leaf_shift_left(tb, tb->lnum[0], 513 - tb->lbytes); 676 + ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes); 514 677 /* Append to body of item in L[0] */ 515 678 buffer_info_init_left(tb, &bi); 516 - leaf_paste_in_buffer(&bi, 517 - n + item_pos - 518 - ret_val, 679 + leaf_paste_in_buffer(&bi, n + item_pos - ret_val, 519 680 pos_in_item, 520 681 tb->insert_size[0], 521 682 body, zeros_num); 522 683 523 684 /* if appended item is directory, paste entry */ 524 - pasted = 525 - B_N_PITEM_HEAD(tb->L[0], 526 - n + item_pos - 527 - ret_val); 685 + pasted = B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val); 528 686 if (is_direntry_le_ih(pasted)) 529 - leaf_paste_entries(&bi, 530 - n + 531 - item_pos - 532 - ret_val, 533 - pos_in_item, 534 - 1, 535 - (struct 536 - reiserfs_de_head 537 - *)body, 538 - body + 539 - DEH_SIZE, 540 - tb-> 541 - insert_size 542 - [0] 543 - ); 687 + leaf_paste_entries(&bi, n + item_pos - ret_val, 688 + pos_in_item, 1, 689 + (struct reiserfs_de_head *) body, 690 + body + DEH_SIZE, 691 + tb->insert_size[0]); 544 692 /* if appended item is indirect item, put unformatted node into un list */ 545 693 if (is_indirect_le_ih(pasted)) 546 694 set_ih_free_space(pasted, 0); ··· 530 722 reiserfs_panic(tb->tb_sb, "PAP-12130", 531 723 "lnum > 0: unexpected mode: " 532 724 " %s(%d)", 533 - (flag == 534 - M_DELETE) ? "DELETE" : ((flag == 535 - M_CUT) 536 - ? "CUT" 537 - : 538 - "UNKNOWN"), 539 - flag); 725 + (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? 
"CUT" : "UNKNOWN"), flag); 540 726 } 541 727 } else { 542 728 /* new item doesn't fall into L[0] */ ··· 550 748 case M_INSERT: /* insert item */ 551 749 if (n - tb->rnum[0] < item_pos) { /* new item or its part falls to R[0] */ 552 750 if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { /* part of new item falls into R[0] */ 553 - loff_t old_key_comp, old_len, 554 - r_zeros_number; 751 + loff_t old_key_comp, old_len, r_zeros_number; 555 752 const char *r_body; 556 753 int version; 557 754 loff_t offset; 558 755 559 - leaf_shift_right(tb, tb->rnum[0] - 1, 560 - -1); 756 + leaf_shift_right(tb, tb->rnum[0] - 1, -1); 561 757 562 758 version = ih_version(ih); 563 759 /* Remember key component and item length */ ··· 563 763 old_len = ih_item_len(ih); 564 764 565 765 /* Calculate key component and item length to insert into R[0] */ 566 - offset = 567 - le_ih_k_offset(ih) + 568 - ((old_len - 569 - tb-> 570 - rbytes) << (is_indirect_le_ih(ih) 571 - ? tb->tb_sb-> 572 - s_blocksize_bits - 573 - UNFM_P_SHIFT : 0)); 766 + offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << (is_indirect_le_ih(ih) ? 
tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT : 0)); 574 767 set_le_ih_k_offset(ih, offset); 575 768 put_ih_item_len(ih, tb->rbytes); 576 769 /* Insert part of the item into R[0] */ 577 770 buffer_info_init_right(tb, &bi); 578 771 if ((old_len - tb->rbytes) > zeros_num) { 579 772 r_zeros_number = 0; 580 - r_body = 581 - body + (old_len - 582 - tb->rbytes) - 583 - zeros_num; 773 + r_body = body + (old_len - tb->rbytes) - zeros_num; 584 774 } else { 585 775 r_body = body; 586 - r_zeros_number = 587 - zeros_num - (old_len - 588 - tb->rbytes); 776 + r_zeros_number = zeros_num - (old_len - tb->rbytes); 589 777 zeros_num -= r_zeros_number; 590 778 } 591 779 ··· 586 798 587 799 /* Calculate key component and item length to insert into S[0] */ 588 800 set_le_ih_k_offset(ih, old_key_comp); 589 - put_ih_item_len(ih, 590 - old_len - tb->rbytes); 801 + put_ih_item_len(ih, old_len - tb->rbytes); 591 802 592 803 tb->insert_size[0] -= tb->rbytes; 593 804 594 805 } else { /* whole new item falls into R[0] */ 595 806 596 807 /* Shift rnum[0]-1 items to R[0] */ 597 - ret_val = 598 - leaf_shift_right(tb, 599 - tb->rnum[0] - 1, 600 - tb->rbytes); 808 + ret_val = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes); 601 809 /* Insert new item into R[0] */ 602 810 buffer_info_init_right(tb, &bi); 603 - leaf_insert_into_buf(&bi, 604 - item_pos - n + 605 - tb->rnum[0] - 1, 606 - ih, body, 607 - zeros_num); 811 + leaf_insert_into_buf(&bi, item_pos - n + tb->rnum[0] - 1, 812 + ih, body, zeros_num); 608 813 609 814 if (item_pos - n + tb->rnum[0] - 1 == 0) { 610 815 replace_key(tb, tb->CFR[0], ··· 622 841 623 842 RFALSE(zeros_num, 624 843 "PAP-12145: invalid parameter in case of a directory"); 625 - entry_count = 626 - I_ENTRY_COUNT(B_N_PITEM_HEAD 627 - (tbS0, 628 - item_pos)); 844 + entry_count = I_ENTRY_COUNT(B_N_PITEM_HEAD 845 + (tbS0, item_pos)); 629 846 if (entry_count - tb->rbytes < 630 847 pos_in_item) 631 848 /* new directory entry falls into R[0] */ 632 849 { 633 850 int 
paste_entry_position; 634 851 635 - RFALSE(tb->rbytes - 1 >= 636 - entry_count 637 - || !tb-> 638 - insert_size[0], 852 + RFALSE(tb->rbytes - 1 >= entry_count || !tb-> insert_size[0], 639 853 "PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d", 640 - tb->rbytes, 641 - entry_count); 854 + tb->rbytes, entry_count); 642 855 /* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */ 643 - leaf_shift_right(tb, 644 - tb-> 645 - rnum 646 - [0], 647 - tb-> 648 - rbytes 649 - - 1); 856 + leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1); 650 857 /* Paste given directory entry to directory item */ 651 - paste_entry_position = 652 - pos_in_item - 653 - entry_count + 654 - tb->rbytes - 1; 858 + paste_entry_position = pos_in_item - entry_count + tb->rbytes - 1; 655 859 buffer_info_init_right(tb, &bi); 656 - leaf_paste_in_buffer 657 - (&bi, 0, 658 - paste_entry_position, 659 - tb->insert_size[0], 660 - body, zeros_num); 860 + leaf_paste_in_buffer(&bi, 0, paste_entry_position, tb->insert_size[0], body, zeros_num); 661 861 /* paste entry */ 662 - leaf_paste_entries(&bi, 663 - 0, 664 - paste_entry_position, 665 - 1, 666 - (struct 667 - reiserfs_de_head 668 - *) 669 - body, 670 - body 671 - + 672 - DEH_SIZE, 673 - tb-> 674 - insert_size 675 - [0] 676 - ); 862 + leaf_paste_entries(&bi, 0, paste_entry_position, 1, 863 + (struct reiserfs_de_head *) body, 864 + body + DEH_SIZE, tb->insert_size[0]); 677 865 678 - if (paste_entry_position 679 - == 0) { 866 + if (paste_entry_position == 0) { 680 867 /* change delimiting keys */ 681 - replace_key(tb, 682 - tb-> 683 - CFR 684 - [0], 685 - tb-> 686 - rkey 687 - [0], 688 - tb-> 689 - R 690 - [0], 691 - 0); 868 + replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0],0); 692 869 } 693 870 694 871 tb->insert_size[0] = 0; 695 872 pos_in_item++; 696 873 } else { /* new directory entry doesn't fall into R[0] */ 697 874 698 - leaf_shift_right(tb, 699 - tb-> 700 - rnum 701 - [0], 702 
- tb-> 703 - rbytes); 875 + leaf_shift_right(tb, tb->rnum[0], tb->rbytes); 704 876 } 705 877 } else { /* regular object */ 706 878 707 - int n_shift, n_rem, 708 - r_zeros_number; 879 + int n_shift, n_rem, r_zeros_number; 709 880 const char *r_body; 710 881 711 882 /* Calculate number of bytes which must be shifted from appended item */ 712 - if ((n_shift = 713 - tb->rbytes - 714 - tb->insert_size[0]) < 0) 883 + if ((n_shift = tb->rbytes - tb->insert_size[0]) < 0) 715 884 n_shift = 0; 716 885 717 - RFALSE(pos_in_item != 718 - ih_item_len 719 - (B_N_PITEM_HEAD 720 - (tbS0, item_pos)), 886 + RFALSE(pos_in_item != ih_item_len 887 + (B_N_PITEM_HEAD(tbS0, item_pos)), 721 888 "PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d", 722 - pos_in_item, 723 - ih_item_len 724 - (B_N_PITEM_HEAD 725 - (tbS0, item_pos))); 889 + pos_in_item, ih_item_len 890 + (B_N_PITEM_HEAD(tbS0, item_pos))); 726 891 727 - leaf_shift_right(tb, 728 - tb->rnum[0], 729 - n_shift); 892 + leaf_shift_right(tb, tb->rnum[0], n_shift); 730 893 /* Calculate number of bytes which must remain in body after appending to R[0] */ 731 - if ((n_rem = 732 - tb->insert_size[0] - 733 - tb->rbytes) < 0) 894 + if ((n_rem = tb->insert_size[0] - tb->rbytes) < 0) 734 895 n_rem = 0; 735 896 736 897 { 737 898 int version; 738 - unsigned long temp_rem = 739 - n_rem; 899 + unsigned long temp_rem = n_rem; 740 900 741 - version = 742 - ih_version 743 - (B_N_PITEM_HEAD 744 - (tb->R[0], 0)); 745 - if (is_indirect_le_key 746 - (version, 747 - B_N_PKEY(tb->R[0], 748 - 0))) { 749 - temp_rem = 750 - n_rem << 751 - (tb->tb_sb-> 752 - s_blocksize_bits 753 - - 754 - UNFM_P_SHIFT); 901 + version = ih_version(B_N_PITEM_HEAD(tb->R[0], 0)); 902 + if (is_indirect_le_key(version, B_N_PKEY(tb->R[0], 0))) { 903 + temp_rem = n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT); 755 904 } 756 - set_le_key_k_offset 757 - (version, 758 - B_N_PKEY(tb->R[0], 759 - 0), 760 - le_key_k_offset 761 - (version, 762 - B_N_PKEY(tb->R[0], 
763 - 0)) + 764 - temp_rem); 765 - set_le_key_k_offset 766 - (version, 767 - B_N_PDELIM_KEY(tb-> 768 - CFR 769 - [0], 770 - tb-> 771 - rkey 772 - [0]), 773 - le_key_k_offset 774 - (version, 775 - B_N_PDELIM_KEY 776 - (tb->CFR[0], 777 - tb->rkey[0])) + 778 - temp_rem); 905 + set_le_key_k_offset(version, B_N_PKEY(tb->R[0], 0), 906 + le_key_k_offset(version, B_N_PKEY(tb->R[0], 0)) + temp_rem); 907 + set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]), 908 + le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0])) + temp_rem); 779 909 } 780 910 /* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem; 781 911 k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/ 782 - do_balance_mark_internal_dirty 783 - (tb, tb->CFR[0], 0); 912 + do_balance_mark_internal_dirty(tb, tb->CFR[0], 0); 784 913 785 914 /* Append part of body into R[0] */ 786 915 buffer_info_init_right(tb, &bi); 787 916 if (n_rem > zeros_num) { 788 917 r_zeros_number = 0; 789 - r_body = 790 - body + n_rem - 791 - zeros_num; 918 + r_body = body + n_rem - zeros_num; 792 919 } else { 793 920 r_body = body; 794 - r_zeros_number = 795 - zeros_num - n_rem; 796 - zeros_num -= 797 - r_zeros_number; 921 + r_zeros_number = zeros_num - n_rem; 922 + zeros_num -= r_zeros_number; 798 923 } 799 924 800 - leaf_paste_in_buffer(&bi, 0, 801 - n_shift, 802 - tb-> 803 - insert_size 804 - [0] - 805 - n_rem, 806 - r_body, 807 - r_zeros_number); 925 + leaf_paste_in_buffer(&bi, 0, n_shift, 926 + tb->insert_size[0] - n_rem, 927 + r_body, r_zeros_number); 808 928 809 - if (is_indirect_le_ih 810 - (B_N_PITEM_HEAD 811 - (tb->R[0], 0))) { 929 + if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->R[0], 0))) { 812 930 #if 0 813 931 RFALSE(n_rem, 814 932 "PAP-12160: paste more than one unformatted node pointer"); 815 933 #endif 816 - set_ih_free_space 817 - (B_N_PITEM_HEAD 818 - (tb->R[0], 0), 0); 934 + set_ih_free_space(B_N_PITEM_HEAD(tb->R[0], 0), 0); 819 935 } 820 936 tb->insert_size[0] = n_rem; 821 937 if (!n_rem) ··· 722 1044 
723 1045 struct item_head *pasted; 724 1046 725 - ret_val = 726 - leaf_shift_right(tb, tb->rnum[0], 727 - tb->rbytes); 1047 + ret_val = leaf_shift_right(tb, tb->rnum[0], tb->rbytes); 728 1048 /* append item in R[0] */ 729 1049 if (pos_in_item >= 0) { 730 1050 buffer_info_init_right(tb, &bi); 731 - leaf_paste_in_buffer(&bi, 732 - item_pos - 733 - n + 734 - tb-> 735 - rnum[0], 736 - pos_in_item, 737 - tb-> 738 - insert_size 739 - [0], body, 740 - zeros_num); 1051 + leaf_paste_in_buffer(&bi, item_pos - n + tb->rnum[0], pos_in_item, 1052 + tb->insert_size[0], body, zeros_num); 741 1053 } 742 1054 743 1055 /* paste new entry, if item is directory item */ 744 - pasted = 745 - B_N_PITEM_HEAD(tb->R[0], 746 - item_pos - n + 747 - tb->rnum[0]); 748 - if (is_direntry_le_ih(pasted) 749 - && pos_in_item >= 0) { 750 - leaf_paste_entries(&bi, 751 - item_pos - 752 - n + 753 - tb->rnum[0], 754 - pos_in_item, 755 - 1, 756 - (struct 757 - reiserfs_de_head 758 - *)body, 759 - body + 760 - DEH_SIZE, 761 - tb-> 762 - insert_size 763 - [0] 764 - ); 1056 + pasted = B_N_PITEM_HEAD(tb->R[0], item_pos - n + tb->rnum[0]); 1057 + if (is_direntry_le_ih(pasted) && pos_in_item >= 0) { 1058 + leaf_paste_entries(&bi, item_pos - n + tb->rnum[0], 1059 + pos_in_item, 1, 1060 + (struct reiserfs_de_head *) body, 1061 + body + DEH_SIZE, tb->insert_size[0]); 765 1062 if (!pos_in_item) { 766 1063 767 - RFALSE(item_pos - n + 768 - tb->rnum[0], 1064 + RFALSE(item_pos - n + tb->rnum[0], 769 1065 "PAP-12165: directory item must be first item of node when pasting is in 0th position"); 770 1066 771 1067 /* update delimiting keys */ 772 - replace_key(tb, 773 - tb->CFR[0], 774 - tb->rkey[0], 775 - tb->R[0], 776 - 0); 1068 + replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0); 777 1069 } 778 1070 } 779 1071 ··· 759 1111 default: /* cases d and t */ 760 1112 reiserfs_panic(tb->tb_sb, "PAP-12175", 761 1113 "rnum > 0: unexpected mode: %s(%d)", 762 - (flag == 763 - M_DELETE) ? "DELETE" : ((flag == 764 - M_CUT) ? 
"CUT" 765 - : "UNKNOWN"), 766 - flag); 1114 + (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag); 767 1115 } 768 1116 769 1117 } 770 1118 771 1119 /* tb->rnum[0] > 0 */ 772 1120 RFALSE(tb->blknum[0] > 3, 773 - "PAP-12180: blknum can not be %d. It must be <= 3", 774 - tb->blknum[0]); 1121 + "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]); 775 1122 RFALSE(tb->blknum[0] < 0, 776 - "PAP-12185: blknum can not be %d. It must be >= 0", 777 - tb->blknum[0]); 1123 + "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]); 778 1124 779 1125 /* if while adding to a node we discover that it is possible to split 780 1126 it in two, and merge the left part into the left neighbor and the ··· 819 1177 820 1178 if (n - snum[i] < item_pos) { /* new item or it's part falls to first new node S_new[i] */ 821 1179 if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) { /* part of new item falls into S_new[i] */ 822 - int old_key_comp, old_len, 823 - r_zeros_number; 1180 + int old_key_comp, old_len, r_zeros_number; 824 1181 const char *r_body; 825 1182 int version; 826 1183 ··· 833 1192 old_len = ih_item_len(ih); 834 1193 835 1194 /* Calculate key component and item length to insert into S_new[i] */ 836 - set_le_ih_k_offset(ih, 837 - le_ih_k_offset(ih) + 838 - ((old_len - 839 - sbytes[i]) << 840 - (is_indirect_le_ih 841 - (ih) ? tb->tb_sb-> 842 - s_blocksize_bits - 843 - UNFM_P_SHIFT : 844 - 0))); 1195 + set_le_ih_k_offset(ih, le_ih_k_offset(ih) + 1196 + ((old_len - sbytes[i]) << (is_indirect_le_ih(ih) ? 
tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0))); 845 1197 846 1198 put_ih_item_len(ih, sbytes[i]); 847 1199 ··· 843 1209 844 1210 if ((old_len - sbytes[i]) > zeros_num) { 845 1211 r_zeros_number = 0; 846 - r_body = 847 - body + (old_len - 848 - sbytes[i]) - 849 - zeros_num; 1212 + r_body = body + (old_len - sbytes[i]) - zeros_num; 850 1213 } else { 851 1214 r_body = body; 852 - r_zeros_number = 853 - zeros_num - (old_len - 854 - sbytes[i]); 1215 + r_zeros_number = zeros_num - (old_len - sbytes[i]); 855 1216 zeros_num -= r_zeros_number; 856 1217 } 857 1218 858 - leaf_insert_into_buf(&bi, 0, ih, r_body, 859 - r_zeros_number); 1219 + leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeros_number); 860 1220 861 1221 /* Calculate key component and item length to insert into S[i] */ 862 1222 set_le_ih_k_offset(ih, old_key_comp); 863 - put_ih_item_len(ih, 864 - old_len - sbytes[i]); 1223 + put_ih_item_len(ih, old_len - sbytes[i]); 865 1224 tb->insert_size[0] -= sbytes[i]; 866 1225 } else { /* whole new item falls into S_new[i] */ 867 1226 868 1227 /* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */ 869 1228 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, 870 - snum[i] - 1, sbytes[i], 871 - S_new[i]); 1229 + snum[i] - 1, sbytes[i], S_new[i]); 872 1230 873 1231 /* Insert new item into S_new[i] */ 874 1232 buffer_info_init_bh(tb, &bi, S_new[i]); 875 - leaf_insert_into_buf(&bi, 876 - item_pos - n + 877 - snum[i] - 1, ih, 878 - body, zeros_num); 1233 + leaf_insert_into_buf(&bi, item_pos - n + snum[i] - 1, 1234 + ih, body, zeros_num); 879 1235 880 1236 zeros_num = tb->insert_size[0] = 0; 881 1237 } ··· 892 1268 893 1269 int entry_count; 894 1270 895 - entry_count = 896 - ih_entry_count(aux_ih); 1271 + entry_count = ih_entry_count(aux_ih); 897 1272 898 - if (entry_count - sbytes[i] < 899 - pos_in_item 900 - && pos_in_item <= 901 - entry_count) { 1273 + if (entry_count - sbytes[i] < pos_in_item && pos_in_item <= entry_count) { 902 1274 /* new directory entry falls into 
S_new[i] */ 903 1275 904 - RFALSE(!tb-> 905 - insert_size[0], 906 - "PAP-12215: insert_size is already 0"); 907 - RFALSE(sbytes[i] - 1 >= 908 - entry_count, 1276 + RFALSE(!tb->insert_size[0], "PAP-12215: insert_size is already 0"); 1277 + RFALSE(sbytes[i] - 1 >= entry_count, 909 1278 "PAP-12220: there are no so much entries (%d), only %d", 910 - sbytes[i] - 1, 911 - entry_count); 1279 + sbytes[i] - 1, entry_count); 912 1280 913 1281 /* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */ 914 - leaf_move_items 915 - (LEAF_FROM_S_TO_SNEW, 916 - tb, snum[i], 917 - sbytes[i] - 1, 918 - S_new[i]); 1282 + leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i] - 1, S_new[i]); 919 1283 /* Paste given directory entry to directory item */ 920 1284 buffer_info_init_bh(tb, &bi, S_new[i]); 921 - leaf_paste_in_buffer 922 - (&bi, 0, 923 - pos_in_item - 924 - entry_count + 925 - sbytes[i] - 1, 926 - tb->insert_size[0], 927 - body, zeros_num); 1285 + leaf_paste_in_buffer(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1, 1286 + tb->insert_size[0], body, zeros_num); 928 1287 /* paste new directory entry */ 929 - leaf_paste_entries(&bi, 930 - 0, 931 - pos_in_item 932 - - 933 - entry_count 934 - + 935 - sbytes 936 - [i] - 937 - 1, 1, 938 - (struct 939 - reiserfs_de_head 940 - *) 941 - body, 942 - body 943 - + 944 - DEH_SIZE, 945 - tb-> 946 - insert_size 947 - [0] 948 - ); 1288 + leaf_paste_entries(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1, 1, 1289 + (struct reiserfs_de_head *) body, 1290 + body + DEH_SIZE, tb->insert_size[0]); 949 1291 tb->insert_size[0] = 0; 950 1292 pos_in_item++; 951 1293 } else { /* new directory entry doesn't fall into S_new[i] */ 952 - leaf_move_items 953 - (LEAF_FROM_S_TO_SNEW, 954 - tb, snum[i], 955 - sbytes[i], 956 - S_new[i]); 1294 + leaf_move_items(LEAF_FROM_S_TO_SNEW,tb, snum[i], sbytes[i], S_new[i]); 957 1295 } 958 1296 } else { /* regular object */ 959 1297 960 - int n_shift, n_rem, 961 - 
r_zeros_number; 1298 + int n_shift, n_rem, r_zeros_number; 962 1299 const char *r_body; 963 1300 964 - RFALSE(pos_in_item != 965 - ih_item_len 966 - (B_N_PITEM_HEAD 967 - (tbS0, item_pos)) 968 - || tb->insert_size[0] <= 969 - 0, 1301 + RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)) || tb->insert_size[0] <= 0, 970 1302 "PAP-12225: item too short or insert_size <= 0"); 971 1303 972 1304 /* Calculate number of bytes which must be shifted from appended item */ 973 - n_shift = 974 - sbytes[i] - 975 - tb->insert_size[0]; 1305 + n_shift = sbytes[i] - tb->insert_size[0]; 976 1306 if (n_shift < 0) 977 1307 n_shift = 0; 978 - leaf_move_items 979 - (LEAF_FROM_S_TO_SNEW, tb, 980 - snum[i], n_shift, 981 - S_new[i]); 1308 + leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], n_shift, S_new[i]); 982 1309 983 1310 /* Calculate number of bytes which must remain in body after append to S_new[i] */ 984 - n_rem = 985 - tb->insert_size[0] - 986 - sbytes[i]; 1311 + n_rem = tb->insert_size[0] - sbytes[i]; 987 1312 if (n_rem < 0) 988 1313 n_rem = 0; 989 1314 /* Append part of body into S_new[0] */ 990 1315 buffer_info_init_bh(tb, &bi, S_new[i]); 991 1316 if (n_rem > zeros_num) { 992 1317 r_zeros_number = 0; 993 - r_body = 994 - body + n_rem - 995 - zeros_num; 1318 + r_body = body + n_rem - zeros_num; 996 1319 } else { 997 1320 r_body = body; 998 - r_zeros_number = 999 - zeros_num - n_rem; 1000 - zeros_num -= 1001 - r_zeros_number; 1321 + r_zeros_number = zeros_num - n_rem; 1322 + zeros_num -= r_zeros_number; 1002 1323 } 1003 1324 1004 - leaf_paste_in_buffer(&bi, 0, 1005 - n_shift, 1006 - tb-> 1007 - insert_size 1008 - [0] - 1009 - n_rem, 1010 - r_body, 1011 - r_zeros_number); 1325 + leaf_paste_in_buffer(&bi, 0, n_shift, 1326 + tb->insert_size[0] - n_rem, 1327 + r_body, r_zeros_number); 1012 1328 { 1013 1329 struct item_head *tmp; 1014 1330 1015 - tmp = 1016 - B_N_PITEM_HEAD(S_new 1017 - [i], 1018 - 0); 1331 + tmp = B_N_PITEM_HEAD(S_new[i], 0); 1019 1332 if 
(is_indirect_le_ih 1020 1333 (tmp)) { 1021 - set_ih_free_space 1022 - (tmp, 0); 1023 - set_le_ih_k_offset 1024 - (tmp, 1025 - le_ih_k_offset 1026 - (tmp) + 1027 - (n_rem << 1028 - (tb-> 1029 - tb_sb-> 1030 - s_blocksize_bits 1031 - - 1032 - UNFM_P_SHIFT))); 1334 + set_ih_free_space(tmp, 0); 1335 + set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + (n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT))); 1033 1336 } else { 1034 - set_le_ih_k_offset 1035 - (tmp, 1036 - le_ih_k_offset 1037 - (tmp) + 1038 - n_rem); 1337 + set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + n_rem); 1039 1338 } 1040 1339 } 1041 1340 ··· 973 1426 struct item_head *pasted; 974 1427 975 1428 #ifdef CONFIG_REISERFS_CHECK 976 - struct item_head *ih_check = 977 - B_N_PITEM_HEAD(tbS0, item_pos); 1429 + struct item_head *ih_check = B_N_PITEM_HEAD(tbS0, item_pos); 978 1430 979 1431 if (!is_direntry_le_ih(ih_check) 980 1432 && (pos_in_item != ih_item_len(ih_check) ··· 985 1439 "to ih_item_len"); 986 1440 #endif /* CONFIG_REISERFS_CHECK */ 987 1441 988 - leaf_mi = 989 - leaf_move_items(LEAF_FROM_S_TO_SNEW, 1442 + leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW, 990 1443 tb, snum[i], 991 1444 sbytes[i], 992 1445 S_new[i]); ··· 997 1452 /* paste into item */ 998 1453 buffer_info_init_bh(tb, &bi, S_new[i]); 999 1454 leaf_paste_in_buffer(&bi, 1000 - item_pos - n + 1001 - snum[i], 1455 + item_pos - n + snum[i], 1002 1456 pos_in_item, 1003 1457 tb->insert_size[0], 1004 1458 body, zeros_num); 1005 1459 1006 - pasted = 1007 - B_N_PITEM_HEAD(S_new[i], 1008 - item_pos - n + 1009 - snum[i]); 1460 + pasted = B_N_PITEM_HEAD(S_new[i], item_pos - n + snum[i]); 1010 1461 if (is_direntry_le_ih(pasted)) { 1011 1462 leaf_paste_entries(&bi, 1012 - item_pos - 1013 - n + snum[i], 1014 - pos_in_item, 1015 - 1, 1016 - (struct 1017 - reiserfs_de_head 1018 - *)body, 1019 - body + 1020 - DEH_SIZE, 1021 - tb-> 1022 - insert_size 1023 - [0] 1463 + item_pos - n + snum[i], 1464 + pos_in_item, 1, 1465 + (struct reiserfs_de_head *)body, 1466 
+ body + DEH_SIZE, 1467 + tb->insert_size[0] 1024 1468 ); 1025 1469 } 1026 1470 ··· 1029 1495 default: /* cases d and t */ 1030 1496 reiserfs_panic(tb->tb_sb, "PAP-12245", 1031 1497 "blknum > 2: unexpected mode: %s(%d)", 1032 - (flag == 1033 - M_DELETE) ? "DELETE" : ((flag == 1034 - M_CUT) ? "CUT" 1035 - : "UNKNOWN"), 1036 - flag); 1498 + (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag); 1037 1499 } 1038 1500 1039 1501 memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE); ··· 1054 1524 /* If we insert the first key change the delimiting key */ 1055 1525 if (item_pos == 0) { 1056 1526 if (tb->CFL[0]) /* can be 0 in reiserfsck */ 1057 - replace_key(tb, tb->CFL[0], tb->lkey[0], 1058 - tbS0, 0); 1059 - 1527 + replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0); 1060 1528 } 1061 1529 break; 1062 1530 ··· 1064 1536 pasted = B_N_PITEM_HEAD(tbS0, item_pos); 1065 1537 /* when directory, may be new entry already pasted */ 1066 1538 if (is_direntry_le_ih(pasted)) { 1067 - if (pos_in_item >= 0 && 1068 - pos_in_item <= 1069 - ih_entry_count(pasted)) { 1539 + if (pos_in_item >= 0 && pos_in_item <= ih_entry_count(pasted)) { 1070 1540 1071 1541 RFALSE(!tb->insert_size[0], 1072 1542 "PAP-12260: insert_size is 0 already"); 1073 1543 1074 1544 /* prepare space */ 1075 1545 buffer_info_init_tbS0(tb, &bi); 1076 - leaf_paste_in_buffer(&bi, 1077 - item_pos, 1078 - pos_in_item, 1079 - tb-> 1080 - insert_size 1081 - [0], body, 1546 + leaf_paste_in_buffer(&bi, item_pos, pos_in_item, 1547 + tb->insert_size[0], body, 1082 1548 zeros_num); 1083 1549 1084 1550 /* paste entry */ 1085 - leaf_paste_entries(&bi, 1086 - item_pos, 1087 - pos_in_item, 1088 - 1, 1089 - (struct 1090 - reiserfs_de_head 1091 - *)body, 1092 - body + 1093 - DEH_SIZE, 1094 - tb-> 1095 - insert_size 1096 - [0] 1097 - ); 1551 + leaf_paste_entries(&bi, item_pos, pos_in_item, 1, 1552 + (struct reiserfs_de_head *)body, 1553 + body + DEH_SIZE, 1554 + tb->insert_size[0]); 1098 1555 if (!item_pos && 
!pos_in_item) { 1099 - RFALSE(!tb->CFL[0] 1100 - || !tb->L[0], 1556 + RFALSE(!tb->CFL[0] || !tb->L[0], 1101 1557 "PAP-12270: CFL[0]/L[0] must be specified"); 1102 - if (tb->CFL[0]) { 1103 - replace_key(tb, 1104 - tb-> 1105 - CFL 1106 - [0], 1107 - tb-> 1108 - lkey 1109 - [0], 1110 - tbS0, 1111 - 0); 1112 - 1113 - } 1558 + if (tb->CFL[0]) 1559 + replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0); 1114 1560 } 1115 1561 tb->insert_size[0] = 0; 1116 1562 } ··· 1095 1593 "PAP-12275: insert size must not be %d", 1096 1594 tb->insert_size[0]); 1097 1595 buffer_info_init_tbS0(tb, &bi); 1098 - leaf_paste_in_buffer(&bi, 1099 - item_pos, 1100 - pos_in_item, 1101 - tb-> 1102 - insert_size 1103 - [0], body, 1104 - zeros_num); 1596 + leaf_paste_in_buffer(&bi, item_pos, pos_in_item, 1597 + tb->insert_size[0], body, zeros_num); 1105 1598 1106 1599 if (is_indirect_le_ih(pasted)) { 1107 1600 #if 0 ··· 1108 1611 tb-> 1109 1612 insert_size[0]); 1110 1613 #endif 1111 - set_ih_free_space 1112 - (pasted, 0); 1614 + set_ih_free_space(pasted, 0); 1113 1615 } 1114 1616 tb->insert_size[0] = 0; 1115 1617 } ··· 1116 1620 else { 1117 1621 if (tb->insert_size[0]) { 1118 1622 print_cur_tb("12285"); 1119 - reiserfs_panic(tb-> 1120 - tb_sb, 1623 + reiserfs_panic(tb->tb_sb, 1121 1624 "PAP-12285", 1122 1625 "insert_size " 1123 1626 "must be 0 "
+6 -9
fs/sync.c
··· 27 27 * wait == 1 case since in that case write_inode() functions do 28 28 * sync_dirty_buffer() and thus effectively write one block at a time. 29 29 */ 30 - static int __sync_filesystem(struct super_block *sb, int wait, 31 - unsigned long start) 30 + static int __sync_filesystem(struct super_block *sb, int wait) 32 31 { 33 32 if (wait) 34 - sync_inodes_sb(sb, start); 33 + sync_inodes_sb(sb); 35 34 else 36 35 writeback_inodes_sb(sb, WB_REASON_SYNC); 37 36 ··· 47 48 int sync_filesystem(struct super_block *sb) 48 49 { 49 50 int ret; 50 - unsigned long start = jiffies; 51 51 52 52 /* 53 53 * We need to be protected against the filesystem going from ··· 60 62 if (sb->s_flags & MS_RDONLY) 61 63 return 0; 62 64 63 - ret = __sync_filesystem(sb, 0, start); 65 + ret = __sync_filesystem(sb, 0); 64 66 if (ret < 0) 65 67 return ret; 66 - return __sync_filesystem(sb, 1, start); 68 + return __sync_filesystem(sb, 1); 67 69 } 68 70 EXPORT_SYMBOL_GPL(sync_filesystem); 69 71 70 72 static void sync_inodes_one_sb(struct super_block *sb, void *arg) 71 73 { 72 74 if (!(sb->s_flags & MS_RDONLY)) 73 - sync_inodes_sb(sb, *((unsigned long *)arg)); 75 + sync_inodes_sb(sb); 74 76 } 75 77 76 78 static void sync_fs_one_sb(struct super_block *sb, void *arg) ··· 102 104 SYSCALL_DEFINE0(sync) 103 105 { 104 106 int nowait = 0, wait = 1; 105 - unsigned long start = jiffies; 106 107 107 108 wakeup_flusher_threads(0, WB_REASON_SYNC); 108 - iterate_supers(sync_inodes_one_sb, &start); 109 + iterate_supers(sync_inodes_one_sb, NULL); 109 110 iterate_supers(sync_fs_one_sb, &nowait); 110 111 iterate_supers(sync_fs_one_sb, &wait); 111 112 iterate_bdevs(fdatawrite_one_bdev, NULL);
+3 -2
fs/sysfs/mount.c
··· 27 27 { 28 28 struct dentry *root; 29 29 void *ns; 30 + bool new_sb; 30 31 31 32 if (!(flags & MS_KERNMOUNT)) { 32 33 if (!capable(CAP_SYS_ADMIN) && !fs_fully_visible(fs_type)) ··· 38 37 } 39 38 40 39 ns = kobj_ns_grab_current(KOBJ_NS_TYPE_NET); 41 - root = kernfs_mount_ns(fs_type, flags, sysfs_root, ns); 42 - if (IS_ERR(root)) 40 + root = kernfs_mount_ns(fs_type, flags, sysfs_root, &new_sb, ns); 41 + if (IS_ERR(root) || !new_sb) 43 42 kobj_ns_drop(KOBJ_NS_TYPE_NET, ns); 44 43 return root; 45 44 }
+12 -2
fs/udf/file.c
··· 144 144 size_t count = iocb->ki_nbytes; 145 145 struct udf_inode_info *iinfo = UDF_I(inode); 146 146 147 + mutex_lock(&inode->i_mutex); 147 148 down_write(&iinfo->i_data_sem); 148 149 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { 149 150 if (file->f_flags & O_APPEND) ··· 157 156 pos + count)) { 158 157 err = udf_expand_file_adinicb(inode); 159 158 if (err) { 159 + mutex_unlock(&inode->i_mutex); 160 160 udf_debug("udf_expand_adinicb: err=%d\n", err); 161 161 return err; 162 162 } ··· 171 169 } else 172 170 up_write(&iinfo->i_data_sem); 173 171 174 - retval = generic_file_aio_write(iocb, iov, nr_segs, ppos); 175 - if (retval > 0) 172 + retval = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); 173 + mutex_unlock(&inode->i_mutex); 174 + 175 + if (retval > 0) { 176 + ssize_t err; 177 + 176 178 mark_inode_dirty(inode); 179 + err = generic_write_sync(file, iocb->ki_pos - retval, retval); 180 + if (err < 0) 181 + retval = err; 182 + } 177 183 178 184 return retval; 179 185 }
+1
fs/udf/inode.c
··· 265 265 .nr_to_write = 1, 266 266 }; 267 267 268 + WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex)); 268 269 if (!iinfo->i_lenAlloc) { 269 270 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) 270 271 iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
+8 -8
fs/xfs/xfs_iops.c
··· 705 705 { 706 706 struct xfs_mount *mp = ip->i_mount; 707 707 struct inode *inode = VFS_I(ip); 708 - int mask = iattr->ia_valid; 709 708 xfs_off_t oldsize, newsize; 710 709 struct xfs_trans *tp; 711 710 int error; ··· 725 726 726 727 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 727 728 ASSERT(S_ISREG(ip->i_d.di_mode)); 728 - ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| 729 - ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); 729 + ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| 730 + ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); 730 731 731 732 oldsize = inode->i_size; 732 733 newsize = iattr->ia_size; ··· 735 736 * Short circuit the truncate case for zero length files. 736 737 */ 737 738 if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) { 738 - if (!(mask & (ATTR_CTIME|ATTR_MTIME))) 739 + if (!(iattr->ia_valid & (ATTR_CTIME|ATTR_MTIME))) 739 740 return 0; 740 741 741 742 /* ··· 823 824 * these flags set. For all other operations the VFS set these flags 824 825 * explicitly if it wants a timestamp update. 825 826 */ 826 - if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) { 827 + if (newsize != oldsize && 828 + !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) { 827 829 iattr->ia_ctime = iattr->ia_mtime = 828 830 current_fs_time(inode->i_sb); 829 - mask |= ATTR_CTIME | ATTR_MTIME; 831 + iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME; 830 832 } 831 833 832 834 /* ··· 863 863 xfs_inode_clear_eofblocks_tag(ip); 864 864 } 865 865 866 - if (mask & ATTR_MODE) 866 + if (iattr->ia_valid & ATTR_MODE) 867 867 xfs_setattr_mode(ip, iattr); 868 - if (mask & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME)) 868 + if (iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME)) 869 869 xfs_setattr_time(ip, iattr); 870 870 871 871 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+15 -4
fs/xfs/xfs_log_cil.c
··· 205 205 /* 206 206 * We 64-bit align the length of each iovec so that the start 207 207 * of the next one is naturally aligned. We'll need to 208 - * account for that slack space here. 208 + * account for that slack space here. Then round nbytes up 209 + * to 64-bit alignment so that the initial buffer alignment is 210 + * easy to calculate and verify. 209 211 */ 210 212 nbytes += niovecs * sizeof(uint64_t); 213 + nbytes = round_up(nbytes, sizeof(uint64_t)); 211 214 212 215 /* grab the old item if it exists for reservation accounting */ 213 216 old_lv = lip->li_lv; 214 217 215 - /* calc buffer size */ 216 - buf_size = sizeof(struct xfs_log_vec) + nbytes + 217 - niovecs * sizeof(struct xfs_log_iovec); 218 + /* 219 + * The data buffer needs to start 64-bit aligned, so round up 220 + * that space to ensure we can align it appropriately and not 221 + * overrun the buffer. 222 + */ 223 + buf_size = nbytes + 224 + round_up((sizeof(struct xfs_log_vec) + 225 + niovecs * sizeof(struct xfs_log_iovec)), 226 + sizeof(uint64_t)); 218 227 219 228 /* compare to existing item size */ 220 229 if (lip->li_lv && buf_size <= lip->li_lv->lv_size) { ··· 260 251 /* The allocated data region lies beyond the iovec region */ 261 252 lv->lv_buf_len = 0; 262 253 lv->lv_buf = (char *)lv + buf_size - nbytes; 254 + ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t))); 255 + 263 256 lip->li_ops->iop_format(lip, lv); 264 257 insert: 265 258 ASSERT(lv->lv_buf_len <= nbytes);
+16 -8
fs/xfs/xfs_mount.c
··· 282 282 struct xfs_sb *sbp = &mp->m_sb; 283 283 int error; 284 284 int loud = !(flags & XFS_MFSI_QUIET); 285 + const struct xfs_buf_ops *buf_ops; 285 286 286 287 ASSERT(mp->m_sb_bp == NULL); 287 288 ASSERT(mp->m_ddev_targp != NULL); 289 + 290 + /* 291 + * For the initial read, we must guess at the sector 292 + * size based on the block device. It's enough to 293 + * get the sb_sectsize out of the superblock and 294 + * then reread with the proper length. 295 + * We don't verify it yet, because it may not be complete. 296 + */ 297 + sector_size = xfs_getsize_buftarg(mp->m_ddev_targp); 298 + buf_ops = NULL; 288 299 289 300 /* 290 301 * Allocate a (locked) buffer to hold the superblock. 291 302 * This will be kept around at all times to optimize 292 303 * access to the superblock. 293 304 */ 294 - sector_size = xfs_getsize_buftarg(mp->m_ddev_targp); 295 - 296 305 reread: 297 306 bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR, 298 - BTOBB(sector_size), 0, 299 - loud ? &xfs_sb_buf_ops 300 - : &xfs_sb_quiet_buf_ops); 307 + BTOBB(sector_size), 0, buf_ops); 301 308 if (!bp) { 302 309 if (loud) 303 310 xfs_warn(mp, "SB buffer read failed"); ··· 335 328 } 336 329 337 330 /* 338 - * If device sector size is smaller than the superblock size, 339 - * re-read the superblock so the buffer is correctly sized. 331 + * Re-read the superblock so the buffer is correctly sized, 332 + * and properly verified. 340 333 */ 341 - if (sector_size < sbp->sb_sectsize) { 334 + if (buf_ops == NULL) { 342 335 xfs_buf_relse(bp); 343 336 sector_size = sbp->sb_sectsize; 337 + buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops; 344 338 goto reread; 345 339 } 346 340
+4 -6
fs/xfs/xfs_sb.c
··· 295 295 sbp->sb_dblocks == 0 || 296 296 sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp) || 297 297 sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp))) { 298 - XFS_CORRUPTION_ERROR("SB sanity check failed", 299 - XFS_ERRLEVEL_LOW, mp, sbp); 298 + xfs_notice(mp, "SB sanity check failed"); 300 299 return XFS_ERROR(EFSCORRUPTED); 301 300 } 302 301 ··· 610 611 XFS_SB_VERSION_5) || 611 612 dsb->sb_crc != 0)) { 612 613 613 - if (!xfs_verify_cksum(bp->b_addr, be16_to_cpu(dsb->sb_sectsize), 614 + if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length), 614 615 offsetof(struct xfs_sb, sb_crc))) { 615 616 /* Only fail bad secondaries on a known V5 filesystem */ 616 - if (bp->b_bn != XFS_SB_DADDR && 617 + if (bp->b_bn == XFS_SB_DADDR || 617 618 xfs_sb_version_hascrc(&mp->m_sb)) { 618 619 error = EFSCORRUPTED; 619 620 goto out_error; ··· 624 625 625 626 out_error: 626 627 if (error) { 627 - if (error != EWRONGFS) 628 + if (error == EFSCORRUPTED) 628 629 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, 629 630 mp, bp->b_addr); 630 631 xfs_buf_ioerror(bp, error); ··· 642 643 struct xfs_buf *bp) 643 644 { 644 645 struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp); 645 - 646 646 647 647 if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) { 648 648 /* XFS filesystem, verify noisily! */
+1 -1
fs/xfs/xfs_super.c
··· 913 913 struct super_block *sb = mp->m_super; 914 914 915 915 if (down_read_trylock(&sb->s_umount)) { 916 - sync_inodes_sb(sb, jiffies); 916 + sync_inodes_sb(sb); 917 917 up_read(&sb->s_umount); 918 918 } 919 919 }
+39
include/asm-generic/pgtable.h
··· 701 701 } 702 702 #endif 703 703 704 + #ifndef ptep_set_numa 705 + static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, 706 + pte_t *ptep) 707 + { 708 + pte_t ptent = *ptep; 709 + 710 + ptent = pte_mknuma(ptent); 711 + set_pte_at(mm, addr, ptep, ptent); 712 + return; 713 + } 714 + #endif 715 + 704 716 #ifndef pmd_mknuma 705 717 static inline pmd_t pmd_mknuma(pmd_t pmd) 706 718 { 707 719 pmd = pmd_set_flags(pmd, _PAGE_NUMA); 708 720 return pmd_clear_flags(pmd, _PAGE_PRESENT); 721 + } 722 + #endif 723 + 724 + #ifndef pmdp_set_numa 725 + static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, 726 + pmd_t *pmdp) 727 + { 728 + pmd_t pmd = *pmdp; 729 + 730 + pmd = pmd_mknuma(pmd); 731 + set_pmd_at(mm, addr, pmdp, pmd); 732 + return; 709 733 } 710 734 #endif 711 735 #else ··· 739 715 extern pmd_t pmd_mknonnuma(pmd_t pmd); 740 716 extern pte_t pte_mknuma(pte_t pte); 741 717 extern pmd_t pmd_mknuma(pmd_t pmd); 718 + extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep); 719 + extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp); 742 720 #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ 743 721 #else 744 722 static inline int pmd_numa(pmd_t pmd) ··· 768 742 return pte; 769 743 } 770 744 745 + static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, 746 + pte_t *ptep) 747 + { 748 + return; 749 + } 750 + 751 + 771 752 static inline pmd_t pmd_mknuma(pmd_t pmd) 772 753 { 773 754 return pmd; 755 + } 756 + 757 + static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, 758 + pmd_t *pmdp) 759 + { 760 + return ; 774 761 } 775 762 #endif /* CONFIG_NUMA_BALANCING */ 776 763
+3
include/drm/drm_crtc.h
··· 907 907 908 908 /* whether async page flip is supported or not */ 909 909 bool async_page_flip; 910 + 911 + /* cursor size */ 912 + uint32_t cursor_width, cursor_height; 910 913 }; 911 914 912 915 #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
+2
include/drm/ttm/ttm_page_alloc.h
··· 29 29 #include <drm/ttm/ttm_bo_driver.h> 30 30 #include <drm/ttm/ttm_memory.h> 31 31 32 + struct device; 33 + 32 34 /** 33 35 * Initialize pool allocator. 34 36 */
+2 -2
include/dt-bindings/clock/tegra124-car.h
··· 36 36 #define TEGRA124_CLK_PWM 17 37 37 #define TEGRA124_CLK_I2S2 18 38 38 /* 20 (register bit affects vi and vi_sensor) */ 39 - #define TEGRA124_CLK_GR_2D 21 39 + /* 21 */ 40 40 #define TEGRA124_CLK_USBD 22 41 41 #define TEGRA124_CLK_ISP 23 42 - #define TEGRA124_CLK_GR_3D 24 42 + /* 26 */ 43 43 /* 25 */ 44 44 #define TEGRA124_CLK_DISP2 26 45 45 #define TEGRA124_CLK_DISP1 27
+8 -3
include/linux/blk-mq.h
··· 121 121 122 122 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); 123 123 124 - void blk_mq_insert_request(struct request_queue *, struct request *, 125 - bool, bool); 124 + void blk_mq_insert_request(struct request *, bool, bool, bool); 126 125 void blk_mq_run_queues(struct request_queue *q, bool async); 127 126 void blk_mq_free_request(struct request *rq); 128 127 bool blk_mq_can_queue(struct blk_mq_hw_ctx *); ··· 133 134 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int); 134 135 void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); 135 136 136 - void blk_mq_end_io(struct request *rq, int error); 137 + bool blk_mq_end_io_partial(struct request *rq, int error, 138 + unsigned int nr_bytes); 139 + static inline void blk_mq_end_io(struct request *rq, int error) 140 + { 141 + bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq)); 142 + BUG_ON(!done); 143 + } 137 144 138 145 void blk_mq_complete_request(struct request *rq); 139 146
+3 -2
include/linux/ceph/ceph_fs.h
··· 373 373 /* 374 374 * Ceph setxattr request flags. 375 375 */ 376 - #define CEPH_XATTR_CREATE 1 377 - #define CEPH_XATTR_REPLACE 2 376 + #define CEPH_XATTR_CREATE (1 << 0) 377 + #define CEPH_XATTR_REPLACE (1 << 1) 378 + #define CEPH_XATTR_REMOVE (1 << 31) 378 379 379 380 union ceph_mds_request_args { 380 381 struct {
+2
include/linux/cgroup.h
··· 166 166 * 167 167 * The ID of the root cgroup is always 0, and a new cgroup 168 168 * will be assigned with a smallest available ID. 169 + * 170 + * Allocating/Removing ID must be protected by cgroup_mutex. 169 171 */ 170 172 int id; 171 173
+4
include/linux/clk/ti.h
··· 245 245 void omap2_init_clk_clkdm(struct clk_hw *clk); 246 246 unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, 247 247 unsigned long parent_rate); 248 + int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate, 249 + unsigned long parent_rate); 250 + long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate, 251 + unsigned long *prate); 248 252 int omap2_clkops_enable_clkdm(struct clk_hw *hw); 249 253 void omap2_clkops_disable_clkdm(struct clk_hw *hw); 250 254 int omap2_clk_disable_autoidle_all(void);
+1 -1
include/linux/dma-buf.h
··· 171 171 size_t size, int flags, const char *); 172 172 173 173 #define dma_buf_export(priv, ops, size, flags) \ 174 - dma_buf_export_named(priv, ops, size, flags, __FILE__) 174 + dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME) 175 175 176 176 int dma_buf_fd(struct dma_buf *dmabuf, int flags); 177 177 struct dma_buf *dma_buf_get(int fd);
+1
include/linux/firewire.h
··· 200 200 unsigned irmc:1; 201 201 unsigned bc_implemented:2; 202 202 203 + work_func_t workfn; 203 204 struct delayed_work work; 204 205 struct fw_attribute_group attribute_group; 205 206 };
+2 -2
include/linux/fsnotify_backend.h
··· 99 99 struct fsnotify_mark *inode_mark, 100 100 struct fsnotify_mark *vfsmount_mark, 101 101 u32 mask, void *data, int data_type, 102 - const unsigned char *file_name); 102 + const unsigned char *file_name, u32 cookie); 103 103 void (*free_group_priv)(struct fsnotify_group *group); 104 104 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); 105 105 void (*free_event)(struct fsnotify_event *event); ··· 160 160 161 161 struct fasync_struct *fsn_fa; /* async notification */ 162 162 163 - struct fsnotify_event overflow_event; /* Event we queue when the 163 + struct fsnotify_event *overflow_event; /* Event we queue when the 164 164 * notification list is too 165 165 * full */ 166 166
-41
include/linux/huge_mm.h
··· 157 157 return HPAGE_PMD_NR; 158 158 return 1; 159 159 } 160 - /* 161 - * compound_trans_head() should be used instead of compound_head(), 162 - * whenever the "page" passed as parameter could be the tail of a 163 - * transparent hugepage that could be undergoing a 164 - * __split_huge_page_refcount(). The page structure layout often 165 - * changes across releases and it makes extensive use of unions. So if 166 - * the page structure layout will change in a way that 167 - * page->first_page gets clobbered by __split_huge_page_refcount, the 168 - * implementation making use of smp_rmb() will be required. 169 - * 170 - * Currently we define compound_trans_head as compound_head, because 171 - * page->private is in the same union with page->first_page, and 172 - * page->private isn't clobbered. However this also means we're 173 - * currently leaving dirt into the page->private field of anonymous 174 - * pages resulting from a THP split, instead of setting page->private 175 - * to zero like for every other page that has PG_private not set. But 176 - * anonymous pages don't use page->private so this is not a problem. 177 - */ 178 - #if 0 179 - /* This will be needed if page->private will be clobbered in split_huge_page */ 180 - static inline struct page *compound_trans_head(struct page *page) 181 - { 182 - if (PageTail(page)) { 183 - struct page *head; 184 - head = page->first_page; 185 - smp_rmb(); 186 - /* 187 - * head may be a dangling pointer. 188 - * __split_huge_page_refcount clears PageTail before 189 - * overwriting first_page, so if PageTail is still 190 - * there it means the head pointer isn't dangling. 
191 - */ 192 - if (PageTail(page)) 193 - return head; 194 - } 195 - return page; 196 - } 197 - #else 198 - #define compound_trans_head(page) compound_head(page) 199 - #endif 200 160 201 161 extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, 202 162 unsigned long addr, pmd_t pmd, pmd_t *pmdp); ··· 186 226 do { } while (0) 187 227 #define split_huge_page_pmd_mm(__mm, __address, __pmd) \ 188 228 do { } while (0) 189 - #define compound_trans_head(page) compound_head(page) 190 229 static inline int hugepage_madvise(struct vm_area_struct *vma, 191 230 unsigned long *vm_flags, int advice) 192 231 {
-2
include/linux/ipc_namespace.h
··· 118 118 * the new maximum will handle anyone else. I may have to revisit this 119 119 * in the future. 120 120 */ 121 - #define MIN_QUEUESMAX 1 122 121 #define DFLT_QUEUESMAX 256 123 - #define HARD_QUEUESMAX 1024 124 122 #define MIN_MSGMAX 1 125 123 #define DFLT_MSG 10U 126 124 #define DFLT_MSGMAX 10
+6
include/linux/irq.h
··· 303 303 * @irq_pm_shutdown: function called from core code on shutdown once per chip 304 304 * @irq_calc_mask: Optional function to set irq_data.mask for special cases 305 305 * @irq_print_chip: optional to print special chip info in show_interrupts 306 + * @irq_request_resources: optional to request resources before calling 307 + * any other callback related to this irq 308 + * @irq_release_resources: optional to release resources acquired with 309 + * irq_request_resources 306 310 * @flags: chip specific flags 307 311 */ 308 312 struct irq_chip { ··· 340 336 void (*irq_calc_mask)(struct irq_data *data); 341 337 342 338 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); 339 + int (*irq_request_resources)(struct irq_data *data); 340 + void (*irq_release_resources)(struct irq_data *data); 343 341 344 342 unsigned long flags; 345 343 };
+5 -4
include/linux/kernfs.h
··· 249 249 250 250 const void *kernfs_super_ns(struct super_block *sb); 251 251 struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags, 252 - struct kernfs_root *root, const void *ns); 252 + struct kernfs_root *root, bool *new_sb_created, 253 + const void *ns); 253 254 void kernfs_kill_sb(struct super_block *sb); 254 255 255 256 void kernfs_init(void); ··· 318 317 319 318 static inline struct dentry * 320 319 kernfs_mount_ns(struct file_system_type *fs_type, int flags, 321 - struct kernfs_root *root, const void *ns) 320 + struct kernfs_root *root, bool *new_sb_created, const void *ns) 322 321 { return ERR_PTR(-ENOSYS); } 323 322 324 323 static inline void kernfs_kill_sb(struct super_block *sb) { } ··· 369 368 370 369 static inline struct dentry * 371 370 kernfs_mount(struct file_system_type *fs_type, int flags, 372 - struct kernfs_root *root) 371 + struct kernfs_root *root, bool *new_sb_created) 373 372 { 374 - return kernfs_mount_ns(fs_type, flags, root, NULL); 373 + return kernfs_mount_ns(fs_type, flags, root, new_sb_created, NULL); 375 374 } 376 375 377 376 #endif /* __LINUX_KERNFS_H */
+1 -1
include/linux/mfd/max8997-private.h
··· 387 387 struct i2c_client *muic; /* slave addr 0x4a */ 388 388 struct mutex iolock; 389 389 390 - int type; 390 + unsigned long type; 391 391 struct platform_device *battery; /* battery control (not fuel gauge) */ 392 392 393 393 int irq;
+1 -1
include/linux/mfd/max8998-private.h
··· 163 163 int ono; 164 164 u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS]; 165 165 u8 irq_masks_cache[MAX8998_NUM_IRQ_REGS]; 166 - int type; 166 + unsigned long type; 167 167 bool wakeup; 168 168 }; 169 169
+2 -2
include/linux/mfd/tps65217.h
··· 252 252 struct tps65217 { 253 253 struct device *dev; 254 254 struct tps65217_board *pdata; 255 - unsigned int id; 255 + unsigned long id; 256 256 struct regulator_desc desc[TPS65217_NUM_REGULATOR]; 257 257 struct regulator_dev *rdev[TPS65217_NUM_REGULATOR]; 258 258 struct regmap *regmap; ··· 263 263 return dev_get_drvdata(dev); 264 264 } 265 265 266 - static inline int tps65217_chip_id(struct tps65217 *tps65217) 266 + static inline unsigned long tps65217_chip_id(struct tps65217 *tps65217) 267 267 { 268 268 return tps65217->id; 269 269 }
+15 -5
include/linux/mm.h
··· 175 175 * Special vmas that are non-mergable, non-mlock()able. 176 176 * Note: mm/huge_memory.c VM_NO_THP depends on this definition. 177 177 */ 178 - #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP) 178 + #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) 179 179 180 180 /* 181 181 * mapping from the currently active vm_flags protection bits (the ··· 399 399 400 400 static inline struct page *compound_head(struct page *page) 401 401 { 402 - if (unlikely(PageTail(page))) 403 - return page->first_page; 402 + if (unlikely(PageTail(page))) { 403 + struct page *head = page->first_page; 404 + 405 + /* 406 + * page->first_page may be a dangling pointer to an old 407 + * compound page, so recheck that it is still a tail 408 + * page before returning. 409 + */ 410 + smp_rmb(); 411 + if (likely(PageTail(page))) 412 + return head; 413 + } 404 414 return page; 405 415 } 406 416 ··· 767 757 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS 768 758 static inline int page_cpupid_xchg_last(struct page *page, int cpupid) 769 759 { 770 - return xchg(&page->_last_cpupid, cpupid); 760 + return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK); 771 761 } 772 762 773 763 static inline int page_cpupid_last(struct page *page) ··· 776 766 } 777 767 static inline void page_cpupid_reset_last(struct page *page) 778 768 { 779 - page->_last_cpupid = -1; 769 + page->_last_cpupid = -1 & LAST_CPUPID_MASK; 780 770 } 781 771 #else 782 772 static inline int page_cpupid_last(struct page *page)
+32 -4
include/linux/netdevice.h
··· 752 752 unsigned char id_len; 753 753 }; 754 754 755 + typedef u16 (*select_queue_fallback_t)(struct net_device *dev, 756 + struct sk_buff *skb); 757 + 755 758 /* 756 759 * This structure defines the management hooks for network devices. 757 760 * The following hooks can be defined; unless noted otherwise, they are ··· 786 783 * Required can not be NULL. 787 784 * 788 785 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, 789 - * void *accel_priv); 786 + * void *accel_priv, select_queue_fallback_t fallback); 790 787 * Called to decide which queue to when device supports multiple 791 788 * transmit queues. 792 789 * ··· 1008 1005 struct net_device *dev); 1009 1006 u16 (*ndo_select_queue)(struct net_device *dev, 1010 1007 struct sk_buff *skb, 1011 - void *accel_priv); 1008 + void *accel_priv, 1009 + select_queue_fallback_t fallback); 1012 1010 void (*ndo_change_rx_flags)(struct net_device *dev, 1013 1011 int flags); 1014 1012 void (*ndo_set_rx_mode)(struct net_device *dev); ··· 1555 1551 struct netdev_queue *netdev_pick_tx(struct net_device *dev, 1556 1552 struct sk_buff *skb, 1557 1553 void *accel_priv); 1558 - u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); 1559 1554 1560 1555 /* 1561 1556 * Net namespace inlines ··· 2276 2273 static inline void netdev_reset_queue(struct net_device *dev_queue) 2277 2274 { 2278 2275 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); 2276 + } 2277 + 2278 + /** 2279 + * netdev_cap_txqueue - check if selected tx queue exceeds device queues 2280 + * @dev: network device 2281 + * @queue_index: given tx queue index 2282 + * 2283 + * Returns 0 if given tx queue index >= number of device tx queues, 2284 + * otherwise returns the originally passed tx queue index. 
2285 + */ 2286 + static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) 2287 + { 2288 + if (unlikely(queue_index >= dev->real_num_tx_queues)) { 2289 + net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", 2290 + dev->name, queue_index, 2291 + dev->real_num_tx_queues); 2292 + return 0; 2293 + } 2294 + 2295 + return queue_index; 2279 2296 } 2280 2297 2281 2298 /** ··· 3091 3068 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 3092 3069 struct net_device *dev); 3093 3070 3094 - netdev_features_t netif_skb_features(struct sk_buff *skb); 3071 + netdev_features_t netif_skb_dev_features(struct sk_buff *skb, 3072 + const struct net_device *dev); 3073 + static inline netdev_features_t netif_skb_features(struct sk_buff *skb) 3074 + { 3075 + return netif_skb_dev_features(skb, skb->dev); 3076 + } 3095 3077 3096 3078 static inline bool net_gso_ok(netdev_features_t features, int gso_type) 3097 3079 {
+5
include/linux/nfs_xdr.h
··· 467 467 }; 468 468 469 469 struct nfs_release_lockowner_args { 470 + struct nfs4_sequence_args seq_args; 470 471 struct nfs_lowner lock_owner; 472 + }; 473 + 474 + struct nfs_release_lockowner_res { 475 + struct nfs4_sequence_res seq_res; 471 476 }; 472 477 473 478 struct nfs4_delegreturnargs {
+20
include/linux/pci.h
··· 1169 1169 void pci_restore_msi_state(struct pci_dev *dev); 1170 1170 int pci_msi_enabled(void); 1171 1171 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec); 1172 + static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec) 1173 + { 1174 + int rc = pci_enable_msi_range(dev, nvec, nvec); 1175 + if (rc < 0) 1176 + return rc; 1177 + return 0; 1178 + } 1172 1179 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, 1173 1180 int minvec, int maxvec); 1181 + static inline int pci_enable_msix_exact(struct pci_dev *dev, 1182 + struct msix_entry *entries, int nvec) 1183 + { 1184 + int rc = pci_enable_msix_range(dev, entries, nvec, nvec); 1185 + if (rc < 0) 1186 + return rc; 1187 + return 0; 1188 + } 1174 1189 #else 1175 1190 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } 1176 1191 static inline int pci_enable_msi_block(struct pci_dev *dev, int nvec) ··· 1204 1189 static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec, 1205 1190 int maxvec) 1206 1191 { return -ENOSYS; } 1192 + static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec) 1193 + { return -ENOSYS; } 1207 1194 static inline int pci_enable_msix_range(struct pci_dev *dev, 1208 1195 struct msix_entry *entries, int minvec, int maxvec) 1196 + { return -ENOSYS; } 1197 + static inline int pci_enable_msix_exact(struct pci_dev *dev, 1198 + struct msix_entry *entries, int nvec) 1209 1199 { return -ENOSYS; } 1210 1200 #endif 1211 1201
+21 -1
include/linux/skbuff.h
··· 2725 2725 2726 2726 static inline void nf_reset_trace(struct sk_buff *skb) 2727 2727 { 2728 - #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) 2728 + #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) 2729 2729 skb->nf_trace = 0; 2730 2730 #endif 2731 2731 } ··· 2741 2741 #ifdef CONFIG_BRIDGE_NETFILTER 2742 2742 dst->nf_bridge = src->nf_bridge; 2743 2743 nf_bridge_get(src->nf_bridge); 2744 + #endif 2745 + #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) 2746 + dst->nf_trace = src->nf_trace; 2744 2747 #endif 2745 2748 } 2746 2749 ··· 2918 2915 static inline bool skb_head_is_locked(const struct sk_buff *skb) 2919 2916 { 2920 2917 return !skb->head_frag || skb_cloned(skb); 2918 + } 2919 + 2920 + /** 2921 + * skb_gso_network_seglen - Return length of individual segments of a gso packet 2922 + * 2923 + * @skb: GSO skb 2924 + * 2925 + * skb_gso_network_seglen is used to determine the real size of the 2926 + * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). 2927 + * 2928 + * The MAC/L2 header is not accounted for. 2929 + */ 2930 + static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) 2931 + { 2932 + unsigned int hdr_len = skb_transport_header(skb) - 2933 + skb_network_header(skb); 2934 + return hdr_len + skb_gso_transport_seglen(skb); 2921 2935 } 2922 2936 #endif /* __KERNEL__ */ 2923 2937 #endif /* _LINUX_SKBUFF_H */
+4 -2
include/linux/syscalls.h
··· 281 281 asmlinkage long sys_sched_setparam(pid_t pid, 282 282 struct sched_param __user *param); 283 283 asmlinkage long sys_sched_setattr(pid_t pid, 284 - struct sched_attr __user *attr); 284 + struct sched_attr __user *attr, 285 + unsigned int flags); 285 286 asmlinkage long sys_sched_getscheduler(pid_t pid); 286 287 asmlinkage long sys_sched_getparam(pid_t pid, 287 288 struct sched_param __user *param); 288 289 asmlinkage long sys_sched_getattr(pid_t pid, 289 290 struct sched_attr __user *attr, 290 - unsigned int size); 291 + unsigned int size, 292 + unsigned int flags); 291 293 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, 292 294 unsigned long __user *user_mask_ptr); 293 295 asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
+6
include/linux/tracepoint.h
··· 60 60 unsigned int num_tracepoints; 61 61 struct tracepoint * const *tracepoints_ptrs; 62 62 }; 63 + bool trace_module_has_bad_taint(struct module *mod); 64 + #else 65 + static inline bool trace_module_has_bad_taint(struct module *mod) 66 + { 67 + return false; 68 + } 63 69 #endif /* CONFIG_MODULES */ 64 70 65 71 struct tracepoint_iter {
+1 -4
include/linux/workqueue.h
··· 419 419 static struct lock_class_key __key; \ 420 420 const char *__lock_name; \ 421 421 \ 422 - if (__builtin_constant_p(fmt)) \ 423 - __lock_name = (fmt); \ 424 - else \ 425 - __lock_name = #fmt; \ 422 + __lock_name = #fmt#args; \ 426 423 \ 427 424 __alloc_workqueue_key((fmt), (flags), (max_active), \ 428 425 &__key, __lock_name, ##args); \
+1 -1
include/linux/writeback.h
··· 97 97 int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason); 98 98 int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr, 99 99 enum wb_reason reason); 100 - void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this); 100 + void sync_inodes_sb(struct super_block *); 101 101 void wakeup_flusher_threads(long nr_pages, enum wb_reason reason); 102 102 void inode_wait_for_writeback(struct inode *inode); 103 103
+1
include/net/ip_tunnels.h
··· 129 129 int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], 130 130 struct ip_tunnel_parm *p); 131 131 void ip_tunnel_setup(struct net_device *dev, int net_id); 132 + void ip_tunnel_dst_reset_all(struct ip_tunnel *t); 132 133 133 134 /* Extract dsfield from inner protocol */ 134 135 static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
+1 -13
include/net/sctp/structs.h
··· 1653 1653 /* This is the last advertised value of rwnd over a SACK chunk. */ 1654 1654 __u32 a_rwnd; 1655 1655 1656 - /* Number of bytes by which the rwnd has slopped. The rwnd is allowed 1657 - * to slop over a maximum of the association's frag_point. 1658 - */ 1659 - __u32 rwnd_over; 1660 - 1661 - /* Keeps treack of rwnd pressure. This happens when we have 1662 - * a window, but not recevie buffer (i.e small packets). This one 1663 - * is releases slowly (1 PMTU at a time ). 1664 - */ 1665 - __u32 rwnd_press; 1666 - 1667 1656 /* This is the sndbuf size in use for the association. 1668 1657 * This corresponds to the sndbuf size for the association, 1669 1658 * as specified in the sk->sndbuf. ··· 1881 1892 __u32 sctp_association_get_next_tsn(struct sctp_association *); 1882 1893 1883 1894 void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); 1884 - void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); 1885 - void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); 1895 + void sctp_assoc_rwnd_update(struct sctp_association *, bool); 1886 1896 void sctp_assoc_set_primary(struct sctp_association *, 1887 1897 struct sctp_transport *); 1888 1898 void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
+2 -1
include/net/tcp.h
··· 1303 1303 /* Fast Open cookie. Size 0 means a cookie request */ 1304 1304 struct tcp_fastopen_cookie cookie; 1305 1305 struct msghdr *data; /* data in MSG_FASTOPEN */ 1306 - u16 copied; /* queued in tcp_connect() */ 1306 + size_t size; 1307 + int copied; /* queued in tcp_connect() */ 1307 1308 }; 1308 1309 void tcp_free_fastopen_req(struct tcp_sock *tp); 1309 1310
+11
include/net/xfrm.h
··· 1648 1648 } 1649 1649 #endif 1650 1650 1651 + static inline int aead_len(struct xfrm_algo_aead *alg) 1652 + { 1653 + return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); 1654 + } 1655 + 1651 1656 static inline int xfrm_alg_len(const struct xfrm_algo *alg) 1652 1657 { 1653 1658 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); ··· 1690 1685 1691 1686 return 0; 1692 1687 } 1688 + 1689 + static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig) 1690 + { 1691 + return kmemdup(orig, aead_len(orig), GFP_KERNEL); 1692 + } 1693 + 1693 1694 1694 1695 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig) 1695 1696 {
+8
include/sound/soc-dapm.h
··· 449 449 /* dapm audio pin control and status */ 450 450 int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm, 451 451 const char *pin); 452 + int snd_soc_dapm_enable_pin_unlocked(struct snd_soc_dapm_context *dapm, 453 + const char *pin); 452 454 int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm, 453 455 const char *pin); 456 + int snd_soc_dapm_disable_pin_unlocked(struct snd_soc_dapm_context *dapm, 457 + const char *pin); 454 458 int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin); 459 + int snd_soc_dapm_nc_pin_unlocked(struct snd_soc_dapm_context *dapm, 460 + const char *pin); 455 461 int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm, 456 462 const char *pin); 457 463 int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm); 458 464 int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm, 459 465 const char *pin); 466 + int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm, 467 + const char *pin); 460 468 int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm, 461 469 const char *pin); 462 470 void snd_soc_dapm_auto_nc_codec_pins(struct snd_soc_codec *codec);
+1
include/target/iscsi/iscsi_transport.h
··· 12 12 int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *); 13 13 int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *); 14 14 void (*iscsit_free_np)(struct iscsi_np *); 15 + void (*iscsit_wait_conn)(struct iscsi_conn *); 15 16 void (*iscsit_free_conn)(struct iscsi_conn *); 16 17 int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *); 17 18 int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
+2 -2
include/trace/events/sunrpc.h
··· 83 83 ), 84 84 85 85 TP_fast_assign( 86 - __entry->client_id = clnt->cl_clid; 86 + __entry->client_id = clnt ? clnt->cl_clid : -1; 87 87 __entry->task_id = task->tk_pid; 88 88 __entry->action = action; 89 89 __entry->runstate = task->tk_runstate; ··· 91 91 __entry->flags = task->tk_flags; 92 92 ), 93 93 94 - TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d action=%pf", 94 + TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%pf", 95 95 __entry->task_id, __entry->client_id, 96 96 __entry->flags, 97 97 __entry->runstate,
+3 -3
include/trace/events/writeback.h
··· 287 287 __field(int, reason) 288 288 ), 289 289 TP_fast_assign( 290 - unsigned long older_than_this = work->older_than_this; 290 + unsigned long *older_than_this = work->older_than_this; 291 291 strncpy(__entry->name, dev_name(wb->bdi->dev), 32); 292 - __entry->older = older_than_this; 292 + __entry->older = older_than_this ? *older_than_this : 0; 293 293 __entry->age = older_than_this ? 294 - (jiffies - older_than_this) * 1000 / HZ : -1; 294 + (jiffies - *older_than_this) * 1000 / HZ : -1; 295 295 __entry->moved = moved; 296 296 __entry->reason = work->reason; 297 297 ),
+5 -1
include/uapi/asm-generic/unistd.h
··· 692 692 __SYSCALL(__NR_kcmp, sys_kcmp) 693 693 #define __NR_finit_module 273 694 694 __SYSCALL(__NR_finit_module, sys_finit_module) 695 + #define __NR_sched_setattr 274 696 + __SYSCALL(__NR_sched_setattr, sys_sched_setattr) 697 + #define __NR_sched_getattr 275 698 + __SYSCALL(__NR_sched_getattr, sys_sched_getattr) 695 699 696 700 #undef __NR_syscalls 697 - #define __NR_syscalls 274 701 + #define __NR_syscalls 276 698 702 699 703 /* 700 704 * All syscalls below here should go away really,
+2
include/uapi/drm/drm.h
··· 619 619 #define DRM_PRIME_CAP_EXPORT 0x2 620 620 #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 621 621 #define DRM_CAP_ASYNC_PAGE_FLIP 0x7 622 + #define DRM_CAP_CURSOR_WIDTH 0x8 623 + #define DRM_CAP_CURSOR_HEIGHT 0x9 622 624 623 625 /** DRM_IOCTL_GET_CAP ioctl argument type */ 624 626 struct drm_get_cap {
+1
include/uapi/drm/vmwgfx_drm.h
··· 87 87 #define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 88 88 #define DRM_VMW_PARAM_3D_CAPS_SIZE 8 89 89 #define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 90 + #define DRM_VMW_PARAM_MAX_MOB_SIZE 10 90 91 91 92 /** 92 93 * struct drm_vmw_getparam_arg
+12 -6
ipc/mq_sysctl.c
··· 22 22 return which; 23 23 } 24 24 25 + static int proc_mq_dointvec(ctl_table *table, int write, 26 + void __user *buffer, size_t *lenp, loff_t *ppos) 27 + { 28 + struct ctl_table mq_table; 29 + memcpy(&mq_table, table, sizeof(mq_table)); 30 + mq_table.data = get_mq(table); 31 + 32 + return proc_dointvec(&mq_table, write, buffer, lenp, ppos); 33 + } 34 + 25 35 static int proc_mq_dointvec_minmax(ctl_table *table, int write, 26 36 void __user *buffer, size_t *lenp, loff_t *ppos) 27 37 { ··· 43 33 lenp, ppos); 44 34 } 45 35 #else 36 + #define proc_mq_dointvec NULL 46 37 #define proc_mq_dointvec_minmax NULL 47 38 #endif 48 - 49 - static int msg_queues_limit_min = MIN_QUEUESMAX; 50 - static int msg_queues_limit_max = HARD_QUEUESMAX; 51 39 52 40 static int msg_max_limit_min = MIN_MSGMAX; 53 41 static int msg_max_limit_max = HARD_MSGMAX; ··· 59 51 .data = &init_ipc_ns.mq_queues_max, 60 52 .maxlen = sizeof(int), 61 53 .mode = 0644, 62 - .proc_handler = proc_mq_dointvec_minmax, 63 - .extra1 = &msg_queues_limit_min, 64 - .extra2 = &msg_queues_limit_max, 54 + .proc_handler = proc_mq_dointvec, 65 55 }, 66 56 { 67 57 .procname = "msg_max",
+3 -3
ipc/mqueue.c
··· 433 433 error = -EACCES; 434 434 goto out_unlock; 435 435 } 436 - if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX || 437 - (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && 438 - !capable(CAP_SYS_RESOURCE))) { 436 + 437 + if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && 438 + !capable(CAP_SYS_RESOURCE)) { 439 439 error = -ENOSPC; 440 440 goto out_unlock; 441 441 }
+1 -1
kernel/audit_tree.c
··· 916 916 struct fsnotify_mark *inode_mark, 917 917 struct fsnotify_mark *vfsmount_mark, 918 918 u32 mask, void *data, int data_type, 919 - const unsigned char *file_name) 919 + const unsigned char *file_name, u32 cookie) 920 920 { 921 921 return 0; 922 922 }
+1 -1
kernel/audit_watch.c
··· 471 471 struct fsnotify_mark *inode_mark, 472 472 struct fsnotify_mark *vfsmount_mark, 473 473 u32 mask, void *data, int data_type, 474 - const unsigned char *dname) 474 + const unsigned char *dname, u32 cookie) 475 475 { 476 476 struct inode *inode; 477 477 struct audit_parent *parent;
+32 -28
kernel/cgroup.c
··· 886 886 * per-subsystem and moved to css->id so that lookups are 887 887 * successful until the target css is released. 888 888 */ 889 + mutex_lock(&cgroup_mutex); 889 890 idr_remove(&cgrp->root->cgroup_idr, cgrp->id); 891 + mutex_unlock(&cgroup_mutex); 890 892 cgrp->id = -1; 891 893 892 894 call_rcu(&cgrp->rcu_head, cgroup_free_rcu); ··· 1568 1566 mutex_lock(&cgroup_mutex); 1569 1567 mutex_lock(&cgroup_root_mutex); 1570 1568 1571 - root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp, 1572 - 0, 1, GFP_KERNEL); 1573 - if (root_cgrp->id < 0) 1569 + ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL); 1570 + if (ret < 0) 1574 1571 goto unlock_drop; 1572 + root_cgrp->id = ret; 1575 1573 1576 1574 /* Check for name clashes with existing mounts */ 1577 1575 ret = -EBUSY; ··· 2765 2763 */ 2766 2764 update_before = cgroup_serial_nr_next; 2767 2765 2768 - mutex_unlock(&cgroup_mutex); 2769 - 2770 2766 /* add/rm files for all cgroups created before */ 2771 - rcu_read_lock(); 2772 2767 css_for_each_descendant_pre(css, cgroup_css(root, ss)) { 2773 2768 struct cgroup *cgrp = css->cgroup; 2774 2769 ··· 2774 2775 2775 2776 inode = cgrp->dentry->d_inode; 2776 2777 dget(cgrp->dentry); 2777 - rcu_read_unlock(); 2778 - 2779 2778 dput(prev); 2780 2779 prev = cgrp->dentry; 2781 2780 2781 + mutex_unlock(&cgroup_mutex); 2782 2782 mutex_lock(&inode->i_mutex); 2783 2783 mutex_lock(&cgroup_mutex); 2784 2784 if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp)) 2785 2785 ret = cgroup_addrm_files(cgrp, cfts, is_add); 2786 - mutex_unlock(&cgroup_mutex); 2787 2786 mutex_unlock(&inode->i_mutex); 2788 - 2789 - rcu_read_lock(); 2790 2787 if (ret) 2791 2788 break; 2792 2789 } 2793 - rcu_read_unlock(); 2790 + mutex_unlock(&cgroup_mutex); 2794 2791 dput(prev); 2795 2792 deactivate_super(sb); 2796 2793 return ret; ··· 2905 2910 * We should check if the process is exiting, otherwise 2906 2911 * it will race with cgroup_exit() in that the list 2907 2912 * entry won't be deleted 
though the process has exited. 2913 + * Do it while holding siglock so that we don't end up 2914 + * racing against cgroup_exit(). 2908 2915 */ 2916 + spin_lock_irq(&p->sighand->siglock); 2909 2917 if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list)) 2910 2918 list_add(&p->cg_list, &task_css_set(p)->tasks); 2919 + spin_unlock_irq(&p->sighand->siglock); 2920 + 2911 2921 task_unlock(p); 2912 2922 } while_each_thread(g, p); 2913 2923 read_unlock(&tasklist_lock); ··· 4158 4158 struct cgroup *cgrp; 4159 4159 struct cgroup_name *name; 4160 4160 struct cgroupfs_root *root = parent->root; 4161 - int ssid, err = 0; 4161 + int ssid, err; 4162 4162 struct cgroup_subsys *ss; 4163 4163 struct super_block *sb = root->sb; 4164 4164 ··· 4168 4168 return -ENOMEM; 4169 4169 4170 4170 name = cgroup_alloc_name(dentry); 4171 - if (!name) 4171 + if (!name) { 4172 + err = -ENOMEM; 4172 4173 goto err_free_cgrp; 4174 + } 4173 4175 rcu_assign_pointer(cgrp->name, name); 4174 - 4175 - /* 4176 - * Temporarily set the pointer to NULL, so idr_find() won't return 4177 - * a half-baked cgroup. 4178 - */ 4179 - cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL); 4180 - if (cgrp->id < 0) 4181 - goto err_free_name; 4182 4176 4183 4177 /* 4184 4178 * Only live parents can have children. Note that the liveliness ··· 4183 4189 */ 4184 4190 if (!cgroup_lock_live_group(parent)) { 4185 4191 err = -ENODEV; 4186 - goto err_free_id; 4192 + goto err_free_name; 4193 + } 4194 + 4195 + /* 4196 + * Temporarily set the pointer to NULL, so idr_find() won't return 4197 + * a half-baked cgroup. 
4198 + */ 4199 + cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL); 4200 + if (cgrp->id < 0) { 4201 + err = -ENOMEM; 4202 + goto err_unlock; 4187 4203 } 4188 4204 4189 4205 /* Grab a reference on the superblock so the hierarchy doesn't ··· 4225 4221 */ 4226 4222 err = cgroup_create_file(dentry, S_IFDIR | mode, sb); 4227 4223 if (err < 0) 4228 - goto err_unlock; 4224 + goto err_free_id; 4229 4225 lockdep_assert_held(&dentry->d_inode->i_mutex); 4230 4226 4231 4227 cgrp->serial_nr = cgroup_serial_nr_next++; ··· 4261 4257 4262 4258 return 0; 4263 4259 4264 - err_unlock: 4265 - mutex_unlock(&cgroup_mutex); 4266 - /* Release the reference count that we took on the superblock */ 4267 - deactivate_super(sb); 4268 4260 err_free_id: 4269 4261 idr_remove(&root->cgroup_idr, cgrp->id); 4262 + /* Release the reference count that we took on the superblock */ 4263 + deactivate_super(sb); 4264 + err_unlock: 4265 + mutex_unlock(&cgroup_mutex); 4270 4266 err_free_name: 4271 4267 kfree(rcu_dereference_raw(cgrp->name)); 4272 4268 err_free_cgrp:
+3 -7
kernel/cpuset.c
··· 974 974 * Temporarilly set tasks mems_allowed to target nodes of migration, 975 975 * so that the migration code can allocate pages on these nodes. 976 976 * 977 - * Call holding cpuset_mutex, so current's cpuset won't change 978 - * during this call, as manage_mutex holds off any cpuset_attach() 979 - * calls. Therefore we don't need to take task_lock around the 980 - * call to guarantee_online_mems(), as we know no one is changing 981 - * our task's cpuset. 982 - * 983 977 * While the mm_struct we are migrating is typically from some 984 978 * other task, the task_struct mems_allowed that we are hacking 985 979 * is for our current task, which must allocate new pages for that ··· 990 996 991 997 do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); 992 998 999 + rcu_read_lock(); 993 1000 mems_cs = effective_nodemask_cpuset(task_cs(tsk)); 994 1001 guarantee_online_mems(mems_cs, &tsk->mems_allowed); 1002 + rcu_read_unlock(); 995 1003 } 996 1004 997 1005 /* ··· 2482 2486 2483 2487 task_lock(current); 2484 2488 cs = nearest_hardwall_ancestor(task_cs(current)); 2489 + allowed = node_isset(node, cs->mems_allowed); 2485 2490 task_unlock(current); 2486 2491 2487 - allowed = node_isset(node, cs->mems_allowed); 2488 2492 mutex_unlock(&callback_mutex); 2489 2493 return allowed; 2490 2494 }
+6 -6
kernel/events/core.c
··· 7856 7856 static void __perf_event_exit_context(void *__info) 7857 7857 { 7858 7858 struct perf_event_context *ctx = __info; 7859 - struct perf_event *event, *tmp; 7859 + struct perf_event *event; 7860 7860 7861 7861 perf_pmu_rotate_stop(ctx->pmu); 7862 7862 7863 - list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) 7863 + rcu_read_lock(); 7864 + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) 7864 7865 __perf_remove_from_context(event); 7865 - list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry) 7866 - __perf_remove_from_context(event); 7866 + rcu_read_unlock(); 7867 7867 } 7868 7868 7869 7869 static void perf_event_exit_cpu_context(int cpu) ··· 7887 7887 { 7888 7888 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 7889 7889 7890 + perf_event_exit_cpu_context(cpu); 7891 + 7890 7892 mutex_lock(&swhash->hlist_mutex); 7891 7893 swevent_hlist_release(swhash); 7892 7894 mutex_unlock(&swhash->hlist_mutex); 7893 - 7894 - perf_event_exit_cpu_context(cpu); 7895 7895 } 7896 7896 #else 7897 7897 static inline void perf_event_exit_cpu(int cpu) { }
+1
kernel/irq/irqdomain.c
··· 10 10 #include <linux/mutex.h> 11 11 #include <linux/of.h> 12 12 #include <linux/of_address.h> 13 + #include <linux/of_irq.h> 13 14 #include <linux/topology.h> 14 15 #include <linux/seq_file.h> 15 16 #include <linux/slab.h>
+28 -3
kernel/irq/manage.c
··· 832 832 833 833 static void wake_threads_waitq(struct irq_desc *desc) 834 834 { 835 - if (atomic_dec_and_test(&desc->threads_active) && 836 - waitqueue_active(&desc->wait_for_threads)) 835 + if (atomic_dec_and_test(&desc->threads_active)) 837 836 wake_up(&desc->wait_for_threads); 838 837 } 839 838 ··· 951 952 new->thread_fn = new->handler; 952 953 new->handler = irq_default_primary_handler; 953 954 } 955 + } 956 + 957 + static int irq_request_resources(struct irq_desc *desc) 958 + { 959 + struct irq_data *d = &desc->irq_data; 960 + struct irq_chip *c = d->chip; 961 + 962 + return c->irq_request_resources ? c->irq_request_resources(d) : 0; 963 + } 964 + 965 + static void irq_release_resources(struct irq_desc *desc) 966 + { 967 + struct irq_data *d = &desc->irq_data; 968 + struct irq_chip *c = d->chip; 969 + 970 + if (c->irq_release_resources) 971 + c->irq_release_resources(d); 954 972 } 955 973 956 974 /* ··· 1165 1149 } 1166 1150 1167 1151 if (!shared) { 1152 + ret = irq_request_resources(desc); 1153 + if (ret) { 1154 + pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", 1155 + new->name, irq, desc->irq_data.chip->name); 1156 + goto out_mask; 1157 + } 1158 + 1168 1159 init_waitqueue_head(&desc->wait_for_threads); 1169 1160 1170 1161 /* Setup the type (level, edge polarity) if configured: */ ··· 1342 1319 *action_ptr = action->next; 1343 1320 1344 1321 /* If this was the last handler, shut down the IRQ line: */ 1345 - if (!desc->action) 1322 + if (!desc->action) { 1346 1323 irq_shutdown(desc); 1324 + irq_release_resources(desc); 1325 + } 1347 1326 1348 1327 #ifdef CONFIG_SMP 1349 1328 /* make sure affinity_hint is cleaned up */
+1
kernel/power/console.c
··· 9 9 #include <linux/kbd_kern.h> 10 10 #include <linux/vt.h> 11 11 #include <linux/module.h> 12 + #include <linux/slab.h> 12 13 #include "power.h" 13 14 14 15 #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
-2
kernel/printk/printk.c
··· 1076 1076 next_seq = log_next_seq; 1077 1077 1078 1078 len = 0; 1079 - prev = 0; 1080 1079 while (len >= 0 && seq < next_seq) { 1081 1080 struct printk_log *msg = log_from_idx(idx); 1082 1081 int textlen; ··· 2787 2788 next_idx = idx; 2788 2789 2789 2790 l = 0; 2790 - prev = 0; 2791 2791 while (seq < dumper->next_seq) { 2792 2792 struct printk_log *msg = log_from_idx(idx); 2793 2793
+16 -12
kernel/sched/core.c
··· 1952 1952 { 1953 1953 1954 1954 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 1955 - u64 period = attr->sched_period; 1955 + u64 period = attr->sched_period ?: attr->sched_deadline; 1956 1956 u64 runtime = attr->sched_runtime; 1957 1957 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; 1958 1958 int cpus, err = -1; ··· 3661 3661 * @pid: the pid in question. 3662 3662 * @uattr: structure containing the extended parameters. 3663 3663 */ 3664 - SYSCALL_DEFINE2(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr) 3664 + SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 3665 + unsigned int, flags) 3665 3666 { 3666 3667 struct sched_attr attr; 3667 3668 struct task_struct *p; 3668 3669 int retval; 3669 3670 3670 - if (!uattr || pid < 0) 3671 + if (!uattr || pid < 0 || flags) 3671 3672 return -EINVAL; 3672 3673 3673 3674 if (sched_copy_attr(uattr, &attr)) ··· 3787 3786 attr->size = usize; 3788 3787 } 3789 3788 3790 - ret = copy_to_user(uattr, attr, usize); 3789 + ret = copy_to_user(uattr, attr, attr->size); 3791 3790 if (ret) 3792 3791 return -EFAULT; 3793 3792 ··· 3805 3804 * @uattr: structure containing the extended parameters. 3806 3805 * @size: sizeof(attr) for fwd/bwd comp. 
3807 3806 */ 3808 - SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 3809 - unsigned int, size) 3807 + SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 3808 + unsigned int, size, unsigned int, flags) 3810 3809 { 3811 3810 struct sched_attr attr = { 3812 3811 .size = sizeof(struct sched_attr), ··· 3815 3814 int retval; 3816 3815 3817 3816 if (!uattr || pid < 0 || size > PAGE_SIZE || 3818 - size < SCHED_ATTR_SIZE_VER0) 3817 + size < SCHED_ATTR_SIZE_VER0 || flags) 3819 3818 return -EINVAL; 3820 3819 3821 3820 rcu_read_lock(); ··· 7423 7422 u64 period = global_rt_period(); 7424 7423 u64 new_bw = to_ratio(period, runtime); 7425 7424 int cpu, ret = 0; 7425 + unsigned long flags; 7426 7426 7427 7427 /* 7428 7428 * Here we want to check the bandwidth not being set to some ··· 7437 7435 for_each_possible_cpu(cpu) { 7438 7436 struct dl_bw *dl_b = dl_bw_of(cpu); 7439 7437 7440 - raw_spin_lock(&dl_b->lock); 7438 + raw_spin_lock_irqsave(&dl_b->lock, flags); 7441 7439 if (new_bw < dl_b->total_bw) 7442 7440 ret = -EBUSY; 7443 - raw_spin_unlock(&dl_b->lock); 7441 + raw_spin_unlock_irqrestore(&dl_b->lock, flags); 7444 7442 7445 7443 if (ret) 7446 7444 break; ··· 7453 7451 { 7454 7452 u64 new_bw = -1; 7455 7453 int cpu; 7454 + unsigned long flags; 7456 7455 7457 7456 def_dl_bandwidth.dl_period = global_rt_period(); 7458 7457 def_dl_bandwidth.dl_runtime = global_rt_runtime(); ··· 7467 7464 for_each_possible_cpu(cpu) { 7468 7465 struct dl_bw *dl_b = dl_bw_of(cpu); 7469 7466 7470 - raw_spin_lock(&dl_b->lock); 7467 + raw_spin_lock_irqsave(&dl_b->lock, flags); 7471 7468 dl_b->bw = new_bw; 7472 - raw_spin_unlock(&dl_b->lock); 7469 + raw_spin_unlock_irqrestore(&dl_b->lock, flags); 7473 7470 } 7474 7471 } 7475 7472 ··· 7478 7475 if (sysctl_sched_rt_period <= 0) 7479 7476 return -EINVAL; 7480 7477 7481 - if (sysctl_sched_rt_runtime > sysctl_sched_rt_period) 7478 + if ((sysctl_sched_rt_runtime != RUNTIME_INF) && 7479 + 
(sysctl_sched_rt_runtime > sysctl_sched_rt_period)) 7482 7480 return -EINVAL; 7483 7481 7484 7482 return 0;
+3 -3
kernel/sched/cpudeadline.c
··· 70 70 71 71 static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl) 72 72 { 73 - WARN_ON(idx > num_present_cpus() || idx == IDX_INVALID); 73 + WARN_ON(idx == IDX_INVALID || !cpu_present(idx)); 74 74 75 75 if (dl_time_before(new_dl, cp->elements[idx].dl)) { 76 76 cp->elements[idx].dl = new_dl; ··· 117 117 } 118 118 119 119 out: 120 - WARN_ON(best_cpu > num_present_cpus() && best_cpu != -1); 120 + WARN_ON(best_cpu != -1 && !cpu_present(best_cpu)); 121 121 122 122 return best_cpu; 123 123 } ··· 137 137 int old_idx, new_cpu; 138 138 unsigned long flags; 139 139 140 - WARN_ON(cpu > num_present_cpus()); 140 + WARN_ON(!cpu_present(cpu)); 141 141 142 142 raw_spin_lock_irqsave(&cp->lock, flags); 143 143 old_idx = cp->cpu_to_idx[cpu];
+9 -11
kernel/sched/deadline.c
··· 121 121 122 122 static void update_dl_migration(struct dl_rq *dl_rq) 123 123 { 124 - if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) { 124 + if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) { 125 125 if (!dl_rq->overloaded) { 126 126 dl_set_overload(rq_of_dl_rq(dl_rq)); 127 127 dl_rq->overloaded = 1; ··· 135 135 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 136 136 { 137 137 struct task_struct *p = dl_task_of(dl_se); 138 - dl_rq = &rq_of_dl_rq(dl_rq)->dl; 139 138 140 - dl_rq->dl_nr_total++; 141 139 if (p->nr_cpus_allowed > 1) 142 140 dl_rq->dl_nr_migratory++; 143 141 ··· 145 147 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) 146 148 { 147 149 struct task_struct *p = dl_task_of(dl_se); 148 - dl_rq = &rq_of_dl_rq(dl_rq)->dl; 149 150 150 - dl_rq->dl_nr_total--; 151 151 if (p->nr_cpus_allowed > 1) 152 152 dl_rq->dl_nr_migratory--; 153 153 ··· 562 566 return 1; 563 567 } 564 568 569 + extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); 570 + 565 571 /* 566 572 * Update the current task's runtime statistics (provided it is still 567 573 * a -deadline task and has not been removed from the dl_rq). ··· 627 629 struct rt_rq *rt_rq = &rq->rt; 628 630 629 631 raw_spin_lock(&rt_rq->rt_runtime_lock); 630 - rt_rq->rt_time += delta_exec; 631 632 /* 632 633 * We'll let actual RT tasks worry about the overflow here, we 633 - * have our own CBS to keep us inline -- see above. 634 + * have our own CBS to keep us inline; only account when RT 635 + * bandwidth is relevant. 
634 636 */ 637 + if (sched_rt_bandwidth_account(rt_rq)) 638 + rt_rq->rt_time += delta_exec; 635 639 raw_spin_unlock(&rt_rq->rt_runtime_lock); 636 640 } 637 641 } ··· 717 717 718 718 WARN_ON(!dl_prio(prio)); 719 719 dl_rq->dl_nr_running++; 720 + inc_nr_running(rq_of_dl_rq(dl_rq)); 720 721 721 722 inc_dl_deadline(dl_rq, deadline); 722 723 inc_dl_migration(dl_se, dl_rq); ··· 731 730 WARN_ON(!dl_prio(prio)); 732 731 WARN_ON(!dl_rq->dl_nr_running); 733 732 dl_rq->dl_nr_running--; 733 + dec_nr_running(rq_of_dl_rq(dl_rq)); 734 734 735 735 dec_dl_deadline(dl_rq, dl_se->deadline); 736 736 dec_dl_migration(dl_se, dl_rq); ··· 838 836 839 837 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) 840 838 enqueue_pushable_dl_task(rq, p); 841 - 842 - inc_nr_running(rq); 843 839 } 844 840 845 841 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) ··· 850 850 { 851 851 update_curr_dl(rq); 852 852 __dequeue_task_dl(rq, p, flags); 853 - 854 - dec_nr_running(rq); 855 853 } 856 854 857 855 /*
+6 -4
kernel/sched/fair.c
··· 1757 1757 start = end; 1758 1758 if (pages <= 0) 1759 1759 goto out; 1760 + 1761 + cond_resched(); 1760 1762 } while (end != vma->vm_end); 1761 1763 } 1762 1764 ··· 7001 6999 struct cfs_rq *cfs_rq = cfs_rq_of(se); 7002 7000 7003 7001 /* 7004 - * Ensure the task's vruntime is normalized, so that when its 7002 + * Ensure the task's vruntime is normalized, so that when it's 7005 7003 * switched back to the fair class the enqueue_entity(.flags=0) will 7006 7004 * do the right thing. 7007 7005 * 7008 - * If it was on_rq, then the dequeue_entity(.flags=0) will already 7009 - * have normalized the vruntime, if it was !on_rq, then only when 7006 + * If it's on_rq, then the dequeue_entity(.flags=0) will already 7007 + * have normalized the vruntime, if it's !on_rq, then only when 7010 7008 * the task is sleeping will it still have non-normalized vruntime. 7011 7009 */ 7012 - if (!se->on_rq && p->state != TASK_RUNNING) { 7010 + if (!p->on_rq && p->state != TASK_RUNNING) { 7013 7011 /* 7014 7012 * Fix up our vruntime so that the current sleep doesn't 7015 7013 * cause 'unlimited' sleep bonus.
+8
kernel/sched/rt.c
··· 538 538 539 539 #endif /* CONFIG_RT_GROUP_SCHED */ 540 540 541 + bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) 542 + { 543 + struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 544 + 545 + return (hrtimer_active(&rt_b->rt_period_timer) || 546 + rt_rq->rt_time < rt_b->rt_runtime); 547 + } 548 + 541 549 #ifdef CONFIG_SMP 542 550 /* 543 551 * We ran out of runtime, see if we can borrow some from our neighbours.
-1
kernel/sched/sched.h
··· 462 462 } earliest_dl; 463 463 464 464 unsigned long dl_nr_migratory; 465 - unsigned long dl_nr_total; 466 465 int overloaded; 467 466 468 467 /*
+29 -17
kernel/time/sched_clock.c
··· 116 116 void __init sched_clock_register(u64 (*read)(void), int bits, 117 117 unsigned long rate) 118 118 { 119 + u64 res, wrap, new_mask, new_epoch, cyc, ns; 120 + u32 new_mult, new_shift; 121 + ktime_t new_wrap_kt; 119 122 unsigned long r; 120 - u64 res, wrap; 121 123 char r_unit; 122 124 123 125 if (cd.rate > rate) 124 126 return; 125 127 126 128 WARN_ON(!irqs_disabled()); 127 - read_sched_clock = read; 128 - sched_clock_mask = CLOCKSOURCE_MASK(bits); 129 - cd.rate = rate; 130 129 131 130 /* calculate the mult/shift to convert counter ticks to ns. */ 132 - clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600); 131 + clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600); 132 + 133 + new_mask = CLOCKSOURCE_MASK(bits); 134 + 135 + /* calculate how many ns until we wrap */ 136 + wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask); 137 + new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3)); 138 + 139 + /* update epoch for new counter and update epoch_ns from old counter*/ 140 + new_epoch = read(); 141 + cyc = read_sched_clock(); 142 + ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask, 143 + cd.mult, cd.shift); 144 + 145 + raw_write_seqcount_begin(&cd.seq); 146 + read_sched_clock = read; 147 + sched_clock_mask = new_mask; 148 + cd.rate = rate; 149 + cd.wrap_kt = new_wrap_kt; 150 + cd.mult = new_mult; 151 + cd.shift = new_shift; 152 + cd.epoch_cyc = new_epoch; 153 + cd.epoch_ns = ns; 154 + raw_write_seqcount_end(&cd.seq); 133 155 134 156 r = rate; 135 157 if (r >= 4000000) { ··· 163 141 } else 164 142 r_unit = ' '; 165 143 166 - /* calculate how many ns until we wrap */ 167 - wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask); 168 - cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3)); 169 - 170 144 /* calculate the ns resolution of this counter */ 171 - res = cyc_to_ns(1ULL, cd.mult, cd.shift); 145 + res = cyc_to_ns(1ULL, new_mult, new_shift); 146 + 172 147 pr_info("sched_clock: %u bits at %lu%cHz, 
resolution %lluns, wraps every %lluns\n", 173 148 bits, r, r_unit, res, wrap); 174 - 175 - update_sched_clock(); 176 - 177 - /* 178 - * Ensure that sched_clock() starts off at 0ns 179 - */ 180 - cd.epoch_ns = 0; 181 149 182 150 /* Enable IRQ time accounting if we have a fast enough sched_clock */ 183 151 if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
+10
kernel/trace/trace_events.c
··· 1777 1777 { 1778 1778 struct ftrace_event_call **call, **start, **end; 1779 1779 1780 + if (!mod->num_trace_events) 1781 + return; 1782 + 1783 + /* Don't add infrastructure for mods without tracepoints */ 1784 + if (trace_module_has_bad_taint(mod)) { 1785 + pr_err("%s: module has bad taint, not creating trace events\n", 1786 + mod->name); 1787 + return; 1788 + } 1789 + 1780 1790 start = mod->trace_events; 1781 1791 end = mod->trace_events + mod->num_trace_events; 1782 1792
+6 -1
kernel/tracepoint.c
··· 631 631 EXPORT_SYMBOL_GPL(tracepoint_iter_reset); 632 632 633 633 #ifdef CONFIG_MODULES 634 + bool trace_module_has_bad_taint(struct module *mod) 635 + { 636 + return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)); 637 + } 638 + 634 639 static int tracepoint_module_coming(struct module *mod) 635 640 { 636 641 struct tp_module *tp_mod, *iter; ··· 646 641 * module headers (for forced load), to make sure we don't cause a crash. 647 642 * Staging and out-of-tree GPL modules are fine. 648 643 */ 649 - if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP))) 644 + if (trace_module_has_bad_taint(mod)) 650 645 return 0; 651 646 mutex_lock(&tracepoints_mutex); 652 647 tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
+1 -1
kernel/user_namespace.c
··· 225 225 * 226 226 * When there is no mapping defined for the user-namespace uid 227 227 * pair INVALID_UID is returned. Callers are expected to test 228 - * for and handle handle INVALID_UID being returned. INVALID_UID 228 + * for and handle INVALID_UID being returned. INVALID_UID 229 229 * may be tested for using uid_valid(). 230 230 */ 231 231 kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
+7
kernel/workqueue.c
··· 1851 1851 if (worker->flags & WORKER_IDLE) 1852 1852 pool->nr_idle--; 1853 1853 1854 + /* 1855 + * Once WORKER_DIE is set, the kworker may destroy itself at any 1856 + * point. Pin to ensure the task stays until we're done with it. 1857 + */ 1858 + get_task_struct(worker->task); 1859 + 1854 1860 list_del_init(&worker->entry); 1855 1861 worker->flags |= WORKER_DIE; 1856 1862 ··· 1865 1859 spin_unlock_irq(&pool->lock); 1866 1860 1867 1861 kthread_stop(worker->task); 1862 + put_task_struct(worker->task); 1868 1863 kfree(worker); 1869 1864 1870 1865 spin_lock_irq(&pool->lock);
+85 -46
lib/dma-debug.c
··· 424 424 EXPORT_SYMBOL(debug_dma_dump_mappings); 425 425 426 426 /* 427 - * For each page mapped (initial page in the case of 428 - * dma_alloc_coherent/dma_map_{single|page}, or each page in a 429 - * scatterlist) insert into this tree using the pfn as the key. At 427 + * For each mapping (initial cacheline in the case of 428 + * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a 429 + * scatterlist, or the cacheline specified in dma_map_single) insert 430 + * into this tree using the cacheline as the key. At 430 431 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If 431 - * the pfn already exists at insertion time add a tag as a reference 432 + * the entry already exists at insertion time add a tag as a reference 432 433 * count for the overlapping mappings. For now, the overlap tracking 433 - * just ensures that 'unmaps' balance 'maps' before marking the pfn 434 - * idle, but we should also be flagging overlaps as an API violation. 434 + * just ensures that 'unmaps' balance 'maps' before marking the 435 + * cacheline idle, but we should also be flagging overlaps as an API 436 + * violation. 435 437 * 436 438 * Memory usage is mostly constrained by the maximum number of available 437 439 * dma-debug entries in that we need a free dma_debug_entry before 438 - * inserting into the tree. In the case of dma_map_{single|page} and 439 - * dma_alloc_coherent there is only one dma_debug_entry and one pfn to 440 - * track per event. dma_map_sg(), on the other hand, 441 - * consumes a single dma_debug_entry, but inserts 'nents' entries into 442 - * the tree. 440 + * inserting into the tree. In the case of dma_map_page and 441 + * dma_alloc_coherent there is only one dma_debug_entry and one 442 + * dma_active_cacheline entry to track per event. dma_map_sg(), on the 443 + * other hand, consumes a single dma_debug_entry, but inserts 'nents' 444 + * entries into the tree. 
443 445 * 444 446 * At any time debug_dma_assert_idle() can be called to trigger a 445 - * warning if the given page is in the active set. 447 + * warning if any cachelines in the given page are in the active set. 446 448 */ 447 - static RADIX_TREE(dma_active_pfn, GFP_NOWAIT); 449 + static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT); 448 450 static DEFINE_SPINLOCK(radix_lock); 449 - #define ACTIVE_PFN_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) 451 + #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) 452 + #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT) 453 + #define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT) 450 454 451 - static int active_pfn_read_overlap(unsigned long pfn) 455 + static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry) 456 + { 457 + return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) + 458 + (entry->offset >> L1_CACHE_SHIFT); 459 + } 460 + 461 + static int active_cacheline_read_overlap(phys_addr_t cln) 452 462 { 453 463 int overlap = 0, i; 454 464 455 465 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) 456 - if (radix_tree_tag_get(&dma_active_pfn, pfn, i)) 466 + if (radix_tree_tag_get(&dma_active_cacheline, cln, i)) 457 467 overlap |= 1 << i; 458 468 return overlap; 459 469 } 460 470 461 - static int active_pfn_set_overlap(unsigned long pfn, int overlap) 471 + static int active_cacheline_set_overlap(phys_addr_t cln, int overlap) 462 472 { 463 473 int i; 464 474 465 - if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0) 475 + if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0) 466 476 return overlap; 467 477 468 478 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) 469 479 if (overlap & 1 << i) 470 - radix_tree_tag_set(&dma_active_pfn, pfn, i); 480 + radix_tree_tag_set(&dma_active_cacheline, cln, i); 471 481 else 472 - radix_tree_tag_clear(&dma_active_pfn, pfn, i); 482 + radix_tree_tag_clear(&dma_active_cacheline, cln, i); 473 483 474 484 return overlap; 475 485 } 476 486 477 - static void 
active_pfn_inc_overlap(unsigned long pfn) 487 + static void active_cacheline_inc_overlap(phys_addr_t cln) 478 488 { 479 - int overlap = active_pfn_read_overlap(pfn); 489 + int overlap = active_cacheline_read_overlap(cln); 480 490 481 - overlap = active_pfn_set_overlap(pfn, ++overlap); 491 + overlap = active_cacheline_set_overlap(cln, ++overlap); 482 492 483 493 /* If we overflowed the overlap counter then we're potentially 484 494 * leaking dma-mappings. Otherwise, if maps and unmaps are 485 495 * balanced then this overflow may cause false negatives in 486 - * debug_dma_assert_idle() as the pfn may be marked idle 496 + * debug_dma_assert_idle() as the cacheline may be marked idle 487 497 * prematurely. 488 498 */ 489 - WARN_ONCE(overlap > ACTIVE_PFN_MAX_OVERLAP, 490 - "DMA-API: exceeded %d overlapping mappings of pfn %lx\n", 491 - ACTIVE_PFN_MAX_OVERLAP, pfn); 499 + WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP, 500 + "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n", 501 + ACTIVE_CACHELINE_MAX_OVERLAP, &cln); 492 502 } 493 503 494 - static int active_pfn_dec_overlap(unsigned long pfn) 504 + static int active_cacheline_dec_overlap(phys_addr_t cln) 495 505 { 496 - int overlap = active_pfn_read_overlap(pfn); 506 + int overlap = active_cacheline_read_overlap(cln); 497 507 498 - return active_pfn_set_overlap(pfn, --overlap); 508 + return active_cacheline_set_overlap(cln, --overlap); 499 509 } 500 510 501 - static int active_pfn_insert(struct dma_debug_entry *entry) 511 + static int active_cacheline_insert(struct dma_debug_entry *entry) 502 512 { 513 + phys_addr_t cln = to_cacheline_number(entry); 503 514 unsigned long flags; 504 515 int rc; 505 516 517 + /* If the device is not writing memory then we don't have any 518 + * concerns about the cpu consuming stale data. This mitigates 519 + * legitimate usages of overlapping mappings. 
520 + */ 521 + if (entry->direction == DMA_TO_DEVICE) 522 + return 0; 523 + 506 524 spin_lock_irqsave(&radix_lock, flags); 507 - rc = radix_tree_insert(&dma_active_pfn, entry->pfn, entry); 525 + rc = radix_tree_insert(&dma_active_cacheline, cln, entry); 508 526 if (rc == -EEXIST) 509 - active_pfn_inc_overlap(entry->pfn); 527 + active_cacheline_inc_overlap(cln); 510 528 spin_unlock_irqrestore(&radix_lock, flags); 511 529 512 530 return rc; 513 531 } 514 532 515 - static void active_pfn_remove(struct dma_debug_entry *entry) 533 + static void active_cacheline_remove(struct dma_debug_entry *entry) 516 534 { 535 + phys_addr_t cln = to_cacheline_number(entry); 517 536 unsigned long flags; 537 + 538 + /* ...mirror the insert case */ 539 + if (entry->direction == DMA_TO_DEVICE) 540 + return; 518 541 519 542 spin_lock_irqsave(&radix_lock, flags); 520 543 /* since we are counting overlaps the final put of the 521 - * entry->pfn will occur when the overlap count is 0. 522 - * active_pfn_dec_overlap() returns -1 in that case 544 + * cacheline will occur when the overlap count is 0. 
545 + * active_cacheline_dec_overlap() returns -1 in that case 523 546 */ 524 - if (active_pfn_dec_overlap(entry->pfn) < 0) 525 - radix_tree_delete(&dma_active_pfn, entry->pfn); 547 + if (active_cacheline_dec_overlap(cln) < 0) 548 + radix_tree_delete(&dma_active_cacheline, cln); 526 549 spin_unlock_irqrestore(&radix_lock, flags); 527 550 } 528 551 529 552 /** 530 553 * debug_dma_assert_idle() - assert that a page is not undergoing dma 531 - * @page: page to lookup in the dma_active_pfn tree 554 + * @page: page to lookup in the dma_active_cacheline tree 532 555 * 533 556 * Place a call to this routine in cases where the cpu touching the page 534 557 * before the dma completes (page is dma_unmapped) will lead to data ··· 559 536 */ 560 537 void debug_dma_assert_idle(struct page *page) 561 538 { 539 + static struct dma_debug_entry *ents[CACHELINES_PER_PAGE]; 540 + struct dma_debug_entry *entry = NULL; 541 + void **results = (void **) &ents; 542 + unsigned int nents, i; 562 543 unsigned long flags; 563 - struct dma_debug_entry *entry; 544 + phys_addr_t cln; 564 545 565 546 if (!page) 566 547 return; 567 548 549 + cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT; 568 550 spin_lock_irqsave(&radix_lock, flags); 569 - entry = radix_tree_lookup(&dma_active_pfn, page_to_pfn(page)); 551 + nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln, 552 + CACHELINES_PER_PAGE); 553 + for (i = 0; i < nents; i++) { 554 + phys_addr_t ent_cln = to_cacheline_number(ents[i]); 555 + 556 + if (ent_cln == cln) { 557 + entry = ents[i]; 558 + break; 559 + } else if (ent_cln >= cln + CACHELINES_PER_PAGE) 560 + break; 561 + } 570 562 spin_unlock_irqrestore(&radix_lock, flags); 571 563 572 564 if (!entry) 573 565 return; 574 566 567 + cln = to_cacheline_number(entry); 575 568 err_printk(entry->dev, entry, 576 - "DMA-API: cpu touching an active dma mapped page " 577 - "[pfn=0x%lx]\n", entry->pfn); 569 + "DMA-API: cpu touching an active dma mapped cacheline 
[cln=%pa]\n", 570 + &cln); 578 571 } 579 572 580 573 /* ··· 607 568 hash_bucket_add(bucket, entry); 608 569 put_hash_bucket(bucket, &flags); 609 570 610 - rc = active_pfn_insert(entry); 571 + rc = active_cacheline_insert(entry); 611 572 if (rc == -ENOMEM) { 612 - pr_err("DMA-API: pfn tracking ENOMEM, dma-debug disabled\n"); 573 + pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n"); 613 574 global_disable = true; 614 575 } 615 576 ··· 670 631 { 671 632 unsigned long flags; 672 633 673 - active_pfn_remove(entry); 634 + active_cacheline_remove(entry); 674 635 675 636 /* 676 637 * add to beginning of the list - this way the entries are
+3 -1
lib/radix-tree.c
··· 1253 1253 1254 1254 node = indirect_to_ptr(node); 1255 1255 max_index = radix_tree_maxindex(node->height); 1256 - if (cur_index > max_index) 1256 + if (cur_index > max_index) { 1257 + rcu_read_unlock(); 1257 1258 break; 1259 + } 1258 1260 1259 1261 cur_index = __locate(node, item, cur_index, &found_index); 1260 1262 rcu_read_unlock();
+9 -11
mm/huge_memory.c
··· 1166 1166 } else { 1167 1167 ret = do_huge_pmd_wp_page_fallback(mm, vma, address, 1168 1168 pmd, orig_pmd, page, haddr); 1169 - if (ret & VM_FAULT_OOM) 1169 + if (ret & VM_FAULT_OOM) { 1170 1170 split_huge_page(page); 1171 + ret |= VM_FAULT_FALLBACK; 1172 + } 1171 1173 put_page(page); 1172 1174 } 1173 1175 count_vm_event(THP_FAULT_FALLBACK); ··· 1181 1179 if (page) { 1182 1180 split_huge_page(page); 1183 1181 put_page(page); 1184 - } 1182 + } else 1183 + split_huge_page_pmd(vma, address, pmd); 1184 + ret |= VM_FAULT_FALLBACK; 1185 1185 count_vm_event(THP_FAULT_FALLBACK); 1186 - ret |= VM_FAULT_OOM; 1187 1186 goto out; 1188 1187 } 1189 1188 ··· 1548 1545 entry = pmd_mknonnuma(entry); 1549 1546 entry = pmd_modify(entry, newprot); 1550 1547 ret = HPAGE_PMD_NR; 1548 + set_pmd_at(mm, addr, pmd, entry); 1551 1549 BUG_ON(pmd_write(entry)); 1552 1550 } else { 1553 1551 struct page *page = pmd_page(*pmd); ··· 1561 1557 */ 1562 1558 if (!is_huge_zero_page(page) && 1563 1559 !pmd_numa(*pmd)) { 1564 - entry = *pmd; 1565 - entry = pmd_mknuma(entry); 1560 + pmdp_set_numa(mm, addr, pmd); 1566 1561 ret = HPAGE_PMD_NR; 1567 1562 } 1568 1563 } 1569 - 1570 - /* Set PMD if cleared earlier */ 1571 - if (ret == HPAGE_PMD_NR) 1572 - set_pmd_at(mm, addr, pmd, entry); 1573 - 1574 1564 spin_unlock(ptl); 1575 1565 } 1576 1566 ··· 1961 1963 return ret; 1962 1964 } 1963 1965 1964 - #define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE) 1966 + #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) 1965 1967 1966 1968 int hugepage_madvise(struct vm_area_struct *vma, 1967 1969 unsigned long *vm_flags, int advice)
+1 -1
mm/ksm.c
··· 444 444 static struct page *page_trans_compound_anon(struct page *page) 445 445 { 446 446 if (PageTransCompound(page)) { 447 - struct page *head = compound_trans_head(page); 447 + struct page *head = compound_head(page); 448 448 /* 449 449 * head may actually be splitted and freed from under 450 450 * us but it's ok here.
+14 -6
mm/memcontrol.c
··· 1127 1127 * skipping css reference should be safe. 1128 1128 */ 1129 1129 if (next_css) { 1130 - if ((next_css->flags & CSS_ONLINE) && 1131 - (next_css == &root->css || css_tryget(next_css))) 1130 + if ((next_css == &root->css) || 1131 + ((next_css->flags & CSS_ONLINE) && css_tryget(next_css))) 1132 1132 return mem_cgroup_from_css(next_css); 1133 1133 1134 1134 prev_css = next_css; ··· 1687 1687 * protects memcg_name and makes sure that parallel ooms do not 1688 1688 * interleave 1689 1689 */ 1690 - static DEFINE_SPINLOCK(oom_info_lock); 1690 + static DEFINE_MUTEX(oom_info_lock); 1691 1691 struct cgroup *task_cgrp; 1692 1692 struct cgroup *mem_cgrp; 1693 1693 static char memcg_name[PATH_MAX]; ··· 1698 1698 if (!p) 1699 1699 return; 1700 1700 1701 - spin_lock(&oom_info_lock); 1701 + mutex_lock(&oom_info_lock); 1702 1702 rcu_read_lock(); 1703 1703 1704 1704 mem_cgrp = memcg->css.cgroup; ··· 1767 1767 1768 1768 pr_cont("\n"); 1769 1769 } 1770 - spin_unlock(&oom_info_lock); 1770 + mutex_unlock(&oom_info_lock); 1771 1771 } 1772 1772 1773 1773 /* ··· 6595 6595 { 6596 6596 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 6597 6597 struct mem_cgroup_event *event, *tmp; 6598 + struct cgroup_subsys_state *iter; 6598 6599 6599 6600 /* 6600 6601 * Unregister events and notify userspace. ··· 6612 6611 kmem_cgroup_css_offline(memcg); 6613 6612 6614 6613 mem_cgroup_invalidate_reclaim_iterators(memcg); 6615 - mem_cgroup_reparent_charges(memcg); 6614 + 6615 + /* 6616 + * This requires that offlining is serialized. Right now that is 6617 + * guaranteed because css_killed_work_fn() holds the cgroup_mutex. 6618 + */ 6619 + css_for_each_descendant_post(iter, css) 6620 + mem_cgroup_reparent_charges(mem_cgroup_from_css(iter)); 6621 + 6616 6622 mem_cgroup_destroy_all_caches(memcg); 6617 6623 vmpressure_cleanup(&memcg->vmpressure); 6618 6624 }
+1 -1
mm/memory-failure.c
··· 1651 1651 { 1652 1652 int ret; 1653 1653 unsigned long pfn = page_to_pfn(page); 1654 - struct page *hpage = compound_trans_head(page); 1654 + struct page *hpage = compound_head(page); 1655 1655 1656 1656 if (PageHWPoison(page)) { 1657 1657 pr_info("soft offline: %#lx page already poisoned\n", pfn);
+4 -11
mm/memory.c
··· 3348 3348 if (ret & VM_FAULT_LOCKED) 3349 3349 unlock_page(vmf.page); 3350 3350 ret = VM_FAULT_HWPOISON; 3351 + page_cache_release(vmf.page); 3351 3352 goto uncharge_out; 3352 3353 } 3353 3354 ··· 3704 3703 if (unlikely(is_vm_hugetlb_page(vma))) 3705 3704 return hugetlb_fault(mm, vma, address, flags); 3706 3705 3707 - retry: 3708 3706 pgd = pgd_offset(mm, address); 3709 3707 pud = pud_alloc(mm, pgd, address); 3710 3708 if (!pud) ··· 3741 3741 if (dirty && !pmd_write(orig_pmd)) { 3742 3742 ret = do_huge_pmd_wp_page(mm, vma, address, pmd, 3743 3743 orig_pmd); 3744 - /* 3745 - * If COW results in an oom, the huge pmd will 3746 - * have been split, so retry the fault on the 3747 - * pte for a smaller charge. 3748 - */ 3749 - if (unlikely(ret & VM_FAULT_OOM)) 3750 - goto retry; 3751 - return ret; 3744 + if (!(ret & VM_FAULT_FALLBACK)) 3745 + return ret; 3752 3746 } else { 3753 3747 huge_pmd_set_accessed(mm, vma, address, pmd, 3754 3748 orig_pmd, dirty); 3749 + return 0; 3755 3750 } 3756 - 3757 - return 0; 3758 3751 } 3759 3752 } 3760 3753
+8 -17
mm/mprotect.c
··· 58 58 if (pte_numa(ptent)) 59 59 ptent = pte_mknonnuma(ptent); 60 60 ptent = pte_modify(ptent, newprot); 61 + /* 62 + * Avoid taking write faults for pages we 63 + * know to be dirty. 64 + */ 65 + if (dirty_accountable && pte_dirty(ptent)) 66 + ptent = pte_mkwrite(ptent); 67 + ptep_modify_prot_commit(mm, addr, pte, ptent); 61 68 updated = true; 62 69 } else { 63 70 struct page *page; 64 71 65 - ptent = *pte; 66 72 page = vm_normal_page(vma, addr, oldpte); 67 73 if (page && !PageKsm(page)) { 68 74 if (!pte_numa(oldpte)) { 69 - ptent = pte_mknuma(ptent); 70 - set_pte_at(mm, addr, pte, ptent); 75 + ptep_set_numa(mm, addr, pte); 71 76 updated = true; 72 77 } 73 78 } 74 79 } 75 - 76 - /* 77 - * Avoid taking write faults for pages we know to be 78 - * dirty. 79 - */ 80 - if (dirty_accountable && pte_dirty(ptent)) { 81 - ptent = pte_mkwrite(ptent); 82 - updated = true; 83 - } 84 - 85 80 if (updated) 86 81 pages++; 87 - 88 - /* Only !prot_numa always clears the pte */ 89 - if (!prot_numa) 90 - ptep_modify_prot_commit(mm, addr, pte, ptent); 91 82 } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) { 92 83 swp_entry_t entry = pte_to_swp_entry(oldpte); 93 84
+25 -5
mm/page_alloc.c
··· 369 369 __SetPageHead(page); 370 370 for (i = 1; i < nr_pages; i++) { 371 371 struct page *p = page + i; 372 - __SetPageTail(p); 373 372 set_page_count(p, 0); 374 373 p->first_page = page; 374 + /* Make sure p->first_page is always valid for PageTail() */ 375 + smp_wmb(); 376 + __SetPageTail(p); 375 377 } 376 378 } 377 379 ··· 1238 1236 } 1239 1237 local_irq_restore(flags); 1240 1238 } 1239 + static bool gfp_thisnode_allocation(gfp_t gfp_mask) 1240 + { 1241 + return (gfp_mask & GFP_THISNODE) == GFP_THISNODE; 1242 + } 1243 + #else 1244 + static bool gfp_thisnode_allocation(gfp_t gfp_mask) 1245 + { 1246 + return false; 1247 + } 1241 1248 #endif 1242 1249 1243 1250 /* ··· 1583 1572 get_pageblock_migratetype(page)); 1584 1573 } 1585 1574 1586 - __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); 1575 + /* 1576 + * NOTE: GFP_THISNODE allocations do not partake in the kswapd 1577 + * aging protocol, so they can't be fair. 1578 + */ 1579 + if (!gfp_thisnode_allocation(gfp_flags)) 1580 + __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); 1581 + 1587 1582 __count_zone_vm_events(PGALLOC, zone, 1 << order); 1588 1583 zone_statistics(preferred_zone, zone, gfp_flags); 1589 1584 local_irq_restore(flags); ··· 1961 1944 * ultimately fall back to remote zones that do not 1962 1945 * partake in the fairness round-robin cycle of this 1963 1946 * zonelist. 1947 + * 1948 + * NOTE: GFP_THISNODE allocations do not partake in 1949 + * the kswapd aging protocol, so they can't be fair. 1964 1950 */ 1965 - if (alloc_flags & ALLOC_WMARK_LOW) { 1951 + if ((alloc_flags & ALLOC_WMARK_LOW) && 1952 + !gfp_thisnode_allocation(gfp_mask)) { 1966 1953 if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) 1967 1954 continue; 1968 1955 if (!zone_local(preferred_zone, zone)) ··· 2522 2501 * allowed per node queues are empty and that nodes are 2523 2502 * over allocated. 
2524 2503 */ 2525 - if (IS_ENABLED(CONFIG_NUMA) && 2526 - (gfp_mask & GFP_THISNODE) == GFP_THISNODE) 2504 + if (gfp_thisnode_allocation(gfp_mask)) 2527 2505 goto nopage; 2528 2506 2529 2507 restart:
+2 -2
mm/swap.c
··· 98 98 } 99 99 100 100 /* __split_huge_page_refcount can run under us */ 101 - page_head = compound_trans_head(page); 101 + page_head = compound_head(page); 102 102 103 103 /* 104 104 * THP can not break up slab pages so avoid taking ··· 253 253 */ 254 254 unsigned long flags; 255 255 bool got; 256 - struct page *page_head = compound_trans_head(page); 256 + struct page *page_head = compound_head(page); 257 257 258 258 /* Ref to put_compound_page() comment. */ 259 259 if (!__compound_tail_refcounted(page_head)) {
+1
mm/vmpressure.c
··· 19 19 #include <linux/mm.h> 20 20 #include <linux/vmstat.h> 21 21 #include <linux/eventfd.h> 22 + #include <linux/slab.h> 22 23 #include <linux/swap.h> 23 24 #include <linux/printk.h> 24 25 #include <linux/vmpressure.h>
+20 -10
net/batman-adv/bat_iv_ogm.c
··· 241 241 size = bat_priv->num_ifaces * sizeof(uint8_t); 242 242 orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC); 243 243 if (!orig_node->bat_iv.bcast_own_sum) 244 - goto free_bcast_own; 244 + goto free_orig_node; 245 245 246 246 hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig, 247 247 batadv_choose_orig, orig_node, 248 248 &orig_node->hash_entry); 249 249 if (hash_added != 0) 250 - goto free_bcast_own; 250 + goto free_orig_node; 251 251 252 252 return orig_node; 253 253 254 - free_bcast_own: 255 - kfree(orig_node->bat_iv.bcast_own); 256 254 free_orig_node: 255 + /* free twice, as batadv_orig_node_new sets refcount to 2 */ 256 + batadv_orig_node_free_ref(orig_node); 257 257 batadv_orig_node_free_ref(orig_node); 258 258 259 259 return NULL; ··· 266 266 struct batadv_orig_node *orig_neigh) 267 267 { 268 268 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 269 - struct batadv_neigh_node *neigh_node; 269 + struct batadv_neigh_node *neigh_node, *tmp_neigh_node; 270 270 271 271 neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node); 272 272 if (!neigh_node) ··· 281 281 neigh_node->orig_node = orig_neigh; 282 282 neigh_node->if_incoming = hard_iface; 283 283 284 - batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 285 - "Creating new neighbor %pM for orig_node %pM on interface %s\n", 286 - neigh_addr, orig_node->orig, hard_iface->net_dev->name); 287 - 288 284 spin_lock_bh(&orig_node->neigh_list_lock); 289 - hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); 285 + tmp_neigh_node = batadv_neigh_node_get(orig_node, hard_iface, 286 + neigh_addr); 287 + if (!tmp_neigh_node) { 288 + hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); 289 + } else { 290 + kfree(neigh_node); 291 + batadv_hardif_free_ref(hard_iface); 292 + neigh_node = tmp_neigh_node; 293 + } 290 294 spin_unlock_bh(&orig_node->neigh_list_lock); 295 + 296 + if (!tmp_neigh_node) 297 + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 298 + 
"Creating new neighbor %pM for orig_node %pM on interface %s\n", 299 + neigh_addr, orig_node->orig, 300 + hard_iface->net_dev->name); 291 301 292 302 out: 293 303 return neigh_node;
+14 -8
net/batman-adv/hard-interface.c
··· 241 241 { 242 242 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 243 243 const struct batadv_hard_iface *hard_iface; 244 - int min_mtu = ETH_DATA_LEN; 244 + int min_mtu = INT_MAX; 245 245 246 246 rcu_read_lock(); 247 247 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { ··· 256 256 } 257 257 rcu_read_unlock(); 258 258 259 - atomic_set(&bat_priv->packet_size_max, min_mtu); 260 - 261 259 if (atomic_read(&bat_priv->fragmentation) == 0) 262 260 goto out; 263 261 ··· 266 268 min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE); 267 269 min_mtu -= sizeof(struct batadv_frag_packet); 268 270 min_mtu *= BATADV_FRAG_MAX_FRAGMENTS; 269 - atomic_set(&bat_priv->packet_size_max, min_mtu); 270 - 271 - /* with fragmentation enabled we can fragment external packets easily */ 272 - min_mtu = min_t(int, min_mtu, ETH_DATA_LEN); 273 271 274 272 out: 275 - return min_mtu - batadv_max_header_len(); 273 + /* report to the other components the maximum amount of bytes that 274 + * batman-adv can send over the wire (without considering the payload 275 + * overhead). For example, this value is used by TT to compute the 276 + * maximum local table table size 277 + */ 278 + atomic_set(&bat_priv->packet_size_max, min_mtu); 279 + 280 + /* the real soft-interface MTU is computed by removing the payload 281 + * overhead from the maximum amount of bytes that was just computed. 282 + * 283 + * However batman-adv does not support MTUs bigger than ETH_DATA_LEN 284 + */ 285 + return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN); 276 286 } 277 287 278 288 /* adjusts the MTU if a new interface with a smaller MTU appeared. */
+36
net/batman-adv/originator.c
··· 458 458 } 459 459 460 460 /** 461 + * batadv_neigh_node_get - retrieve a neighbour from the list 462 + * @orig_node: originator which the neighbour belongs to 463 + * @hard_iface: the interface where this neighbour is connected to 464 + * @addr: the address of the neighbour 465 + * 466 + * Looks for and possibly returns a neighbour belonging to this originator list 467 + * which is connected through the provided hard interface. 468 + * Returns NULL if the neighbour is not found. 469 + */ 470 + struct batadv_neigh_node * 471 + batadv_neigh_node_get(const struct batadv_orig_node *orig_node, 472 + const struct batadv_hard_iface *hard_iface, 473 + const uint8_t *addr) 474 + { 475 + struct batadv_neigh_node *tmp_neigh_node, *res = NULL; 476 + 477 + rcu_read_lock(); 478 + hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) { 479 + if (!batadv_compare_eth(tmp_neigh_node->addr, addr)) 480 + continue; 481 + 482 + if (tmp_neigh_node->if_incoming != hard_iface) 483 + continue; 484 + 485 + if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) 486 + continue; 487 + 488 + res = tmp_neigh_node; 489 + break; 490 + } 491 + rcu_read_unlock(); 492 + 493 + return res; 494 + } 495 + 496 + /** 461 497 * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object 462 498 * @rcu: rcu pointer of the orig_ifinfo object 463 499 */
+4
net/batman-adv/originator.h
··· 29 29 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, 30 30 const uint8_t *addr); 31 31 struct batadv_neigh_node * 32 + batadv_neigh_node_get(const struct batadv_orig_node *orig_node, 33 + const struct batadv_hard_iface *hard_iface, 34 + const uint8_t *addr); 35 + struct batadv_neigh_node * 32 36 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface, 33 37 const uint8_t *neigh_addr, 34 38 struct batadv_orig_node *orig_node);
+3 -1
net/batman-adv/routing.c
··· 688 688 int is_old_ttvn; 689 689 690 690 /* check if there is enough data before accessing it */ 691 - if (pskb_may_pull(skb, hdr_len + ETH_HLEN) < 0) 691 + if (!pskb_may_pull(skb, hdr_len + ETH_HLEN)) 692 692 return 0; 693 693 694 694 /* create a copy of the skb (in case of for re-routing) to modify it. */ ··· 918 918 919 919 if (ret != NET_RX_SUCCESS) 920 920 ret = batadv_route_unicast_packet(skb, recv_if); 921 + else 922 + consume_skb(skb); 921 923 922 924 return ret; 923 925 }
+7 -2
net/batman-adv/send.c
··· 254 254 struct batadv_orig_node *orig_node, 255 255 unsigned short vid) 256 256 { 257 - struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 257 + struct ethhdr *ethhdr; 258 258 struct batadv_unicast_packet *unicast_packet; 259 - int ret = NET_XMIT_DROP; 259 + int ret = NET_XMIT_DROP, hdr_size; 260 260 261 261 if (!orig_node) 262 262 goto out; ··· 265 265 case BATADV_UNICAST: 266 266 if (!batadv_send_skb_prepare_unicast(skb, orig_node)) 267 267 goto out; 268 + 269 + hdr_size = sizeof(*unicast_packet); 268 270 break; 269 271 case BATADV_UNICAST_4ADDR: 270 272 if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, 271 273 orig_node, 272 274 packet_subtype)) 273 275 goto out; 276 + 277 + hdr_size = sizeof(struct batadv_unicast_4addr_packet); 274 278 break; 275 279 default: 276 280 /* this function supports UNICAST and UNICAST_4ADDR only. It ··· 283 279 goto out; 284 280 } 285 281 282 + ethhdr = (struct ethhdr *)(skb->data + hdr_size); 286 283 unicast_packet = (struct batadv_unicast_packet *)skb->data; 287 284 288 285 /* inform the destination node that we are still missing a correct route
+17 -6
net/batman-adv/translation-table.c
··· 1975 1975 struct hlist_head *head; 1976 1976 uint32_t i, crc_tmp, crc = 0; 1977 1977 uint8_t flags; 1978 + __be16 tmp_vid; 1978 1979 1979 1980 for (i = 0; i < hash->size; i++) { 1980 1981 head = &hash->table[i]; ··· 2012 2011 orig_node)) 2013 2012 continue; 2014 2013 2015 - crc_tmp = crc32c(0, &tt_common->vid, 2016 - sizeof(tt_common->vid)); 2014 + /* use network order to read the VID: this ensures that 2015 + * every node reads the bytes in the same order. 2016 + */ 2017 + tmp_vid = htons(tt_common->vid); 2018 + crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid)); 2017 2019 2018 2020 /* compute the CRC on flags that have to be kept in sync 2019 2021 * among nodes ··· 2050 2046 struct hlist_head *head; 2051 2047 uint32_t i, crc_tmp, crc = 0; 2052 2048 uint8_t flags; 2049 + __be16 tmp_vid; 2053 2050 2054 2051 for (i = 0; i < hash->size; i++) { 2055 2052 head = &hash->table[i]; ··· 2069 2064 if (tt_common->flags & BATADV_TT_CLIENT_NEW) 2070 2065 continue; 2071 2066 2072 - crc_tmp = crc32c(0, &tt_common->vid, 2073 - sizeof(tt_common->vid)); 2067 + /* use network order to read the VID: this ensures that 2068 + * every node reads the bytes in the same order. 
2069 + */ 2070 + tmp_vid = htons(tt_common->vid); 2071 + crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid)); 2074 2072 2075 2073 /* compute the CRC on flags that have to be kept in sync 2076 2074 * among nodes ··· 2270 2262 { 2271 2263 struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp; 2272 2264 struct batadv_orig_node_vlan *vlan; 2265 + uint32_t crc; 2273 2266 int i; 2274 2267 2275 2268 /* check if each received CRC matches the locally stored one */ ··· 2290 2281 if (!vlan) 2291 2282 return false; 2292 2283 2293 - if (vlan->tt.crc != ntohl(tt_vlan_tmp->crc)) 2284 + crc = vlan->tt.crc; 2285 + batadv_orig_node_vlan_free_ref(vlan); 2286 + 2287 + if (crc != ntohl(tt_vlan_tmp->crc)) 2294 2288 return false; 2295 2289 } 2296 2290 ··· 3230 3218 3231 3219 spin_lock_bh(&orig_node->tt_lock); 3232 3220 3233 - tt_change = (struct batadv_tvlv_tt_change *)tt_buff; 3234 3221 batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes, 3235 3222 ttvn, tt_change); 3236 3223
+14 -2
net/bluetooth/hidp/core.c
··· 430 430 del_timer(&session->timer); 431 431 } 432 432 433 + static void hidp_process_report(struct hidp_session *session, 434 + int type, const u8 *data, int len, int intr) 435 + { 436 + if (len > HID_MAX_BUFFER_SIZE) 437 + len = HID_MAX_BUFFER_SIZE; 438 + 439 + memcpy(session->input_buf, data, len); 440 + hid_input_report(session->hid, type, session->input_buf, len, intr); 441 + } 442 + 433 443 static void hidp_process_handshake(struct hidp_session *session, 434 444 unsigned char param) 435 445 { ··· 512 502 hidp_input_report(session, skb); 513 503 514 504 if (session->hid) 515 - hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0); 505 + hidp_process_report(session, HID_INPUT_REPORT, 506 + skb->data, skb->len, 0); 516 507 break; 517 508 518 509 case HIDP_DATA_RTYPE_OTHER: ··· 595 584 hidp_input_report(session, skb); 596 585 597 586 if (session->hid) { 598 - hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1); 587 + hidp_process_report(session, HID_INPUT_REPORT, 588 + skb->data, skb->len, 1); 599 589 BT_DBG("report len %d", skb->len); 600 590 } 601 591 } else {
+4
net/bluetooth/hidp/hidp.h
··· 24 24 #define __HIDP_H 25 25 26 26 #include <linux/types.h> 27 + #include <linux/hid.h> 27 28 #include <linux/kref.h> 28 29 #include <net/bluetooth/bluetooth.h> 29 30 #include <net/bluetooth/l2cap.h> ··· 180 179 181 180 /* Used in hidp_output_raw_report() */ 182 181 int output_report_success; /* boolean */ 182 + 183 + /* temporary input buffer */ 184 + u8 input_buf[HID_MAX_BUFFER_SIZE]; 183 185 }; 184 186 185 187 /* HIDP init defines */
+5 -21
net/can/raw.c
··· 121 121 if (!ro->recv_own_msgs && oskb->sk == sk) 122 122 return; 123 123 124 - /* do not pass frames with DLC > 8 to a legacy socket */ 125 - if (!ro->fd_frames) { 126 - struct canfd_frame *cfd = (struct canfd_frame *)oskb->data; 127 - 128 - if (unlikely(cfd->len > CAN_MAX_DLEN)) 129 - return; 130 - } 124 + /* do not pass non-CAN2.0 frames to a legacy socket */ 125 + if (!ro->fd_frames && oskb->len != CAN_MTU) 126 + return; 131 127 132 128 /* clone the given skb to be able to enqueue it into the rcv queue */ 133 129 skb = skb_clone(oskb, GFP_ATOMIC); ··· 734 738 struct msghdr *msg, size_t size, int flags) 735 739 { 736 740 struct sock *sk = sock->sk; 737 - struct raw_sock *ro = raw_sk(sk); 738 741 struct sk_buff *skb; 739 - int rxmtu; 740 742 int err = 0; 741 743 int noblock; 742 744 ··· 745 751 if (!skb) 746 752 return err; 747 753 748 - /* 749 - * when serving a legacy socket the DLC <= 8 is already checked inside 750 - * raw_rcv(). Now check if we need to pass a canfd_frame to a legacy 751 - * socket and cut the possible CANFD_MTU/CAN_MTU length to CAN_MTU 752 - */ 753 - if (!ro->fd_frames) 754 - rxmtu = CAN_MTU; 755 - else 756 - rxmtu = skb->len; 757 - 758 - if (size < rxmtu) 754 + if (size < skb->len) 759 755 msg->msg_flags |= MSG_TRUNC; 760 756 else 761 - size = rxmtu; 757 + size = skb->len; 762 758 763 759 err = memcpy_toiovec(msg->msg_iov, skb->data, size); 764 760 if (err < 0) {
+12 -10
net/core/dev.c
··· 2420 2420 * 2. No high memory really exists on this machine. 2421 2421 */ 2422 2422 2423 - static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 2423 + static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb) 2424 2424 { 2425 2425 #ifdef CONFIG_HIGHMEM 2426 2426 int i; ··· 2495 2495 } 2496 2496 2497 2497 static netdev_features_t harmonize_features(struct sk_buff *skb, 2498 - netdev_features_t features) 2498 + const struct net_device *dev, 2499 + netdev_features_t features) 2499 2500 { 2500 2501 if (skb->ip_summed != CHECKSUM_NONE && 2501 2502 !can_checksum_protocol(features, skb_network_protocol(skb))) { 2502 2503 features &= ~NETIF_F_ALL_CSUM; 2503 - } else if (illegal_highdma(skb->dev, skb)) { 2504 + } else if (illegal_highdma(dev, skb)) { 2504 2505 features &= ~NETIF_F_SG; 2505 2506 } 2506 2507 2507 2508 return features; 2508 2509 } 2509 2510 2510 - netdev_features_t netif_skb_features(struct sk_buff *skb) 2511 + netdev_features_t netif_skb_dev_features(struct sk_buff *skb, 2512 + const struct net_device *dev) 2511 2513 { 2512 2514 __be16 protocol = skb->protocol; 2513 - netdev_features_t features = skb->dev->features; 2515 + netdev_features_t features = dev->features; 2514 2516 2515 - if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) 2517 + if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs) 2516 2518 features &= ~NETIF_F_GSO_MASK; 2517 2519 2518 2520 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { 2519 2521 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2520 2522 protocol = veh->h_vlan_encapsulated_proto; 2521 2523 } else if (!vlan_tx_tag_present(skb)) { 2522 - return harmonize_features(skb, features); 2524 + return harmonize_features(skb, dev, features); 2523 2525 } 2524 2526 2525 - features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | 2527 + features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | 2526 2528 NETIF_F_HW_VLAN_STAG_TX); 2527 2529 2528 2530 if 
(protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) ··· 2532 2530 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX | 2533 2531 NETIF_F_HW_VLAN_STAG_TX; 2534 2532 2535 - return harmonize_features(skb, features); 2533 + return harmonize_features(skb, dev, features); 2536 2534 } 2537 - EXPORT_SYMBOL(netif_skb_features); 2535 + EXPORT_SYMBOL(netif_skb_dev_features); 2538 2536 2539 2537 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2540 2538 struct netdev_queue *txq)
+4 -16
net/core/flow_dissector.c
··· 323 323 return poff; 324 324 } 325 325 326 - static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) 327 - { 328 - if (unlikely(queue_index >= dev->real_num_tx_queues)) { 329 - net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", 330 - dev->name, queue_index, 331 - dev->real_num_tx_queues); 332 - return 0; 333 - } 334 - return queue_index; 335 - } 336 - 337 326 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) 338 327 { 339 328 #ifdef CONFIG_XPS ··· 361 372 #endif 362 373 } 363 374 364 - u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) 375 + static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) 365 376 { 366 377 struct sock *sk = skb->sk; 367 378 int queue_index = sk_tx_queue_get(sk); ··· 381 392 382 393 return queue_index; 383 394 } 384 - EXPORT_SYMBOL(__netdev_pick_tx); 385 395 386 396 struct netdev_queue *netdev_pick_tx(struct net_device *dev, 387 397 struct sk_buff *skb, ··· 391 403 if (dev->real_num_tx_queues != 1) { 392 404 const struct net_device_ops *ops = dev->netdev_ops; 393 405 if (ops->ndo_select_queue) 394 - queue_index = ops->ndo_select_queue(dev, skb, 395 - accel_priv); 406 + queue_index = ops->ndo_select_queue(dev, skb, accel_priv, 407 + __netdev_pick_tx); 396 408 else 397 409 queue_index = __netdev_pick_tx(dev, skb); 398 410 399 411 if (!accel_priv) 400 - queue_index = dev_cap_txqueue(dev, queue_index); 412 + queue_index = netdev_cap_txqueue(dev, queue_index); 401 413 } 402 414 403 415 skb_set_queue_mapping(skb, queue_index);
+4 -4
net/core/neighbour.c
··· 766 766 nht = rcu_dereference_protected(tbl->nht, 767 767 lockdep_is_held(&tbl->lock)); 768 768 769 - if (atomic_read(&tbl->entries) < tbl->gc_thresh1) 770 - goto out; 771 - 772 769 /* 773 770 * periodically recompute ReachableTime from random function 774 771 */ ··· 777 780 p->reachable_time = 778 781 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 779 782 } 783 + 784 + if (atomic_read(&tbl->entries) < tbl->gc_thresh1) 785 + goto out; 780 786 781 787 for (i = 0 ; i < (1 << nht->hash_shift); i++) { 782 788 np = &nht->hash_buckets[i]; ··· 3046 3046 if (!t) 3047 3047 goto err; 3048 3048 3049 - for (i = 0; i < ARRAY_SIZE(t->neigh_vars); i++) { 3049 + for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) { 3050 3050 t->neigh_vars[i].data += (long) p; 3051 3051 t->neigh_vars[i].extra1 = dev; 3052 3052 t->neigh_vars[i].extra2 = p;
+12 -7
net/core/rtnetlink.c
··· 1963 1963 1964 1964 dev->ifindex = ifm->ifi_index; 1965 1965 1966 - if (ops->newlink) 1966 + if (ops->newlink) { 1967 1967 err = ops->newlink(net, dev, tb, data); 1968 - else 1968 + /* Drivers should call free_netdev() in ->destructor 1969 + * and unregister it on failure so that device could be 1970 + * finally freed in rtnl_unlock. 1971 + */ 1972 + if (err < 0) 1973 + goto out; 1974 + } else { 1969 1975 err = register_netdevice(dev); 1970 - 1971 - if (err < 0) { 1972 - free_netdev(dev); 1973 - goto out; 1976 + if (err < 0) { 1977 + free_netdev(dev); 1978 + goto out; 1979 + } 1974 1980 } 1975 - 1976 1981 err = rtnl_configure_link(dev, ifm); 1977 1982 if (err < 0) 1978 1983 unregister_netdevice(dev);
-3
net/core/skbuff.c
··· 707 707 new->mark = old->mark; 708 708 new->skb_iif = old->skb_iif; 709 709 __nf_copy(new, old); 710 - #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) 711 - new->nf_trace = old->nf_trace; 712 - #endif 713 710 #ifdef CONFIG_NET_SCHED 714 711 new->tc_index = old->tc_index; 715 712 #ifdef CONFIG_NET_CLS_ACT
+1 -1
net/dccp/ccids/lib/tfrc.c
··· 8 8 #include "tfrc.h" 9 9 10 10 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG 11 - static bool tfrc_debug; 11 + bool tfrc_debug; 12 12 module_param(tfrc_debug, bool, 0644); 13 13 MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages"); 14 14 #endif
+1
net/dccp/ccids/lib/tfrc.h
··· 21 21 #include "packet_history.h" 22 22 23 23 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG 24 + extern bool tfrc_debug; 24 25 #define tfrc_pr_debug(format, a...) DCCP_PR_DEBUG(tfrc_debug, format, ##a) 25 26 #else 26 27 #define tfrc_pr_debug(format, a...)
+1 -1
net/hsr/hsr_framereg.c
··· 297 297 298 298 void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx) 299 299 { 300 - if ((dev_idx < 0) || (dev_idx >= HSR_MAX_DEV)) { 300 + if ((dev_idx < 0) || (dev_idx >= HSR_MAX_SLAVE)) { 301 301 WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx); 302 302 return; 303 303 }
+5 -2
net/ipv4/af_inet.c
··· 1296 1296 1297 1297 segs = ERR_PTR(-EPROTONOSUPPORT); 1298 1298 1299 - /* Note : following gso_segment() might change skb->encapsulation */ 1300 - udpfrag = !skb->encapsulation && proto == IPPROTO_UDP; 1299 + if (skb->encapsulation && 1300 + skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP)) 1301 + udpfrag = proto == IPPROTO_UDP && encap; 1302 + else 1303 + udpfrag = proto == IPPROTO_UDP && !skb->encapsulation; 1301 1304 1302 1305 ops = rcu_dereference(inet_offloads[proto]); 1303 1306 if (likely(ops && ops->callbacks.gso_segment))
+69 -2
net/ipv4/ip_forward.c
··· 39 39 #include <net/route.h> 40 40 #include <net/xfrm.h> 41 41 42 + static bool ip_may_fragment(const struct sk_buff *skb) 43 + { 44 + return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) || 45 + !skb->local_df; 46 + } 47 + 48 + static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) 49 + { 50 + if (skb->len <= mtu || skb->local_df) 51 + return false; 52 + 53 + if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) 54 + return false; 55 + 56 + return true; 57 + } 58 + 59 + static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb) 60 + { 61 + unsigned int mtu; 62 + 63 + if (skb->local_df || !skb_is_gso(skb)) 64 + return false; 65 + 66 + mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true); 67 + 68 + /* if seglen > mtu, do software segmentation for IP fragmentation on 69 + * output. DF bit cannot be set since ip_forward would have sent 70 + * icmp error. 71 + */ 72 + return skb_gso_network_seglen(skb) > mtu; 73 + } 74 + 75 + /* called if GSO skb needs to be fragmented on forward */ 76 + static int ip_forward_finish_gso(struct sk_buff *skb) 77 + { 78 + struct dst_entry *dst = skb_dst(skb); 79 + netdev_features_t features; 80 + struct sk_buff *segs; 81 + int ret = 0; 82 + 83 + features = netif_skb_dev_features(skb, dst->dev); 84 + segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); 85 + if (IS_ERR(segs)) { 86 + kfree_skb(skb); 87 + return -ENOMEM; 88 + } 89 + 90 + consume_skb(skb); 91 + 92 + do { 93 + struct sk_buff *nskb = segs->next; 94 + int err; 95 + 96 + segs->next = NULL; 97 + err = dst_output(segs); 98 + 99 + if (err && ret == 0) 100 + ret = err; 101 + segs = nskb; 102 + } while (segs); 103 + 104 + return ret; 105 + } 106 + 42 107 static int ip_forward_finish(struct sk_buff *skb) 43 108 { 44 109 struct ip_options *opt = &(IPCB(skb)->opt); ··· 113 48 114 49 if (unlikely(opt->optlen)) 115 50 ip_forward_options(skb); 51 + 52 + if (ip_gso_exceeds_dst_mtu(skb)) 53 + return ip_forward_finish_gso(skb); 116 54 117 55 return 
dst_output(skb); 118 56 } ··· 159 91 160 92 IPCB(skb)->flags |= IPSKB_FORWARDED; 161 93 mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); 162 - if (unlikely(skb->len > mtu && !skb_is_gso(skb) && 163 - (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) { 94 + if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) { 164 95 IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS); 165 96 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 166 97 htonl(mtu));
-3
net/ipv4/ip_output.c
··· 422 422 to->tc_index = from->tc_index; 423 423 #endif 424 424 nf_copy(to, from); 425 - #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) 426 - to->nf_trace = from->nf_trace; 427 - #endif 428 425 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) 429 426 to->ipvs_property = from->ipvs_property; 430 427 #endif
+4 -49
net/ipv4/ip_tunnel.c
··· 93 93 tunnel_dst_set(t, NULL); 94 94 } 95 95 96 - static void tunnel_dst_reset_all(struct ip_tunnel *t) 96 + void ip_tunnel_dst_reset_all(struct ip_tunnel *t) 97 97 { 98 98 int i; 99 99 100 100 for_each_possible_cpu(i) 101 101 __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL); 102 102 } 103 + EXPORT_SYMBOL(ip_tunnel_dst_reset_all); 103 104 104 105 static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie) 105 106 { ··· 119 118 rcu_read_unlock(); 120 119 return (struct rtable *)dst; 121 120 } 122 - 123 - /* Often modified stats are per cpu, other are shared (netdev->stats) */ 124 - struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, 125 - struct rtnl_link_stats64 *tot) 126 - { 127 - int i; 128 - 129 - for_each_possible_cpu(i) { 130 - const struct pcpu_sw_netstats *tstats = 131 - per_cpu_ptr(dev->tstats, i); 132 - u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 133 - unsigned int start; 134 - 135 - do { 136 - start = u64_stats_fetch_begin_bh(&tstats->syncp); 137 - rx_packets = tstats->rx_packets; 138 - tx_packets = tstats->tx_packets; 139 - rx_bytes = tstats->rx_bytes; 140 - tx_bytes = tstats->tx_bytes; 141 - } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); 142 - 143 - tot->rx_packets += rx_packets; 144 - tot->tx_packets += tx_packets; 145 - tot->rx_bytes += rx_bytes; 146 - tot->tx_bytes += tx_bytes; 147 - } 148 - 149 - tot->multicast = dev->stats.multicast; 150 - 151 - tot->rx_crc_errors = dev->stats.rx_crc_errors; 152 - tot->rx_fifo_errors = dev->stats.rx_fifo_errors; 153 - tot->rx_length_errors = dev->stats.rx_length_errors; 154 - tot->rx_frame_errors = dev->stats.rx_frame_errors; 155 - tot->rx_errors = dev->stats.rx_errors; 156 - 157 - tot->tx_fifo_errors = dev->stats.tx_fifo_errors; 158 - tot->tx_carrier_errors = dev->stats.tx_carrier_errors; 159 - tot->tx_dropped = dev->stats.tx_dropped; 160 - tot->tx_aborted_errors = dev->stats.tx_aborted_errors; 161 - tot->tx_errors = dev->stats.tx_errors; 162 - 163 - 
tot->collisions = dev->stats.collisions; 164 - 165 - return tot; 166 - } 167 - EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64); 168 121 169 122 static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p, 170 123 __be16 flags, __be32 key) ··· 714 759 if (set_mtu) 715 760 dev->mtu = mtu; 716 761 } 717 - tunnel_dst_reset_all(t); 762 + ip_tunnel_dst_reset_all(t); 718 763 netdev_state_change(dev); 719 764 } 720 765 ··· 1043 1088 if (itn->fb_tunnel_dev != dev) 1044 1089 ip_tunnel_del(netdev_priv(dev)); 1045 1090 1046 - tunnel_dst_reset_all(tunnel); 1091 + ip_tunnel_dst_reset_all(tunnel); 1047 1092 } 1048 1093 EXPORT_SYMBOL_GPL(ip_tunnel_uninit); 1049 1094
+46 -1
net/ipv4/ip_tunnel_core.c
··· 108 108 nf_reset(skb); 109 109 secpath_reset(skb); 110 110 skb_clear_hash_if_not_l4(skb); 111 - skb_dst_drop(skb); 112 111 skb->vlan_tci = 0; 113 112 skb_set_queue_mapping(skb, 0); 114 113 skb->pkt_type = PACKET_HOST; ··· 147 148 return ERR_PTR(err); 148 149 } 149 150 EXPORT_SYMBOL_GPL(iptunnel_handle_offloads); 151 + 152 + /* Often modified stats are per cpu, other are shared (netdev->stats) */ 153 + struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, 154 + struct rtnl_link_stats64 *tot) 155 + { 156 + int i; 157 + 158 + for_each_possible_cpu(i) { 159 + const struct pcpu_sw_netstats *tstats = 160 + per_cpu_ptr(dev->tstats, i); 161 + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 162 + unsigned int start; 163 + 164 + do { 165 + start = u64_stats_fetch_begin_bh(&tstats->syncp); 166 + rx_packets = tstats->rx_packets; 167 + tx_packets = tstats->tx_packets; 168 + rx_bytes = tstats->rx_bytes; 169 + tx_bytes = tstats->tx_bytes; 170 + } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); 171 + 172 + tot->rx_packets += rx_packets; 173 + tot->tx_packets += tx_packets; 174 + tot->rx_bytes += rx_bytes; 175 + tot->tx_bytes += tx_bytes; 176 + } 177 + 178 + tot->multicast = dev->stats.multicast; 179 + 180 + tot->rx_crc_errors = dev->stats.rx_crc_errors; 181 + tot->rx_fifo_errors = dev->stats.rx_fifo_errors; 182 + tot->rx_length_errors = dev->stats.rx_length_errors; 183 + tot->rx_frame_errors = dev->stats.rx_frame_errors; 184 + tot->rx_errors = dev->stats.rx_errors; 185 + 186 + tot->tx_fifo_errors = dev->stats.tx_fifo_errors; 187 + tot->tx_carrier_errors = dev->stats.tx_carrier_errors; 188 + tot->tx_dropped = dev->stats.tx_dropped; 189 + tot->tx_aborted_errors = dev->stats.tx_aborted_errors; 190 + tot->tx_errors = dev->stats.tx_errors; 191 + 192 + tot->collisions = dev->stats.collisions; 193 + 194 + return tot; 195 + } 196 + EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
+1 -1
net/ipv4/ipconfig.c
··· 273 273 274 274 msleep(1); 275 275 276 - if time_before(jiffies, next_msg) 276 + if (time_before(jiffies, next_msg)) 277 277 continue; 278 278 279 279 elapsed = jiffies_to_msecs(jiffies - start);
+2 -2
net/ipv4/netfilter/nf_nat_snmp_basic.c
··· 1198 1198 map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip); 1199 1199 } else { 1200 1200 /* DNAT replies */ 1201 - map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip); 1202 - map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip); 1201 + map.from = NOCT1(&ct->tuplehash[!dir].tuple.src.u3.ip); 1202 + map.to = NOCT1(&ct->tuplehash[dir].tuple.dst.u3.ip); 1203 1203 } 1204 1204 1205 1205 if (map.from == map.to)
+9 -4
net/ipv4/route.c
··· 1597 1597 rth->rt_gateway = 0; 1598 1598 rth->rt_uses_gateway = 0; 1599 1599 INIT_LIST_HEAD(&rth->rt_uncached); 1600 + RT_CACHE_STAT_INC(in_slow_tot); 1600 1601 1601 1602 rth->dst.input = ip_forward; 1602 1603 rth->dst.output = ip_output; ··· 1696 1695 fl4.daddr = daddr; 1697 1696 fl4.saddr = saddr; 1698 1697 err = fib_lookup(net, &fl4, &res); 1699 - if (err != 0) 1698 + if (err != 0) { 1699 + if (!IN_DEV_FORWARD(in_dev)) 1700 + err = -EHOSTUNREACH; 1700 1701 goto no_route; 1701 - 1702 - RT_CACHE_STAT_INC(in_slow_tot); 1702 + } 1703 1703 1704 1704 if (res.type == RTN_BROADCAST) 1705 1705 goto brd_input; ··· 1714 1712 goto local_input; 1715 1713 } 1716 1714 1717 - if (!IN_DEV_FORWARD(in_dev)) 1715 + if (!IN_DEV_FORWARD(in_dev)) { 1716 + err = -EHOSTUNREACH; 1718 1717 goto no_route; 1718 + } 1719 1719 if (res.type != RTN_UNICAST) 1720 1720 goto martian_destination; 1721 1721 ··· 1772 1768 rth->rt_gateway = 0; 1773 1769 rth->rt_uses_gateway = 0; 1774 1770 INIT_LIST_HEAD(&rth->rt_uncached); 1771 + RT_CACHE_STAT_INC(in_slow_tot); 1775 1772 if (res.type == RTN_UNREACHABLE) { 1776 1773 rth->dst.input= ip_error; 1777 1774 rth->dst.error= -err;
+5 -3
net/ipv4/tcp.c
··· 1044 1044 } 1045 1045 } 1046 1046 1047 - static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size) 1047 + static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, 1048 + int *copied, size_t size) 1048 1049 { 1049 1050 struct tcp_sock *tp = tcp_sk(sk); 1050 1051 int err, flags; ··· 1060 1059 if (unlikely(tp->fastopen_req == NULL)) 1061 1060 return -ENOBUFS; 1062 1061 tp->fastopen_req->data = msg; 1062 + tp->fastopen_req->size = size; 1063 1063 1064 1064 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; 1065 1065 err = __inet_stream_connect(sk->sk_socket, msg->msg_name, 1066 1066 msg->msg_namelen, flags); 1067 - *size = tp->fastopen_req->copied; 1067 + *copied = tp->fastopen_req->copied; 1068 1068 tcp_free_fastopen_req(tp); 1069 1069 return err; 1070 1070 } ··· 1085 1083 1086 1084 flags = msg->msg_flags; 1087 1085 if (flags & MSG_FASTOPEN) { 1088 - err = tcp_sendmsg_fastopen(sk, msg, &copied_syn); 1086 + err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); 1089 1087 if (err == -EINPROGRESS && copied_syn > 0) 1090 1088 goto out; 1091 1089 else if (err)
+1 -2
net/ipv4/tcp_cong.c
··· 290 290 left = tp->snd_cwnd - in_flight; 291 291 if (sk_can_gso(sk) && 292 292 left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && 293 - left * tp->mss_cache < sk->sk_gso_max_size && 294 - left < sk->sk_gso_max_segs) 293 + left < tp->xmit_size_goal_segs) 295 294 return true; 296 295 return left <= tcp_max_tso_deferred_mss(tp); 297 296 }
+2 -1
net/ipv4/tcp_input.c
··· 1945 1945 if (skb == tcp_send_head(sk)) 1946 1946 break; 1947 1947 1948 - if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 1948 + if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) 1949 1949 tp->undo_marker = 0; 1950 + 1950 1951 TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; 1951 1952 if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) { 1952 1953 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
+16 -6
net/ipv4/tcp_output.c
··· 864 864 865 865 if (unlikely(skb->fclone == SKB_FCLONE_ORIG && 866 866 fclone->fclone == SKB_FCLONE_CLONE)) 867 - NET_INC_STATS_BH(sock_net(sk), 868 - LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); 867 + NET_INC_STATS(sock_net(sk), 868 + LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); 869 869 870 870 if (unlikely(skb_cloned(skb))) 871 871 skb = pskb_copy(skb, gfp_mask); ··· 2337 2337 struct tcp_sock *tp = tcp_sk(sk); 2338 2338 struct inet_connection_sock *icsk = inet_csk(sk); 2339 2339 unsigned int cur_mss; 2340 + int err; 2340 2341 2341 2342 /* Inconslusive MTU probe */ 2342 2343 if (icsk->icsk_mtup.probe_size) { ··· 2401 2400 skb_headroom(skb) >= 0xFFFF)) { 2402 2401 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, 2403 2402 GFP_ATOMIC); 2404 - return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : 2405 - -ENOBUFS; 2403 + err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : 2404 + -ENOBUFS; 2406 2405 } else { 2407 - return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2406 + err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2408 2407 } 2408 + 2409 + if (likely(!err)) 2410 + TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; 2411 + return err; 2409 2412 } 2410 2413 2411 2414 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) ··· 2913 2908 space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - 2914 2909 MAX_TCP_OPTION_SPACE; 2915 2910 2916 - syn_data = skb_copy_expand(syn, skb_headroom(syn), space, 2911 + space = min_t(size_t, space, fo->size); 2912 + 2913 + /* limit to order-0 allocations */ 2914 + space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER)); 2915 + 2916 + syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space, 2917 2917 sk->sk_allocation); 2918 2918 if (syn_data == NULL) 2919 2919 goto fallback;
+1
net/ipv6/Kconfig
··· 138 138 config IPV6_VTI 139 139 tristate "Virtual (secure) IPv6: tunneling" 140 140 select IPV6_TUNNEL 141 + select NET_IP_TUNNEL 141 142 depends on INET6_XFRM_MODE_TUNNEL 142 143 ---help--- 143 144 Tunneling means encapsulating data of one protocol type within
+2
net/ipv6/addrconf.c
··· 2783 2783 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); 2784 2784 if (!ipv6_generate_eui64(addr.s6_addr + 8, dev)) 2785 2785 addrconf_add_linklocal(idev, &addr); 2786 + else 2787 + addrconf_prefix_route(&addr, 64, dev, 0, 0); 2786 2788 } 2787 2789 #endif 2788 2790
+1 -1
net/ipv6/exthdrs_core.c
··· 212 212 found = (nexthdr == target); 213 213 214 214 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { 215 - if (target < 0) 215 + if (target < 0 || found) 216 216 break; 217 217 return -ENOENT; 218 218 }
+12 -8
net/ipv6/ip6_offload.c
··· 89 89 unsigned int unfrag_ip6hlen; 90 90 u8 *prevhdr; 91 91 int offset = 0; 92 - bool tunnel; 92 + bool encap, udpfrag; 93 93 int nhoff; 94 94 95 95 if (unlikely(skb_shinfo(skb)->gso_type & ··· 110 110 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) 111 111 goto out; 112 112 113 - tunnel = SKB_GSO_CB(skb)->encap_level > 0; 114 - if (tunnel) 113 + encap = SKB_GSO_CB(skb)->encap_level > 0; 114 + if (encap) 115 115 features = skb->dev->hw_enc_features & netif_skb_features(skb); 116 116 SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); 117 117 ··· 120 120 segs = ERR_PTR(-EPROTONOSUPPORT); 121 121 122 122 proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); 123 + 124 + if (skb->encapsulation && 125 + skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP)) 126 + udpfrag = proto == IPPROTO_UDP && encap; 127 + else 128 + udpfrag = proto == IPPROTO_UDP && !skb->encapsulation; 123 129 124 130 ops = rcu_dereference(inet6_offloads[proto]); 125 131 if (likely(ops && ops->callbacks.gso_segment)) { ··· 139 133 for (skb = segs; skb; skb = skb->next) { 140 134 ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); 141 135 ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h)); 142 - if (tunnel) { 143 - skb_reset_inner_headers(skb); 144 - skb->encapsulation = 1; 145 - } 146 136 skb->network_header = (u8 *)ipv6h - skb->head; 147 137 148 - if (!tunnel && proto == IPPROTO_UDP) { 138 + if (udpfrag) { 149 139 unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); 150 140 fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen); 151 141 fptr->frag_off = htons(offset); ··· 150 148 offset += (ntohs(ipv6h->payload_len) - 151 149 sizeof(struct frag_hdr)); 152 150 } 151 + if (encap) 152 + skb_reset_inner_headers(skb); 153 153 } 154 154 155 155 out:
+15 -5
net/ipv6/ip6_output.c
··· 342 342 return mtu; 343 343 } 344 344 345 + static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) 346 + { 347 + if (skb->len <= mtu || skb->local_df) 348 + return false; 349 + 350 + if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu) 351 + return true; 352 + 353 + if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) 354 + return false; 355 + 356 + return true; 357 + } 358 + 345 359 int ip6_forward(struct sk_buff *skb) 346 360 { 347 361 struct dst_entry *dst = skb_dst(skb); ··· 480 466 if (mtu < IPV6_MIN_MTU) 481 467 mtu = IPV6_MIN_MTU; 482 468 483 - if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) || 484 - (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) { 469 + if (ip6_pkt_too_big(skb, mtu)) { 485 470 /* Again, force OUTPUT device used as source address */ 486 471 skb->dev = dst->dev; 487 472 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); ··· 530 517 to->tc_index = from->tc_index; 531 518 #endif 532 519 nf_copy(to, from); 533 - #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) 534 - to->nf_trace = from->nf_trace; 535 - #endif 536 520 skb_copy_secmark(to, from); 537 521 } 538 522
+1
net/ipv6/ping.c
··· 135 135 fl6.flowi6_proto = IPPROTO_ICMPV6; 136 136 fl6.saddr = np->saddr; 137 137 fl6.daddr = *daddr; 138 + fl6.flowi6_mark = sk->sk_mark; 138 139 fl6.fl6_icmp_type = user_icmph.icmp6_type; 139 140 fl6.fl6_icmp_code = user_icmph.icmp6_code; 140 141 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+19
net/ipv6/sit.c
··· 475 475 ipip6_tunnel_unlink(sitn, tunnel); 476 476 ipip6_tunnel_del_prl(tunnel, NULL); 477 477 } 478 + ip_tunnel_dst_reset_all(tunnel); 478 479 dev_put(dev); 479 480 } 480 481 ··· 1083 1082 t->parms.link = p->link; 1084 1083 ipip6_tunnel_bind_dev(t->dev); 1085 1084 } 1085 + ip_tunnel_dst_reset_all(t); 1086 1086 netdev_state_change(t->dev); 1087 1087 } 1088 1088 ··· 1114 1112 t->ip6rd.relay_prefix = relay_prefix; 1115 1113 t->ip6rd.prefixlen = ip6rd->prefixlen; 1116 1114 t->ip6rd.relay_prefixlen = ip6rd->relay_prefixlen; 1115 + ip_tunnel_dst_reset_all(t); 1117 1116 netdev_state_change(t->dev); 1118 1117 return 0; 1119 1118 } ··· 1274 1271 err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL); 1275 1272 break; 1276 1273 } 1274 + ip_tunnel_dst_reset_all(t); 1277 1275 netdev_state_change(dev); 1278 1276 break; 1279 1277 ··· 1330 1326 1331 1327 static void ipip6_dev_free(struct net_device *dev) 1332 1328 { 1329 + struct ip_tunnel *tunnel = netdev_priv(dev); 1330 + 1331 + free_percpu(tunnel->dst_cache); 1333 1332 free_percpu(dev->tstats); 1334 1333 free_netdev(dev); 1335 1334 } ··· 1382 1375 u64_stats_init(&ipip6_tunnel_stats->syncp); 1383 1376 } 1384 1377 1378 + tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst); 1379 + if (!tunnel->dst_cache) { 1380 + free_percpu(dev->tstats); 1381 + return -ENOMEM; 1382 + } 1383 + 1385 1384 return 0; 1386 1385 } 1387 1386 ··· 1416 1403 struct pcpu_sw_netstats *ipip6_fb_stats; 1417 1404 ipip6_fb_stats = per_cpu_ptr(dev->tstats, i); 1418 1405 u64_stats_init(&ipip6_fb_stats->syncp); 1406 + } 1407 + 1408 + tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst); 1409 + if (!tunnel->dst_cache) { 1410 + free_percpu(dev->tstats); 1411 + return -ENOMEM; 1419 1412 } 1420 1413 1421 1414 dev_hold(dev);
+1 -1
net/ipv6/udp_offload.c
··· 113 113 fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); 114 114 fptr->nexthdr = nexthdr; 115 115 fptr->reserved = 0; 116 - ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb)); 116 + fptr->identification = skb_shinfo(skb)->ip6_frag_id; 117 117 118 118 /* Fragment the skb. ipv6 header and the remaining fields of the 119 119 * fragment header are updated in ipv6_gso_segment()
+2 -8
net/mac80211/ieee80211_i.h
··· 1692 1692 void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue); 1693 1693 void ieee80211_add_pending_skb(struct ieee80211_local *local, 1694 1694 struct sk_buff *skb); 1695 - void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, 1696 - struct sk_buff_head *skbs, 1697 - void (*fn)(void *data), void *data); 1698 - static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local, 1699 - struct sk_buff_head *skbs) 1700 - { 1701 - ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL); 1702 - } 1695 + void ieee80211_add_pending_skbs(struct ieee80211_local *local, 1696 + struct sk_buff_head *skbs); 1703 1697 void ieee80211_flush_queues(struct ieee80211_local *local, 1704 1698 struct ieee80211_sub_if_data *sdata); 1705 1699
+4 -2
net/mac80211/iface.c
··· 1057 1057 1058 1058 static u16 ieee80211_netdev_select_queue(struct net_device *dev, 1059 1059 struct sk_buff *skb, 1060 - void *accel_priv) 1060 + void *accel_priv, 1061 + select_queue_fallback_t fallback) 1061 1062 { 1062 1063 return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); 1063 1064 } ··· 1076 1075 1077 1076 static u16 ieee80211_monitor_select_queue(struct net_device *dev, 1078 1077 struct sk_buff *skb, 1079 - void *accel_priv) 1078 + void *accel_priv, 1079 + select_queue_fallback_t fallback) 1080 1080 { 1081 1081 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1082 1082 struct ieee80211_local *local = sdata->local;
+24
net/mac80211/mlme.c
··· 222 222 switch (vht_oper->chan_width) { 223 223 case IEEE80211_VHT_CHANWIDTH_USE_HT: 224 224 vht_chandef.width = chandef->width; 225 + vht_chandef.center_freq1 = chandef->center_freq1; 225 226 break; 226 227 case IEEE80211_VHT_CHANWIDTH_80MHZ: 227 228 vht_chandef.width = NL80211_CHAN_WIDTH_80; ··· 272 271 ret = 0; 273 272 274 273 out: 274 + /* 275 + * When tracking the current AP, don't do any further checks if the 276 + * new chandef is identical to the one we're currently using for the 277 + * connection. This keeps us from playing ping-pong with regulatory, 278 + * without it the following can happen (for example): 279 + * - connect to an AP with 80 MHz, world regdom allows 80 MHz 280 + * - AP advertises regdom US 281 + * - CRDA loads regdom US with 80 MHz prohibited (old database) 282 + * - the code below detects an unsupported channel, downgrades, and 283 + * we disconnect from the AP in the caller 284 + * - disconnect causes CRDA to reload world regdomain and the game 285 + * starts anew. 286 + * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881) 287 + * 288 + * It seems possible that there are still scenarios with CSA or real 289 + * bandwidth changes where a this could happen, but those cases are 290 + * less common and wouldn't completely prevent using the AP. 291 + */ 292 + if (tracking && 293 + cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef)) 294 + return ret; 295 + 275 296 /* don't print the message below for VHT mismatch if VHT is disabled */ 276 297 if (ret & IEEE80211_STA_DISABLE_VHT) 277 298 vht_chandef = *chandef; ··· 3776 3753 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 3777 3754 if (WARN_ON(!chanctx_conf)) { 3778 3755 rcu_read_unlock(); 3756 + sta_info_free(local, new_sta); 3779 3757 return -EINVAL; 3780 3758 } 3781 3759 rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
+7
net/mac80211/rx.c
··· 1128 1128 sta->sta.addr, sta->sta.aid); 1129 1129 1130 1130 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { 1131 + /* 1132 + * Clear the flag only if the other one is still set 1133 + * so that the TX path won't start TX'ing new frames 1134 + * directly ... In the case that the driver flag isn't 1135 + * set ieee80211_sta_ps_deliver_wakeup() will clear it. 1136 + */ 1137 + clear_sta_flag(sta, WLAN_STA_PS_STA); 1131 1138 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n", 1132 1139 sta->sta.addr, sta->sta.aid); 1133 1140 return;
+42 -24
net/mac80211/sta_info.c
··· 91 91 return -ENOENT; 92 92 } 93 93 94 - static void cleanup_single_sta(struct sta_info *sta) 94 + static void __cleanup_single_sta(struct sta_info *sta) 95 95 { 96 96 int ac, i; 97 97 struct tid_ampdu_tx *tid_tx; ··· 99 99 struct ieee80211_local *local = sdata->local; 100 100 struct ps_data *ps; 101 101 102 - if (test_sta_flag(sta, WLAN_STA_PS_STA)) { 102 + if (test_sta_flag(sta, WLAN_STA_PS_STA) || 103 + test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { 103 104 if (sta->sdata->vif.type == NL80211_IFTYPE_AP || 104 105 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 105 106 ps = &sdata->bss->ps; ··· 110 109 return; 111 110 112 111 clear_sta_flag(sta, WLAN_STA_PS_STA); 112 + clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 113 113 114 114 atomic_dec(&ps->num_sta_ps); 115 115 sta_info_recalc_tim(sta); ··· 141 139 ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending); 142 140 kfree(tid_tx); 143 141 } 142 + } 144 143 144 + static void cleanup_single_sta(struct sta_info *sta) 145 + { 146 + struct ieee80211_sub_if_data *sdata = sta->sdata; 147 + struct ieee80211_local *local = sdata->local; 148 + 149 + __cleanup_single_sta(sta); 145 150 sta_info_free(local, sta); 146 151 } 147 152 ··· 339 330 rcu_read_unlock(); 340 331 341 332 spin_lock_init(&sta->lock); 333 + spin_lock_init(&sta->ps_lock); 342 334 INIT_WORK(&sta->drv_unblock_wk, sta_unblock); 343 335 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); 344 336 mutex_init(&sta->ampdu_mlme.mtx); ··· 497 487 goto out_err; 498 488 } 499 489 500 - /* notify driver */ 501 - err = sta_info_insert_drv_state(local, sdata, sta); 502 - if (err) 503 - goto out_err; 504 - 505 490 local->num_sta++; 506 491 local->sta_generation++; 507 492 smp_mb(); 493 + 494 + /* simplify things and don't accept BA sessions yet */ 495 + set_sta_flag(sta, WLAN_STA_BLOCK_BA); 508 496 509 497 /* make the station visible */ 510 498 sta_info_hash_add(local, sta); 511 499 512 500 list_add_rcu(&sta->list, &local->sta_list); 513 501 502 + /* notify driver */ 
503 + err = sta_info_insert_drv_state(local, sdata, sta); 504 + if (err) 505 + goto out_remove; 506 + 514 507 set_sta_flag(sta, WLAN_STA_INSERTED); 508 + /* accept BA sessions now */ 509 + clear_sta_flag(sta, WLAN_STA_BLOCK_BA); 515 510 516 511 ieee80211_recalc_min_chandef(sdata); 517 512 ieee80211_sta_debugfs_add(sta); ··· 537 522 mesh_accept_plinks_update(sdata); 538 523 539 524 return 0; 525 + out_remove: 526 + sta_info_hash_del(local, sta); 527 + list_del_rcu(&sta->list); 528 + local->num_sta--; 529 + synchronize_net(); 530 + __cleanup_single_sta(sta); 540 531 out_err: 541 532 mutex_unlock(&local->sta_mtx); 542 533 rcu_read_lock(); ··· 1092 1071 } 1093 1072 EXPORT_SYMBOL(ieee80211_find_sta); 1094 1073 1095 - static void clear_sta_ps_flags(void *_sta) 1074 + /* powersave support code */ 1075 + void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) 1096 1076 { 1097 - struct sta_info *sta = _sta; 1098 1077 struct ieee80211_sub_if_data *sdata = sta->sdata; 1078 + struct ieee80211_local *local = sdata->local; 1079 + struct sk_buff_head pending; 1080 + int filtered = 0, buffered = 0, ac; 1081 + unsigned long flags; 1099 1082 struct ps_data *ps; 1100 1083 1101 1084 if (sdata->vif.type == NL80211_IFTYPE_AP || ··· 1109 1084 ps = &sdata->u.mesh.ps; 1110 1085 else 1111 1086 return; 1112 - 1113 - clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 1114 - if (test_and_clear_sta_flag(sta, WLAN_STA_PS_STA)) 1115 - atomic_dec(&ps->num_sta_ps); 1116 - } 1117 - 1118 - /* powersave support code */ 1119 - void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) 1120 - { 1121 - struct ieee80211_sub_if_data *sdata = sta->sdata; 1122 - struct ieee80211_local *local = sdata->local; 1123 - struct sk_buff_head pending; 1124 - int filtered = 0, buffered = 0, ac; 1125 - unsigned long flags; 1126 1087 1127 1088 clear_sta_flag(sta, WLAN_STA_SP); 1128 1089 ··· 1120 1109 1121 1110 skb_queue_head_init(&pending); 1122 1111 1112 + /* sync with ieee80211_tx_h_unicast_ps_buf */ 1113 + 
spin_lock(&sta->ps_lock); 1123 1114 /* Send all buffered frames to the station */ 1124 1115 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1125 1116 int count = skb_queue_len(&pending), tmp; ··· 1140 1127 buffered += tmp - count; 1141 1128 } 1142 1129 1143 - ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta); 1130 + ieee80211_add_pending_skbs(local, &pending); 1131 + clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 1132 + clear_sta_flag(sta, WLAN_STA_PS_STA); 1133 + spin_unlock(&sta->ps_lock); 1134 + 1135 + atomic_dec(&ps->num_sta_ps); 1144 1136 1145 1137 /* This station just woke up and isn't aware of our SMPS state */ 1146 1138 if (!ieee80211_smps_is_restrictive(sta->known_smps_mode,
+3 -4
net/mac80211/sta_info.h
··· 267 267 * @drv_unblock_wk: used for driver PS unblocking 268 268 * @listen_interval: listen interval of this station, when we're acting as AP 269 269 * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly 270 + * @ps_lock: used for powersave (when mac80211 is the AP) related locking 270 271 * @ps_tx_buf: buffers (per AC) of frames to transmit to this station 271 272 * when it leaves power saving state or polls 272 273 * @tx_filtered: buffers (per AC) of frames we already tried to ··· 357 356 /* use the accessors defined below */ 358 357 unsigned long _flags; 359 358 360 - /* 361 - * STA powersave frame queues, no more than the internal 362 - * locking required. 363 - */ 359 + /* STA powersave lock and frame queues */ 360 + spinlock_t ps_lock; 364 361 struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS]; 365 362 struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS]; 366 363 unsigned long driver_buffered_tids;
+15
net/mac80211/tx.c
··· 478 478 sta->sta.addr, sta->sta.aid, ac); 479 479 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 480 480 purge_old_ps_buffers(tx->local); 481 + 482 + /* sync with ieee80211_sta_ps_deliver_wakeup */ 483 + spin_lock(&sta->ps_lock); 484 + /* 485 + * STA woke up the meantime and all the frames on ps_tx_buf have 486 + * been queued to pending queue. No reordering can happen, go 487 + * ahead and Tx the packet. 488 + */ 489 + if (!test_sta_flag(sta, WLAN_STA_PS_STA) && 490 + !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { 491 + spin_unlock(&sta->ps_lock); 492 + return TX_CONTINUE; 493 + } 494 + 481 495 if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) { 482 496 struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]); 483 497 ps_dbg(tx->sdata, ··· 506 492 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 507 493 info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; 508 494 skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb); 495 + spin_unlock(&sta->ps_lock); 509 496 510 497 if (!timer_pending(&local->sta_cleanup)) 511 498 mod_timer(&local->sta_cleanup,
+22 -26
net/mac80211/util.c
··· 435 435 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 436 436 } 437 437 438 - void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, 439 - struct sk_buff_head *skbs, 440 - void (*fn)(void *data), void *data) 438 + void ieee80211_add_pending_skbs(struct ieee80211_local *local, 439 + struct sk_buff_head *skbs) 441 440 { 442 441 struct ieee80211_hw *hw = &local->hw; 443 442 struct sk_buff *skb; ··· 459 460 460 461 __skb_queue_tail(&local->pending[queue], skb); 461 462 } 462 - 463 - if (fn) 464 - fn(data); 465 463 466 464 for (i = 0; i < hw->queues; i++) 467 465 __ieee80211_wake_queue(hw, i, ··· 1737 1741 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 1738 1742 1739 1743 /* 1744 + * Reconfigure sched scan if it was interrupted by FW restart or 1745 + * suspend. 1746 + */ 1747 + mutex_lock(&local->mtx); 1748 + sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata, 1749 + lockdep_is_held(&local->mtx)); 1750 + if (sched_scan_sdata && local->sched_scan_req) 1751 + /* 1752 + * Sched scan stopped, but we don't want to report it. Instead, 1753 + * we're trying to reschedule. 1754 + */ 1755 + if (__ieee80211_request_sched_scan_start(sched_scan_sdata, 1756 + local->sched_scan_req)) 1757 + sched_scan_stopped = true; 1758 + mutex_unlock(&local->mtx); 1759 + 1760 + if (sched_scan_stopped) 1761 + cfg80211_sched_scan_stopped(local->hw.wiphy); 1762 + 1763 + /* 1740 1764 * If this is for hw restart things are still running. 1741 1765 * We may want to change that later, however. 1742 1766 */ ··· 1783 1767 #else 1784 1768 WARN_ON(1); 1785 1769 #endif 1786 - 1787 - /* 1788 - * Reconfigure sched scan if it was interrupted by FW restart or 1789 - * suspend. 1790 - */ 1791 - mutex_lock(&local->mtx); 1792 - sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata, 1793 - lockdep_is_held(&local->mtx)); 1794 - if (sched_scan_sdata && local->sched_scan_req) 1795 - /* 1796 - * Sched scan stopped, but we don't want to report it. 
Instead, 1797 - * we're trying to reschedule. 1798 - */ 1799 - if (__ieee80211_request_sched_scan_start(sched_scan_sdata, 1800 - local->sched_scan_req)) 1801 - sched_scan_stopped = true; 1802 - mutex_unlock(&local->mtx); 1803 - 1804 - if (sched_scan_stopped) 1805 - cfg80211_sched_scan_stopped(local->hw.wiphy); 1806 1770 1807 1771 return 0; 1808 1772 }
+5
net/mac80211/wme.c
··· 154 154 return IEEE80211_AC_BE; 155 155 } 156 156 157 + if (skb->protocol == sdata->control_port_protocol) { 158 + skb->priority = 7; 159 + return ieee80211_downgrade_queue(sdata, skb); 160 + } 161 + 157 162 /* use the data classifier to determine what 802.1d tag the 158 163 * data frame has */ 159 164 rcu_read_lock();
+14 -21
net/netfilter/nf_conntrack_netlink.c
··· 1310 1310 } 1311 1311 1312 1312 static int 1313 - ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[]) 1313 + ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[]) 1314 1314 { 1315 1315 #ifdef CONFIG_NF_NAT_NEEDED 1316 1316 int ret; 1317 1317 1318 - if (cda[CTA_NAT_DST]) { 1319 - ret = ctnetlink_parse_nat_setup(ct, 1320 - NF_NAT_MANIP_DST, 1321 - cda[CTA_NAT_DST]); 1322 - if (ret < 0) 1323 - return ret; 1324 - } 1325 - if (cda[CTA_NAT_SRC]) { 1326 - ret = ctnetlink_parse_nat_setup(ct, 1327 - NF_NAT_MANIP_SRC, 1328 - cda[CTA_NAT_SRC]); 1329 - if (ret < 0) 1330 - return ret; 1331 - } 1332 - return 0; 1318 + ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST, 1319 + cda[CTA_NAT_DST]); 1320 + if (ret < 0) 1321 + return ret; 1322 + 1323 + ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC, 1324 + cda[CTA_NAT_SRC]); 1325 + return ret; 1333 1326 #else 1327 + if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC]) 1328 + return 0; 1334 1329 return -EOPNOTSUPP; 1335 1330 #endif 1336 1331 } ··· 1654 1659 goto err2; 1655 1660 } 1656 1661 1657 - if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { 1658 - err = ctnetlink_change_nat(ct, cda); 1659 - if (err < 0) 1660 - goto err2; 1661 - } 1662 + err = ctnetlink_setup_nat(ct, cda); 1663 + if (err < 0) 1664 + goto err2; 1662 1665 1663 1666 nf_ct_acct_ext_add(ct, GFP_ATOMIC); 1664 1667 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
+35 -21
net/netfilter/nf_nat_core.c
··· 432 432 } 433 433 EXPORT_SYMBOL(nf_nat_setup_info); 434 434 435 - unsigned int 436 - nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) 435 + static unsigned int 436 + __nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip) 437 437 { 438 438 /* Force range to this IP; let proto decide mapping for 439 439 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). 440 440 * Use reply in case it's already been mangled (eg local packet). 441 441 */ 442 442 union nf_inet_addr ip = 443 - (HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ? 443 + (manip == NF_NAT_MANIP_SRC ? 444 444 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 : 445 445 ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3); 446 446 struct nf_nat_range range = { ··· 448 448 .min_addr = ip, 449 449 .max_addr = ip, 450 450 }; 451 - return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum)); 451 + return nf_nat_setup_info(ct, &range, manip); 452 + } 453 + 454 + unsigned int 455 + nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) 456 + { 457 + return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum)); 452 458 } 453 459 EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding); 454 460 ··· 708 702 709 703 static int 710 704 nfnetlink_parse_nat(const struct nlattr *nat, 711 - const struct nf_conn *ct, struct nf_nat_range *range) 705 + const struct nf_conn *ct, struct nf_nat_range *range, 706 + const struct nf_nat_l3proto *l3proto) 712 707 { 713 - const struct nf_nat_l3proto *l3proto; 714 708 struct nlattr *tb[CTA_NAT_MAX+1]; 715 709 int err; 716 710 ··· 720 714 if (err < 0) 721 715 return err; 722 716 723 - rcu_read_lock(); 724 - l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct)); 725 - if (l3proto == NULL) { 726 - err = -EAGAIN; 727 - goto out; 728 - } 729 717 err = l3proto->nlattr_to_range(tb, range); 730 718 if (err < 0) 731 - goto out; 719 + return err; 732 720 733 721 if (!tb[CTA_NAT_PROTO]) 734 - goto out; 722 + return 0; 735 723 736 - err = 
nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range); 737 - out: 738 - rcu_read_unlock(); 739 - return err; 724 + return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range); 740 725 } 741 726 727 + /* This function is called under rcu_read_lock() */ 742 728 static int 743 729 nfnetlink_parse_nat_setup(struct nf_conn *ct, 744 730 enum nf_nat_manip_type manip, 745 731 const struct nlattr *attr) 746 732 { 747 733 struct nf_nat_range range; 734 + const struct nf_nat_l3proto *l3proto; 748 735 int err; 749 736 750 - err = nfnetlink_parse_nat(attr, ct, &range); 737 + /* Should not happen, restricted to creating new conntracks 738 + * via ctnetlink. 739 + */ 740 + if (WARN_ON_ONCE(nf_nat_initialized(ct, manip))) 741 + return -EEXIST; 742 + 743 + /* Make sure that L3 NAT is there by when we call nf_nat_setup_info to 744 + * attach the null binding, otherwise this may oops. 745 + */ 746 + l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct)); 747 + if (l3proto == NULL) 748 + return -EAGAIN; 749 + 750 + /* No NAT information has been passed, allocate the null-binding */ 751 + if (attr == NULL) 752 + return __nf_nat_alloc_null_binding(ct, manip); 753 + 754 + err = nfnetlink_parse_nat(attr, ct, &range, l3proto); 751 755 if (err < 0) 752 756 return err; 753 - if (nf_nat_initialized(ct, manip)) 754 - return -EEXIST; 755 757 756 758 return nf_nat_setup_info(ct, &range, manip); 757 759 }
+2 -2
net/netfilter/nft_meta.c
··· 116 116 skb->sk->sk_socket->file->f_cred->fsgid); 117 117 read_unlock_bh(&skb->sk->sk_callback_lock); 118 118 break; 119 - #ifdef CONFIG_NET_CLS_ROUTE 119 + #ifdef CONFIG_IP_ROUTE_CLASSID 120 120 case NFT_META_RTCLASSID: { 121 121 const struct dst_entry *dst = skb_dst(skb); 122 122 ··· 199 199 case NFT_META_OIFTYPE: 200 200 case NFT_META_SKUID: 201 201 case NFT_META_SKGID: 202 - #ifdef CONFIG_NET_CLS_ROUTE 202 + #ifdef CONFIG_IP_ROUTE_CLASSID 203 203 case NFT_META_RTCLASSID: 204 204 #endif 205 205 #ifdef CONFIG_NETWORK_SECMARK
+2 -1
net/netfilter/nft_payload.c
··· 135 135 if (len == 0 || len > FIELD_SIZEOF(struct nft_data, data)) 136 136 return ERR_PTR(-EINVAL); 137 137 138 - if (len <= 4 && IS_ALIGNED(offset, len) && base != NFT_PAYLOAD_LL_HEADER) 138 + if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) && 139 + base != NFT_PAYLOAD_LL_HEADER) 139 140 return &nft_payload_fast_ops; 140 141 else 141 142 return &nft_payload_ops;
+2 -2
net/netfilter/nft_reject_inet.c
··· 21 21 { 22 22 switch (pkt->ops->pf) { 23 23 case NFPROTO_IPV4: 24 - nft_reject_ipv4_eval(expr, data, pkt); 24 + return nft_reject_ipv4_eval(expr, data, pkt); 25 25 case NFPROTO_IPV6: 26 - nft_reject_ipv6_eval(expr, data, pkt); 26 + return nft_reject_ipv6_eval(expr, data, pkt); 27 27 } 28 28 } 29 29
+2 -2
net/netlink/af_netlink.c
··· 1489 1489 if (addr->sa_family != AF_NETLINK) 1490 1490 return -EINVAL; 1491 1491 1492 - /* Only superuser is allowed to send multicasts */ 1493 - if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND)) 1492 + if ((nladdr->nl_groups || nladdr->nl_pid) && 1493 + !netlink_capable(sock, NL_CFG_F_NONROOT_SEND)) 1494 1494 return -EPERM; 1495 1495 1496 1496 if (!nlk->portid)
+1 -1
net/nfc/nci/core.c
··· 301 301 rc = __nci_request(ndev, nci_reset_req, 0, 302 302 msecs_to_jiffies(NCI_RESET_TIMEOUT)); 303 303 304 - if (ndev->ops->setup(ndev)) 304 + if (ndev->ops->setup) 305 305 ndev->ops->setup(ndev); 306 306 307 307 if (!rc) {
+22 -4
net/packet/af_packet.c
··· 308 308 return po->xmit == packet_direct_xmit; 309 309 } 310 310 311 - static u16 packet_pick_tx_queue(struct net_device *dev) 311 + static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) 312 312 { 313 313 return (u16) raw_smp_processor_id() % dev->real_num_tx_queues; 314 + } 315 + 316 + static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) 317 + { 318 + const struct net_device_ops *ops = dev->netdev_ops; 319 + u16 queue_index; 320 + 321 + if (ops->ndo_select_queue) { 322 + queue_index = ops->ndo_select_queue(dev, skb, NULL, 323 + __packet_pick_tx_queue); 324 + queue_index = netdev_cap_txqueue(dev, queue_index); 325 + } else { 326 + queue_index = __packet_pick_tx_queue(dev, skb); 327 + } 328 + 329 + skb_set_queue_mapping(skb, queue_index); 314 330 } 315 331 316 332 /* register_prot_hook must be invoked with the po->bind_lock held, ··· 2301 2285 } 2302 2286 } 2303 2287 2304 - skb_set_queue_mapping(skb, packet_pick_tx_queue(dev)); 2288 + packet_pick_tx_queue(dev, skb); 2289 + 2305 2290 skb->destructor = tpacket_destruct_skb; 2306 2291 __packet_set_status(po, ph, TP_STATUS_SENDING); 2307 2292 packet_inc_pending(&po->tx_ring); ··· 2516 2499 skb->dev = dev; 2517 2500 skb->priority = sk->sk_priority; 2518 2501 skb->mark = sk->sk_mark; 2519 - skb_set_queue_mapping(skb, packet_pick_tx_queue(dev)); 2502 + 2503 + packet_pick_tx_queue(dev, skb); 2520 2504 2521 2505 if (po->has_vnet_hdr) { 2522 2506 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { ··· 3804 3786 */ 3805 3787 if (!tx_ring) 3806 3788 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring); 3807 - break; 3789 + break; 3808 3790 default: 3809 3791 break; 3810 3792 }
+16 -5
net/sched/sch_pie.c
··· 15 15 * 16 16 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no> 17 17 * University of Oslo, Norway. 18 + * 19 + * References: 20 + * IETF draft submission: http://tools.ietf.org/html/draft-pan-aqm-pie-00 21 + * IEEE Conference on High Performance Switching and Routing 2013 : 22 + * "PIE: A * Lightweight Control Scheme to Address the Bufferbloat Problem" 18 23 */ 19 24 20 25 #include <linux/module.h> ··· 41 36 psched_time_t target; /* user specified target delay in pschedtime */ 42 37 u32 tupdate; /* timer frequency (in jiffies) */ 43 38 u32 limit; /* number of packets that can be enqueued */ 44 - u32 alpha; /* alpha and beta are between -4 and 4 */ 39 + u32 alpha; /* alpha and beta are between 0 and 32 */ 45 40 u32 beta; /* and are used for shift relative to 1 */ 46 41 bool ecn; /* true if ecn is enabled */ 47 42 bool bytemode; /* to scale drop early prob based on pkt size */ ··· 331 326 if (qdelay == 0 && qlen != 0) 332 327 update_prob = false; 333 328 334 - /* Add ranges for alpha and beta, more aggressive for high dropping 335 - * mode and gentle steps for light dropping mode 336 - * In light dropping mode, take gentle steps; in medium dropping mode, 337 - * take medium steps; in high dropping mode, take big steps. 329 + /* In the algorithm, alpha and beta are between 0 and 2 with typical 330 + * value for alpha as 0.125. In this implementation, we use values 0-32 331 + * passed from user space to represent this. Also, alpha and beta have 332 + * unit of HZ and need to be scaled before they can used to update 333 + * probability. alpha/beta are updated locally below by 1) scaling them 334 + * appropriately 2) scaling down by 16 to come to 0-2 range. 335 + * Please see paper for details. 336 + * 337 + * We scale alpha and beta differently depending on whether we are in 338 + * light, medium or high dropping mode. 338 339 */ 339 340 if (q->vars.prob < MAX_PROB / 100) { 340 341 alpha =
+12 -12
net/sched/sch_tbf.c
··· 334 334 qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate, 335 335 tb[TCA_TBF_PTAB])); 336 336 337 - if (q->qdisc != &noop_qdisc) { 338 - err = fifo_set_limit(q->qdisc, qopt->limit); 339 - if (err) 340 - goto done; 341 - } else if (qopt->limit > 0) { 342 - child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit); 343 - if (IS_ERR(child)) { 344 - err = PTR_ERR(child); 345 - goto done; 346 - } 347 - } 348 - 349 337 buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U); 350 338 mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U); 351 339 ··· 376 388 if (!max_size) { 377 389 err = -EINVAL; 378 390 goto done; 391 + } 392 + 393 + if (q->qdisc != &noop_qdisc) { 394 + err = fifo_set_limit(q->qdisc, qopt->limit); 395 + if (err) 396 + goto done; 397 + } else if (qopt->limit > 0) { 398 + child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit); 399 + if (IS_ERR(child)) { 400 + err = PTR_ERR(child); 401 + goto done; 402 + } 379 403 } 380 404 381 405 sch_tree_lock(sch);
+106 -125
net/sctp/associola.c
··· 1239 1239 } 1240 1240 1241 1241 /* Update the retran path for sending a retransmitted packet. 1242 - * Round-robin through the active transports, else round-robin 1243 - * through the inactive transports as this is the next best thing 1244 - * we can try. 1242 + * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints: 1243 + * 1244 + * When there is outbound data to send and the primary path 1245 + * becomes inactive (e.g., due to failures), or where the 1246 + * SCTP user explicitly requests to send data to an 1247 + * inactive destination transport address, before reporting 1248 + * an error to its ULP, the SCTP endpoint should try to send 1249 + * the data to an alternate active destination transport 1250 + * address if one exists. 1251 + * 1252 + * When retransmitting data that timed out, if the endpoint 1253 + * is multihomed, it should consider each source-destination 1254 + * address pair in its retransmission selection policy. 1255 + * When retransmitting timed-out data, the endpoint should 1256 + * attempt to pick the most divergent source-destination 1257 + * pair from the original source-destination pair to which 1258 + * the packet was transmitted. 1259 + * 1260 + * Note: Rules for picking the most divergent source-destination 1261 + * pair are an implementation decision and are not specified 1262 + * within this document. 1263 + * 1264 + * Our basic strategy is to round-robin transports in priorities 1265 + * according to sctp_state_prio_map[] e.g., if no such 1266 + * transport with state SCTP_ACTIVE exists, round-robin through 1267 + * SCTP_UNKNOWN, etc. You get the picture. 
1245 1268 */ 1246 - void sctp_assoc_update_retran_path(struct sctp_association *asoc) 1269 + static const u8 sctp_trans_state_to_prio_map[] = { 1270 + [SCTP_ACTIVE] = 3, /* best case */ 1271 + [SCTP_UNKNOWN] = 2, 1272 + [SCTP_PF] = 1, 1273 + [SCTP_INACTIVE] = 0, /* worst case */ 1274 + }; 1275 + 1276 + static u8 sctp_trans_score(const struct sctp_transport *trans) 1247 1277 { 1248 - struct sctp_transport *t, *next; 1249 - struct list_head *head = &asoc->peer.transport_addr_list; 1250 - struct list_head *pos; 1251 - 1252 - if (asoc->peer.transport_count == 1) 1253 - return; 1254 - 1255 - /* Find the next transport in a round-robin fashion. */ 1256 - t = asoc->peer.retran_path; 1257 - pos = &t->transports; 1258 - next = NULL; 1259 - 1260 - while (1) { 1261 - /* Skip the head. */ 1262 - if (pos->next == head) 1263 - pos = head->next; 1264 - else 1265 - pos = pos->next; 1266 - 1267 - t = list_entry(pos, struct sctp_transport, transports); 1268 - 1269 - /* We have exhausted the list, but didn't find any 1270 - * other active transports. If so, use the next 1271 - * transport. 1272 - */ 1273 - if (t == asoc->peer.retran_path) { 1274 - t = next; 1275 - break; 1276 - } 1277 - 1278 - /* Try to find an active transport. */ 1279 - 1280 - if ((t->state == SCTP_ACTIVE) || 1281 - (t->state == SCTP_UNKNOWN)) { 1282 - break; 1283 - } else { 1284 - /* Keep track of the next transport in case 1285 - * we don't find any active transport. 1286 - */ 1287 - if (t->state != SCTP_UNCONFIRMED && !next) 1288 - next = t; 1289 - } 1290 - } 1291 - 1292 - if (t) 1293 - asoc->peer.retran_path = t; 1294 - else 1295 - t = asoc->peer.retran_path; 1296 - 1297 - pr_debug("%s: association:%p addr:%pISpc\n", __func__, asoc, 1298 - &t->ipaddr.sa); 1278 + return sctp_trans_state_to_prio_map[trans->state]; 1299 1279 } 1300 1280 1301 - /* Choose the transport for sending retransmit packet. 
*/ 1302 - struct sctp_transport *sctp_assoc_choose_alter_transport( 1303 - struct sctp_association *asoc, struct sctp_transport *last_sent_to) 1281 + static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr, 1282 + struct sctp_transport *best) 1283 + { 1284 + if (best == NULL) 1285 + return curr; 1286 + 1287 + return sctp_trans_score(curr) > sctp_trans_score(best) ? curr : best; 1288 + } 1289 + 1290 + void sctp_assoc_update_retran_path(struct sctp_association *asoc) 1291 + { 1292 + struct sctp_transport *trans = asoc->peer.retran_path; 1293 + struct sctp_transport *trans_next = NULL; 1294 + 1295 + /* We're done as we only have the one and only path. */ 1296 + if (asoc->peer.transport_count == 1) 1297 + return; 1298 + /* If active_path and retran_path are the same and active, 1299 + * then this is the only active path. Use it. 1300 + */ 1301 + if (asoc->peer.active_path == asoc->peer.retran_path && 1302 + asoc->peer.active_path->state == SCTP_ACTIVE) 1303 + return; 1304 + 1305 + /* Iterate from retran_path's successor back to retran_path. */ 1306 + for (trans = list_next_entry(trans, transports); 1; 1307 + trans = list_next_entry(trans, transports)) { 1308 + /* Manually skip the head element. */ 1309 + if (&trans->transports == &asoc->peer.transport_addr_list) 1310 + continue; 1311 + if (trans->state == SCTP_UNCONFIRMED) 1312 + continue; 1313 + trans_next = sctp_trans_elect_best(trans, trans_next); 1314 + /* Active is good enough for immediate return. */ 1315 + if (trans_next->state == SCTP_ACTIVE) 1316 + break; 1317 + /* We've reached the end, time to update path. 
*/ 1318 + if (trans == asoc->peer.retran_path) 1319 + break; 1320 + } 1321 + 1322 + if (trans_next != NULL) 1323 + asoc->peer.retran_path = trans_next; 1324 + 1325 + pr_debug("%s: association:%p updated new path to addr:%pISpc\n", 1326 + __func__, asoc, &asoc->peer.retran_path->ipaddr.sa); 1327 + } 1328 + 1329 + struct sctp_transport * 1330 + sctp_assoc_choose_alter_transport(struct sctp_association *asoc, 1331 + struct sctp_transport *last_sent_to) 1304 1332 { 1305 1333 /* If this is the first time packet is sent, use the active path, 1306 1334 * else use the retran path. If the last packet was sent over the 1307 1335 * retran path, update the retran path and use it. 1308 1336 */ 1309 - if (!last_sent_to) 1337 + if (last_sent_to == NULL) { 1310 1338 return asoc->peer.active_path; 1311 - else { 1339 + } else { 1312 1340 if (last_sent_to == asoc->peer.retran_path) 1313 1341 sctp_assoc_update_retran_path(asoc); 1342 + 1314 1343 return asoc->peer.retran_path; 1315 1344 } 1316 1345 } ··· 1396 1367 return false; 1397 1368 } 1398 1369 1399 - /* Increase asoc's rwnd by len and send any window update SACK if needed. */ 1400 - void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) 1370 + /* Update asoc's rwnd for the approximated state in the buffer, 1371 + * and check whether SACK needs to be sent. 
1372 + */ 1373 + void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer) 1401 1374 { 1375 + int rx_count; 1402 1376 struct sctp_chunk *sack; 1403 1377 struct timer_list *timer; 1404 1378 1405 - if (asoc->rwnd_over) { 1406 - if (asoc->rwnd_over >= len) { 1407 - asoc->rwnd_over -= len; 1408 - } else { 1409 - asoc->rwnd += (len - asoc->rwnd_over); 1410 - asoc->rwnd_over = 0; 1411 - } 1412 - } else { 1413 - asoc->rwnd += len; 1414 - } 1379 + if (asoc->ep->rcvbuf_policy) 1380 + rx_count = atomic_read(&asoc->rmem_alloc); 1381 + else 1382 + rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); 1415 1383 1416 - /* If we had window pressure, start recovering it 1417 - * once our rwnd had reached the accumulated pressure 1418 - * threshold. The idea is to recover slowly, but up 1419 - * to the initial advertised window. 1420 - */ 1421 - if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) { 1422 - int change = min(asoc->pathmtu, asoc->rwnd_press); 1423 - asoc->rwnd += change; 1424 - asoc->rwnd_press -= change; 1425 - } 1384 + if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0) 1385 + asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1; 1386 + else 1387 + asoc->rwnd = 0; 1426 1388 1427 - pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n", 1428 - __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, 1429 - asoc->a_rwnd); 1389 + pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n", 1390 + __func__, asoc, asoc->rwnd, rx_count, 1391 + asoc->base.sk->sk_rcvbuf); 1430 1392 1431 1393 /* Send a window update SACK if the rwnd has increased by at least the 1432 1394 * minimum of the association's PMTU and half of the receive buffer. 1433 1395 * The algorithm used is similar to the one described in 1434 1396 * Section 4.2.3.3 of RFC 1122. 
1435 1397 */ 1436 - if (sctp_peer_needs_update(asoc)) { 1398 + if (update_peer && sctp_peer_needs_update(asoc)) { 1437 1399 asoc->a_rwnd = asoc->rwnd; 1438 1400 1439 1401 pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " ··· 1446 1426 } 1447 1427 } 1448 1428 1449 - /* Decrease asoc's rwnd by len. */ 1450 - void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) 1451 - { 1452 - int rx_count; 1453 - int over = 0; 1454 - 1455 - if (unlikely(!asoc->rwnd || asoc->rwnd_over)) 1456 - pr_debug("%s: association:%p has asoc->rwnd:%u, " 1457 - "asoc->rwnd_over:%u!\n", __func__, asoc, 1458 - asoc->rwnd, asoc->rwnd_over); 1459 - 1460 - if (asoc->ep->rcvbuf_policy) 1461 - rx_count = atomic_read(&asoc->rmem_alloc); 1462 - else 1463 - rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); 1464 - 1465 - /* If we've reached or overflowed our receive buffer, announce 1466 - * a 0 rwnd if rwnd would still be positive. Store the 1467 - * the potential pressure overflow so that the window can be restored 1468 - * back to original value. 1469 - */ 1470 - if (rx_count >= asoc->base.sk->sk_rcvbuf) 1471 - over = 1; 1472 - 1473 - if (asoc->rwnd >= len) { 1474 - asoc->rwnd -= len; 1475 - if (over) { 1476 - asoc->rwnd_press += asoc->rwnd; 1477 - asoc->rwnd = 0; 1478 - } 1479 - } else { 1480 - asoc->rwnd_over = len - asoc->rwnd; 1481 - asoc->rwnd = 0; 1482 - } 1483 - 1484 - pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n", 1485 - __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, 1486 - asoc->rwnd_press); 1487 - } 1488 1429 1489 1430 /* Build the bind address list for the association based on info from the 1490 1431 * local endpoint and the remote peer.
+4 -3
net/sctp/sm_sideeffect.c
··· 495 495 } 496 496 497 497 /* If the transport error count is greater than the pf_retrans 498 - * threshold, and less than pathmaxrtx, then mark this transport 499 - * as Partially Failed, ee SCTP Quick Failover Draft, secon 5.1, 500 - * point 1 498 + * threshold, and less than pathmaxrtx, and if the current state 499 + * is not SCTP_UNCONFIRMED, then mark this transport as Partially 500 + * Failed, see SCTP Quick Failover Draft, section 5.1 501 501 */ 502 502 if ((transport->state != SCTP_PF) && 503 + (transport->state != SCTP_UNCONFIRMED) && 503 504 (asoc->pf_retrans < transport->pathmaxrxt) && 504 505 (transport->error_count > asoc->pf_retrans)) { 505 506
+8 -1
net/sctp/sm_statefuns.c
··· 758 758 struct sctp_chunk auth; 759 759 sctp_ierror_t ret; 760 760 761 + /* Make sure that we and the peer are AUTH capable */ 762 + if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) { 763 + kfree_skb(chunk->auth_chunk); 764 + sctp_association_free(new_asoc); 765 + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); 766 + } 767 + 761 768 /* set-up our fake chunk so that we can process it */ 762 769 auth.skb = chunk->auth_chunk; 763 770 auth.asoc = chunk->asoc; ··· 6183 6176 * PMTU. In cases, such as loopback, this might be a rather 6184 6177 * large spill over. 6185 6178 */ 6186 - if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over || 6179 + if ((!chunk->data_accepted) && (!asoc->rwnd || 6187 6180 (datalen > asoc->rwnd + asoc->frag_point))) { 6188 6181 6189 6182 /* If this is the next TSN, consider reneging to make
+32 -15
net/sctp/socket.c
··· 64 64 #include <linux/crypto.h> 65 65 #include <linux/slab.h> 66 66 #include <linux/file.h> 67 + #include <linux/compat.h> 67 68 68 69 #include <net/ip.h> 69 70 #include <net/icmp.h> ··· 1369 1368 /* 1370 1369 * New (hopefully final) interface for the API. 1371 1370 * We use the sctp_getaddrs_old structure so that use-space library 1372 - * can avoid any unnecessary allocations. The only defferent part 1371 + * can avoid any unnecessary allocations. The only different part 1373 1372 * is that we store the actual length of the address buffer into the 1374 - * addrs_num structure member. That way we can re-use the existing 1373 + * addrs_num structure member. That way we can re-use the existing 1375 1374 * code. 1376 1375 */ 1376 + #ifdef CONFIG_COMPAT 1377 + struct compat_sctp_getaddrs_old { 1378 + sctp_assoc_t assoc_id; 1379 + s32 addr_num; 1380 + compat_uptr_t addrs; /* struct sockaddr * */ 1381 + }; 1382 + #endif 1383 + 1377 1384 static int sctp_getsockopt_connectx3(struct sock *sk, int len, 1378 1385 char __user *optval, 1379 1386 int __user *optlen) ··· 1390 1381 sctp_assoc_t assoc_id = 0; 1391 1382 int err = 0; 1392 1383 1393 - if (len < sizeof(param)) 1394 - return -EINVAL; 1384 + #ifdef CONFIG_COMPAT 1385 + if (is_compat_task()) { 1386 + struct compat_sctp_getaddrs_old param32; 1395 1387 1396 - if (copy_from_user(&param, optval, sizeof(param))) 1397 - return -EFAULT; 1388 + if (len < sizeof(param32)) 1389 + return -EINVAL; 1390 + if (copy_from_user(&param32, optval, sizeof(param32))) 1391 + return -EFAULT; 1398 1392 1399 - err = __sctp_setsockopt_connectx(sk, 1400 - (struct sockaddr __user *)param.addrs, 1401 - param.addr_num, &assoc_id); 1393 + param.assoc_id = param32.assoc_id; 1394 + param.addr_num = param32.addr_num; 1395 + param.addrs = compat_ptr(param32.addrs); 1396 + } else 1397 + #endif 1398 + { 1399 + if (len < sizeof(param)) 1400 + return -EINVAL; 1401 + if (copy_from_user(&param, optval, sizeof(param))) 1402 + return -EFAULT; 1403 + } 1402 
1404 1405 + err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *) 1406 + param.addrs, param.addr_num, 1407 + &assoc_id); 1403 1408 if (err == 0 || err == -EINPROGRESS) { 1404 1409 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) 1405 1410 return -EFAULT; ··· 2115 2092 sctp_skb_pull(skb, copied); 2116 2093 skb_queue_head(&sk->sk_receive_queue, skb); 2117 2094 2118 - /* When only partial message is copied to the user, increase 2119 - * rwnd by that amount. If all the data in the skb is read, 2120 - * rwnd is updated when the event is freed. 2121 - */ 2122 - if (!sctp_ulpevent_is_notification(event)) 2123 - sctp_assoc_rwnd_increase(event->asoc, copied); 2124 2095 goto out; 2125 2096 } else if ((event->msg_flags & MSG_NOTIFICATION) || 2126 2097 (event->msg_flags & MSG_EOR))
+11 -7
net/sctp/sysctl.c
··· 151 151 }, 152 152 { 153 153 .procname = "cookie_hmac_alg", 154 + .data = &init_net.sctp.sctp_hmac_alg, 154 155 .maxlen = 8, 155 156 .mode = 0644, 156 157 .proc_handler = proc_sctp_do_hmac_alg, ··· 402 401 403 402 int sctp_sysctl_net_register(struct net *net) 404 403 { 405 - struct ctl_table *table; 406 - int i; 404 + struct ctl_table *table = sctp_net_table; 407 405 408 - table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL); 409 - if (!table) 410 - return -ENOMEM; 406 + if (!net_eq(net, &init_net)) { 407 + int i; 411 408 412 - for (i = 0; table[i].data; i++) 413 - table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; 409 + table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL); 410 + if (!table) 411 + return -ENOMEM; 412 + 413 + for (i = 0; table[i].data; i++) 414 + table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; 415 + } 414 416 415 417 net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); 416 418 return 0;
+6 -2
net/sctp/ulpevent.c
··· 989 989 skb = sctp_event2skb(event); 990 990 /* Set the owner and charge rwnd for bytes received. */ 991 991 sctp_ulpevent_set_owner(event, asoc); 992 - sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb)); 992 + sctp_assoc_rwnd_update(asoc, false); 993 993 994 994 if (!skb->data_len) 995 995 return; ··· 1011 1011 { 1012 1012 struct sk_buff *skb, *frag; 1013 1013 unsigned int len; 1014 + struct sctp_association *asoc; 1014 1015 1015 1016 /* Current stack structures assume that the rcv buffer is 1016 1017 * per socket. For UDP style sockets this is not true as ··· 1036 1035 } 1037 1036 1038 1037 done: 1039 - sctp_assoc_rwnd_increase(event->asoc, len); 1038 + asoc = event->asoc; 1039 + sctp_association_hold(asoc); 1040 1040 sctp_ulpevent_release_owner(event); 1041 + sctp_assoc_rwnd_update(asoc, true); 1042 + sctp_association_put(asoc); 1041 1043 } 1042 1044 1043 1045 static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
+16 -3
net/sunrpc/auth_gss/auth_gss.c
··· 108 108 static DEFINE_SPINLOCK(pipe_version_lock); 109 109 static struct rpc_wait_queue pipe_version_rpc_waitqueue; 110 110 static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue); 111 + static void gss_put_auth(struct gss_auth *gss_auth); 111 112 112 113 static void gss_free_ctx(struct gss_cl_ctx *); 113 114 static const struct rpc_pipe_ops gss_upcall_ops_v0; ··· 321 320 if (gss_msg->ctx != NULL) 322 321 gss_put_ctx(gss_msg->ctx); 323 322 rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue); 323 + gss_put_auth(gss_msg->auth); 324 324 kfree(gss_msg); 325 325 } 326 326 ··· 500 498 default: 501 499 err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name); 502 500 if (err) 503 - goto err_free_msg; 501 + goto err_put_pipe_version; 504 502 }; 503 + kref_get(&gss_auth->kref); 505 504 return gss_msg; 505 + err_put_pipe_version: 506 + put_pipe_version(gss_auth->net); 506 507 err_free_msg: 507 508 kfree(gss_msg); 508 509 err: ··· 996 991 gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); 997 992 if (gss_auth->service == 0) 998 993 goto err_put_mech; 994 + if (!gssd_running(gss_auth->net)) 995 + goto err_put_mech; 999 996 auth = &gss_auth->rpc_auth; 1000 997 auth->au_cslack = GSS_CRED_SLACK >> 2; 1001 998 auth->au_rslack = GSS_VERF_SLACK >> 2; ··· 1069 1062 } 1070 1063 1071 1064 static void 1065 + gss_put_auth(struct gss_auth *gss_auth) 1066 + { 1067 + kref_put(&gss_auth->kref, gss_free_callback); 1068 + } 1069 + 1070 + static void 1072 1071 gss_destroy(struct rpc_auth *auth) 1073 1072 { 1074 1073 struct gss_auth *gss_auth = container_of(auth, ··· 1095 1082 gss_auth->gss_pipe[1] = NULL; 1096 1083 rpcauth_destroy_credcache(auth); 1097 1084 1098 - kref_put(&gss_auth->kref, gss_free_callback); 1085 + gss_put_auth(gss_auth); 1099 1086 } 1100 1087 1101 1088 /* ··· 1266 1253 call_rcu(&cred->cr_rcu, gss_free_cred_callback); 1267 1254 if (ctx) 1268 1255 gss_put_ctx(ctx); 1269 - kref_put(&gss_auth->kref, gss_free_callback); 1256 + 
gss_put_auth(gss_auth); 1270 1257 } 1271 1258 1272 1259 static void
+4 -2
net/sunrpc/backchannel_rqst.c
··· 64 64 free_page((unsigned long)xbufp->head[0].iov_base); 65 65 xbufp = &req->rq_snd_buf; 66 66 free_page((unsigned long)xbufp->head[0].iov_base); 67 - list_del(&req->rq_bc_pa_list); 68 67 kfree(req); 69 68 } 70 69 ··· 167 168 /* 168 169 * Memory allocation failed, free the temporary list 169 170 */ 170 - list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) 171 + list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) { 172 + list_del(&req->rq_bc_pa_list); 171 173 xprt_free_allocation(req); 174 + } 172 175 173 176 dprintk("RPC: setup backchannel transport failed\n"); 174 177 return -ENOMEM; ··· 199 198 xprt_dec_alloc_count(xprt, max_reqs); 200 199 list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) { 201 200 dprintk("RPC: req=%p\n", req); 201 + list_del(&req->rq_bc_pa_list); 202 202 xprt_free_allocation(req); 203 203 if (--max_reqs == 0) 204 204 break;
+5 -1
net/sunrpc/xprtsock.c
··· 510 510 struct rpc_rqst *req = task->tk_rqstp; 511 511 struct rpc_xprt *xprt = req->rq_xprt; 512 512 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 513 + struct sock *sk = transport->inet; 513 514 int ret = -EAGAIN; 514 515 515 516 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", ··· 528 527 * window size 529 528 */ 530 529 set_bit(SOCK_NOSPACE, &transport->sock->flags); 531 - transport->inet->sk_write_pending++; 530 + sk->sk_write_pending++; 532 531 /* ...and wait for more buffer space */ 533 532 xprt_wait_for_buffer_space(task, xs_nospace_callback); 534 533 } ··· 538 537 } 539 538 540 539 spin_unlock_bh(&xprt->transport_lock); 540 + 541 + /* Race breaker in case memory is freed before above code is called */ 542 + sk->sk_write_space(sk); 541 543 return ret; 542 544 } 543 545
+6 -1
net/tipc/bearer.c
··· 610 610 611 611 int tipc_bearer_setup(void) 612 612 { 613 + int err; 614 + 615 + err = register_netdevice_notifier(&notifier); 616 + if (err) 617 + return err; 613 618 dev_add_pack(&tipc_packet_type); 614 - return register_netdevice_notifier(&notifier); 619 + return 0; 615 620 } 616 621 617 622 void tipc_bearer_cleanup(void)
+1 -1
net/tipc/config.c
··· 181 181 if (tipc_own_addr) 182 182 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 183 183 " (cannot change node address once assigned)"); 184 - tipc_core_start_net(addr); 184 + tipc_net_start(addr); 185 185 return tipc_cfg_reply_none(); 186 186 } 187 187
+59 -50
net/tipc/core.c
··· 77 77 } 78 78 79 79 /** 80 - * tipc_core_stop_net - shut down TIPC networking sub-systems 81 - */ 82 - static void tipc_core_stop_net(void) 83 - { 84 - tipc_net_stop(); 85 - tipc_bearer_cleanup(); 86 - } 87 - 88 - /** 89 - * start_net - start TIPC networking sub-systems 90 - */ 91 - int tipc_core_start_net(unsigned long addr) 92 - { 93 - int res; 94 - 95 - tipc_net_start(addr); 96 - res = tipc_bearer_setup(); 97 - if (res < 0) 98 - goto err; 99 - return res; 100 - 101 - err: 102 - tipc_core_stop_net(); 103 - return res; 104 - } 105 - 106 - /** 107 80 * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode 108 81 */ 109 82 static void tipc_core_stop(void) 110 83 { 84 + tipc_handler_stop(); 85 + tipc_net_stop(); 86 + tipc_bearer_cleanup(); 111 87 tipc_netlink_stop(); 112 88 tipc_cfg_stop(); 113 89 tipc_subscr_stop(); ··· 98 122 */ 99 123 static int tipc_core_start(void) 100 124 { 101 - int res; 125 + int err; 102 126 103 127 get_random_bytes(&tipc_random, sizeof(tipc_random)); 104 128 105 - res = tipc_handler_start(); 106 - if (!res) 107 - res = tipc_ref_table_init(tipc_max_ports, tipc_random); 108 - if (!res) 109 - res = tipc_nametbl_init(); 110 - if (!res) 111 - res = tipc_netlink_start(); 112 - if (!res) 113 - res = tipc_socket_init(); 114 - if (!res) 115 - res = tipc_register_sysctl(); 116 - if (!res) 117 - res = tipc_subscr_start(); 118 - if (!res) 119 - res = tipc_cfg_init(); 120 - if (res) { 121 - tipc_handler_stop(); 122 - tipc_core_stop(); 123 - } 124 - return res; 129 + err = tipc_handler_start(); 130 + if (err) 131 + goto out_handler; 132 + 133 + err = tipc_ref_table_init(tipc_max_ports, tipc_random); 134 + if (err) 135 + goto out_reftbl; 136 + 137 + err = tipc_nametbl_init(); 138 + if (err) 139 + goto out_nametbl; 140 + 141 + err = tipc_netlink_start(); 142 + if (err) 143 + goto out_netlink; 144 + 145 + err = tipc_socket_init(); 146 + if (err) 147 + goto out_socket; 148 + 149 + err = tipc_register_sysctl(); 150 + if (err) 151 + goto 
out_sysctl; 152 + 153 + err = tipc_subscr_start(); 154 + if (err) 155 + goto out_subscr; 156 + 157 + err = tipc_cfg_init(); 158 + if (err) 159 + goto out_cfg; 160 + 161 + err = tipc_bearer_setup(); 162 + if (err) 163 + goto out_bearer; 164 + 165 + return 0; 166 + out_bearer: 167 + tipc_cfg_stop(); 168 + out_cfg: 169 + tipc_subscr_stop(); 170 + out_subscr: 171 + tipc_unregister_sysctl(); 172 + out_sysctl: 173 + tipc_socket_stop(); 174 + out_socket: 175 + tipc_netlink_stop(); 176 + out_netlink: 177 + tipc_nametbl_stop(); 178 + out_nametbl: 179 + tipc_ref_table_stop(); 180 + out_reftbl: 181 + tipc_handler_stop(); 182 + out_handler: 183 + return err; 125 184 } 126 185 127 186 static int __init tipc_init(void) ··· 185 174 186 175 static void __exit tipc_exit(void) 187 176 { 188 - tipc_handler_stop(); 189 - tipc_core_stop_net(); 190 177 tipc_core_stop(); 191 178 pr_info("Deactivated\n"); 192 179 }
+1 -1
net/tipc/core.h
··· 90 90 /* 91 91 * Routines available to privileged subsystems 92 92 */ 93 - int tipc_core_start_net(unsigned long); 94 93 int tipc_handler_start(void); 95 94 void tipc_handler_stop(void); 96 95 int tipc_netlink_start(void); ··· 191 192 192 193 struct tipc_skb_cb { 193 194 void *handle; 195 + bool deferred; 194 196 }; 195 197 196 198 #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
+7
net/tipc/link.c
··· 1391 1391 u32 hdr_size; 1392 1392 u32 min_hdr_size; 1393 1393 1394 + /* If this packet comes from the defer queue, the skb has already 1395 + * been validated 1396 + */ 1397 + if (unlikely(TIPC_SKB_CB(buf)->deferred)) 1398 + return 1; 1399 + 1394 1400 if (unlikely(buf->len < MIN_H_SIZE)) 1395 1401 return 0; 1396 1402 ··· 1709 1703 &l_ptr->newest_deferred_in, buf)) { 1710 1704 l_ptr->deferred_inqueue_sz++; 1711 1705 l_ptr->stats.deferred_recv++; 1706 + TIPC_SKB_CB(buf)->deferred = true; 1712 1707 if ((l_ptr->deferred_inqueue_sz % 16) == 1) 1713 1708 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1714 1709 } else
-3
net/tipc/name_table.c
··· 945 945 { 946 946 u32 i; 947 947 948 - if (!table.types) 949 - return; 950 - 951 948 /* Verify name table is empty, then release it */ 952 949 write_lock_bh(&tipc_nametbl_lock); 953 950 for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
-8
net/tipc/netlink.c
··· 83 83 }, 84 84 }; 85 85 86 - static int tipc_genl_family_registered; 87 - 88 86 int tipc_netlink_start(void) 89 87 { 90 88 int res; ··· 92 94 pr_err("Failed to register netlink interface\n"); 93 95 return res; 94 96 } 95 - 96 - tipc_genl_family_registered = 1; 97 97 return 0; 98 98 } 99 99 100 100 void tipc_netlink_stop(void) 101 101 { 102 - if (!tipc_genl_family_registered) 103 - return; 104 - 105 102 genl_unregister_family(&tipc_genl_family); 106 - tipc_genl_family_registered = 0; 107 103 }
-3
net/tipc/ref.c
··· 126 126 */ 127 127 void tipc_ref_table_stop(void) 128 128 { 129 - if (!tipc_ref_table.entries) 130 - return; 131 - 132 129 vfree(tipc_ref_table.entries); 133 130 tipc_ref_table.entries = NULL; 134 131 }
-5
net/tipc/server.c
··· 573 573 kmem_cache_destroy(s->rcvbuf_cache); 574 574 return ret; 575 575 } 576 - s->enabled = 1; 577 576 return ret; 578 577 } 579 578 ··· 582 583 int total = 0; 583 584 int id; 584 585 585 - if (!s->enabled) 586 - return; 587 - 588 - s->enabled = 0; 589 586 spin_lock_bh(&s->idr_lock); 590 587 for (id = 0; total < s->idr_in_use; id++) { 591 588 con = idr_find(&s->conn_idr, id);
-2
net/tipc/server.h
··· 56 56 * @name: server name 57 57 * @imp: message importance 58 58 * @type: socket type 59 - * @enabled: identify whether server is launched or not 60 59 */ 61 60 struct tipc_server { 62 61 struct idr conn_idr; ··· 73 74 const char name[TIPC_SERVER_NAME_LEN]; 74 75 int imp; 75 76 int type; 76 - int enabled; 77 77 }; 78 78 79 79 int tipc_conn_sendmsg(struct tipc_server *s, int conid,
-8
net/tipc/socket.c
··· 70 70 static struct proto tipc_proto; 71 71 static struct proto tipc_proto_kern; 72 72 73 - static int sockets_enabled; 74 - 75 73 /* 76 74 * Revised TIPC socket locking policy: 77 75 * ··· 2025 2027 proto_unregister(&tipc_proto); 2026 2028 goto out; 2027 2029 } 2028 - 2029 - sockets_enabled = 1; 2030 2030 out: 2031 2031 return res; 2032 2032 } ··· 2034 2038 */ 2035 2039 void tipc_socket_stop(void) 2036 2040 { 2037 - if (!sockets_enabled) 2038 - return; 2039 - 2040 - sockets_enabled = 0; 2041 2041 sock_unregister(tipc_family_ops.family); 2042 2042 proto_unregister(&tipc_proto); 2043 2043 }
+10 -2
net/wireless/reg.c
··· 1700 1700 return; 1701 1701 case NL80211_REGDOM_SET_BY_USER: 1702 1702 treatment = reg_process_hint_user(reg_request); 1703 - if (treatment == REG_REQ_OK || 1703 + if (treatment == REG_REQ_IGNORE || 1704 1704 treatment == REG_REQ_ALREADY_SET) 1705 1705 return; 1706 1706 schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142)); ··· 2373 2373 int set_regdom(const struct ieee80211_regdomain *rd) 2374 2374 { 2375 2375 struct regulatory_request *lr; 2376 + bool user_reset = false; 2376 2377 int r; 2377 2378 2378 2379 if (!reg_is_valid_request(rd->alpha2)) { ··· 2390 2389 break; 2391 2390 case NL80211_REGDOM_SET_BY_USER: 2392 2391 r = reg_set_rd_user(rd, lr); 2392 + user_reset = true; 2393 2393 break; 2394 2394 case NL80211_REGDOM_SET_BY_DRIVER: 2395 2395 r = reg_set_rd_driver(rd, lr); ··· 2404 2402 } 2405 2403 2406 2404 if (r) { 2407 - if (r == -EALREADY) 2405 + switch (r) { 2406 + case -EALREADY: 2408 2407 reg_set_request_processed(); 2408 + break; 2409 + default: 2410 + /* Back to world regulatory in case of errors */ 2411 + restore_regulatory_settings(user_reset); 2412 + } 2409 2413 2410 2414 kfree(rd); 2411 2415 return r;
+1 -1
net/xfrm/xfrm_policy.c
··· 1158 1158 if (hlist_unhashed(&pol->bydst)) 1159 1159 return NULL; 1160 1160 1161 - hlist_del(&pol->bydst); 1161 + hlist_del_init(&pol->bydst); 1162 1162 hlist_del(&pol->byidx); 1163 1163 list_del(&pol->walk.all); 1164 1164 net->xfrm.policy_count[dir]--;
+17 -6
net/xfrm/xfrm_state.c
··· 1159 1159 } 1160 1160 x->props.aalgo = orig->props.aalgo; 1161 1161 1162 + if (orig->aead) { 1163 + x->aead = xfrm_algo_aead_clone(orig->aead); 1164 + if (!x->aead) 1165 + goto error; 1166 + } 1162 1167 if (orig->ealg) { 1163 1168 x->ealg = xfrm_algo_clone(orig->ealg); 1164 1169 if (!x->ealg) ··· 1206 1201 x->props.flags = orig->props.flags; 1207 1202 x->props.extra_flags = orig->props.extra_flags; 1208 1203 1204 + x->tfcpad = orig->tfcpad; 1205 + x->replay_maxdiff = orig->replay_maxdiff; 1206 + x->replay_maxage = orig->replay_maxage; 1209 1207 x->curlft.add_time = orig->curlft.add_time; 1210 1208 x->km.state = orig->km.state; 1211 1209 x->km.seq = orig->km.seq; ··· 1223 1215 return NULL; 1224 1216 } 1225 1217 1226 - /* net->xfrm.xfrm_state_lock is held */ 1227 1218 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net) 1228 1219 { 1229 1220 unsigned int h; 1230 - struct xfrm_state *x; 1221 + struct xfrm_state *x = NULL; 1222 + 1223 + spin_lock_bh(&net->xfrm.xfrm_state_lock); 1231 1224 1232 1225 if (m->reqid) { 1233 1226 h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr, ··· 1245 1236 m->old_family)) 1246 1237 continue; 1247 1238 xfrm_state_hold(x); 1248 - return x; 1239 + break; 1249 1240 } 1250 1241 } else { 1251 1242 h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr, ··· 1260 1251 m->old_family)) 1261 1252 continue; 1262 1253 xfrm_state_hold(x); 1263 - return x; 1254 + break; 1264 1255 } 1265 1256 } 1266 1257 1267 - return NULL; 1258 + spin_unlock_bh(&net->xfrm.xfrm_state_lock); 1259 + 1260 + return x; 1268 1261 } 1269 1262 EXPORT_SYMBOL(xfrm_migrate_state_find); 1270 1263 ··· 1462 1451 { 1463 1452 int err = 0; 1464 1453 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); 1465 - struct net *net = xs_net(*dst); 1454 + struct net *net = xs_net(*src); 1466 1455 1467 1456 if (!afinfo) 1468 1457 return -EAFNOSUPPORT;
-5
net/xfrm/xfrm_user.c
··· 32 32 #include <linux/in6.h> 33 33 #endif 34 34 35 - static inline int aead_len(struct xfrm_algo_aead *alg) 36 - { 37 - return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); 38 - } 39 - 40 35 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type) 41 36 { 42 37 struct nlattr *rt = attrs[type];
+1
scripts/Makefile.lib
··· 152 152 dtc_cpp_flags = -Wp,-MD,$(depfile).pre.tmp -nostdinc \ 153 153 -I$(srctree)/arch/$(SRCARCH)/boot/dts \ 154 154 -I$(srctree)/arch/$(SRCARCH)/boot/dts/include \ 155 + -I$(srctree)/drivers/of/testcase-data \ 155 156 -undef -D__DTS__ 156 157 157 158 # Finds the multi-part object the current object will be linked into
+1 -1
scripts/gen_initramfs_list.sh
··· 257 257 && compr="lzop -9 -f" 258 258 echo "$output_file" | grep -q "\.lz4$" \ 259 259 && [ -x "`which lz4 2> /dev/null`" ] \ 260 - && compr="lz4 -9 -f" 260 + && compr="lz4 -l -9 -f" 261 261 echo "$output_file" | grep -q "\.cpio$" && compr="cat" 262 262 shift 263 263 ;;
+1 -2
scripts/kallsyms.c
··· 330 330 printf("\tPTR\t_text + %#llx\n", 331 331 table[i].addr - _text); 332 332 else 333 - printf("\tPTR\t_text - %#llx\n", 334 - _text - table[i].addr); 333 + printf("\tPTR\t%#llx\n", table[i].addr); 335 334 } else { 336 335 printf("\tPTR\t%#llx\n", table[i].addr); 337 336 }
+13
scripts/mod/modpost.c
··· 1502 1502 #define R_ARM_JUMP24 29 1503 1503 #endif 1504 1504 1505 + #ifndef R_ARM_THM_CALL 1506 + #define R_ARM_THM_CALL 10 1507 + #endif 1508 + #ifndef R_ARM_THM_JUMP24 1509 + #define R_ARM_THM_JUMP24 30 1510 + #endif 1511 + #ifndef R_ARM_THM_JUMP19 1512 + #define R_ARM_THM_JUMP19 51 1513 + #endif 1514 + 1505 1515 static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r) 1506 1516 { 1507 1517 unsigned int r_typ = ELF_R_TYPE(r->r_info); ··· 1525 1515 case R_ARM_PC24: 1526 1516 case R_ARM_CALL: 1527 1517 case R_ARM_JUMP24: 1518 + case R_ARM_THM_CALL: 1519 + case R_ARM_THM_JUMP24: 1520 + case R_ARM_THM_JUMP19: 1528 1521 /* From ARM ABI: ((S + A) | T) - P */ 1529 1522 r->r_addend = (int)(long)(elf->hdr + 1530 1523 sechdr->sh_offset +
+5 -1
security/keys/keyring.c
··· 1000 1000 1001 1001 kenter("{%d}", key->serial); 1002 1002 1003 - BUG_ON(key != ctx->match_data); 1003 + /* We might get a keyring with matching index-key that is nonetheless a 1004 + * different keyring. */ 1005 + if (key != ctx->match_data) 1006 + return 0; 1007 + 1004 1008 ctx->result = ERR_PTR(-EDEADLK); 1005 1009 return 1; 1006 1010 }
+4 -4
security/selinux/ss/policydb.c
··· 3338 3338 if (rc) 3339 3339 return rc; 3340 3340 3341 - buf[0] = ft->stype; 3342 - buf[1] = ft->ttype; 3343 - buf[2] = ft->tclass; 3344 - buf[3] = otype->otype; 3341 + buf[0] = cpu_to_le32(ft->stype); 3342 + buf[1] = cpu_to_le32(ft->ttype); 3343 + buf[2] = cpu_to_le32(ft->tclass); 3344 + buf[3] = cpu_to_le32(otype->otype); 3345 3345 3346 3346 rc = put_entry(buf, sizeof(u32), 4, fp); 3347 3347 if (rc)
+4
sound/pci/hda/patch_analog.c
··· 1026 1026 spec->gen.keep_eapd_on = 1; 1027 1027 spec->gen.vmaster_mute.hook = ad_vmaster_eapd_hook; 1028 1028 spec->eapd_nid = 0x12; 1029 + /* Analog PC Beeper - allow firmware/ACPI beeps */ 1030 + spec->beep_amp = HDA_COMPOSE_AMP_VAL(0x20, 3, 3, HDA_INPUT); 1031 + spec->gen.beep_nid = 0; /* no digital beep */ 1029 1032 } 1030 1033 } 1031 1034 ··· 1095 1092 spec = codec->spec; 1096 1093 1097 1094 spec->gen.mixer_nid = 0x20; 1095 + spec->gen.mixer_merge_nid = 0x21; 1098 1096 spec->gen.beep_nid = 0x10; 1099 1097 set_beep_amp(spec, 0x10, 0, HDA_OUTPUT); 1100 1098
+7 -61
sound/pci/hda/patch_ca0132.c
··· 2662 2662 } 2663 2663 2664 2664 /* 2665 - * PCM stuffs 2666 - */ 2667 - static void ca0132_setup_stream(struct hda_codec *codec, hda_nid_t nid, 2668 - u32 stream_tag, 2669 - int channel_id, int format) 2670 - { 2671 - unsigned int oldval, newval; 2672 - 2673 - if (!nid) 2674 - return; 2675 - 2676 - snd_printdd( 2677 - "ca0132_setup_stream: NID=0x%x, stream=0x%x, " 2678 - "channel=%d, format=0x%x\n", 2679 - nid, stream_tag, channel_id, format); 2680 - 2681 - /* update the format-id if changed */ 2682 - oldval = snd_hda_codec_read(codec, nid, 0, 2683 - AC_VERB_GET_STREAM_FORMAT, 2684 - 0); 2685 - if (oldval != format) { 2686 - msleep(20); 2687 - snd_hda_codec_write(codec, nid, 0, 2688 - AC_VERB_SET_STREAM_FORMAT, 2689 - format); 2690 - } 2691 - 2692 - oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0); 2693 - newval = (stream_tag << 4) | channel_id; 2694 - if (oldval != newval) { 2695 - snd_hda_codec_write(codec, nid, 0, 2696 - AC_VERB_SET_CHANNEL_STREAMID, 2697 - newval); 2698 - } 2699 - } 2700 - 2701 - static void ca0132_cleanup_stream(struct hda_codec *codec, hda_nid_t nid) 2702 - { 2703 - unsigned int val; 2704 - 2705 - if (!nid) 2706 - return; 2707 - 2708 - snd_printdd(KERN_INFO "ca0132_cleanup_stream: NID=0x%x\n", nid); 2709 - 2710 - val = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0); 2711 - if (!val) 2712 - return; 2713 - 2714 - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, 0); 2715 - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, 0); 2716 - } 2717 - 2718 - /* 2719 2665 * PCM callbacks 2720 2666 */ 2721 2667 static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo, ··· 2672 2726 { 2673 2727 struct ca0132_spec *spec = codec->spec; 2674 2728 2675 - ca0132_setup_stream(codec, spec->dacs[0], stream_tag, 0, format); 2729 + snd_hda_codec_setup_stream(codec, spec->dacs[0], stream_tag, 0, format); 2676 2730 2677 2731 return 0; 2678 2732 } ··· 2691 2745 if (spec->effects_switch[PLAY_ENHANCEMENT - 
EFFECT_START_NID]) 2692 2746 msleep(50); 2693 2747 2694 - ca0132_cleanup_stream(codec, spec->dacs[0]); 2748 + snd_hda_codec_cleanup_stream(codec, spec->dacs[0]); 2695 2749 2696 2750 return 0; 2697 2751 } ··· 2768 2822 unsigned int format, 2769 2823 struct snd_pcm_substream *substream) 2770 2824 { 2771 - struct ca0132_spec *spec = codec->spec; 2772 - 2773 - ca0132_setup_stream(codec, spec->adcs[substream->number], 2774 - stream_tag, 0, format); 2825 + snd_hda_codec_setup_stream(codec, hinfo->nid, 2826 + stream_tag, 0, format); 2775 2827 2776 2828 return 0; 2777 2829 } ··· 2783 2839 if (spec->dsp_state == DSP_DOWNLOADING) 2784 2840 return 0; 2785 2841 2786 - ca0132_cleanup_stream(codec, hinfo->nid); 2842 + snd_hda_codec_cleanup_stream(codec, hinfo->nid); 2787 2843 return 0; 2788 2844 } 2789 2845 ··· 4686 4742 return err; 4687 4743 4688 4744 codec->patch_ops = ca0132_patch_ops; 4745 + codec->pcm_format_first = 1; 4746 + codec->no_sticky_stream = 1; 4689 4747 4690 4748 return 0; 4691 4749 }
+52 -1
sound/pci/hda/patch_realtek.c
··· 4253 4253 }; 4254 4254 4255 4255 static const struct snd_pci_quirk alc269_fixup_tbl[] = { 4256 + SND_PCI_QUIRK(0x1025, 0x0283, "Acer TravelMate 8371", ALC269_FIXUP_INV_DMIC), 4256 4257 SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC), 4257 4258 SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC), 4258 4259 SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700), ··· 4309 4308 SND_PCI_QUIRK(0x1028, 0x0651, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4310 4309 SND_PCI_QUIRK(0x1028, 0x0652, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4311 4310 SND_PCI_QUIRK(0x1028, 0x0653, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4311 + SND_PCI_QUIRK(0x1028, 0x0657, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4312 4312 SND_PCI_QUIRK(0x1028, 0x0658, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4313 + SND_PCI_QUIRK(0x1028, 0x065f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4313 4314 SND_PCI_QUIRK(0x1028, 0x0662, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 4314 4315 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 4315 4316 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), ··· 4320 4317 SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4321 4318 SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4322 4319 SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED), 4320 + /* ALC282 */ 4321 + SND_PCI_QUIRK(0x103c, 0x220f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4322 + SND_PCI_QUIRK(0x103c, 0x2213, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4323 + SND_PCI_QUIRK(0x103c, 0x2266, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4324 + SND_PCI_QUIRK(0x103c, 0x2267, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4325 + SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4326 + SND_PCI_QUIRK(0x103c, 0x2269, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4327 + SND_PCI_QUIRK(0x103c, 0x226a, "HP", 
ALC269_FIXUP_HP_MUTE_LED_MIC1), 4328 + SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4329 + SND_PCI_QUIRK(0x103c, 0x227a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4330 + SND_PCI_QUIRK(0x103c, 0x227b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4331 + SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4332 + SND_PCI_QUIRK(0x103c, 0x22a0, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4333 + SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4334 + SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4335 + SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4336 + SND_PCI_QUIRK(0x103c, 0x22c0, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4337 + SND_PCI_QUIRK(0x103c, 0x22c1, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4338 + SND_PCI_QUIRK(0x103c, 0x22c2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4339 + SND_PCI_QUIRK(0x103c, 0x22cd, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4340 + SND_PCI_QUIRK(0x103c, 0x22ce, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4341 + SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4342 + SND_PCI_QUIRK(0x103c, 0x22d0, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4343 + /* ALC290 */ 4344 + SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4345 + SND_PCI_QUIRK(0x103c, 0x2261, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4346 + SND_PCI_QUIRK(0x103c, 0x2262, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4347 + SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4348 + SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4349 + SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4350 + SND_PCI_QUIRK(0x103c, 0x227d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4351 + SND_PCI_QUIRK(0x103c, 0x227e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4352 + SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4353 + SND_PCI_QUIRK(0x103c, 0x2280, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4354 + SND_PCI_QUIRK(0x103c, 0x2281, "HP", 
ALC269_FIXUP_HP_MUTE_LED_MIC1), 4355 + SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4356 + SND_PCI_QUIRK(0x103c, 0x2289, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4357 + SND_PCI_QUIRK(0x103c, 0x228a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4358 + SND_PCI_QUIRK(0x103c, 0x228b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4359 + SND_PCI_QUIRK(0x103c, 0x228c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4360 + SND_PCI_QUIRK(0x103c, 0x228d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4361 + SND_PCI_QUIRK(0x103c, 0x228e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4362 + SND_PCI_QUIRK(0x103c, 0x22c5, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4363 + SND_PCI_QUIRK(0x103c, 0x22c6, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4364 + SND_PCI_QUIRK(0x103c, 0x22c7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4365 + SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4366 + SND_PCI_QUIRK(0x103c, 0x22c3, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4367 + SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4323 4368 SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED), 4324 4369 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 4325 4370 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), ··· 5164 5113 SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 5165 5114 SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 5166 5115 SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_AUTO_MUTE), 5167 - SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 5116 + SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_AUTO_MUTE), 5168 5117 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), 5169 5118 SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP), 5170 5119 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
+33
sound/pci/hda/patch_sigmatel.c
··· 83 83 STAC_DELL_M6_BOTH, 84 84 STAC_DELL_EQ, 85 85 STAC_ALIENWARE_M17X, 86 + STAC_92HD89XX_HP_FRONT_JACK, 86 87 STAC_92HD73XX_MODELS 87 88 }; 88 89 ··· 98 97 STAC_92HD83XXX_HP_LED, 99 98 STAC_92HD83XXX_HP_INV_LED, 100 99 STAC_92HD83XXX_HP_MIC_LED, 100 + STAC_HP_LED_GPIO10, 101 101 STAC_92HD83XXX_HEADSET_JACK, 102 102 STAC_92HD83XXX_HP, 103 103 STAC_HP_ENVY_BASS, ··· 1797 1795 {} 1798 1796 }; 1799 1797 1798 + static const struct hda_pintbl stac92hd89xx_hp_front_jack_pin_configs[] = { 1799 + { 0x0a, 0x02214030 }, 1800 + { 0x0b, 0x02A19010 }, 1801 + {} 1802 + }; 1803 + 1800 1804 static void stac92hd73xx_fixup_ref(struct hda_codec *codec, 1801 1805 const struct hda_fixup *fix, int action) 1802 1806 { ··· 1921 1913 [STAC_92HD73XX_NO_JD] = { 1922 1914 .type = HDA_FIXUP_FUNC, 1923 1915 .v.func = stac92hd73xx_fixup_no_jd, 1916 + }, 1917 + [STAC_92HD89XX_HP_FRONT_JACK] = { 1918 + .type = HDA_FIXUP_PINS, 1919 + .v.pins = stac92hd89xx_hp_front_jack_pin_configs, 1924 1920 } 1925 1921 }; 1926 1922 ··· 1985 1973 "Alienware M17x", STAC_ALIENWARE_M17X), 1986 1974 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490, 1987 1975 "Alienware M17x R3", STAC_DELL_EQ), 1976 + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17, 1977 + "unknown HP", STAC_92HD89XX_HP_FRONT_JACK), 1988 1978 {} /* terminator */ 1989 1979 }; 1990 1980 ··· 2128 2114 spec->mic_mute_led_gpio = 0x08; /* GPIO3 */ 2129 2115 /* resetting controller clears GPIO, so we need to keep on */ 2130 2116 codec->bus->power_keep_link_on = 1; 2117 + } 2118 + } 2119 + 2120 + static void stac92hd83xxx_fixup_hp_led_gpio10(struct hda_codec *codec, 2121 + const struct hda_fixup *fix, int action) 2122 + { 2123 + struct sigmatel_spec *spec = codec->spec; 2124 + 2125 + if (action == HDA_FIXUP_ACT_PRE_PROBE) { 2126 + spec->gpio_led = 0x10; /* GPIO4 */ 2127 + spec->default_polarity = 0; 2131 2128 } 2132 2129 } 2133 2130 ··· 2636 2611 .chained = true, 2637 2612 .chain_id = STAC_92HD83XXX_HP, 2638 2613 }, 2614 + [STAC_HP_LED_GPIO10] = { 2615 + .type = 
HDA_FIXUP_FUNC, 2616 + .v.func = stac92hd83xxx_fixup_hp_led_gpio10, 2617 + .chained = true, 2618 + .chain_id = STAC_92HD83XXX_HP, 2619 + }, 2639 2620 [STAC_92HD83XXX_HEADSET_JACK] = { 2640 2621 .type = HDA_FIXUP_FUNC, 2641 2622 .v.func = stac92hd83xxx_fixup_headset_jack, ··· 2720 2689 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), 2721 2690 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1888, 2722 2691 "HP Envy Spectre", STAC_HP_ENVY_BASS), 2692 + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1899, 2693 + "HP Folio 13", STAC_HP_LED_GPIO10), 2723 2694 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x18df, 2724 2695 "HP Folio", STAC_HP_BNB13_EQ), 2725 2696 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x18F8,
+5 -6
sound/soc/blackfin/Kconfig
··· 11 11 12 12 config SND_BF5XX_SOC_SSM2602 13 13 tristate "SoC SSM2602 Audio Codec Add-On Card support" 14 - depends on SND_BF5XX_I2S && (SPI_MASTER || I2C) 14 + depends on SND_BF5XX_I2S && SND_SOC_I2C_AND_SPI 15 15 select SND_BF5XX_SOC_I2S if !BF60x 16 16 select SND_BF6XX_SOC_I2S if BF60x 17 17 select SND_SOC_SSM2602 ··· 21 21 22 22 config SND_SOC_BFIN_EVAL_ADAU1701 23 23 tristate "Support for the EVAL-ADAU1701MINIZ board on Blackfin eval boards" 24 - depends on SND_BF5XX_I2S 24 + depends on SND_BF5XX_I2S && I2C 25 25 select SND_BF5XX_SOC_I2S 26 26 select SND_SOC_ADAU1701 27 - select I2C 28 27 help 29 28 Say Y if you want to add support for the Analog Devices EVAL-ADAU1701MINIZ 30 29 board connected to one of the Blackfin evaluation boards like the ··· 44 45 45 46 config SND_SOC_BFIN_EVAL_ADAV80X 46 47 tristate "Support for the EVAL-ADAV80X boards on Blackfin eval boards" 47 - depends on SND_BF5XX_I2S && (SPI_MASTER || I2C) 48 + depends on SND_BF5XX_I2S && SND_SOC_I2C_AND_SPI 48 49 select SND_BF5XX_SOC_I2S 49 50 select SND_SOC_ADAV80X 50 51 help ··· 57 58 58 59 config SND_BF5XX_SOC_AD1836 59 60 tristate "SoC AD1836 Audio support for BF5xx" 60 - depends on SND_BF5XX_I2S 61 + depends on SND_BF5XX_I2S && SPI_MASTER 61 62 select SND_BF5XX_SOC_I2S 62 63 select SND_SOC_AD1836 63 64 help ··· 65 66 66 67 config SND_BF5XX_SOC_AD193X 67 68 tristate "SoC AD193X Audio support for Blackfin" 68 - depends on SND_BF5XX_I2S 69 + depends on SND_BF5XX_I2S && SND_SOC_I2C_AND_SPI 69 70 select SND_BF5XX_SOC_I2S 70 71 select SND_SOC_AD193X 71 72 help
+2 -2
sound/soc/codecs/ad1980.c
··· 57 57 static const char *ad1980_rec_sel[] = {"Mic", "CD", "NC", "AUX", "Line", 58 58 "Stereo Mix", "Mono Mix", "Phone"}; 59 59 60 - static const struct soc_enum ad1980_cap_src = 61 - SOC_ENUM_DOUBLE(AC97_REC_SEL, 8, 0, 7, ad1980_rec_sel); 60 + static SOC_ENUM_DOUBLE_DECL(ad1980_cap_src, 61 + AC97_REC_SEL, 8, 0, ad1980_rec_sel); 62 62 63 63 static const struct snd_kcontrol_new ad1980_snd_ac97_controls[] = { 64 64 SOC_DOUBLE("Master Playback Volume", AC97_MASTER, 8, 0, 31, 1),
+12
sound/soc/codecs/da732x.c
··· 1268 1268 }, 1269 1269 }; 1270 1270 1271 + static bool da732x_volatile(struct device *dev, unsigned int reg) 1272 + { 1273 + switch (reg) { 1274 + case DA732X_REG_HPL_DAC_OFF_CNTL: 1275 + case DA732X_REG_HPR_DAC_OFF_CNTL: 1276 + return true; 1277 + default: 1278 + return false; 1279 + } 1280 + } 1281 + 1271 1282 static const struct regmap_config da732x_regmap = { 1272 1283 .reg_bits = 8, 1273 1284 .val_bits = 8, 1274 1285 1275 1286 .max_register = DA732X_MAX_REG, 1287 + .volatile_reg = da732x_volatile, 1276 1288 .reg_defaults = da732x_reg_cache, 1277 1289 .num_reg_defaults = ARRAY_SIZE(da732x_reg_cache), 1278 1290 .cache_type = REGCACHE_RBTREE,
+9 -2
sound/soc/codecs/da9055.c
··· 1523 1523 return 0; 1524 1524 } 1525 1525 1526 + /* 1527 + * DO NOT change the device Ids. The naming is intentionally specific as both 1528 + * the CODEC and PMIC parts of this chip are instantiated separately as I2C 1529 + * devices (both have configurable I2C addresses, and are to all intents and 1530 + * purposes separate). As a result there are specific DA9055 Ids for CODEC 1531 + * and PMIC, which must be different to operate together. 1532 + */ 1526 1533 static const struct i2c_device_id da9055_i2c_id[] = { 1527 - { "da9055", 0 }, 1534 + { "da9055-codec", 0 }, 1528 1535 { } 1529 1536 }; 1530 1537 MODULE_DEVICE_TABLE(i2c, da9055_i2c_id); ··· 1539 1532 /* I2C codec control layer */ 1540 1533 static struct i2c_driver da9055_i2c_driver = { 1541 1534 .driver = { 1542 - .name = "da9055", 1535 + .name = "da9055-codec", 1543 1536 .owner = THIS_MODULE, 1544 1537 }, 1545 1538 .probe = da9055_i2c_probe,
+30 -22
sound/soc/codecs/isabelle.c
··· 140 140 static const char *isabelle_rx2_texts[] = {"VRX2", "ARX2"}; 141 141 142 142 static const struct soc_enum isabelle_rx1_enum[] = { 143 - SOC_ENUM_SINGLE(ISABELLE_VOICE_HPF_CFG_REG, 3, 1, isabelle_rx1_texts), 144 - SOC_ENUM_SINGLE(ISABELLE_AUDIO_HPF_CFG_REG, 5, 1, isabelle_rx1_texts), 143 + SOC_ENUM_SINGLE(ISABELLE_VOICE_HPF_CFG_REG, 3, 144 + ARRAY_SIZE(isabelle_rx1_texts), isabelle_rx1_texts), 145 + SOC_ENUM_SINGLE(ISABELLE_AUDIO_HPF_CFG_REG, 5, 146 + ARRAY_SIZE(isabelle_rx1_texts), isabelle_rx1_texts), 145 147 }; 146 148 147 149 static const struct soc_enum isabelle_rx2_enum[] = { 148 - SOC_ENUM_SINGLE(ISABELLE_VOICE_HPF_CFG_REG, 2, 1, isabelle_rx2_texts), 149 - SOC_ENUM_SINGLE(ISABELLE_AUDIO_HPF_CFG_REG, 4, 1, isabelle_rx2_texts), 150 + SOC_ENUM_SINGLE(ISABELLE_VOICE_HPF_CFG_REG, 2, 151 + ARRAY_SIZE(isabelle_rx2_texts), isabelle_rx2_texts), 152 + SOC_ENUM_SINGLE(ISABELLE_AUDIO_HPF_CFG_REG, 4, 153 + ARRAY_SIZE(isabelle_rx2_texts), isabelle_rx2_texts), 150 154 }; 151 155 152 156 /* Headset DAC playback switches */ ··· 165 161 static const char *isabelle_vtx_texts[] = {"AMIC2", "DMIC"}; 166 162 167 163 static const struct soc_enum isabelle_atx_enum[] = { 168 - SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 7, 1, isabelle_atx_texts), 169 - SOC_ENUM_SINGLE(ISABELLE_DMIC_CFG_REG, 0, 1, isabelle_atx_texts), 164 + SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 7, 165 + ARRAY_SIZE(isabelle_atx_texts), isabelle_atx_texts), 166 + SOC_ENUM_SINGLE(ISABELLE_DMIC_CFG_REG, 0, 167 + ARRAY_SIZE(isabelle_atx_texts), isabelle_atx_texts), 170 168 }; 171 169 172 170 static const struct soc_enum isabelle_vtx_enum[] = { 173 - SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 6, 1, isabelle_vtx_texts), 174 - SOC_ENUM_SINGLE(ISABELLE_DMIC_CFG_REG, 0, 1, isabelle_vtx_texts), 171 + SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 6, 172 + ARRAY_SIZE(isabelle_vtx_texts), isabelle_vtx_texts), 173 + SOC_ENUM_SINGLE(ISABELLE_DMIC_CFG_REG, 0, 174 + ARRAY_SIZE(isabelle_vtx_texts), isabelle_vtx_texts), 175 175 }; 176 176 
177 177 static const struct snd_kcontrol_new atx_mux_controls = ··· 191 183 /* Left analog microphone selection */ 192 184 static const char *isabelle_amic2_texts[] = {"Sub Mic", "Aux/FM Right"}; 193 185 194 - static const struct soc_enum isabelle_amic1_enum[] = { 195 - SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 5, 196 - ARRAY_SIZE(isabelle_amic1_texts), 197 - isabelle_amic1_texts), 198 - }; 186 + static SOC_ENUM_SINGLE_DECL(isabelle_amic1_enum, 187 + ISABELLE_AMIC_CFG_REG, 5, 188 + isabelle_amic1_texts); 199 189 200 - static const struct soc_enum isabelle_amic2_enum[] = { 201 - SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 4, 202 - ARRAY_SIZE(isabelle_amic2_texts), 203 - isabelle_amic2_texts), 204 - }; 190 + static SOC_ENUM_SINGLE_DECL(isabelle_amic2_enum, 191 + ISABELLE_AMIC_CFG_REG, 4, 192 + isabelle_amic2_texts); 205 193 206 194 static const struct snd_kcontrol_new amic1_control = 207 195 SOC_DAPM_ENUM("Route", isabelle_amic1_enum); ··· 210 206 static const char *isabelle_st_voice_texts[] = {"VTX1", "VTX2"}; 211 207 212 208 static const struct soc_enum isabelle_st_audio_enum[] = { 213 - SOC_ENUM_SINGLE(ISABELLE_ATX_STPGA1_CFG_REG, 7, 1, 209 + SOC_ENUM_SINGLE(ISABELLE_ATX_STPGA1_CFG_REG, 7, 210 + ARRAY_SIZE(isabelle_st_audio_texts), 214 211 isabelle_st_audio_texts), 215 - SOC_ENUM_SINGLE(ISABELLE_ATX_STPGA2_CFG_REG, 7, 1, 212 + SOC_ENUM_SINGLE(ISABELLE_ATX_STPGA2_CFG_REG, 7, 213 + ARRAY_SIZE(isabelle_st_audio_texts), 216 214 isabelle_st_audio_texts), 217 215 }; 218 216 219 217 static const struct soc_enum isabelle_st_voice_enum[] = { 220 - SOC_ENUM_SINGLE(ISABELLE_VTX_STPGA1_CFG_REG, 7, 1, 218 + SOC_ENUM_SINGLE(ISABELLE_VTX_STPGA1_CFG_REG, 7, 219 + ARRAY_SIZE(isabelle_st_voice_texts), 221 220 isabelle_st_voice_texts), 222 - SOC_ENUM_SINGLE(ISABELLE_VTX2_STPGA2_CFG_REG, 7, 1, 221 + SOC_ENUM_SINGLE(ISABELLE_VTX2_STPGA2_CFG_REG, 7, 222 + ARRAY_SIZE(isabelle_st_voice_texts), 223 223 isabelle_st_voice_texts), 224 224 }; 225 225
+11 -10
sound/soc/codecs/max98090.c
··· 336 336 case M98090_REG_RECORD_TDM_SLOT: 337 337 case M98090_REG_SAMPLE_RATE: 338 338 case M98090_REG_DMIC34_BIQUAD_BASE ... M98090_REG_DMIC34_BIQUAD_BASE + 0x0E: 339 + case M98090_REG_REVISION_ID: 339 340 return true; 340 341 default: 341 342 return false; ··· 1770 1769 1771 1770 switch (level) { 1772 1771 case SND_SOC_BIAS_ON: 1773 - if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { 1774 - ret = regcache_sync(max98090->regmap); 1775 - 1776 - if (ret != 0) { 1777 - dev_err(codec->dev, 1778 - "Failed to sync cache: %d\n", ret); 1779 - return ret; 1780 - } 1781 - } 1782 - 1783 1772 if (max98090->jack_state == M98090_JACK_STATE_HEADSET) { 1784 1773 /* 1785 1774 * Set to normal bias level. ··· 1783 1792 break; 1784 1793 1785 1794 case SND_SOC_BIAS_STANDBY: 1795 + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { 1796 + ret = regcache_sync(max98090->regmap); 1797 + if (ret != 0) { 1798 + dev_err(codec->dev, 1799 + "Failed to sync cache: %d\n", ret); 1800 + return ret; 1801 + } 1802 + } 1803 + break; 1804 + 1786 1805 case SND_SOC_BIAS_OFF: 1787 1806 /* Set internal pull-up to lowest power mode */ 1788 1807 snd_soc_update_bits(codec, M98090_REG_JACK_DETECT,
+1
sound/soc/codecs/rt5640.c
··· 2093 2093 #ifdef CONFIG_ACPI 2094 2094 static struct acpi_device_id rt5640_acpi_match[] = { 2095 2095 { "INT33CA", 0 }, 2096 + { "10EC5640", 0 }, 2096 2097 { }, 2097 2098 }; 2098 2099 MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match);
+38 -38
sound/soc/codecs/sta32x.c
··· 187 187 13, 16, TLV_DB_SCALE_ITEM(-1500, 300, 0), 188 188 }; 189 189 190 - static const struct soc_enum sta32x_drc_ac_enum = 191 - SOC_ENUM_SINGLE(STA32X_CONFD, STA32X_CONFD_DRC_SHIFT, 192 - 2, sta32x_drc_ac); 193 - static const struct soc_enum sta32x_auto_eq_enum = 194 - SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT, 195 - 3, sta32x_auto_eq_mode); 196 - static const struct soc_enum sta32x_auto_gc_enum = 197 - SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT, 198 - 4, sta32x_auto_gc_mode); 199 - static const struct soc_enum sta32x_auto_xo_enum = 200 - SOC_ENUM_SINGLE(STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT, 201 - 16, sta32x_auto_xo_mode); 202 - static const struct soc_enum sta32x_preset_eq_enum = 203 - SOC_ENUM_SINGLE(STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT, 204 - 32, sta32x_preset_eq_mode); 205 - static const struct soc_enum sta32x_limiter_ch1_enum = 206 - SOC_ENUM_SINGLE(STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT, 207 - 3, sta32x_limiter_select); 208 - static const struct soc_enum sta32x_limiter_ch2_enum = 209 - SOC_ENUM_SINGLE(STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT, 210 - 3, sta32x_limiter_select); 211 - static const struct soc_enum sta32x_limiter_ch3_enum = 212 - SOC_ENUM_SINGLE(STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT, 213 - 3, sta32x_limiter_select); 214 - static const struct soc_enum sta32x_limiter1_attack_rate_enum = 215 - SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxA_SHIFT, 216 - 16, sta32x_limiter_attack_rate); 217 - static const struct soc_enum sta32x_limiter2_attack_rate_enum = 218 - SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxA_SHIFT, 219 - 16, sta32x_limiter_attack_rate); 220 - static const struct soc_enum sta32x_limiter1_release_rate_enum = 221 - SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxR_SHIFT, 222 - 16, sta32x_limiter_release_rate); 223 - static const struct soc_enum sta32x_limiter2_release_rate_enum = 224 - SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxR_SHIFT, 225 - 16, sta32x_limiter_release_rate); 190 + static SOC_ENUM_SINGLE_DECL(sta32x_drc_ac_enum, 191 + STA32X_CONFD, 
STA32X_CONFD_DRC_SHIFT, 192 + sta32x_drc_ac); 193 + static SOC_ENUM_SINGLE_DECL(sta32x_auto_eq_enum, 194 + STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT, 195 + sta32x_auto_eq_mode); 196 + static SOC_ENUM_SINGLE_DECL(sta32x_auto_gc_enum, 197 + STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT, 198 + sta32x_auto_gc_mode); 199 + static SOC_ENUM_SINGLE_DECL(sta32x_auto_xo_enum, 200 + STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT, 201 + sta32x_auto_xo_mode); 202 + static SOC_ENUM_SINGLE_DECL(sta32x_preset_eq_enum, 203 + STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT, 204 + sta32x_preset_eq_mode); 205 + static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch1_enum, 206 + STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT, 207 + sta32x_limiter_select); 208 + static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch2_enum, 209 + STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT, 210 + sta32x_limiter_select); 211 + static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch3_enum, 212 + STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT, 213 + sta32x_limiter_select); 214 + static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_attack_rate_enum, 215 + STA32X_L1AR, STA32X_LxA_SHIFT, 216 + sta32x_limiter_attack_rate); 217 + static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_attack_rate_enum, 218 + STA32X_L2AR, STA32X_LxA_SHIFT, 219 + sta32x_limiter_attack_rate); 220 + static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_release_rate_enum, 221 + STA32X_L1AR, STA32X_LxR_SHIFT, 222 + sta32x_limiter_release_rate); 223 + static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_release_rate_enum, 224 + STA32X_L2AR, STA32X_LxR_SHIFT, 225 + sta32x_limiter_release_rate); 226 226 227 227 /* byte array controls for setting biquad, mixer, scaling coefficients; 228 228 * for biquads all five coefficients need to be set in one go, ··· 331 331 332 332 static int sta32x_cache_sync(struct snd_soc_codec *codec) 333 333 { 334 - struct sta32x_priv *sta32x = codec->control_data; 334 + struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec); 335 335 unsigned int mute; 336 336 int rc; 337 337 ··· 434 434 SOC_ENUM("Limiter1 Attack Rate (dB/ms)", 
sta32x_limiter1_attack_rate_enum), 435 435 SOC_ENUM("Limiter2 Attack Rate (dB/ms)", sta32x_limiter2_attack_rate_enum), 436 436 SOC_ENUM("Limiter1 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum), 437 - SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum), 437 + SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter2_release_rate_enum), 438 438 439 439 /* depending on mode, the attack/release thresholds have 440 440 * two different enum definitions; provide both
+20 -14
sound/soc/codecs/wm8400.c
··· 117 117 static const char *wm8400_digital_sidetone[] = 118 118 {"None", "Left ADC", "Right ADC", "Reserved"}; 119 119 120 - static const struct soc_enum wm8400_left_digital_sidetone_enum = 121 - SOC_ENUM_SINGLE(WM8400_DIGITAL_SIDE_TONE, 122 - WM8400_ADC_TO_DACL_SHIFT, 2, wm8400_digital_sidetone); 120 + static SOC_ENUM_SINGLE_DECL(wm8400_left_digital_sidetone_enum, 121 + WM8400_DIGITAL_SIDE_TONE, 122 + WM8400_ADC_TO_DACL_SHIFT, 123 + wm8400_digital_sidetone); 123 124 124 - static const struct soc_enum wm8400_right_digital_sidetone_enum = 125 - SOC_ENUM_SINGLE(WM8400_DIGITAL_SIDE_TONE, 126 - WM8400_ADC_TO_DACR_SHIFT, 2, wm8400_digital_sidetone); 125 + static SOC_ENUM_SINGLE_DECL(wm8400_right_digital_sidetone_enum, 126 + WM8400_DIGITAL_SIDE_TONE, 127 + WM8400_ADC_TO_DACR_SHIFT, 128 + wm8400_digital_sidetone); 127 129 128 130 static const char *wm8400_adcmode[] = 129 131 {"Hi-fi mode", "Voice mode 1", "Voice mode 2", "Voice mode 3"}; 130 132 131 - static const struct soc_enum wm8400_right_adcmode_enum = 132 - SOC_ENUM_SINGLE(WM8400_ADC_CTRL, WM8400_ADC_HPF_CUT_SHIFT, 3, wm8400_adcmode); 133 + static SOC_ENUM_SINGLE_DECL(wm8400_right_adcmode_enum, 134 + WM8400_ADC_CTRL, 135 + WM8400_ADC_HPF_CUT_SHIFT, 136 + wm8400_adcmode); 133 137 134 138 static const struct snd_kcontrol_new wm8400_snd_controls[] = { 135 139 /* INMIXL */ ··· 426 422 static const char *wm8400_ainlmux[] = 427 423 {"INMIXL Mix", "RXVOICE Mix", "DIFFINL Mix"}; 428 424 429 - static const struct soc_enum wm8400_ainlmux_enum = 430 - SOC_ENUM_SINGLE( WM8400_INPUT_MIXER1, WM8400_AINLMODE_SHIFT, 431 - ARRAY_SIZE(wm8400_ainlmux), wm8400_ainlmux); 425 + static SOC_ENUM_SINGLE_DECL(wm8400_ainlmux_enum, 426 + WM8400_INPUT_MIXER1, 427 + WM8400_AINLMODE_SHIFT, 428 + wm8400_ainlmux); 432 429 433 430 static const struct snd_kcontrol_new wm8400_dapm_ainlmux_controls = 434 431 SOC_DAPM_ENUM("Route", wm8400_ainlmux_enum); ··· 440 435 static const char *wm8400_ainrmux[] = 441 436 {"INMIXR Mix", "RXVOICE Mix", "DIFFINR 
Mix"}; 442 437 443 - static const struct soc_enum wm8400_ainrmux_enum = 444 - SOC_ENUM_SINGLE( WM8400_INPUT_MIXER1, WM8400_AINRMODE_SHIFT, 445 - ARRAY_SIZE(wm8400_ainrmux), wm8400_ainrmux); 438 + static SOC_ENUM_SINGLE_DECL(wm8400_ainrmux_enum, 439 + WM8400_INPUT_MIXER1, 440 + WM8400_AINRMODE_SHIFT, 441 + wm8400_ainrmux); 446 442 447 443 static const struct snd_kcontrol_new wm8400_dapm_ainrmux_controls = 448 444 SOC_DAPM_ENUM("Route", wm8400_ainrmux_enum);
+2 -2
sound/soc/codecs/wm8770.c
··· 196 196 "AIN5", "AIN6", "AIN7", "AIN8" 197 197 }; 198 198 199 - static const struct soc_enum ain_enum = 200 - SOC_ENUM_DOUBLE(WM8770_ADCMUX, 0, 4, 8, ain_text); 199 + static SOC_ENUM_DOUBLE_DECL(ain_enum, 200 + WM8770_ADCMUX, 0, 4, ain_text); 201 201 202 202 static const struct snd_kcontrol_new ain_mux = 203 203 SOC_DAPM_ENUM("Capture Mux", ain_enum);
+22 -22
sound/soc/codecs/wm8900.c
··· 304 304 305 305 static const char *mic_bias_level_txt[] = { "0.9*AVDD", "0.65*AVDD" }; 306 306 307 - static const struct soc_enum mic_bias_level = 308 - SOC_ENUM_SINGLE(WM8900_REG_INCTL, 8, 2, mic_bias_level_txt); 307 + static SOC_ENUM_SINGLE_DECL(mic_bias_level, 308 + WM8900_REG_INCTL, 8, mic_bias_level_txt); 309 309 310 310 static const char *dac_mute_rate_txt[] = { "Fast", "Slow" }; 311 311 312 - static const struct soc_enum dac_mute_rate = 313 - SOC_ENUM_SINGLE(WM8900_REG_DACCTRL, 7, 2, dac_mute_rate_txt); 312 + static SOC_ENUM_SINGLE_DECL(dac_mute_rate, 313 + WM8900_REG_DACCTRL, 7, dac_mute_rate_txt); 314 314 315 315 static const char *dac_deemphasis_txt[] = { 316 316 "Disabled", "32kHz", "44.1kHz", "48kHz" 317 317 }; 318 318 319 - static const struct soc_enum dac_deemphasis = 320 - SOC_ENUM_SINGLE(WM8900_REG_DACCTRL, 4, 4, dac_deemphasis_txt); 319 + static SOC_ENUM_SINGLE_DECL(dac_deemphasis, 320 + WM8900_REG_DACCTRL, 4, dac_deemphasis_txt); 321 321 322 322 static const char *adc_hpf_cut_txt[] = { 323 323 "Hi-fi mode", "Voice mode 1", "Voice mode 2", "Voice mode 3" 324 324 }; 325 325 326 - static const struct soc_enum adc_hpf_cut = 327 - SOC_ENUM_SINGLE(WM8900_REG_ADCCTRL, 5, 4, adc_hpf_cut_txt); 326 + static SOC_ENUM_SINGLE_DECL(adc_hpf_cut, 327 + WM8900_REG_ADCCTRL, 5, adc_hpf_cut_txt); 328 328 329 329 static const char *lr_txt[] = { 330 330 "Left", "Right" 331 331 }; 332 332 333 - static const struct soc_enum aifl_src = 334 - SOC_ENUM_SINGLE(WM8900_REG_AUDIO1, 15, 2, lr_txt); 333 + static SOC_ENUM_SINGLE_DECL(aifl_src, 334 + WM8900_REG_AUDIO1, 15, lr_txt); 335 335 336 - static const struct soc_enum aifr_src = 337 - SOC_ENUM_SINGLE(WM8900_REG_AUDIO1, 14, 2, lr_txt); 336 + static SOC_ENUM_SINGLE_DECL(aifr_src, 337 + WM8900_REG_AUDIO1, 14, lr_txt); 338 338 339 - static const struct soc_enum dacl_src = 340 - SOC_ENUM_SINGLE(WM8900_REG_AUDIO2, 15, 2, lr_txt); 339 + static SOC_ENUM_SINGLE_DECL(dacl_src, 340 + WM8900_REG_AUDIO2, 15, lr_txt); 341 341 342 - 
static const struct soc_enum dacr_src = 343 - SOC_ENUM_SINGLE(WM8900_REG_AUDIO2, 14, 2, lr_txt); 342 + static SOC_ENUM_SINGLE_DECL(dacr_src, 343 + WM8900_REG_AUDIO2, 14, lr_txt); 344 344 345 345 static const char *sidetone_txt[] = { 346 346 "Disabled", "Left ADC", "Right ADC" 347 347 }; 348 348 349 - static const struct soc_enum dacl_sidetone = 350 - SOC_ENUM_SINGLE(WM8900_REG_SIDETONE, 2, 3, sidetone_txt); 349 + static SOC_ENUM_SINGLE_DECL(dacl_sidetone, 350 + WM8900_REG_SIDETONE, 2, sidetone_txt); 351 351 352 - static const struct soc_enum dacr_sidetone = 353 - SOC_ENUM_SINGLE(WM8900_REG_SIDETONE, 0, 3, sidetone_txt); 352 + static SOC_ENUM_SINGLE_DECL(dacr_sidetone, 353 + WM8900_REG_SIDETONE, 0, sidetone_txt); 354 354 355 355 static const struct snd_kcontrol_new wm8900_snd_controls[] = { 356 356 SOC_ENUM("Mic Bias Level", mic_bias_level), ··· 496 496 497 497 static const char *wm8900_lp_mux[] = { "Disabled", "Enabled" }; 498 498 499 - static const struct soc_enum wm8900_lineout2_lp_mux = 500 - SOC_ENUM_SINGLE(WM8900_REG_LOUTMIXCTL1, 1, 2, wm8900_lp_mux); 499 + static SOC_ENUM_SINGLE_DECL(wm8900_lineout2_lp_mux, 500 + WM8900_REG_LOUTMIXCTL1, 1, wm8900_lp_mux); 501 501 502 502 static const struct snd_kcontrol_new wm8900_lineout2_lp = 503 503 SOC_DAPM_ENUM("Route", wm8900_lineout2_lp_mux);
+1 -1
sound/soc/codecs/wm8958-dsp2.c
··· 153 153 154 154 data32 &= 0xffffff; 155 155 156 - wm8994_bulk_write(codec->control_data, 156 + wm8994_bulk_write(wm8994->wm8994, 157 157 data32 & 0xffffff, 158 158 block_len / 2, 159 159 (void *)(data + 8));
-1
sound/soc/codecs/wm8993.c
··· 1562 1562 struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec); 1563 1563 1564 1564 wm8993_set_bias_level(codec, SND_SOC_BIAS_OFF); 1565 - regulator_bulk_free(ARRAY_SIZE(wm8993->supplies), wm8993->supplies); 1566 1565 return 0; 1567 1566 } 1568 1567
+70 -65
sound/soc/codecs/wm8994.c
··· 265 265 "2.7kHz", "1.35kHz", "675Hz", "370Hz", "180Hz", "90Hz", "45Hz" 266 266 }; 267 267 268 - static const struct soc_enum sidetone_hpf = 269 - SOC_ENUM_SINGLE(WM8994_SIDETONE, 7, 7, sidetone_hpf_text); 268 + static SOC_ENUM_SINGLE_DECL(sidetone_hpf, 269 + WM8994_SIDETONE, 7, sidetone_hpf_text); 270 270 271 271 static const char *adc_hpf_text[] = { 272 272 "HiFi", "Voice 1", "Voice 2", "Voice 3" 273 273 }; 274 274 275 - static const struct soc_enum aif1adc1_hpf = 276 - SOC_ENUM_SINGLE(WM8994_AIF1_ADC1_FILTERS, 13, 4, adc_hpf_text); 275 + static SOC_ENUM_SINGLE_DECL(aif1adc1_hpf, 276 + WM8994_AIF1_ADC1_FILTERS, 13, adc_hpf_text); 277 277 278 - static const struct soc_enum aif1adc2_hpf = 279 - SOC_ENUM_SINGLE(WM8994_AIF1_ADC2_FILTERS, 13, 4, adc_hpf_text); 278 + static SOC_ENUM_SINGLE_DECL(aif1adc2_hpf, 279 + WM8994_AIF1_ADC2_FILTERS, 13, adc_hpf_text); 280 280 281 - static const struct soc_enum aif2adc_hpf = 282 - SOC_ENUM_SINGLE(WM8994_AIF2_ADC_FILTERS, 13, 4, adc_hpf_text); 281 + static SOC_ENUM_SINGLE_DECL(aif2adc_hpf, 282 + WM8994_AIF2_ADC_FILTERS, 13, adc_hpf_text); 283 283 284 284 static const DECLARE_TLV_DB_SCALE(aif_tlv, 0, 600, 0); 285 285 static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1); ··· 501 501 "Left", "Right" 502 502 }; 503 503 504 - static const struct soc_enum aif1adcl_src = 505 - SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_1, 15, 2, aif_chan_src_text); 504 + static SOC_ENUM_SINGLE_DECL(aif1adcl_src, 505 + WM8994_AIF1_CONTROL_1, 15, aif_chan_src_text); 506 506 507 - static const struct soc_enum aif1adcr_src = 508 - SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_1, 14, 2, aif_chan_src_text); 507 + static SOC_ENUM_SINGLE_DECL(aif1adcr_src, 508 + WM8994_AIF1_CONTROL_1, 14, aif_chan_src_text); 509 509 510 - static const struct soc_enum aif2adcl_src = 511 - SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_1, 15, 2, aif_chan_src_text); 510 + static SOC_ENUM_SINGLE_DECL(aif2adcl_src, 511 + WM8994_AIF2_CONTROL_1, 15, aif_chan_src_text); 512 512 513 - static const struct 
soc_enum aif2adcr_src = 514 - SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_1, 14, 2, aif_chan_src_text); 513 + static SOC_ENUM_SINGLE_DECL(aif2adcr_src, 514 + WM8994_AIF2_CONTROL_1, 14, aif_chan_src_text); 515 515 516 - static const struct soc_enum aif1dacl_src = 517 - SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 15, 2, aif_chan_src_text); 516 + static SOC_ENUM_SINGLE_DECL(aif1dacl_src, 517 + WM8994_AIF1_CONTROL_2, 15, aif_chan_src_text); 518 518 519 - static const struct soc_enum aif1dacr_src = 520 - SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 14, 2, aif_chan_src_text); 519 + static SOC_ENUM_SINGLE_DECL(aif1dacr_src, 520 + WM8994_AIF1_CONTROL_2, 14, aif_chan_src_text); 521 521 522 - static const struct soc_enum aif2dacl_src = 523 - SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 15, 2, aif_chan_src_text); 522 + static SOC_ENUM_SINGLE_DECL(aif2dacl_src, 523 + WM8994_AIF2_CONTROL_2, 15, aif_chan_src_text); 524 524 525 - static const struct soc_enum aif2dacr_src = 526 - SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 14, 2, aif_chan_src_text); 525 + static SOC_ENUM_SINGLE_DECL(aif2dacr_src, 526 + WM8994_AIF2_CONTROL_2, 14, aif_chan_src_text); 527 527 528 528 static const char *osr_text[] = { 529 529 "Low Power", "High Performance", 530 530 }; 531 531 532 - static const struct soc_enum dac_osr = 533 - SOC_ENUM_SINGLE(WM8994_OVERSAMPLING, 0, 2, osr_text); 532 + static SOC_ENUM_SINGLE_DECL(dac_osr, 533 + WM8994_OVERSAMPLING, 0, osr_text); 534 534 535 - static const struct soc_enum adc_osr = 536 - SOC_ENUM_SINGLE(WM8994_OVERSAMPLING, 1, 2, osr_text); 535 + static SOC_ENUM_SINGLE_DECL(adc_osr, 536 + WM8994_OVERSAMPLING, 1, osr_text); 537 537 538 538 static const struct snd_kcontrol_new wm8994_snd_controls[] = { 539 539 SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8994_AIF1_ADC1_LEFT_VOLUME, ··· 690 690 "30ms", "125ms", "250ms", "500ms", 691 691 }; 692 692 693 - static const struct soc_enum wm8958_aif1dac1_ng_hold = 694 - SOC_ENUM_SINGLE(WM8958_AIF1_DAC1_NOISE_GATE, 695 - WM8958_AIF1DAC1_NG_THR_SHIFT, 4, 
wm8958_ng_text); 693 + static SOC_ENUM_SINGLE_DECL(wm8958_aif1dac1_ng_hold, 694 + WM8958_AIF1_DAC1_NOISE_GATE, 695 + WM8958_AIF1DAC1_NG_THR_SHIFT, 696 + wm8958_ng_text); 696 697 697 - static const struct soc_enum wm8958_aif1dac2_ng_hold = 698 - SOC_ENUM_SINGLE(WM8958_AIF1_DAC2_NOISE_GATE, 699 - WM8958_AIF1DAC2_NG_THR_SHIFT, 4, wm8958_ng_text); 698 + static SOC_ENUM_SINGLE_DECL(wm8958_aif1dac2_ng_hold, 699 + WM8958_AIF1_DAC2_NOISE_GATE, 700 + WM8958_AIF1DAC2_NG_THR_SHIFT, 701 + wm8958_ng_text); 700 702 701 - static const struct soc_enum wm8958_aif2dac_ng_hold = 702 - SOC_ENUM_SINGLE(WM8958_AIF2_DAC_NOISE_GATE, 703 - WM8958_AIF2DAC_NG_THR_SHIFT, 4, wm8958_ng_text); 703 + static SOC_ENUM_SINGLE_DECL(wm8958_aif2dac_ng_hold, 704 + WM8958_AIF2_DAC_NOISE_GATE, 705 + WM8958_AIF2DAC_NG_THR_SHIFT, 706 + wm8958_ng_text); 704 707 705 708 static const struct snd_kcontrol_new wm8958_snd_controls[] = { 706 709 SOC_SINGLE_TLV("AIF3 Boost Volume", WM8958_AIF3_CONTROL_2, 10, 3, 0, aif_tlv), ··· 1344 1341 "DMIC", 1345 1342 }; 1346 1343 1347 - static const struct soc_enum adc_enum = 1348 - SOC_ENUM_SINGLE(0, 0, 2, adc_mux_text); 1344 + static SOC_ENUM_SINGLE_DECL(adc_enum, 1345 + 0, 0, adc_mux_text); 1349 1346 1350 1347 static const struct snd_kcontrol_new adcl_mux = 1351 1348 SOC_DAPM_ENUM_VIRT("ADCL Mux", adc_enum); ··· 1481 1478 "ADC/DMIC1", "DMIC2", 1482 1479 }; 1483 1480 1484 - static const struct soc_enum sidetone1_enum = 1485 - SOC_ENUM_SINGLE(WM8994_SIDETONE, 0, 2, sidetone_text); 1481 + static SOC_ENUM_SINGLE_DECL(sidetone1_enum, 1482 + WM8994_SIDETONE, 0, sidetone_text); 1486 1483 1487 1484 static const struct snd_kcontrol_new sidetone1_mux = 1488 1485 SOC_DAPM_ENUM("Left Sidetone Mux", sidetone1_enum); 1489 1486 1490 - static const struct soc_enum sidetone2_enum = 1491 - SOC_ENUM_SINGLE(WM8994_SIDETONE, 1, 2, sidetone_text); 1487 + static SOC_ENUM_SINGLE_DECL(sidetone2_enum, 1488 + WM8994_SIDETONE, 1, sidetone_text); 1492 1489 1493 1490 static const struct snd_kcontrol_new 
sidetone2_mux = 1494 1491 SOC_DAPM_ENUM("Right Sidetone Mux", sidetone2_enum); ··· 1501 1498 "None", "ADCDAT", 1502 1499 }; 1503 1500 1504 - static const struct soc_enum aif1_loopback_enum = 1505 - SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, WM8994_AIF1_LOOPBACK_SHIFT, 2, 1506 - loopback_text); 1501 + static SOC_ENUM_SINGLE_DECL(aif1_loopback_enum, 1502 + WM8994_AIF1_CONTROL_2, 1503 + WM8994_AIF1_LOOPBACK_SHIFT, 1504 + loopback_text); 1507 1505 1508 1506 static const struct snd_kcontrol_new aif1_loopback = 1509 1507 SOC_DAPM_ENUM("AIF1 Loopback", aif1_loopback_enum); 1510 1508 1511 - static const struct soc_enum aif2_loopback_enum = 1512 - SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, WM8994_AIF2_LOOPBACK_SHIFT, 2, 1513 - loopback_text); 1509 + static SOC_ENUM_SINGLE_DECL(aif2_loopback_enum, 1510 + WM8994_AIF2_CONTROL_2, 1511 + WM8994_AIF2_LOOPBACK_SHIFT, 1512 + loopback_text); 1514 1513 1515 1514 static const struct snd_kcontrol_new aif2_loopback = 1516 1515 SOC_DAPM_ENUM("AIF2 Loopback", aif2_loopback_enum); 1517 1516 1518 - static const struct soc_enum aif1dac_enum = 1519 - SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 0, 2, aif1dac_text); 1517 + static SOC_ENUM_SINGLE_DECL(aif1dac_enum, 1518 + WM8994_POWER_MANAGEMENT_6, 0, aif1dac_text); 1520 1519 1521 1520 static const struct snd_kcontrol_new aif1dac_mux = 1522 1521 SOC_DAPM_ENUM("AIF1DAC Mux", aif1dac_enum); ··· 1527 1522 "AIF2DACDAT", "AIF3DACDAT", 1528 1523 }; 1529 1524 1530 - static const struct soc_enum aif2dac_enum = 1531 - SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 1, 2, aif2dac_text); 1525 + static SOC_ENUM_SINGLE_DECL(aif2dac_enum, 1526 + WM8994_POWER_MANAGEMENT_6, 1, aif2dac_text); 1532 1527 1533 1528 static const struct snd_kcontrol_new aif2dac_mux = 1534 1529 SOC_DAPM_ENUM("AIF2DAC Mux", aif2dac_enum); ··· 1537 1532 "AIF2ADCDAT", "AIF3DACDAT", 1538 1533 }; 1539 1534 1540 - static const struct soc_enum aif2adc_enum = 1541 - SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 2, 2, aif2adc_text); 1535 + static 
SOC_ENUM_SINGLE_DECL(aif2adc_enum, 1536 + WM8994_POWER_MANAGEMENT_6, 2, aif2adc_text); 1542 1537 1543 1538 static const struct snd_kcontrol_new aif2adc_mux = 1544 1539 SOC_DAPM_ENUM("AIF2ADC Mux", aif2adc_enum); ··· 1547 1542 "AIF1ADCDAT", "AIF2ADCDAT", "AIF2DACDAT", "Mono PCM", 1548 1543 }; 1549 1544 1550 - static const struct soc_enum wm8994_aif3adc_enum = 1551 - SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 3, 3, aif3adc_text); 1545 + static SOC_ENUM_SINGLE_DECL(wm8994_aif3adc_enum, 1546 + WM8994_POWER_MANAGEMENT_6, 3, aif3adc_text); 1552 1547 1553 1548 static const struct snd_kcontrol_new wm8994_aif3adc_mux = 1554 1549 SOC_DAPM_ENUM("AIF3ADC Mux", wm8994_aif3adc_enum); 1555 1550 1556 - static const struct soc_enum wm8958_aif3adc_enum = 1557 - SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 3, 4, aif3adc_text); 1551 + static SOC_ENUM_SINGLE_DECL(wm8958_aif3adc_enum, 1552 + WM8994_POWER_MANAGEMENT_6, 3, aif3adc_text); 1558 1553 1559 1554 static const struct snd_kcontrol_new wm8958_aif3adc_mux = 1560 1555 SOC_DAPM_ENUM("AIF3ADC Mux", wm8958_aif3adc_enum); ··· 1563 1558 "None", "AIF2ADCL", "AIF2ADCR", 1564 1559 }; 1565 1560 1566 - static const struct soc_enum mono_pcm_out_enum = 1567 - SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 9, 3, mono_pcm_out_text); 1561 + static SOC_ENUM_SINGLE_DECL(mono_pcm_out_enum, 1562 + WM8994_POWER_MANAGEMENT_6, 9, mono_pcm_out_text); 1568 1563 1569 1564 static const struct snd_kcontrol_new mono_pcm_out_mux = 1570 1565 SOC_DAPM_ENUM("Mono PCM Out Mux", mono_pcm_out_enum); ··· 1574 1569 }; 1575 1570 1576 1571 /* Note that these two control shouldn't be simultaneously switched to AIF3 */ 1577 - static const struct soc_enum aif2dacl_src_enum = 1578 - SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 7, 2, aif2dac_src_text); 1572 + static SOC_ENUM_SINGLE_DECL(aif2dacl_src_enum, 1573 + WM8994_POWER_MANAGEMENT_6, 7, aif2dac_src_text); 1579 1574 1580 1575 static const struct snd_kcontrol_new aif2dacl_src_mux = 1581 1576 SOC_DAPM_ENUM("AIF2DACL Mux", 
aif2dacl_src_enum); 1582 1577 1583 - static const struct soc_enum aif2dacr_src_enum = 1584 - SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 8, 2, aif2dac_src_text); 1578 + static SOC_ENUM_SINGLE_DECL(aif2dacr_src_enum, 1579 + WM8994_POWER_MANAGEMENT_6, 8, aif2dac_src_text); 1585 1580 1586 1581 static const struct snd_kcontrol_new aif2dacr_src_mux = 1587 1582 SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);
+1
sound/soc/davinci/davinci-evm.c
··· 399 399 .driver = { 400 400 .name = "davinci_evm", 401 401 .owner = THIS_MODULE, 402 + .pm = &snd_soc_pm_ops, 402 403 .of_match_table = of_match_ptr(davinci_evm_dt_ids), 403 404 }, 404 405 };
+36 -43
sound/soc/davinci/davinci-mcasp.c
··· 263 263 unsigned int fmt) 264 264 { 265 265 struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai); 266 + int ret = 0; 266 267 268 + pm_runtime_get_sync(mcasp->dev); 267 269 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { 268 270 case SND_SOC_DAIFMT_DSP_B: 269 271 case SND_SOC_DAIFMT_AC97: ··· 319 317 break; 320 318 321 319 default: 322 - return -EINVAL; 320 + ret = -EINVAL; 321 + goto out; 323 322 } 324 323 325 324 switch (fmt & SND_SOC_DAIFMT_INV_MASK) { ··· 357 354 break; 358 355 359 356 default: 360 - return -EINVAL; 357 + ret = -EINVAL; 358 + break; 361 359 } 362 - 363 - return 0; 360 + out: 361 + pm_runtime_put_sync(mcasp->dev); 362 + return ret; 364 363 } 365 364 366 365 static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div) ··· 453 448 return 0; 454 449 } 455 450 456 - static int davinci_hw_common_param(struct davinci_mcasp *mcasp, int stream, 451 + static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream, 457 452 int channels) 458 453 { 459 454 int i; ··· 529 524 return 0; 530 525 } 531 526 532 - static void davinci_hw_param(struct davinci_mcasp *mcasp, int stream) 527 + static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream) 533 528 { 534 529 int i, active_slots; 535 530 u32 mask = 0; 536 531 u32 busel = 0; 532 + 533 + if ((mcasp->tdm_slots < 2) || (mcasp->tdm_slots > 32)) { 534 + dev_err(mcasp->dev, "tdm slot %d not supported\n", 535 + mcasp->tdm_slots); 536 + return -EINVAL; 537 + } 537 538 538 539 active_slots = (mcasp->tdm_slots > 31) ? 
32 : mcasp->tdm_slots; 539 540 for (i = 0; i < active_slots; i++) ··· 550 539 if (!mcasp->dat_port) 551 540 busel = TXSEL; 552 541 553 - if (stream == SNDRV_PCM_STREAM_PLAYBACK) { 554 - /* bit stream is MSB first with no delay */ 555 - /* DSP_B mode */ 556 - mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask); 557 - mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD); 542 + mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask); 543 + mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD); 544 + mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, 545 + FSXMOD(mcasp->tdm_slots), FSXMOD(0x1FF)); 558 546 559 - if ((mcasp->tdm_slots >= 2) && (mcasp->tdm_slots <= 32)) 560 - mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, 561 - FSXMOD(mcasp->tdm_slots), FSXMOD(0x1FF)); 562 - else 563 - printk(KERN_ERR "playback tdm slot %d not supported\n", 564 - mcasp->tdm_slots); 565 - } else { 566 - /* bit stream is MSB first with no delay */ 567 - /* DSP_B mode */ 568 - mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD); 569 - mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask); 547 + mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask); 548 + mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD); 549 + mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, 550 + FSRMOD(mcasp->tdm_slots), FSRMOD(0x1FF)); 570 551 571 - if ((mcasp->tdm_slots >= 2) && (mcasp->tdm_slots <= 32)) 572 - mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, 573 - FSRMOD(mcasp->tdm_slots), FSRMOD(0x1FF)); 574 - else 575 - printk(KERN_ERR "capture tdm slot %d not supported\n", 576 - mcasp->tdm_slots); 577 - } 552 + return 0; 578 553 } 579 554 580 555 /* S/PDIF */ 581 - static void davinci_hw_dit_param(struct davinci_mcasp *mcasp) 556 + static int mcasp_dit_hw_param(struct davinci_mcasp *mcasp) 582 557 { 583 558 /* Set the TX format : 24 bit right rotation, 32 bit slot, Pad 0 584 559 and LSB first */ ··· 586 589 587 590 /* Enable the DIT */ 588 591 mcasp_set_bits(mcasp, 
DAVINCI_MCASP_TXDITCTL_REG, DITEN); 592 + 593 + return 0; 589 594 } 590 595 591 596 static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream, ··· 604 605 u8 slots = mcasp->tdm_slots; 605 606 u8 active_serializers; 606 607 int channels; 608 + int ret; 607 609 struct snd_interval *pcm_channels = hw_param_interval(params, 608 610 SNDRV_PCM_HW_PARAM_CHANNELS); 609 611 channels = pcm_channels->min; 610 612 611 613 active_serializers = (channels + slots - 1) / slots; 612 614 613 - if (davinci_hw_common_param(mcasp, substream->stream, channels) == -EINVAL) 615 + if (mcasp_common_hw_param(mcasp, substream->stream, channels) == -EINVAL) 614 616 return -EINVAL; 615 617 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 616 618 fifo_level = mcasp->txnumevt * active_serializers; ··· 619 619 fifo_level = mcasp->rxnumevt * active_serializers; 620 620 621 621 if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE) 622 - davinci_hw_dit_param(mcasp); 622 + ret = mcasp_dit_hw_param(mcasp); 623 623 else 624 - davinci_hw_param(mcasp, substream->stream); 624 + ret = mcasp_i2s_hw_param(mcasp, substream->stream); 625 + 626 + if (ret) 627 + return ret; 625 628 626 629 switch (params_format(params)) { 627 630 case SNDRV_PCM_FORMAT_U8: ··· 681 678 case SNDRV_PCM_TRIGGER_RESUME: 682 679 case SNDRV_PCM_TRIGGER_START: 683 680 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 684 - ret = pm_runtime_get_sync(mcasp->dev); 685 - if (IS_ERR_VALUE(ret)) 686 - dev_err(mcasp->dev, "pm_runtime_get_sync() failed\n"); 687 681 davinci_mcasp_start(mcasp, substream->stream); 688 682 break; 689 - 690 683 case SNDRV_PCM_TRIGGER_SUSPEND: 691 - davinci_mcasp_stop(mcasp, substream->stream); 692 - ret = pm_runtime_put_sync(mcasp->dev); 693 - if (IS_ERR_VALUE(ret)) 694 - dev_err(mcasp->dev, "pm_runtime_put_sync() failed\n"); 695 - break; 696 - 697 684 case SNDRV_PCM_TRIGGER_STOP: 698 685 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 699 686 davinci_mcasp_stop(mcasp, substream->stream);
+2 -2
sound/soc/fsl/fsl_esai.c
··· 326 326 regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA, 327 327 ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask)); 328 328 regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB, 329 - ESAI_xSMA_xS_MASK, ESAI_xSMB_xS(tx_mask)); 329 + ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask)); 330 330 331 331 regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, 332 332 ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots)); ··· 334 334 regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA, 335 335 ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask)); 336 336 regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB, 337 - ESAI_xSMA_xS_MASK, ESAI_xSMB_xS(rx_mask)); 337 + ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask)); 338 338 339 339 esai_priv->slot_width = slot_width; 340 340
+1 -1
sound/soc/fsl/fsl_esai.h
··· 322 322 #define ESAI_xSMB_xS_SHIFT 0 323 323 #define ESAI_xSMB_xS_WIDTH 16 324 324 #define ESAI_xSMB_xS_MASK (((1 << ESAI_xSMB_xS_WIDTH) - 1) << ESAI_xSMB_xS_SHIFT) 325 - #define ESAI_xSMB_xS(v) (((v) >> ESAI_xSMA_xS_WIDTH) & ESAI_xSMA_xS_MASK) 325 + #define ESAI_xSMB_xS(v) (((v) >> ESAI_xSMA_xS_WIDTH) & ESAI_xSMB_xS_MASK) 326 326 327 327 /* Port C Direction Register -- REG_ESAI_PRRC 0xF8 */ 328 328 #define ESAI_PRRC_PDC_SHIFT 0
-1
sound/soc/fsl/imx-mc13783.c
··· 160 160 .driver = { 161 161 .name = "imx_mc13783", 162 162 .owner = THIS_MODULE, 163 - .pm = &snd_soc_pm_ops, 164 163 }, 165 164 .probe = imx_mc13783_probe, 166 165 .remove = imx_mc13783_remove
+6 -4
sound/soc/fsl/imx-sgtl5000.c
··· 33 33 34 34 static int imx_sgtl5000_dai_init(struct snd_soc_pcm_runtime *rtd) 35 35 { 36 - struct imx_sgtl5000_data *data = container_of(rtd->card, 37 - struct imx_sgtl5000_data, card); 36 + struct imx_sgtl5000_data *data = snd_soc_card_get_drvdata(rtd->card); 38 37 struct device *dev = rtd->card->dev; 39 38 int ret; 40 39 ··· 158 159 data->card.dapm_widgets = imx_sgtl5000_dapm_widgets; 159 160 data->card.num_dapm_widgets = ARRAY_SIZE(imx_sgtl5000_dapm_widgets); 160 161 162 + platform_set_drvdata(pdev, &data->card); 163 + snd_soc_card_set_drvdata(&data->card, data); 164 + 161 165 ret = devm_snd_soc_register_card(&pdev->dev, &data->card); 162 166 if (ret) { 163 167 dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret); 164 168 goto fail; 165 169 } 166 170 167 - platform_set_drvdata(pdev, data); 168 171 of_node_put(ssi_np); 169 172 of_node_put(codec_np); 170 173 ··· 185 184 186 185 static int imx_sgtl5000_remove(struct platform_device *pdev) 187 186 { 188 - struct imx_sgtl5000_data *data = platform_get_drvdata(pdev); 187 + struct snd_soc_card *card = platform_get_drvdata(pdev); 188 + struct imx_sgtl5000_data *data = snd_soc_card_get_drvdata(card); 189 189 190 190 clk_put(data->codec_clk); 191 191
+7 -4
sound/soc/fsl/imx-wm8962.c
··· 71 71 { 72 72 struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai; 73 73 struct imx_priv *priv = &card_priv; 74 - struct imx_wm8962_data *data = platform_get_drvdata(priv->pdev); 74 + struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card); 75 75 struct device *dev = &priv->pdev->dev; 76 76 unsigned int pll_out; 77 77 int ret; ··· 137 137 { 138 138 struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai; 139 139 struct imx_priv *priv = &card_priv; 140 - struct imx_wm8962_data *data = platform_get_drvdata(priv->pdev); 140 + struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card); 141 141 struct device *dev = &priv->pdev->dev; 142 142 int ret; 143 143 ··· 264 264 data->card.late_probe = imx_wm8962_late_probe; 265 265 data->card.set_bias_level = imx_wm8962_set_bias_level; 266 266 267 + platform_set_drvdata(pdev, &data->card); 268 + snd_soc_card_set_drvdata(&data->card, data); 269 + 267 270 ret = devm_snd_soc_register_card(&pdev->dev, &data->card); 268 271 if (ret) { 269 272 dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret); 270 273 goto clk_fail; 271 274 } 272 275 273 - platform_set_drvdata(pdev, data); 274 276 of_node_put(ssi_np); 275 277 of_node_put(codec_np); 276 278 ··· 291 289 292 290 static int imx_wm8962_remove(struct platform_device *pdev) 293 291 { 294 - struct imx_wm8962_data *data = platform_get_drvdata(pdev); 292 + struct snd_soc_card *card = platform_get_drvdata(pdev); 293 + struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card); 295 294 296 295 if (!IS_ERR(data->codec_clk)) 297 296 clk_disable_unprepare(data->codec_clk);
+3 -3
sound/soc/samsung/Kconfig
··· 59 59 select SND_SOC_WM8750 60 60 select SND_S3C2412_SOC_I2S 61 61 help 62 - Sat Y if you want to add support for SoC audio on the Jive. 62 + Say Y if you want to add support for SoC audio on the Jive. 63 63 64 64 config SND_SOC_SAMSUNG_SMDK_WM8580 65 65 tristate "SoC I2S Audio support for WM8580 on SMDK" ··· 145 145 146 146 config SND_SOC_SAMSUNG_SMDK_WM9713 147 147 tristate "SoC AC97 Audio support for SMDK with WM9713" 148 - depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110 || MACH_SMDKV310 || MACH_SMDKC210) 148 + depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110) 149 149 select SND_SOC_WM9713 150 150 select SND_SAMSUNG_AC97 151 151 help 152 - Sat Y if you want to add support for SoC audio on the SMDK. 152 + Say Y if you want to add support for SoC audio on the SMDK. 153 153 154 154 config SND_SOC_SMARTQ 155 155 tristate "SoC I2S Audio support for SmartQ board"
+133 -20
sound/soc/soc-dapm.c
··· 1218 1218 ret = regulator_allow_bypass(w->regulator, false); 1219 1219 if (ret != 0) 1220 1220 dev_warn(w->dapm->dev, 1221 - "ASoC: Failed to bypass %s: %d\n", 1221 + "ASoC: Failed to unbypass %s: %d\n", 1222 1222 w->name, ret); 1223 1223 } 1224 1224 ··· 1228 1228 ret = regulator_allow_bypass(w->regulator, true); 1229 1229 if (ret != 0) 1230 1230 dev_warn(w->dapm->dev, 1231 - "ASoC: Failed to unbypass %s: %d\n", 1231 + "ASoC: Failed to bypass %s: %d\n", 1232 1232 w->name, ret); 1233 1233 } 1234 1234 ··· 3210 3210 struct snd_soc_card *card = snd_kcontrol_chip(kcontrol); 3211 3211 const char *pin = (const char *)kcontrol->private_value; 3212 3212 3213 - mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); 3214 - 3215 3213 if (ucontrol->value.integer.value[0]) 3216 3214 snd_soc_dapm_enable_pin(&card->dapm, pin); 3217 3215 else 3218 3216 snd_soc_dapm_disable_pin(&card->dapm, pin); 3219 - 3220 - mutex_unlock(&card->dapm_mutex); 3221 3217 3222 3218 snd_soc_dapm_sync(&card->dapm); 3223 3219 return 0; ··· 3244 3248 ret = regulator_allow_bypass(w->regulator, true); 3245 3249 if (ret != 0) 3246 3250 dev_warn(w->dapm->dev, 3247 - "ASoC: Failed to unbypass %s: %d\n", 3251 + "ASoC: Failed to bypass %s: %d\n", 3248 3252 w->name, ret); 3249 3253 } 3250 3254 break; ··· 3763 3767 } 3764 3768 3765 3769 /** 3770 + * snd_soc_dapm_enable_pin_unlocked - enable pin. 3771 + * @dapm: DAPM context 3772 + * @pin: pin name 3773 + * 3774 + * Enables input/output pin and its parents or children widgets iff there is 3775 + * a valid audio route and active audio stream. 3776 + * 3777 + * Requires external locking. 3778 + * 3779 + * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to 3780 + * do any widget power switching. 
3781 + */ 3782 + int snd_soc_dapm_enable_pin_unlocked(struct snd_soc_dapm_context *dapm, 3783 + const char *pin) 3784 + { 3785 + return snd_soc_dapm_set_pin(dapm, pin, 1); 3786 + } 3787 + EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin_unlocked); 3788 + 3789 + /** 3766 3790 * snd_soc_dapm_enable_pin - enable pin. 3767 3791 * @dapm: DAPM context 3768 3792 * @pin: pin name 3769 3793 * 3770 3794 * Enables input/output pin and its parents or children widgets iff there is 3771 3795 * a valid audio route and active audio stream. 3796 + * 3772 3797 * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to 3773 3798 * do any widget power switching. 3774 3799 */ 3775 3800 int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin) 3776 3801 { 3777 - return snd_soc_dapm_set_pin(dapm, pin, 1); 3802 + int ret; 3803 + 3804 + mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); 3805 + 3806 + ret = snd_soc_dapm_set_pin(dapm, pin, 1); 3807 + 3808 + mutex_unlock(&dapm->card->dapm_mutex); 3809 + 3810 + return ret; 3778 3811 } 3779 3812 EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin); 3813 + 3814 + /** 3815 + * snd_soc_dapm_force_enable_pin_unlocked - force a pin to be enabled 3816 + * @dapm: DAPM context 3817 + * @pin: pin name 3818 + * 3819 + * Enables input/output pin regardless of any other state. This is 3820 + * intended for use with microphone bias supplies used in microphone 3821 + * jack detection. 3822 + * 3823 + * Requires external locking. 3824 + * 3825 + * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to 3826 + * do any widget power switching. 
3827 + */ 3828 + int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm, 3829 + const char *pin) 3830 + { 3831 + struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true); 3832 + 3833 + if (!w) { 3834 + dev_err(dapm->dev, "ASoC: unknown pin %s\n", pin); 3835 + return -EINVAL; 3836 + } 3837 + 3838 + dev_dbg(w->dapm->dev, "ASoC: force enable pin %s\n", pin); 3839 + w->connected = 1; 3840 + w->force = 1; 3841 + dapm_mark_dirty(w, "force enable"); 3842 + 3843 + return 0; 3844 + } 3845 + EXPORT_SYMBOL_GPL(snd_soc_dapm_force_enable_pin_unlocked); 3780 3846 3781 3847 /** 3782 3848 * snd_soc_dapm_force_enable_pin - force a pin to be enabled ··· 3855 3797 int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm, 3856 3798 const char *pin) 3857 3799 { 3858 - struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true); 3800 + int ret; 3859 3801 3860 - if (!w) { 3861 - dev_err(dapm->dev, "ASoC: unknown pin %s\n", pin); 3862 - return -EINVAL; 3863 - } 3802 + mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); 3864 3803 3865 - dev_dbg(w->dapm->dev, "ASoC: force enable pin %s\n", pin); 3866 - w->connected = 1; 3867 - w->force = 1; 3868 - dapm_mark_dirty(w, "force enable"); 3804 + ret = snd_soc_dapm_force_enable_pin_unlocked(dapm, pin); 3869 3805 3870 - return 0; 3806 + mutex_unlock(&dapm->card->dapm_mutex); 3807 + 3808 + return ret; 3871 3809 } 3872 3810 EXPORT_SYMBOL_GPL(snd_soc_dapm_force_enable_pin); 3811 + 3812 + /** 3813 + * snd_soc_dapm_disable_pin_unlocked - disable pin. 3814 + * @dapm: DAPM context 3815 + * @pin: pin name 3816 + * 3817 + * Disables input/output pin and its parents or children widgets. 3818 + * 3819 + * Requires external locking. 3820 + * 3821 + * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to 3822 + * do any widget power switching. 
3823 + */ 3824 + int snd_soc_dapm_disable_pin_unlocked(struct snd_soc_dapm_context *dapm, 3825 + const char *pin) 3826 + { 3827 + return snd_soc_dapm_set_pin(dapm, pin, 0); 3828 + } 3829 + EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin_unlocked); 3873 3830 3874 3831 /** 3875 3832 * snd_soc_dapm_disable_pin - disable pin. ··· 3892 3819 * @pin: pin name 3893 3820 * 3894 3821 * Disables input/output pin and its parents or children widgets. 3822 + * 3895 3823 * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to 3896 3824 * do any widget power switching. 3897 3825 */ 3898 3826 int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm, 3899 3827 const char *pin) 3900 3828 { 3901 - return snd_soc_dapm_set_pin(dapm, pin, 0); 3829 + int ret; 3830 + 3831 + mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); 3832 + 3833 + ret = snd_soc_dapm_set_pin(dapm, pin, 0); 3834 + 3835 + mutex_unlock(&dapm->card->dapm_mutex); 3836 + 3837 + return ret; 3902 3838 } 3903 3839 EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin); 3840 + 3841 + /** 3842 + * snd_soc_dapm_nc_pin_unlocked - permanently disable pin. 3843 + * @dapm: DAPM context 3844 + * @pin: pin name 3845 + * 3846 + * Marks the specified pin as being not connected, disabling it along 3847 + * any parent or child widgets. At present this is identical to 3848 + * snd_soc_dapm_disable_pin() but in future it will be extended to do 3849 + * additional things such as disabling controls which only affect 3850 + * paths through the pin. 3851 + * 3852 + * Requires external locking. 3853 + * 3854 + * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to 3855 + * do any widget power switching. 
3856 + */ 3857 + int snd_soc_dapm_nc_pin_unlocked(struct snd_soc_dapm_context *dapm, 3858 + const char *pin) 3859 + { 3860 + return snd_soc_dapm_set_pin(dapm, pin, 0); 3861 + } 3862 + EXPORT_SYMBOL_GPL(snd_soc_dapm_nc_pin_unlocked); 3904 3863 3905 3864 /** 3906 3865 * snd_soc_dapm_nc_pin - permanently disable pin. ··· 3950 3845 */ 3951 3846 int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin) 3952 3847 { 3953 - return snd_soc_dapm_set_pin(dapm, pin, 0); 3848 + int ret; 3849 + 3850 + mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); 3851 + 3852 + ret = snd_soc_dapm_set_pin(dapm, pin, 0); 3853 + 3854 + mutex_unlock(&dapm->card->dapm_mutex); 3855 + 3856 + return ret; 3954 3857 } 3955 3858 EXPORT_SYMBOL_GPL(snd_soc_dapm_nc_pin); 3956 3859
+5 -3
sound/soc/txx9/txx9aclc-ac97.c
··· 183 183 irq = platform_get_irq(pdev, 0); 184 184 if (irq < 0) 185 185 return irq; 186 + 187 + drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL); 188 + if (!drvdata) 189 + return -ENOMEM; 190 + 186 191 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 187 192 drvdata->base = devm_ioremap_resource(&pdev->dev, r); 188 193 if (IS_ERR(drvdata->base)) 189 194 return PTR_ERR(drvdata->base); 190 195 191 - drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL); 192 - if (!drvdata) 193 - return -ENOMEM; 194 196 platform_set_drvdata(pdev, drvdata); 195 197 drvdata->physbase = r->start; 196 198 if (sizeof(drvdata->physbase) > sizeof(r->start) &&
+1
sound/usb/mixer.c
··· 883 883 } 884 884 break; 885 885 886 + case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */ 886 887 case USB_ID(0x046d, 0x0808): 887 888 case USB_ID(0x046d, 0x0809): 888 889 case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
+9
sound/usb/mixer_maps.c
··· 328 328 {} 329 329 }; 330 330 331 + static const struct usbmix_name_map kef_x300a_map[] = { 332 + { 10, NULL }, /* firmware locks up (?) when we try to access this FU */ 333 + { 0 } 334 + }; 335 + 331 336 /* 332 337 * Control map entries 333 338 */ ··· 423 418 { 424 419 .id = USB_ID(0x200c, 0x1018), 425 420 .map = ebox44_map, 421 + }, 422 + { 423 + .id = USB_ID(0x27ac, 0x1000), 424 + .map = kef_x300a_map, 426 425 }, 427 426 { 0 } /* terminator */ 428 427 };
+3 -3
tools/lib/lockdep/Makefile
··· 87 87 # We process the rest of the Makefile if this is the final invocation of make 88 88 ifeq ($(skip-makefile),) 89 89 90 - srctree := $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR)) 91 - objtree := $(CURDIR) 90 + srctree := $(realpath $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))) 91 + objtree := $(realpath $(CURDIR)) 92 92 src := $(srctree) 93 93 obj := $(objtree) 94 94 ··· 112 112 113 113 LIBLOCKDEP_VERSION = $(LL_VERSION).$(LL_PATCHLEVEL).$(LL_EXTRAVERSION) 114 114 115 - INCLUDES = -I. -I/usr/local/include -I./uinclude $(CONFIG_INCLUDES) 115 + INCLUDES = -I. -I/usr/local/include -I./uinclude -I./include $(CONFIG_INCLUDES) 116 116 117 117 # Set compile option CFLAGS if not set elsewhere 118 118 CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
+1 -1
tools/lib/lockdep/preload.c
··· 418 418 419 419 __attribute__((constructor)) static void init_preload(void) 420 420 { 421 - if (__init_state != done) 421 + if (__init_state == done) 422 422 return; 423 423 424 424 #ifndef __GLIBC__
tools/lib/lockdep/run_tests.sh
+6
tools/lib/lockdep/uinclude/asm/hash.h
··· 1 + #ifndef __ASM_GENERIC_HASH_H 2 + #define __ASM_GENERIC_HASH_H 3 + 4 + /* Stub */ 5 + 6 + #endif /* __ASM_GENERIC_HASH_H */
+5
tools/lib/lockdep/uinclude/linux/rcu.h
··· 13 13 return 1; 14 14 } 15 15 16 + static inline bool rcu_is_watching(void) 17 + { 18 + return false; 19 + } 20 + 16 21 #endif
+23 -15
tools/perf/builtin-report.c
··· 113 113 if (!he) 114 114 return -ENOMEM; 115 115 116 - err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); 117 - if (err) 118 - goto out; 116 + if (ui__has_annotation()) { 117 + err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); 118 + if (err) 119 + goto out; 119 120 120 - mx = he->mem_info; 121 - err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx); 122 - if (err) 123 - goto out; 121 + mx = he->mem_info; 122 + err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx); 123 + if (err) 124 + goto out; 125 + } 124 126 125 127 evsel->hists.stats.total_period += cost; 126 128 hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); ··· 166 164 he = __hists__add_entry(&evsel->hists, al, parent, &bi[i], NULL, 167 165 1, 1, 0); 168 166 if (he) { 169 - bx = he->branch_info; 170 - err = addr_map_symbol__inc_samples(&bx->from, evsel->idx); 171 - if (err) 172 - goto out; 167 + if (ui__has_annotation()) { 168 + bx = he->branch_info; 169 + err = addr_map_symbol__inc_samples(&bx->from, 170 + evsel->idx); 171 + if (err) 172 + goto out; 173 173 174 - err = addr_map_symbol__inc_samples(&bx->to, evsel->idx); 175 - if (err) 176 - goto out; 174 + err = addr_map_symbol__inc_samples(&bx->to, 175 + evsel->idx); 176 + if (err) 177 + goto out; 178 + } 177 179 178 180 evsel->hists.stats.total_period += 1; 179 181 hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); ··· 211 205 if (err) 212 206 goto out; 213 207 214 - err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); 208 + if (ui__has_annotation()) 209 + err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr); 210 + 215 211 evsel->hists.stats.total_period += sample->period; 216 212 hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); 217 213 out:
+4 -2
tools/perf/builtin-top.c
··· 176 176 { 177 177 struct annotation *notes; 178 178 struct symbol *sym; 179 - int err; 179 + int err = 0; 180 180 181 181 if (he == NULL || he->ms.sym == NULL || 182 182 ((top->sym_filter_entry == NULL || ··· 190 190 return; 191 191 192 192 ip = he->ms.map->map_ip(he->ms.map, ip); 193 - err = hist_entry__inc_addr_samples(he, counter, ip); 193 + 194 + if (ui__has_annotation()) 195 + err = hist_entry__inc_addr_samples(he, counter, ip); 194 196 195 197 pthread_mutex_unlock(&notes->lock); 196 198
+22
tools/perf/builtin-trace.c
··· 37 37 # define MADV_UNMERGEABLE 13 38 38 #endif 39 39 40 + #ifndef EFD_SEMAPHORE 41 + # define EFD_SEMAPHORE 1 42 + #endif 43 + 40 44 struct tp_field { 41 45 int offset; 42 46 union { ··· 283 279 284 280 #define SCA_STRARRAY syscall_arg__scnprintf_strarray 285 281 282 + #if defined(__i386__) || defined(__x86_64__) 283 + /* 284 + * FIXME: Make this available to all arches as soon as the ioctl beautifier 285 + * gets rewritten to support all arches. 286 + */ 286 287 static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size, 287 288 struct syscall_arg *arg) 288 289 { ··· 295 286 } 296 287 297 288 #define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray 289 + #endif /* defined(__i386__) || defined(__x86_64__) */ 298 290 299 291 static size_t syscall_arg__scnprintf_fd(char *bf, size_t size, 300 292 struct syscall_arg *arg); ··· 849 839 850 840 #define SCA_SIGNUM syscall_arg__scnprintf_signum 851 841 842 + #if defined(__i386__) || defined(__x86_64__) 843 + /* 844 + * FIXME: Make this available to all arches. 845 + */ 852 846 #define TCGETS 0x5401 853 847 854 848 static const char *tioctls[] = { ··· 874 860 }; 875 861 876 862 static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401); 863 + #endif /* defined(__i386__) || defined(__x86_64__) */ 877 864 878 865 #define STRARRAY(arg, name, array) \ 879 866 .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \ ··· 956 941 { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), }, 957 942 { .name = "ioctl", .errmsg = true, 958 943 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 944 + #if defined(__i386__) || defined(__x86_64__) 945 + /* 946 + * FIXME: Make this available to all arches. 
947 + */ 959 948 [1] = SCA_STRHEXARRAY, /* cmd */ 960 949 [2] = SCA_HEX, /* arg */ }, 961 950 .arg_parm = { [1] = &strarray__tioctls, /* cmd */ }, }, 951 + #else 952 + [2] = SCA_HEX, /* arg */ }, }, 953 + #endif 962 954 { .name = "kill", .errmsg = true, 963 955 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, }, 964 956 { .name = "linkat", .errmsg = true,
+1 -1
tools/perf/config/Makefile
··· 478 478 endif 479 479 480 480 ifeq ($(feature-libbfd), 1) 481 - EXTLIBS += -lbfd 481 + EXTLIBS += -lbfd -lz -liberty 482 482 endif 483 483 484 484 ifdef NO_DEMANGLE
+1 -1
tools/perf/config/feature-checks/Makefile
··· 121 121 $(BUILD) $(FLAGS_PYTHON_EMBED) 122 122 123 123 test-libbfd.bin: 124 - $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl 124 + $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl 125 125 126 126 test-liberty.bin: 127 127 $(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty
+8 -1
tools/perf/util/annotate.c
··· 8 8 */ 9 9 10 10 #include "util.h" 11 + #include "ui/ui.h" 12 + #include "sort.h" 11 13 #include "build-id.h" 12 14 #include "color.h" 13 15 #include "cache.h" ··· 491 489 { 492 490 struct annotation *notes; 493 491 494 - if (sym == NULL || use_browser != 1 || !sort__has_sym) 492 + if (sym == NULL) 495 493 return 0; 496 494 497 495 notes = symbol__annotation(sym); ··· 1400 1398 int hist_entry__annotate(struct hist_entry *he, size_t privsize) 1401 1399 { 1402 1400 return symbol__annotate(he->ms.sym, he->ms.map, privsize); 1401 + } 1402 + 1403 + bool ui__has_annotation(void) 1404 + { 1405 + return use_browser == 1 && sort__has_sym; 1403 1406 }
+2
tools/perf/util/annotate.h
··· 151 151 void symbol__annotate_decay_histogram(struct symbol *sym, int evidx); 152 152 void disasm__purge(struct list_head *head); 153 153 154 + bool ui__has_annotation(void); 155 + 154 156 int symbol__tty_annotate(struct symbol *sym, struct map *map, 155 157 struct perf_evsel *evsel, bool print_lines, 156 158 bool full_paths, int min_pcnt, int max_lines);
+3 -1
tools/perf/util/include/linux/bitops.h
··· 87 87 return num; 88 88 } 89 89 90 + typedef const unsigned long __attribute__((__may_alias__)) long_alias_t; 91 + 90 92 /* 91 93 * Find the first set bit in a memory region. 92 94 */ 93 95 static inline unsigned long 94 96 find_first_bit(const unsigned long *addr, unsigned long size) 95 97 { 96 - const unsigned long *p = addr; 98 + long_alias_t *p = (long_alias_t *) addr; 97 99 unsigned long result = 0; 98 100 unsigned long tmp; 99 101
+15 -2
tools/perf/util/parse-events.c
··· 1091 1091 static bool is_event_supported(u8 type, unsigned config) 1092 1092 { 1093 1093 bool ret = true; 1094 + int open_return; 1094 1095 struct perf_evsel *evsel; 1095 1096 struct perf_event_attr attr = { 1096 1097 .type = type, 1097 1098 .config = config, 1098 1099 .disabled = 1, 1099 - .exclude_kernel = 1, 1100 1100 }; 1101 1101 struct { 1102 1102 struct thread_map map; ··· 1108 1108 1109 1109 evsel = perf_evsel__new(&attr); 1110 1110 if (evsel) { 1111 - ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0; 1111 + open_return = perf_evsel__open(evsel, NULL, &tmap.map); 1112 + ret = open_return >= 0; 1113 + 1114 + if (open_return == -EACCES) { 1115 + /* 1116 + * This happens if the paranoid value 1117 + * /proc/sys/kernel/perf_event_paranoid is set to 2 1118 + * Re-run with exclude_kernel set; we don't do that 1119 + * by default as some ARM machines do not support it. 1120 + * 1121 + */ 1122 + evsel->attr.exclude_kernel = 1; 1123 + ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0; 1124 + } 1112 1125 perf_evsel__delete(evsel); 1113 1126 } 1114 1127
+1 -1
tools/perf/util/probe-event.c
··· 336 336 return ret;
337 337 
338 338 for (i = 0; i < ntevs && ret >= 0; i++) {
339 + /* point.address is the address of point.symbol + point.offset */
339 340 offset = tevs[i].point.address - stext;
340 - offset += tevs[i].point.offset;
341 341 tevs[i].point.offset = 0;
342 342 zfree(&tevs[i].point.symbol);
343 343 ret = e_snprintf(buf, 32, "0x%lx", offset);
+6
tools/perf/util/session.c
··· 1008 1008 if (err == 0)
1009 1009 perf_session__set_id_hdr_size(session);
1010 1010 return err;
1011 + case PERF_RECORD_HEADER_EVENT_TYPE:
1012 + /*
1013 + * Deprecated, but we need to handle it for the sake
1014 + * of old data files created in pipe mode.
1015 + */
1016 + return 0;
1011 1017 case PERF_RECORD_HEADER_TRACING_DATA:
1012 1018 /* setup for reading amidst mmap */
1013 1019 lseek(fd, file_offset, SEEK_SET);
+2
tools/perf/util/symbol.c
··· 1336 1336 1337 1337 if (syms_ss && runtime_ss) 1338 1338 break; 1339 + } else { 1340 + symsrc__destroy(ss); 1339 1341 } 1340 1342 1341 1343 }