Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge commit 'v3.5-rc3' into x86/debug

Merge it in to pick up a fix that we are going to clean up in this
branch.

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+6339 -2841
+93
Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
··· 1 + Pinctrl-based I2C Bus Mux 2 + 3 + This binding describes an I2C bus multiplexer that uses pin multiplexing to 4 + route the I2C signals, and represents the pin multiplexing configuration 5 + using the pinctrl device tree bindings. 6 + 7 + +-----+ +-----+ 8 + | dev | | dev | 9 + +------------------------+ +-----+ +-----+ 10 + | SoC | | | 11 + | /----|------+--------+ 12 + | +---+ +------+ | child bus A, on first set of pins 13 + | |I2C|---|Pinmux| | 14 + | +---+ +------+ | child bus B, on second set of pins 15 + | \----|------+--------+--------+ 16 + | | | | | 17 + +------------------------+ +-----+ +-----+ +-----+ 18 + | dev | | dev | | dev | 19 + +-----+ +-----+ +-----+ 20 + 21 + Required properties: 22 + - compatible: i2c-mux-pinctrl 23 + - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side 24 + port is connected to. 25 + 26 + Also required are: 27 + 28 + * Standard pinctrl properties that specify the pin mux state for each child 29 + bus. See ../pinctrl/pinctrl-bindings.txt. 30 + 31 + * Standard I2C mux properties. See mux.txt in this directory. 32 + 33 + * I2C child bus nodes. See mux.txt in this directory. 34 + 35 + For each named state defined in the pinctrl-names property, an I2C child bus 36 + will be created. I2C child bus numbers are assigned based on the index into 37 + the pinctrl-names property. 38 + 39 + The only exception is that no bus will be created for a state named "idle". If 40 + such a state is defined, it must be the last entry in pinctrl-names. For 41 + example: 42 + 43 + pinctrl-names = "ddc", "pta", "idle" -> ddc = bus 0, pta = bus 1 44 + pinctrl-names = "ddc", "idle", "pta" -> Invalid ("idle" not last) 45 + pinctrl-names = "idle", "ddc", "pta" -> Invalid ("idle" not last) 46 + 47 + Whenever an access is made to a device on a child bus, the relevant pinctrl 48 + state will be programmed into hardware. 
49 + 50 + If an idle state is defined, whenever an access is not being made to a device 51 + on a child bus, the idle pinctrl state will be programmed into hardware. 52 + 53 + If an idle state is not defined, the most recently used pinctrl state will be 54 + left programmed into hardware whenever no access is being made of a device on 55 + a child bus. 56 + 57 + Example: 58 + 59 + i2cmux { 60 + compatible = "i2c-mux-pinctrl"; 61 + #address-cells = <1>; 62 + #size-cells = <0>; 63 + 64 + i2c-parent = <&i2c1>; 65 + 66 + pinctrl-names = "ddc", "pta", "idle"; 67 + pinctrl-0 = <&state_i2cmux_ddc>; 68 + pinctrl-1 = <&state_i2cmux_pta>; 69 + pinctrl-2 = <&state_i2cmux_idle>; 70 + 71 + i2c@0 { 72 + reg = <0>; 73 + #address-cells = <1>; 74 + #size-cells = <0>; 75 + 76 + eeprom { 77 + compatible = "eeprom"; 78 + reg = <0x50>; 79 + }; 80 + }; 81 + 82 + i2c@1 { 83 + reg = <1>; 84 + #address-cells = <1>; 85 + #size-cells = <0>; 86 + 87 + eeprom { 88 + compatible = "eeprom"; 89 + reg = <0x50>; 90 + }; 91 + }; 92 + }; 93 +
+25 -19
Documentation/networking/stmmac.txt
··· 10 10 (i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000 11 11 FF1152AMT0221 D1215994A VIRTEX FPGA board. 12 12 13 - DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100 14 - Universal version 4.0 have been used for developing this driver. 13 + DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether 14 + MAC 10/100 Universal version 4.0 have been used for developing this driver. 15 15 16 16 This driver supports both the platform bus and PCI. 17 17 ··· 54 54 When one or more packets are received, an interrupt happens. The interrupts 55 55 are not queued so the driver has to scan all the descriptors in the ring during 56 56 the receive process. 57 - This is based on NAPI so the interrupt handler signals only if there is work to be 58 - done, and it exits. 57 + This is based on NAPI so the interrupt handler signals only if there is work 58 + to be done, and it exits. 59 59 Then the poll method will be scheduled at some future point. 60 60 The incoming packets are stored, by the DMA, in a list of pre-allocated socket 61 61 buffers in order to avoid the memcpy (Zero-copy). 62 62 63 63 4.3) Timer-Driver Interrupt 64 - Instead of having the device that asynchronously notifies the frame receptions, the 65 - driver configures a timer to generate an interrupt at regular intervals. 66 - Based on the granularity of the timer, the frames that are received by the device 67 - will experience different levels of latency. Some NICs have dedicated timer 68 - device to perform this task. STMMAC can use either the RTC device or the TMU 69 - channel 2 on STLinux platforms. 64 + Instead of having the device that asynchronously notifies the frame receptions, 65 + the driver configures a timer to generate an interrupt at regular intervals. 66 + Based on the granularity of the timer, the frames that are received by the 67 + device will experience different levels of latency. 
Some NICs have dedicated 68 + timer device to perform this task. STMMAC can use either the RTC device or the 69 + TMU channel 2 on STLinux platforms. 70 70 The timers frequency can be passed to the driver as parameter; when change it, 71 71 take care of both hardware capability and network stability/performance impact. 72 - Several performance tests on STM platforms showed this optimisation allows to spare 73 - the CPU while having the maximum throughput. 72 + Several performance tests on STM platforms showed this optimisation allows to 73 + spare the CPU while having the maximum throughput. 74 74 75 75 4.4) WOL 76 - Wake up on Lan feature through Magic and Unicast frames are supported for the GMAC 77 - core. 76 + Wake up on Lan feature through Magic and Unicast frames are supported for the 77 + GMAC core. 78 78 79 79 4.5) DMA descriptors 80 80 Driver handles both normal and enhanced descriptors. The latter has been only ··· 106 106 These are included in the include/linux/stmmac.h header file 107 107 and detailed below as well: 108 108 109 - struct plat_stmmacenet_data { 109 + struct plat_stmmacenet_data { 110 + char *phy_bus_name; 110 111 int bus_id; 111 112 int phy_addr; 112 113 int interface; ··· 125 124 void (*bus_setup)(void __iomem *ioaddr); 126 125 int (*init)(struct platform_device *pdev); 127 126 void (*exit)(struct platform_device *pdev); 127 + void *custom_cfg; 128 + void *custom_data; 128 129 void *bsp_priv; 129 130 }; 130 131 131 132 Where: 133 + o phy_bus_name: phy bus name to attach to the stmmac. 132 134 o bus_id: bus identifier. 133 135 o phy_addr: the physical address can be passed from the platform. 134 136 If it is set to -1 the driver will automatically 135 137 detect it at run-time by probing all the 32 addresses. 136 138 o interface: PHY device's interface. 137 139 o mdio_bus_data: specific platform fields for the MDIO bus. 
138 - o pbl: the Programmable Burst Length is maximum number of beats to 140 + o dma_cfg: internal DMA parameters 141 + o pbl: the Programmable Burst Length is maximum number of beats to 139 142 be transferred in one DMA transaction. 140 143 GMAC also enables the 4xPBL by default. 144 + o fixed_burst/mixed_burst/burst_len 141 145 o clk_csr: fixed CSR Clock range selection. 142 146 o has_gmac: uses the GMAC core. 143 147 o enh_desc: if sets the MAC will use the enhanced descriptor structure. ··· 166 160 this is sometime necessary on some platforms (e.g. ST boxes) 167 161 where the HW needs to have set some PIO lines or system cfg 168 162 registers. 169 - o custom_cfg: this is a custom configuration that can be passed while 170 - initialising the resources. 163 + o custom_cfg/custom_data: this is a custom configuration that can be passed 164 + while initialising the resources. 165 + o bsp_priv: another private poiter. 171 166 172 167 For MDIO bus The we have: 173 168 ··· 186 179 o phy_mask: phy mask passed when register the MDIO bus within the driver. 187 180 o irqs: list of IRQs, one per PHY. 188 181 o probed_phy_irq: if irqs is NULL, use this for probed PHY. 189 - 190 182 191 183 For DMA engine we have the following internal fields that should be 192 184 tuned according to the HW capabilities.
+38 -21
MAINTAINERS
··· 1077 1077 ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT 1078 1078 M: Kyungmin Park <kyungmin.park@samsung.com> 1079 1079 M: Kamil Debski <k.debski@samsung.com> 1080 - M: Jeongtae Park <jtp.park@samsung.com> 1080 + M: Jeongtae Park <jtp.park@samsung.com> 1081 1081 L: linux-arm-kernel@lists.infradead.org 1082 1082 L: linux-media@vger.kernel.org 1083 1083 S: Maintained ··· 1646 1646 F: drivers/gpio/gpio-bt8xx.c 1647 1647 1648 1648 BTRFS FILE SYSTEM 1649 - M: Chris Mason <chris.mason@oracle.com> 1649 + M: Chris Mason <chris.mason@fusionio.com> 1650 1650 L: linux-btrfs@vger.kernel.org 1651 1651 W: http://btrfs.wiki.kernel.org/ 1652 1652 Q: http://patchwork.kernel.org/project/linux-btrfs/list/ 1653 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable.git 1653 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git 1654 1654 S: Maintained 1655 1655 F: Documentation/filesystems/btrfs.txt 1656 1656 F: fs/btrfs/ ··· 1743 1743 CAPABILITIES 1744 1744 M: Serge Hallyn <serge.hallyn@canonical.com> 1745 1745 L: linux-security-module@vger.kernel.org 1746 - S: Supported 1746 + S: Supported 1747 1747 F: include/linux/capability.h 1748 1748 F: security/capability.c 1749 - F: security/commoncap.c 1749 + F: security/commoncap.c 1750 1750 F: kernel/capability.c 1751 1751 1752 1752 CELL BROADBAND ENGINE ARCHITECTURE ··· 1800 1800 CFG80211 and NL80211 1801 1801 M: Johannes Berg <johannes@sipsolutions.net> 1802 1802 L: linux-wireless@vger.kernel.org 1803 + W: http://wireless.kernel.org/ 1804 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git 1805 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git 1803 1806 S: Maintained 1804 1807 F: include/linux/nl80211.h 1805 1808 F: include/net/cfg80211.h ··· 2149 2146 F: drivers/net/wan/pc300* 2150 2147 2151 2148 CYTTSP TOUCHSCREEN DRIVER 2152 - M: Javier Martinez Canillas <javier@dowhile0.org> 2153 - L: linux-input@vger.kernel.org 2154 - S: 
Maintained 2155 - F: drivers/input/touchscreen/cyttsp* 2156 - F: include/linux/input/cyttsp.h 2149 + M: Javier Martinez Canillas <javier@dowhile0.org> 2150 + L: linux-input@vger.kernel.org 2151 + S: Maintained 2152 + F: drivers/input/touchscreen/cyttsp* 2153 + F: include/linux/input/cyttsp.h 2157 2154 2158 2155 DAMA SLAVE for AX.25 2159 2156 M: Joerg Reuter <jreuter@yaina.de> ··· 2273 2270 F: include/linux/dm-*.h 2274 2271 2275 2272 DIOLAN U2C-12 I2C DRIVER 2276 - M: Guenter Roeck <guenter.roeck@ericsson.com> 2273 + M: Guenter Roeck <linux@roeck-us.net> 2277 2274 L: linux-i2c@vger.kernel.org 2278 2275 S: Maintained 2279 2276 F: drivers/i2c/busses/i2c-diolan-u2c.c ··· 3148 3145 3149 3146 HARDWARE MONITORING 3150 3147 M: Jean Delvare <khali@linux-fr.org> 3151 - M: Guenter Roeck <guenter.roeck@ericsson.com> 3148 + M: Guenter Roeck <linux@roeck-us.net> 3152 3149 L: lm-sensors@lm-sensors.org 3153 3150 W: http://www.lm-sensors.org/ 3154 3151 T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ ··· 4106 4103 LED SUBSYSTEM 4107 4104 M: Bryan Wu <bryan.wu@canonical.com> 4108 4105 M: Richard Purdie <rpurdie@rpsys.net> 4106 + L: linux-leds@vger.kernel.org 4107 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git 4109 4108 S: Maintained 4110 4109 F: drivers/leds/ 4111 4110 F: include/linux/leds.h ··· 4352 4347 M: Johannes Berg <johannes@sipsolutions.net> 4353 4348 L: linux-wireless@vger.kernel.org 4354 4349 W: http://linuxwireless.org/ 4355 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git 4350 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git 4351 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git 4356 4352 S: Maintained 4357 4353 F: Documentation/networking/mac80211-injection.txt 4358 4354 F: include/net/mac80211.h ··· 4364 4358 M: Mattias Nissler <mattias.nissler@gmx.de> 4365 4359 L: linux-wireless@vger.kernel.org 4366 4360 W: 
http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID 4367 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git 4361 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git 4362 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git 4368 4363 S: Maintained 4369 4364 F: net/mac80211/rc80211_pid* 4370 4365 ··· 4424 4417 S: Orphan 4425 4418 F: drivers/video/matrox/matroxfb_* 4426 4419 F: include/linux/matroxfb.h 4420 + 4421 + MAX16065 HARDWARE MONITOR DRIVER 4422 + M: Guenter Roeck <linux@roeck-us.net> 4423 + L: lm-sensors@lm-sensors.org 4424 + S: Maintained 4425 + F: Documentation/hwmon/max16065 4426 + F: drivers/hwmon/max16065.c 4427 4427 4428 4428 MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER 4429 4429 M: "Hans J. Koch" <hjk@hansjkoch.de> ··· 5170 5156 F: include/linux/leds-pca9532.h 5171 5157 5172 5158 PCA9541 I2C BUS MASTER SELECTOR DRIVER 5173 - M: Guenter Roeck <guenter.roeck@ericsson.com> 5159 + M: Guenter Roeck <linux@roeck-us.net> 5174 5160 L: linux-i2c@vger.kernel.org 5175 5161 S: Maintained 5176 5162 F: drivers/i2c/muxes/i2c-mux-pca9541.c ··· 5190 5176 F: drivers/firmware/pcdp.* 5191 5177 5192 5178 PCI ERROR RECOVERY 5193 - M: Linas Vepstas <linasvepstas@gmail.com> 5179 + M: Linas Vepstas <linasvepstas@gmail.com> 5194 5180 L: linux-pci@vger.kernel.org 5195 5181 S: Supported 5196 5182 F: Documentation/PCI/pci-error-recovery.txt ··· 5320 5306 F: drivers/rtc/rtc-puv3.c 5321 5307 5322 5308 PMBUS HARDWARE MONITORING DRIVERS 5323 - M: Guenter Roeck <guenter.roeck@ericsson.com> 5309 + M: Guenter Roeck <linux@roeck-us.net> 5324 5310 L: lm-sensors@lm-sensors.org 5325 5311 W: http://www.lm-sensors.org/ 5326 5312 W: http://www.roeck-us.net/linux/drivers/ ··· 5716 5702 RFKILL 5717 5703 M: Johannes Berg <johannes@sipsolutions.net> 5718 5704 L: linux-wireless@vger.kernel.org 5705 + W: http://wireless.kernel.org/ 5706 + T: git 
git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git 5707 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git 5719 5708 S: Maintained 5720 5709 F: Documentation/rfkill.txt 5721 5710 F: net/rfkill/ ··· 7315 7298 F: drivers/uio/ 7316 7299 F: include/linux/uio*.h 7317 7300 7318 - UTIL-LINUX-NG PACKAGE 7301 + UTIL-LINUX PACKAGE 7319 7302 M: Karel Zak <kzak@redhat.com> 7320 - L: util-linux-ng@vger.kernel.org 7321 - W: http://kernel.org/~kzak/util-linux-ng/ 7322 - T: git git://git.kernel.org/pub/scm/utils/util-linux-ng/util-linux-ng.git 7303 + L: util-linux@vger.kernel.org 7304 + W: http://en.wikipedia.org/wiki/Util-linux 7305 + T: git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git 7323 7306 S: Maintained 7324 7307 7325 7308 UVESAFB DRIVER
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 5 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc1 4 + EXTRAVERSION = -rc3 5 5 NAME = Saber-toothed Squirrel 6 6 7 7 # *DOCUMENTATION*
-1
arch/arm/Kconfig
··· 7 7 select HAVE_IDE if PCI || ISA || PCMCIA 8 8 select HAVE_DMA_ATTRS 9 9 select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7) 10 - select CMA if (CPU_V6 || CPU_V6K || CPU_V7) 11 10 select HAVE_MEMBLOCK 12 11 select RTC_LIB 13 12 select SYS_SUPPORTS_APM_EMULATION
+8 -8
arch/arm/common/dmabounce.c
··· 366 366 struct safe_buffer *buf; 367 367 unsigned long off; 368 368 369 - dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", 370 - __func__, addr, off, sz, dir); 369 + dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n", 370 + __func__, addr, sz, dir); 371 371 372 372 buf = find_safe_buffer_dev(dev, addr, __func__); 373 373 if (!buf) ··· 377 377 378 378 BUG_ON(buf->direction != dir); 379 379 380 - dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", 381 - __func__, buf->ptr, virt_to_dma(dev, buf->ptr), 380 + dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n", 381 + __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off, 382 382 buf->safe, buf->safe_dma_addr); 383 383 384 384 DO_STATS(dev->archdata.dmabounce->bounce_count++); ··· 406 406 struct safe_buffer *buf; 407 407 unsigned long off; 408 408 409 - dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", 410 - __func__, addr, off, sz, dir); 409 + dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n", 410 + __func__, addr, sz, dir); 411 411 412 412 buf = find_safe_buffer_dev(dev, addr, __func__); 413 413 if (!buf) ··· 417 417 418 418 BUG_ON(buf->direction != dir); 419 419 420 - dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", 421 - __func__, buf->ptr, virt_to_dma(dev, buf->ptr), 420 + dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n", 421 + __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off, 422 422 buf->safe, buf->safe_dma_addr); 423 423 424 424 DO_STATS(dev->archdata.dmabounce->bounce_count++);
+2 -2
arch/arm/mach-omap2/display.c
··· 271 271 goto err; 272 272 } 273 273 274 - r = omap_device_register(pdev); 274 + r = platform_device_add(pdev); 275 275 if (r) { 276 - pr_err("Could not register omap_device for %s\n", pdev_name); 276 + pr_err("Could not register platform_device for %s\n", pdev_name); 277 277 goto err; 278 278 } 279 279
+6 -8
arch/arm/mm/dma-mapping.c
··· 228 228 229 229 #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M 230 230 231 - unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE; 231 + static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE; 232 232 233 233 void __init init_consistent_dma_size(unsigned long size) 234 234 { ··· 268 268 unsigned long base = consistent_base; 269 269 unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT; 270 270 271 - #ifndef CONFIG_ARM_DMA_USE_IOMMU 272 - if (cpu_architecture() >= CPU_ARCH_ARMv6) 271 + if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) 273 272 return 0; 274 - #endif 275 273 276 274 consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL); 277 275 if (!consistent_pte) { ··· 321 323 .vm_list = LIST_HEAD_INIT(coherent_head.vm_list), 322 324 }; 323 325 324 - size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8; 326 + static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8; 325 327 326 328 static int __init early_coherent_pool(char *p) 327 329 { ··· 340 342 struct page *page; 341 343 void *ptr; 342 344 343 - if (cpu_architecture() < CPU_ARCH_ARMv6) 345 + if (!IS_ENABLED(CONFIG_CMA)) 344 346 return 0; 345 347 346 348 ptr = __alloc_from_contiguous(NULL, size, prot, &page); ··· 702 704 703 705 if (arch_is_coherent() || nommu()) 704 706 addr = __alloc_simple_buffer(dev, size, gfp, &page); 705 - else if (cpu_architecture() < CPU_ARCH_ARMv6) 707 + else if (!IS_ENABLED(CONFIG_CMA)) 706 708 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); 707 709 else if (gfp & GFP_ATOMIC) 708 710 addr = __alloc_from_pool(dev, size, &page, caller); ··· 771 773 772 774 if (arch_is_coherent() || nommu()) { 773 775 __dma_free_buffer(page, size); 774 - } else if (cpu_architecture() < CPU_ARCH_ARMv6) { 776 + } else if (!IS_ENABLED(CONFIG_CMA)) { 775 777 __dma_free_remap(cpu_addr, size); 776 778 __dma_free_buffer(page, size); 777 779 } else {
+1 -1
arch/arm/mm/init.c
··· 212 212 * allocations. This must be the smallest DMA mask in the system, 213 213 * so a successful GFP_DMA allocation will always satisfy this. 214 214 */ 215 - u32 arm_dma_limit; 215 + phys_addr_t arm_dma_limit; 216 216 217 217 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, 218 218 unsigned long dma_size)
+1 -1
arch/arm/mm/mm.h
··· 62 62 #endif 63 63 64 64 #ifdef CONFIG_ZONE_DMA 65 - extern u32 arm_dma_limit; 65 + extern phys_addr_t arm_dma_limit; 66 66 #else 67 67 #define arm_dma_limit ((u32)~0) 68 68 #endif
+2
arch/m68k/Kconfig
··· 7 7 select GENERIC_IRQ_SHOW 8 8 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS 9 9 select GENERIC_CPU_DEVICES 10 + select GENERIC_STRNCPY_FROM_USER if MMU 11 + select GENERIC_STRNLEN_USER if MMU 10 12 select FPU if MMU 11 13 select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE 12 14
+2
arch/m68k/include/asm/Kbuild
··· 1 1 include include/asm-generic/Kbuild.asm 2 2 header-y += cachectl.h 3 + 4 + generic-y += word-at-a-time.h
+1 -1
arch/m68k/include/asm/m528xsim.h
··· 86 86 /* 87 87 * QSPI module. 88 88 */ 89 - #define MCFQSPI_IOBASE (MCF_IPSBAR + 0x340) 89 + #define MCFQSPI_BASE (MCF_IPSBAR + 0x340) 90 90 #define MCFQSPI_SIZE 0x40 91 91 92 92 #define MCFQSPI_CS0 147
+7 -4
arch/m68k/include/asm/uaccess_mm.h
··· 379 379 #define copy_from_user(to, from, n) __copy_from_user(to, from, n) 380 380 #define copy_to_user(to, from, n) __copy_to_user(to, from, n) 381 381 382 - long strncpy_from_user(char *dst, const char __user *src, long count); 383 - long strnlen_user(const char __user *src, long n); 382 + #define user_addr_max() \ 383 + (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) 384 + 385 + extern long strncpy_from_user(char *dst, const char __user *src, long count); 386 + extern __must_check long strlen_user(const char __user *str); 387 + extern __must_check long strnlen_user(const char __user *str, long n); 388 + 384 389 unsigned long __clear_user(void __user *to, unsigned long n); 385 390 386 391 #define clear_user __clear_user 387 - 388 - #define strlen_user(str) strnlen_user(str, 32767) 389 392 390 393 #endif /* _M68K_UACCESS_H */
+1 -1
arch/m68k/kernel/ptrace.c
··· 286 286 } 287 287 } 288 288 289 - #ifdef CONFIG_COLDFIRE 289 + #if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU) 290 290 asmlinkage int syscall_trace_enter(void) 291 291 { 292 292 int ret = 0;
+2 -2
arch/m68k/kernel/time.c
··· 85 85 mach_sched_init(timer_interrupt); 86 86 } 87 87 88 - #ifdef CONFIG_M68KCLASSIC 88 + #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET 89 89 90 90 u32 arch_gettimeoffset(void) 91 91 { ··· 108 108 109 109 module_init(rtc_init); 110 110 111 - #endif /* CONFIG_M68KCLASSIC */ 111 + #endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
-74
arch/m68k/lib/uaccess.c
··· 104 104 EXPORT_SYMBOL(__generic_copy_to_user); 105 105 106 106 /* 107 - * Copy a null terminated string from userspace. 108 - */ 109 - long strncpy_from_user(char *dst, const char __user *src, long count) 110 - { 111 - long res; 112 - char c; 113 - 114 - if (count <= 0) 115 - return count; 116 - 117 - asm volatile ("\n" 118 - "1: "MOVES".b (%2)+,%4\n" 119 - " move.b %4,(%1)+\n" 120 - " jeq 2f\n" 121 - " subq.l #1,%3\n" 122 - " jne 1b\n" 123 - "2: sub.l %3,%0\n" 124 - "3:\n" 125 - " .section .fixup,\"ax\"\n" 126 - " .even\n" 127 - "10: move.l %5,%0\n" 128 - " jra 3b\n" 129 - " .previous\n" 130 - "\n" 131 - " .section __ex_table,\"a\"\n" 132 - " .align 4\n" 133 - " .long 1b,10b\n" 134 - " .previous" 135 - : "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c) 136 - : "i" (-EFAULT), "0" (count)); 137 - 138 - return res; 139 - } 140 - EXPORT_SYMBOL(strncpy_from_user); 141 - 142 - /* 143 - * Return the size of a string (including the ending 0) 144 - * 145 - * Return 0 on exception, a value greater than N if too long 146 - */ 147 - long strnlen_user(const char __user *src, long n) 148 - { 149 - char c; 150 - long res; 151 - 152 - asm volatile ("\n" 153 - "1: subq.l #1,%1\n" 154 - " jmi 3f\n" 155 - "2: "MOVES".b (%0)+,%2\n" 156 - " tst.b %2\n" 157 - " jne 1b\n" 158 - " jra 4f\n" 159 - "\n" 160 - "3: addq.l #1,%0\n" 161 - "4: sub.l %4,%0\n" 162 - "5:\n" 163 - " .section .fixup,\"ax\"\n" 164 - " .even\n" 165 - "20: sub.l %0,%0\n" 166 - " jra 5b\n" 167 - " .previous\n" 168 - "\n" 169 - " .section __ex_table,\"a\"\n" 170 - " .align 4\n" 171 - " .long 2b,20b\n" 172 - " .previous\n" 173 - : "=&a" (res), "+d" (n), "=&d" (c) 174 - : "0" (src), "r" (src)); 175 - 176 - return res; 177 - } 178 - EXPORT_SYMBOL(strnlen_user); 179 - 180 - /* 181 107 * Zero Userspace 182 108 */ 183 109
+4 -2
arch/m68k/platform/68328/timers.c
··· 53 53 #endif 54 54 55 55 static u32 m68328_tick_cnt; 56 + static irq_handler_t timer_interrupt; 56 57 57 58 /***************************************************************************/ 58 59 ··· 63 62 TSTAT &= 0; 64 63 65 64 m68328_tick_cnt += TICKS_PER_JIFFY; 66 - return arch_timer_interrupt(irq, dummy); 65 + return timer_interrupt(irq, dummy); 67 66 } 68 67 69 68 /***************************************************************************/ ··· 100 99 101 100 /***************************************************************************/ 102 101 103 - void hw_timer_init(void) 102 + void hw_timer_init(irq_handler_t handler) 104 103 { 105 104 /* disable timer 1 */ 106 105 TCTL = 0; ··· 116 115 /* Enable timer 1 */ 117 116 TCTL |= TCTL_TEN; 118 117 clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ); 118 + timer_interrupt = handler; 119 119 } 120 120 121 121 /***************************************************************************/
+5 -2
arch/m68k/platform/68360/config.c
··· 35 35 #define OSCILLATOR (unsigned long int)33000000 36 36 #endif 37 37 38 + static irq_handler_t timer_interrupt; 38 39 unsigned long int system_clock; 39 40 40 41 extern QUICC *pquicc; ··· 53 52 54 53 pquicc->timer_ter1 = 0x0002; /* clear timer event */ 55 54 56 - return arch_timer_interrupt(irq, dummy); 55 + return timer_interrupt(irq, dummy); 57 56 } 58 57 59 58 static struct irqaction m68360_timer_irq = { ··· 62 61 .handler = hw_tick, 63 62 }; 64 63 65 - void hw_timer_init(void) 64 + void hw_timer_init(irq_handler_t handler) 66 65 { 67 66 unsigned char prescaler; 68 67 unsigned short tgcr_save; ··· 94 93 pquicc->timer_trr1 = (system_clock/ prescaler) / HZ; /* reference count */ 95 94 96 95 pquicc->timer_ter1 = 0x0003; /* clear timer events */ 96 + 97 + timer_interrupt = handler; 97 98 98 99 /* enable timer 1 interrupt in CIMR */ 99 100 setup_irq(CPMVEC_TIMER1, &m68360_timer_irq);
+2 -1
arch/parisc/Makefile
··· 21 21 22 22 NM = sh $(srctree)/arch/parisc/nm 23 23 CHECKFLAGS += -D__hppa__=1 24 + LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) 24 25 25 26 MACHINE := $(shell uname -m) 26 27 ifeq ($(MACHINE),parisc*) ··· 80 79 kernel-$(CONFIG_HPUX) += hpux/ 81 80 82 81 core-y += $(addprefix arch/parisc/, $(kernel-y)) 83 - libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name` 82 + libs-y += arch/parisc/lib/ $(LIBGCC) 84 83 85 84 drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/ 86 85
+1
arch/parisc/include/asm/Kbuild
··· 1 1 include include/asm-generic/Kbuild.asm 2 2 3 3 header-y += pdc.h 4 + generic-y += word-at-a-time.h
+2
arch/parisc/include/asm/bug.h
··· 1 1 #ifndef _PARISC_BUG_H 2 2 #define _PARISC_BUG_H 3 3 4 + #include <linux/kernel.h> /* for BUGFLAG_TAINT */ 5 + 4 6 /* 5 7 * Tell the user there is some problem. 6 8 * The offending file and line are encoded in the __bug_table section.
+3
arch/powerpc/include/asm/hw_irq.h
··· 100 100 get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; 101 101 } 102 102 103 + /* include/linux/interrupt.h needs hard_irq_disable to be a macro */ 104 + #define hard_irq_disable hard_irq_disable 105 + 103 106 /* 104 107 * This is called by asynchronous interrupts to conditionally 105 108 * re-enable hard interrupts when soft-disabled after having
+5 -6
arch/powerpc/kernel/module_32.c
··· 176 176 177 177 static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) 178 178 { 179 - if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16) 180 - && entry->jump[1] == 0x396b0000 + (val & 0xffff)) 179 + if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16) 180 + && entry->jump[1] == 0x398c0000 + (val & 0xffff)) 181 181 return 1; 182 182 return 0; 183 183 } ··· 204 204 entry++; 205 205 } 206 206 207 - /* Stolen from Paul Mackerras as well... */ 208 - entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */ 209 - entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/ 210 - entry->jump[2] = 0x7d6903a6; /* mtctr r11 */ 207 + entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */ 208 + entry->jump[1] = 0x398c0000 + (val&0xffff); /* addi r12,r12,sym@l*/ 209 + entry->jump[2] = 0x7d8903a6; /* mtctr r12 */ 211 210 entry->jump[3] = 0x4e800420; /* bctr */ 212 211 213 212 DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
+11 -3
arch/powerpc/kernel/time.c
··· 475 475 struct pt_regs *old_regs; 476 476 u64 *next_tb = &__get_cpu_var(decrementers_next_tb); 477 477 struct clock_event_device *evt = &__get_cpu_var(decrementers); 478 + u64 now; 478 479 479 480 /* Ensure a positive value is written to the decrementer, or else 480 481 * some CPUs will continue to take decrementer exceptions. ··· 510 509 irq_work_run(); 511 510 } 512 511 513 - *next_tb = ~(u64)0; 514 - if (evt->event_handler) 515 - evt->event_handler(evt); 512 + now = get_tb_or_rtc(); 513 + if (now >= *next_tb) { 514 + *next_tb = ~(u64)0; 515 + if (evt->event_handler) 516 + evt->event_handler(evt); 517 + } else { 518 + now = *next_tb - now; 519 + if (now <= DECREMENTER_MAX) 520 + set_dec((int)now); 521 + } 516 522 517 523 #ifdef CONFIG_PPC64 518 524 /* collect purr register values often, for accurate calculations */
+2
arch/sh/Kconfig
··· 32 32 select GENERIC_SMP_IDLE_THREAD 33 33 select GENERIC_CLOCKEVENTS 34 34 select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST 35 + select GENERIC_STRNCPY_FROM_USER 36 + select GENERIC_STRNLEN_USER 35 37 help 36 38 The SuperH is a RISC processor targeted for use in embedded systems 37 39 and consumer electronics; it was also used in the Sega Dreamcast
+8 -8
arch/sh/Makefile
··· 9 9 # License. See the file "COPYING" in the main directory of this archive 10 10 # for more details. 11 11 # 12 + ifneq ($(SUBARCH),$(ARCH)) 13 + ifeq ($(CROSS_COMPILE),) 14 + CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-) 15 + endif 16 + endif 17 + 12 18 isa-y := any 13 19 isa-$(CONFIG_SH_DSP) := sh 14 20 isa-$(CONFIG_CPU_SH2) := sh2 ··· 112 106 KBUILD_DEFCONFIG := cayman_defconfig 113 107 endif 114 108 115 - ifneq ($(SUBARCH),$(ARCH)) 116 - ifeq ($(CROSS_COMPILE),) 117 - CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-) 118 - endif 119 - endif 120 - 121 109 ifdef CONFIG_CPU_LITTLE_ENDIAN 122 110 ld-bfd := elf32-$(UTS_MACHINE)-linux 123 - LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64' --oformat $(ld-bfd) 111 + LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd) 124 112 LDFLAGS += -EL 125 113 else 126 114 ld-bfd := elf32-$(UTS_MACHINE)big-linux 127 - LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64+4' --oformat $(ld-bfd) 115 + LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd) 128 116 LDFLAGS += -EB 129 117 endif 130 118
+34
arch/sh/include/asm/Kbuild
··· 1 1 include include/asm-generic/Kbuild.asm 2 2 3 + generic-y += bitsperlong.h 4 + generic-y += cputime.h 5 + generic-y += current.h 6 + generic-y += delay.h 7 + generic-y += div64.h 8 + generic-y += emergency-restart.h 9 + generic-y += errno.h 10 + generic-y += fcntl.h 11 + generic-y += ioctl.h 12 + generic-y += ipcbuf.h 13 + generic-y += irq_regs.h 14 + generic-y += kvm_para.h 15 + generic-y += local.h 16 + generic-y += local64.h 17 + generic-y += param.h 18 + generic-y += parport.h 19 + generic-y += percpu.h 20 + generic-y += poll.h 21 + generic-y += mman.h 22 + generic-y += msgbuf.h 23 + generic-y += resource.h 24 + generic-y += scatterlist.h 25 + generic-y += sembuf.h 26 + generic-y += serial.h 27 + generic-y += shmbuf.h 28 + generic-y += siginfo.h 29 + generic-y += sizes.h 30 + generic-y += socket.h 31 + generic-y += statfs.h 32 + generic-y += termbits.h 33 + generic-y += termios.h 34 + generic-y += ucontext.h 35 + generic-y += xor.h 36 + 3 37 header-y += cachectl.h 4 38 header-y += cpu-features.h 5 39 header-y += hw_breakpoint.h
-1
arch/sh/include/asm/bitsperlong.h
··· 1 - #include <asm-generic/bitsperlong.h>
-6
arch/sh/include/asm/cputime.h
··· 1 - #ifndef __SH_CPUTIME_H 2 - #define __SH_CPUTIME_H 3 - 4 - #include <asm-generic/cputime.h> 5 - 6 - #endif /* __SH_CPUTIME_H */
-1
arch/sh/include/asm/current.h
··· 1 - #include <asm-generic/current.h>
-1
arch/sh/include/asm/delay.h
··· 1 - #include <asm-generic/delay.h>
-1
arch/sh/include/asm/div64.h
··· 1 - #include <asm-generic/div64.h>
-6
arch/sh/include/asm/emergency-restart.h
··· 1 - #ifndef _ASM_EMERGENCY_RESTART_H 2 - #define _ASM_EMERGENCY_RESTART_H 3 - 4 - #include <asm-generic/emergency-restart.h> 5 - 6 - #endif /* _ASM_EMERGENCY_RESTART_H */
-6
arch/sh/include/asm/errno.h
··· 1 - #ifndef __ASM_SH_ERRNO_H 2 - #define __ASM_SH_ERRNO_H 3 - 4 - #include <asm-generic/errno.h> 5 - 6 - #endif /* __ASM_SH_ERRNO_H */
-1
arch/sh/include/asm/fcntl.h
··· 1 - #include <asm-generic/fcntl.h>
-1
arch/sh/include/asm/ioctl.h
··· 1 - #include <asm-generic/ioctl.h>
-1
arch/sh/include/asm/ipcbuf.h
··· 1 - #include <asm-generic/ipcbuf.h>
-1
arch/sh/include/asm/irq_regs.h
··· 1 - #include <asm-generic/irq_regs.h>
-1
arch/sh/include/asm/kvm_para.h
··· 1 - #include <asm-generic/kvm_para.h>
-7
arch/sh/include/asm/local.h
··· 1 - #ifndef __ASM_SH_LOCAL_H 2 - #define __ASM_SH_LOCAL_H 3 - 4 - #include <asm-generic/local.h> 5 - 6 - #endif /* __ASM_SH_LOCAL_H */ 7 -
-1
arch/sh/include/asm/local64.h
··· 1 - #include <asm-generic/local64.h>
-1
arch/sh/include/asm/mman.h
··· 1 - #include <asm-generic/mman.h>
-1
arch/sh/include/asm/msgbuf.h
··· 1 - #include <asm-generic/msgbuf.h>
-1
arch/sh/include/asm/param.h
··· 1 - #include <asm-generic/param.h>
-1
arch/sh/include/asm/parport.h
··· 1 - #include <asm-generic/parport.h>
-6
arch/sh/include/asm/percpu.h
··· 1 - #ifndef __ARCH_SH_PERCPU 2 - #define __ARCH_SH_PERCPU 3 - 4 - #include <asm-generic/percpu.h> 5 - 6 - #endif /* __ARCH_SH_PERCPU */
-1
arch/sh/include/asm/poll.h
··· 1 - #include <asm-generic/poll.h>
-6
arch/sh/include/asm/resource.h
··· 1 - #ifndef __ASM_SH_RESOURCE_H 2 - #define __ASM_SH_RESOURCE_H 3 - 4 - #include <asm-generic/resource.h> 5 - 6 - #endif /* __ASM_SH_RESOURCE_H */
-6
arch/sh/include/asm/scatterlist.h
··· 1 - #ifndef __ASM_SH_SCATTERLIST_H 2 - #define __ASM_SH_SCATTERLIST_H 3 - 4 - #include <asm-generic/scatterlist.h> 5 - 6 - #endif /* __ASM_SH_SCATTERLIST_H */
-1
arch/sh/include/asm/sembuf.h
··· 1 - #include <asm-generic/sembuf.h>
-1
arch/sh/include/asm/serial.h
··· 1 - #include <asm-generic/serial.h>
-1
arch/sh/include/asm/shmbuf.h
··· 1 - #include <asm-generic/shmbuf.h>
-6
arch/sh/include/asm/siginfo.h
··· 1 - #ifndef __ASM_SH_SIGINFO_H 2 - #define __ASM_SH_SIGINFO_H 3 - 4 - #include <asm-generic/siginfo.h> 5 - 6 - #endif /* __ASM_SH_SIGINFO_H */
-1
arch/sh/include/asm/sizes.h
··· 1 - #include <asm-generic/sizes.h>
-1
arch/sh/include/asm/socket.h
··· 1 - #include <asm-generic/socket.h>
-6
arch/sh/include/asm/statfs.h
··· 1 - #ifndef __ASM_SH_STATFS_H 2 - #define __ASM_SH_STATFS_H 3 - 4 - #include <asm-generic/statfs.h> 5 - 6 - #endif /* __ASM_SH_STATFS_H */
-1
arch/sh/include/asm/termbits.h
··· 1 - #include <asm-generic/termbits.h>
-1
arch/sh/include/asm/termios.h
··· 1 - #include <asm-generic/termios.h>
+7 -68
arch/sh/include/asm/uaccess.h
··· 25 25 (__chk_user_ptr(addr), \ 26 26 __access_ok((unsigned long __force)(addr), (size))) 27 27 28 + #define user_addr_max() (current_thread_info()->addr_limit.seg) 29 + 28 30 /* 29 31 * Uh, these should become the main single-value transfer routines ... 30 32 * They automatically use the right size if we just have the right ··· 102 100 # include "uaccess_64.h" 103 101 #endif 104 102 103 + extern long strncpy_from_user(char *dest, const char __user *src, long count); 104 + 105 + extern __must_check long strlen_user(const char __user *str); 106 + extern __must_check long strnlen_user(const char __user *str, long n); 107 + 105 108 /* Generic arbitrary sized copy. */ 106 109 /* Return the number of bytes NOT copied */ 107 110 __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); ··· 144 137 __cl_size; \ 145 138 }) 146 139 147 - /** 148 - * strncpy_from_user: - Copy a NUL terminated string from userspace. 149 - * @dst: Destination address, in kernel space. This buffer must be at 150 - * least @count bytes long. 151 - * @src: Source address, in user space. 152 - * @count: Maximum number of bytes to copy, including the trailing NUL. 153 - * 154 - * Copies a NUL-terminated string from userspace to kernel space. 155 - * 156 - * On success, returns the length of the string (not including the trailing 157 - * NUL). 158 - * 159 - * If access to userspace fails, returns -EFAULT (some data may have been 160 - * copied). 161 - * 162 - * If @count is smaller than the length of the string, copies @count bytes 163 - * and returns @count. 
164 - */ 165 - #define strncpy_from_user(dest,src,count) \ 166 - ({ \ 167 - unsigned long __sfu_src = (unsigned long)(src); \ 168 - int __sfu_count = (int)(count); \ 169 - long __sfu_res = -EFAULT; \ 170 - \ 171 - if (__access_ok(__sfu_src, __sfu_count)) \ 172 - __sfu_res = __strncpy_from_user((unsigned long)(dest), \ 173 - __sfu_src, __sfu_count); \ 174 - \ 175 - __sfu_res; \ 176 - }) 177 - 178 140 static inline unsigned long 179 141 copy_from_user(void *to, const void __user *from, unsigned long n) 180 142 { ··· 167 191 168 192 return __copy_size; 169 193 } 170 - 171 - /** 172 - * strnlen_user: - Get the size of a string in user space. 173 - * @s: The string to measure. 174 - * @n: The maximum valid length 175 - * 176 - * Context: User context only. This function may sleep. 177 - * 178 - * Get the size of a NUL-terminated string in user space. 179 - * 180 - * Returns the size of the string INCLUDING the terminating NUL. 181 - * On exception, returns 0. 182 - * If the string is too long, returns a value greater than @n. 183 - */ 184 - static inline long strnlen_user(const char __user *s, long n) 185 - { 186 - if (!__addr_ok(s)) 187 - return 0; 188 - else 189 - return __strnlen_user(s, n); 190 - } 191 - 192 - /** 193 - * strlen_user: - Get the size of a string in user space. 194 - * @str: The string to measure. 195 - * 196 - * Context: User context only. This function may sleep. 197 - * 198 - * Get the size of a NUL-terminated string in user space. 199 - * 200 - * Returns the size of the string INCLUDING the terminating NUL. 201 - * On exception, returns 0. 202 - * 203 - * If there is a limit on the length of a valid string, you may wish to 204 - * consider using strnlen_user() instead. 205 - */ 206 - #define strlen_user(str) strnlen_user(str, ~0UL >> 1) 207 194 208 195 /* 209 196 * The exception table consists of pairs of addresses: the first is the
-75
arch/sh/include/asm/uaccess_32.h
··· 170 170 171 171 extern void __put_user_unknown(void); 172 172 173 - static inline int 174 - __strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count) 175 - { 176 - __kernel_size_t res; 177 - unsigned long __dummy, _d, _s, _c; 178 - 179 - __asm__ __volatile__( 180 - "9:\n" 181 - "mov.b @%2+, %1\n\t" 182 - "cmp/eq #0, %1\n\t" 183 - "bt/s 2f\n" 184 - "1:\n" 185 - "mov.b %1, @%3\n\t" 186 - "dt %4\n\t" 187 - "bf/s 9b\n\t" 188 - " add #1, %3\n\t" 189 - "2:\n\t" 190 - "sub %4, %0\n" 191 - "3:\n" 192 - ".section .fixup,\"ax\"\n" 193 - "4:\n\t" 194 - "mov.l 5f, %1\n\t" 195 - "jmp @%1\n\t" 196 - " mov %9, %0\n\t" 197 - ".balign 4\n" 198 - "5: .long 3b\n" 199 - ".previous\n" 200 - ".section __ex_table,\"a\"\n" 201 - " .balign 4\n" 202 - " .long 9b,4b\n" 203 - ".previous" 204 - : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c) 205 - : "0" (__count), "2" (__src), "3" (__dest), "4" (__count), 206 - "i" (-EFAULT) 207 - : "memory", "t"); 208 - 209 - return res; 210 - } 211 - 212 - /* 213 - * Return the size of a string (including the ending 0 even when we have 214 - * exceeded the maximum string length). 215 - */ 216 - static inline long __strnlen_user(const char __user *__s, long __n) 217 - { 218 - unsigned long res; 219 - unsigned long __dummy; 220 - 221 - __asm__ __volatile__( 222 - "1:\t" 223 - "mov.b @(%0,%3), %1\n\t" 224 - "cmp/eq %4, %0\n\t" 225 - "bt/s 2f\n\t" 226 - " add #1, %0\n\t" 227 - "tst %1, %1\n\t" 228 - "bf 1b\n\t" 229 - "2:\n" 230 - ".section .fixup,\"ax\"\n" 231 - "3:\n\t" 232 - "mov.l 4f, %1\n\t" 233 - "jmp @%1\n\t" 234 - " mov #0, %0\n" 235 - ".balign 4\n" 236 - "4: .long 2b\n" 237 - ".previous\n" 238 - ".section __ex_table,\"a\"\n" 239 - " .balign 4\n" 240 - " .long 1b,3b\n" 241 - ".previous" 242 - : "=z" (res), "=&r" (__dummy) 243 - : "0" (0), "r" (__s), "r" (__n) 244 - : "t"); 245 - return res; 246 - } 247 - 248 173 #endif /* __ASM_SH_UACCESS_32_H */
-4
arch/sh/include/asm/uaccess_64.h
··· 84 84 extern long __put_user_asm_q(void *, long); 85 85 extern void __put_user_unknown(void); 86 86 87 - extern long __strnlen_user(const char *__s, long __n); 88 - extern int __strncpy_from_user(unsigned long __dest, 89 - unsigned long __user __src, int __count); 90 - 91 87 #endif /* __ASM_SH_UACCESS_64_H */
-1
arch/sh/include/asm/ucontext.h
··· 1 - #include <asm-generic/ucontext.h>
+53
arch/sh/include/asm/word-at-a-time.h
··· 1 + #ifndef __ASM_SH_WORD_AT_A_TIME_H 2 + #define __ASM_SH_WORD_AT_A_TIME_H 3 + 4 + #ifdef CONFIG_CPU_BIG_ENDIAN 5 + # include <asm-generic/word-at-a-time.h> 6 + #else 7 + /* 8 + * Little-endian version cribbed from x86. 9 + */ 10 + struct word_at_a_time { 11 + const unsigned long one_bits, high_bits; 12 + }; 13 + 14 + #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } 15 + 16 + /* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ 17 + static inline long count_masked_bytes(long mask) 18 + { 19 + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ 20 + long a = (0x0ff0001+mask) >> 23; 21 + /* Fix the 1 for 00 case */ 22 + return a & mask; 23 + } 24 + 25 + /* Return nonzero if it has a zero */ 26 + static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) 27 + { 28 + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; 29 + *bits = mask; 30 + return mask; 31 + } 32 + 33 + static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) 34 + { 35 + return bits; 36 + } 37 + 38 + static inline unsigned long create_zero_mask(unsigned long bits) 39 + { 40 + bits = (bits - 1) & ~bits; 41 + return bits >> 7; 42 + } 43 + 44 + /* The mask we created is directly usable as a bytemask */ 45 + #define zero_bytemask(mask) (mask) 46 + 47 + static inline unsigned long find_zero(unsigned long mask) 48 + { 49 + return count_masked_bytes(mask); 50 + } 51 + #endif 52 + 53 + #endif
-1
arch/sh/include/asm/xor.h
··· 1 - #include <asm-generic/xor.h>
-28
arch/sh/include/cpu-sh2a/cpu/ubc.h
··· 1 - /* 2 - * SH-2A UBC definitions 3 - * 4 - * Copyright (C) 2008 Kieran Bingham 5 - * 6 - * This file is subject to the terms and conditions of the GNU General Public 7 - * License. See the file "COPYING" in the main directory of this archive 8 - * for more details. 9 - */ 10 - 11 - #ifndef __ASM_CPU_SH2A_UBC_H 12 - #define __ASM_CPU_SH2A_UBC_H 13 - 14 - #define UBC_BARA 0xfffc0400 15 - #define UBC_BAMRA 0xfffc0404 16 - #define UBC_BBRA 0xfffc04a0 /* 16 bit access */ 17 - #define UBC_BDRA 0xfffc0408 18 - #define UBC_BDMRA 0xfffc040c 19 - 20 - #define UBC_BARB 0xfffc0410 21 - #define UBC_BAMRB 0xfffc0414 22 - #define UBC_BBRB 0xfffc04b0 /* 16 bit access */ 23 - #define UBC_BDRB 0xfffc0418 24 - #define UBC_BDMRB 0xfffc041c 25 - 26 - #define UBC_BRCR 0xfffc04c0 27 - 28 - #endif /* __ASM_CPU_SH2A_UBC_H */
-82
arch/sh/kernel/cpu/sh5/entry.S
··· 1569 1569 #endif /* CONFIG_MMU */ 1570 1570 1571 1571 /* 1572 - * int __strncpy_from_user(unsigned long __dest, unsigned long __src, 1573 - * int __count) 1574 - * 1575 - * Inputs: 1576 - * (r2) target address 1577 - * (r3) source address 1578 - * (r4) maximum size in bytes 1579 - * 1580 - * Ouputs: 1581 - * (*r2) copied data 1582 - * (r2) -EFAULT (in case of faulting) 1583 - * copied data (otherwise) 1584 - */ 1585 - .global __strncpy_from_user 1586 - __strncpy_from_user: 1587 - pta ___strncpy_from_user1, tr0 1588 - pta ___strncpy_from_user_done, tr1 1589 - or r4, ZERO, r5 /* r5 = original count */ 1590 - beq/u r4, r63, tr1 /* early exit if r4==0 */ 1591 - movi -(EFAULT), r6 /* r6 = reply, no real fixup */ 1592 - or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */ 1593 - 1594 - ___strncpy_from_user1: 1595 - ld.b r3, 0, r7 /* Fault address: only in reading */ 1596 - st.b r2, 0, r7 1597 - addi r2, 1, r2 1598 - addi r3, 1, r3 1599 - beq/u ZERO, r7, tr1 1600 - addi r4, -1, r4 /* return real number of copied bytes */ 1601 - bne/l ZERO, r4, tr0 1602 - 1603 - ___strncpy_from_user_done: 1604 - sub r5, r4, r6 /* If done, return copied */ 1605 - 1606 - ___strncpy_from_user_exit: 1607 - or r6, ZERO, r2 1608 - ptabs LINK, tr0 1609 - blink tr0, ZERO 1610 - 1611 - /* 1612 - * extern long __strnlen_user(const char *__s, long __n) 1613 - * 1614 - * Inputs: 1615 - * (r2) source address 1616 - * (r3) source size in bytes 1617 - * 1618 - * Ouputs: 1619 - * (r2) -EFAULT (in case of faulting) 1620 - * string length (otherwise) 1621 - */ 1622 - .global __strnlen_user 1623 - __strnlen_user: 1624 - pta ___strnlen_user_set_reply, tr0 1625 - pta ___strnlen_user1, tr1 1626 - or ZERO, ZERO, r5 /* r5 = counter */ 1627 - movi -(EFAULT), r6 /* r6 = reply, no real fixup */ 1628 - or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */ 1629 - beq r3, ZERO, tr0 1630 - 1631 - ___strnlen_user1: 1632 - ldx.b r2, r5, r7 /* Fault address: only in reading */ 1633 - addi r3, -1, r3 /* No real 
fixup */ 1634 - addi r5, 1, r5 1635 - beq r3, ZERO, tr0 1636 - bne r7, ZERO, tr1 1637 - ! The line below used to be active. This meant led to a junk byte lying between each pair 1638 - ! of entries in the argv & envp structures in memory. Whilst the program saw the right data 1639 - ! via the argv and envp arguments to main, it meant the 'flat' representation visible through 1640 - ! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example. 1641 - ! addi r5, 1, r5 /* Include '\0' */ 1642 - 1643 - ___strnlen_user_set_reply: 1644 - or r5, ZERO, r6 /* If done, return counter */ 1645 - 1646 - ___strnlen_user_exit: 1647 - or r6, ZERO, r2 1648 - ptabs LINK, tr0 1649 - blink tr0, ZERO 1650 - 1651 - /* 1652 1572 * extern long __get_user_asm_?(void *val, long addr) 1653 1573 * 1654 1574 * Inputs: ··· 1902 1982 .long ___copy_user2, ___copy_user_exit 1903 1983 .long ___clear_user1, ___clear_user_exit 1904 1984 #endif 1905 - .long ___strncpy_from_user1, ___strncpy_from_user_exit 1906 - .long ___strnlen_user1, ___strnlen_user_exit 1907 1985 .long ___get_user_asm_b1, ___get_user_asm_b_exit 1908 1986 .long ___get_user_asm_w1, ___get_user_asm_w_exit 1909 1987 .long ___get_user_asm_l1, ___get_user_asm_l_exit
+1
arch/sh/kernel/process.c
··· 4 4 #include <linux/sched.h> 5 5 #include <linux/export.h> 6 6 #include <linux/stackprotector.h> 7 + #include <asm/fpu.h> 7 8 8 9 struct kmem_cache *task_xstate_cachep = NULL; 9 10 unsigned int xstate_size;
+1
arch/sh/kernel/process_64.c
··· 33 33 #include <asm/switch_to.h> 34 34 35 35 struct task_struct *last_task_used_math = NULL; 36 + struct pt_regs fake_swapper_regs = { 0, }; 36 37 37 38 void show_regs(struct pt_regs *regs) 38 39 {
-2
arch/sh/kernel/sh_ksyms_64.c
··· 32 32 EXPORT_SYMBOL(__get_user_asm_w); 33 33 EXPORT_SYMBOL(__get_user_asm_l); 34 34 EXPORT_SYMBOL(__get_user_asm_q); 35 - EXPORT_SYMBOL(__strnlen_user); 36 - EXPORT_SYMBOL(__strncpy_from_user); 37 35 EXPORT_SYMBOL(__clear_user); 38 36 EXPORT_SYMBOL(copy_page); 39 37 EXPORT_SYMBOL(__copy_user);
-59
arch/sparc/include/asm/cmt.h
··· 1 - #ifndef _SPARC64_CMT_H 2 - #define _SPARC64_CMT_H 3 - 4 - /* cmt.h: Chip Multi-Threading register definitions 5 - * 6 - * Copyright (C) 2004 David S. Miller (davem@redhat.com) 7 - */ 8 - 9 - /* ASI_CORE_ID - private */ 10 - #define LP_ID 0x0000000000000010UL 11 - #define LP_ID_MAX 0x00000000003f0000UL 12 - #define LP_ID_ID 0x000000000000003fUL 13 - 14 - /* ASI_INTR_ID - private */ 15 - #define LP_INTR_ID 0x0000000000000000UL 16 - #define LP_INTR_ID_ID 0x00000000000003ffUL 17 - 18 - /* ASI_CESR_ID - private */ 19 - #define CESR_ID 0x0000000000000040UL 20 - #define CESR_ID_ID 0x00000000000000ffUL 21 - 22 - /* ASI_CORE_AVAILABLE - shared */ 23 - #define LP_AVAIL 0x0000000000000000UL 24 - #define LP_AVAIL_1 0x0000000000000002UL 25 - #define LP_AVAIL_0 0x0000000000000001UL 26 - 27 - /* ASI_CORE_ENABLE_STATUS - shared */ 28 - #define LP_ENAB_STAT 0x0000000000000010UL 29 - #define LP_ENAB_STAT_1 0x0000000000000002UL 30 - #define LP_ENAB_STAT_0 0x0000000000000001UL 31 - 32 - /* ASI_CORE_ENABLE - shared */ 33 - #define LP_ENAB 0x0000000000000020UL 34 - #define LP_ENAB_1 0x0000000000000002UL 35 - #define LP_ENAB_0 0x0000000000000001UL 36 - 37 - /* ASI_CORE_RUNNING - shared */ 38 - #define LP_RUNNING_RW 0x0000000000000050UL 39 - #define LP_RUNNING_W1S 0x0000000000000060UL 40 - #define LP_RUNNING_W1C 0x0000000000000068UL 41 - #define LP_RUNNING_1 0x0000000000000002UL 42 - #define LP_RUNNING_0 0x0000000000000001UL 43 - 44 - /* ASI_CORE_RUNNING_STAT - shared */ 45 - #define LP_RUN_STAT 0x0000000000000058UL 46 - #define LP_RUN_STAT_1 0x0000000000000002UL 47 - #define LP_RUN_STAT_0 0x0000000000000001UL 48 - 49 - /* ASI_XIR_STEERING - shared */ 50 - #define LP_XIR_STEER 0x0000000000000030UL 51 - #define LP_XIR_STEER_1 0x0000000000000002UL 52 - #define LP_XIR_STEER_0 0x0000000000000001UL 53 - 54 - /* ASI_CMT_ERROR_STEERING - shared */ 55 - #define CMT_ER_STEER 0x0000000000000040UL 56 - #define CMT_ER_STEER_1 0x0000000000000002UL 57 - #define CMT_ER_STEER_0 
0x0000000000000001UL 58 - 59 - #endif /* _SPARC64_CMT_H */
-67
arch/sparc/include/asm/mpmbox.h
··· 1 - /* 2 - * mpmbox.h: Interface and defines for the OpenProm mailbox 3 - * facilities for MP machines under Linux. 4 - * 5 - * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 6 - */ 7 - 8 - #ifndef _SPARC_MPMBOX_H 9 - #define _SPARC_MPMBOX_H 10 - 11 - /* The prom allocates, for each CPU on the machine an unsigned 12 - * byte in physical ram. You probe the device tree prom nodes 13 - * for these values. The purpose of this byte is to be able to 14 - * pass messages from one cpu to another. 15 - */ 16 - 17 - /* These are the main message types we have to look for in our 18 - * Cpu mailboxes, based upon these values we decide what course 19 - * of action to take. 20 - */ 21 - 22 - /* The CPU is executing code in the kernel. */ 23 - #define MAILBOX_ISRUNNING 0xf0 24 - 25 - /* Another CPU called romvec->pv_exit(), you should call 26 - * prom_stopcpu() when you see this in your mailbox. 27 - */ 28 - #define MAILBOX_EXIT 0xfb 29 - 30 - /* Another CPU called romvec->pv_enter(), you should call 31 - * prom_cpuidle() when this is seen. 32 - */ 33 - #define MAILBOX_GOSPIN 0xfc 34 - 35 - /* Another CPU has hit a breakpoint either into kadb or the prom 36 - * itself. Just like MAILBOX_GOSPIN, you should call prom_cpuidle() 37 - * at this point. 38 - */ 39 - #define MAILBOX_BPT_SPIN 0xfd 40 - 41 - /* Oh geese, some other nitwit got a damn watchdog reset. The party's 42 - * over so go call prom_stopcpu(). 43 - */ 44 - #define MAILBOX_WDOG_STOP 0xfe 45 - 46 - #ifndef __ASSEMBLY__ 47 - 48 - /* Handy macro's to determine a cpu's state. */ 49 - 50 - /* Is the cpu still in Power On Self Test? */ 51 - #define MBOX_POST_P(letter) ((letter) >= 0x00 && (letter) <= 0x7f) 52 - 53 - /* Is the cpu at the 'ok' prompt of the PROM? */ 54 - #define MBOX_PROMPROMPT_P(letter) ((letter) >= 0x80 && (letter) <= 0x8f) 55 - 56 - /* Is the cpu spinning in the PROM? */ 57 - #define MBOX_PROMSPIN_P(letter) ((letter) >= 0x90 && (letter) <= 0xef) 58 - 59 - /* Sanity check... 
This is junk mail, throw it out. */ 60 - #define MBOX_BOGON_P(letter) ((letter) >= 0xf1 && (letter) <= 0xfa) 61 - 62 - /* Is the cpu actively running an application/kernel-code? */ 63 - #define MBOX_RUNNING_P(letter) ((letter) == MAILBOX_ISRUNNING) 64 - 65 - #endif /* !(__ASSEMBLY__) */ 66 - 67 - #endif /* !(_SPARC_MPMBOX_H) */
-5
arch/tile/include/asm/thread_info.h
··· 91 91 /* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */ 92 92 extern void _cpu_idle(void); 93 93 94 - /* Switch boot idle thread to a freshly-allocated stack and free old stack. */ 95 - extern void cpu_idle_on_new_stack(struct thread_info *old_ti, 96 - unsigned long new_sp, 97 - unsigned long new_ss10); 98 - 99 94 #else /* __ASSEMBLY__ */ 100 95 101 96 /*
+1 -1
arch/tile/include/asm/uaccess.h
··· 146 146 #ifdef __tilegx__ 147 147 #define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret) 148 148 #define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret) 149 - #define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret) 149 + #define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret) 150 150 #define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret) 151 151 #else 152 152 #define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
-14
arch/tile/kernel/entry.S
··· 68 68 jrp lr /* keep backtracer happy */ 69 69 STD_ENDPROC(KBacktraceIterator_init_current) 70 70 71 - /* 72 - * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then 73 - * free the old stack (passed in r0) and re-invoke cpu_idle(). 74 - * We update sp and ksp0 simultaneously to avoid backtracer warnings. 75 - */ 76 - STD_ENTRY(cpu_idle_on_new_stack) 77 - { 78 - move sp, r1 79 - mtspr SPR_SYSTEM_SAVE_K_0, r2 80 - } 81 - jal free_thread_info 82 - j cpu_idle 83 - STD_ENDPROC(cpu_idle_on_new_stack) 84 - 85 71 /* Loop forever on a nap during SMP boot. */ 86 72 STD_ENTRY(smp_nap) 87 73 nap
+1
arch/tile/kernel/setup.c
··· 29 29 #include <linux/smp.h> 30 30 #include <linux/timex.h> 31 31 #include <linux/hugetlb.h> 32 + #include <linux/start_kernel.h> 32 33 #include <asm/setup.h> 33 34 #include <asm/sections.h> 34 35 #include <asm/cacheflush.h>
+31 -11
arch/x86/boot/header.S
··· 94 94 95 95 .section ".bsdata", "a" 96 96 bugger_off_msg: 97 - .ascii "Direct booting from floppy is no longer supported.\r\n" 98 - .ascii "Please use a boot loader program instead.\r\n" 97 + .ascii "Direct floppy boot is not supported. " 98 + .ascii "Use a boot loader program instead.\r\n" 99 99 .ascii "\n" 100 - .ascii "Remove disk and press any key to reboot . . .\r\n" 100 + .ascii "Remove disk and press any key to reboot ...\r\n" 101 101 .byte 0 102 102 103 103 #ifdef CONFIG_EFI_STUB ··· 111 111 #else 112 112 .word 0x8664 # x86-64 113 113 #endif 114 - .word 2 # nr_sections 114 + .word 3 # nr_sections 115 115 .long 0 # TimeDateStamp 116 116 .long 0 # PointerToSymbolTable 117 117 .long 1 # NumberOfSymbols ··· 158 158 #else 159 159 .quad 0 # ImageBase 160 160 #endif 161 - .long 0x1000 # SectionAlignment 162 - .long 0x200 # FileAlignment 161 + .long 0x20 # SectionAlignment 162 + .long 0x20 # FileAlignment 163 163 .word 0 # MajorOperatingSystemVersion 164 164 .word 0 # MinorOperatingSystemVersion 165 165 .word 0 # MajorImageVersion ··· 200 200 201 201 # Section table 202 202 section_table: 203 - .ascii ".text" 204 - .byte 0 203 + # 204 + # The offset & size fields are filled in by build.c. 205 + # 206 + .ascii ".setup" 205 207 .byte 0 206 208 .byte 0 207 209 .long 0 ··· 219 217 220 218 # 221 219 # The EFI application loader requires a relocation section 222 - # because EFI applications must be relocatable. But since 223 - # we don't need the loader to fixup any relocs for us, we 224 - # just create an empty (zero-length) .reloc section header. 220 + # because EFI applications must be relocatable. The .reloc 221 + # offset & size fields are filled in by build.c. 225 222 # 226 223 .ascii ".reloc" 227 224 .byte 0 ··· 234 233 .word 0 # NumberOfRelocations 235 234 .word 0 # NumberOfLineNumbers 236 235 .long 0x42100040 # Characteristics (section flags) 236 + 237 + # 238 + # The offset & size fields are filled in by build.c. 
239 + # 240 + .ascii ".text" 241 + .byte 0 242 + .byte 0 243 + .byte 0 244 + .long 0 245 + .long 0x0 # startup_{32,64} 246 + .long 0 # Size of initialized data 247 + # on disk 248 + .long 0x0 # startup_{32,64} 249 + .long 0 # PointerToRelocations 250 + .long 0 # PointerToLineNumbers 251 + .word 0 # NumberOfRelocations 252 + .word 0 # NumberOfLineNumbers 253 + .long 0x60500020 # Characteristics (section flags) 254 + 237 255 #endif /* CONFIG_EFI_STUB */ 238 256 239 257 # Kernel attributes; used by setup. This is part 1 of the
+109 -63
arch/x86/boot/tools/build.c
··· 50 50 u8 buf[SETUP_SECT_MAX*512]; 51 51 int is_big_kernel; 52 52 53 + #define PECOFF_RELOC_RESERVE 0x20 54 + 53 55 /*----------------------------------------------------------------------*/ 54 56 55 57 static const u32 crctab32[] = { ··· 135 133 die("Usage: build setup system [> image]"); 136 134 } 137 135 136 + #ifdef CONFIG_EFI_STUB 137 + 138 + static void update_pecoff_section_header(char *section_name, u32 offset, u32 size) 139 + { 140 + unsigned int pe_header; 141 + unsigned short num_sections; 142 + u8 *section; 143 + 144 + pe_header = get_unaligned_le32(&buf[0x3c]); 145 + num_sections = get_unaligned_le16(&buf[pe_header + 6]); 146 + 147 + #ifdef CONFIG_X86_32 148 + section = &buf[pe_header + 0xa8]; 149 + #else 150 + section = &buf[pe_header + 0xb8]; 151 + #endif 152 + 153 + while (num_sections > 0) { 154 + if (strncmp((char*)section, section_name, 8) == 0) { 155 + /* section header size field */ 156 + put_unaligned_le32(size, section + 0x8); 157 + 158 + /* section header vma field */ 159 + put_unaligned_le32(offset, section + 0xc); 160 + 161 + /* section header 'size of initialised data' field */ 162 + put_unaligned_le32(size, section + 0x10); 163 + 164 + /* section header 'file offset' field */ 165 + put_unaligned_le32(offset, section + 0x14); 166 + 167 + break; 168 + } 169 + section += 0x28; 170 + num_sections--; 171 + } 172 + } 173 + 174 + static void update_pecoff_setup_and_reloc(unsigned int size) 175 + { 176 + u32 setup_offset = 0x200; 177 + u32 reloc_offset = size - PECOFF_RELOC_RESERVE; 178 + u32 setup_size = reloc_offset - setup_offset; 179 + 180 + update_pecoff_section_header(".setup", setup_offset, setup_size); 181 + update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE); 182 + 183 + /* 184 + * Modify .reloc section contents with a single entry. The 185 + * relocation is applied to offset 10 of the relocation section. 
186 + */ 187 + put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]); 188 + put_unaligned_le32(10, &buf[reloc_offset + 4]); 189 + } 190 + 191 + static void update_pecoff_text(unsigned int text_start, unsigned int file_sz) 192 + { 193 + unsigned int pe_header; 194 + unsigned int text_sz = file_sz - text_start; 195 + 196 + pe_header = get_unaligned_le32(&buf[0x3c]); 197 + 198 + /* Size of image */ 199 + put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); 200 + 201 + /* 202 + * Size of code: Subtract the size of the first sector (512 bytes) 203 + * which includes the header. 204 + */ 205 + put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]); 206 + 207 + #ifdef CONFIG_X86_32 208 + /* 209 + * Address of entry point. 210 + * 211 + * The EFI stub entry point is +16 bytes from the start of 212 + * the .text section. 213 + */ 214 + put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]); 215 + #else 216 + /* 217 + * Address of entry point. startup_32 is at the beginning and 218 + * the 64-bit entry point (startup_64) is always 512 bytes 219 + * after. 
The EFI stub entry point is 16 bytes after that, as 220 + * the first instruction allows legacy loaders to jump over 221 + * the EFI stub initialisation 222 + */ 223 + put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]); 224 + #endif /* CONFIG_X86_32 */ 225 + 226 + update_pecoff_section_header(".text", text_start, text_sz); 227 + } 228 + 229 + #endif /* CONFIG_EFI_STUB */ 230 + 138 231 int main(int argc, char ** argv) 139 232 { 140 - #ifdef CONFIG_EFI_STUB 141 - unsigned int file_sz, pe_header; 142 - #endif 143 233 unsigned int i, sz, setup_sectors; 144 234 int c; 145 235 u32 sys_size; ··· 257 163 die("Boot block hasn't got boot flag (0xAA55)"); 258 164 fclose(file); 259 165 166 + #ifdef CONFIG_EFI_STUB 167 + /* Reserve 0x20 bytes for .reloc section */ 168 + memset(buf+c, 0, PECOFF_RELOC_RESERVE); 169 + c += PECOFF_RELOC_RESERVE; 170 + #endif 171 + 260 172 /* Pad unused space with zeros */ 261 173 setup_sectors = (c + 511) / 512; 262 174 if (setup_sectors < SETUP_SECT_MIN) 263 175 setup_sectors = SETUP_SECT_MIN; 264 176 i = setup_sectors*512; 265 177 memset(buf+c, 0, i-c); 178 + 179 + #ifdef CONFIG_EFI_STUB 180 + update_pecoff_setup_and_reloc(i); 181 + #endif 266 182 267 183 /* Set the default root device */ 268 184 put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]); ··· 298 194 put_unaligned_le32(sys_size, &buf[0x1f4]); 299 195 300 196 #ifdef CONFIG_EFI_STUB 301 - file_sz = sz + i + ((sys_size * 16) - sz); 302 - 303 - pe_header = get_unaligned_le32(&buf[0x3c]); 304 - 305 - /* Size of image */ 306 - put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); 307 - 308 - /* 309 - * Subtract the size of the first section (512 bytes) which 310 - * includes the header and .reloc section. The remaining size 311 - * is that of the .text section. 312 - */ 313 - file_sz -= 512; 314 - 315 - /* Size of code */ 316 - put_unaligned_le32(file_sz, &buf[pe_header + 0x1c]); 317 - 318 - #ifdef CONFIG_X86_32 319 - /* 320 - * Address of entry point. 
321 - * 322 - * The EFI stub entry point is +16 bytes from the start of 323 - * the .text section. 324 - */ 325 - put_unaligned_le32(i + 16, &buf[pe_header + 0x28]); 326 - 327 - /* .text size */ 328 - put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]); 329 - 330 - /* .text vma */ 331 - put_unaligned_le32(0x200, &buf[pe_header + 0xb4]); 332 - 333 - /* .text size of initialised data */ 334 - put_unaligned_le32(file_sz, &buf[pe_header + 0xb8]); 335 - 336 - /* .text file offset */ 337 - put_unaligned_le32(0x200, &buf[pe_header + 0xbc]); 338 - #else 339 - /* 340 - * Address of entry point. startup_32 is at the beginning and 341 - * the 64-bit entry point (startup_64) is always 512 bytes 342 - * after. The EFI stub entry point is 16 bytes after that, as 343 - * the first instruction allows legacy loaders to jump over 344 - * the EFI stub initialisation 345 - */ 346 - put_unaligned_le32(i + 528, &buf[pe_header + 0x28]); 347 - 348 - /* .text size */ 349 - put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]); 350 - 351 - /* .text vma */ 352 - put_unaligned_le32(0x200, &buf[pe_header + 0xc4]); 353 - 354 - /* .text size of initialised data */ 355 - put_unaligned_le32(file_sz, &buf[pe_header + 0xc8]); 356 - 357 - /* .text file offset */ 358 - put_unaligned_le32(0x200, &buf[pe_header + 0xcc]); 359 - #endif /* CONFIG_X86_32 */ 360 - #endif /* CONFIG_EFI_STUB */ 197 + update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); 198 + #endif 361 199 362 200 crc = partial_crc32(buf, i, crc); 363 201 if (fwrite(buf, 1, i, stdout) != i)
+4 -2
arch/x86/crypto/aesni-intel_asm.S
··· 2460 2460 pxor IN3, STATE4 2461 2461 movaps IN4, IV 2462 2462 #else 2463 - pxor (INP), STATE2 2464 - pxor 0x10(INP), STATE3 2465 2463 pxor IN1, STATE4 2466 2464 movaps IN2, IV 2465 + movups (INP), IN1 2466 + pxor IN1, STATE2 2467 + movups 0x10(INP), IN2 2468 + pxor IN2, STATE3 2467 2469 #endif 2468 2470 movups STATE1, (OUTP) 2469 2471 movups STATE2, 0x10(OUTP)
+14
arch/x86/include/asm/nmi.h
··· 54 54 __register_nmi_handler((t), &fn##_na); \ 55 55 }) 56 56 57 + /* 58 + * For special handlers that register/unregister in the 59 + * init section only. This should be considered rare. 60 + */ 61 + #define register_nmi_handler_initonly(t, fn, fg, n) \ 62 + ({ \ 63 + static struct nmiaction fn##_na __initdata = { \ 64 + .handler = (fn), \ 65 + .name = (n), \ 66 + .flags = (fg), \ 67 + }; \ 68 + __register_nmi_handler((t), &fn##_na); \ 69 + }) 70 + 57 71 int __register_nmi_handler(unsigned int, struct nmiaction *); 58 72 59 73 void unregister_nmi_handler(unsigned int, const char *);
+6 -6
arch/x86/include/asm/uaccess.h
··· 33 33 #define segment_eq(a, b) ((a).seg == (b).seg) 34 34 35 35 #define user_addr_max() (current_thread_info()->addr_limit.seg) 36 - #define __addr_ok(addr) \ 37 - ((unsigned long __force)(addr) < \ 38 - (current_thread_info()->addr_limit.seg)) 36 + #define __addr_ok(addr) \ 37 + ((unsigned long __force)(addr) < user_addr_max()) 39 38 40 39 /* 41 40 * Test whether a block of memory is a valid user space address. ··· 46 47 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... 47 48 */ 48 49 49 - #define __range_not_ok(addr, size) \ 50 + #define __range_not_ok(addr, size, limit) \ 50 51 ({ \ 51 52 unsigned long flag, roksum; \ 52 53 __chk_user_ptr(addr); \ 53 54 asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ 54 55 : "=&r" (flag), "=r" (roksum) \ 55 56 : "1" (addr), "g" ((long)(size)), \ 56 - "rm" (current_thread_info()->addr_limit.seg)); \ 57 + "rm" (limit)); \ 57 58 flag; \ 58 59 }) 59 60 ··· 76 77 * checks that the pointer is in the user space range - after calling 77 78 * this function, memory access functions may still return -EFAULT. 78 79 */ 79 - #define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) 80 + #define access_ok(type, addr, size) \ 81 + (likely(__range_not_ok(addr, size, user_addr_max()) == 0)) 80 82 81 83 /* 82 84 * The exception table consists of pairs of addresses relative to the
-1
arch/x86/include/asm/uv/uv_bau.h
··· 149 149 /* 4 bits of software ack period */ 150 150 #define UV2_ACK_MASK 0x7UL 151 151 #define UV2_ACK_UNITS_SHFT 3 152 - #define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 153 152 #define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 154 153 155 154 /*
-6
arch/x86/kernel/aperture_64.c
··· 20 20 #include <linux/bitops.h> 21 21 #include <linux/ioport.h> 22 22 #include <linux/suspend.h> 23 - #include <linux/kmemleak.h> 24 23 #include <asm/e820.h> 25 24 #include <asm/io.h> 26 25 #include <asm/iommu.h> ··· 94 95 return 0; 95 96 } 96 97 memblock_reserve(addr, aper_size); 97 - /* 98 - * Kmemleak should not scan this block as it may not be mapped via the 99 - * kernel direct mapping. 100 - */ 101 - kmemleak_ignore(phys_to_virt(addr)); 102 98 printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", 103 99 aper_size >> 10, addr); 104 100 insert_aperture_resource((u32)addr, aper_size);
+2 -2
arch/x86/kernel/apic/io_apic.c
··· 1195 1195 BUG_ON(!cfg->vector); 1196 1196 1197 1197 vector = cfg->vector; 1198 - for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) 1198 + for_each_cpu(cpu, cfg->domain) 1199 1199 per_cpu(vector_irq, cpu)[vector] = -1; 1200 1200 1201 1201 cfg->vector = 0; ··· 1203 1203 1204 1204 if (likely(!cfg->move_in_progress)) 1205 1205 return; 1206 - for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { 1206 + for_each_cpu(cpu, cfg->old_domain) { 1207 1207 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; 1208 1208 vector++) { 1209 1209 if (per_cpu(vector_irq, cpu)[vector] != irq)
+2 -2
arch/x86/kernel/cpu/mcheck/mce.c
··· 1278 1278 */ 1279 1279 iv = __this_cpu_read(mce_next_interval); 1280 1280 if (mce_notify_irq()) 1281 - iv = max(iv, (unsigned long) HZ/100); 1281 + iv = max(iv / 2, (unsigned long) HZ/100); 1282 1282 else 1283 1283 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); 1284 1284 __this_cpu_write(mce_next_interval, iv); ··· 1560 1560 static void __mcheck_cpu_init_timer(void) 1561 1561 { 1562 1562 struct timer_list *t = &__get_cpu_var(mce_timer); 1563 - unsigned long iv = __this_cpu_read(mce_next_interval); 1563 + unsigned long iv = check_interval * HZ; 1564 1564 1565 1565 setup_timer(t, mce_timer_fn, smp_processor_id()); 1566 1566
+9 -2
arch/x86/kernel/cpu/perf_event.c
··· 1496 1496 if (!cpuc->shared_regs) 1497 1497 goto error; 1498 1498 } 1499 + cpuc->is_fake = 1; 1499 1500 return cpuc; 1500 1501 error: 1501 1502 free_fake_cpuc(cpuc); ··· 1757 1756 dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); 1758 1757 } 1759 1758 1759 + static inline int 1760 + valid_user_frame(const void __user *fp, unsigned long size) 1761 + { 1762 + return (__range_not_ok(fp, size, TASK_SIZE) == 0); 1763 + } 1764 + 1760 1765 #ifdef CONFIG_COMPAT 1761 1766 1762 1767 #include <asm/compat.h> ··· 1787 1780 if (bytes != sizeof(frame)) 1788 1781 break; 1789 1782 1790 - if (fp < compat_ptr(regs->sp)) 1783 + if (!valid_user_frame(fp, sizeof(frame))) 1791 1784 break; 1792 1785 1793 1786 perf_callchain_store(entry, frame.return_address); ··· 1833 1826 if (bytes != sizeof(frame)) 1834 1827 break; 1835 1828 1836 - if ((unsigned long)fp < regs->sp) 1829 + if (!valid_user_frame(fp, sizeof(frame))) 1837 1830 break; 1838 1831 1839 1832 perf_callchain_store(entry, frame.return_address);
+2
arch/x86/kernel/cpu/perf_event.h
··· 117 117 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ 118 118 119 119 unsigned int group_flag; 120 + int is_fake; 120 121 121 122 /* 122 123 * Intel DebugStore bits ··· 365 364 int pebs_record_size; 366 365 void (*drain_pebs)(struct pt_regs *regs); 367 366 struct event_constraint *pebs_constraints; 367 + void (*pebs_aliases)(struct perf_event *event); 368 368 369 369 /* 370 370 * Intel LBR
+108 -37
arch/x86/kernel/cpu/perf_event_intel.c
··· 1121 1121 return NULL; 1122 1122 } 1123 1123 1124 - static bool intel_try_alt_er(struct perf_event *event, int orig_idx) 1124 + static int intel_alt_er(int idx) 1125 1125 { 1126 1126 if (!(x86_pmu.er_flags & ERF_HAS_RSP_1)) 1127 - return false; 1127 + return idx; 1128 1128 1129 - if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) { 1130 - event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 1131 - event->hw.config |= 0x01bb; 1132 - event->hw.extra_reg.idx = EXTRA_REG_RSP_1; 1133 - event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; 1134 - } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) { 1129 + if (idx == EXTRA_REG_RSP_0) 1130 + return EXTRA_REG_RSP_1; 1131 + 1132 + if (idx == EXTRA_REG_RSP_1) 1133 + return EXTRA_REG_RSP_0; 1134 + 1135 + return idx; 1136 + } 1137 + 1138 + static void intel_fixup_er(struct perf_event *event, int idx) 1139 + { 1140 + event->hw.extra_reg.idx = idx; 1141 + 1142 + if (idx == EXTRA_REG_RSP_0) { 1135 1143 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 1136 1144 event->hw.config |= 0x01b7; 1137 - event->hw.extra_reg.idx = EXTRA_REG_RSP_0; 1138 1145 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; 1146 + } else if (idx == EXTRA_REG_RSP_1) { 1147 + event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 1148 + event->hw.config |= 0x01bb; 1149 + event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; 1139 1150 } 1140 - 1141 - if (event->hw.extra_reg.idx == orig_idx) 1142 - return false; 1143 - 1144 - return true; 1145 1151 } 1146 1152 1147 1153 /* ··· 1165 1159 struct event_constraint *c = &emptyconstraint; 1166 1160 struct er_account *era; 1167 1161 unsigned long flags; 1168 - int orig_idx = reg->idx; 1162 + int idx = reg->idx; 1169 1163 1170 - /* already allocated shared msr */ 1171 - if (reg->alloc) 1164 + /* 1165 + * reg->alloc can be set due to existing state, so for fake cpuc we 1166 + * need to ignore this, otherwise we might fail to allocate proper fake 1167 + * state for this extra reg constraint. Also see the comment below. 
1168 + */ 1169 + if (reg->alloc && !cpuc->is_fake) 1172 1170 return NULL; /* call x86_get_event_constraint() */ 1173 1171 1174 1172 again: 1175 - era = &cpuc->shared_regs->regs[reg->idx]; 1173 + era = &cpuc->shared_regs->regs[idx]; 1176 1174 /* 1177 1175 * we use spin_lock_irqsave() to avoid lockdep issues when 1178 1176 * passing a fake cpuc ··· 1185 1175 1186 1176 if (!atomic_read(&era->ref) || era->config == reg->config) { 1187 1177 1178 + /* 1179 + * If its a fake cpuc -- as per validate_{group,event}() we 1180 + * shouldn't touch event state and we can avoid doing so 1181 + * since both will only call get_event_constraints() once 1182 + * on each event, this avoids the need for reg->alloc. 1183 + * 1184 + * Not doing the ER fixup will only result in era->reg being 1185 + * wrong, but since we won't actually try and program hardware 1186 + * this isn't a problem either. 1187 + */ 1188 + if (!cpuc->is_fake) { 1189 + if (idx != reg->idx) 1190 + intel_fixup_er(event, idx); 1191 + 1192 + /* 1193 + * x86_schedule_events() can call get_event_constraints() 1194 + * multiple times on events in the case of incremental 1195 + * scheduling(). reg->alloc ensures we only do the ER 1196 + * allocation once. 
1197 + */ 1198 + reg->alloc = 1; 1199 + } 1200 + 1188 1201 /* lock in msr value */ 1189 1202 era->config = reg->config; 1190 1203 era->reg = reg->reg; ··· 1215 1182 /* one more user */ 1216 1183 atomic_inc(&era->ref); 1217 1184 1218 - /* no need to reallocate during incremental event scheduling */ 1219 - reg->alloc = 1; 1220 - 1221 1185 /* 1222 1186 * need to call x86_get_event_constraint() 1223 1187 * to check if associated event has constraints 1224 1188 */ 1225 1189 c = NULL; 1226 - } else if (intel_try_alt_er(event, orig_idx)) { 1227 - raw_spin_unlock_irqrestore(&era->lock, flags); 1228 - goto again; 1190 + } else { 1191 + idx = intel_alt_er(idx); 1192 + if (idx != reg->idx) { 1193 + raw_spin_unlock_irqrestore(&era->lock, flags); 1194 + goto again; 1195 + } 1229 1196 } 1230 1197 raw_spin_unlock_irqrestore(&era->lock, flags); 1231 1198 ··· 1239 1206 struct er_account *era; 1240 1207 1241 1208 /* 1242 - * only put constraint if extra reg was actually 1243 - * allocated. Also takes care of event which do 1244 - * not use an extra shared reg 1209 + * Only put constraint if extra reg was actually allocated. Also takes 1210 + * care of event which do not use an extra shared reg. 1211 + * 1212 + * Also, if this is a fake cpuc we shouldn't touch any event state 1213 + * (reg->alloc) and we don't care about leaving inconsistent cpuc state 1214 + * either since it'll be thrown out. 
1245 1215 */ 1246 - if (!reg->alloc) 1216 + if (!reg->alloc || cpuc->is_fake) 1247 1217 return; 1248 1218 1249 1219 era = &cpuc->shared_regs->regs[reg->idx]; ··· 1338 1302 intel_put_shared_regs_event_constraints(cpuc, event); 1339 1303 } 1340 1304 1341 - static int intel_pmu_hw_config(struct perf_event *event) 1305 + static void intel_pebs_aliases_core2(struct perf_event *event) 1342 1306 { 1343 - int ret = x86_pmu_hw_config(event); 1344 - 1345 - if (ret) 1346 - return ret; 1347 - 1348 - if (event->attr.precise_ip && 1349 - (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 1307 + if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 1350 1308 /* 1351 1309 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 1352 1310 * (0x003c) so that we can use it with PEBS. ··· 1361 1331 */ 1362 1332 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); 1363 1333 1334 + alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 1335 + event->hw.config = alt_config; 1336 + } 1337 + } 1338 + 1339 + static void intel_pebs_aliases_snb(struct perf_event *event) 1340 + { 1341 + if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { 1342 + /* 1343 + * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 1344 + * (0x003c) so that we can use it with PEBS. 1345 + * 1346 + * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't 1347 + * PEBS capable. However we can use UOPS_RETIRED.ALL 1348 + * (0x01c2), which is a PEBS capable event, to get the same 1349 + * count. 1350 + * 1351 + * UOPS_RETIRED.ALL counts the number of cycles that retires 1352 + * CNTMASK micro-ops. By setting CNTMASK to a value (16) 1353 + * larger than the maximum number of micro-ops that can be 1354 + * retired per cycle (4) and then inverting the condition, we 1355 + * count all cycles that retire 16 or less micro-ops, which 1356 + * is every cycle. 1357 + * 1358 + * Thereby we gain a PEBS capable cycle counter. 
1359 + */ 1360 + u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16); 1364 1361 1365 1362 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 1366 1363 event->hw.config = alt_config; 1367 1364 } 1365 + } 1366 + 1367 + static int intel_pmu_hw_config(struct perf_event *event) 1368 + { 1369 + int ret = x86_pmu_hw_config(event); 1370 + 1371 + if (ret) 1372 + return ret; 1373 + 1374 + if (event->attr.precise_ip && x86_pmu.pebs_aliases) 1375 + x86_pmu.pebs_aliases(event); 1368 1376 1369 1377 if (intel_pmu_needs_lbr_smpl(event)) { 1370 1378 ret = intel_pmu_setup_lbr_filter(event); ··· 1677 1609 .max_period = (1ULL << 31) - 1, 1678 1610 .get_event_constraints = intel_get_event_constraints, 1679 1611 .put_event_constraints = intel_put_event_constraints, 1612 + .pebs_aliases = intel_pebs_aliases_core2, 1680 1613 1681 1614 .format_attrs = intel_arch3_formats_attr, 1682 1615 ··· 1911 1842 break; 1912 1843 1913 1844 case 42: /* SandyBridge */ 1914 - x86_add_quirk(intel_sandybridge_quirk); 1915 1845 case 45: /* SandyBridge, "Romely-EP" */ 1846 + x86_add_quirk(intel_sandybridge_quirk); 1847 + case 58: /* IvyBridge */ 1916 1848 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 1917 1849 sizeof(hw_cache_event_ids)); 1918 1850 ··· 1921 1851 1922 1852 x86_pmu.event_constraints = intel_snb_event_constraints; 1923 1853 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; 1854 + x86_pmu.pebs_aliases = intel_pebs_aliases_snb; 1924 1855 x86_pmu.extra_regs = intel_snb_extra_regs; 1925 1856 /* all extra regs are per-cpu when HT is on */ 1926 1857 x86_pmu.er_flags |= ERF_HAS_RSP_1;
+1 -8
arch/x86/kernel/cpu/perf_event_intel_ds.c
··· 400 400 INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ 401 401 INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ 402 402 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ 403 - INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */ 404 - INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */ 405 - INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */ 406 - INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */ 407 - INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */ 408 - INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */ 409 - INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */ 410 - INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */ 403 + INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ 411 404 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 412 405 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 413 406 INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
-5
arch/x86/kernel/kvmclock.c
··· 120 120 bool ret = false; 121 121 struct pvclock_vcpu_time_info *src; 122 122 123 - /* 124 - * per_cpu() is safe here because this function is only called from 125 - * timer functions where preemption is already disabled. 126 - */ 127 - WARN_ON(!in_atomic()); 128 123 src = &__get_cpu_var(hv_clock); 129 124 if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) { 130 125 __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
+2 -2
arch/x86/kernel/nmi_selftest.c
··· 42 42 static void __init init_nmi_testsuite(void) 43 43 { 44 44 /* trap all the unknown NMIs we may generate */ 45 - register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk"); 45 + register_nmi_handler_initonly(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk"); 46 46 } 47 47 48 48 static void __init cleanup_nmi_testsuite(void) ··· 64 64 { 65 65 unsigned long timeout; 66 66 67 - if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback, 67 + if (register_nmi_handler_initonly(NMI_LOCAL, test_nmi_ipi_callback, 68 68 NMI_FLAG_FIRST, "nmi_selftest")) { 69 69 nmi_fail = FAILURE; 70 70 return;
+2 -1
arch/x86/kernel/pci-dma.c
··· 100 100 struct dma_attrs *attrs) 101 101 { 102 102 unsigned long dma_mask; 103 - struct page *page = NULL; 103 + struct page *page; 104 104 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 105 105 dma_addr_t addr; 106 106 ··· 108 108 109 109 flag |= __GFP_ZERO; 110 110 again: 111 + page = NULL; 111 112 if (!(flag & GFP_ATOMIC)) 112 113 page = dma_alloc_from_contiguous(dev, count, get_order(size)); 113 114 if (!page)
+4 -2
arch/x86/kernel/reboot.c
··· 643 643 set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); 644 644 645 645 /* 646 - * O.K Now that I'm on the appropriate processor, 647 - * stop all of the others. 646 + * O.K Now that I'm on the appropriate processor, stop all of the 647 + * others. Also disable the local irq to not receive the per-cpu 648 + * timer interrupt which may trigger scheduler's load balance. 648 649 */ 650 + local_irq_disable(); 649 651 stop_other_cpus(); 650 652 #endif 651 653
+14 -2
arch/x86/kernel/smpboot.c
··· 351 351 352 352 static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 353 353 { 354 - if (c->phys_proc_id == o->phys_proc_id) 355 - return topology_sane(c, o, "mc"); 354 + if (c->phys_proc_id == o->phys_proc_id) { 355 + if (cpu_has(c, X86_FEATURE_AMD_DCM)) 356 + return true; 356 357 358 + return topology_sane(c, o, "mc"); 359 + } 357 360 return false; 358 361 } 359 362 ··· 386 383 387 384 if ((i == cpu) || (has_mc && match_llc(c, o))) 388 385 link_mask(llc_shared, cpu, i); 386 + 387 + } 388 + 389 + /* 390 + * This needs a separate iteration over the cpus because we rely on all 391 + * cpu_sibling_mask links to be set-up. 392 + */ 393 + for_each_cpu(i, cpu_sibling_setup_mask) { 394 + o = &cpu_data(i); 389 395 390 396 if ((i == cpu) || (has_mc && match_mc(c, o))) { 391 397 link_mask(core, cpu, i);
+4
arch/x86/lib/usercopy.c
··· 8 8 #include <linux/module.h> 9 9 10 10 #include <asm/word-at-a-time.h> 11 + #include <linux/sched.h> 11 12 12 13 /* 13 14 * best effort, GUP based copy_from_user() that is NMI-safe ··· 21 20 struct page *page; 22 21 void *map; 23 22 int ret; 23 + 24 + if (__range_not_ok(from, n, TASK_SIZE)) 25 + return len; 24 26 25 27 do { 26 28 ret = __get_user_pages_fast(addr, 1, 0, &page);
+4 -4
arch/x86/lib/x86-opcode-map.txt
··· 28 28 # - (66): the last prefix is 0x66 29 29 # - (F3): the last prefix is 0xF3 30 30 # - (F2): the last prefix is 0xF2 31 - # 31 + # - (!F3) : the last prefix is not 0xF3 (including non-last prefix case) 32 32 33 33 Table: one byte opcode 34 34 Referrer: ··· 515 515 b5: LGS Gv,Mp 516 516 b6: MOVZX Gv,Eb 517 517 b7: MOVZX Gv,Ew 518 - b8: JMPE | POPCNT Gv,Ev (F3) 518 + b8: JMPE (!F3) | POPCNT Gv,Ev (F3) 519 519 b9: Grp10 (1A) 520 520 ba: Grp8 Ev,Ib (1A) 521 521 bb: BTC Ev,Gv 522 - bc: BSF Gv,Ev | TZCNT Gv,Ev (F3) 523 - bd: BSR Gv,Ev | LZCNT Gv,Ev (F3) 522 + bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3) 523 + bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3) 524 524 be: MOVSX Gv,Eb 525 525 bf: MOVSX Gv,Ew 526 526 # 0x0f 0xc0-0xcf
+2 -1
arch/x86/mm/init.c
··· 62 62 extra += PMD_SIZE; 63 63 #endif 64 64 /* The first 2/4M doesn't use large pages. */ 65 - extra += mr->end - mr->start; 65 + if (mr->start < PMD_SIZE) 66 + extra += mr->end - mr->start; 66 67 67 68 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; 68 69 } else
+2 -2
arch/x86/mm/ioremap.c
··· 180 180 181 181 /** 182 182 * ioremap_nocache - map bus memory into CPU space 183 - * @offset: bus address of the memory 183 + * @phys_addr: bus address of the memory 184 184 * @size: size of the resource to map 185 185 * 186 186 * ioremap_nocache performs a platform specific sequence of operations to ··· 217 217 218 218 /** 219 219 * ioremap_wc - map memory into CPU space write combined 220 - * @offset: bus address of the memory 220 + * @phys_addr: bus address of the memory 221 221 * @size: size of the resource to map 222 222 * 223 223 * This version of ioremap ensures that the memory is marked write combining.
+1 -1
arch/x86/mm/pageattr.c
··· 122 122 123 123 /** 124 124 * clflush_cache_range - flush a cache range with clflush 125 - * @addr: virtual start address 125 + * @vaddr: virtual start address 126 126 * @size: number of bytes to flush 127 127 * 128 128 * clflush is an unordered instruction which needs fencing with mfence
+2
arch/x86/mm/srat.c
··· 176 176 return; 177 177 } 178 178 179 + node_set(node, numa_nodes_parsed); 180 + 179 181 printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", 180 182 node, pxm, 181 183 (unsigned long long) start, (unsigned long long) end - 1);
+1 -1
arch/x86/platform/mrst/mrst.c
··· 782 782 EXPORT_SYMBOL_GPL(intel_scu_notifier); 783 783 784 784 /* Called by IPC driver */ 785 - void intel_scu_devices_create(void) 785 + void __devinit intel_scu_devices_create(void) 786 786 { 787 787 int i; 788 788
-1
arch/x86/platform/uv/tlb_uv.c
··· 1295 1295 */ 1296 1296 mmr_image |= (1L << SOFTACK_MSHIFT); 1297 1297 if (is_uv2_hub()) { 1298 - mmr_image &= ~(1L << UV2_LEG_SHFT); 1299 1298 mmr_image |= (1L << UV2_EXT_SHFT); 1300 1299 } 1301 1300 write_mmr_misc_control(pnode, mmr_image);
+9 -5
arch/x86/tools/gen-insn-attr-x86.awk
··· 66 66 rex_expr = "^REX(\\.[XRWB]+)*" 67 67 fpu_expr = "^ESC" # TODO 68 68 69 - lprefix1_expr = "\\(66\\)" 69 + lprefix1_expr = "\\((66|!F3)\\)" 70 70 lprefix2_expr = "\\(F3\\)" 71 - lprefix3_expr = "\\(F2\\)" 71 + lprefix3_expr = "\\((F2|!F3)\\)" 72 + lprefix_expr = "\\((66|F2|F3)\\)" 72 73 max_lprefix = 4 73 74 74 75 # All opcodes starting with lower-case 'v' or with (v1) superscript ··· 334 333 if (match(ext, lprefix1_expr)) { 335 334 lptable1[idx] = add_flags(lptable1[idx],flags) 336 335 variant = "INAT_VARIANT" 337 - } else if (match(ext, lprefix2_expr)) { 336 + } 337 + if (match(ext, lprefix2_expr)) { 338 338 lptable2[idx] = add_flags(lptable2[idx],flags) 339 339 variant = "INAT_VARIANT" 340 - } else if (match(ext, lprefix3_expr)) { 340 + } 341 + if (match(ext, lprefix3_expr)) { 341 342 lptable3[idx] = add_flags(lptable3[idx],flags) 342 343 variant = "INAT_VARIANT" 343 - } else { 344 + } 345 + if (!match(ext, lprefix_expr)){ 344 346 table[idx] = add_flags(table[idx],flags) 345 347 } 346 348 }
+2 -2
arch/x86/um/sys_call_table_32.c
··· 39 39 #undef __SYSCALL_I386 40 40 #define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym, 41 41 42 - typedef void (*sys_call_ptr_t)(void); 42 + typedef asmlinkage void (*sys_call_ptr_t)(void); 43 43 44 - extern void sys_ni_syscall(void); 44 + extern asmlinkage void sys_ni_syscall(void); 45 45 46 46 const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { 47 47 /*
+8
arch/x86/xen/enlighten.c
··· 209 209 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); 210 210 } 211 211 212 + #define CPUID_THERM_POWER_LEAF 6 213 + #define APERFMPERF_PRESENT 0 214 + 212 215 static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; 213 216 static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; 214 217 ··· 244 241 *cx = cpuid_leaf5_ecx_val; 245 242 *dx = cpuid_leaf5_edx_val; 246 243 return; 244 + 245 + case CPUID_THERM_POWER_LEAF: 246 + /* Disabling APERFMPERF for kernel usage */ 247 + maskecx = ~(1 << APERFMPERF_PRESENT); 248 + break; 247 249 248 250 case 0xb: 249 251 /* Suppress extended topology stuff */
+36
arch/x86/xen/p2m.c
··· 706 706 unsigned long uninitialized_var(address); 707 707 unsigned level; 708 708 pte_t *ptep = NULL; 709 + int ret = 0; 709 710 710 711 pfn = page_to_pfn(page); 711 712 if (!PageHighMem(page)) { ··· 742 741 list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); 743 742 spin_unlock_irqrestore(&m2p_override_lock, flags); 744 743 744 + /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in 745 + * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other 746 + * pfn so that the following mfn_to_pfn(mfn) calls will return the 747 + * pfn from the m2p_override (the backend pfn) instead. 748 + * We need to do this because the pages shared by the frontend 749 + * (xen-blkfront) can be already locked (lock_page, called by 750 + * do_read_cache_page); when the userspace backend tries to use them 751 + * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so 752 + * do_blockdev_direct_IO is going to try to lock the same pages 753 + * again resulting in a deadlock. 754 + * As a side effect get_user_pages_fast might not be safe on the 755 + * frontend pages while they are being shared with the backend, 756 + * because mfn_to_pfn (that ends up being called by GUPF) will 757 + * return the backend pfn rather than the frontend pfn. 
*/ 758 + ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); 759 + if (ret == 0 && get_phys_to_machine(pfn) == mfn) 760 + set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); 761 + 745 762 return 0; 746 763 } 747 764 EXPORT_SYMBOL_GPL(m2p_add_override); ··· 771 752 unsigned long uninitialized_var(address); 772 753 unsigned level; 773 754 pte_t *ptep = NULL; 755 + int ret = 0; 774 756 775 757 pfn = page_to_pfn(page); 776 758 mfn = get_phys_to_machine(pfn); ··· 840 820 } 841 821 } else 842 822 set_phys_to_machine(pfn, page->index); 823 + 824 + /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present 825 + * somewhere in this domain, even before being added to the 826 + * m2p_override (see comment above in m2p_add_override). 827 + * If there are no other entries in the m2p_override corresponding 828 + * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for 829 + * the original pfn (the one shared by the frontend): the backend 830 + * cannot do any IO on this page anymore because it has been 831 + * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of 832 + * the original pfn causes mfn_to_pfn(mfn) to return the frontend 833 + * pfn again. */ 834 + mfn &= ~FOREIGN_FRAME_BIT; 835 + ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); 836 + if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) && 837 + m2p_find_override(mfn) == NULL) 838 + set_phys_to_machine(pfn, mfn); 843 839 844 840 return 0; 845 841 }
+2 -1
arch/x86/xen/setup.c
··· 371 371 populated = xen_populate_chunk(map, memmap.nr_entries, 372 372 max_pfn, &last_pfn, xen_released_pages); 373 373 374 - extra_pages += (xen_released_pages - populated); 374 + xen_released_pages -= populated; 375 + extra_pages += xen_released_pages; 375 376 376 377 if (last_pfn > max_pfn) { 377 378 max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
+1 -1
drivers/acpi/Kconfig
··· 208 208 209 209 config ACPI_HOTPLUG_CPU 210 210 bool 211 - depends on ACPI_PROCESSOR && HOTPLUG_CPU 211 + depends on EXPERIMENTAL && ACPI_PROCESSOR && HOTPLUG_CPU 212 212 select ACPI_CONTAINER 213 213 default y 214 214
+9 -1
drivers/acpi/battery.c
··· 643 643 644 644 static void acpi_battery_refresh(struct acpi_battery *battery) 645 645 { 646 + int power_unit; 647 + 646 648 if (!battery->bat.dev) 647 649 return; 648 650 651 + power_unit = battery->power_unit; 652 + 649 653 acpi_battery_get_info(battery); 650 - /* The battery may have changed its reporting units. */ 654 + 655 + if (power_unit == battery->power_unit) 656 + return; 657 + 658 + /* The battery has changed its reporting units. */ 651 659 sysfs_remove_battery(battery); 652 660 sysfs_add_battery(battery); 653 661 }
+25 -5
drivers/acpi/processor_perflib.c
··· 333 333 struct acpi_buffer state = { 0, NULL }; 334 334 union acpi_object *pss = NULL; 335 335 int i; 336 + int last_invalid = -1; 336 337 337 338 338 339 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); ··· 395 394 ((u32)(px->core_frequency * 1000) != 396 395 (px->core_frequency * 1000))) { 397 396 printk(KERN_ERR FW_BUG PREFIX 398 - "Invalid BIOS _PSS frequency: 0x%llx MHz\n", 399 - px->core_frequency); 400 - result = -EFAULT; 401 - kfree(pr->performance->states); 402 - goto end; 397 + "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n", 398 + pr->id, px->core_frequency); 399 + if (last_invalid == -1) 400 + last_invalid = i; 401 + } else { 402 + if (last_invalid != -1) { 403 + /* 404 + * Copy this valid entry over last_invalid entry 405 + */ 406 + memcpy(&(pr->performance->states[last_invalid]), 407 + px, sizeof(struct acpi_processor_px)); 408 + ++last_invalid; 409 + } 403 410 } 404 411 } 412 + 413 + if (last_invalid == 0) { 414 + printk(KERN_ERR FW_BUG PREFIX 415 + "No valid BIOS _PSS frequency found for processor %d\n", pr->id); 416 + result = -EFAULT; 417 + kfree(pr->performance->states); 418 + pr->performance->states = NULL; 419 + } 420 + 421 + if (last_invalid > 0) 422 + pr->performance->state_count = last_invalid; 405 423 406 424 end: 407 425 kfree(buffer.pointer);
+22 -11
drivers/acpi/video.c
··· 1687 1687 set_bit(KEY_BRIGHTNESS_ZERO, input->keybit); 1688 1688 set_bit(KEY_DISPLAY_OFF, input->keybit); 1689 1689 1690 - error = input_register_device(input); 1691 - if (error) 1692 - goto err_stop_video; 1693 - 1694 1690 printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n", 1695 1691 ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device), 1696 1692 video->flags.multihead ? "yes" : "no", ··· 1697 1701 video->pm_nb.priority = 0; 1698 1702 error = register_pm_notifier(&video->pm_nb); 1699 1703 if (error) 1700 - goto err_unregister_input_dev; 1704 + goto err_stop_video; 1705 + 1706 + error = input_register_device(input); 1707 + if (error) 1708 + goto err_unregister_pm_notifier; 1701 1709 1702 1710 return 0; 1703 1711 1704 - err_unregister_input_dev: 1705 - input_unregister_device(input); 1712 + err_unregister_pm_notifier: 1713 + unregister_pm_notifier(&video->pm_nb); 1706 1714 err_stop_video: 1707 1715 acpi_video_bus_stop_devices(video); 1708 1716 err_free_input_dev: ··· 1743 1743 return 0; 1744 1744 } 1745 1745 1746 + static int __init is_i740(struct pci_dev *dev) 1747 + { 1748 + if (dev->device == 0x00D1) 1749 + return 1; 1750 + if (dev->device == 0x7000) 1751 + return 1; 1752 + return 0; 1753 + } 1754 + 1746 1755 static int __init intel_opregion_present(void) 1747 1756 { 1748 - #if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE) 1757 + int opregion = 0; 1749 1758 struct pci_dev *dev = NULL; 1750 1759 u32 address; 1751 1760 ··· 1763 1754 continue; 1764 1755 if (dev->vendor != PCI_VENDOR_ID_INTEL) 1765 1756 continue; 1757 + /* We don't want to poke around undefined i740 registers */ 1758 + if (is_i740(dev)) 1759 + continue; 1766 1760 pci_read_config_dword(dev, 0xfc, &address); 1767 1761 if (!address) 1768 1762 continue; 1769 - return 1; 1763 + opregion = 1; 1770 1764 } 1771 - #endif 1772 - return 0; 1765 + return opregion; 1773 1766 } 1774 1767 1775 1768 int acpi_video_register(void)
+6 -4
drivers/base/regmap/regmap.c
··· 246 246 map->lock = regmap_lock_mutex; 247 247 map->unlock = regmap_unlock_mutex; 248 248 } 249 - map->format.buf_size = (config->reg_bits + config->val_bits) / 8; 250 249 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); 251 250 map->format.pad_bytes = config->pad_bits / 8; 252 251 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); 253 - map->format.buf_size += map->format.pad_bytes; 252 + map->format.buf_size = DIV_ROUND_UP(config->reg_bits + 253 + config->val_bits + config->pad_bits, 8); 254 254 map->reg_shift = config->pad_bits % 8; 255 255 if (config->reg_stride) 256 256 map->reg_stride = config->reg_stride; ··· 368 368 369 369 ret = regcache_init(map, config); 370 370 if (ret < 0) 371 - goto err_free_workbuf; 371 + goto err_debugfs; 372 372 373 373 /* Add a devres resource for dev_get_regmap() */ 374 374 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); ··· 383 383 384 384 err_cache: 385 385 regcache_exit(map); 386 - err_free_workbuf: 386 + err_debugfs: 387 + regmap_debugfs_exit(map); 387 388 kfree(map->work_buf); 388 389 err_map: 389 390 kfree(map); ··· 472 471 473 472 return ret; 474 473 } 474 + EXPORT_SYMBOL_GPL(regmap_reinit_cache); 475 475 476 476 /** 477 477 * regmap_exit(): Free a previously allocated register map
+3 -1
drivers/bcma/driver_chipcommon_pmu.c
··· 139 139 bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7); 140 140 break; 141 141 case 0x4331: 142 - /* BCM4331 workaround is SPROM-related, we put it in sprom.c */ 142 + case 43431: 143 + /* Ext PA lines must be enabled for tx on BCM4331 */ 144 + bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true); 143 145 break; 144 146 case 43224: 145 147 if (bus->chipinfo.rev == 0) {
+4 -2
drivers/bcma/driver_pci.c
··· 232 232 int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, 233 233 bool enable) 234 234 { 235 - struct pci_dev *pdev = pc->core->bus->host_pci; 235 + struct pci_dev *pdev; 236 236 u32 coremask, tmp; 237 237 int err = 0; 238 238 239 - if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) { 239 + if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) { 240 240 /* This bcma device is not on a PCI host-bus. So the IRQs are 241 241 * not routed through the PCI core. 242 242 * So we must not enable routing through the PCI core. */ 243 243 goto out; 244 244 } 245 + 246 + pdev = pc->core->bus->host_pci; 245 247 246 248 err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp); 247 249 if (err)
+2 -2
drivers/bcma/sprom.c
··· 579 579 if (!sprom) 580 580 return -ENOMEM; 581 581 582 - if (bus->chipinfo.id == 0x4331) 582 + if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431) 583 583 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false); 584 584 585 585 pr_debug("SPROM offset 0x%x\n", offset); 586 586 bcma_sprom_read(bus, offset, sprom); 587 587 588 - if (bus->chipinfo.id == 0x4331) 588 + if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431) 589 589 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true); 590 590 591 591 err = bcma_sprom_valid(sprom);
+1
drivers/char/agp/intel-agp.c
··· 898 898 ID(PCI_DEVICE_ID_INTEL_B43_HB), 899 899 ID(PCI_DEVICE_ID_INTEL_B43_1_HB), 900 900 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), 901 + ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB), 901 902 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), 902 903 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), 903 904 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
+1
drivers/char/agp/intel-agp.h
··· 212 212 #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 213 213 #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 214 214 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 215 + #define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069 215 216 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 216 217 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 217 218 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
+7
drivers/char/hw_random/atmel-rng.c
··· 36 36 /* data ready? */ 37 37 if (readl(trng->base + TRNG_ODATA) & 1) { 38 38 *data = readl(trng->base + TRNG_ODATA); 39 + /* 40 + ensure data ready is only set again AFTER the next data 41 + word is ready in case it got set between checking ISR 42 + and reading ODATA, so we don't risk re-reading the 43 + same word 44 + */ 45 + readl(trng->base + TRNG_ISR); 39 46 return 4; 40 47 } else 41 48 return 0;
+13 -13
drivers/clocksource/sh_cmt.c
··· 48 48 unsigned long next_match_value; 49 49 unsigned long max_match_value; 50 50 unsigned long rate; 51 - spinlock_t lock; 51 + raw_spinlock_t lock; 52 52 struct clock_event_device ced; 53 53 struct clocksource cs; 54 54 unsigned long total_cycles; 55 55 }; 56 56 57 - static DEFINE_SPINLOCK(sh_cmt_lock); 57 + static DEFINE_RAW_SPINLOCK(sh_cmt_lock); 58 58 59 59 #define CMSTR -1 /* shared register */ 60 60 #define CMCSR 0 /* channel register */ ··· 139 139 unsigned long flags, value; 140 140 141 141 /* start stop register shared by multiple timer channels */ 142 - spin_lock_irqsave(&sh_cmt_lock, flags); 142 + raw_spin_lock_irqsave(&sh_cmt_lock, flags); 143 143 value = sh_cmt_read(p, CMSTR); 144 144 145 145 if (start) ··· 148 148 value &= ~(1 << cfg->timer_bit); 149 149 150 150 sh_cmt_write(p, CMSTR, value); 151 - spin_unlock_irqrestore(&sh_cmt_lock, flags); 151 + raw_spin_unlock_irqrestore(&sh_cmt_lock, flags); 152 152 } 153 153 154 154 static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) ··· 328 328 { 329 329 unsigned long flags; 330 330 331 - spin_lock_irqsave(&p->lock, flags); 331 + raw_spin_lock_irqsave(&p->lock, flags); 332 332 __sh_cmt_set_next(p, delta); 333 - spin_unlock_irqrestore(&p->lock, flags); 333 + raw_spin_unlock_irqrestore(&p->lock, flags); 334 334 } 335 335 336 336 static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) ··· 385 385 int ret = 0; 386 386 unsigned long flags; 387 387 388 - spin_lock_irqsave(&p->lock, flags); 388 + raw_spin_lock_irqsave(&p->lock, flags); 389 389 390 390 if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) 391 391 ret = sh_cmt_enable(p, &p->rate); ··· 398 398 if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) 399 399 __sh_cmt_set_next(p, p->max_match_value); 400 400 out: 401 - spin_unlock_irqrestore(&p->lock, flags); 401 + raw_spin_unlock_irqrestore(&p->lock, flags); 402 402 403 403 return ret; 404 404 } ··· 408 408 unsigned long flags; 409 409 unsigned long f; 410 410 411 - 
spin_lock_irqsave(&p->lock, flags); 411 + raw_spin_lock_irqsave(&p->lock, flags); 412 412 413 413 f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); 414 414 p->flags &= ~flag; ··· 420 420 if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) 421 421 __sh_cmt_set_next(p, p->max_match_value); 422 422 423 - spin_unlock_irqrestore(&p->lock, flags); 423 + raw_spin_unlock_irqrestore(&p->lock, flags); 424 424 } 425 425 426 426 static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) ··· 435 435 unsigned long value; 436 436 int has_wrapped; 437 437 438 - spin_lock_irqsave(&p->lock, flags); 438 + raw_spin_lock_irqsave(&p->lock, flags); 439 439 value = p->total_cycles; 440 440 raw = sh_cmt_get_counter(p, &has_wrapped); 441 441 442 442 if (unlikely(has_wrapped)) 443 443 raw += p->match_value + 1; 444 - spin_unlock_irqrestore(&p->lock, flags); 444 + raw_spin_unlock_irqrestore(&p->lock, flags); 445 445 446 446 return value + raw; 447 447 } ··· 591 591 p->max_match_value = (1 << p->width) - 1; 592 592 593 593 p->match_value = p->max_match_value; 594 - spin_lock_init(&p->lock); 594 + raw_spin_lock_init(&p->lock); 595 595 596 596 if (clockevent_rating) 597 597 sh_cmt_register_clockevent(p, name, clockevent_rating);
+3 -3
drivers/clocksource/sh_mtu2.c
··· 43 43 struct clock_event_device ced; 44 44 }; 45 45 46 - static DEFINE_SPINLOCK(sh_mtu2_lock); 46 + static DEFINE_RAW_SPINLOCK(sh_mtu2_lock); 47 47 48 48 #define TSTR -1 /* shared register */ 49 49 #define TCR 0 /* channel register */ ··· 107 107 unsigned long flags, value; 108 108 109 109 /* start stop register shared by multiple timer channels */ 110 - spin_lock_irqsave(&sh_mtu2_lock, flags); 110 + raw_spin_lock_irqsave(&sh_mtu2_lock, flags); 111 111 value = sh_mtu2_read(p, TSTR); 112 112 113 113 if (start) ··· 116 116 value &= ~(1 << cfg->timer_bit); 117 117 118 118 sh_mtu2_write(p, TSTR, value); 119 - spin_unlock_irqrestore(&sh_mtu2_lock, flags); 119 + raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags); 120 120 } 121 121 122 122 static int sh_mtu2_enable(struct sh_mtu2_priv *p)
+6 -10
drivers/clocksource/sh_tmu.c
··· 45 45 struct clocksource cs; 46 46 }; 47 47 48 - static DEFINE_SPINLOCK(sh_tmu_lock); 48 + static DEFINE_RAW_SPINLOCK(sh_tmu_lock); 49 49 50 50 #define TSTR -1 /* shared register */ 51 51 #define TCOR 0 /* channel register */ ··· 95 95 unsigned long flags, value; 96 96 97 97 /* start stop register shared by multiple timer channels */ 98 - spin_lock_irqsave(&sh_tmu_lock, flags); 98 + raw_spin_lock_irqsave(&sh_tmu_lock, flags); 99 99 value = sh_tmu_read(p, TSTR); 100 100 101 101 if (start) ··· 104 104 value &= ~(1 << cfg->timer_bit); 105 105 106 106 sh_tmu_write(p, TSTR, value); 107 - spin_unlock_irqrestore(&sh_tmu_lock, flags); 107 + raw_spin_unlock_irqrestore(&sh_tmu_lock, flags); 108 108 } 109 109 110 110 static int sh_tmu_enable(struct sh_tmu_priv *p) ··· 245 245 246 246 sh_tmu_enable(p); 247 247 248 - /* TODO: calculate good shift from rate and counter bit width */ 249 - 250 - ced->shift = 32; 251 - ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); 252 - ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced); 253 - ced->min_delta_ns = 5000; 248 + clockevents_config(ced, p->rate); 254 249 255 250 if (periodic) { 256 251 p->periodic = (p->rate + HZ/2) / HZ; ··· 318 323 ced->set_mode = sh_tmu_clock_event_mode; 319 324 320 325 dev_info(&p->pdev->dev, "used for clock events\n"); 321 - clockevents_register_device(ced); 326 + 327 + clockevents_config_and_register(ced, 1, 0x300, 0xffffffff); 322 328 323 329 ret = setup_irq(p->irqaction.irq, &p->irqaction); 324 330 if (ret) {
+2 -2
drivers/gpu/drm/exynos/exynos_drm_drv.c
··· 244 244 }; 245 245 246 246 static struct drm_driver exynos_drm_driver = { 247 - .driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM | 248 - DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, 247 + .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | 248 + DRIVER_GEM | DRIVER_PRIME, 249 249 .load = exynos_drm_load, 250 250 .unload = exynos_drm_unload, 251 251 .open = exynos_drm_open,
-7
drivers/gpu/drm/exynos/exynos_drm_encoder.c
··· 172 172 manager_ops->commit(manager->dev); 173 173 } 174 174 175 - static struct drm_crtc * 176 - exynos_drm_encoder_get_crtc(struct drm_encoder *encoder) 177 - { 178 - return encoder->crtc; 179 - } 180 - 181 175 static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { 182 176 .dpms = exynos_drm_encoder_dpms, 183 177 .mode_fixup = exynos_drm_encoder_mode_fixup, 184 178 .mode_set = exynos_drm_encoder_mode_set, 185 179 .prepare = exynos_drm_encoder_prepare, 186 180 .commit = exynos_drm_encoder_commit, 187 - .get_crtc = exynos_drm_encoder_get_crtc, 188 181 }; 189 182 190 183 static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
+14 -5
drivers/gpu/drm/exynos/exynos_drm_fb.c
··· 51 51 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) 52 52 { 53 53 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 54 + unsigned int i; 54 55 55 56 DRM_DEBUG_KMS("%s\n", __FILE__); 56 57 57 58 drm_framebuffer_cleanup(fb); 59 + 60 + for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) { 61 + struct drm_gem_object *obj; 62 + 63 + if (exynos_fb->exynos_gem_obj[i] == NULL) 64 + continue; 65 + 66 + obj = &exynos_fb->exynos_gem_obj[i]->base; 67 + drm_gem_object_unreference_unlocked(obj); 68 + } 58 69 59 70 kfree(exynos_fb); 60 71 exynos_fb = NULL; ··· 145 134 return ERR_PTR(-ENOENT); 146 135 } 147 136 148 - drm_gem_object_unreference_unlocked(obj); 149 - 150 137 fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj); 151 - if (IS_ERR(fb)) 138 + if (IS_ERR(fb)) { 139 + drm_gem_object_unreference_unlocked(obj); 152 140 return fb; 141 + } 153 142 154 143 exynos_fb = to_exynos_fb(fb); 155 144 nr = exynos_drm_format_num_buffers(fb->pixel_format); ··· 162 151 exynos_drm_fb_destroy(fb); 163 152 return ERR_PTR(-ENOENT); 164 153 } 165 - 166 - drm_gem_object_unreference_unlocked(obj); 167 154 168 155 exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj); 169 156 }
+2 -2
drivers/gpu/drm/exynos/exynos_drm_fb.h
··· 31 31 static inline int exynos_drm_format_num_buffers(uint32_t format) 32 32 { 33 33 switch (format) { 34 - case DRM_FORMAT_NV12M: 34 + case DRM_FORMAT_NV12: 35 35 case DRM_FORMAT_NV12MT: 36 36 return 2; 37 - case DRM_FORMAT_YUV420M: 37 + case DRM_FORMAT_YUV420: 38 38 return 3; 39 39 default: 40 40 return 1;
+3 -6
drivers/gpu/drm/exynos/exynos_drm_gem.c
··· 689 689 struct drm_device *dev, uint32_t handle, 690 690 uint64_t *offset) 691 691 { 692 - struct exynos_drm_gem_obj *exynos_gem_obj; 693 692 struct drm_gem_object *obj; 694 693 int ret = 0; 695 694 ··· 709 710 goto unlock; 710 711 } 711 712 712 - exynos_gem_obj = to_exynos_gem_obj(obj); 713 - 714 - if (!exynos_gem_obj->base.map_list.map) { 715 - ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base); 713 + if (!obj->map_list.map) { 714 + ret = drm_gem_create_mmap_offset(obj); 716 715 if (ret) 717 716 goto out; 718 717 } 719 718 720 - *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT; 719 + *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; 721 720 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); 722 721 723 722 out:
+7 -5
drivers/gpu/drm/exynos/exynos_mixer.c
··· 365 365 switch (win_data->pixel_format) { 366 366 case DRM_FORMAT_NV12MT: 367 367 tiled_mode = true; 368 - case DRM_FORMAT_NV12M: 368 + case DRM_FORMAT_NV12: 369 369 crcb_mode = false; 370 370 buf_num = 2; 371 371 break; ··· 601 601 mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); 602 602 603 603 /* setting graphical layers */ 604 - 605 604 val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */ 606 605 val |= MXR_GRP_CFG_WIN_BLEND_EN; 606 + val |= MXR_GRP_CFG_BLEND_PRE_MUL; 607 + val |= MXR_GRP_CFG_PIXEL_BLEND_EN; 607 608 val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */ 608 609 609 610 /* the same configuration for both layers */ 610 611 mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val); 611 - 612 - val |= MXR_GRP_CFG_BLEND_PRE_MUL; 613 - val |= MXR_GRP_CFG_PIXEL_BLEND_EN; 614 612 mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val); 613 + 614 + /* setting video layers */ 615 + val = MXR_GRP_CFG_ALPHA_VAL(0); 616 + mixer_reg_write(res, MXR_VIDEO_CFG, val); 615 617 616 618 /* configuration of Video Processor Registers */ 617 619 vp_win_reset(ctx);
+9 -4
drivers/gpu/drm/i915/i915_drv.c
··· 233 233 .has_blt_ring = 1, 234 234 .has_llc = 1, 235 235 .has_pch_split = 1, 236 + .has_force_wake = 1, 236 237 }; 237 238 238 239 static const struct intel_device_info intel_sandybridge_m_info = { ··· 244 243 .has_blt_ring = 1, 245 244 .has_llc = 1, 246 245 .has_pch_split = 1, 246 + .has_force_wake = 1, 247 247 }; 248 248 249 249 static const struct intel_device_info intel_ivybridge_d_info = { ··· 254 252 .has_blt_ring = 1, 255 253 .has_llc = 1, 256 254 .has_pch_split = 1, 255 + .has_force_wake = 1, 257 256 }; 258 257 259 258 static const struct intel_device_info intel_ivybridge_m_info = { ··· 265 262 .has_blt_ring = 1, 266 263 .has_llc = 1, 267 264 .has_pch_split = 1, 265 + .has_force_wake = 1, 268 266 }; 269 267 270 268 static const struct intel_device_info intel_valleyview_m_info = { ··· 293 289 .has_blt_ring = 1, 294 290 .has_llc = 1, 295 291 .has_pch_split = 1, 292 + .has_force_wake = 1, 296 293 }; 297 294 298 295 static const struct intel_device_info intel_haswell_m_info = { ··· 303 298 .has_blt_ring = 1, 304 299 .has_llc = 1, 305 300 .has_pch_split = 1, 301 + .has_force_wake = 1, 306 302 }; 307 303 308 304 static const struct pci_device_id pciidlist[] = { /* aka */ ··· 1145 1139 1146 1140 /* We give fast paths for the really cool registers */ 1147 1141 #define NEEDS_FORCE_WAKE(dev_priv, reg) \ 1148 - (((dev_priv)->info->gen >= 6) && \ 1149 - ((reg) < 0x40000) && \ 1150 - ((reg) != FORCEWAKE)) && \ 1151 - (!IS_VALLEYVIEW((dev_priv)->dev)) 1142 + ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ 1143 + ((reg) < 0x40000) && \ 1144 + ((reg) != FORCEWAKE)) 1152 1145 1153 1146 #define __i915_read(x, y) \ 1154 1147 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+3
drivers/gpu/drm/i915/i915_drv.h
··· 285 285 u8 is_ivybridge:1; 286 286 u8 is_valleyview:1; 287 287 u8 has_pch_split:1; 288 + u8 has_force_wake:1; 288 289 u8 is_haswell:1; 289 290 u8 has_fbc:1; 290 291 u8 has_pipe_cxsr:1; ··· 1101 1100 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 1102 1101 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 1103 1102 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 1103 + 1104 + #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) 1104 1105 1105 1106 #include "i915_trace.h" 1106 1107
+35 -3
drivers/gpu/drm/i915/i915_irq.c
··· 510 510 return ret; 511 511 } 512 512 513 - static void pch_irq_handler(struct drm_device *dev, u32 pch_iir) 513 + static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 514 514 { 515 515 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 516 516 int pipe; ··· 548 548 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); 549 549 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 550 550 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); 551 + } 552 + 553 + static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 554 + { 555 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 556 + int pipe; 557 + 558 + if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) 559 + DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 560 + (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 561 + SDE_AUDIO_POWER_SHIFT_CPT); 562 + 563 + if (pch_iir & SDE_AUX_MASK_CPT) 564 + DRM_DEBUG_DRIVER("AUX channel interrupt\n"); 565 + 566 + if (pch_iir & SDE_GMBUS_CPT) 567 + DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); 568 + 569 + if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 570 + DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 571 + 572 + if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 573 + DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 574 + 575 + if (pch_iir & SDE_FDI_MASK_CPT) 576 + for_each_pipe(pipe) 577 + DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 578 + pipe_name(pipe), 579 + I915_READ(FDI_RX_IIR(pipe))); 551 580 } 552 581 553 582 static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) ··· 620 591 621 592 if (pch_iir & SDE_HOTPLUG_MASK_CPT) 622 593 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 623 - pch_irq_handler(dev, pch_iir); 594 + cpt_irq_handler(dev, pch_iir); 624 595 625 596 /* clear PCH hotplug event before clear CPU irq */ 626 597 I915_WRITE(SDEIIR, pch_iir); ··· 713 684 if (de_iir & DE_PCH_EVENT) { 714 685 if (pch_iir & hotplug_mask) 715 686 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 716 - pch_irq_handler(dev, pch_iir); 687 + if 
(HAS_PCH_CPT(dev)) 688 + cpt_irq_handler(dev, pch_iir); 689 + else 690 + ibx_irq_handler(dev, pch_iir); 717 691 } 718 692 719 693 if (de_iir & DE_PCU_EVENT) {
+40 -3
drivers/gpu/drm/i915/i915_reg.h
··· 210 210 #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) 211 211 #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) 212 212 #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) 213 + /* IVB has funny definitions for which plane to flip. */ 214 + #define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19) 215 + #define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19) 216 + #define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19) 217 + #define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) 218 + #define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) 219 + #define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) 220 + 213 221 #define MI_SET_CONTEXT MI_INSTR(0x18, 0) 214 222 #define MI_MM_SPACE_GTT (1<<8) 215 223 #define MI_MM_SPACE_PHYSICAL (0<<8) ··· 3321 3313 3322 3314 /* PCH */ 3323 3315 3324 - /* south display engine interrupt */ 3316 + /* south display engine interrupt: IBX */ 3325 3317 #define SDE_AUDIO_POWER_D (1 << 27) 3326 3318 #define SDE_AUDIO_POWER_C (1 << 26) 3327 3319 #define SDE_AUDIO_POWER_B (1 << 25) ··· 3357 3349 #define SDE_TRANSA_CRC_ERR (1 << 1) 3358 3350 #define SDE_TRANSA_FIFO_UNDER (1 << 0) 3359 3351 #define SDE_TRANS_MASK (0x3f) 3360 - /* CPT */ 3361 - #define SDE_CRT_HOTPLUG_CPT (1 << 19) 3352 + 3353 + /* south display engine interrupt: CPT/PPT */ 3354 + #define SDE_AUDIO_POWER_D_CPT (1 << 31) 3355 + #define SDE_AUDIO_POWER_C_CPT (1 << 30) 3356 + #define SDE_AUDIO_POWER_B_CPT (1 << 29) 3357 + #define SDE_AUDIO_POWER_SHIFT_CPT 29 3358 + #define SDE_AUDIO_POWER_MASK_CPT (7 << 29) 3359 + #define SDE_AUXD_CPT (1 << 27) 3360 + #define SDE_AUXC_CPT (1 << 26) 3361 + #define SDE_AUXB_CPT (1 << 25) 3362 + #define SDE_AUX_MASK_CPT (7 << 25) 3362 3363 #define SDE_PORTD_HOTPLUG_CPT (1 << 23) 3363 3364 #define SDE_PORTC_HOTPLUG_CPT (1 << 22) 3364 3365 #define SDE_PORTB_HOTPLUG_CPT (1 << 21) 3366 + #define SDE_CRT_HOTPLUG_CPT (1 << 19) 3365 3367 #define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ 3366 3368 SDE_PORTD_HOTPLUG_CPT | \ 3367 3369 SDE_PORTC_HOTPLUG_CPT | \ 3368 3370 SDE_PORTB_HOTPLUG_CPT) 3371 + #define SDE_GMBUS_CPT 
(1 << 17) 3372 + #define SDE_AUDIO_CP_REQ_C_CPT (1 << 10) 3373 + #define SDE_AUDIO_CP_CHG_C_CPT (1 << 9) 3374 + #define SDE_FDI_RXC_CPT (1 << 8) 3375 + #define SDE_AUDIO_CP_REQ_B_CPT (1 << 6) 3376 + #define SDE_AUDIO_CP_CHG_B_CPT (1 << 5) 3377 + #define SDE_FDI_RXB_CPT (1 << 4) 3378 + #define SDE_AUDIO_CP_REQ_A_CPT (1 << 2) 3379 + #define SDE_AUDIO_CP_CHG_A_CPT (1 << 1) 3380 + #define SDE_FDI_RXA_CPT (1 << 0) 3381 + #define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \ 3382 + SDE_AUDIO_CP_REQ_B_CPT | \ 3383 + SDE_AUDIO_CP_REQ_A_CPT) 3384 + #define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \ 3385 + SDE_AUDIO_CP_CHG_B_CPT | \ 3386 + SDE_AUDIO_CP_CHG_A_CPT) 3387 + #define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \ 3388 + SDE_FDI_RXB_CPT | \ 3389 + SDE_FDI_RXA_CPT) 3369 3390 3370 3391 #define SDEISR 0xc4000 3371 3392 #define SDEIMR 0xc4004
+18 -1
drivers/gpu/drm/i915/intel_display.c
··· 6158 6158 struct drm_i915_private *dev_priv = dev->dev_private; 6159 6159 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6160 6160 struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; 6161 + uint32_t plane_bit = 0; 6161 6162 int ret; 6162 6163 6163 6164 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 6164 6165 if (ret) 6165 6166 goto err; 6166 6167 6168 + switch(intel_crtc->plane) { 6169 + case PLANE_A: 6170 + plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A; 6171 + break; 6172 + case PLANE_B: 6173 + plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B; 6174 + break; 6175 + case PLANE_C: 6176 + plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C; 6177 + break; 6178 + default: 6179 + WARN_ONCE(1, "unknown plane in flip command\n"); 6180 + ret = -ENODEV; 6181 + goto err; 6182 + } 6183 + 6167 6184 ret = intel_ring_begin(ring, 4); 6168 6185 if (ret) 6169 6186 goto err_unpin; 6170 6187 6171 - intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); 6188 + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); 6172 6189 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 6173 6190 intel_ring_emit(ring, (obj->gtt_offset)); 6174 6191 intel_ring_emit(ring, (MI_NOOP));
+18 -3
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 266 266 267 267 static int init_ring_common(struct intel_ring_buffer *ring) 268 268 { 269 - drm_i915_private_t *dev_priv = ring->dev->dev_private; 269 + struct drm_device *dev = ring->dev; 270 + drm_i915_private_t *dev_priv = dev->dev_private; 270 271 struct drm_i915_gem_object *obj = ring->obj; 272 + int ret = 0; 271 273 u32 head; 274 + 275 + if (HAS_FORCE_WAKE(dev)) 276 + gen6_gt_force_wake_get(dev_priv); 272 277 273 278 /* Stop the ring if it's running. */ 274 279 I915_WRITE_CTL(ring, 0); ··· 322 317 I915_READ_HEAD(ring), 323 318 I915_READ_TAIL(ring), 324 319 I915_READ_START(ring)); 325 - return -EIO; 320 + ret = -EIO; 321 + goto out; 326 322 } 327 323 328 324 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) ··· 332 326 ring->head = I915_READ_HEAD(ring); 333 327 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 334 328 ring->space = ring_space(ring); 329 + ring->last_retired_head = -1; 335 330 } 336 331 337 - return 0; 332 + out: 333 + if (HAS_FORCE_WAKE(dev)) 334 + gen6_gt_force_wake_put(dev_priv); 335 + 336 + return ret; 338 337 } 339 338 340 339 static int ··· 997 986 ret = i915_gem_object_pin(obj, PAGE_SIZE, true); 998 987 if (ret) 999 988 goto err_unref; 989 + 990 + ret = i915_gem_object_set_to_gtt_domain(obj, true); 991 + if (ret) 992 + goto err_unpin; 1000 993 1001 994 ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset, 1002 995 ring->size);
+45 -4
drivers/gpu/drm/radeon/evergreen_cs.c
··· 52 52 u32 cb_color_view[12]; 53 53 u32 cb_color_pitch[12]; 54 54 u32 cb_color_slice[12]; 55 + u32 cb_color_slice_idx[12]; 55 56 u32 cb_color_attrib[12]; 56 57 u32 cb_color_cmask_slice[8];/* unused */ 57 58 u32 cb_color_fmask_slice[8];/* unused */ ··· 128 127 track->cb_color_info[i] = 0; 129 128 track->cb_color_view[i] = 0xFFFFFFFF; 130 129 track->cb_color_pitch[i] = 0; 131 - track->cb_color_slice[i] = 0; 130 + track->cb_color_slice[i] = 0xfffffff; 131 + track->cb_color_slice_idx[i] = 0; 132 132 } 133 133 track->cb_target_mask = 0xFFFFFFFF; 134 134 track->cb_shader_mask = 0xFFFFFFFF; 135 135 track->cb_dirty = true; 136 136 137 + track->db_depth_slice = 0xffffffff; 137 138 track->db_depth_view = 0xFFFFC000; 138 139 track->db_depth_size = 0xFFFFFFFF; 139 140 track->db_depth_control = 0xFFFFFFFF; ··· 253 250 { 254 251 struct evergreen_cs_track *track = p->track; 255 252 unsigned palign, halign, tileb, slice_pt; 253 + unsigned mtile_pr, mtile_ps, mtileb; 256 254 257 255 tileb = 64 * surf->bpe * surf->nsamples; 258 - palign = track->group_size / (8 * surf->bpe * surf->nsamples); 259 - palign = MAX(8, palign); 260 256 slice_pt = 1; 261 257 if (tileb > surf->tsplit) { 262 258 slice_pt = tileb / surf->tsplit; ··· 264 262 /* macro tile width & height */ 265 263 palign = (8 * surf->bankw * track->npipes) * surf->mtilea; 266 264 halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea; 267 - surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt; 265 + mtileb = (palign / 8) * (halign / 8) * tileb;; 266 + mtile_pr = surf->nbx / palign; 267 + mtile_ps = (mtile_pr * surf->nby) / halign; 268 + surf->layer_size = mtile_ps * mtileb * slice_pt; 268 269 surf->base_align = (palign / 8) * (halign / 8) * tileb; 269 270 surf->palign = palign; 270 271 surf->halign = halign; ··· 439 434 440 435 offset += surf.layer_size * mslice; 441 436 if (offset > radeon_bo_size(track->cb_color_bo[id])) { 437 + /* old ddx are broken they allocate bo with w*h*bpp but 438 + * program slice 
with ALIGN(h, 8), catch this and patch 439 + * command stream. 440 + */ 441 + if (!surf.mode) { 442 + volatile u32 *ib = p->ib.ptr; 443 + unsigned long tmp, nby, bsize, size, min = 0; 444 + 445 + /* find the height the ddx wants */ 446 + if (surf.nby > 8) { 447 + min = surf.nby - 8; 448 + } 449 + bsize = radeon_bo_size(track->cb_color_bo[id]); 450 + tmp = track->cb_color_bo_offset[id] << 8; 451 + for (nby = surf.nby; nby > min; nby--) { 452 + size = nby * surf.nbx * surf.bpe * surf.nsamples; 453 + if ((tmp + size * mslice) <= bsize) { 454 + break; 455 + } 456 + } 457 + if (nby > min) { 458 + surf.nby = nby; 459 + slice = ((nby * surf.nbx) / 64) - 1; 460 + if (!evergreen_surface_check(p, &surf, "cb")) { 461 + /* check if this one works */ 462 + tmp += surf.layer_size * mslice; 463 + if (tmp <= bsize) { 464 + ib[track->cb_color_slice_idx[id]] = slice; 465 + goto old_ddx_ok; 466 + } 467 + } 468 + } 469 + } 442 470 dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " 443 471 "offset %d, max layer %d, bo size %ld, slice %d)\n", 444 472 __func__, __LINE__, id, surf.layer_size, ··· 484 446 surf.tsplit, surf.mtilea); 485 447 return -EINVAL; 486 448 } 449 + old_ddx_ok: 487 450 488 451 return 0; 489 452 } ··· 1571 1532 case CB_COLOR7_SLICE: 1572 1533 tmp = (reg - CB_COLOR0_SLICE) / 0x3c; 1573 1534 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); 1535 + track->cb_color_slice_idx[tmp] = idx; 1574 1536 track->cb_dirty = true; 1575 1537 break; 1576 1538 case CB_COLOR8_SLICE: ··· 1580 1540 case CB_COLOR11_SLICE: 1581 1541 tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; 1582 1542 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); 1543 + track->cb_color_slice_idx[tmp] = idx; 1583 1544 track->cb_dirty = true; 1584 1545 break; 1585 1546 case CB_COLOR0_ATTRIB:
+17 -4
drivers/gpu/drm/radeon/ni.c
··· 460 460 rdev->config.cayman.max_pipes_per_simd = 4; 461 461 rdev->config.cayman.max_tile_pipes = 2; 462 462 if ((rdev->pdev->device == 0x9900) || 463 - (rdev->pdev->device == 0x9901)) { 463 + (rdev->pdev->device == 0x9901) || 464 + (rdev->pdev->device == 0x9905) || 465 + (rdev->pdev->device == 0x9906) || 466 + (rdev->pdev->device == 0x9907) || 467 + (rdev->pdev->device == 0x9908) || 468 + (rdev->pdev->device == 0x9909) || 469 + (rdev->pdev->device == 0x9910) || 470 + (rdev->pdev->device == 0x9917)) { 464 471 rdev->config.cayman.max_simds_per_se = 6; 465 472 rdev->config.cayman.max_backends_per_se = 2; 466 473 } else if ((rdev->pdev->device == 0x9903) || 467 - (rdev->pdev->device == 0x9904)) { 474 + (rdev->pdev->device == 0x9904) || 475 + (rdev->pdev->device == 0x990A) || 476 + (rdev->pdev->device == 0x9913) || 477 + (rdev->pdev->device == 0x9918)) { 468 478 rdev->config.cayman.max_simds_per_se = 4; 469 479 rdev->config.cayman.max_backends_per_se = 2; 470 - } else if ((rdev->pdev->device == 0x9990) || 471 - (rdev->pdev->device == 0x9991)) { 480 + } else if ((rdev->pdev->device == 0x9919) || 481 + (rdev->pdev->device == 0x9990) || 482 + (rdev->pdev->device == 0x9991) || 483 + (rdev->pdev->device == 0x9994) || 484 + (rdev->pdev->device == 0x99A0)) { 472 485 rdev->config.cayman.max_simds_per_se = 3; 473 486 rdev->config.cayman.max_backends_per_se = 1; 474 487 } else {
+6 -9
drivers/gpu/drm/radeon/r600.c
··· 2426 2426 if (r) 2427 2427 return r; 2428 2428 2429 + r = r600_audio_init(rdev); 2430 + if (r) { 2431 + DRM_ERROR("radeon: audio init failed\n"); 2432 + return r; 2433 + } 2434 + 2429 2435 return 0; 2430 2436 } 2431 2437 ··· 2465 2459 if (r) { 2466 2460 DRM_ERROR("r600 startup failed on resume\n"); 2467 2461 rdev->accel_working = false; 2468 - return r; 2469 - } 2470 - 2471 - r = r600_audio_init(rdev); 2472 - if (r) { 2473 - DRM_ERROR("radeon: audio resume failed\n"); 2474 2462 return r; 2475 2463 } 2476 2464 ··· 2577 2577 rdev->accel_working = false; 2578 2578 } 2579 2579 2580 - r = r600_audio_init(rdev); 2581 - if (r) 2582 - return r; /* TODO error handling */ 2583 2580 return 0; 2584 2581 } 2585 2582
+3 -2
drivers/gpu/drm/radeon/r600_audio.c
··· 192 192 struct radeon_device *rdev = dev->dev_private; 193 193 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 194 194 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 195 + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); 195 196 int base_rate = 48000; 196 197 197 198 switch (radeon_encoder->encoder_id) { ··· 218 217 WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10); 219 218 WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071); 220 219 221 - /* Some magic trigger or src sel? */ 222 - WREG32_P(0x5ac, 0x01, ~0x77); 220 + /* Select DTO source */ 221 + WREG32(0x5ac, radeon_crtc->crtc_id); 223 222 } else { 224 223 switch (dig->dig_encoder) { 225 224 case 0:
-1
drivers/gpu/drm/radeon/r600_hdmi.c
··· 348 348 WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, 349 349 HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ 350 350 HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ 351 - HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */ 352 351 HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */ 353 352 HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ 354 353 }
+2 -3
drivers/gpu/drm/radeon/radeon.h
··· 1374 1374 1375 1375 struct si_asic { 1376 1376 unsigned max_shader_engines; 1377 - unsigned max_pipes_per_simd; 1378 1377 unsigned max_tile_pipes; 1379 - unsigned max_simds_per_se; 1378 + unsigned max_cu_per_sh; 1379 + unsigned max_sh_per_se; 1380 1380 unsigned max_backends_per_se; 1381 1381 unsigned max_texture_channel_caches; 1382 1382 unsigned max_gprs; ··· 1387 1387 unsigned sc_hiz_tile_fifo_size; 1388 1388 unsigned sc_earlyz_tile_fifo_size; 1389 1389 1390 - unsigned num_shader_engines; 1391 1390 unsigned num_tile_pipes; 1392 1391 unsigned num_backends_per_se; 1393 1392 unsigned backend_disable_mask_per_asic;
+2 -1
drivers/gpu/drm/radeon/radeon_drv.c
··· 57 57 * 2.13.0 - virtual memory support, streamout 58 58 * 2.14.0 - add evergreen tiling informations 59 59 * 2.15.0 - add max_pipes query 60 + * 2.16.0 - fix evergreen 2D tiled surface calculation 60 61 */ 61 62 #define KMS_DRIVER_MAJOR 2 62 - #define KMS_DRIVER_MINOR 15 63 + #define KMS_DRIVER_MINOR 16 63 64 #define KMS_DRIVER_PATCHLEVEL 0 64 65 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 65 66 int radeon_driver_unload_kms(struct drm_device *dev);
+12 -7
drivers/gpu/drm/radeon/radeon_gart.c
··· 476 476 477 477 mutex_lock(&vm->mutex); 478 478 if (last_pfn > vm->last_pfn) { 479 - /* grow va space 32M by 32M */ 480 - unsigned align = ((32 << 20) >> 12) - 1; 479 + /* release mutex and lock in right order */ 480 + mutex_unlock(&vm->mutex); 481 481 radeon_mutex_lock(&rdev->cs_mutex); 482 - radeon_vm_unbind_locked(rdev, vm); 482 + mutex_lock(&vm->mutex); 483 + /* and check again */ 484 + if (last_pfn > vm->last_pfn) { 485 + /* grow va space 32M by 32M */ 486 + unsigned align = ((32 << 20) >> 12) - 1; 487 + radeon_vm_unbind_locked(rdev, vm); 488 + vm->last_pfn = (last_pfn + align) & ~align; 489 + } 483 490 radeon_mutex_unlock(&rdev->cs_mutex); 484 - vm->last_pfn = (last_pfn + align) & ~align; 485 491 } 486 492 head = &vm->va; 487 493 last_offset = 0; ··· 601 595 if (bo_va == NULL) 602 596 return 0; 603 597 604 - mutex_lock(&vm->mutex); 605 598 radeon_mutex_lock(&rdev->cs_mutex); 599 + mutex_lock(&vm->mutex); 606 600 radeon_vm_bo_update_pte(rdev, vm, bo, NULL); 607 601 radeon_mutex_unlock(&rdev->cs_mutex); 608 602 list_del(&bo_va->vm_list); ··· 647 641 struct radeon_bo_va *bo_va, *tmp; 648 642 int r; 649 643 650 - mutex_lock(&vm->mutex); 651 - 652 644 radeon_mutex_lock(&rdev->cs_mutex); 645 + mutex_lock(&vm->mutex); 653 646 radeon_vm_unbind_locked(rdev, vm); 654 647 radeon_mutex_unlock(&rdev->cs_mutex); 655 648
+1 -1
drivers/gpu/drm/radeon/radeon_kms.c
··· 273 273 break; 274 274 case RADEON_INFO_MAX_PIPES: 275 275 if (rdev->family >= CHIP_TAHITI) 276 - value = rdev->config.si.max_pipes_per_simd; 276 + value = rdev->config.si.max_cu_per_sh; 277 277 else if (rdev->family >= CHIP_CAYMAN) 278 278 value = rdev->config.cayman.max_pipes_per_simd; 279 279 else if (rdev->family >= CHIP_CEDAR)
+6 -6
drivers/gpu/drm/radeon/rs600.c
··· 908 908 return r; 909 909 } 910 910 911 - r = r600_audio_init(rdev); 912 - if (r) { 913 - dev_err(rdev->dev, "failed initializing audio\n"); 914 - return r; 915 - } 916 - 917 911 r = radeon_ib_pool_start(rdev); 918 912 if (r) 919 913 return r; ··· 915 921 r = radeon_ib_ring_tests(rdev); 916 922 if (r) 917 923 return r; 924 + 925 + r = r600_audio_init(rdev); 926 + if (r) { 927 + dev_err(rdev->dev, "failed initializing audio\n"); 928 + return r; 929 + } 918 930 919 931 return 0; 920 932 }
+6 -6
drivers/gpu/drm/radeon/rs690.c
··· 637 637 return r; 638 638 } 639 639 640 - r = r600_audio_init(rdev); 641 - if (r) { 642 - dev_err(rdev->dev, "failed initializing audio\n"); 643 - return r; 644 - } 645 - 646 640 r = radeon_ib_pool_start(rdev); 647 641 if (r) 648 642 return r; ··· 644 650 r = radeon_ib_ring_tests(rdev); 645 651 if (r) 646 652 return r; 653 + 654 + r = r600_audio_init(rdev); 655 + if (r) { 656 + dev_err(rdev->dev, "failed initializing audio\n"); 657 + return r; 658 + } 647 659 648 660 return 0; 649 661 }
+6 -12
drivers/gpu/drm/radeon/rv770.c
··· 956 956 if (r) 957 957 return r; 958 958 959 + r = r600_audio_init(rdev); 960 + if (r) { 961 + DRM_ERROR("radeon: audio init failed\n"); 962 + return r; 963 + } 964 + 959 965 return 0; 960 966 } 961 967 ··· 981 975 if (r) { 982 976 DRM_ERROR("r600 startup failed on resume\n"); 983 977 rdev->accel_working = false; 984 - return r; 985 - } 986 - 987 - r = r600_audio_init(rdev); 988 - if (r) { 989 - dev_err(rdev->dev, "radeon: audio init failed\n"); 990 978 return r; 991 979 } 992 980 ··· 1090 1090 radeon_irq_kms_fini(rdev); 1091 1091 rv770_pcie_gart_fini(rdev); 1092 1092 rdev->accel_working = false; 1093 - } 1094 - 1095 - r = r600_audio_init(rdev); 1096 - if (r) { 1097 - dev_err(rdev->dev, "radeon: audio init failed\n"); 1098 - return r; 1099 1093 } 1100 1094 1101 1095 return 0;
+164 -317
drivers/gpu/drm/radeon/si.c
··· 867 867 /* 868 868 * Core functions 869 869 */ 870 - static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev, 871 - u32 num_tile_pipes, 872 - u32 num_backends_per_asic, 873 - u32 *backend_disable_mask_per_asic, 874 - u32 num_shader_engines) 875 - { 876 - u32 backend_map = 0; 877 - u32 enabled_backends_mask = 0; 878 - u32 enabled_backends_count = 0; 879 - u32 num_backends_per_se; 880 - u32 cur_pipe; 881 - u32 swizzle_pipe[SI_MAX_PIPES]; 882 - u32 cur_backend = 0; 883 - u32 i; 884 - bool force_no_swizzle; 885 - 886 - /* force legal values */ 887 - if (num_tile_pipes < 1) 888 - num_tile_pipes = 1; 889 - if (num_tile_pipes > rdev->config.si.max_tile_pipes) 890 - num_tile_pipes = rdev->config.si.max_tile_pipes; 891 - if (num_shader_engines < 1) 892 - num_shader_engines = 1; 893 - if (num_shader_engines > rdev->config.si.max_shader_engines) 894 - num_shader_engines = rdev->config.si.max_shader_engines; 895 - if (num_backends_per_asic < num_shader_engines) 896 - num_backends_per_asic = num_shader_engines; 897 - if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines)) 898 - num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines; 899 - 900 - /* make sure we have the same number of backends per se */ 901 - num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines); 902 - /* set up the number of backends per se */ 903 - num_backends_per_se = num_backends_per_asic / num_shader_engines; 904 - if (num_backends_per_se > rdev->config.si.max_backends_per_se) { 905 - num_backends_per_se = rdev->config.si.max_backends_per_se; 906 - num_backends_per_asic = num_backends_per_se * num_shader_engines; 907 - } 908 - 909 - /* create enable mask and count for enabled backends */ 910 - for (i = 0; i < SI_MAX_BACKENDS; ++i) { 911 - if (((*backend_disable_mask_per_asic >> i) & 1) == 0) { 912 - enabled_backends_mask |= (1 << i); 913 - ++enabled_backends_count; 914 - } 915 - if (enabled_backends_count == 
num_backends_per_asic) 916 - break; 917 - } 918 - 919 - /* force the backends mask to match the current number of backends */ 920 - if (enabled_backends_count != num_backends_per_asic) { 921 - u32 this_backend_enabled; 922 - u32 shader_engine; 923 - u32 backend_per_se; 924 - 925 - enabled_backends_mask = 0; 926 - enabled_backends_count = 0; 927 - *backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK; 928 - for (i = 0; i < SI_MAX_BACKENDS; ++i) { 929 - /* calc the current se */ 930 - shader_engine = i / rdev->config.si.max_backends_per_se; 931 - /* calc the backend per se */ 932 - backend_per_se = i % rdev->config.si.max_backends_per_se; 933 - /* default to not enabled */ 934 - this_backend_enabled = 0; 935 - if ((shader_engine < num_shader_engines) && 936 - (backend_per_se < num_backends_per_se)) 937 - this_backend_enabled = 1; 938 - if (this_backend_enabled) { 939 - enabled_backends_mask |= (1 << i); 940 - *backend_disable_mask_per_asic &= ~(1 << i); 941 - ++enabled_backends_count; 942 - } 943 - } 944 - } 945 - 946 - 947 - memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES); 948 - switch (rdev->family) { 949 - case CHIP_TAHITI: 950 - case CHIP_PITCAIRN: 951 - case CHIP_VERDE: 952 - force_no_swizzle = true; 953 - break; 954 - default: 955 - force_no_swizzle = false; 956 - break; 957 - } 958 - if (force_no_swizzle) { 959 - bool last_backend_enabled = false; 960 - 961 - force_no_swizzle = false; 962 - for (i = 0; i < SI_MAX_BACKENDS; ++i) { 963 - if (((enabled_backends_mask >> i) & 1) == 1) { 964 - if (last_backend_enabled) 965 - force_no_swizzle = true; 966 - last_backend_enabled = true; 967 - } else 968 - last_backend_enabled = false; 969 - } 970 - } 971 - 972 - switch (num_tile_pipes) { 973 - case 1: 974 - case 3: 975 - case 5: 976 - case 7: 977 - DRM_ERROR("odd number of pipes!\n"); 978 - break; 979 - case 2: 980 - swizzle_pipe[0] = 0; 981 - swizzle_pipe[1] = 1; 982 - break; 983 - case 4: 984 - if (force_no_swizzle) { 985 - swizzle_pipe[0] = 0; 986 
- swizzle_pipe[1] = 1; 987 - swizzle_pipe[2] = 2; 988 - swizzle_pipe[3] = 3; 989 - } else { 990 - swizzle_pipe[0] = 0; 991 - swizzle_pipe[1] = 2; 992 - swizzle_pipe[2] = 1; 993 - swizzle_pipe[3] = 3; 994 - } 995 - break; 996 - case 6: 997 - if (force_no_swizzle) { 998 - swizzle_pipe[0] = 0; 999 - swizzle_pipe[1] = 1; 1000 - swizzle_pipe[2] = 2; 1001 - swizzle_pipe[3] = 3; 1002 - swizzle_pipe[4] = 4; 1003 - swizzle_pipe[5] = 5; 1004 - } else { 1005 - swizzle_pipe[0] = 0; 1006 - swizzle_pipe[1] = 2; 1007 - swizzle_pipe[2] = 4; 1008 - swizzle_pipe[3] = 1; 1009 - swizzle_pipe[4] = 3; 1010 - swizzle_pipe[5] = 5; 1011 - } 1012 - break; 1013 - case 8: 1014 - if (force_no_swizzle) { 1015 - swizzle_pipe[0] = 0; 1016 - swizzle_pipe[1] = 1; 1017 - swizzle_pipe[2] = 2; 1018 - swizzle_pipe[3] = 3; 1019 - swizzle_pipe[4] = 4; 1020 - swizzle_pipe[5] = 5; 1021 - swizzle_pipe[6] = 6; 1022 - swizzle_pipe[7] = 7; 1023 - } else { 1024 - swizzle_pipe[0] = 0; 1025 - swizzle_pipe[1] = 2; 1026 - swizzle_pipe[2] = 4; 1027 - swizzle_pipe[3] = 6; 1028 - swizzle_pipe[4] = 1; 1029 - swizzle_pipe[5] = 3; 1030 - swizzle_pipe[6] = 5; 1031 - swizzle_pipe[7] = 7; 1032 - } 1033 - break; 1034 - } 1035 - 1036 - for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { 1037 - while (((1 << cur_backend) & enabled_backends_mask) == 0) 1038 - cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS; 1039 - 1040 - backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4))); 1041 - 1042 - cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS; 1043 - } 1044 - 1045 - return backend_map; 1046 - } 1047 - 1048 - static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev, 1049 - u32 disable_mask_per_se, 1050 - u32 max_disable_mask_per_se, 1051 - u32 num_shader_engines) 1052 - { 1053 - u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se); 1054 - u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se; 1055 - 1056 - if (num_shader_engines == 1) 1057 - return 
disable_mask_per_asic; 1058 - else if (num_shader_engines == 2) 1059 - return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se); 1060 - else 1061 - return 0xffffffff; 1062 - } 1063 - 1064 870 static void si_tiling_mode_table_init(struct radeon_device *rdev) 1065 871 { 1066 872 const u32 num_tile_mode_states = 32; ··· 1368 1562 DRM_ERROR("unknown asic: 0x%x\n", rdev->family); 1369 1563 } 1370 1564 1565 + static void si_select_se_sh(struct radeon_device *rdev, 1566 + u32 se_num, u32 sh_num) 1567 + { 1568 + u32 data = INSTANCE_BROADCAST_WRITES; 1569 + 1570 + if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) 1571 + data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES; 1572 + else if (se_num == 0xffffffff) 1573 + data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num); 1574 + else if (sh_num == 0xffffffff) 1575 + data |= SH_BROADCAST_WRITES | SE_INDEX(se_num); 1576 + else 1577 + data |= SH_INDEX(sh_num) | SE_INDEX(se_num); 1578 + WREG32(GRBM_GFX_INDEX, data); 1579 + } 1580 + 1581 + static u32 si_create_bitmask(u32 bit_width) 1582 + { 1583 + u32 i, mask = 0; 1584 + 1585 + for (i = 0; i < bit_width; i++) { 1586 + mask <<= 1; 1587 + mask |= 1; 1588 + } 1589 + return mask; 1590 + } 1591 + 1592 + static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh) 1593 + { 1594 + u32 data, mask; 1595 + 1596 + data = RREG32(CC_GC_SHADER_ARRAY_CONFIG); 1597 + if (data & 1) 1598 + data &= INACTIVE_CUS_MASK; 1599 + else 1600 + data = 0; 1601 + data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG); 1602 + 1603 + data >>= INACTIVE_CUS_SHIFT; 1604 + 1605 + mask = si_create_bitmask(cu_per_sh); 1606 + 1607 + return ~data & mask; 1608 + } 1609 + 1610 + static void si_setup_spi(struct radeon_device *rdev, 1611 + u32 se_num, u32 sh_per_se, 1612 + u32 cu_per_sh) 1613 + { 1614 + int i, j, k; 1615 + u32 data, mask, active_cu; 1616 + 1617 + for (i = 0; i < se_num; i++) { 1618 + for (j = 0; j < sh_per_se; j++) { 1619 + si_select_se_sh(rdev, i, j); 1620 + data = 
RREG32(SPI_STATIC_THREAD_MGMT_3); 1621 + active_cu = si_get_cu_enabled(rdev, cu_per_sh); 1622 + 1623 + mask = 1; 1624 + for (k = 0; k < 16; k++) { 1625 + mask <<= k; 1626 + if (active_cu & mask) { 1627 + data &= ~mask; 1628 + WREG32(SPI_STATIC_THREAD_MGMT_3, data); 1629 + break; 1630 + } 1631 + } 1632 + } 1633 + } 1634 + si_select_se_sh(rdev, 0xffffffff, 0xffffffff); 1635 + } 1636 + 1637 + static u32 si_get_rb_disabled(struct radeon_device *rdev, 1638 + u32 max_rb_num, u32 se_num, 1639 + u32 sh_per_se) 1640 + { 1641 + u32 data, mask; 1642 + 1643 + data = RREG32(CC_RB_BACKEND_DISABLE); 1644 + if (data & 1) 1645 + data &= BACKEND_DISABLE_MASK; 1646 + else 1647 + data = 0; 1648 + data |= RREG32(GC_USER_RB_BACKEND_DISABLE); 1649 + 1650 + data >>= BACKEND_DISABLE_SHIFT; 1651 + 1652 + mask = si_create_bitmask(max_rb_num / se_num / sh_per_se); 1653 + 1654 + return data & mask; 1655 + } 1656 + 1657 + static void si_setup_rb(struct radeon_device *rdev, 1658 + u32 se_num, u32 sh_per_se, 1659 + u32 max_rb_num) 1660 + { 1661 + int i, j; 1662 + u32 data, mask; 1663 + u32 disabled_rbs = 0; 1664 + u32 enabled_rbs = 0; 1665 + 1666 + for (i = 0; i < se_num; i++) { 1667 + for (j = 0; j < sh_per_se; j++) { 1668 + si_select_se_sh(rdev, i, j); 1669 + data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); 1670 + disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); 1671 + } 1672 + } 1673 + si_select_se_sh(rdev, 0xffffffff, 0xffffffff); 1674 + 1675 + mask = 1; 1676 + for (i = 0; i < max_rb_num; i++) { 1677 + if (!(disabled_rbs & mask)) 1678 + enabled_rbs |= mask; 1679 + mask <<= 1; 1680 + } 1681 + 1682 + for (i = 0; i < se_num; i++) { 1683 + si_select_se_sh(rdev, i, 0xffffffff); 1684 + data = 0; 1685 + for (j = 0; j < sh_per_se; j++) { 1686 + switch (enabled_rbs & 3) { 1687 + case 1: 1688 + data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); 1689 + break; 1690 + case 2: 1691 + data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2); 1692 + 
break; 1693 + case 3: 1694 + default: 1695 + data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); 1696 + break; 1697 + } 1698 + enabled_rbs >>= 2; 1699 + } 1700 + WREG32(PA_SC_RASTER_CONFIG, data); 1701 + } 1702 + si_select_se_sh(rdev, 0xffffffff, 0xffffffff); 1703 + } 1704 + 1371 1705 static void si_gpu_init(struct radeon_device *rdev) 1372 1706 { 1373 - u32 cc_rb_backend_disable = 0; 1374 - u32 cc_gc_shader_array_config; 1375 1707 u32 gb_addr_config = 0; 1376 1708 u32 mc_shared_chmap, mc_arb_ramcfg; 1377 - u32 gb_backend_map; 1378 - u32 cgts_tcc_disable; 1379 1709 u32 sx_debug_1; 1380 - u32 gc_user_shader_array_config; 1381 - u32 gc_user_rb_backend_disable; 1382 - u32 cgts_user_tcc_disable; 1383 1710 u32 hdp_host_path_cntl; 1384 1711 u32 tmp; 1385 1712 int i, j; ··· 1520 1581 switch (rdev->family) { 1521 1582 case CHIP_TAHITI: 1522 1583 rdev->config.si.max_shader_engines = 2; 1523 - rdev->config.si.max_pipes_per_simd = 4; 1524 1584 rdev->config.si.max_tile_pipes = 12; 1525 - rdev->config.si.max_simds_per_se = 8; 1585 + rdev->config.si.max_cu_per_sh = 8; 1586 + rdev->config.si.max_sh_per_se = 2; 1526 1587 rdev->config.si.max_backends_per_se = 4; 1527 1588 rdev->config.si.max_texture_channel_caches = 12; 1528 1589 rdev->config.si.max_gprs = 256; ··· 1533 1594 rdev->config.si.sc_prim_fifo_size_backend = 0x100; 1534 1595 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; 1535 1596 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; 1597 + gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; 1536 1598 break; 1537 1599 case CHIP_PITCAIRN: 1538 1600 rdev->config.si.max_shader_engines = 2; 1539 - rdev->config.si.max_pipes_per_simd = 4; 1540 1601 rdev->config.si.max_tile_pipes = 8; 1541 - rdev->config.si.max_simds_per_se = 5; 1602 + rdev->config.si.max_cu_per_sh = 5; 1603 + rdev->config.si.max_sh_per_se = 2; 1542 1604 rdev->config.si.max_backends_per_se = 4; 1543 1605 rdev->config.si.max_texture_channel_caches = 8; 1544 1606 rdev->config.si.max_gprs = 256; ··· 1550 1610 
rdev->config.si.sc_prim_fifo_size_backend = 0x100; 1551 1611 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; 1552 1612 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; 1613 + gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; 1553 1614 break; 1554 1615 case CHIP_VERDE: 1555 1616 default: 1556 1617 rdev->config.si.max_shader_engines = 1; 1557 - rdev->config.si.max_pipes_per_simd = 4; 1558 1618 rdev->config.si.max_tile_pipes = 4; 1559 - rdev->config.si.max_simds_per_se = 2; 1619 + rdev->config.si.max_cu_per_sh = 2; 1620 + rdev->config.si.max_sh_per_se = 2; 1560 1621 rdev->config.si.max_backends_per_se = 4; 1561 1622 rdev->config.si.max_texture_channel_caches = 4; 1562 1623 rdev->config.si.max_gprs = 256; ··· 1568 1627 rdev->config.si.sc_prim_fifo_size_backend = 0x40; 1569 1628 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; 1570 1629 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; 1630 + gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN; 1571 1631 break; 1572 1632 } 1573 1633 ··· 1590 1648 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 1591 1649 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 1592 1650 1593 - cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); 1594 - cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG); 1595 - cgts_tcc_disable = 0xffff0000; 1596 - for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++) 1597 - cgts_tcc_disable &= ~(1 << (16 + i)); 1598 - gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); 1599 - gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG); 1600 - cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); 1601 - 1602 - rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines; 1603 1651 rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes; 1604 - tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT; 1605 - rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp); 1606 - tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> 
BACKEND_DISABLE_SHIFT; 1607 - rdev->config.si.backend_disable_mask_per_asic = 1608 - si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK, 1609 - rdev->config.si.num_shader_engines); 1610 - rdev->config.si.backend_map = 1611 - si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes, 1612 - rdev->config.si.num_backends_per_se * 1613 - rdev->config.si.num_shader_engines, 1614 - &rdev->config.si.backend_disable_mask_per_asic, 1615 - rdev->config.si.num_shader_engines); 1616 - tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT; 1617 - rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp); 1618 1652 rdev->config.si.mem_max_burst_length_bytes = 256; 1619 1653 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; 1620 1654 rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; ··· 1601 1683 rdev->config.si.num_gpus = 1; 1602 1684 rdev->config.si.multi_gpu_tile_size = 64; 1603 1685 1604 - gb_addr_config = 0; 1605 - switch (rdev->config.si.num_tile_pipes) { 1606 - case 1: 1607 - gb_addr_config |= NUM_PIPES(0); 1608 - break; 1609 - case 2: 1610 - gb_addr_config |= NUM_PIPES(1); 1611 - break; 1612 - case 4: 1613 - gb_addr_config |= NUM_PIPES(2); 1614 - break; 1615 - case 8: 1616 - default: 1617 - gb_addr_config |= NUM_PIPES(3); 1618 - break; 1619 - } 1620 - 1621 - tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1; 1622 - gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp); 1623 - gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1); 1624 - tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1; 1625 - gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp); 1626 - switch (rdev->config.si.num_gpus) { 1627 - case 1: 1628 - default: 1629 - gb_addr_config |= NUM_GPUS(0); 1630 - break; 1631 - case 2: 1632 - gb_addr_config |= NUM_GPUS(1); 1633 - break; 1634 - case 4: 1635 - gb_addr_config |= NUM_GPUS(2); 1636 - break; 1637 - } 1638 - switch (rdev->config.si.multi_gpu_tile_size) { 
1639 - case 16: 1640 - gb_addr_config |= MULTI_GPU_TILE_SIZE(0); 1641 - break; 1642 - case 32: 1643 - default: 1644 - gb_addr_config |= MULTI_GPU_TILE_SIZE(1); 1645 - break; 1646 - case 64: 1647 - gb_addr_config |= MULTI_GPU_TILE_SIZE(2); 1648 - break; 1649 - case 128: 1650 - gb_addr_config |= MULTI_GPU_TILE_SIZE(3); 1651 - break; 1652 - } 1686 + /* fix up row size */ 1687 + gb_addr_config &= ~ROW_SIZE_MASK; 1653 1688 switch (rdev->config.si.mem_row_size_in_kb) { 1654 1689 case 1: 1655 1690 default: ··· 1615 1744 gb_addr_config |= ROW_SIZE(2); 1616 1745 break; 1617 1746 } 1618 - 1619 - tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT; 1620 - rdev->config.si.num_tile_pipes = (1 << tmp); 1621 - tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT; 1622 - rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256; 1623 - tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT; 1624 - rdev->config.si.num_shader_engines = tmp + 1; 1625 - tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT; 1626 - rdev->config.si.num_gpus = tmp + 1; 1627 - tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT; 1628 - rdev->config.si.multi_gpu_tile_size = 1 << tmp; 1629 - tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT; 1630 - rdev->config.si.mem_row_size_in_kb = 1 << tmp; 1631 - 1632 - gb_backend_map = 1633 - si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes, 1634 - rdev->config.si.num_backends_per_se * 1635 - rdev->config.si.num_shader_engines, 1636 - &rdev->config.si.backend_disable_mask_per_asic, 1637 - rdev->config.si.num_shader_engines); 1638 1747 1639 1748 /* setup tiling info dword. gb_addr_config is not adequate since it does 1640 1749 * not have bank info, so create a custom tiling dword. 
··· 1640 1789 rdev->config.si.tile_config |= (3 << 0); 1641 1790 break; 1642 1791 } 1643 - rdev->config.si.tile_config |= 1644 - ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 1792 + if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) 1793 + rdev->config.si.tile_config |= 1 << 4; 1794 + else 1795 + rdev->config.si.tile_config |= 0 << 4; 1645 1796 rdev->config.si.tile_config |= 1646 1797 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 1647 1798 rdev->config.si.tile_config |= 1648 1799 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; 1649 1800 1650 - rdev->config.si.backend_map = gb_backend_map; 1651 1801 WREG32(GB_ADDR_CONFIG, gb_addr_config); 1652 1802 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 1653 1803 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 1654 1804 1655 - /* primary versions */ 1656 - WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1657 - WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1658 - WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config); 1659 - 1660 - WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable); 1661 - 1662 - /* user versions */ 1663 - WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1664 - WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1665 - WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config); 1666 - 1667 - WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); 1668 - 1669 1805 si_tiling_mode_table_init(rdev); 1806 + 1807 + si_setup_rb(rdev, rdev->config.si.max_shader_engines, 1808 + rdev->config.si.max_sh_per_se, 1809 + rdev->config.si.max_backends_per_se); 1810 + 1811 + si_setup_spi(rdev, rdev->config.si.max_shader_engines, 1812 + rdev->config.si.max_sh_per_se, 1813 + rdev->config.si.max_cu_per_sh); 1814 + 1670 1815 1671 1816 /* set HW defaults for 3D engine */ 1672 1817 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+19
drivers/gpu/drm/radeon/sid.h
··· 24 24 #ifndef SI_H 25 25 #define SI_H 26 26 27 + #define TAHITI_RB_BITMAP_WIDTH_PER_SH 2 28 + 29 + #define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003 30 + #define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 31 + 27 32 #define CG_MULT_THERMAL_STATUS 0x714 28 33 #define ASIC_MAX_TEMP(x) ((x) << 0) 29 34 #define ASIC_MAX_TEMP_MASK 0x000001ff ··· 413 408 #define SOFT_RESET_IA (1 << 15) 414 409 415 410 #define GRBM_GFX_INDEX 0x802C 411 + #define INSTANCE_INDEX(x) ((x) << 0) 412 + #define SH_INDEX(x) ((x) << 8) 413 + #define SE_INDEX(x) ((x) << 16) 414 + #define SH_BROADCAST_WRITES (1 << 29) 415 + #define INSTANCE_BROADCAST_WRITES (1 << 30) 416 + #define SE_BROADCAST_WRITES (1 << 31) 416 417 417 418 #define GRBM_INT_CNTL 0x8060 418 419 # define RDERR_INT_ENABLE (1 << 0) ··· 491 480 #define VGT_TF_MEMORY_BASE 0x89B8 492 481 493 482 #define CC_GC_SHADER_ARRAY_CONFIG 0x89bc 483 + #define INACTIVE_CUS_MASK 0xFFFF0000 484 + #define INACTIVE_CUS_SHIFT 16 494 485 #define GC_USER_SHADER_ARRAY_CONFIG 0x89c0 495 486 496 487 #define PA_CL_ENHANCE 0x8A14 ··· 700 687 701 688 #define RLC_MC_CNTL 0xC344 702 689 #define RLC_UCODE_CNTL 0xC348 690 + 691 + #define PA_SC_RASTER_CONFIG 0x28350 692 + # define RASTER_CONFIG_RB_MAP_0 0 693 + # define RASTER_CONFIG_RB_MAP_1 1 694 + # define RASTER_CONFIG_RB_MAP_2 2 695 + # define RASTER_CONFIG_RB_MAP_3 3 703 696 704 697 #define VGT_EVENT_INITIATOR 0x28a90 705 698 # define SAMPLE_STREAMOUTSTATS1 (1 << 0)
+3 -10
drivers/gpu/drm/ttm/ttm_bo.c
··· 1204 1204 (*destroy)(bo); 1205 1205 else 1206 1206 kfree(bo); 1207 + ttm_mem_global_free(mem_glob, acc_size); 1207 1208 return -EINVAL; 1208 1209 } 1209 1210 bo->destroy = destroy; ··· 1308 1307 struct ttm_buffer_object **p_bo) 1309 1308 { 1310 1309 struct ttm_buffer_object *bo; 1311 - struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; 1312 1310 size_t acc_size; 1313 1311 int ret; 1314 1312 1315 - acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); 1316 - ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); 1317 - if (unlikely(ret != 0)) 1318 - return ret; 1319 - 1320 1313 bo = kzalloc(sizeof(*bo), GFP_KERNEL); 1321 - 1322 - if (unlikely(bo == NULL)) { 1323 - ttm_mem_global_free(mem_glob, acc_size); 1314 + if (unlikely(bo == NULL)) 1324 1315 return -ENOMEM; 1325 - } 1326 1316 1317 + acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); 1327 1318 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, 1328 1319 buffer_start, interruptible, 1329 1320 persistent_swap_storage, acc_size, NULL, NULL);
+21 -6
drivers/gpu/vga/vga_switcheroo.c
··· 190 190 return NULL; 191 191 } 192 192 193 + int vga_switcheroo_get_client_state(struct pci_dev *pdev) 194 + { 195 + struct vga_switcheroo_client *client; 196 + 197 + client = find_client_from_pci(&vgasr_priv.clients, pdev); 198 + if (!client) 199 + return VGA_SWITCHEROO_NOT_FOUND; 200 + if (!vgasr_priv.active) 201 + return VGA_SWITCHEROO_INIT; 202 + return client->pwr_state; 203 + } 204 + EXPORT_SYMBOL(vga_switcheroo_get_client_state); 205 + 193 206 void vga_switcheroo_unregister_client(struct pci_dev *pdev) 194 207 { 195 208 struct vga_switcheroo_client *client; ··· 304 291 vga_switchon(new_client); 305 292 306 293 vga_set_default_device(new_client->pdev); 307 - set_audio_state(new_client->id, VGA_SWITCHEROO_ON); 308 - 309 294 return 0; 310 295 } 311 296 ··· 319 308 320 309 active->active = false; 321 310 311 + set_audio_state(active->id, VGA_SWITCHEROO_OFF); 312 + 322 313 if (new_client->fb_info) { 323 314 struct fb_event event; 324 315 event.info = new_client->fb_info; ··· 334 321 if (new_client->ops->reprobe) 335 322 new_client->ops->reprobe(new_client->pdev); 336 323 337 - set_audio_state(active->id, VGA_SWITCHEROO_OFF); 338 - 339 324 if (active->pwr_state == VGA_SWITCHEROO_ON) 340 325 vga_switchoff(active); 326 + 327 + set_audio_state(new_client->id, VGA_SWITCHEROO_ON); 341 328 342 329 new_client->active = true; 343 330 return 0; ··· 384 371 /* pwr off the device not in use */ 385 372 if (strncmp(usercmd, "OFF", 3) == 0) { 386 373 list_for_each_entry(client, &vgasr_priv.clients, list) { 387 - if (client->active) 374 + if (client->active || client_is_audio(client)) 388 375 continue; 376 + set_audio_state(client->id, VGA_SWITCHEROO_OFF); 389 377 if (client->pwr_state == VGA_SWITCHEROO_ON) 390 378 vga_switchoff(client); 391 379 } ··· 395 381 /* pwr on the device not in use */ 396 382 if (strncmp(usercmd, "ON", 2) == 0) { 397 383 list_for_each_entry(client, &vgasr_priv.clients, list) { 398 - if (client->active) 384 + if (client->active || 
client_is_audio(client)) 399 385 continue; 400 386 if (client->pwr_state == VGA_SWITCHEROO_OFF) 401 387 vga_switchon(client); 388 + set_audio_state(client->id, VGA_SWITCHEROO_ON); 402 389 } 403 390 goto out; 404 391 }
+12
drivers/i2c/muxes/Kconfig
··· 37 37 This driver can also be built as a module. If so, the module 38 38 will be called i2c-mux-pca954x. 39 39 40 + config I2C_MUX_PINCTRL 41 + tristate "pinctrl-based I2C multiplexer" 42 + depends on PINCTRL 43 + help 44 + If you say yes to this option, support will be included for an I2C 45 + multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing. 46 + This is useful for SoCs whose I2C module's signals can be routed to 47 + different sets of pins at run-time. 48 + 49 + This driver can also be built as a module. If so, the module will be 50 + called pinctrl-i2cmux. 51 + 40 52 endmenu
+1
drivers/i2c/muxes/Makefile
··· 4 4 obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o 5 5 obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o 6 6 obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o 7 + obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o 7 8 8 9 ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
+279
drivers/i2c/muxes/i2c-mux-pinctrl.c
··· 1 + /* 2 + * I2C multiplexer using pinctrl API 3 + * 4 + * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + #include <linux/i2c.h> 20 + #include <linux/i2c-mux.h> 21 + #include <linux/init.h> 22 + #include <linux/module.h> 23 + #include <linux/of_i2c.h> 24 + #include <linux/pinctrl/consumer.h> 25 + #include <linux/i2c-mux-pinctrl.h> 26 + #include <linux/platform_device.h> 27 + #include <linux/slab.h> 28 + 29 + struct i2c_mux_pinctrl { 30 + struct device *dev; 31 + struct i2c_mux_pinctrl_platform_data *pdata; 32 + struct pinctrl *pinctrl; 33 + struct pinctrl_state **states; 34 + struct pinctrl_state *state_idle; 35 + struct i2c_adapter *parent; 36 + struct i2c_adapter **busses; 37 + }; 38 + 39 + static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data, 40 + u32 chan) 41 + { 42 + struct i2c_mux_pinctrl *mux = data; 43 + 44 + return pinctrl_select_state(mux->pinctrl, mux->states[chan]); 45 + } 46 + 47 + static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data, 48 + u32 chan) 49 + { 50 + struct i2c_mux_pinctrl *mux = data; 51 + 52 + return pinctrl_select_state(mux->pinctrl, mux->state_idle); 53 + } 54 + 55 + #ifdef CONFIG_OF 56 + static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux, 57 + struct platform_device *pdev) 58 + { 59 + struct device_node *np = 
pdev->dev.of_node; 60 + int num_names, i, ret; 61 + struct device_node *adapter_np; 62 + struct i2c_adapter *adapter; 63 + 64 + if (!np) 65 + return 0; 66 + 67 + mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL); 68 + if (!mux->pdata) { 69 + dev_err(mux->dev, 70 + "Cannot allocate i2c_mux_pinctrl_platform_data\n"); 71 + return -ENOMEM; 72 + } 73 + 74 + num_names = of_property_count_strings(np, "pinctrl-names"); 75 + if (num_names < 0) { 76 + dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n", 77 + num_names); 78 + return num_names; 79 + } 80 + 81 + mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev, 82 + sizeof(*mux->pdata->pinctrl_states) * num_names, 83 + GFP_KERNEL); 84 + if (!mux->pdata->pinctrl_states) { 85 + dev_err(mux->dev, "Cannot allocate pinctrl_states\n"); 86 + return -ENOMEM; 87 + } 88 + 89 + for (i = 0; i < num_names; i++) { 90 + ret = of_property_read_string_index(np, "pinctrl-names", i, 91 + &mux->pdata->pinctrl_states[mux->pdata->bus_count]); 92 + if (ret < 0) { 93 + dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n", 94 + ret); 95 + return ret; 96 + } 97 + if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count], 98 + "idle")) { 99 + if (i != num_names - 1) { 100 + dev_err(mux->dev, "idle state must be last\n"); 101 + return -EINVAL; 102 + } 103 + mux->pdata->pinctrl_state_idle = "idle"; 104 + } else { 105 + mux->pdata->bus_count++; 106 + } 107 + } 108 + 109 + adapter_np = of_parse_phandle(np, "i2c-parent", 0); 110 + if (!adapter_np) { 111 + dev_err(mux->dev, "Cannot parse i2c-parent\n"); 112 + return -ENODEV; 113 + } 114 + adapter = of_find_i2c_adapter_by_node(adapter_np); 115 + if (!adapter) { 116 + dev_err(mux->dev, "Cannot find parent bus\n"); 117 + return -ENODEV; 118 + } 119 + mux->pdata->parent_bus_num = i2c_adapter_id(adapter); 120 + put_device(&adapter->dev); 121 + 122 + return 0; 123 + } 124 + #else 125 + static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux, 126 + struct platform_device 
*pdev) 127 + { 128 + return 0; 129 + } 130 + #endif 131 + 132 + static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev) 133 + { 134 + struct i2c_mux_pinctrl *mux; 135 + int (*deselect)(struct i2c_adapter *, void *, u32); 136 + int i, ret; 137 + 138 + mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL); 139 + if (!mux) { 140 + dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n"); 141 + ret = -ENOMEM; 142 + goto err; 143 + } 144 + platform_set_drvdata(pdev, mux); 145 + 146 + mux->dev = &pdev->dev; 147 + 148 + mux->pdata = pdev->dev.platform_data; 149 + if (!mux->pdata) { 150 + ret = i2c_mux_pinctrl_parse_dt(mux, pdev); 151 + if (ret < 0) 152 + goto err; 153 + } 154 + if (!mux->pdata) { 155 + dev_err(&pdev->dev, "Missing platform data\n"); 156 + ret = -ENODEV; 157 + goto err; 158 + } 159 + 160 + mux->states = devm_kzalloc(&pdev->dev, 161 + sizeof(*mux->states) * mux->pdata->bus_count, 162 + GFP_KERNEL); 163 + if (!mux->states) { 164 + dev_err(&pdev->dev, "Cannot allocate states\n"); 165 + ret = -ENOMEM; 166 + goto err; 167 + } 168 + 169 + mux->busses = devm_kzalloc(&pdev->dev, 170 + sizeof(mux->busses) * mux->pdata->bus_count, 171 + GFP_KERNEL); 172 + if (!mux->states) { 173 + dev_err(&pdev->dev, "Cannot allocate busses\n"); 174 + ret = -ENOMEM; 175 + goto err; 176 + } 177 + 178 + mux->pinctrl = devm_pinctrl_get(&pdev->dev); 179 + if (IS_ERR(mux->pinctrl)) { 180 + ret = PTR_ERR(mux->pinctrl); 181 + dev_err(&pdev->dev, "Cannot get pinctrl: %d\n", ret); 182 + goto err; 183 + } 184 + for (i = 0; i < mux->pdata->bus_count; i++) { 185 + mux->states[i] = pinctrl_lookup_state(mux->pinctrl, 186 + mux->pdata->pinctrl_states[i]); 187 + if (IS_ERR(mux->states[i])) { 188 + ret = PTR_ERR(mux->states[i]); 189 + dev_err(&pdev->dev, 190 + "Cannot look up pinctrl state %s: %d\n", 191 + mux->pdata->pinctrl_states[i], ret); 192 + goto err; 193 + } 194 + } 195 + if (mux->pdata->pinctrl_state_idle) { 196 + mux->state_idle = pinctrl_lookup_state(mux->pinctrl, 197 
+ mux->pdata->pinctrl_state_idle); 198 + if (IS_ERR(mux->state_idle)) { 199 + ret = PTR_ERR(mux->state_idle); 200 + dev_err(&pdev->dev, 201 + "Cannot look up pinctrl state %s: %d\n", 202 + mux->pdata->pinctrl_state_idle, ret); 203 + goto err; 204 + } 205 + 206 + deselect = i2c_mux_pinctrl_deselect; 207 + } else { 208 + deselect = NULL; 209 + } 210 + 211 + mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num); 212 + if (!mux->parent) { 213 + dev_err(&pdev->dev, "Parent adapter (%d) not found\n", 214 + mux->pdata->parent_bus_num); 215 + ret = -ENODEV; 216 + goto err; 217 + } 218 + 219 + for (i = 0; i < mux->pdata->bus_count; i++) { 220 + u32 bus = mux->pdata->base_bus_num ? 221 + (mux->pdata->base_bus_num + i) : 0; 222 + 223 + mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev, 224 + mux, bus, i, 225 + i2c_mux_pinctrl_select, 226 + deselect); 227 + if (!mux->busses[i]) { 228 + ret = -ENODEV; 229 + dev_err(&pdev->dev, "Failed to add adapter %d\n", i); 230 + goto err_del_adapter; 231 + } 232 + } 233 + 234 + return 0; 235 + 236 + err_del_adapter: 237 + for (; i > 0; i--) 238 + i2c_del_mux_adapter(mux->busses[i - 1]); 239 + i2c_put_adapter(mux->parent); 240 + err: 241 + return ret; 242 + } 243 + 244 + static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev) 245 + { 246 + struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev); 247 + int i; 248 + 249 + for (i = 0; i < mux->pdata->bus_count; i++) 250 + i2c_del_mux_adapter(mux->busses[i]); 251 + 252 + i2c_put_adapter(mux->parent); 253 + 254 + return 0; 255 + } 256 + 257 + #ifdef CONFIG_OF 258 + static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = { 259 + { .compatible = "i2c-mux-pinctrl", }, 260 + {}, 261 + }; 262 + MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match); 263 + #endif 264 + 265 + static struct platform_driver i2c_mux_pinctrl_driver = { 266 + .driver = { 267 + .name = "i2c-mux-pinctrl", 268 + .owner = THIS_MODULE, 269 + .of_match_table = 
of_match_ptr(i2c_mux_pinctrl_of_match), 270 + }, 271 + .probe = i2c_mux_pinctrl_probe, 272 + .remove = __devexit_p(i2c_mux_pinctrl_remove), 273 + }; 274 + module_platform_driver(i2c_mux_pinctrl_driver); 275 + 276 + MODULE_DESCRIPTION("pinctrl-based I2C multiplexer driver"); 277 + MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>"); 278 + MODULE_LICENSE("GPL v2"); 279 + MODULE_ALIAS("platform:i2c-mux-pinctrl");
+8 -9
drivers/ide/icside.c
··· 236 236 */ 237 237 static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) 238 238 { 239 - unsigned long cycle_time; 239 + unsigned long cycle_time = 0; 240 240 int use_dma_info = 0; 241 241 const u8 xfer_mode = drive->dma_mode; 242 242 ··· 271 271 272 272 ide_set_drivedata(drive, (void *)cycle_time); 273 273 274 - printk("%s: %s selected (peak %dMB/s)\n", drive->name, 275 - ide_xfer_verbose(xfer_mode), 276 - 2000 / (unsigned long)ide_get_drivedata(drive)); 274 + printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n", 275 + drive->name, ide_xfer_verbose(xfer_mode), 276 + 2000 / (cycle_time ? cycle_time : (unsigned long) -1)); 277 277 } 278 278 279 279 static const struct ide_port_ops icside_v6_port_ops = { ··· 375 375 .dma_test_irq = icside_dma_test_irq, 376 376 .dma_lost_irq = ide_dma_lost_irq, 377 377 }; 378 - #else 379 - #define icside_v6_dma_ops NULL 380 378 #endif 381 379 382 380 static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d) ··· 454 456 static const struct ide_port_info icside_v6_port_info __initdata = { 455 457 .init_dma = icside_dma_off_init, 456 458 .port_ops = &icside_v6_no_dma_port_ops, 457 - .dma_ops = &icside_v6_dma_ops, 458 459 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, 459 460 .mwdma_mask = ATA_MWDMA2, 460 461 .swdma_mask = ATA_SWDMA2, ··· 515 518 516 519 ecard_set_drvdata(ec, state); 517 520 521 + #ifdef CONFIG_BLK_DEV_IDEDMA_ICS 518 522 if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { 519 523 d.init_dma = icside_dma_init; 520 524 d.port_ops = &icside_v6_port_ops; 521 - } else 522 - d.dma_ops = NULL; 525 + d.dma_ops = &icside_v6_dma_ops; 526 + } 527 + #endif 523 528 524 529 ret = ide_host_register(host, &d, hws); 525 530 if (ret)
+2 -1
drivers/ide/ide-cs.c
··· 167 167 { 168 168 int *is_kme = priv_data; 169 169 170 - if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) { 170 + if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH) 171 + != IO_DATA_PATH_WIDTH_8) { 171 172 pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; 172 173 pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; 173 174 }
+4
drivers/infiniband/hw/cxgb4/cm.c
··· 1593 1593 struct net_device *pdev; 1594 1594 1595 1595 pdev = ip_dev_find(&init_net, peer_ip); 1596 + if (!pdev) { 1597 + err = -ENODEV; 1598 + goto out; 1599 + } 1596 1600 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 1597 1601 n, pdev, 0); 1598 1602 if (!ep->l2t)
+8 -13
drivers/infiniband/hw/mlx4/main.c
··· 140 140 props->max_mr_size = ~0ull; 141 141 props->page_size_cap = dev->dev->caps.page_size_cap; 142 142 props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps; 143 - props->max_qp_wr = dev->dev->caps.max_wqes; 143 + props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; 144 144 props->max_sge = min(dev->dev->caps.max_sq_sg, 145 145 dev->dev->caps.max_rq_sg); 146 146 props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs; ··· 1084 1084 int total_eqs = 0; 1085 1085 int i, j, eq; 1086 1086 1087 - /* Init eq table */ 1088 - ibdev->eq_table = NULL; 1089 - ibdev->eq_added = 0; 1090 - 1091 - /* Legacy mode? */ 1092 - if (dev->caps.comp_pool == 0) 1087 + /* Legacy mode or comp_pool is not large enough */ 1088 + if (dev->caps.comp_pool == 0 || 1089 + dev->caps.num_ports > dev->caps.comp_pool) 1093 1090 return; 1094 1091 1095 1092 eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/ ··· 1132 1135 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) 1133 1136 { 1134 1137 int i; 1135 - int total_eqs; 1138 + 1139 + /* no additional eqs were added */ 1140 + if (!ibdev->eq_table) 1141 + return; 1136 1142 1137 1143 /* Reset the advertised EQ number */ 1138 1144 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; ··· 1148 1148 mlx4_release_eq(dev, ibdev->eq_table[i]); 1149 1149 } 1150 1150 1151 - total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added; 1152 - memset(ibdev->eq_table, 0, total_eqs * sizeof(int)); 1153 1151 kfree(ibdev->eq_table); 1154 - 1155 - ibdev->eq_table = NULL; 1156 - ibdev->eq_added = 0; 1157 1152 } 1158 1153 1159 1154 static void *mlx4_ib_add(struct mlx4_dev *dev)
+8
drivers/infiniband/hw/mlx4/mlx4_ib.h
··· 44 44 #include <linux/mlx4/device.h> 45 45 #include <linux/mlx4/doorbell.h> 46 46 47 + enum { 48 + MLX4_IB_SQ_MIN_WQE_SHIFT = 6, 49 + MLX4_IB_MAX_HEADROOM = 2048 50 + }; 51 + 52 + #define MLX4_IB_SQ_HEADROOM(shift) ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1) 53 + #define MLX4_IB_SQ_MAX_SPARE (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT)) 54 + 47 55 struct mlx4_ib_ucontext { 48 56 struct ib_ucontext ibucontext; 49 57 struct mlx4_uar uar;
+15 -6
drivers/infiniband/hw/mlx4/qp.c
··· 310 310 int is_user, int has_rq, struct mlx4_ib_qp *qp) 311 311 { 312 312 /* Sanity check RQ size before proceeding */ 313 - if (cap->max_recv_wr > dev->dev->caps.max_wqes || 314 - cap->max_recv_sge > dev->dev->caps.max_rq_sg) 313 + if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE || 314 + cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)) 315 315 return -EINVAL; 316 316 317 317 if (!has_rq) { ··· 329 329 qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); 330 330 } 331 331 332 - cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; 333 - cap->max_recv_sge = qp->rq.max_gs; 332 + /* leave userspace return values as they were, so as not to break ABI */ 333 + if (is_user) { 334 + cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; 335 + cap->max_recv_sge = qp->rq.max_gs; 336 + } else { 337 + cap->max_recv_wr = qp->rq.max_post = 338 + min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); 339 + cap->max_recv_sge = min(qp->rq.max_gs, 340 + min(dev->dev->caps.max_sq_sg, 341 + dev->dev->caps.max_rq_sg)); 342 + } 334 343 335 344 return 0; 336 345 } ··· 350 341 int s; 351 342 352 343 /* Sanity check SQ size before proceeding */ 353 - if (cap->max_send_wr > dev->dev->caps.max_wqes || 354 - cap->max_send_sge > dev->dev->caps.max_sq_sg || 344 + if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) || 345 + cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) || 355 346 cap->max_inline_data + send_wqe_overhead(type, qp->flags) + 356 347 sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) 357 348 return -EINVAL;
-1
drivers/infiniband/hw/ocrdma/ocrdma.h
··· 231 231 u32 entry_size; 232 232 u32 max_cnt; 233 233 u32 max_wqe_idx; 234 - u32 free_delta; 235 234 u16 dbid; /* qid, where to ring the doorbell. */ 236 235 u32 len; 237 236 dma_addr_t pa;
+1 -4
drivers/infiniband/hw/ocrdma/ocrdma_abi.h
··· 101 101 u32 rsvd1; 102 102 u32 num_wqe_allocated; 103 103 u32 num_rqe_allocated; 104 - u32 free_wqe_delta; 105 - u32 free_rqe_delta; 106 104 u32 db_sq_offset; 107 105 u32 db_rq_offset; 108 106 u32 db_shift; ··· 124 126 u32 db_rq_offset; 125 127 u32 db_shift; 126 128 127 - u32 free_rqe_delta; 128 - u32 rsvd2; 129 + u64 rsvd2; 129 130 u64 rsvd3; 130 131 } __packed; 131 132
+1 -8
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
··· 732 732 break; 733 733 case OCRDMA_SRQ_LIMIT_EVENT: 734 734 ib_evt.element.srq = &qp->srq->ibsrq; 735 - ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED; 735 + ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED; 736 736 srq_event = 1; 737 737 qp_event = 0; 738 738 break; ··· 1990 1990 max_wqe_allocated = 1 << max_wqe_allocated; 1991 1991 max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe); 1992 1992 1993 - if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 1994 - qp->sq.free_delta = 0; 1995 - qp->rq.free_delta = 1; 1996 - } else 1997 - qp->sq.free_delta = 1; 1998 - 1999 1993 qp->sq.max_cnt = max_wqe_allocated; 2000 1994 qp->sq.max_wqe_idx = max_wqe_allocated - 1; 2001 1995 2002 1996 if (!attrs->srq) { 2003 1997 qp->rq.max_cnt = max_rqe_allocated; 2004 1998 qp->rq.max_wqe_idx = max_rqe_allocated - 1; 2005 - qp->rq.free_delta = 1; 2006 1999 } 2007 2000 } 2008 2001
-1
drivers/infiniband/hw/ocrdma/ocrdma_main.c
··· 26 26 *******************************************************************/ 27 27 28 28 #include <linux/module.h> 29 - #include <linux/version.h> 30 29 #include <linux/idr.h> 31 30 #include <rdma/ib_verbs.h> 32 31 #include <rdma/ib_user_verbs.h>
-5
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 940 940 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; 941 941 uresp.db_shift = 16; 942 942 } 943 - uresp.free_wqe_delta = qp->sq.free_delta; 944 - uresp.free_rqe_delta = qp->rq.free_delta; 945 943 946 944 if (qp->dpp_enabled) { 947 945 uresp.dpp_credit = dpp_credit_lmt; ··· 1305 1307 free_cnt = (q->max_cnt - q->head) + q->tail; 1306 1308 else 1307 1309 free_cnt = q->tail - q->head; 1308 - if (q->free_delta) 1309 - free_cnt -= q->free_delta; 1310 1310 return free_cnt; 1311 1311 } 1312 1312 ··· 1497 1501 (srq->pd->id * srq->dev->nic_info.db_page_size); 1498 1502 uresp.db_page_size = srq->dev->nic_info.db_page_size; 1499 1503 uresp.num_rqe_allocated = srq->rq.max_cnt; 1500 - uresp.free_rqe_delta = 1; 1501 1504 if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 1502 1505 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET; 1503 1506 uresp.db_shift = 24;
-1
drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
··· 28 28 #ifndef __OCRDMA_VERBS_H__ 29 29 #define __OCRDMA_VERBS_H__ 30 30 31 - #include <linux/version.h> 32 31 int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *, 33 32 struct ib_send_wr **bad_wr); 34 33 int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
+44 -27
drivers/iommu/amd_iommu.c
··· 547 547 spin_unlock_irqrestore(&iommu->lock, flags); 548 548 } 549 549 550 - static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head) 550 + static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) 551 551 { 552 552 struct amd_iommu_fault fault; 553 - volatile u64 *raw; 554 - int i; 555 553 556 554 INC_STATS_COUNTER(pri_requests); 557 - 558 - raw = (u64 *)(iommu->ppr_log + head); 559 - 560 - /* 561 - * Hardware bug: Interrupt may arrive before the entry is written to 562 - * memory. If this happens we need to wait for the entry to arrive. 563 - */ 564 - for (i = 0; i < LOOP_TIMEOUT; ++i) { 565 - if (PPR_REQ_TYPE(raw[0]) != 0) 566 - break; 567 - udelay(1); 568 - } 569 555 570 556 if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { 571 557 pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n"); ··· 564 578 fault.tag = PPR_TAG(raw[0]); 565 579 fault.flags = PPR_FLAGS(raw[0]); 566 580 567 - /* 568 - * To detect the hardware bug we need to clear the entry 569 - * to back to zero. 570 - */ 571 - raw[0] = raw[1] = 0; 572 - 573 581 atomic_notifier_call_chain(&ppr_notifier, 0, &fault); 574 582 } 575 583 ··· 575 595 if (iommu->ppr_log == NULL) 576 596 return; 577 597 598 + /* enable ppr interrupts again */ 599 + writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); 600 + 578 601 spin_lock_irqsave(&iommu->lock, flags); 579 602 580 603 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); 581 604 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); 582 605 583 606 while (head != tail) { 607 + volatile u64 *raw; 608 + u64 entry[2]; 609 + int i; 584 610 585 - /* Handle PPR entry */ 586 - iommu_handle_ppr_entry(iommu, head); 611 + raw = (u64 *)(iommu->ppr_log + head); 587 612 588 - /* Update and refresh ring-buffer state*/ 613 + /* 614 + * Hardware bug: Interrupt may arrive before the entry is 615 + * written to memory. If this happens we need to wait for the 616 + * entry to arrive. 
617 + */ 618 + for (i = 0; i < LOOP_TIMEOUT; ++i) { 619 + if (PPR_REQ_TYPE(raw[0]) != 0) 620 + break; 621 + udelay(1); 622 + } 623 + 624 + /* Avoid memcpy function-call overhead */ 625 + entry[0] = raw[0]; 626 + entry[1] = raw[1]; 627 + 628 + /* 629 + * To detect the hardware bug we need to clear the entry 630 + * back to zero. 631 + */ 632 + raw[0] = raw[1] = 0UL; 633 + 634 + /* Update head pointer of hardware ring-buffer */ 589 635 head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; 590 636 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); 637 + 638 + /* 639 + * Release iommu->lock because ppr-handling might need to 640 + * re-aquire it 641 + */ 642 + spin_unlock_irqrestore(&iommu->lock, flags); 643 + 644 + /* Handle PPR entry */ 645 + iommu_handle_ppr_entry(iommu, entry); 646 + 647 + spin_lock_irqsave(&iommu->lock, flags); 648 + 649 + /* Refresh ring-buffer information */ 650 + head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); 591 651 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); 592 652 } 593 - 594 - /* enable ppr interrupts again */ 595 - writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); 596 653 597 654 spin_unlock_irqrestore(&iommu->lock, flags); 598 655 }
+5 -8
drivers/iommu/amd_iommu_init.c
··· 1029 1029 if (!iommu->dev) 1030 1030 return 1; 1031 1031 1032 + iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number, 1033 + PCI_DEVFN(0, 0)); 1034 + 1032 1035 iommu->cap_ptr = h->cap_ptr; 1033 1036 iommu->pci_seg = h->pci_seg; 1034 1037 iommu->mmio_phys = h->mmio_phys; ··· 1326 1323 { 1327 1324 int i, j; 1328 1325 u32 ioc_feature_control; 1329 - struct pci_dev *pdev = NULL; 1326 + struct pci_dev *pdev = iommu->root_pdev; 1330 1327 1331 1328 /* RD890 BIOSes may not have completely reconfigured the iommu */ 1332 - if (!is_rd890_iommu(iommu->dev)) 1329 + if (!is_rd890_iommu(iommu->dev) || !pdev) 1333 1330 return; 1334 1331 1335 1332 /* 1336 1333 * First, we need to ensure that the iommu is enabled. This is 1337 1334 * controlled by a register in the northbridge 1338 1335 */ 1339 - pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0)); 1340 - 1341 - if (!pdev) 1342 - return; 1343 1336 1344 1337 /* Select Northbridge indirect register 0x75 and enable writing */ 1345 1338 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); ··· 1344 1345 /* Enable the iommu */ 1345 1346 if (!(ioc_feature_control & 0x1)) 1346 1347 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); 1347 - 1348 - pci_dev_put(pdev); 1349 1348 1350 1349 /* Restore the iommu BAR */ 1351 1350 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
+3
drivers/iommu/amd_iommu_types.h
··· 481 481 /* Pointer to PCI device of this IOMMU */ 482 482 struct pci_dev *dev; 483 483 484 + /* Cache pdev to root device for resume quirks */ 485 + struct pci_dev *root_pdev; 486 + 484 487 /* physical address of MMIO space */ 485 488 u64 mmio_phys; 486 489 /* virtual address of MMIO space */
+2 -2
drivers/leds/Kconfig
··· 379 379 380 380 config LEDS_ASIC3 381 381 bool "LED support for the HTC ASIC3" 382 - depends on LEDS_CLASS 382 + depends on LEDS_CLASS=y 383 383 depends on MFD_ASIC3 384 384 default y 385 385 help ··· 390 390 391 391 config LEDS_RENESAS_TPU 392 392 bool "LED support for Renesas TPU" 393 - depends on LEDS_CLASS && HAVE_CLK && GENERIC_GPIO 393 + depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO 394 394 help 395 395 This option enables build of the LED TPU platform driver, 396 396 suitable to drive any TPU channel on newer Renesas SoCs.
+1 -1
drivers/leds/led-class.c
··· 29 29 led_cdev->brightness = led_cdev->brightness_get(led_cdev); 30 30 } 31 31 32 - static ssize_t led_brightness_show(struct device *dev, 32 + static ssize_t led_brightness_show(struct device *dev, 33 33 struct device_attribute *attr, char *buf) 34 34 { 35 35 struct led_classdev *led_cdev = dev_get_drvdata(dev);
-7
drivers/leds/led-core.c
··· 44 44 if (!led_cdev->blink_brightness) 45 45 led_cdev->blink_brightness = led_cdev->max_brightness; 46 46 47 - if (led_get_trigger_data(led_cdev) && 48 - delay_on == led_cdev->blink_delay_on && 49 - delay_off == led_cdev->blink_delay_off) 50 - return; 51 - 52 - led_stop_software_blink(led_cdev); 53 - 54 47 led_cdev->blink_delay_on = delay_on; 55 48 led_cdev->blink_delay_off = delay_off; 56 49
+4
drivers/md/raid1.c
··· 2550 2550 err = -EINVAL; 2551 2551 spin_lock_init(&conf->device_lock); 2552 2552 rdev_for_each(rdev, mddev) { 2553 + struct request_queue *q; 2553 2554 int disk_idx = rdev->raid_disk; 2554 2555 if (disk_idx >= mddev->raid_disks 2555 2556 || disk_idx < 0) ··· 2563 2562 if (disk->rdev) 2564 2563 goto abort; 2565 2564 disk->rdev = rdev; 2565 + q = bdev_get_queue(rdev->bdev); 2566 + if (q->merge_bvec_fn) 2567 + mddev->merge_check_needed = 1; 2566 2568 2567 2569 disk->head_position = 0; 2568 2570 }
+4
drivers/md/raid10.c
··· 3475 3475 3476 3476 rdev_for_each(rdev, mddev) { 3477 3477 long long diff; 3478 + struct request_queue *q; 3478 3479 3479 3480 disk_idx = rdev->raid_disk; 3480 3481 if (disk_idx < 0) ··· 3494 3493 goto out_free_conf; 3495 3494 disk->rdev = rdev; 3496 3495 } 3496 + q = bdev_get_queue(rdev->bdev); 3497 + if (q->merge_bvec_fn) 3498 + mddev->merge_check_needed = 1; 3497 3499 diff = (rdev->new_data_offset - rdev->data_offset); 3498 3500 if (!mddev->reshape_backwards) 3499 3501 diff = -diff;
+10 -2
drivers/mtd/ubi/debug.c
··· 264 264 */ 265 265 int ubi_debugfs_init(void) 266 266 { 267 + if (!IS_ENABLED(DEBUG_FS)) 268 + return 0; 269 + 267 270 dfs_rootdir = debugfs_create_dir("ubi", NULL); 268 271 if (IS_ERR_OR_NULL(dfs_rootdir)) { 269 272 int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); ··· 284 281 */ 285 282 void ubi_debugfs_exit(void) 286 283 { 287 - debugfs_remove(dfs_rootdir); 284 + if (IS_ENABLED(DEBUG_FS)) 285 + debugfs_remove(dfs_rootdir); 288 286 } 289 287 290 288 /* Read an UBI debugfs file */ ··· 407 403 struct dentry *dent; 408 404 struct ubi_debug_info *d = ubi->dbg; 409 405 406 + if (!IS_ENABLED(DEBUG_FS)) 407 + return 0; 408 + 410 409 n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME, 411 410 ubi->ubi_num); 412 411 if (n == UBI_DFS_DIR_LEN) { ··· 477 470 */ 478 471 void ubi_debugfs_exit_dev(struct ubi_device *ubi) 479 472 { 480 - debugfs_remove_recursive(ubi->dbg->dfs_dir); 473 + if (IS_ENABLED(DEBUG_FS)) 474 + debugfs_remove_recursive(ubi->dbg->dfs_dir); 481 475 }
+13 -4
drivers/mtd/ubi/wl.c
··· 1262 1262 dbg_wl("flush pending work for LEB %d:%d (%d pending works)", 1263 1263 vol_id, lnum, ubi->works_count); 1264 1264 1265 - down_write(&ubi->work_sem); 1266 1265 while (found) { 1267 1266 struct ubi_work *wrk; 1268 1267 found = 0; 1269 1268 1269 + down_read(&ubi->work_sem); 1270 1270 spin_lock(&ubi->wl_lock); 1271 1271 list_for_each_entry(wrk, &ubi->works, list) { 1272 1272 if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) && ··· 1277 1277 spin_unlock(&ubi->wl_lock); 1278 1278 1279 1279 err = wrk->func(ubi, wrk, 0); 1280 - if (err) 1281 - goto out; 1280 + if (err) { 1281 + up_read(&ubi->work_sem); 1282 + return err; 1283 + } 1284 + 1282 1285 spin_lock(&ubi->wl_lock); 1283 1286 found = 1; 1284 1287 break; 1285 1288 } 1286 1289 } 1287 1290 spin_unlock(&ubi->wl_lock); 1291 + up_read(&ubi->work_sem); 1288 1292 } 1289 1293 1290 - out: 1294 + /* 1295 + * Make sure all the works which have been done in parallel are 1296 + * finished. 1297 + */ 1298 + down_write(&ubi->work_sem); 1291 1299 up_write(&ubi->work_sem); 1300 + 1292 1301 return err; 1293 1302 } 1294 1303
+5 -4
drivers/net/bonding/bond_main.c
··· 76 76 #include <net/route.h> 77 77 #include <net/net_namespace.h> 78 78 #include <net/netns/generic.h> 79 + #include <net/pkt_sched.h> 79 80 #include "bonding.h" 80 81 #include "bond_3ad.h" 81 82 #include "bond_alb.h" ··· 382 381 return next; 383 382 } 384 383 385 - #define bond_queue_mapping(skb) (*(u16 *)((skb)->cb)) 386 - 387 384 /** 388 385 * bond_dev_queue_xmit - Prepare skb for xmit. 389 386 * ··· 394 395 { 395 396 skb->dev = slave_dev; 396 397 397 - skb->queue_mapping = bond_queue_mapping(skb); 398 + BUILD_BUG_ON(sizeof(skb->queue_mapping) != 399 + sizeof(qdisc_skb_cb(skb)->bond_queue_mapping)); 400 + skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping; 398 401 399 402 if (unlikely(netpoll_tx_running(slave_dev))) 400 403 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); ··· 4172 4171 /* 4173 4172 * Save the original txq to restore before passing to the driver 4174 4173 */ 4175 - bond_queue_mapping(skb) = skb->queue_mapping; 4174 + qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping; 4176 4175 4177 4176 if (unlikely(txq >= dev->real_num_tx_queues)) { 4178 4177 do {
+6 -2
drivers/net/bonding/bond_sysfs.c
··· 1082 1082 } 1083 1083 } 1084 1084 1085 - pr_info("%s: Unable to set %.*s as primary slave.\n", 1086 - bond->dev->name, (int)strlen(buf) - 1, buf); 1085 + strncpy(bond->params.primary, ifname, IFNAMSIZ); 1086 + bond->params.primary[IFNAMSIZ - 1] = 0; 1087 + 1088 + pr_info("%s: Recording %s as primary, " 1089 + "but it has not been enslaved to %s yet.\n", 1090 + bond->dev->name, ifname, bond->dev->name); 1087 1091 out: 1088 1092 write_unlock_bh(&bond->curr_slave_lock); 1089 1093 read_unlock(&bond->lock);
+9 -7
drivers/net/can/c_can/c_can.c
··· 686 686 * 687 687 * We iterate from priv->tx_echo to priv->tx_next and check if the 688 688 * packet has been transmitted, echo it back to the CAN framework. 689 - * If we discover a not yet transmitted package, stop looking for more. 689 + * If we discover a not yet transmitted packet, stop looking for more. 690 690 */ 691 691 static void c_can_do_tx(struct net_device *dev) 692 692 { ··· 698 698 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { 699 699 msg_obj_no = get_tx_echo_msg_obj(priv); 700 700 val = c_can_read_reg32(priv, &priv->regs->txrqst1); 701 - if (!(val & (1 << msg_obj_no))) { 701 + if (!(val & (1 << (msg_obj_no - 1)))) { 702 702 can_get_echo_skb(dev, 703 703 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); 704 704 stats->tx_bytes += priv->read_reg(priv, ··· 706 706 & IF_MCONT_DLC_MASK; 707 707 stats->tx_packets++; 708 708 c_can_inval_msg_object(dev, 0, msg_obj_no); 709 + } else { 710 + break; 709 711 } 710 712 } 711 713 ··· 952 950 struct net_device *dev = napi->dev; 953 951 struct c_can_priv *priv = netdev_priv(dev); 954 952 955 - irqstatus = priv->read_reg(priv, &priv->regs->interrupt); 953 + irqstatus = priv->irqstatus; 956 954 if (!irqstatus) 957 955 goto end; 958 956 ··· 1030 1028 1031 1029 static irqreturn_t c_can_isr(int irq, void *dev_id) 1032 1030 { 1033 - u16 irqstatus; 1034 1031 struct net_device *dev = (struct net_device *)dev_id; 1035 1032 struct c_can_priv *priv = netdev_priv(dev); 1036 1033 1037 - irqstatus = priv->read_reg(priv, &priv->regs->interrupt); 1038 - if (!irqstatus) 1034 + priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt); 1035 + if (!priv->irqstatus) 1039 1036 return IRQ_NONE; 1040 1037 1041 1038 /* disable all interrupts and schedule the NAPI */ ··· 1064 1063 goto exit_irq_fail; 1065 1064 } 1066 1065 1066 + napi_enable(&priv->napi); 1067 + 1067 1068 /* start the c_can controller */ 1068 1069 c_can_start(dev); 1069 1070 1070 - napi_enable(&priv->napi); 1071 1071 netif_start_queue(dev); 1072 1072 
1073 1073 return 0;
+1
drivers/net/can/c_can/c_can.h
··· 76 76 unsigned int tx_next; 77 77 unsigned int tx_echo; 78 78 void *priv; /* for board-specific data */ 79 + u16 irqstatus; 79 80 }; 80 81 81 82 struct net_device *alloc_c_can_dev(void);
+1 -1
drivers/net/can/cc770/cc770_platform.c
··· 154 154 struct cc770_platform_data *pdata = pdev->dev.platform_data; 155 155 156 156 priv->can.clock.freq = pdata->osc_freq; 157 - if (priv->cpu_interface | CPUIF_DSC) 157 + if (priv->cpu_interface & CPUIF_DSC) 158 158 priv->can.clock.freq /= 2; 159 159 priv->clkout = pdata->cor; 160 160 priv->bus_config = pdata->bcr;
+3 -1
drivers/net/dummy.c
··· 187 187 rtnl_lock(); 188 188 err = __rtnl_link_register(&dummy_link_ops); 189 189 190 - for (i = 0; i < numdummies && !err; i++) 190 + for (i = 0; i < numdummies && !err; i++) { 191 191 err = dummy_init_one(); 192 + cond_resched(); 193 + } 192 194 if (err < 0) 193 195 __rtnl_link_unregister(&dummy_link_ops); 194 196 rtnl_unlock();
-15
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
··· 747 747 748 748 #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG 749 749 750 - #define BNX2X_IP_CSUM_ERR(cqe) \ 751 - (!((cqe)->fast_path_cqe.status_flags & \ 752 - ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \ 753 - ((cqe)->fast_path_cqe.type_error_flags & \ 754 - ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) 755 - 756 - #define BNX2X_L4_CSUM_ERR(cqe) \ 757 - (!((cqe)->fast_path_cqe.status_flags & \ 758 - ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \ 759 - ((cqe)->fast_path_cqe.type_error_flags & \ 760 - ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) 761 - 762 - #define BNX2X_RX_CSUM_OK(cqe) \ 763 - (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe))) 764 - 765 750 #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ 766 751 (((le16_to_cpu(flags) & \ 767 752 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
+21 -6
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 617 617 return 0; 618 618 } 619 619 620 + static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, 621 + struct bnx2x_fastpath *fp) 622 + { 623 + /* Do nothing if no IP/L4 csum validation was done */ 624 + 625 + if (cqe->fast_path_cqe.status_flags & 626 + (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | 627 + ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) 628 + return; 629 + 630 + /* If both IP/L4 validation were done, check if an error was found. */ 631 + 632 + if (cqe->fast_path_cqe.type_error_flags & 633 + (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | 634 + ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) 635 + fp->eth_q_stats.hw_csum_err++; 636 + else 637 + skb->ip_summed = CHECKSUM_UNNECESSARY; 638 + } 620 639 621 640 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) 622 641 { ··· 825 806 826 807 skb_checksum_none_assert(skb); 827 808 828 - if (bp->dev->features & NETIF_F_RXCSUM) { 809 + if (bp->dev->features & NETIF_F_RXCSUM) 810 + bnx2x_csum_validate(skb, cqe, fp); 829 811 830 - if (likely(BNX2X_RX_CSUM_OK(cqe))) 831 - skb->ip_summed = CHECKSUM_UNNECESSARY; 832 - else 833 - fp->eth_q_stats.hw_csum_err++; 834 - } 835 812 836 813 skb_record_rx_queue(skb, fp->rx_queue); 837 814
+2 -1
drivers/net/ethernet/broadcom/tg3.c
··· 14275 14275 } 14276 14276 } 14277 14277 14278 - if (tg3_flag(tp, 5755_PLUS)) 14278 + if (tg3_flag(tp, 5755_PLUS) || 14279 + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 14279 14280 tg3_flag_set(tp, SHORT_DMA_BUG); 14280 14281 14281 14282 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+3 -2
drivers/net/ethernet/emulex/benet/be_main.c
··· 736 736 737 737 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb); 738 738 if (copied) { 739 + int gso_segs = skb_shinfo(skb)->gso_segs; 740 + 739 741 /* record the sent skb in the sent_skb table */ 740 742 BUG_ON(txo->sent_skb_list[start]); 741 743 txo->sent_skb_list[start] = skb; ··· 755 753 756 754 be_txq_notify(adapter, txq->id, wrb_cnt); 757 755 758 - be_tx_stats_update(txo, wrb_cnt, copied, 759 - skb_shinfo(skb)->gso_segs, stopped); 756 + be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped); 760 757 } else { 761 758 txq->head = start; 762 759 dev_kfree_skb_any(skb);
+4 -2
drivers/net/ethernet/intel/e1000e/ethtool.c
··· 258 258 * When SoL/IDER sessions are active, autoneg/speed/duplex 259 259 * cannot be changed 260 260 */ 261 - if (hw->phy.ops.check_reset_block(hw)) { 261 + if (hw->phy.ops.check_reset_block && 262 + hw->phy.ops.check_reset_block(hw)) { 262 263 e_err("Cannot change link characteristics when SoL/IDER is active.\n"); 263 264 return -EINVAL; 264 265 } ··· 1616 1615 * PHY loopback cannot be performed if SoL/IDER 1617 1616 * sessions are active 1618 1617 */ 1619 - if (hw->phy.ops.check_reset_block(hw)) { 1618 + if (hw->phy.ops.check_reset_block && 1619 + hw->phy.ops.check_reset_block(hw)) { 1620 1620 e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); 1621 1621 *data = 0; 1622 1622 goto out;
+1 -1
drivers/net/ethernet/intel/e1000e/mac.c
··· 709 709 * In the case of the phy reset being blocked, we already have a link. 710 710 * We do not need to set it up again. 711 711 */ 712 - if (hw->phy.ops.check_reset_block(hw)) 712 + if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) 713 713 return 0; 714 714 715 715 /*
+2 -2
drivers/net/ethernet/intel/e1000e/netdev.c
··· 6237 6237 adapter->hw.phy.ms_type = e1000_ms_hw_default; 6238 6238 } 6239 6239 6240 - if (hw->phy.ops.check_reset_block(hw)) 6240 + if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) 6241 6241 e_info("PHY reset is blocked due to SOL/IDER session.\n"); 6242 6242 6243 6243 /* Set initial default active device features */ ··· 6404 6404 if (!(adapter->flags & FLAG_HAS_AMT)) 6405 6405 e1000e_release_hw_control(adapter); 6406 6406 err_eeprom: 6407 - if (!hw->phy.ops.check_reset_block(hw)) 6407 + if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw)) 6408 6408 e1000_phy_hw_reset(&adapter->hw); 6409 6409 err_hw_init: 6410 6410 kfree(adapter->tx_ring);
+5 -3
drivers/net/ethernet/intel/e1000e/phy.c
··· 2155 2155 s32 ret_val; 2156 2156 u32 ctrl; 2157 2157 2158 - ret_val = phy->ops.check_reset_block(hw); 2159 - if (ret_val) 2160 - return 0; 2158 + if (phy->ops.check_reset_block) { 2159 + ret_val = phy->ops.check_reset_block(hw); 2160 + if (ret_val) 2161 + return 0; 2162 + } 2161 2163 2162 2164 ret_val = phy->ops.acquire(hw); 2163 2165 if (ret_val)
+10 -12
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 1390 1390 union ixgbe_adv_rx_desc *rx_desc, 1391 1391 struct sk_buff *skb) 1392 1392 { 1393 + struct net_device *dev = rx_ring->netdev; 1394 + 1393 1395 ixgbe_update_rsc_stats(rx_ring, skb); 1394 1396 1395 1397 ixgbe_rx_hash(rx_ring, rx_desc, skb); ··· 1403 1401 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); 1404 1402 #endif 1405 1403 1406 - if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { 1404 + if ((dev->features & NETIF_F_HW_VLAN_RX) && 1405 + ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { 1407 1406 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); 1408 1407 __vlan_hwaccel_put_tag(skb, vid); 1409 1408 } 1410 1409 1411 1410 skb_record_rx_queue(skb, rx_ring->queue_index); 1412 1411 1413 - skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1412 + skb->protocol = eth_type_trans(skb, dev); 1414 1413 } 1415 1414 1416 1415 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, ··· 3609 3606 3610 3607 if (hw->mac.type == ixgbe_mac_82598EB) 3611 3608 netif_set_gso_max_size(adapter->netdev, 32768); 3612 - 3613 - 3614 - /* Enable VLAN tag insert/strip */ 3615 - adapter->netdev->features |= NETIF_F_HW_VLAN_RX; 3616 3609 3617 3610 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 3618 3611 ··· 6700 6701 { 6701 6702 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6702 6703 6703 - #ifdef CONFIG_DCB 6704 - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) 6705 - features &= ~NETIF_F_HW_VLAN_RX; 6706 - #endif 6707 - 6708 6704 /* return error if RXHASH is being enabled when RSS is not supported */ 6709 6705 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) 6710 6706 features &= ~NETIF_F_RXHASH; ··· 6711 6717 /* Turn off LRO if not RSC capable */ 6712 6718 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) 6713 6719 features &= ~NETIF_F_LRO; 6714 - 6715 6720 6716 6721 return features; 6717 6722 } ··· 6758 6765 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 6759 6766 need_reset = true; 6760 6767 } 6768 + 6769 + if (features & NETIF_F_HW_VLAN_RX) 6770 + 
ixgbe_vlan_strip_enable(adapter); 6771 + else 6772 + ixgbe_vlan_strip_disable(adapter); 6761 6773 6762 6774 if (changed & NETIF_F_RXALL) 6763 6775 need_reset = true;
+10 -5
drivers/net/ethernet/marvell/mv643xx_eth.c
··· 436 436 /* 437 437 * Hardware-specific parameters. 438 438 */ 439 + #if defined(CONFIG_HAVE_CLK) 439 440 struct clk *clk; 441 + #endif 440 442 unsigned int t_clk; 441 443 }; 442 444 ··· 2897 2895 mp->dev = dev; 2898 2896 2899 2897 /* 2900 - * Get the clk rate, if there is one, otherwise use the default. 2898 + * Start with a default rate, and if there is a clock, allow 2899 + * it to override the default. 2901 2900 */ 2901 + mp->t_clk = 133000000; 2902 + #if defined(CONFIG_HAVE_CLK) 2902 2903 mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0")); 2903 2904 if (!IS_ERR(mp->clk)) { 2904 2905 clk_prepare_enable(mp->clk); 2905 2906 mp->t_clk = clk_get_rate(mp->clk); 2906 - } else { 2907 - mp->t_clk = 133000000; 2908 - printk(KERN_WARNING "Unable to get clock"); 2909 2907 } 2910 - 2908 + #endif 2911 2909 set_params(mp, pd); 2912 2910 netif_set_real_num_tx_queues(dev, mp->txq_count); 2913 2911 netif_set_real_num_rx_queues(dev, mp->rxq_count); ··· 2997 2995 phy_detach(mp->phy); 2998 2996 cancel_work_sync(&mp->tx_timeout_task); 2999 2997 2998 + #if defined(CONFIG_HAVE_CLK) 3000 2999 if (!IS_ERR(mp->clk)) { 3001 3000 clk_disable_unprepare(mp->clk); 3002 3001 clk_put(mp->clk); 3003 3002 } 3003 + #endif 3004 + 3004 3005 free_netdev(mp->dev); 3005 3006 3006 3007 platform_set_drvdata(pdev, NULL);
+6 -4
drivers/net/ethernet/marvell/sky2.c
··· 4381 4381 struct sky2_port *sky2 = netdev_priv(dev); 4382 4382 netdev_features_t changed = dev->features ^ features; 4383 4383 4384 - if (changed & NETIF_F_RXCSUM) { 4385 - bool on = features & NETIF_F_RXCSUM; 4386 - sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), 4387 - on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); 4384 + if ((changed & NETIF_F_RXCSUM) && 4385 + !(sky2->hw->flags & SKY2_HW_NEW_LE)) { 4386 + sky2_write32(sky2->hw, 4387 + Q_ADDR(rxqaddr[sky2->port], Q_CSR), 4388 + (features & NETIF_F_RXCSUM) 4389 + ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); 4388 4390 } 4389 4391 4390 4392 if (changed & NETIF_F_RXHASH)
+2 -2
drivers/net/ethernet/mellanox/mlx4/port.c
··· 697 697 if (slave != dev->caps.function) 698 698 memset(inbox->buf, 0, 256); 699 699 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 700 - *(u8 *) inbox->buf = !!reset_qkey_viols << 6; 700 + *(u8 *) inbox->buf |= !!reset_qkey_viols << 6; 701 701 ((__be32 *) inbox->buf)[2] = agg_cap_mask; 702 702 } else { 703 - ((u8 *) inbox->buf)[3] = !!reset_qkey_viols; 703 + ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols; 704 704 ((__be32 *) inbox->buf)[1] = agg_cap_mask; 705 705 } 706 706
+6 -5
drivers/net/ethernet/nxp/lpc_eth.c
··· 946 946 /* Update stats */ 947 947 ndev->stats.tx_packets++; 948 948 ndev->stats.tx_bytes += skb->len; 949 - 950 - /* Free buffer */ 951 - dev_kfree_skb_irq(skb); 952 949 } 950 + dev_kfree_skb_irq(skb); 953 951 954 952 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); 955 953 } 956 954 957 - if (netif_queue_stopped(ndev)) 958 - netif_wake_queue(ndev); 955 + if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) { 956 + if (netif_queue_stopped(ndev)) 957 + netif_wake_queue(ndev); 958 + } 959 959 } 960 960 961 961 static int __lpc_handle_recv(struct net_device *ndev, int budget) ··· 1320 1320 .ndo_set_rx_mode = lpc_eth_set_multicast_list, 1321 1321 .ndo_do_ioctl = lpc_eth_ioctl, 1322 1322 .ndo_set_mac_address = lpc_set_mac_address, 1323 + .ndo_change_mtu = eth_change_mtu, 1323 1324 }; 1324 1325 1325 1326 static int lpc_eth_drv_probe(struct platform_device *pdev)
+1 -5
drivers/net/ethernet/realtek/r8169.c
··· 5889 5889 if (status & LinkChg) 5890 5890 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); 5891 5891 5892 - napi_disable(&tp->napi); 5893 - rtl_irq_disable(tp); 5894 - 5895 - napi_enable(&tp->napi); 5896 - napi_schedule(&tp->napi); 5892 + rtl_irq_enable_all(tp); 5897 5893 } 5898 5894 5899 5895 static void rtl_task(struct work_struct *work)
+2 -2
drivers/net/ethernet/stmicro/stmmac/Kconfig
··· 13 13 if STMMAC_ETH 14 14 15 15 config STMMAC_PLATFORM 16 - tristate "STMMAC platform bus support" 16 + bool "STMMAC Platform bus support" 17 17 depends on STMMAC_ETH 18 18 default y 19 19 ---help--- ··· 26 26 If unsure, say N. 27 27 28 28 config STMMAC_PCI 29 - tristate "STMMAC support on PCI bus (EXPERIMENTAL)" 29 + bool "STMMAC PCI bus support (EXPERIMENTAL)" 30 30 depends on STMMAC_ETH && PCI && EXPERIMENTAL 31 31 ---help--- 32 32 This is to select the Synopsys DWMAC available on PCI devices,
+60 -3
drivers/net/ethernet/stmicro/stmmac/stmmac.h
··· 26 26 #include <linux/clk.h> 27 27 #include <linux/stmmac.h> 28 28 #include <linux/phy.h> 29 + #include <linux/pci.h> 29 30 #include "common.h" 30 31 #ifdef CONFIG_STMMAC_TIMER 31 32 #include "stmmac_timer.h" ··· 96 95 extern void stmmac_set_ethtool_ops(struct net_device *netdev); 97 96 extern const struct stmmac_desc_ops enh_desc_ops; 98 97 extern const struct stmmac_desc_ops ndesc_ops; 99 - 100 98 int stmmac_freeze(struct net_device *ndev); 101 99 int stmmac_restore(struct net_device *ndev); 102 100 int stmmac_resume(struct net_device *ndev); ··· 109 109 static inline int stmmac_clk_enable(struct stmmac_priv *priv) 110 110 { 111 111 if (!IS_ERR(priv->stmmac_clk)) 112 - return clk_enable(priv->stmmac_clk); 112 + return clk_prepare_enable(priv->stmmac_clk); 113 113 114 114 return 0; 115 115 } ··· 119 119 if (IS_ERR(priv->stmmac_clk)) 120 120 return; 121 121 122 - clk_disable(priv->stmmac_clk); 122 + clk_disable_unprepare(priv->stmmac_clk); 123 123 } 124 124 static inline int stmmac_clk_get(struct stmmac_priv *priv) 125 125 { ··· 143 143 return 0; 144 144 } 145 145 #endif /* CONFIG_HAVE_CLK */ 146 + 147 + 148 + #ifdef CONFIG_STMMAC_PLATFORM 149 + extern struct platform_driver stmmac_pltfr_driver; 150 + static inline int stmmac_register_platform(void) 151 + { 152 + int err; 153 + 154 + err = platform_driver_register(&stmmac_pltfr_driver); 155 + if (err) 156 + pr_err("stmmac: failed to register the platform driver\n"); 157 + 158 + return err; 159 + } 160 + static inline void stmmac_unregister_platform(void) 161 + { 162 + platform_driver_register(&stmmac_pltfr_driver); 163 + } 164 + #else 165 + static inline int stmmac_register_platform(void) 166 + { 167 + pr_debug("stmmac: do not register the platf driver\n"); 168 + 169 + return -EINVAL; 170 + } 171 + static inline void stmmac_unregister_platform(void) 172 + { 173 + } 174 + #endif /* CONFIG_STMMAC_PLATFORM */ 175 + 176 + #ifdef CONFIG_STMMAC_PCI 177 + extern struct pci_driver stmmac_pci_driver; 178 + static inline 
int stmmac_register_pci(void) 179 + { 180 + int err; 181 + 182 + err = pci_register_driver(&stmmac_pci_driver); 183 + if (err) 184 + pr_err("stmmac: failed to register the PCI driver\n"); 185 + 186 + return err; 187 + } 188 + static inline void stmmac_unregister_pci(void) 189 + { 190 + pci_unregister_driver(&stmmac_pci_driver); 191 + } 192 + #else 193 + static inline int stmmac_register_pci(void) 194 + { 195 + pr_debug("stmmac: do not register the PCI driver\n"); 196 + 197 + return -EINVAL; 198 + } 199 + static inline void stmmac_unregister_pci(void) 200 + { 201 + } 202 + #endif /* CONFIG_STMMAC_PCI */
+33 -2
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 833 833 834 834 /** 835 835 * stmmac_selec_desc_mode 836 - * @dev : device pointer 837 - * Description: select the Enhanced/Alternate or Normal descriptors */ 836 + * @priv : private structure 837 + * Description: select the Enhanced/Alternate or Normal descriptors 838 + */ 838 839 static void stmmac_selec_desc_mode(struct stmmac_priv *priv) 839 840 { 840 841 if (priv->plat->enh_desc) { ··· 1862 1861 /** 1863 1862 * stmmac_dvr_probe 1864 1863 * @device: device pointer 1864 + * @plat_dat: platform data pointer 1865 + * @addr: iobase memory address 1865 1866 * Description: this is the main probe function used to 1866 1867 * call the alloc_etherdev, allocate the priv structure. 1867 1868 */ ··· 2092 2089 return stmmac_open(ndev); 2093 2090 } 2094 2091 #endif /* CONFIG_PM */ 2092 + 2093 + /* Driver can be configured w/ and w/ both PCI and Platf drivers 2094 + * depending on the configuration selected. 2095 + */ 2096 + static int __init stmmac_init(void) 2097 + { 2098 + int err_plt = 0; 2099 + int err_pci = 0; 2100 + 2101 + err_plt = stmmac_register_platform(); 2102 + err_pci = stmmac_register_pci(); 2103 + 2104 + if ((err_pci) && (err_plt)) { 2105 + pr_err("stmmac: driver registration failed\n"); 2106 + return -EINVAL; 2107 + } 2108 + 2109 + return 0; 2110 + } 2111 + 2112 + static void __exit stmmac_exit(void) 2113 + { 2114 + stmmac_unregister_platform(); 2115 + stmmac_unregister_pci(); 2116 + } 2117 + 2118 + module_init(stmmac_init); 2119 + module_exit(stmmac_exit); 2095 2120 2096 2121 #ifndef MODULE 2097 2122 static int __init stmmac_cmdline_opt(char *str)
+1 -28
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
··· 179 179 180 180 MODULE_DEVICE_TABLE(pci, stmmac_id_table); 181 181 182 - static struct pci_driver stmmac_driver = { 182 + struct pci_driver stmmac_pci_driver = { 183 183 .name = STMMAC_RESOURCE_NAME, 184 184 .id_table = stmmac_id_table, 185 185 .probe = stmmac_pci_probe, ··· 189 189 .resume = stmmac_pci_resume, 190 190 #endif 191 191 }; 192 - 193 - /** 194 - * stmmac_init_module - Entry point for the driver 195 - * Description: This function is the entry point for the driver. 196 - */ 197 - static int __init stmmac_init_module(void) 198 - { 199 - int ret; 200 - 201 - ret = pci_register_driver(&stmmac_driver); 202 - if (ret < 0) 203 - pr_err("%s: ERROR: driver registration failed\n", __func__); 204 - 205 - return ret; 206 - } 207 - 208 - /** 209 - * stmmac_cleanup_module - Cleanup routine for the driver 210 - * Description: This function is the cleanup routine for the driver. 211 - */ 212 - static void __exit stmmac_cleanup_module(void) 213 - { 214 - pci_unregister_driver(&stmmac_driver); 215 - } 216 - 217 - module_init(stmmac_init_module); 218 - module_exit(stmmac_cleanup_module); 219 192 220 193 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); 221 194 MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
+1 -3
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
··· 255 255 }; 256 256 MODULE_DEVICE_TABLE(of, stmmac_dt_ids); 257 257 258 - static struct platform_driver stmmac_driver = { 258 + struct platform_driver stmmac_pltfr_driver = { 259 259 .probe = stmmac_pltfr_probe, 260 260 .remove = stmmac_pltfr_remove, 261 261 .driver = { ··· 265 265 .of_match_table = of_match_ptr(stmmac_dt_ids), 266 266 }, 267 267 }; 268 - 269 - module_platform_driver(stmmac_driver); 270 268 271 269 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); 272 270 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+1 -11
drivers/net/ethernet/sun/niu.c
··· 3598 3598 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) 3599 3599 { 3600 3600 struct netdev_queue *txq; 3601 - unsigned int tx_bytes; 3602 3601 u16 pkt_cnt, tmp; 3603 3602 int cons, index; 3604 3603 u64 cs; ··· 3620 3621 netif_printk(np, tx_done, KERN_DEBUG, np->dev, 3621 3622 "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); 3622 3623 3623 - tx_bytes = 0; 3624 - tmp = pkt_cnt; 3625 - while (tmp--) { 3626 - tx_bytes += rp->tx_buffs[cons].skb->len; 3624 + while (pkt_cnt--) 3627 3625 cons = release_tx_packet(np, rp, cons); 3628 - } 3629 3626 3630 3627 rp->cons = cons; 3631 3628 smp_mb(); 3632 - 3633 - netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes); 3634 3629 3635 3630 out: 3636 3631 if (unlikely(netif_tx_queue_stopped(txq) && ··· 4326 4333 struct tx_ring_info *rp = &np->tx_rings[i]; 4327 4334 4328 4335 niu_free_tx_ring_info(np, rp); 4329 - netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i)); 4330 4336 } 4331 4337 kfree(np->tx_rings); 4332 4338 np->tx_rings = NULL; ··· 6730 6738 6731 6739 prod = NEXT_TX(rp, prod); 6732 6740 } 6733 - 6734 - netdev_tx_sent_queue(txq, skb->len); 6735 6741 6736 6742 if (prod < rp->prod) 6737 6743 rp->wrap_bit ^= TX_RING_KICK_WRAP;
+2
drivers/net/ethernet/tile/Kconfig
··· 7 7 depends on TILE 8 8 default y 9 9 select CRC32 10 + select TILE_GXIO_MPIPE if TILEGX 11 + select HIGH_RES_TIMERS if TILEGX 10 12 ---help--- 11 13 This is a standard Linux network device driver for the 12 14 on-chip Tilera Gigabit Ethernet and XAUI interfaces.
+2 -2
drivers/net/ethernet/tile/Makefile
··· 4 4 5 5 obj-$(CONFIG_TILE_NET) += tile_net.o 6 6 ifdef CONFIG_TILEGX 7 - tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o 7 + tile_net-y := tilegx.o 8 8 else 9 - tile_net-objs := tilepro.o 9 + tile_net-y := tilepro.o 10 10 endif
+1898
drivers/net/ethernet/tile/tilegx.c
··· 1 + /* 2 + * Copyright 2012 Tilera Corporation. All Rights Reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License 6 + * as published by the Free Software Foundation, version 2. 7 + * 8 + * This program is distributed in the hope that it will be useful, but 9 + * WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 11 + * NON INFRINGEMENT. See the GNU General Public License for 12 + * more details. 13 + */ 14 + 15 + #include <linux/module.h> 16 + #include <linux/init.h> 17 + #include <linux/moduleparam.h> 18 + #include <linux/sched.h> 19 + #include <linux/kernel.h> /* printk() */ 20 + #include <linux/slab.h> /* kmalloc() */ 21 + #include <linux/errno.h> /* error codes */ 22 + #include <linux/types.h> /* size_t */ 23 + #include <linux/interrupt.h> 24 + #include <linux/in.h> 25 + #include <linux/irq.h> 26 + #include <linux/netdevice.h> /* struct device, and other headers */ 27 + #include <linux/etherdevice.h> /* eth_type_trans */ 28 + #include <linux/skbuff.h> 29 + #include <linux/ioctl.h> 30 + #include <linux/cdev.h> 31 + #include <linux/hugetlb.h> 32 + #include <linux/in6.h> 33 + #include <linux/timer.h> 34 + #include <linux/hrtimer.h> 35 + #include <linux/ktime.h> 36 + #include <linux/io.h> 37 + #include <linux/ctype.h> 38 + #include <linux/ip.h> 39 + #include <linux/tcp.h> 40 + 41 + #include <asm/checksum.h> 42 + #include <asm/homecache.h> 43 + #include <gxio/mpipe.h> 44 + #include <arch/sim.h> 45 + 46 + /* Default transmit lockup timeout period, in jiffies. */ 47 + #define TILE_NET_TIMEOUT (5 * HZ) 48 + 49 + /* The maximum number of distinct channels (idesc.channel is 5 bits). */ 50 + #define TILE_NET_CHANNELS 32 51 + 52 + /* Maximum number of idescs to handle per "poll". */ 53 + #define TILE_NET_BATCH 128 54 + 55 + /* Maximum number of packets to handle per "poll". 
*/ 56 + #define TILE_NET_WEIGHT 64 57 + 58 + /* Number of entries in each iqueue. */ 59 + #define IQUEUE_ENTRIES 512 60 + 61 + /* Number of entries in each equeue. */ 62 + #define EQUEUE_ENTRIES 2048 63 + 64 + /* Total header bytes per equeue slot. Must be big enough for 2 bytes 65 + * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to 66 + * 60 bytes of actual TCP header. We round up to align to cache lines. 67 + */ 68 + #define HEADER_BYTES 128 69 + 70 + /* Maximum completions per cpu per device (must be a power of two). 71 + * ISSUE: What is the right number here? If this is too small, then 72 + * egress might block waiting for free space in a completions array. 73 + * ISSUE: At the least, allocate these only for initialized echannels. 74 + */ 75 + #define TILE_NET_MAX_COMPS 64 76 + 77 + #define MAX_FRAGS (MAX_SKB_FRAGS + 1) 78 + 79 + /* Size of completions data to allocate. 80 + * ISSUE: Probably more than needed since we don't use all the channels. 81 + */ 82 + #define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps)) 83 + 84 + /* Size of NotifRing data to allocate. */ 85 + #define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t)) 86 + 87 + /* Timeout to wake the per-device TX timer after we stop the queue. 88 + * We don't want the timeout too short (adds overhead, and might end 89 + * up causing stop/wake/stop/wake cycles) or too long (affects performance). 90 + * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets. 91 + */ 92 + #define TX_TIMER_DELAY_USEC 30 93 + 94 + /* Timeout to wake the per-cpu egress timer to free completions. */ 95 + #define EGRESS_TIMER_DELAY_USEC 1000 96 + 97 + MODULE_AUTHOR("Tilera Corporation"); 98 + MODULE_LICENSE("GPL"); 99 + 100 + /* A "packet fragment" (a chunk of memory). */ 101 + struct frag { 102 + void *buf; 103 + size_t length; 104 + }; 105 + 106 + /* A single completion. 
*/ 107 + struct tile_net_comp { 108 + /* The "complete_count" when the completion will be complete. */ 109 + s64 when; 110 + /* The buffer to be freed when the completion is complete. */ 111 + struct sk_buff *skb; 112 + }; 113 + 114 + /* The completions for a given cpu and echannel. */ 115 + struct tile_net_comps { 116 + /* The completions. */ 117 + struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS]; 118 + /* The number of completions used. */ 119 + unsigned long comp_next; 120 + /* The number of completions freed. */ 121 + unsigned long comp_last; 122 + }; 123 + 124 + /* The transmit wake timer for a given cpu and echannel. */ 125 + struct tile_net_tx_wake { 126 + struct hrtimer timer; 127 + struct net_device *dev; 128 + }; 129 + 130 + /* Info for a specific cpu. */ 131 + struct tile_net_info { 132 + /* The NAPI struct. */ 133 + struct napi_struct napi; 134 + /* Packet queue. */ 135 + gxio_mpipe_iqueue_t iqueue; 136 + /* Our cpu. */ 137 + int my_cpu; 138 + /* True if iqueue is valid. */ 139 + bool has_iqueue; 140 + /* NAPI flags. */ 141 + bool napi_added; 142 + bool napi_enabled; 143 + /* Number of small sk_buffs which must still be provided. */ 144 + unsigned int num_needed_small_buffers; 145 + /* Number of large sk_buffs which must still be provided. */ 146 + unsigned int num_needed_large_buffers; 147 + /* A timer for handling egress completions. */ 148 + struct hrtimer egress_timer; 149 + /* True if "egress_timer" is scheduled. */ 150 + bool egress_timer_scheduled; 151 + /* Comps for each egress channel. */ 152 + struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS]; 153 + /* Transmit wake timer for each egress channel. */ 154 + struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS]; 155 + }; 156 + 157 + /* Info for egress on a particular egress channel. */ 158 + struct tile_net_egress { 159 + /* The "equeue". */ 160 + gxio_mpipe_equeue_t *equeue; 161 + /* The headers for TSO. 
*/ 162 + unsigned char *headers; 163 + }; 164 + 165 + /* Info for a specific device. */ 166 + struct tile_net_priv { 167 + /* Our network device. */ 168 + struct net_device *dev; 169 + /* The primary link. */ 170 + gxio_mpipe_link_t link; 171 + /* The primary channel, if open, else -1. */ 172 + int channel; 173 + /* The "loopify" egress link, if needed. */ 174 + gxio_mpipe_link_t loopify_link; 175 + /* The "loopify" egress channel, if open, else -1. */ 176 + int loopify_channel; 177 + /* The egress channel (channel or loopify_channel). */ 178 + int echannel; 179 + /* Total stats. */ 180 + struct net_device_stats stats; 181 + }; 182 + 183 + /* Egress info, indexed by "priv->echannel" (lazily created as needed). */ 184 + static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS]; 185 + 186 + /* Devices currently associated with each channel. 187 + * NOTE: The array entry can become NULL after ifconfig down, but 188 + * we do not free the underlying net_device structures, so it is 189 + * safe to use a pointer after reading it from this array. 190 + */ 191 + static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS]; 192 + 193 + /* A mutex for "tile_net_devs_for_channel". */ 194 + static DEFINE_MUTEX(tile_net_devs_for_channel_mutex); 195 + 196 + /* The per-cpu info. */ 197 + static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info); 198 + 199 + /* The "context" for all devices. */ 200 + static gxio_mpipe_context_t context; 201 + 202 + /* Buffer sizes and mpipe enum codes for buffer stacks. 203 + * See arch/tile/include/gxio/mpipe.h for the set of possible values. 204 + */ 205 + #define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128 206 + #define BUFFER_SIZE_SMALL 128 207 + #define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664 208 + #define BUFFER_SIZE_LARGE 1664 209 + 210 + /* The small/large "buffer stacks". 
*/ 211 + static int small_buffer_stack = -1; 212 + static int large_buffer_stack = -1; 213 + 214 + /* Amount of memory allocated for each buffer stack. */ 215 + static size_t buffer_stack_size; 216 + 217 + /* The actual memory allocated for the buffer stacks. */ 218 + static void *small_buffer_stack_va; 219 + static void *large_buffer_stack_va; 220 + 221 + /* The buckets. */ 222 + static int first_bucket = -1; 223 + static int num_buckets = 1; 224 + 225 + /* The ingress irq. */ 226 + static int ingress_irq = -1; 227 + 228 + /* Text value of tile_net.cpus if passed as a module parameter. */ 229 + static char *network_cpus_string; 230 + 231 + /* The actual cpus in "network_cpus". */ 232 + static struct cpumask network_cpus_map; 233 + 234 + /* If "loopify=LINK" was specified, this is "LINK". */ 235 + static char *loopify_link_name; 236 + 237 + /* If "tile_net.custom" was specified, this is non-NULL. */ 238 + static char *custom_str; 239 + 240 + /* The "tile_net.cpus" argument specifies the cpus that are dedicated 241 + * to handle ingress packets. 242 + * 243 + * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where 244 + * m, n, x, y are integer numbers that represent the cpus that can be 245 + * neither a dedicated cpu nor a dataplane cpu. 246 + */ 247 + static bool network_cpus_init(void) 248 + { 249 + char buf[1024]; 250 + int rc; 251 + 252 + if (network_cpus_string == NULL) 253 + return false; 254 + 255 + rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map); 256 + if (rc != 0) { 257 + pr_warn("tile_net.cpus=%s: malformed cpu list\n", 258 + network_cpus_string); 259 + return false; 260 + } 261 + 262 + /* Remove dedicated cpus. 
*/ 263 + cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask); 264 + 265 + if (cpumask_empty(&network_cpus_map)) { 266 + pr_warn("Ignoring empty tile_net.cpus='%s'.\n", 267 + network_cpus_string); 268 + return false; 269 + } 270 + 271 + cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map); 272 + pr_info("Linux network CPUs: %s\n", buf); 273 + return true; 274 + } 275 + 276 + module_param_named(cpus, network_cpus_string, charp, 0444); 277 + MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts"); 278 + 279 + /* The "tile_net.loopify=LINK" argument causes the named device to 280 + * actually use "loop0" for ingress, and "loop1" for egress. This 281 + * allows an app to sit between the actual link and linux, passing 282 + * (some) packets along to linux, and forwarding (some) packets sent 283 + * out by linux. 284 + */ 285 + module_param_named(loopify, loopify_link_name, charp, 0444); 286 + MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress"); 287 + 288 + /* The "tile_net.custom" argument causes us to ignore the "conventional" 289 + * classifier metadata, in particular, the "l2_offset". 290 + */ 291 + module_param_named(custom, custom_str, charp, 0444); 292 + MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier"); 293 + 294 + /* Atomically update a statistics field. 295 + * Note that on TILE-Gx, this operation is fire-and-forget on the 296 + * issuing core (single-cycle dispatch) and takes only a few cycles 297 + * longer than a regular store when the request reaches the home cache. 298 + * No expensive bus management overhead is required. 299 + */ 300 + static void tile_net_stats_add(unsigned long value, unsigned long *field) 301 + { 302 + BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long)); 303 + atomic_long_add(value, (atomic_long_t *)field); 304 + } 305 + 306 + /* Allocate and push a buffer. 
*/ 307 + static bool tile_net_provide_buffer(bool small) 308 + { 309 + int stack = small ? small_buffer_stack : large_buffer_stack; 310 + const unsigned long buffer_alignment = 128; 311 + struct sk_buff *skb; 312 + int len; 313 + 314 + len = sizeof(struct sk_buff **) + buffer_alignment; 315 + len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE); 316 + skb = dev_alloc_skb(len); 317 + if (skb == NULL) 318 + return false; 319 + 320 + /* Make room for a back-pointer to 'skb' and guarantee alignment. */ 321 + skb_reserve(skb, sizeof(struct sk_buff **)); 322 + skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1)); 323 + 324 + /* Save a back-pointer to 'skb'. */ 325 + *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb; 326 + 327 + /* Make sure "skb" and the back-pointer have been flushed. */ 328 + wmb(); 329 + 330 + gxio_mpipe_push_buffer(&context, stack, 331 + (void *)va_to_tile_io_addr(skb->data)); 332 + 333 + return true; 334 + } 335 + 336 + /* Convert a raw mpipe buffer to its matching skb pointer. */ 337 + static struct sk_buff *mpipe_buf_to_skb(void *va) 338 + { 339 + /* Acquire the associated "skb". */ 340 + struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); 341 + struct sk_buff *skb = *skb_ptr; 342 + 343 + /* Paranoia. */ 344 + if (skb->data != va) { 345 + /* Panic here since there's a reasonable chance 346 + * that corrupt buffers means generic memory 347 + * corruption, with unpredictable system effects. 348 + */ 349 + panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p", 350 + va, skb, skb->data); 351 + } 352 + 353 + return skb; 354 + } 355 + 356 + static void tile_net_pop_all_buffers(int stack) 357 + { 358 + for (;;) { 359 + tile_io_addr_t addr = 360 + (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack); 361 + if (addr == 0) 362 + break; 363 + dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr))); 364 + } 365 + } 366 + 367 + /* Provide linux buffers to mPIPE. 
*/ 368 + static void tile_net_provide_needed_buffers(void) 369 + { 370 + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 371 + 372 + while (info->num_needed_small_buffers != 0) { 373 + if (!tile_net_provide_buffer(true)) 374 + goto oops; 375 + info->num_needed_small_buffers--; 376 + } 377 + 378 + while (info->num_needed_large_buffers != 0) { 379 + if (!tile_net_provide_buffer(false)) 380 + goto oops; 381 + info->num_needed_large_buffers--; 382 + } 383 + 384 + return; 385 + 386 + oops: 387 + /* Add a description to the page allocation failure dump. */ 388 + pr_notice("Tile %d still needs some buffers\n", info->my_cpu); 389 + } 390 + 391 + static inline bool filter_packet(struct net_device *dev, void *buf) 392 + { 393 + /* Filter packets received before we're up. */ 394 + if (dev == NULL || !(dev->flags & IFF_UP)) 395 + return true; 396 + 397 + /* Filter out packets that aren't for us. */ 398 + if (!(dev->flags & IFF_PROMISC) && 399 + !is_multicast_ether_addr(buf) && 400 + compare_ether_addr(dev->dev_addr, buf) != 0) 401 + return true; 402 + 403 + return false; 404 + } 405 + 406 + static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, 407 + gxio_mpipe_idesc_t *idesc, unsigned long len) 408 + { 409 + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 410 + struct tile_net_priv *priv = netdev_priv(dev); 411 + 412 + /* Encode the actual packet length. */ 413 + skb_put(skb, len); 414 + 415 + skb->protocol = eth_type_trans(skb, dev); 416 + 417 + /* Acknowledge "good" hardware checksums. */ 418 + if (idesc->cs && idesc->csum_seed_val == 0xFFFF) 419 + skb->ip_summed = CHECKSUM_UNNECESSARY; 420 + 421 + netif_receive_skb(skb); 422 + 423 + /* Update stats. */ 424 + tile_net_stats_add(1, &priv->stats.rx_packets); 425 + tile_net_stats_add(len, &priv->stats.rx_bytes); 426 + 427 + /* Need a new buffer. 
*/ 428 + if (idesc->size == BUFFER_SIZE_SMALL_ENUM) 429 + info->num_needed_small_buffers++; 430 + else 431 + info->num_needed_large_buffers++; 432 + } 433 + 434 + /* Handle a packet. Return true if "processed", false if "filtered". */ 435 + static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) 436 + { 437 + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 438 + struct net_device *dev = tile_net_devs_for_channel[idesc->channel]; 439 + uint8_t l2_offset; 440 + void *va; 441 + void *buf; 442 + unsigned long len; 443 + bool filter; 444 + 445 + /* Drop packets for which no buffer was available. 446 + * NOTE: This happens under heavy load. 447 + */ 448 + if (idesc->be) { 449 + struct tile_net_priv *priv = netdev_priv(dev); 450 + tile_net_stats_add(1, &priv->stats.rx_dropped); 451 + gxio_mpipe_iqueue_consume(&info->iqueue, idesc); 452 + if (net_ratelimit()) 453 + pr_info("Dropping packet (insufficient buffers).\n"); 454 + return false; 455 + } 456 + 457 + /* Get the "l2_offset", if allowed. */ 458 + l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc); 459 + 460 + /* Get the raw buffer VA (includes "headroom"). */ 461 + va = tile_io_addr_to_va((unsigned long)(long)idesc->va); 462 + 463 + /* Get the actual packet start/length. */ 464 + buf = va + l2_offset; 465 + len = idesc->l2_size - l2_offset; 466 + 467 + /* Point "va" at the raw buffer. */ 468 + va -= NET_IP_ALIGN; 469 + 470 + filter = filter_packet(dev, buf); 471 + if (filter) { 472 + gxio_mpipe_iqueue_drop(&info->iqueue, idesc); 473 + } else { 474 + struct sk_buff *skb = mpipe_buf_to_skb(va); 475 + 476 + /* Skip headroom, and any custom header. */ 477 + skb_reserve(skb, NET_IP_ALIGN + l2_offset); 478 + 479 + tile_net_receive_skb(dev, skb, idesc, len); 480 + } 481 + 482 + gxio_mpipe_iqueue_consume(&info->iqueue, idesc); 483 + return !filter; 484 + } 485 + 486 + /* Handle some packets for the current CPU. 487 + * 488 + * This function handles up to TILE_NET_BATCH idescs per call. 
489 + * 490 + * ISSUE: Since we do not provide new buffers until this function is 491 + * complete, we must initially provide enough buffers for each network 492 + * cpu to fill its iqueue and also its batched idescs. 493 + * 494 + * ISSUE: The "rotting packet" race condition occurs if a packet 495 + * arrives after the queue appears to be empty, and before the 496 + * hypervisor interrupt is re-enabled. 497 + */ 498 + static int tile_net_poll(struct napi_struct *napi, int budget) 499 + { 500 + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 501 + unsigned int work = 0; 502 + gxio_mpipe_idesc_t *idesc; 503 + int i, n; 504 + 505 + /* Process packets. */ 506 + while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) { 507 + for (i = 0; i < n; i++) { 508 + if (i == TILE_NET_BATCH) 509 + goto done; 510 + if (tile_net_handle_packet(idesc + i)) { 511 + if (++work >= budget) 512 + goto done; 513 + } 514 + } 515 + } 516 + 517 + /* There are no packets left. */ 518 + napi_complete(&info->napi); 519 + 520 + /* Re-enable hypervisor interrupts. */ 521 + gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring); 522 + 523 + /* HACK: Avoid the "rotting packet" problem. */ 524 + if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0) 525 + napi_schedule(&info->napi); 526 + 527 + /* ISSUE: Handle completions? */ 528 + 529 + done: 530 + tile_net_provide_needed_buffers(); 531 + 532 + return work; 533 + } 534 + 535 + /* Handle an ingress interrupt on the current cpu. */ 536 + static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused) 537 + { 538 + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 539 + napi_schedule(&info->napi); 540 + return IRQ_HANDLED; 541 + } 542 + 543 + /* Free some completions. This must be called with interrupts blocked. 
*/ 544 + static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue, 545 + struct tile_net_comps *comps, 546 + int limit, bool force_update) 547 + { 548 + int n = 0; 549 + while (comps->comp_last < comps->comp_next) { 550 + unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS; 551 + struct tile_net_comp *comp = &comps->comp_queue[cid]; 552 + if (!gxio_mpipe_equeue_is_complete(equeue, comp->when, 553 + force_update || n == 0)) 554 + break; 555 + dev_kfree_skb_irq(comp->skb); 556 + comps->comp_last++; 557 + if (++n == limit) 558 + break; 559 + } 560 + return n; 561 + } 562 + 563 + /* Add a completion. This must be called with interrupts blocked. 564 + * tile_net_equeue_try_reserve() will have ensured a free completion entry. 565 + */ 566 + static void add_comp(gxio_mpipe_equeue_t *equeue, 567 + struct tile_net_comps *comps, 568 + uint64_t when, struct sk_buff *skb) 569 + { 570 + int cid = comps->comp_next % TILE_NET_MAX_COMPS; 571 + comps->comp_queue[cid].when = when; 572 + comps->comp_queue[cid].skb = skb; 573 + comps->comp_next++; 574 + } 575 + 576 + static void tile_net_schedule_tx_wake_timer(struct net_device *dev) 577 + { 578 + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 579 + struct tile_net_priv *priv = netdev_priv(dev); 580 + 581 + hrtimer_start(&info->tx_wake[priv->echannel].timer, 582 + ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL), 583 + HRTIMER_MODE_REL_PINNED); 584 + } 585 + 586 + static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t) 587 + { 588 + struct tile_net_tx_wake *tx_wake = 589 + container_of(t, struct tile_net_tx_wake, timer); 590 + netif_wake_subqueue(tx_wake->dev, smp_processor_id()); 591 + return HRTIMER_NORESTART; 592 + } 593 + 594 + /* Make sure the egress timer is scheduled. 
*/ 595 + static void tile_net_schedule_egress_timer(void) 596 + { 597 + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 598 + 599 + if (!info->egress_timer_scheduled) { 600 + hrtimer_start(&info->egress_timer, 601 + ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL), 602 + HRTIMER_MODE_REL_PINNED); 603 + info->egress_timer_scheduled = true; 604 + } 605 + } 606 + 607 + /* The "function" for "info->egress_timer". 608 + * 609 + * This timer will reschedule itself as long as there are any pending 610 + * completions expected for this tile. 611 + */ 612 + static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) 613 + { 614 + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 615 + unsigned long irqflags; 616 + bool pending = false; 617 + int i; 618 + 619 + local_irq_save(irqflags); 620 + 621 + /* The timer is no longer scheduled. */ 622 + info->egress_timer_scheduled = false; 623 + 624 + /* Free all possible comps for this tile. */ 625 + for (i = 0; i < TILE_NET_CHANNELS; i++) { 626 + struct tile_net_egress *egress = &egress_for_echannel[i]; 627 + struct tile_net_comps *comps = info->comps_for_echannel[i]; 628 + if (comps->comp_last >= comps->comp_next) 629 + continue; 630 + tile_net_free_comps(egress->equeue, comps, -1, true); 631 + pending = pending || (comps->comp_last < comps->comp_next); 632 + } 633 + 634 + /* Reschedule timer if needed. */ 635 + if (pending) 636 + tile_net_schedule_egress_timer(); 637 + 638 + local_irq_restore(irqflags); 639 + 640 + return HRTIMER_NORESTART; 641 + } 642 + 643 + /* Helper function for "tile_net_update()". 644 + * "dev" (i.e. arg) is the device being brought up or down, 645 + * or NULL if all devices are now down. 
646 + */ 647 + static void tile_net_update_cpu(void *arg) 648 + { 649 + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 650 + struct net_device *dev = arg; 651 + 652 + if (!info->has_iqueue) 653 + return; 654 + 655 + if (dev != NULL) { 656 + if (!info->napi_added) { 657 + netif_napi_add(dev, &info->napi, 658 + tile_net_poll, TILE_NET_WEIGHT); 659 + info->napi_added = true; 660 + } 661 + if (!info->napi_enabled) { 662 + napi_enable(&info->napi); 663 + info->napi_enabled = true; 664 + } 665 + enable_percpu_irq(ingress_irq, 0); 666 + } else { 667 + disable_percpu_irq(ingress_irq); 668 + if (info->napi_enabled) { 669 + napi_disable(&info->napi); 670 + info->napi_enabled = false; 671 + } 672 + /* FIXME: Drain the iqueue. */ 673 + } 674 + } 675 + 676 + /* Helper function for tile_net_open() and tile_net_stop(). 677 + * Always called under tile_net_devs_for_channel_mutex. 678 + */ 679 + static int tile_net_update(struct net_device *dev) 680 + { 681 + static gxio_mpipe_rules_t rules; /* too big to fit on the stack */ 682 + bool saw_channel = false; 683 + int channel; 684 + int rc; 685 + int cpu; 686 + 687 + gxio_mpipe_rules_init(&rules, &context); 688 + 689 + for (channel = 0; channel < TILE_NET_CHANNELS; channel++) { 690 + if (tile_net_devs_for_channel[channel] == NULL) 691 + continue; 692 + if (!saw_channel) { 693 + saw_channel = true; 694 + gxio_mpipe_rules_begin(&rules, first_bucket, 695 + num_buckets, NULL); 696 + gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN); 697 + } 698 + gxio_mpipe_rules_add_channel(&rules, channel); 699 + } 700 + 701 + /* NOTE: This can fail if there is no classifier. 702 + * ISSUE: Can anything else cause it to fail? 703 + */ 704 + rc = gxio_mpipe_rules_commit(&rules); 705 + if (rc != 0) { 706 + netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc); 707 + return -EIO; 708 + } 709 + 710 + /* Update all cpus, sequentially (to protect "netif_napi_add()"). 
*/ 711 + for_each_online_cpu(cpu) 712 + smp_call_function_single(cpu, tile_net_update_cpu, 713 + (saw_channel ? dev : NULL), 1); 714 + 715 + /* HACK: Allow packets to flow in the simulator. */ 716 + if (saw_channel) 717 + sim_enable_mpipe_links(0, -1); 718 + 719 + return 0; 720 + } 721 + 722 + /* Allocate and initialize mpipe buffer stacks, and register them in 723 + * the mPIPE TLBs, for both small and large packet sizes. 724 + * This routine supports tile_net_init_mpipe(), below. 725 + */ 726 + static int init_buffer_stacks(struct net_device *dev, int num_buffers) 727 + { 728 + pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH); 729 + int rc; 730 + 731 + /* Compute stack bytes; we round up to 64KB and then use 732 + * alloc_pages() so we get the required 64KB alignment as well. 733 + */ 734 + buffer_stack_size = 735 + ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers), 736 + 64 * 1024); 737 + 738 + /* Allocate two buffer stack indices. */ 739 + rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0); 740 + if (rc < 0) { 741 + netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n", 742 + rc); 743 + return rc; 744 + } 745 + small_buffer_stack = rc; 746 + large_buffer_stack = rc + 1; 747 + 748 + /* Allocate the small memory stack. 
*/ 749 + small_buffer_stack_va = 750 + alloc_pages_exact(buffer_stack_size, GFP_KERNEL); 751 + if (small_buffer_stack_va == NULL) { 752 + netdev_err(dev, 753 + "Could not alloc %zd bytes for buffer stacks\n", 754 + buffer_stack_size); 755 + return -ENOMEM; 756 + } 757 + rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack, 758 + BUFFER_SIZE_SMALL_ENUM, 759 + small_buffer_stack_va, 760 + buffer_stack_size, 0); 761 + if (rc != 0) { 762 + netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc); 763 + return rc; 764 + } 765 + rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack, 766 + hash_pte, 0); 767 + if (rc != 0) { 768 + netdev_err(dev, 769 + "gxio_mpipe_register_buffer_memory failed: %d\n", 770 + rc); 771 + return rc; 772 + } 773 + 774 + /* Allocate the large buffer stack. */ 775 + large_buffer_stack_va = 776 + alloc_pages_exact(buffer_stack_size, GFP_KERNEL); 777 + if (large_buffer_stack_va == NULL) { 778 + netdev_err(dev, 779 + "Could not alloc %zd bytes for buffer stacks\n", 780 + buffer_stack_size); 781 + return -ENOMEM; 782 + } 783 + rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack, 784 + BUFFER_SIZE_LARGE_ENUM, 785 + large_buffer_stack_va, 786 + buffer_stack_size, 0); 787 + if (rc != 0) { 788 + netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n", 789 + rc); 790 + return rc; 791 + } 792 + rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack, 793 + hash_pte, 0); 794 + if (rc != 0) { 795 + netdev_err(dev, 796 + "gxio_mpipe_register_buffer_memory failed: %d\n", 797 + rc); 798 + return rc; 799 + } 800 + 801 + return 0; 802 + } 803 + 804 + /* Allocate per-cpu resources (memory for completions and idescs). 805 + * This routine supports tile_net_init_mpipe(), below. 
806 + */ 807 + static int alloc_percpu_mpipe_resources(struct net_device *dev, 808 + int cpu, int ring) 809 + { 810 + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 811 + int order, i, rc; 812 + struct page *page; 813 + void *addr; 814 + 815 + /* Allocate the "comps". */ 816 + order = get_order(COMPS_SIZE); 817 + page = homecache_alloc_pages(GFP_KERNEL, order, cpu); 818 + if (page == NULL) { 819 + netdev_err(dev, "Failed to alloc %zd bytes comps memory\n", 820 + COMPS_SIZE); 821 + return -ENOMEM; 822 + } 823 + addr = pfn_to_kaddr(page_to_pfn(page)); 824 + memset(addr, 0, COMPS_SIZE); 825 + for (i = 0; i < TILE_NET_CHANNELS; i++) 826 + info->comps_for_echannel[i] = 827 + addr + i * sizeof(struct tile_net_comps); 828 + 829 + /* If this is a network cpu, create an iqueue. */ 830 + if (cpu_isset(cpu, network_cpus_map)) { 831 + order = get_order(NOTIF_RING_SIZE); 832 + page = homecache_alloc_pages(GFP_KERNEL, order, cpu); 833 + if (page == NULL) { 834 + netdev_err(dev, 835 + "Failed to alloc %zd bytes iqueue memory\n", 836 + NOTIF_RING_SIZE); 837 + return -ENOMEM; 838 + } 839 + addr = pfn_to_kaddr(page_to_pfn(page)); 840 + rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++, 841 + addr, NOTIF_RING_SIZE, 0); 842 + if (rc < 0) { 843 + netdev_err(dev, 844 + "gxio_mpipe_iqueue_init failed: %d\n", rc); 845 + return rc; 846 + } 847 + info->has_iqueue = true; 848 + } 849 + 850 + return ring; 851 + } 852 + 853 + /* Initialize NotifGroup and buckets. 854 + * This routine supports tile_net_init_mpipe(), below. 855 + */ 856 + static int init_notif_group_and_buckets(struct net_device *dev, 857 + int ring, int network_cpus_count) 858 + { 859 + int group, rc; 860 + 861 + /* Allocate one NotifGroup. */ 862 + rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0); 863 + if (rc < 0) { 864 + netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n", 865 + rc); 866 + return rc; 867 + } 868 + group = rc; 869 + 870 + /* Initialize global num_buckets value. 
*/ 871 + if (network_cpus_count > 4) 872 + num_buckets = 256; 873 + else if (network_cpus_count > 1) 874 + num_buckets = 16; 875 + 876 + /* Allocate some buckets, and set global first_bucket value. */ 877 + rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0); 878 + if (rc < 0) { 879 + netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc); 880 + return rc; 881 + } 882 + first_bucket = rc; 883 + 884 + /* Init group and buckets. */ 885 + rc = gxio_mpipe_init_notif_group_and_buckets( 886 + &context, group, ring, network_cpus_count, 887 + first_bucket, num_buckets, 888 + GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY); 889 + if (rc != 0) { 890 + netdev_err( 891 + dev, 892 + "gxio_mpipe_init_notif_group_and_buckets failed: %d\n", 893 + rc); 894 + return rc; 895 + } 896 + 897 + return 0; 898 + } 899 + 900 + /* Create an irq and register it, then activate the irq and request 901 + * interrupts on all cores. Note that "ingress_irq" being initialized 902 + * is how we know not to call tile_net_init_mpipe() again. 903 + * This routine supports tile_net_init_mpipe(), below. 
904 + */ 905 + static int tile_net_setup_interrupts(struct net_device *dev) 906 + { 907 + int cpu, rc; 908 + 909 + rc = create_irq(); 910 + if (rc < 0) { 911 + netdev_err(dev, "create_irq failed: %d\n", rc); 912 + return rc; 913 + } 914 + ingress_irq = rc; 915 + tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); 916 + rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, 917 + 0, NULL, NULL); 918 + if (rc != 0) { 919 + netdev_err(dev, "request_irq failed: %d\n", rc); 920 + destroy_irq(ingress_irq); 921 + ingress_irq = -1; 922 + return rc; 923 + } 924 + 925 + for_each_online_cpu(cpu) { 926 + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 927 + if (info->has_iqueue) { 928 + gxio_mpipe_request_notif_ring_interrupt( 929 + &context, cpu_x(cpu), cpu_y(cpu), 930 + 1, ingress_irq, info->iqueue.ring); 931 + } 932 + } 933 + 934 + return 0; 935 + } 936 + 937 + /* Undo any state set up partially by a failed call to tile_net_init_mpipe. */ 938 + static void tile_net_init_mpipe_fail(void) 939 + { 940 + int cpu; 941 + 942 + /* Do cleanups that require the mpipe context first. */ 943 + if (small_buffer_stack >= 0) 944 + tile_net_pop_all_buffers(small_buffer_stack); 945 + if (large_buffer_stack >= 0) 946 + tile_net_pop_all_buffers(large_buffer_stack); 947 + 948 + /* Destroy mpipe context so the hardware no longer owns any memory. 
*/ 949 + gxio_mpipe_destroy(&context); 950 + 951 + for_each_online_cpu(cpu) { 952 + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 953 + free_pages((unsigned long)(info->comps_for_echannel[0]), 954 + get_order(COMPS_SIZE)); 955 + info->comps_for_echannel[0] = NULL; 956 + free_pages((unsigned long)(info->iqueue.idescs), 957 + get_order(NOTIF_RING_SIZE)); 958 + info->iqueue.idescs = NULL; 959 + } 960 + 961 + if (small_buffer_stack_va) 962 + free_pages_exact(small_buffer_stack_va, buffer_stack_size); 963 + if (large_buffer_stack_va) 964 + free_pages_exact(large_buffer_stack_va, buffer_stack_size); 965 + 966 + small_buffer_stack_va = NULL; 967 + large_buffer_stack_va = NULL; 968 + large_buffer_stack = -1; 969 + small_buffer_stack = -1; 970 + first_bucket = -1; 971 + } 972 + 973 + /* The first time any tilegx network device is opened, we initialize 974 + * the global mpipe state. If this step fails, we fail to open the 975 + * device, but if it succeeds, we never need to do it again, and since 976 + * tile_net can't be unloaded, we never undo it. 977 + * 978 + * Note that some resources in this path (buffer stack indices, 979 + * bindings from init_buffer_stack, etc.) are hypervisor resources 980 + * that are freed implicitly by gxio_mpipe_destroy(). 981 + */ 982 + static int tile_net_init_mpipe(struct net_device *dev) 983 + { 984 + int i, num_buffers, rc; 985 + int cpu; 986 + int first_ring, ring; 987 + int network_cpus_count = cpus_weight(network_cpus_map); 988 + 989 + if (!hash_default) { 990 + netdev_err(dev, "Networking requires hash_default!\n"); 991 + return -EIO; 992 + } 993 + 994 + rc = gxio_mpipe_init(&context, 0); 995 + if (rc != 0) { 996 + netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc); 997 + return -EIO; 998 + } 999 + 1000 + /* Set up the buffer stacks. 
*/ 1001 + num_buffers = 1002 + network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH); 1003 + rc = init_buffer_stacks(dev, num_buffers); 1004 + if (rc != 0) 1005 + goto fail; 1006 + 1007 + /* Provide initial buffers. */ 1008 + rc = -ENOMEM; 1009 + for (i = 0; i < num_buffers; i++) { 1010 + if (!tile_net_provide_buffer(true)) { 1011 + netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); 1012 + goto fail; 1013 + } 1014 + } 1015 + for (i = 0; i < num_buffers; i++) { 1016 + if (!tile_net_provide_buffer(false)) { 1017 + netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); 1018 + goto fail; 1019 + } 1020 + } 1021 + 1022 + /* Allocate one NotifRing for each network cpu. */ 1023 + rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0); 1024 + if (rc < 0) { 1025 + netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n", 1026 + rc); 1027 + goto fail; 1028 + } 1029 + 1030 + /* Init NotifRings per-cpu. */ 1031 + first_ring = rc; 1032 + ring = first_ring; 1033 + for_each_online_cpu(cpu) { 1034 + rc = alloc_percpu_mpipe_resources(dev, cpu, ring); 1035 + if (rc < 0) 1036 + goto fail; 1037 + ring = rc; 1038 + } 1039 + 1040 + /* Initialize NotifGroup and buckets. */ 1041 + rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count); 1042 + if (rc != 0) 1043 + goto fail; 1044 + 1045 + /* Create and enable interrupts. */ 1046 + rc = tile_net_setup_interrupts(dev); 1047 + if (rc != 0) 1048 + goto fail; 1049 + 1050 + return 0; 1051 + 1052 + fail: 1053 + tile_net_init_mpipe_fail(); 1054 + return rc; 1055 + } 1056 + 1057 + /* Create persistent egress info for a given egress channel. 1058 + * Note that this may be shared between, say, "gbe0" and "xgbe0". 1059 + * ISSUE: Defer header allocation until TSO is actually needed? 
1060 + */ 1061 + static int tile_net_init_egress(struct net_device *dev, int echannel) 1062 + { 1063 + struct page *headers_page, *edescs_page, *equeue_page; 1064 + gxio_mpipe_edesc_t *edescs; 1065 + gxio_mpipe_equeue_t *equeue; 1066 + unsigned char *headers; 1067 + int headers_order, edescs_order, equeue_order; 1068 + size_t edescs_size; 1069 + int edma; 1070 + int rc = -ENOMEM; 1071 + 1072 + /* Only initialize once. */ 1073 + if (egress_for_echannel[echannel].equeue != NULL) 1074 + return 0; 1075 + 1076 + /* Allocate memory for the "headers". */ 1077 + headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES); 1078 + headers_page = alloc_pages(GFP_KERNEL, headers_order); 1079 + if (headers_page == NULL) { 1080 + netdev_warn(dev, 1081 + "Could not alloc %zd bytes for TSO headers.\n", 1082 + PAGE_SIZE << headers_order); 1083 + goto fail; 1084 + } 1085 + headers = pfn_to_kaddr(page_to_pfn(headers_page)); 1086 + 1087 + /* Allocate memory for the "edescs". */ 1088 + edescs_size = EQUEUE_ENTRIES * sizeof(*edescs); 1089 + edescs_order = get_order(edescs_size); 1090 + edescs_page = alloc_pages(GFP_KERNEL, edescs_order); 1091 + if (edescs_page == NULL) { 1092 + netdev_warn(dev, 1093 + "Could not alloc %zd bytes for eDMA ring.\n", 1094 + edescs_size); 1095 + goto fail_headers; 1096 + } 1097 + edescs = pfn_to_kaddr(page_to_pfn(edescs_page)); 1098 + 1099 + /* Allocate memory for the "equeue". */ 1100 + equeue_order = get_order(sizeof(*equeue)); 1101 + equeue_page = alloc_pages(GFP_KERNEL, equeue_order); 1102 + if (equeue_page == NULL) { 1103 + netdev_warn(dev, 1104 + "Could not alloc %zd bytes for equeue info.\n", 1105 + PAGE_SIZE << equeue_order); 1106 + goto fail_edescs; 1107 + } 1108 + equeue = pfn_to_kaddr(page_to_pfn(equeue_page)); 1109 + 1110 + /* Allocate an edma ring. Note that in practice this can't 1111 + * fail, which is good, because we will leak an edma ring if so. 
1112 + */ 1113 + rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0); 1114 + if (rc < 0) { 1115 + netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n", 1116 + rc); 1117 + goto fail_equeue; 1118 + } 1119 + edma = rc; 1120 + 1121 + /* Initialize the equeue. */ 1122 + rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel, 1123 + edescs, edescs_size, 0); 1124 + if (rc != 0) { 1125 + netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc); 1126 + goto fail_equeue; 1127 + } 1128 + 1129 + /* Done. */ 1130 + egress_for_echannel[echannel].equeue = equeue; 1131 + egress_for_echannel[echannel].headers = headers; 1132 + return 0; 1133 + 1134 + fail_equeue: 1135 + __free_pages(equeue_page, equeue_order); 1136 + 1137 + fail_edescs: 1138 + __free_pages(edescs_page, edescs_order); 1139 + 1140 + fail_headers: 1141 + __free_pages(headers_page, headers_order); 1142 + 1143 + fail: 1144 + return rc; 1145 + } 1146 + 1147 + /* Return channel number for a newly-opened link. */ 1148 + static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link, 1149 + const char *link_name) 1150 + { 1151 + int rc = gxio_mpipe_link_open(link, &context, link_name, 0); 1152 + if (rc < 0) { 1153 + netdev_err(dev, "Failed to open '%s'\n", link_name); 1154 + return rc; 1155 + } 1156 + rc = gxio_mpipe_link_channel(link); 1157 + if (rc < 0 || rc >= TILE_NET_CHANNELS) { 1158 + netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc); 1159 + gxio_mpipe_link_close(link); 1160 + return -EINVAL; 1161 + } 1162 + return rc; 1163 + } 1164 + 1165 + /* Help the kernel activate the given network interface. */ 1166 + static int tile_net_open(struct net_device *dev) 1167 + { 1168 + struct tile_net_priv *priv = netdev_priv(dev); 1169 + int cpu, rc; 1170 + 1171 + mutex_lock(&tile_net_devs_for_channel_mutex); 1172 + 1173 + /* Do one-time initialization the first time any device is opened. 
*/ 1174 + if (ingress_irq < 0) { 1175 + rc = tile_net_init_mpipe(dev); 1176 + if (rc != 0) 1177 + goto fail; 1178 + } 1179 + 1180 + /* Determine if this is the "loopify" device. */ 1181 + if (unlikely((loopify_link_name != NULL) && 1182 + !strcmp(dev->name, loopify_link_name))) { 1183 + rc = tile_net_link_open(dev, &priv->link, "loop0"); 1184 + if (rc < 0) 1185 + goto fail; 1186 + priv->channel = rc; 1187 + rc = tile_net_link_open(dev, &priv->loopify_link, "loop1"); 1188 + if (rc < 0) 1189 + goto fail; 1190 + priv->loopify_channel = rc; 1191 + priv->echannel = rc; 1192 + } else { 1193 + rc = tile_net_link_open(dev, &priv->link, dev->name); 1194 + if (rc < 0) 1195 + goto fail; 1196 + priv->channel = rc; 1197 + priv->echannel = rc; 1198 + } 1199 + 1200 + /* Initialize egress info (if needed). Once ever, per echannel. */ 1201 + rc = tile_net_init_egress(dev, priv->echannel); 1202 + if (rc != 0) 1203 + goto fail; 1204 + 1205 + tile_net_devs_for_channel[priv->channel] = dev; 1206 + 1207 + rc = tile_net_update(dev); 1208 + if (rc != 0) 1209 + goto fail; 1210 + 1211 + mutex_unlock(&tile_net_devs_for_channel_mutex); 1212 + 1213 + /* Initialize the transmit wake timer for this device for each cpu. 
*/ 1214 + for_each_online_cpu(cpu) { 1215 + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 1216 + struct tile_net_tx_wake *tx_wake = 1217 + &info->tx_wake[priv->echannel]; 1218 + 1219 + hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC, 1220 + HRTIMER_MODE_REL); 1221 + tx_wake->timer.function = tile_net_handle_tx_wake_timer; 1222 + tx_wake->dev = dev; 1223 + } 1224 + 1225 + for_each_online_cpu(cpu) 1226 + netif_start_subqueue(dev, cpu); 1227 + netif_carrier_on(dev); 1228 + return 0; 1229 + 1230 + fail: 1231 + if (priv->loopify_channel >= 0) { 1232 + if (gxio_mpipe_link_close(&priv->loopify_link) != 0) 1233 + netdev_warn(dev, "Failed to close loopify link!\n"); 1234 + priv->loopify_channel = -1; 1235 + } 1236 + if (priv->channel >= 0) { 1237 + if (gxio_mpipe_link_close(&priv->link) != 0) 1238 + netdev_warn(dev, "Failed to close link!\n"); 1239 + priv->channel = -1; 1240 + } 1241 + priv->echannel = -1; 1242 + tile_net_devs_for_channel[priv->channel] = NULL; 1243 + mutex_unlock(&tile_net_devs_for_channel_mutex); 1244 + 1245 + /* Don't return raw gxio error codes to generic Linux. */ 1246 + return (rc > -512) ? rc : -EIO; 1247 + } 1248 + 1249 + /* Help the kernel deactivate the given network interface. 
*/ 1250 + static int tile_net_stop(struct net_device *dev) 1251 + { 1252 + struct tile_net_priv *priv = netdev_priv(dev); 1253 + int cpu; 1254 + 1255 + for_each_online_cpu(cpu) { 1256 + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); 1257 + struct tile_net_tx_wake *tx_wake = 1258 + &info->tx_wake[priv->echannel]; 1259 + 1260 + hrtimer_cancel(&tx_wake->timer); 1261 + netif_stop_subqueue(dev, cpu); 1262 + } 1263 + 1264 + mutex_lock(&tile_net_devs_for_channel_mutex); 1265 + tile_net_devs_for_channel[priv->channel] = NULL; 1266 + (void)tile_net_update(dev); 1267 + if (priv->loopify_channel >= 0) { 1268 + if (gxio_mpipe_link_close(&priv->loopify_link) != 0) 1269 + netdev_warn(dev, "Failed to close loopify link!\n"); 1270 + priv->loopify_channel = -1; 1271 + } 1272 + if (priv->channel >= 0) { 1273 + if (gxio_mpipe_link_close(&priv->link) != 0) 1274 + netdev_warn(dev, "Failed to close link!\n"); 1275 + priv->channel = -1; 1276 + } 1277 + priv->echannel = -1; 1278 + mutex_unlock(&tile_net_devs_for_channel_mutex); 1279 + 1280 + return 0; 1281 + } 1282 + 1283 + /* Determine the VA for a fragment. */ 1284 + static inline void *tile_net_frag_buf(skb_frag_t *f) 1285 + { 1286 + unsigned long pfn = page_to_pfn(skb_frag_page(f)); 1287 + return pfn_to_kaddr(pfn) + f->page_offset; 1288 + } 1289 + 1290 + /* Acquire a completion entry and an egress slot, or if we can't, 1291 + * stop the queue and schedule the tx_wake timer. 1292 + */ 1293 + static s64 tile_net_equeue_try_reserve(struct net_device *dev, 1294 + struct tile_net_comps *comps, 1295 + gxio_mpipe_equeue_t *equeue, 1296 + int num_edescs) 1297 + { 1298 + /* Try to acquire a completion entry. */ 1299 + if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 || 1300 + tile_net_free_comps(equeue, comps, 32, false) != 0) { 1301 + 1302 + /* Try to acquire an egress slot. 
*/ 1303 + s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); 1304 + if (slot >= 0) 1305 + return slot; 1306 + 1307 + /* Freeing some completions gives the equeue time to drain. */ 1308 + tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false); 1309 + 1310 + slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); 1311 + if (slot >= 0) 1312 + return slot; 1313 + } 1314 + 1315 + /* Still nothing; give up and stop the queue for a short while. */ 1316 + netif_stop_subqueue(dev, smp_processor_id()); 1317 + tile_net_schedule_tx_wake_timer(dev); 1318 + return -1; 1319 + } 1320 + 1321 + /* Determine how many edesc's are needed for TSO. 1322 + * 1323 + * Sometimes, if "sendfile()" requires copying, we will be called with 1324 + * "data" containing the header and payload, with "frags" being empty. 1325 + * Sometimes, for example when using NFS over TCP, a single segment can 1326 + * span 3 fragments. This requires special care. 1327 + */ 1328 + static int tso_count_edescs(struct sk_buff *skb) 1329 + { 1330 + struct skb_shared_info *sh = skb_shinfo(skb); 1331 + unsigned int data_len = skb->data_len; 1332 + unsigned int p_len = sh->gso_size; 1333 + long f_id = -1; /* id of the current fragment */ 1334 + long f_size = -1; /* size of the current fragment */ 1335 + long f_used = -1; /* bytes used from the current fragment */ 1336 + long n; /* size of the current piece of payload */ 1337 + int num_edescs = 0; 1338 + int segment; 1339 + 1340 + for (segment = 0; segment < sh->gso_segs; segment++) { 1341 + 1342 + unsigned int p_used = 0; 1343 + 1344 + /* One edesc for header and for each piece of the payload. */ 1345 + for (num_edescs++; p_used < p_len; num_edescs++) { 1346 + 1347 + /* Advance as needed. */ 1348 + while (f_used >= f_size) { 1349 + f_id++; 1350 + f_size = sh->frags[f_id].size; 1351 + f_used = 0; 1352 + } 1353 + 1354 + /* Use bytes from the current fragment. 
*/ 1355 + n = p_len - p_used; 1356 + if (n > f_size - f_used) 1357 + n = f_size - f_used; 1358 + f_used += n; 1359 + p_used += n; 1360 + } 1361 + 1362 + /* The last segment may be less than gso_size. */ 1363 + data_len -= p_len; 1364 + if (data_len < p_len) 1365 + p_len = data_len; 1366 + } 1367 + 1368 + return num_edescs; 1369 + } 1370 + 1371 + /* Prepare modified copies of the skbuff headers. 1372 + * FIXME: add support for IPv6. 1373 + */ 1374 + static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, 1375 + s64 slot) 1376 + { 1377 + struct skb_shared_info *sh = skb_shinfo(skb); 1378 + struct iphdr *ih; 1379 + struct tcphdr *th; 1380 + unsigned int data_len = skb->data_len; 1381 + unsigned char *data = skb->data; 1382 + unsigned int ih_off, th_off, sh_len, p_len; 1383 + unsigned int isum_seed, tsum_seed, id, seq; 1384 + long f_id = -1; /* id of the current fragment */ 1385 + long f_size = -1; /* size of the current fragment */ 1386 + long f_used = -1; /* bytes used from the current fragment */ 1387 + long n; /* size of the current piece of payload */ 1388 + int segment; 1389 + 1390 + /* Locate original headers and compute various lengths. */ 1391 + ih = ip_hdr(skb); 1392 + th = tcp_hdr(skb); 1393 + ih_off = skb_network_offset(skb); 1394 + th_off = skb_transport_offset(skb); 1395 + sh_len = th_off + tcp_hdrlen(skb); 1396 + p_len = sh->gso_size; 1397 + 1398 + /* Set up seed values for IP and TCP csum and initialize id and seq. */ 1399 + isum_seed = ((0xFFFF - ih->check) + 1400 + (0xFFFF - ih->tot_len) + 1401 + (0xFFFF - ih->id)); 1402 + tsum_seed = th->check + (0xFFFF ^ htons(skb->len)); 1403 + id = ntohs(ih->id); 1404 + seq = ntohl(th->seq); 1405 + 1406 + /* Prepare all the headers. */ 1407 + for (segment = 0; segment < sh->gso_segs; segment++) { 1408 + unsigned char *buf; 1409 + unsigned int p_used = 0; 1410 + 1411 + /* Copy to the header memory for this segment. 
*/ 1412 + buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + 1413 + NET_IP_ALIGN; 1414 + memcpy(buf, data, sh_len); 1415 + 1416 + /* Update copied ip header. */ 1417 + ih = (struct iphdr *)(buf + ih_off); 1418 + ih->tot_len = htons(sh_len + p_len - ih_off); 1419 + ih->id = htons(id); 1420 + ih->check = csum_long(isum_seed + ih->tot_len + 1421 + ih->id) ^ 0xffff; 1422 + 1423 + /* Update copied tcp header. */ 1424 + th = (struct tcphdr *)(buf + th_off); 1425 + th->seq = htonl(seq); 1426 + th->check = csum_long(tsum_seed + htons(sh_len + p_len)); 1427 + if (segment != sh->gso_segs - 1) { 1428 + th->fin = 0; 1429 + th->psh = 0; 1430 + } 1431 + 1432 + /* Skip past the header. */ 1433 + slot++; 1434 + 1435 + /* Skip past the payload. */ 1436 + while (p_used < p_len) { 1437 + 1438 + /* Advance as needed. */ 1439 + while (f_used >= f_size) { 1440 + f_id++; 1441 + f_size = sh->frags[f_id].size; 1442 + f_used = 0; 1443 + } 1444 + 1445 + /* Use bytes from the current fragment. */ 1446 + n = p_len - p_used; 1447 + if (n > f_size - f_used) 1448 + n = f_size - f_used; 1449 + f_used += n; 1450 + p_used += n; 1451 + 1452 + slot++; 1453 + } 1454 + 1455 + id++; 1456 + seq += p_len; 1457 + 1458 + /* The last segment may be less than gso_size. */ 1459 + data_len -= p_len; 1460 + if (data_len < p_len) 1461 + p_len = data_len; 1462 + } 1463 + 1464 + /* Flush the headers so they are ready for hardware DMA. */ 1465 + wmb(); 1466 + } 1467 + 1468 + /* Pass all the data to mpipe for egress. 
*/
static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
		       struct sk_buff *skb, unsigned char *headers, s64 slot)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int data_len = skb->data_len;
	unsigned int p_len = sh->gso_size;
	gxio_mpipe_edesc_t edesc_head = { { 0 } };
	gxio_mpipe_edesc_t edesc_body = { { 0 } };
	long f_id = -1;    /* id of the current fragment */
	long f_size = -1;  /* size of the current fragment */
	long f_used = -1;  /* bytes used from the current fragment */
	long n;            /* size of the current piece of payload */
	unsigned long tx_packets = 0, tx_bytes = 0;
	unsigned int csum_start, sh_len;
	int segment;

	/* Prepare to egress the headers: set up header edesc. */
	csum_start = skb_checksum_start_offset(skb);
	sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	edesc_head.csum = 1;
	edesc_head.csum_start = csum_start;
	edesc_head.csum_dest = csum_start + skb->csum_offset;
	edesc_head.xfer_size = sh_len;

	/* This is only used to specify the TLB. */
	edesc_head.stack_idx = large_buffer_stack;
	edesc_body.stack_idx = large_buffer_stack;

	/* Egress all the edescs, one header edesc per segment plus one
	 * body edesc per payload piece.
	 */
	for (segment = 0; segment < sh->gso_segs; segment++) {
		void *va;
		unsigned char *buf;
		unsigned int p_used = 0;

		/* Egress the header. */
		buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
			NET_IP_ALIGN;
		edesc_head.va = va_to_tile_io_addr(buf);
		gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
		slot++;

		/* Egress the payload. */
		while (p_used < p_len) {

			/* Advance to the fragment holding the next bytes.
			 */
			while (f_used >= f_size) {
				f_id++;
				f_size = sh->frags[f_id].size;
				f_used = 0;
			}

			va = tile_net_frag_buf(&sh->frags[f_id]) + f_used;

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;

			/* Egress a piece of the payload. */
			edesc_body.va = va_to_tile_io_addr(va);
			edesc_body.xfer_size = n;
			edesc_body.bound = !(p_used < p_len);
			gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
			slot++;
		}

		tx_packets++;
		tx_bytes += sh_len + p_len;

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* Update stats. */
	tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
	tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
}

/* Do "TSO" handling for egress.
 *
 * Normally drivers set NETIF_F_TSO only to support hardware TSO;
 * otherwise the stack uses scatter-gather to implement GSO in software.
 * On our testing, enabling GSO support (via NETIF_F_SG) drops network
 * performance down to around 7.5 Gbps on the 10G interfaces, although
 * also dropping cpu utilization way down, to under 8%.  But
 * implementing "TSO" in the driver brings performance back up to line
 * rate, while dropping cpu usage even further, to less than 4%.  In
 * practice, profiling of GSO shows that skb_segment() is what causes
 * the performance overheads; we benefit in the driver from using
 * preallocated memory to duplicate the TCP/IP headers.
1564 + */ 1565 + static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) 1566 + { 1567 + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 1568 + struct tile_net_priv *priv = netdev_priv(dev); 1569 + int channel = priv->echannel; 1570 + struct tile_net_egress *egress = &egress_for_echannel[channel]; 1571 + struct tile_net_comps *comps = info->comps_for_echannel[channel]; 1572 + gxio_mpipe_equeue_t *equeue = egress->equeue; 1573 + unsigned long irqflags; 1574 + int num_edescs; 1575 + s64 slot; 1576 + 1577 + /* Determine how many mpipe edesc's are needed. */ 1578 + num_edescs = tso_count_edescs(skb); 1579 + 1580 + local_irq_save(irqflags); 1581 + 1582 + /* Try to acquire a completion entry and an egress slot. */ 1583 + slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs); 1584 + if (slot < 0) { 1585 + local_irq_restore(irqflags); 1586 + return NETDEV_TX_BUSY; 1587 + } 1588 + 1589 + /* Set up copies of header data properly. */ 1590 + tso_headers_prepare(skb, egress->headers, slot); 1591 + 1592 + /* Actually pass the data to the network hardware. */ 1593 + tso_egress(dev, equeue, skb, egress->headers, slot); 1594 + 1595 + /* Add a completion record. */ 1596 + add_comp(equeue, comps, slot + num_edescs - 1, skb); 1597 + 1598 + local_irq_restore(irqflags); 1599 + 1600 + /* Make sure the egress timer is scheduled. */ 1601 + tile_net_schedule_egress_timer(); 1602 + 1603 + return NETDEV_TX_OK; 1604 + } 1605 + 1606 + /* Analyze the body and frags for a transmit request. 
*/ 1607 + static unsigned int tile_net_tx_frags(struct frag *frags, 1608 + struct sk_buff *skb, 1609 + void *b_data, unsigned int b_len) 1610 + { 1611 + unsigned int i, n = 0; 1612 + 1613 + struct skb_shared_info *sh = skb_shinfo(skb); 1614 + 1615 + if (b_len != 0) { 1616 + frags[n].buf = b_data; 1617 + frags[n++].length = b_len; 1618 + } 1619 + 1620 + for (i = 0; i < sh->nr_frags; i++) { 1621 + skb_frag_t *f = &sh->frags[i]; 1622 + frags[n].buf = tile_net_frag_buf(f); 1623 + frags[n++].length = skb_frag_size(f); 1624 + } 1625 + 1626 + return n; 1627 + } 1628 + 1629 + /* Help the kernel transmit a packet. */ 1630 + static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) 1631 + { 1632 + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 1633 + struct tile_net_priv *priv = netdev_priv(dev); 1634 + struct tile_net_egress *egress = &egress_for_echannel[priv->echannel]; 1635 + gxio_mpipe_equeue_t *equeue = egress->equeue; 1636 + struct tile_net_comps *comps = 1637 + info->comps_for_echannel[priv->echannel]; 1638 + unsigned int len = skb->len; 1639 + unsigned char *data = skb->data; 1640 + unsigned int num_edescs; 1641 + struct frag frags[MAX_FRAGS]; 1642 + gxio_mpipe_edesc_t edescs[MAX_FRAGS]; 1643 + unsigned long irqflags; 1644 + gxio_mpipe_edesc_t edesc = { { 0 } }; 1645 + unsigned int i; 1646 + s64 slot; 1647 + 1648 + if (skb_is_gso(skb)) 1649 + return tile_net_tx_tso(skb, dev); 1650 + 1651 + num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); 1652 + 1653 + /* This is only used to specify the TLB. */ 1654 + edesc.stack_idx = large_buffer_stack; 1655 + 1656 + /* Prepare the edescs. */ 1657 + for (i = 0; i < num_edescs; i++) { 1658 + edesc.xfer_size = frags[i].length; 1659 + edesc.va = va_to_tile_io_addr(frags[i].buf); 1660 + edescs[i] = edesc; 1661 + } 1662 + 1663 + /* Mark the final edesc. */ 1664 + edescs[num_edescs - 1].bound = 1; 1665 + 1666 + /* Add checksum info to the initial edesc, if needed. 
*/ 1667 + if (skb->ip_summed == CHECKSUM_PARTIAL) { 1668 + unsigned int csum_start = skb_checksum_start_offset(skb); 1669 + edescs[0].csum = 1; 1670 + edescs[0].csum_start = csum_start; 1671 + edescs[0].csum_dest = csum_start + skb->csum_offset; 1672 + } 1673 + 1674 + local_irq_save(irqflags); 1675 + 1676 + /* Try to acquire a completion entry and an egress slot. */ 1677 + slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs); 1678 + if (slot < 0) { 1679 + local_irq_restore(irqflags); 1680 + return NETDEV_TX_BUSY; 1681 + } 1682 + 1683 + for (i = 0; i < num_edescs; i++) 1684 + gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++); 1685 + 1686 + /* Add a completion record. */ 1687 + add_comp(equeue, comps, slot - 1, skb); 1688 + 1689 + /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */ 1690 + tile_net_stats_add(1, &priv->stats.tx_packets); 1691 + tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN), 1692 + &priv->stats.tx_bytes); 1693 + 1694 + local_irq_restore(irqflags); 1695 + 1696 + /* Make sure the egress timer is scheduled. */ 1697 + tile_net_schedule_egress_timer(); 1698 + 1699 + return NETDEV_TX_OK; 1700 + } 1701 + 1702 + /* Return subqueue id on this core (one per core). */ 1703 + static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb) 1704 + { 1705 + return smp_processor_id(); 1706 + } 1707 + 1708 + /* Deal with a transmit timeout. */ 1709 + static void tile_net_tx_timeout(struct net_device *dev) 1710 + { 1711 + int cpu; 1712 + 1713 + for_each_online_cpu(cpu) 1714 + netif_wake_subqueue(dev, cpu); 1715 + } 1716 + 1717 + /* Ioctl commands. */ 1718 + static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1719 + { 1720 + return -EOPNOTSUPP; 1721 + } 1722 + 1723 + /* Get system network statistics for device. 
*/ 1724 + static struct net_device_stats *tile_net_get_stats(struct net_device *dev) 1725 + { 1726 + struct tile_net_priv *priv = netdev_priv(dev); 1727 + return &priv->stats; 1728 + } 1729 + 1730 + /* Change the MTU. */ 1731 + static int tile_net_change_mtu(struct net_device *dev, int new_mtu) 1732 + { 1733 + if ((new_mtu < 68) || (new_mtu > 1500)) 1734 + return -EINVAL; 1735 + dev->mtu = new_mtu; 1736 + return 0; 1737 + } 1738 + 1739 + /* Change the Ethernet address of the NIC. 1740 + * 1741 + * The hypervisor driver does not support changing MAC address. However, 1742 + * the hardware does not do anything with the MAC address, so the address 1743 + * which gets used on outgoing packets, and which is accepted on incoming 1744 + * packets, is completely up to us. 1745 + * 1746 + * Returns 0 on success, negative on failure. 1747 + */ 1748 + static int tile_net_set_mac_address(struct net_device *dev, void *p) 1749 + { 1750 + struct sockaddr *addr = p; 1751 + 1752 + if (!is_valid_ether_addr(addr->sa_data)) 1753 + return -EINVAL; 1754 + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1755 + return 0; 1756 + } 1757 + 1758 + #ifdef CONFIG_NET_POLL_CONTROLLER 1759 + /* Polling 'interrupt' - used by things like netconsole to send skbs 1760 + * without having to re-enable interrupts. It's not called while 1761 + * the interrupt routine is executing. 
1762 + */ 1763 + static void tile_net_netpoll(struct net_device *dev) 1764 + { 1765 + disable_percpu_irq(ingress_irq); 1766 + tile_net_handle_ingress_irq(ingress_irq, NULL); 1767 + enable_percpu_irq(ingress_irq, 0); 1768 + } 1769 + #endif 1770 + 1771 + static const struct net_device_ops tile_net_ops = { 1772 + .ndo_open = tile_net_open, 1773 + .ndo_stop = tile_net_stop, 1774 + .ndo_start_xmit = tile_net_tx, 1775 + .ndo_select_queue = tile_net_select_queue, 1776 + .ndo_do_ioctl = tile_net_ioctl, 1777 + .ndo_get_stats = tile_net_get_stats, 1778 + .ndo_change_mtu = tile_net_change_mtu, 1779 + .ndo_tx_timeout = tile_net_tx_timeout, 1780 + .ndo_set_mac_address = tile_net_set_mac_address, 1781 + #ifdef CONFIG_NET_POLL_CONTROLLER 1782 + .ndo_poll_controller = tile_net_netpoll, 1783 + #endif 1784 + }; 1785 + 1786 + /* The setup function. 1787 + * 1788 + * This uses ether_setup() to assign various fields in dev, including 1789 + * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. 1790 + */ 1791 + static void tile_net_setup(struct net_device *dev) 1792 + { 1793 + ether_setup(dev); 1794 + dev->netdev_ops = &tile_net_ops; 1795 + dev->watchdog_timeo = TILE_NET_TIMEOUT; 1796 + dev->features |= NETIF_F_LLTX; 1797 + dev->features |= NETIF_F_HW_CSUM; 1798 + dev->features |= NETIF_F_SG; 1799 + dev->features |= NETIF_F_TSO; 1800 + dev->mtu = 1500; 1801 + } 1802 + 1803 + /* Allocate the device structure, register the device, and obtain the 1804 + * MAC address from the hypervisor. 1805 + */ 1806 + static void tile_net_dev_init(const char *name, const uint8_t *mac) 1807 + { 1808 + int ret; 1809 + int i; 1810 + int nz_addr = 0; 1811 + struct net_device *dev; 1812 + struct tile_net_priv *priv; 1813 + 1814 + /* HACK: Ignore "loop" links. */ 1815 + if (strncmp(name, "loop", 4) == 0) 1816 + return; 1817 + 1818 + /* Allocate the device structure. Normally, "name" is a 1819 + * template, instantiated by register_netdev(), but not for us. 
1820 + */ 1821 + dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup, 1822 + NR_CPUS, 1); 1823 + if (!dev) { 1824 + pr_err("alloc_netdev_mqs(%s) failed\n", name); 1825 + return; 1826 + } 1827 + 1828 + /* Initialize "priv". */ 1829 + priv = netdev_priv(dev); 1830 + memset(priv, 0, sizeof(*priv)); 1831 + priv->dev = dev; 1832 + priv->channel = -1; 1833 + priv->loopify_channel = -1; 1834 + priv->echannel = -1; 1835 + 1836 + /* Get the MAC address and set it in the device struct; this must 1837 + * be done before the device is opened. If the MAC is all zeroes, 1838 + * we use a random address, since we're probably on the simulator. 1839 + */ 1840 + for (i = 0; i < 6; i++) 1841 + nz_addr |= mac[i]; 1842 + 1843 + if (nz_addr) { 1844 + memcpy(dev->dev_addr, mac, 6); 1845 + dev->addr_len = 6; 1846 + } else { 1847 + random_ether_addr(dev->dev_addr); 1848 + } 1849 + 1850 + /* Register the network device. */ 1851 + ret = register_netdev(dev); 1852 + if (ret) { 1853 + netdev_err(dev, "register_netdev failed %d\n", ret); 1854 + free_netdev(dev); 1855 + return; 1856 + } 1857 + } 1858 + 1859 + /* Per-cpu module initialization. */ 1860 + static void tile_net_init_module_percpu(void *unused) 1861 + { 1862 + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); 1863 + int my_cpu = smp_processor_id(); 1864 + 1865 + info->has_iqueue = false; 1866 + 1867 + info->my_cpu = my_cpu; 1868 + 1869 + /* Initialize the egress timer. */ 1870 + hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1871 + info->egress_timer.function = tile_net_handle_egress_timer; 1872 + } 1873 + 1874 + /* Module initialization. */ 1875 + static int __init tile_net_init_module(void) 1876 + { 1877 + int i; 1878 + char name[GXIO_MPIPE_LINK_NAME_LEN]; 1879 + uint8_t mac[6]; 1880 + 1881 + pr_info("Tilera Network Driver\n"); 1882 + 1883 + mutex_init(&tile_net_devs_for_channel_mutex); 1884 + 1885 + /* Initialize each CPU. 
*/ 1886 + on_each_cpu(tile_net_init_module_percpu, NULL, 1); 1887 + 1888 + /* Find out what devices we have, and initialize them. */ 1889 + for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++) 1890 + tile_net_dev_init(name, mac); 1891 + 1892 + if (!network_cpus_init()) 1893 + network_cpus_map = *cpu_online_mask; 1894 + 1895 + return 0; 1896 + } 1897 + 1898 + module_init(tile_net_init_module);
+1
drivers/net/hyperv/hyperv_net.h
··· 478 478 u32 nvsp_version; 479 479 480 480 atomic_t num_outstanding_sends; 481 + wait_queue_head_t wait_drain; 481 482 bool start_remove; 482 483 bool destroy; 483 484 /*
+6 -6
drivers/net/hyperv/netvsc.c
··· 42 42 if (!net_device) 43 43 return NULL; 44 44 45 + init_waitqueue_head(&net_device->wait_drain); 45 46 net_device->start_remove = false; 46 47 net_device->destroy = false; 47 48 net_device->dev = device; ··· 388 387 spin_unlock_irqrestore(&device->channel->inbound_lock, flags); 389 388 390 389 /* Wait for all send completions */ 391 - while (atomic_read(&net_device->num_outstanding_sends)) { 392 - dev_info(&device->device, 393 - "waiting for %d requests to complete...\n", 394 - atomic_read(&net_device->num_outstanding_sends)); 395 - udelay(100); 396 - } 390 + wait_event(net_device->wait_drain, 391 + atomic_read(&net_device->num_outstanding_sends) == 0); 397 392 398 393 netvsc_disconnect_vsp(net_device); 399 394 ··· 482 485 483 486 num_outstanding_sends = 484 487 atomic_dec_return(&net_device->num_outstanding_sends); 488 + 489 + if (net_device->destroy && num_outstanding_sends == 0) 490 + wake_up(&net_device->wait_drain); 485 491 486 492 if (netif_queue_stopped(ndev) && !net_device->start_remove && 487 493 (hv_ringbuf_avail_percent(&device->channel->outbound)
+7
drivers/net/phy/icplus.c
··· 41 41 #define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ 42 42 #define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ 43 43 #define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */ 44 + #define IP101A_G_IRQ_PIN_USED (1<<15) /* INTR pin used */ 45 + #define IP101A_G_IRQ_DEFAULT IP101A_G_IRQ_PIN_USED 44 46 45 47 static int ip175c_config_init(struct phy_device *phydev) 46 48 { ··· 135 133 return c; 136 134 c |= IP1001_APS_ON; 137 135 c = phy_write(phydev, IP1001_SPEC_CTRL_STATUS_2, c); 136 + if (c < 0) 137 + return c; 138 + 139 + /* INTR pin used: speed/link/duplex will cause an interrupt */ 140 + c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT); 138 141 if (c < 0) 139 142 return c; 140 143
+1 -1
drivers/net/phy/mdio_bus.c
··· 96 96 } 97 97 /** 98 98 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. 99 - * @mdio_np: Pointer to the mii_bus. 99 + * @mdio_bus_np: Pointer to the mii_bus. 100 100 * 101 101 * Returns a pointer to the mii_bus, or NULL if none found. 102 102 *
+10 -4
drivers/net/usb/sierra_net.c
··· 946 946 } 947 947 948 948 static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; 949 - static const struct sierra_net_info_data sierra_net_info_data_68A3 = { 949 + static const struct sierra_net_info_data sierra_net_info_data_direct_ip = { 950 950 .rx_urb_size = 8 * 1024, 951 951 .whitelist = { 952 952 .infolen = ARRAY_SIZE(sierra_net_ifnum_list), ··· 954 954 } 955 955 }; 956 956 957 - static const struct driver_info sierra_net_info_68A3 = { 957 + static const struct driver_info sierra_net_info_direct_ip = { 958 958 .description = "Sierra Wireless USB-to-WWAN Modem", 959 959 .flags = FLAG_WWAN | FLAG_SEND_ZLP, 960 960 .bind = sierra_net_bind, ··· 962 962 .status = sierra_net_status, 963 963 .rx_fixup = sierra_net_rx_fixup, 964 964 .tx_fixup = sierra_net_tx_fixup, 965 - .data = (unsigned long)&sierra_net_info_data_68A3, 965 + .data = (unsigned long)&sierra_net_info_data_direct_ip, 966 966 }; 967 967 968 968 static const struct usb_device_id products[] = { 969 969 {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ 970 - .driver_info = (unsigned long) &sierra_net_info_68A3}, 970 + .driver_info = (unsigned long) &sierra_net_info_direct_ip}, 971 + {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */ 972 + .driver_info = (unsigned long) &sierra_net_info_direct_ip}, 973 + {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */ 974 + .driver_info = (unsigned long) &sierra_net_info_direct_ip}, 975 + {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */ 976 + .driver_info = (unsigned long) &sierra_net_info_direct_ip}, 971 977 972 978 {}, /* last item */ 973 979 };
+12 -7
drivers/net/virtio_net.c
··· 42 42 #define VIRTNET_DRIVER_VERSION "1.0.0" 43 43 44 44 struct virtnet_stats { 45 - struct u64_stats_sync syncp; 45 + struct u64_stats_sync tx_syncp; 46 + struct u64_stats_sync rx_syncp; 46 47 u64 tx_bytes; 47 48 u64 tx_packets; 48 49 ··· 301 300 302 301 hdr = skb_vnet_hdr(skb); 303 302 304 - u64_stats_update_begin(&stats->syncp); 303 + u64_stats_update_begin(&stats->rx_syncp); 305 304 stats->rx_bytes += skb->len; 306 305 stats->rx_packets++; 307 - u64_stats_update_end(&stats->syncp); 306 + u64_stats_update_end(&stats->rx_syncp); 308 307 309 308 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 310 309 pr_debug("Needs csum!\n"); ··· 566 565 while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { 567 566 pr_debug("Sent skb %p\n", skb); 568 567 569 - u64_stats_update_begin(&stats->syncp); 568 + u64_stats_update_begin(&stats->tx_syncp); 570 569 stats->tx_bytes += skb->len; 571 570 stats->tx_packets++; 572 - u64_stats_update_end(&stats->syncp); 571 + u64_stats_update_end(&stats->tx_syncp); 573 572 574 573 tot_sgs += skb_vnet_hdr(skb)->num_sg; 575 574 dev_kfree_skb_any(skb); ··· 704 703 u64 tpackets, tbytes, rpackets, rbytes; 705 704 706 705 do { 707 - start = u64_stats_fetch_begin(&stats->syncp); 706 + start = u64_stats_fetch_begin(&stats->tx_syncp); 708 707 tpackets = stats->tx_packets; 709 708 tbytes = stats->tx_bytes; 709 + } while (u64_stats_fetch_retry(&stats->tx_syncp, start)); 710 + 711 + do { 712 + start = u64_stats_fetch_begin(&stats->rx_syncp); 710 713 rpackets = stats->rx_packets; 711 714 rbytes = stats->rx_bytes; 712 - } while (u64_stats_fetch_retry(&stats->syncp, start)); 715 + } while (u64_stats_fetch_retry(&stats->rx_syncp, start)); 713 716 714 717 tot->rx_packets += rpackets; 715 718 tot->tx_packets += tpackets;
+4
drivers/net/wireless/b43/b43.h
··· 877 877 * from the mac80211 subsystem. */ 878 878 u16 mac80211_initially_registered_queues; 879 879 880 + /* Set this if we call ieee80211_register_hw() and check if we call 881 + * ieee80211_unregister_hw(). */ 882 + bool hw_registred; 883 + 880 884 /* We can only have one operating interface (802.11 core) 881 885 * at a time. General information about this interface follows. 882 886 */
+12 -7
drivers/net/wireless/b43/main.c
··· 2437 2437 err = ieee80211_register_hw(wl->hw); 2438 2438 if (err) 2439 2439 goto err_one_core_detach; 2440 + wl->hw_registred = true; 2440 2441 b43_leds_register(wl->current_dev); 2441 2442 goto out; 2442 2443 ··· 5300 5299 5301 5300 hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1; 5302 5301 wl->mac80211_initially_registered_queues = hw->queues; 5302 + wl->hw_registred = false; 5303 5303 hw->max_rates = 2; 5304 5304 SET_IEEE80211_DEV(hw, dev->dev); 5305 5305 if (is_valid_ether_addr(sprom->et1mac)) ··· 5372 5370 * as the ieee80211 unreg will destroy the workqueue. */ 5373 5371 cancel_work_sync(&wldev->restart_work); 5374 5372 5375 - /* Restore the queues count before unregistering, because firmware detect 5376 - * might have modified it. Restoring is important, so the networking 5377 - * stack can properly free resources. */ 5378 - wl->hw->queues = wl->mac80211_initially_registered_queues; 5379 - b43_leds_stop(wldev); 5380 - ieee80211_unregister_hw(wl->hw); 5373 + B43_WARN_ON(!wl); 5374 + if (wl->current_dev == wldev && wl->hw_registred) { 5375 + /* Restore the queues count before unregistering, because firmware detect 5376 + * might have modified it. Restoring is important, so the networking 5377 + * stack can properly free resources. */ 5378 + wl->hw->queues = wl->mac80211_initially_registered_queues; 5379 + b43_leds_stop(wldev); 5380 + ieee80211_unregister_hw(wl->hw); 5381 + } 5381 5382 5382 5383 b43_one_core_detach(wldev->dev); 5383 5384 ··· 5451 5446 cancel_work_sync(&wldev->restart_work); 5452 5447 5453 5448 B43_WARN_ON(!wl); 5454 - if (wl->current_dev == wldev) { 5449 + if (wl->current_dev == wldev && wl->hw_registred) { 5455 5450 /* Restore the queues count before unregistering, because firmware detect 5456 5451 * might have modified it. Restoring is important, so the networking 5457 5452 * stack can properly free resources. */
+2 -2
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
··· 89 89 data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1; 90 90 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret); 91 91 92 - /* redirect, configure ane enable io for interrupt signal */ 92 + /* redirect, configure and enable io for interrupt signal */ 93 93 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; 94 - if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH) 94 + if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH) 95 95 data |= SDIO_SEPINT_ACT_HI; 96 96 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret); 97 97
+5 -15
drivers/net/wireless/ipw2x00/ipw2100.c
··· 1903 1903 netif_stop_queue(priv->net_dev); 1904 1904 } 1905 1905 1906 - /* Called by register_netdev() */ 1907 - static int ipw2100_net_init(struct net_device *dev) 1908 - { 1909 - struct ipw2100_priv *priv = libipw_priv(dev); 1910 - 1911 - return ipw2100_up(priv, 1); 1912 - } 1913 - 1914 1906 static int ipw2100_wdev_init(struct net_device *dev) 1915 1907 { 1916 1908 struct ipw2100_priv *priv = libipw_priv(dev); ··· 6079 6087 .ndo_stop = ipw2100_close, 6080 6088 .ndo_start_xmit = libipw_xmit, 6081 6089 .ndo_change_mtu = libipw_change_mtu, 6082 - .ndo_init = ipw2100_net_init, 6083 6090 .ndo_tx_timeout = ipw2100_tx_timeout, 6084 6091 .ndo_set_mac_address = ipw2100_set_address, 6085 6092 .ndo_validate_addr = eth_validate_addr, ··· 6320 6329 printk(KERN_INFO DRV_NAME 6321 6330 ": Detected Intel PRO/Wireless 2100 Network Connection\n"); 6322 6331 6332 + err = ipw2100_up(priv, 1); 6333 + if (err) 6334 + goto fail; 6335 + 6323 6336 err = ipw2100_wdev_init(dev); 6324 6337 if (err) 6325 6338 goto fail; ··· 6333 6338 * network device we would call ipw2100_up. This introduced a race 6334 6339 * condition with newer hotplug configurations (network was coming 6335 6340 * up and making calls before the device was initialized). 6336 - * 6337 - * If we called ipw2100_up before we registered the device, then the 6338 - * device name wasn't registered. So, we instead use the net_dev->init 6339 - * member to call a function that then just turns and calls ipw2100_up. 6340 - * net_dev->init is called after name allocation but before the 6341 - * notifier chain is called */ 6341 + */ 6342 6342 err = register_netdev(dev); 6343 6343 if (err) { 6344 6344 printk(KERN_WARNING DRV_NAME
+21 -2
drivers/net/wireless/iwlwifi/iwl-6000.c
··· 35 35 #define IWL6000_UCODE_API_MAX 6 36 36 #define IWL6050_UCODE_API_MAX 5 37 37 #define IWL6000G2_UCODE_API_MAX 6 38 + #define IWL6035_UCODE_API_MAX 6 38 39 39 40 /* Oldest version we won't warn about */ 40 41 #define IWL6000_UCODE_API_OK 4 41 42 #define IWL6000G2_UCODE_API_OK 5 42 43 #define IWL6050_UCODE_API_OK 5 43 44 #define IWL6000G2B_UCODE_API_OK 6 45 + #define IWL6035_UCODE_API_OK 6 44 46 45 47 /* Lowest firmware API version supported */ 46 48 #define IWL6000_UCODE_API_MIN 4 47 49 #define IWL6050_UCODE_API_MIN 4 48 - #define IWL6000G2_UCODE_API_MIN 4 50 + #define IWL6000G2_UCODE_API_MIN 5 51 + #define IWL6035_UCODE_API_MIN 6 49 52 50 53 /* EEPROM versions */ 51 54 #define EEPROM_6000_TX_POWER_VERSION (4) ··· 230 227 IWL_DEVICE_6030, 231 228 }; 232 229 230 + #define IWL_DEVICE_6035 \ 231 + .fw_name_pre = IWL6030_FW_PRE, \ 232 + .ucode_api_max = IWL6035_UCODE_API_MAX, \ 233 + .ucode_api_ok = IWL6035_UCODE_API_OK, \ 234 + .ucode_api_min = IWL6035_UCODE_API_MIN, \ 235 + .device_family = IWL_DEVICE_FAMILY_6030, \ 236 + .max_inst_size = IWL60_RTC_INST_SIZE, \ 237 + .max_data_size = IWL60_RTC_DATA_SIZE, \ 238 + .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \ 239 + .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 240 + .base_params = &iwl6000_g2_base_params, \ 241 + .bt_params = &iwl6000_bt_params, \ 242 + .need_temp_offset_calib = true, \ 243 + .led_mode = IWL_LED_RF_STATE, \ 244 + .adv_pm = true 245 + 233 246 const struct iwl_cfg iwl6035_2agn_cfg = { 234 247 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", 235 - IWL_DEVICE_6030, 248 + IWL_DEVICE_6035, 236 249 .ht_params = &iwl6000_ht_params, 237 250 }; 238 251
+1 -1
drivers/net/wireless/iwlwifi/iwl-agn-sta.c
··· 1267 1267 key_flags |= STA_KEY_MULTICAST_MSK; 1268 1268 1269 1269 sta_cmd.key.key_flags = key_flags; 1270 - sta_cmd.key.key_offset = WEP_INVALID_OFFSET; 1270 + sta_cmd.key.key_offset = keyconf->hw_key_idx; 1271 1271 sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; 1272 1272 sta_cmd.mode = STA_CONTROL_MODIFY_MSK; 1273 1273
+7 -2
drivers/net/wireless/iwlwifi/iwl-drv.c
··· 861 861 862 862 /* We have our copies now, allow OS release its copies */ 863 863 release_firmware(ucode_raw); 864 - complete(&drv->request_firmware_complete); 865 864 866 865 drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw); 867 866 868 867 if (!drv->op_mode) 869 - goto out_free_fw; 868 + goto out_unbind; 870 869 870 + /* 871 + * Complete the firmware request last so that 872 + * a driver unbind (stop) doesn't run while we 873 + * are doing the start() above. 874 + */ 875 + complete(&drv->request_firmware_complete); 871 876 return; 872 877 873 878 try_again:
+9 -9
drivers/net/wireless/iwlwifi/iwl-eeprom.c
··· 568 568 * iwl_get_max_txpower_avg - get the highest tx power from all chains. 569 569 * find the highest tx power from all chains for the channel 570 570 */ 571 - static s8 iwl_get_max_txpower_avg(const struct iwl_cfg *cfg, 571 + static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv, 572 572 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, 573 573 int element, s8 *max_txpower_in_half_dbm) 574 574 { 575 575 s8 max_txpower_avg = 0; /* (dBm) */ 576 576 577 577 /* Take the highest tx power from any valid chains */ 578 - if ((cfg->valid_tx_ant & ANT_A) && 578 + if ((priv->hw_params.valid_tx_ant & ANT_A) && 579 579 (enhanced_txpower[element].chain_a_max > max_txpower_avg)) 580 580 max_txpower_avg = enhanced_txpower[element].chain_a_max; 581 - if ((cfg->valid_tx_ant & ANT_B) && 581 + if ((priv->hw_params.valid_tx_ant & ANT_B) && 582 582 (enhanced_txpower[element].chain_b_max > max_txpower_avg)) 583 583 max_txpower_avg = enhanced_txpower[element].chain_b_max; 584 - if ((cfg->valid_tx_ant & ANT_C) && 584 + if ((priv->hw_params.valid_tx_ant & ANT_C) && 585 585 (enhanced_txpower[element].chain_c_max > max_txpower_avg)) 586 586 max_txpower_avg = enhanced_txpower[element].chain_c_max; 587 - if (((cfg->valid_tx_ant == ANT_AB) | 588 - (cfg->valid_tx_ant == ANT_BC) | 589 - (cfg->valid_tx_ant == ANT_AC)) && 587 + if (((priv->hw_params.valid_tx_ant == ANT_AB) | 588 + (priv->hw_params.valid_tx_ant == ANT_BC) | 589 + (priv->hw_params.valid_tx_ant == ANT_AC)) && 590 590 (enhanced_txpower[element].mimo2_max > max_txpower_avg)) 591 591 max_txpower_avg = enhanced_txpower[element].mimo2_max; 592 - if ((cfg->valid_tx_ant == ANT_ABC) && 592 + if ((priv->hw_params.valid_tx_ant == ANT_ABC) && 593 593 (enhanced_txpower[element].mimo3_max > max_txpower_avg)) 594 594 max_txpower_avg = enhanced_txpower[element].mimo3_max; 595 595 ··· 691 691 ((txp->delta_20_in_40 & 0xf0) >> 4), 692 692 (txp->delta_20_in_40 & 0x0f)); 693 693 694 - max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, 
idx, 694 + max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx, 695 695 &max_txp_avg_halfdbm); 696 696 697 697 /*
+3
drivers/net/wireless/iwlwifi/iwl-mac80211.c
··· 199 199 WIPHY_FLAG_DISABLE_BEACON_HINTS | 200 200 WIPHY_FLAG_IBSS_RSN; 201 201 202 + #ifdef CONFIG_PM_SLEEP 202 203 if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && 203 204 priv->trans->ops->wowlan_suspend && 204 205 device_can_wakeup(priv->trans->dev)) { ··· 218 217 hw->wiphy->wowlan.pattern_max_len = 219 218 IWLAGN_WOWLAN_MAX_PATTERN_LEN; 220 219 } 220 + #endif 221 221 222 222 if (iwlwifi_mod_params.power_save) 223 223 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; ··· 251 249 ret = ieee80211_register_hw(priv->hw); 252 250 if (ret) { 253 251 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); 252 + iwl_leds_exit(priv); 254 253 return ret; 255 254 } 256 255 priv->mac80211_registered = 1;
+1
drivers/net/wireless/iwlwifi/iwl-prph.h
··· 224 224 #define SCD_TXFACT (SCD_BASE + 0x10) 225 225 #define SCD_ACTIVE (SCD_BASE + 0x14) 226 226 #define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) 227 + #define SCD_CHAINEXT_EN (SCD_BASE + 0x244) 227 228 #define SCD_AGGR_SEL (SCD_BASE + 0x248) 228 229 #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) 229 230
+5
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
··· 1058 1058 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, 1059 1059 trans_pcie->scd_bc_tbls.dma >> 10); 1060 1060 1061 + /* The chain extension of the SCD doesn't work well. This feature is 1062 + * enabled by default by the HW, so we need to disable it manually. 1063 + */ 1064 + iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); 1065 + 1061 1066 /* Enable DMA channel */ 1062 1067 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) 1063 1068 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+22
drivers/net/wireless/mac80211_hwsim.c
··· 1555 1555 hdr = (struct ieee80211_hdr *) skb->data; 1556 1556 mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2); 1557 1557 } 1558 + txi->flags |= IEEE80211_TX_STAT_ACK; 1558 1559 } 1559 1560 ieee80211_tx_status_irqsafe(data2->hw, skb); 1560 1561 return 0; ··· 1722 1721 "unregister family %i\n", ret); 1723 1722 } 1724 1723 1724 + static const struct ieee80211_iface_limit hwsim_if_limits[] = { 1725 + { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) }, 1726 + { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) | 1727 + BIT(NL80211_IFTYPE_P2P_CLIENT) | 1728 + #ifdef CONFIG_MAC80211_MESH 1729 + BIT(NL80211_IFTYPE_MESH_POINT) | 1730 + #endif 1731 + BIT(NL80211_IFTYPE_AP) | 1732 + BIT(NL80211_IFTYPE_P2P_GO) }, 1733 + }; 1734 + 1735 + static const struct ieee80211_iface_combination hwsim_if_comb = { 1736 + .limits = hwsim_if_limits, 1737 + .n_limits = ARRAY_SIZE(hwsim_if_limits), 1738 + .max_interfaces = 2048, 1739 + .num_different_channels = 1, 1740 + }; 1741 + 1725 1742 static int __init init_mac80211_hwsim(void) 1726 1743 { 1727 1744 int i, err = 0; ··· 1800 1781 data->addresses[1].addr[0] |= 0x40; 1801 1782 hw->wiphy->n_addresses = 2; 1802 1783 hw->wiphy->addresses = data->addresses; 1784 + 1785 + hw->wiphy->iface_combinations = &hwsim_if_comb; 1786 + hw->wiphy->n_iface_combinations = 1; 1803 1787 1804 1788 if (fake_hw_scan) { 1805 1789 hw->wiphy->max_scan_ssids = 255;
+13
drivers/net/wireless/mwifiex/cfg80211.c
··· 948 948 bss_cfg->ssid.ssid_len = params->ssid_len; 949 949 } 950 950 951 + switch (params->hidden_ssid) { 952 + case NL80211_HIDDEN_SSID_NOT_IN_USE: 953 + bss_cfg->bcast_ssid_ctl = 1; 954 + break; 955 + case NL80211_HIDDEN_SSID_ZERO_LEN: 956 + bss_cfg->bcast_ssid_ctl = 0; 957 + break; 958 + case NL80211_HIDDEN_SSID_ZERO_CONTENTS: 959 + /* firmware doesn't support this type of hidden SSID */ 960 + default: 961 + return -EINVAL; 962 + } 963 + 951 964 if (mwifiex_set_secure_params(priv, bss_cfg, params)) { 952 965 kfree(bss_cfg); 953 966 wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
+6
drivers/net/wireless/mwifiex/fw.h
··· 122 122 #define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42) 123 123 #define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44) 124 124 #define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) 125 + #define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48) 125 126 #define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51) 126 127 #define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60) 127 128 #define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64) ··· 1208 1207 struct host_cmd_tlv_ssid { 1209 1208 struct host_cmd_tlv tlv; 1210 1209 u8 ssid[0]; 1210 + } __packed; 1211 + 1212 + struct host_cmd_tlv_bcast_ssid { 1213 + struct host_cmd_tlv tlv; 1214 + u8 bcast_ctl; 1211 1215 } __packed; 1212 1216 1213 1217 struct host_cmd_tlv_beacon_period {
+10
drivers/net/wireless/mwifiex/uap_cmd.c
··· 132 132 struct host_cmd_tlv_dtim_period *dtim_period; 133 133 struct host_cmd_tlv_beacon_period *beacon_period; 134 134 struct host_cmd_tlv_ssid *ssid; 135 + struct host_cmd_tlv_bcast_ssid *bcast_ssid; 135 136 struct host_cmd_tlv_channel_band *chan_band; 136 137 struct host_cmd_tlv_frag_threshold *frag_threshold; 137 138 struct host_cmd_tlv_rts_threshold *rts_threshold; ··· 154 153 cmd_size += sizeof(struct host_cmd_tlv) + 155 154 bss_cfg->ssid.ssid_len; 156 155 tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len; 156 + 157 + bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv; 158 + bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID); 159 + bcast_ssid->tlv.len = 160 + cpu_to_le16(sizeof(bcast_ssid->bcast_ctl)); 161 + bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl; 162 + cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid); 163 + tlv += sizeof(struct host_cmd_tlv_bcast_ssid); 157 164 } 158 165 if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) { 159 166 chan_band = (struct host_cmd_tlv_channel_band *)tlv; ··· 425 416 if (!bss_cfg) 426 417 return -ENOMEM; 427 418 419 + mwifiex_set_sys_config_invalid_data(bss_cfg); 428 420 bss_cfg->band_cfg = BAND_CONFIG_MANUAL; 429 421 bss_cfg->channel = channel; 430 422
+1 -2
drivers/net/wireless/rt2x00/rt2x00.h
··· 396 396 * for hardware which doesn't support hardware 397 397 * sequence counting. 398 398 */ 399 - spinlock_t seqlock; 400 - u16 seqno; 399 + atomic_t seqno; 401 400 }; 402 401 403 402 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
-1
drivers/net/wireless/rt2x00/rt2x00mac.c
··· 277 277 else 278 278 rt2x00dev->intf_sta_count++; 279 279 280 - spin_lock_init(&intf->seqlock); 281 280 mutex_init(&intf->beacon_skb_mutex); 282 281 intf->beacon = entry; 283 282
+6 -7
drivers/net/wireless/rt2x00/rt2x00queue.c
··· 207 207 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 208 208 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 209 209 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); 210 + u16 seqno; 210 211 211 212 if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) 212 213 return; ··· 239 238 * sequence counting per-frame, since those will override the 240 239 * sequence counter given by mac80211. 241 240 */ 242 - spin_lock(&intf->seqlock); 243 - 244 241 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) 245 - intf->seqno += 0x10; 242 + seqno = atomic_add_return(0x10, &intf->seqno); 243 + else 244 + seqno = atomic_read(&intf->seqno); 245 + 246 246 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 247 - hdr->seq_ctrl |= cpu_to_le16(intf->seqno); 248 - 249 - spin_unlock(&intf->seqlock); 250 - 247 + hdr->seq_ctrl |= cpu_to_le16(seqno); 251 248 } 252 249 253 250 static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
+1 -1
drivers/net/wireless/rtl818x/rtl8187/leds.c
··· 117 117 radio_on = true; 118 118 } else if (radio_on) { 119 119 radio_on = false; 120 - cancel_delayed_work_sync(&priv->led_on); 120 + cancel_delayed_work(&priv->led_on); 121 121 ieee80211_queue_delayed_work(hw, &priv->led_off, 0); 122 122 } 123 123 } else if (radio_on) {
+5
drivers/pci/pci.c
··· 1744 1744 if (target_state == PCI_POWER_ERROR) 1745 1745 return -EIO; 1746 1746 1747 + /* Some devices mustn't be in D3 during system sleep */ 1748 + if (target_state == PCI_D3hot && 1749 + (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)) 1750 + return 0; 1751 + 1747 1752 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev)); 1748 1753 1749 1754 error = pci_set_power_state(dev, target_state);
+26
drivers/pci/quirks.c
··· 2929 2929 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); 2930 2930 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); 2931 2931 2932 + /* 2933 + * The Intel 6 Series/C200 Series chipset's EHCI controllers on many 2934 + * ASUS motherboards will cause memory corruption or a system crash 2935 + * if they are in D3 while the system is put into S3 sleep. 2936 + */ 2937 + static void __devinit asus_ehci_no_d3(struct pci_dev *dev) 2938 + { 2939 + const char *sys_info; 2940 + static const char good_Asus_board[] = "P8Z68-V"; 2941 + 2942 + if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP) 2943 + return; 2944 + if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK) 2945 + return; 2946 + sys_info = dmi_get_system_info(DMI_BOARD_NAME); 2947 + if (sys_info && memcmp(sys_info, good_Asus_board, 2948 + sizeof(good_Asus_board) - 1) == 0) 2949 + return; 2950 + 2951 + dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n"); 2952 + dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP; 2953 + device_set_wakeup_capable(&dev->dev, false); 2954 + } 2955 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3); 2956 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3); 2957 + 2932 2958 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, 2933 2959 struct pci_fixup *end) 2934 2960 {
+1 -1
drivers/pinctrl/core.c
··· 61 61 list_for_each_entry(_maps_node_, &pinctrl_maps, node) \ 62 62 for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \ 63 63 _i_ < _maps_node_->num_maps; \ 64 - i++, _map_ = &_maps_node_->maps[_i_]) 64 + _i_++, _map_ = &_maps_node_->maps[_i_]) 65 65 66 66 /** 67 67 * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support
+16 -18
drivers/pinctrl/pinctrl-imx.c
··· 27 27 #include "core.h" 28 28 #include "pinctrl-imx.h" 29 29 30 - #define IMX_PMX_DUMP(info, p, m, c, n) \ 31 - { \ 32 - int i, j; \ 33 - printk("Format: Pin Mux Config\n"); \ 34 - for (i = 0; i < n; i++) { \ 35 - j = p[i]; \ 36 - printk("%s %d 0x%lx\n", \ 37 - info->pins[j].name, \ 38 - m[i], c[i]); \ 39 - } \ 30 + #define IMX_PMX_DUMP(info, p, m, c, n) \ 31 + { \ 32 + int i, j; \ 33 + printk(KERN_DEBUG "Format: Pin Mux Config\n"); \ 34 + for (i = 0; i < n; i++) { \ 35 + j = p[i]; \ 36 + printk(KERN_DEBUG "%s %d 0x%lx\n", \ 37 + info->pins[j].name, \ 38 + m[i], c[i]); \ 39 + } \ 40 40 } 41 41 42 42 /* The bits in CONFIG cell defined in binding doc*/ ··· 173 173 174 174 /* create mux map */ 175 175 parent = of_get_parent(np); 176 - if (!parent) 176 + if (!parent) { 177 + kfree(new_map); 177 178 return -EINVAL; 179 + } 178 180 new_map[0].type = PIN_MAP_TYPE_MUX_GROUP; 179 181 new_map[0].data.mux.function = parent->name; 180 182 new_map[0].data.mux.group = np->name; ··· 195 193 } 196 194 197 195 dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n", 198 - new_map->data.mux.function, new_map->data.mux.group, map_num); 196 + (*map)->data.mux.function, (*map)->data.mux.group, map_num); 199 197 200 198 return 0; 201 199 } ··· 203 201 static void imx_dt_free_map(struct pinctrl_dev *pctldev, 204 202 struct pinctrl_map *map, unsigned num_maps) 205 203 { 206 - int i; 207 - 208 - for (i = 0; i < num_maps; i++) 209 - kfree(map); 204 + kfree(map); 210 205 } 211 206 212 207 static struct pinctrl_ops imx_pctrl_ops = { ··· 474 475 grp->configs[j] = config & ~IMX_PAD_SION; 475 476 } 476 477 477 - #ifdef DEBUG 478 478 IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins); 479 - #endif 479 + 480 480 return 0; 481 481 } 482 482
+10 -3
drivers/pinctrl/pinctrl-mxs.c
··· 107 107 108 108 /* Compose group name */ 109 109 group = kzalloc(length, GFP_KERNEL); 110 - if (!group) 111 - return -ENOMEM; 110 + if (!group) { 111 + ret = -ENOMEM; 112 + goto free; 113 + } 112 114 snprintf(group, length, "%s.%d", np->name, reg); 113 115 new_map[i].data.mux.group = group; 114 116 i++; ··· 120 118 pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL); 121 119 if (!pconfig) { 122 120 ret = -ENOMEM; 123 - goto free; 121 + goto free_group; 124 122 } 125 123 126 124 new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; ··· 135 133 136 134 return 0; 137 135 136 + free_group: 137 + if (!purecfg) 138 + free(group); 138 139 free: 139 140 kfree(new_map); 140 141 return ret; ··· 516 511 return 0; 517 512 518 513 err: 514 + platform_set_drvdata(pdev, NULL); 519 515 iounmap(d->base); 520 516 return ret; 521 517 } ··· 526 520 { 527 521 struct mxs_pinctrl_data *d = platform_get_drvdata(pdev); 528 522 523 + platform_set_drvdata(pdev, NULL); 529 524 pinctrl_unregister(d->pctl); 530 525 iounmap(d->base); 531 526
+2 -1
drivers/pinctrl/pinctrl-nomadik.c
··· 673 673 * wakeup is anyhow controlled by the RIMSC and FIMSC registers. 674 674 */ 675 675 if (nmk_chip->sleepmode && on) { 676 - __nmk_gpio_set_slpm(nmk_chip, gpio % nmk_chip->chip.base, 676 + __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP, 677 677 NMK_GPIO_SLPM_WAKEUP_ENABLE); 678 678 } 679 679 ··· 1246 1246 ret = PTR_ERR(clk); 1247 1247 goto out_unmap; 1248 1248 } 1249 + clk_prepare(clk); 1249 1250 1250 1251 nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL); 1251 1252 if (!nmk_chip) {
+1 -1
drivers/pinctrl/pinctrl-sirf.c
··· 1184 1184 return ret; 1185 1185 } 1186 1186 1187 - static const struct of_device_id pinmux_ids[] = { 1187 + static const struct of_device_id pinmux_ids[] __devinitconst = { 1188 1188 { .compatible = "sirf,prima2-gpio-pinmux" }, 1189 1189 {} 1190 1190 };
+1 -1
drivers/platform/x86/acerhdf.c
··· 5 5 * 6 6 * (C) 2009 - Peter Feuerer peter (a) piie.net 7 7 * http://piie.net 8 - * 2009 Borislav Petkov <petkovbb@gmail.com> 8 + * 2009 Borislav Petkov bp (a) alien8.de 9 9 * 10 10 * Inspired by and many thanks to: 11 11 * o acerfand - Rachel Greenham
+1 -1
drivers/regulator/anatop-regulator.c
··· 224 224 .of_match_table = of_anatop_regulator_match_tbl, 225 225 }, 226 226 .probe = anatop_regulator_probe, 227 - .remove = anatop_regulator_remove, 227 + .remove = __devexit_p(anatop_regulator_remove), 228 228 }; 229 229 230 230 static int __init anatop_regulator_init(void)
+3
drivers/regulator/core.c
··· 2050 2050 return -EINVAL; 2051 2051 } 2052 2052 2053 + if (min_uV < rdev->desc->min_uV) 2054 + min_uV = rdev->desc->min_uV; 2055 + 2053 2056 ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); 2054 2057 if (ret < 0) 2055 2058 return ret;
+10 -6
drivers/regulator/gpio-regulator.c
··· 101 101 } 102 102 103 103 static int gpio_regulator_set_value(struct regulator_dev *dev, 104 - int min, int max) 104 + int min, int max, unsigned *selector) 105 105 { 106 106 struct gpio_regulator_data *data = rdev_get_drvdata(dev); 107 - int ptr, target, state, best_val = INT_MAX; 107 + int ptr, target = 0, state, best_val = INT_MAX; 108 108 109 109 for (ptr = 0; ptr < data->nr_states; ptr++) 110 110 if (data->states[ptr].value < best_val && 111 111 data->states[ptr].value >= min && 112 - data->states[ptr].value <= max) 112 + data->states[ptr].value <= max) { 113 113 target = data->states[ptr].gpios; 114 + best_val = data->states[ptr].value; 115 + if (selector) 116 + *selector = ptr; 117 + } 114 118 115 119 if (best_val == INT_MAX) 116 120 return -EINVAL; ··· 132 128 int min_uV, int max_uV, 133 129 unsigned *selector) 134 130 { 135 - return gpio_regulator_set_value(dev, min_uV, max_uV); 131 + return gpio_regulator_set_value(dev, min_uV, max_uV, selector); 136 132 } 137 133 138 134 static int gpio_regulator_list_voltage(struct regulator_dev *dev, ··· 149 145 static int gpio_regulator_set_current_limit(struct regulator_dev *dev, 150 146 int min_uA, int max_uA) 151 147 { 152 - return gpio_regulator_set_value(dev, min_uA, max_uA); 148 + return gpio_regulator_set_value(dev, min_uA, max_uA, NULL); 153 149 } 154 150 155 151 static struct regulator_ops gpio_regulator_voltage_ops = { ··· 290 286 291 287 cfg.dev = &pdev->dev; 292 288 cfg.init_data = config->init_data; 293 - cfg.driver_data = &drvdata; 289 + cfg.driver_data = drvdata; 294 290 295 291 drvdata->dev = regulator_register(&drvdata->desc, &cfg); 296 292 if (IS_ERR(drvdata->dev)) {
+1
drivers/regulator/max8649.c
··· 259 259 config.dev = &client->dev; 260 260 config.init_data = pdata->regulator; 261 261 config.driver_data = info; 262 + config.regmap = info->regmap; 262 263 263 264 info->regulator = regulator_register(&dcdc_desc, &config); 264 265 if (IS_ERR(info->regulator)) {
-7
drivers/regulator/palmas-regulator.c
··· 775 775 err_unregister_regulator: 776 776 while (--id >= 0) 777 777 regulator_unregister(pmic->rdev[id]); 778 - kfree(pmic->rdev); 779 - kfree(pmic->desc); 780 - kfree(pmic); 781 778 return ret; 782 779 } 783 780 ··· 785 788 786 789 for (id = 0; id < PALMAS_NUM_REGS; id++) 787 790 regulator_unregister(pmic->rdev[id]); 788 - 789 - kfree(pmic->rdev); 790 - kfree(pmic->desc); 791 - kfree(pmic); 792 791 return 0; 793 792 } 794 793
+1 -1
drivers/scsi/mpt2sas/mpt2sas_base.c
··· 1792 1792 static inline u8 1793 1793 _base_get_msix_index(struct MPT2SAS_ADAPTER *ioc) 1794 1794 { 1795 - return ioc->cpu_msix_table[smp_processor_id()]; 1795 + return ioc->cpu_msix_table[raw_smp_processor_id()]; 1796 1796 } 1797 1797 1798 1798 /**
+5 -6
drivers/scsi/qla2xxx/qla_target.c
··· 26 26 #include <linux/module.h> 27 27 #include <linux/init.h> 28 28 #include <linux/types.h> 29 - #include <linux/version.h> 30 29 #include <linux/blkdev.h> 31 30 #include <linux/interrupt.h> 32 31 #include <linux/pci.h> ··· 2476 2477 } 2477 2478 2478 2479 cmd = qlt_ctio_to_cmd(vha, handle, ctio); 2479 - if (cmd == NULL) { 2480 - if (status != CTIO_SUCCESS) 2481 - qlt_term_ctio_exchange(vha, ctio, NULL, status); 2480 + if (cmd == NULL) 2482 2481 return; 2483 - } 2482 + 2484 2483 se_cmd = &cmd->se_cmd; 2485 2484 tfo = se_cmd->se_tfo; 2486 2485 ··· 2724 2727 out_term: 2725 2728 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd); 2726 2729 /* 2727 - * cmd has not sent to target yet, so pass NULL as the second argument 2730 + * cmd has not sent to target yet, so pass NULL as the second 2731 + * argument to qlt_send_term_exchange() and free the memory here. 2728 2732 */ 2729 2733 spin_lock_irqsave(&ha->hardware_lock, flags); 2730 2734 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); 2735 + kmem_cache_free(qla_tgt_cmd_cachep, cmd); 2731 2736 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2732 2737 if (sess) 2733 2738 ha->tgt.tgt_ops->put_sess(sess);
-1
drivers/scsi/qla2xxx/qla_target.h
··· 919 919 #define QLA_TGT_XMIT_STATUS 2 920 920 #define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) 921 921 922 - #include <linux/version.h> 923 922 924 923 extern struct qla_tgt_data qla_target; 925 924 /*
+58 -94
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 137 137 */ 138 138 static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm) 139 139 { 140 - unsigned int i, j, value; 140 + unsigned int i, j; 141 141 u8 wwn[8]; 142 142 143 143 memset(wwn, 0, sizeof(wwn)); 144 144 145 145 /* Validate and store the new name */ 146 146 for (i = 0, j = 0; i < 16; i++) { 147 + int value; 148 + 147 149 value = hex_to_bin(*ns++); 148 150 if (value >= 0) 149 151 j = (j << 4) | value; ··· 654 652 /* 655 653 * Called from qla_target.c:qlt_issue_task_mgmt() 656 654 */ 657 - int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun, 658 - uint8_t tmr_func, uint32_t tag) 655 + static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun, 656 + uint8_t tmr_func, uint32_t tag) 659 657 { 660 658 struct qla_tgt_sess *sess = mcmd->sess; 661 659 struct se_cmd *se_cmd = &mcmd->se_cmd; ··· 764 762 struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; 765 763 struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; 766 764 767 - static int tcm_qla2xxx_setup_nacl_from_rport( 768 - struct se_portal_group *se_tpg, 769 - struct se_node_acl *se_nacl, 770 - struct tcm_qla2xxx_lport *lport, 771 - struct tcm_qla2xxx_nacl *nacl, 772 - u64 rport_wwnn) 773 - { 774 - struct scsi_qla_host *vha = lport->qla_vha; 775 - struct Scsi_Host *sh = vha->host; 776 - struct fc_host_attrs *fc_host = shost_to_fc_host(sh); 777 - struct fc_rport *rport; 778 - unsigned long flags; 779 - void *node; 780 - int rc; 781 - 782 - /* 783 - * Scan the existing rports, and create a session for the 784 - * explict NodeACL is an matching rport->node_name already 785 - * exists. 
786 - */ 787 - spin_lock_irqsave(sh->host_lock, flags); 788 - list_for_each_entry(rport, &fc_host->rports, peers) { 789 - if (rport_wwnn != rport->node_name) 790 - continue; 791 - 792 - pr_debug("Located existing rport_wwpn and rport->node_name: 0x%016LX, port_id: 0x%04x\n", 793 - rport->node_name, rport->port_id); 794 - nacl->nport_id = rport->port_id; 795 - 796 - spin_unlock_irqrestore(sh->host_lock, flags); 797 - 798 - spin_lock_irqsave(&vha->hw->hardware_lock, flags); 799 - node = btree_lookup32(&lport->lport_fcport_map, rport->port_id); 800 - if (node) { 801 - rc = btree_update32(&lport->lport_fcport_map, 802 - rport->port_id, se_nacl); 803 - } else { 804 - rc = btree_insert32(&lport->lport_fcport_map, 805 - rport->port_id, se_nacl, 806 - GFP_ATOMIC); 807 - } 808 - spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 809 - 810 - if (rc) { 811 - pr_err("Unable to insert se_nacl into fcport_map"); 812 - WARN_ON(rc > 0); 813 - return rc; 814 - } 815 - 816 - pr_debug("Inserted into fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%08x\n", 817 - se_nacl, rport_wwnn, nacl->nport_id); 818 - 819 - return 1; 820 - } 821 - spin_unlock_irqrestore(sh->host_lock, flags); 822 - 823 - return 0; 824 - } 825 - 765 + static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, 766 + struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *); 826 767 /* 827 768 * Expected to be called with struct qla_hw_data->hardware_lock held 828 769 */ ··· 787 842 788 843 pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", 789 844 se_nacl, nacl->nport_wwnn, nacl->nport_id); 845 + /* 846 + * Now clear the se_nacl and session pointers from our HW lport lookup 847 + * table mapping for this initiator's fabric S_ID and LOOP_ID entries. 
848 + * 849 + * This is done ahead of callbacks into tcm_qla2xxx_free_session() -> 850 + * target_wait_for_sess_cmds() before the session waits for outstanding 851 + * I/O to complete, to avoid a race between session shutdown execution 852 + * and incoming ATIOs or TMRs picking up a stale se_node_act reference. 853 + */ 854 + tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); 855 + } 856 + 857 + static void tcm_qla2xxx_release_session(struct kref *kref) 858 + { 859 + struct se_session *se_sess = container_of(kref, 860 + struct se_session, sess_kref); 861 + 862 + qlt_unreg_sess(se_sess->fabric_sess_ptr); 863 + } 864 + 865 + static void tcm_qla2xxx_put_session(struct se_session *se_sess) 866 + { 867 + struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr; 868 + struct qla_hw_data *ha = sess->vha->hw; 869 + unsigned long flags; 870 + 871 + spin_lock_irqsave(&ha->hardware_lock, flags); 872 + kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session); 873 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 790 874 } 791 875 792 876 static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) 793 877 { 794 - target_put_session(sess->se_sess); 878 + tcm_qla2xxx_put_session(sess->se_sess); 795 879 } 796 880 797 881 static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) ··· 833 859 struct config_group *group, 834 860 const char *name) 835 861 { 836 - struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; 837 - struct tcm_qla2xxx_lport *lport = container_of(se_wwn, 838 - struct tcm_qla2xxx_lport, lport_wwn); 839 862 struct se_node_acl *se_nacl, *se_nacl_new; 840 863 struct tcm_qla2xxx_nacl *nacl; 841 864 u64 wwnn; 842 865 u32 qla2xxx_nexus_depth; 843 - int rc; 844 866 845 867 if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) 846 868 return ERR_PTR(-EINVAL); ··· 863 893 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); 864 894 nacl->nport_wwnn = wwnn; 865 895 tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); 866 - /* 867 - * Setup a se_nacl 
handle based on an a matching struct fc_rport setup 868 - * via drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() 869 - */ 870 - rc = tcm_qla2xxx_setup_nacl_from_rport(se_tpg, se_nacl, lport, 871 - nacl, wwnn); 872 - if (rc < 0) { 873 - tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new); 874 - return ERR_PTR(rc); 875 - } 876 896 877 897 return se_nacl; 878 898 } ··· 1350 1390 nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname); 1351 1391 } 1352 1392 1393 + /* 1394 + * Should always be called with qla_hw_data->hardware_lock held. 1395 + */ 1396 + static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, 1397 + struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess) 1398 + { 1399 + struct se_session *se_sess = sess->se_sess; 1400 + unsigned char be_sid[3]; 1401 + 1402 + be_sid[0] = sess->s_id.b.domain; 1403 + be_sid[1] = sess->s_id.b.area; 1404 + be_sid[2] = sess->s_id.b.al_pa; 1405 + 1406 + tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, 1407 + sess, be_sid); 1408 + tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, 1409 + sess, sess->loop_id); 1410 + } 1411 + 1353 1412 static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) 1354 1413 { 1355 1414 struct qla_tgt *tgt = sess->tgt; ··· 1377 1398 struct se_node_acl *se_nacl; 1378 1399 struct tcm_qla2xxx_lport *lport; 1379 1400 struct tcm_qla2xxx_nacl *nacl; 1380 - unsigned char be_sid[3]; 1381 - unsigned long flags; 1382 1401 1383 1402 BUG_ON(in_interrupt()); 1384 1403 ··· 1396 1419 return; 1397 1420 } 1398 1421 target_wait_for_sess_cmds(se_sess, 0); 1399 - /* 1400 - * And now clear the se_nacl and session pointers from our HW lport 1401 - * mappings for fabric S_ID and LOOP_ID. 
1402 - */ 1403 - memset(&be_sid, 0, 3); 1404 - be_sid[0] = sess->s_id.b.domain; 1405 - be_sid[1] = sess->s_id.b.area; 1406 - be_sid[2] = sess->s_id.b.al_pa; 1407 - 1408 - spin_lock_irqsave(&ha->hardware_lock, flags); 1409 - tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, 1410 - sess, be_sid); 1411 - tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, 1412 - sess, sess->loop_id); 1413 - spin_unlock_irqrestore(&ha->hardware_lock, flags); 1414 1422 1415 1423 transport_deregister_session_configfs(sess->se_sess); 1416 1424 transport_deregister_session(sess->se_sess); ··· 1693 1731 .new_cmd_map = NULL, 1694 1732 .check_stop_free = tcm_qla2xxx_check_stop_free, 1695 1733 .release_cmd = tcm_qla2xxx_release_cmd, 1734 + .put_session = tcm_qla2xxx_put_session, 1696 1735 .shutdown_session = tcm_qla2xxx_shutdown_session, 1697 1736 .close_session = tcm_qla2xxx_close_session, 1698 1737 .sess_get_index = tcm_qla2xxx_sess_get_index, ··· 1742 1779 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, 1743 1780 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, 1744 1781 .release_cmd = tcm_qla2xxx_release_cmd, 1782 + .put_session = tcm_qla2xxx_put_session, 1745 1783 .shutdown_session = tcm_qla2xxx_shutdown_session, 1746 1784 .close_session = tcm_qla2xxx_close_session, 1747 1785 .sess_get_index = tcm_qla2xxx_sess_get_index,
+1 -3
drivers/scsi/scsi.c
··· 90 90 EXPORT_SYMBOL(scsi_logging_level); 91 91 #endif 92 92 93 - #if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD) 94 - /* sd and scsi_pm need to coordinate flushing async actions */ 93 + /* sd, scsi core and power management need to coordinate flushing async actions */ 95 94 LIST_HEAD(scsi_sd_probe_domain); 96 95 EXPORT_SYMBOL(scsi_sd_probe_domain); 97 - #endif 98 96 99 97 /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. 100 98 * You may not alter any existing entry (although adding new ones is
+4 -4
drivers/target/sbp/sbp_target.c
··· 587 587 { 588 588 struct sbp_tport *tport = agent->tport; 589 589 struct sbp_tpg *tpg = tport->tpg; 590 - int login_id; 590 + int id; 591 591 struct sbp_login_descriptor *login; 592 592 593 - login_id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); 593 + id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); 594 594 595 - login = sbp_login_find_by_id(tpg, login_id); 595 + login = sbp_login_find_by_id(tpg, id); 596 596 if (!login) { 597 - pr_warn("cannot find login: %d\n", login_id); 597 + pr_warn("cannot find login: %d\n", id); 598 598 599 599 req->status.status = cpu_to_be32( 600 600 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+3 -2
drivers/target/target_core_alua.c
··· 374 374 375 375 out: 376 376 transport_kunmap_data_sg(cmd); 377 - target_complete_cmd(cmd, GOOD); 378 - return 0; 377 + if (!rc) 378 + target_complete_cmd(cmd, GOOD); 379 + return rc; 379 380 } 380 381 381 382 static inline int core_alua_state_nonoptimized(
+17 -53
drivers/target/target_core_file.c
··· 133 133 ret = PTR_ERR(dev_p); 134 134 goto fail; 135 135 } 136 - 137 - /* O_DIRECT too? */ 138 - flags = O_RDWR | O_CREAT | O_LARGEFILE; 139 - 140 136 /* 141 - * If fd_buffered_io=1 has not been set explicitly (the default), 142 - * use O_SYNC to force FILEIO writes to disk. 137 + * Use O_DSYNC by default instead of O_SYNC to forgo syncing 138 + * of pure timestamp updates. 143 139 */ 144 - if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO)) 145 - flags |= O_SYNC; 140 + flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; 146 141 147 142 file = filp_open(dev_p, flags, 0600); 148 143 if (IS_ERR(file)) { ··· 375 380 } 376 381 } 377 382 378 - static void fd_emulate_write_fua(struct se_cmd *cmd) 379 - { 380 - struct se_device *dev = cmd->se_dev; 381 - struct fd_dev *fd_dev = dev->dev_ptr; 382 - loff_t start = cmd->t_task_lba * 383 - dev->se_sub_dev->se_dev_attrib.block_size; 384 - loff_t end = start + cmd->data_length; 385 - int ret; 386 - 387 - pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", 388 - cmd->t_task_lba, cmd->data_length); 389 - 390 - ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); 391 - if (ret != 0) 392 - pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); 393 - } 394 - 395 383 static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 396 384 u32 sgl_nents, enum dma_data_direction data_direction) 397 385 { ··· 389 411 ret = fd_do_readv(cmd, sgl, sgl_nents); 390 412 } else { 391 413 ret = fd_do_writev(cmd, sgl, sgl_nents); 392 - 414 + /* 415 + * Perform implict vfs_fsync_range() for fd_do_writev() ops 416 + * for SCSI WRITEs with Forced Unit Access (FUA) set. 417 + * Allow this to happen independent of WCE=0 setting. 
418 + */ 393 419 if (ret > 0 && 394 - dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && 395 420 dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 396 421 (cmd->se_cmd_flags & SCF_FUA)) { 397 - /* 398 - * We might need to be a bit smarter here 399 - * and return some sense data to let the initiator 400 - * know the FUA WRITE cache sync failed..? 401 - */ 402 - fd_emulate_write_fua(cmd); 403 - } 422 + struct fd_dev *fd_dev = dev->dev_ptr; 423 + loff_t start = cmd->t_task_lba * 424 + dev->se_sub_dev->se_dev_attrib.block_size; 425 + loff_t end = start + cmd->data_length; 404 426 427 + vfs_fsync_range(fd_dev->fd_file, start, end, 1); 428 + } 405 429 } 406 430 407 431 if (ret < 0) { ··· 422 442 static match_table_t tokens = { 423 443 {Opt_fd_dev_name, "fd_dev_name=%s"}, 424 444 {Opt_fd_dev_size, "fd_dev_size=%s"}, 425 - {Opt_fd_buffered_io, "fd_buffered_io=%d"}, 426 445 {Opt_err, NULL} 427 446 }; 428 447 ··· 433 454 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 434 455 char *orig, *ptr, *arg_p, *opts; 435 456 substring_t args[MAX_OPT_ARGS]; 436 - int ret = 0, arg, token; 457 + int ret = 0, token; 437 458 438 459 opts = kstrdup(page, GFP_KERNEL); 439 460 if (!opts) ··· 477 498 " bytes\n", fd_dev->fd_dev_size); 478 499 fd_dev->fbd_flags |= FBDF_HAS_SIZE; 479 500 break; 480 - case Opt_fd_buffered_io: 481 - match_int(args, &arg); 482 - if (arg != 1) { 483 - pr_err("bogus fd_buffered_io=%d value\n", arg); 484 - ret = -EINVAL; 485 - goto out; 486 - } 487 - 488 - pr_debug("FILEIO: Using buffered I/O" 489 - " operations for struct fd_dev\n"); 490 - 491 - fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; 492 - break; 493 501 default: 494 502 break; 495 503 } ··· 508 542 ssize_t bl = 0; 509 543 510 544 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); 511 - bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n", 512 - fd_dev->fd_dev_name, fd_dev->fd_dev_size, 513 - (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ? 
514 - "Buffered" : "Synchronous"); 545 + bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n", 546 + fd_dev->fd_dev_name, fd_dev->fd_dev_size); 515 547 return bl; 516 548 } 517 549
-1
drivers/target/target_core_file.h
··· 14 14 15 15 #define FBDF_HAS_PATH 0x01 16 16 #define FBDF_HAS_SIZE 0x02 17 - #define FDBD_USE_BUFFERED_IO 0x04 18 17 19 18 struct fd_dev { 20 19 u32 fbd_flags;
+7 -1
drivers/target/target_core_transport.c
··· 315 315 } 316 316 EXPORT_SYMBOL(transport_register_session); 317 317 318 - static void target_release_session(struct kref *kref) 318 + void target_release_session(struct kref *kref) 319 319 { 320 320 struct se_session *se_sess = container_of(kref, 321 321 struct se_session, sess_kref); ··· 332 332 333 333 void target_put_session(struct se_session *se_sess) 334 334 { 335 + struct se_portal_group *tpg = se_sess->se_tpg; 336 + 337 + if (tpg->se_tpg_tfo->put_session != NULL) { 338 + tpg->se_tpg_tfo->put_session(se_sess); 339 + return; 340 + } 335 341 kref_put(&se_sess->sess_kref, target_release_session); 336 342 } 337 343 EXPORT_SYMBOL(target_put_session);
+17 -14
drivers/tty/hvc/hvc_xen.c
··· 214 214 /* already configured */ 215 215 if (info->intf != NULL) 216 216 return 0; 217 - 217 + /* 218 + * If the toolstack (or the hypervisor) hasn't set these values, the 219 + * default value is 0. Even though mfn = 0 and evtchn = 0 are 220 + * theoretically correct values, in practice they never are and they 221 + * mean that a legacy toolstack hasn't initialized the pv console correctly. 222 + */ 218 223 r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); 219 - if (r < 0) { 220 - kfree(info); 221 - return -ENODEV; 222 - } 224 + if (r < 0 || v == 0) 225 + goto err; 223 226 info->evtchn = v; 224 - hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v); 225 - if (r < 0) { 226 - kfree(info); 227 - return -ENODEV; 228 - } 227 + v = 0; 228 + r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v); 229 + if (r < 0 || v == 0) 230 + goto err; 229 231 mfn = v; 230 232 info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE); 231 - if (info->intf == NULL) { 232 - kfree(info); 233 - return -ENODEV; 234 - } 233 + if (info->intf == NULL) 234 + goto err; 235 235 info->vtermno = HVC_COOKIE; 236 236 237 237 spin_lock(&xencons_lock); ··· 239 239 spin_unlock(&xencons_lock); 240 240 241 241 return 0; 242 + err: 243 + kfree(info); 244 + return -ENODEV; 242 245 } 243 246 244 247 static int xen_pv_console_init(void)
+24 -14
drivers/tty/serial/sh-sci.c
··· 2179 2179 return 0; 2180 2180 } 2181 2181 2182 + static void sci_cleanup_single(struct sci_port *port) 2183 + { 2184 + sci_free_gpios(port); 2185 + 2186 + clk_put(port->iclk); 2187 + clk_put(port->fclk); 2188 + 2189 + pm_runtime_disable(port->port.dev); 2190 + } 2191 + 2182 2192 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE 2183 2193 static void serial_console_putchar(struct uart_port *port, int ch) 2184 2194 { ··· 2370 2360 cpufreq_unregister_notifier(&port->freq_transition, 2371 2361 CPUFREQ_TRANSITION_NOTIFIER); 2372 2362 2373 - sci_free_gpios(port); 2374 - 2375 2363 uart_remove_one_port(&sci_uart_driver, &port->port); 2376 2364 2377 - clk_put(port->iclk); 2378 - clk_put(port->fclk); 2365 + sci_cleanup_single(port); 2379 2366 2380 - pm_runtime_disable(&dev->dev); 2381 2367 return 0; 2382 2368 } 2383 2369 ··· 2391 2385 index+1, SCI_NPORTS); 2392 2386 dev_notice(&dev->dev, "Consider bumping " 2393 2387 "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); 2394 - return 0; 2388 + return -EINVAL; 2395 2389 } 2396 2390 2397 2391 ret = sci_init_single(dev, sciport, index, p); 2398 2392 if (ret) 2399 2393 return ret; 2400 2394 2401 - return uart_add_one_port(&sci_uart_driver, &sciport->port); 2395 + ret = uart_add_one_port(&sci_uart_driver, &sciport->port); 2396 + if (ret) { 2397 + sci_cleanup_single(sciport); 2398 + return ret; 2399 + } 2400 + 2401 + return 0; 2402 2402 } 2403 2403 2404 2404 static int __devinit sci_probe(struct platform_device *dev) ··· 2425 2413 2426 2414 ret = sci_probe_single(dev, dev->id, p, sp); 2427 2415 if (ret) 2428 - goto err_unreg; 2416 + return ret; 2429 2417 2430 2418 sp->freq_transition.notifier_call = sci_notifier; 2431 2419 2432 2420 ret = cpufreq_register_notifier(&sp->freq_transition, 2433 2421 CPUFREQ_TRANSITION_NOTIFIER); 2434 - if (unlikely(ret < 0)) 2435 - goto err_unreg; 2422 + if (unlikely(ret < 0)) { 2423 + sci_cleanup_single(sp); 2424 + return ret; 2425 + } 2436 2426 2437 2427 #ifdef CONFIG_SH_STANDARD_BIOS 2438 2428 sh_bios_gdb_detach(); 2439 2429 
#endif 2440 2430 2441 2431 return 0; 2442 - 2443 - err_unreg: 2444 - sci_remove(dev); 2445 - return ret; 2446 2432 } 2447 2433 2448 2434 static int sci_suspend(struct device *dev)
+8
drivers/usb/class/cdc-acm.c
··· 567 567 568 568 usb_autopm_put_interface(acm->control); 569 569 570 + /* 571 + * Unthrottle device in case the TTY was closed while throttled. 572 + */ 573 + spin_lock_irq(&acm->read_lock); 574 + acm->throttled = 0; 575 + acm->throttle_req = 0; 576 + spin_unlock_irq(&acm->read_lock); 577 + 570 578 if (acm_submit_read_urbs(acm, GFP_KERNEL)) 571 579 goto error_submit_read_urbs; 572 580
+9
drivers/usb/class/cdc-wdm.c
··· 55 55 .bInterfaceSubClass = 1, 56 56 .bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */ 57 57 }, 58 + { 59 + /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */ 60 + .match_flags = USB_DEVICE_ID_MATCH_VENDOR | 61 + USB_DEVICE_ID_MATCH_INT_INFO, 62 + .idVendor = HUAWEI_VENDOR_ID, 63 + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, 64 + .bInterfaceSubClass = 1, 65 + .bInterfaceProtocol = 57, /* NOTE: CDC ECM control interface! */ 66 + }, 58 67 { } 59 68 }; 60 69
-9
drivers/usb/core/hcd-pci.c
··· 493 493 494 494 pci_save_state(pci_dev); 495 495 496 - /* 497 - * Some systems crash if an EHCI controller is in D3 during 498 - * a sleep transition. We have to leave such controllers in D0. 499 - */ 500 - if (hcd->broken_pci_sleep) { 501 - dev_dbg(dev, "Staying in PCI D0\n"); 502 - return retval; 503 - } 504 - 505 496 /* If the root hub is dead rather than suspended, disallow remote 506 497 * wakeup. usb_hc_died() should ensure that both hosts are marked as 507 498 * dying, so we only need to check the primary roothub.
+1 -1
drivers/usb/core/hub.c
··· 3379 3379 return 0; 3380 3380 3381 3381 udev->lpm_disable_count++; 3382 - if ((udev->u1_params.timeout == 0 && udev->u1_params.timeout == 0)) 3382 + if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0)) 3383 3383 return 0; 3384 3384 3385 3385 /* If LPM is enabled, attempt to disable it. */
+2 -1
drivers/usb/core/message.c
··· 1838 1838 intfc = cp->intf_cache[i]; 1839 1839 intf->altsetting = intfc->altsetting; 1840 1840 intf->num_altsetting = intfc->num_altsetting; 1841 - intf->intf_assoc = find_iad(dev, cp, i); 1842 1841 kref_get(&intfc->ref); 1843 1842 1844 1843 alt = usb_altnum_to_altsetting(intf, 0); ··· 1850 1851 if (!alt) 1851 1852 alt = &intf->altsetting[0]; 1852 1853 1854 + intf->intf_assoc = 1855 + find_iad(dev, cp, alt->desc.bInterfaceNumber); 1853 1856 intf->cur_altsetting = alt; 1854 1857 usb_enable_interface(dev, intf, true); 1855 1858 intf->dev.parent = &dev->dev;
-6
drivers/usb/gadget/atmel_usba_udc.c
··· 599 599 600 600 spin_lock_irqsave(&ep->udc->lock, flags); 601 601 602 - if (ep->ep.desc) { 603 - spin_unlock_irqrestore(&ep->udc->lock, flags); 604 - DBG(DBG_ERR, "ep%d already enabled\n", ep->index); 605 - return -EBUSY; 606 - } 607 - 608 602 ep->ep.desc = desc; 609 603 ep->ep.maxpacket = maxpacket; 610 604
+1 -1
drivers/usb/gadget/fsl_qe_udc.c
··· 1596 1596 ep = container_of(_ep, struct qe_ep, ep); 1597 1597 1598 1598 /* catch various bogus parameters */ 1599 - if (!_ep || !desc || ep->ep.desc || _ep->name == ep_name[0] || 1599 + if (!_ep || !desc || _ep->name == ep_name[0] || 1600 1600 (desc->bDescriptorType != USB_DT_ENDPOINT)) 1601 1601 return -EINVAL; 1602 1602
+2 -2
drivers/usb/gadget/fsl_udc_core.c
··· 567 567 ep = container_of(_ep, struct fsl_ep, ep); 568 568 569 569 /* catch various bogus parameters */ 570 - if (!_ep || !desc || ep->ep.desc 570 + if (!_ep || !desc 571 571 || (desc->bDescriptorType != USB_DT_ENDPOINT)) 572 572 return -EINVAL; 573 573 ··· 2575 2575 /* for ep0: the desc defined here; 2576 2576 * for other eps, gadget layer called ep_enable with defined desc 2577 2577 */ 2578 - udc_controller->eps[0].desc = &fsl_ep0_desc; 2578 + udc_controller->eps[0].ep.desc = &fsl_ep0_desc; 2579 2579 udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD; 2580 2580 2581 2581 /* setup the udc->eps[] for non-control endpoints and link
+2 -2
drivers/usb/gadget/fsl_usb2_udc.h
··· 568 568 /* 569 569 * ### internal used help routines. 570 570 */ 571 - #define ep_index(EP) ((EP)->desc->bEndpointAddress&0xF) 571 + #define ep_index(EP) ((EP)->ep.desc->bEndpointAddress&0xF) 572 572 #define ep_maxpacket(EP) ((EP)->ep.maxpacket) 573 573 #define ep_is_in(EP) ( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \ 574 - USB_DIR_IN ):((EP)->desc->bEndpointAddress \ 574 + USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \ 575 575 & USB_DIR_IN)==USB_DIR_IN) 576 576 #define get_ep_by_pipe(udc, pipe) ((pipe == 1)? &udc->eps[0]: \ 577 577 &udc->eps[pipe])
+1 -1
drivers/usb/gadget/goku_udc.c
··· 102 102 unsigned long flags; 103 103 104 104 ep = container_of(_ep, struct goku_ep, ep); 105 - if (!_ep || !desc || ep->ep.desc 105 + if (!_ep || !desc 106 106 || desc->bDescriptorType != USB_DT_ENDPOINT) 107 107 return -EINVAL; 108 108 dev = ep->dev;
+1 -1
drivers/usb/gadget/mv_udc_core.c
··· 464 464 ep = container_of(_ep, struct mv_ep, ep); 465 465 udc = ep->udc; 466 466 467 - if (!_ep || !desc || ep->ep.desc 467 + if (!_ep || !desc 468 468 || desc->bDescriptorType != USB_DT_ENDPOINT) 469 469 return -EINVAL; 470 470
+1 -1
drivers/usb/gadget/omap_udc.c
··· 153 153 u16 maxp; 154 154 155 155 /* catch various bogus parameters */ 156 - if (!_ep || !desc || ep->ep.desc 156 + if (!_ep || !desc 157 157 || desc->bDescriptorType != USB_DT_ENDPOINT 158 158 || ep->bEndpointAddress != desc->bEndpointAddress 159 159 || ep->maxpacket < usb_endpoint_maxp(desc)) {
+1 -1
drivers/usb/gadget/pxa25x_udc.c
··· 218 218 struct pxa25x_udc *dev; 219 219 220 220 ep = container_of (_ep, struct pxa25x_ep, ep); 221 - if (!_ep || !desc || ep->ep.desc || _ep->name == ep0name 221 + if (!_ep || !desc || _ep->name == ep0name 222 222 || desc->bDescriptorType != USB_DT_ENDPOINT 223 223 || ep->bEndpointAddress != desc->bEndpointAddress 224 224 || ep->fifo_size < usb_endpoint_maxp (desc)) {
+1 -1
drivers/usb/gadget/s3c-hsudc.c
··· 760 760 u32 ecr = 0; 761 761 762 762 hsep = our_ep(_ep); 763 - if (!_ep || !desc || hsep->ep.desc || _ep->name == ep0name 763 + if (!_ep || !desc || _ep->name == ep0name 764 764 || desc->bDescriptorType != USB_DT_ENDPOINT 765 765 || hsep->bEndpointAddress != desc->bEndpointAddress 766 766 || ep_maxpacket(hsep) < usb_endpoint_maxp(desc))
+1 -1
drivers/usb/gadget/s3c2410_udc.c
··· 1062 1062 1063 1063 ep = to_s3c2410_ep(_ep); 1064 1064 1065 - if (!_ep || !desc || ep->ep.desc 1065 + if (!_ep || !desc 1066 1066 || _ep->name == ep0name 1067 1067 || desc->bDescriptorType != USB_DT_ENDPOINT) 1068 1068 return -EINVAL;
+2
drivers/usb/host/ehci-hcd.c
··· 671 671 hw = ehci->async->hw; 672 672 hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); 673 673 hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD); 674 + #if defined(CONFIG_PPC_PS3) 674 675 hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */ 676 + #endif 675 677 hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); 676 678 hw->hw_qtd_next = EHCI_LIST_END(ehci); 677 679 ehci->async->qh_state = QH_STATE_LINKED;
+167 -1
drivers/usb/host/ehci-omap.c
··· 43 43 #include <linux/regulator/consumer.h> 44 44 #include <linux/pm_runtime.h> 45 45 #include <linux/gpio.h> 46 + #include <linux/clk.h> 46 47 47 48 /* EHCI Register Set */ 48 49 #define EHCI_INSNREG04 (0xA0) ··· 55 54 #define EHCI_INSNREG05_ULPI_REGADD_SHIFT 16 56 55 #define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 57 56 #define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 57 + 58 + /* Errata i693 */ 59 + static struct clk *utmi_p1_fck; 60 + static struct clk *utmi_p2_fck; 61 + static struct clk *xclk60mhsp1_ck; 62 + static struct clk *xclk60mhsp2_ck; 63 + static struct clk *usbhost_p1_fck; 64 + static struct clk *usbhost_p2_fck; 65 + static struct clk *init_60m_fclk; 58 66 59 67 /*-------------------------------------------------------------------------*/ 60 68 ··· 78 68 static inline u32 ehci_read(void __iomem *base, u32 reg) 79 69 { 80 70 return __raw_readl(base + reg); 71 + } 72 + 73 + /* Erratum i693 workaround sequence */ 74 + static void omap_ehci_erratum_i693(struct ehci_hcd *ehci) 75 + { 76 + int ret = 0; 77 + 78 + /* Switch to the internal 60 MHz clock */ 79 + ret = clk_set_parent(utmi_p1_fck, init_60m_fclk); 80 + if (ret != 0) 81 + ehci_err(ehci, "init_60m_fclk set parent" 82 + "failed error:%d\n", ret); 83 + 84 + ret = clk_set_parent(utmi_p2_fck, init_60m_fclk); 85 + if (ret != 0) 86 + ehci_err(ehci, "init_60m_fclk set parent" 87 + "failed error:%d\n", ret); 88 + 89 + clk_enable(usbhost_p1_fck); 90 + clk_enable(usbhost_p2_fck); 91 + 92 + /* Wait 1ms and switch back to the external clock */ 93 + mdelay(1); 94 + ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck); 95 + if (ret != 0) 96 + ehci_err(ehci, "xclk60mhsp1_ck set parent" 97 + "failed error:%d\n", ret); 98 + 99 + ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck); 100 + if (ret != 0) 101 + ehci_err(ehci, "xclk60mhsp2_ck set parent" 102 + "failed error:%d\n", ret); 103 + 104 + clk_disable(usbhost_p1_fck); 105 + clk_disable(usbhost_p2_fck); 81 106 } 82 107 83 108 static void omap_ehci_soft_phy_reset(struct 
platform_device *pdev, u8 port) ··· 143 98 break; 144 99 } 145 100 } 101 + } 102 + 103 + static int omap_ehci_hub_control( 104 + struct usb_hcd *hcd, 105 + u16 typeReq, 106 + u16 wValue, 107 + u16 wIndex, 108 + char *buf, 109 + u16 wLength 110 + ) 111 + { 112 + struct ehci_hcd *ehci = hcd_to_ehci(hcd); 113 + u32 __iomem *status_reg = &ehci->regs->port_status[ 114 + (wIndex & 0xff) - 1]; 115 + u32 temp; 116 + unsigned long flags; 117 + int retval = 0; 118 + 119 + spin_lock_irqsave(&ehci->lock, flags); 120 + 121 + if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) { 122 + temp = ehci_readl(ehci, status_reg); 123 + if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) { 124 + retval = -EPIPE; 125 + goto done; 126 + } 127 + 128 + temp &= ~PORT_WKCONN_E; 129 + temp |= PORT_WKDISC_E | PORT_WKOC_E; 130 + ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); 131 + 132 + omap_ehci_erratum_i693(ehci); 133 + 134 + set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports); 135 + goto done; 136 + } 137 + 138 + spin_unlock_irqrestore(&ehci->lock, flags); 139 + 140 + /* Handle the hub control events here */ 141 + return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength); 142 + done: 143 + spin_unlock_irqrestore(&ehci->lock, flags); 144 + return retval; 146 145 } 147 146 148 147 static void disable_put_regulator( ··· 353 264 /* root ports should always stay powered */ 354 265 ehci_port_power(omap_ehci, 1); 355 266 267 + /* get clocks */ 268 + utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk"); 269 + if (IS_ERR(utmi_p1_fck)) { 270 + ret = PTR_ERR(utmi_p1_fck); 271 + dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret); 272 + goto err_add_hcd; 273 + } 274 + 275 + xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck"); 276 + if (IS_ERR(xclk60mhsp1_ck)) { 277 + ret = PTR_ERR(xclk60mhsp1_ck); 278 + dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret); 279 + goto err_utmi_p1_fck; 280 + } 281 + 282 + utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk"); 283 + if (IS_ERR(utmi_p2_fck)) { 284 
+ ret = PTR_ERR(utmi_p2_fck); 285 + dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret); 286 + goto err_xclk60mhsp1_ck; 287 + } 288 + 289 + xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck"); 290 + if (IS_ERR(xclk60mhsp2_ck)) { 291 + ret = PTR_ERR(xclk60mhsp2_ck); 292 + dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret); 293 + goto err_utmi_p2_fck; 294 + } 295 + 296 + usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk"); 297 + if (IS_ERR(usbhost_p1_fck)) { 298 + ret = PTR_ERR(usbhost_p1_fck); 299 + dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret); 300 + goto err_xclk60mhsp2_ck; 301 + } 302 + 303 + usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk"); 304 + if (IS_ERR(usbhost_p2_fck)) { 305 + ret = PTR_ERR(usbhost_p2_fck); 306 + dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret); 307 + goto err_usbhost_p1_fck; 308 + } 309 + 310 + init_60m_fclk = clk_get(dev, "init_60m_fclk"); 311 + if (IS_ERR(init_60m_fclk)) { 312 + ret = PTR_ERR(init_60m_fclk); 313 + dev_err(dev, "init_60m_fclk failed error:%d\n", ret); 314 + goto err_usbhost_p2_fck; 315 + } 316 + 356 317 return 0; 318 + 319 + err_usbhost_p2_fck: 320 + clk_put(usbhost_p2_fck); 321 + 322 + err_usbhost_p1_fck: 323 + clk_put(usbhost_p1_fck); 324 + 325 + err_xclk60mhsp2_ck: 326 + clk_put(xclk60mhsp2_ck); 327 + 328 + err_utmi_p2_fck: 329 + clk_put(utmi_p2_fck); 330 + 331 + err_xclk60mhsp1_ck: 332 + clk_put(xclk60mhsp1_ck); 333 + 334 + err_utmi_p1_fck: 335 + clk_put(utmi_p1_fck); 357 336 358 337 err_add_hcd: 359 338 disable_put_regulator(pdata); ··· 451 294 disable_put_regulator(dev->platform_data); 452 295 iounmap(hcd->regs); 453 296 usb_put_hcd(hcd); 297 + 298 + clk_put(utmi_p1_fck); 299 + clk_put(utmi_p2_fck); 300 + clk_put(xclk60mhsp1_ck); 301 + clk_put(xclk60mhsp2_ck); 302 + clk_put(usbhost_p1_fck); 303 + clk_put(usbhost_p2_fck); 304 + clk_put(init_60m_fclk); 305 + 454 306 pm_runtime_put_sync(dev); 455 307 pm_runtime_disable(dev); 456 308 ··· 530 364 * root hub support 531 365 */ 532 366 
.hub_status_data = ehci_hub_status_data, 533 - .hub_control = ehci_hub_control, 367 + .hub_control = omap_ehci_hub_control, 534 368 .bus_suspend = ehci_bus_suspend, 535 369 .bus_resume = ehci_bus_resume, 536 370
-8
drivers/usb/host/ehci-pci.c
··· 144 144 hcd->has_tt = 1; 145 145 tdi_reset(ehci); 146 146 } 147 - if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) { 148 - /* EHCI #1 or #2 on 6 Series/C200 Series chipset */ 149 - if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) { 150 - ehci_info(ehci, "broken D3 during system sleep on ASUS\n"); 151 - hcd->broken_pci_sleep = 1; 152 - device_set_wakeup_capable(&pdev->dev, false); 153 - } 154 - } 155 147 break; 156 148 case PCI_VENDOR_ID_TDI: 157 149 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
+1 -2
drivers/usb/host/ehci-sh.c
··· 126 126 goto fail_create_hcd; 127 127 } 128 128 129 - if (pdev->dev.platform_data != NULL) 130 - pdata = pdev->dev.platform_data; 129 + pdata = pdev->dev.platform_data; 131 130 132 131 /* initialize hcd */ 133 132 hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
+1 -3
drivers/usb/host/ehci-xilinx-of.c
··· 270 270 * 271 271 * Properly shutdown the hcd, call driver's shutdown routine. 272 272 */ 273 - static int ehci_hcd_xilinx_of_shutdown(struct platform_device *op) 273 + static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op) 274 274 { 275 275 struct usb_hcd *hcd = dev_get_drvdata(&op->dev); 276 276 277 277 if (hcd->driver->shutdown) 278 278 hcd->driver->shutdown(hcd); 279 - 280 - return 0; 281 279 } 282 280 283 281
+1 -1
drivers/usb/host/ohci-hub.c
··· 317 317 } 318 318 319 319 /* Carry out the final steps of resuming the controller device */ 320 - static void ohci_finish_controller_resume(struct usb_hcd *hcd) 320 + static void __maybe_unused ohci_finish_controller_resume(struct usb_hcd *hcd) 321 321 { 322 322 struct ohci_hcd *ohci = hcd_to_ohci(hcd); 323 323 int port;
+24 -50
drivers/usb/host/xhci-mem.c
··· 793 793 struct xhci_virt_device *virt_dev, 794 794 int slot_id) 795 795 { 796 - struct list_head *tt; 797 796 struct list_head *tt_list_head; 798 - struct list_head *tt_next; 799 - struct xhci_tt_bw_info *tt_info; 797 + struct xhci_tt_bw_info *tt_info, *next; 798 + bool slot_found = false; 800 799 801 800 /* If the device never made it past the Set Address stage, 802 801 * it may not have the real_port set correctly. ··· 807 808 } 808 809 809 810 tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts); 810 - if (list_empty(tt_list_head)) 811 - return; 812 - 813 - list_for_each(tt, tt_list_head) { 814 - tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); 815 - if (tt_info->slot_id == slot_id) 811 + list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { 812 + /* Multi-TT hubs will have more than one entry */ 813 + if (tt_info->slot_id == slot_id) { 814 + slot_found = true; 815 + list_del(&tt_info->tt_list); 816 + kfree(tt_info); 817 + } else if (slot_found) { 816 818 break; 819 + } 817 820 } 818 - /* Cautionary measure in case the hub was disconnected before we 819 - * stored the TT information. 
820 - */ 821 - if (tt_info->slot_id != slot_id) 822 - return; 823 - 824 - tt_next = tt->next; 825 - tt_info = list_entry(tt, struct xhci_tt_bw_info, 826 - tt_list); 827 - /* Multi-TT hubs will have more than one entry */ 828 - do { 829 - list_del(tt); 830 - kfree(tt_info); 831 - tt = tt_next; 832 - if (list_empty(tt_list_head)) 833 - break; 834 - tt_next = tt->next; 835 - tt_info = list_entry(tt, struct xhci_tt_bw_info, 836 - tt_list); 837 - } while (tt_info->slot_id == slot_id); 838 821 } 839 822 840 823 int xhci_alloc_tt_info(struct xhci_hcd *xhci, ··· 1772 1791 { 1773 1792 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 1774 1793 struct dev_info *dev_info, *next; 1775 - struct list_head *tt_list_head; 1776 - struct list_head *tt; 1777 - struct list_head *endpoints; 1778 - struct list_head *ep, *q; 1779 - struct xhci_tt_bw_info *tt_info; 1780 - struct xhci_interval_bw_table *bwt; 1781 - struct xhci_virt_ep *virt_ep; 1782 - 1783 1794 unsigned long flags; 1784 1795 int size; 1785 - int i; 1796 + int i, j, num_ports; 1786 1797 1787 1798 /* Free the Event Ring Segment Table and the actual Event Ring */ 1788 1799 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); ··· 1833 1860 } 1834 1861 spin_unlock_irqrestore(&xhci->lock, flags); 1835 1862 1836 - bwt = &xhci->rh_bw->bw_table; 1837 - for (i = 0; i < XHCI_MAX_INTERVAL; i++) { 1838 - endpoints = &bwt->interval_bw[i].endpoints; 1839 - list_for_each_safe(ep, q, endpoints) { 1840 - virt_ep = list_entry(ep, struct xhci_virt_ep, bw_endpoint_list); 1841 - list_del(&virt_ep->bw_endpoint_list); 1842 - kfree(virt_ep); 1863 + num_ports = HCS_MAX_PORTS(xhci->hcs_params1); 1864 + for (i = 0; i < num_ports; i++) { 1865 + struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; 1866 + for (j = 0; j < XHCI_MAX_INTERVAL; j++) { 1867 + struct list_head *ep = &bwt->interval_bw[j].endpoints; 1868 + while (!list_empty(ep)) 1869 + list_del_init(ep->next); 1843 1870 } 1844 1871 } 1845 1872 1846 - 
tt_list_head = &xhci->rh_bw->tts; 1847 - list_for_each_safe(tt, q, tt_list_head) { 1848 - tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); 1849 - list_del(tt); 1850 - kfree(tt_info); 1873 + for (i = 0; i < num_ports; i++) { 1874 + struct xhci_tt_bw_info *tt, *n; 1875 + list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) { 1876 + list_del(&tt->tt_list); 1877 + kfree(tt); 1878 + } 1851 1879 } 1852 1880 1853 1881 xhci->num_usb2_ports = 0;
+5 -5
drivers/usb/host/xhci.c
··· 795 795 command = xhci_readl(xhci, &xhci->op_regs->command); 796 796 command |= CMD_CSS; 797 797 xhci_writel(xhci, command, &xhci->op_regs->command); 798 - if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) { 799 - xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n"); 798 + if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) { 799 + xhci_warn(xhci, "WARN: xHC save state timeout\n"); 800 800 spin_unlock_irq(&xhci->lock); 801 801 return -ETIMEDOUT; 802 802 } ··· 848 848 command |= CMD_CRS; 849 849 xhci_writel(xhci, command, &xhci->op_regs->command); 850 850 if (handshake(xhci, &xhci->op_regs->status, 851 - STS_RESTORE, 0, 10*100)) { 852 - xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n"); 851 + STS_RESTORE, 0, 10 * 1000)) { 852 + xhci_warn(xhci, "WARN: xHC restore state timeout\n"); 853 853 spin_unlock_irq(&xhci->lock); 854 854 return -ETIMEDOUT; 855 855 } ··· 3906 3906 default: 3907 3907 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", 3908 3908 __func__); 3909 - return -EINVAL; 3909 + return USB3_LPM_DISABLED; 3910 3910 } 3911 3911 3912 3912 if (sel <= max_sel_pel && pel <= max_sel_pel)
+1
drivers/usb/musb/davinci.c
··· 34 34 #include <linux/dma-mapping.h> 35 35 36 36 #include <mach/cputype.h> 37 + #include <mach/hardware.h> 37 38 38 39 #include <asm/mach-types.h> 39 40
+2 -2
drivers/usb/musb/davinci.h
··· 15 15 */ 16 16 17 17 /* Integrated highspeed/otg PHY */ 18 - #define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34) 18 + #define USBPHY_CTL_PADDR 0x01c40034 19 19 #define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */ 20 20 #define USBPHY_PHYCLKGD BIT(8) 21 21 #define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */ ··· 27 27 #define USBPHY_OTGPDWN BIT(1) 28 28 #define USBPHY_PHYPDWN BIT(0) 29 29 30 - #define DM355_DEEPSLEEP_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x48) 30 + #define DM355_DEEPSLEEP_PADDR 0x01c40048 31 31 #define DRVVBUS_FORCE BIT(2) 32 32 #define DRVVBUS_OVERRIDE BIT(1) 33 33
+1
drivers/usb/musb/musb_gadget.c
··· 1232 1232 } 1233 1233 1234 1234 musb_ep->desc = NULL; 1235 + musb_ep->end_point.desc = NULL; 1235 1236 1236 1237 /* abort all pending DMA and requests */ 1237 1238 nuke(musb_ep, -ESHUTDOWN);
+1
drivers/usb/serial/cp210x.c
··· 82 82 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ 83 83 { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ 84 84 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ 85 + { USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */ 85 86 { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ 86 87 { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ 87 88 { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
+1
drivers/usb/serial/ftdi_sio.c
··· 737 737 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, 738 738 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, 739 739 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, 740 + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) }, 740 741 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, 741 742 { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, 742 743 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
+1
drivers/usb/serial/ftdi_sio_ids.h
··· 784 784 #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ 785 785 #define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ 786 786 #define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */ 787 + #define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */ 787 788 788 789 789 790 /*
+2 -8
drivers/usb/serial/generic.c
··· 39 39 40 40 static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */ 41 41 42 - /* we want to look at all devices, as the vendor/product id can change 43 - * depending on the command line argument */ 44 - static const struct usb_device_id generic_serial_ids[] = { 45 - {.driver_info = 42}, 46 - {} 47 - }; 48 - 49 42 /* All of the device info needed for the Generic Serial Converter */ 50 43 struct usb_serial_driver usb_serial_generic_device = { 51 44 .driver = { ··· 72 79 USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT; 73 80 74 81 /* register our generic driver with ourselves */ 75 - retval = usb_serial_register_drivers(serial_drivers, "usbserial_generic", generic_serial_ids); 82 + retval = usb_serial_register_drivers(serial_drivers, 83 + "usbserial_generic", generic_device_ids); 76 84 #endif 77 85 return retval; 78 86 }
+8 -5
drivers/usb/serial/mct_u232.c
··· 309 309 MCT_U232_SET_REQUEST_TYPE, 310 310 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE, 311 311 WDR_TIMEOUT); 312 - if (rc < 0) 313 - dev_err(&serial->dev->dev, 314 - "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc); 312 + kfree(buf); 313 + 315 314 dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr); 316 315 317 - kfree(buf); 318 - return rc; 316 + if (rc < 0) { 317 + dev_err(&serial->dev->dev, 318 + "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc); 319 + return rc; 320 + } 321 + return 0; 319 322 } /* mct_u232_set_modem_ctrl */ 320 323 321 324 static int mct_u232_get_modem_stat(struct usb_serial *serial,
+1 -1
drivers/usb/serial/mos7840.c
··· 190 190 191 191 static int device_type; 192 192 193 - static const struct usb_device_id id_table[] __devinitconst = { 193 + static const struct usb_device_id id_table[] = { 194 194 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, 195 195 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, 196 196 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)},
+45 -47
drivers/usb/serial/option.c
··· 47 47 /* Function prototypes */ 48 48 static int option_probe(struct usb_serial *serial, 49 49 const struct usb_device_id *id); 50 + static void option_release(struct usb_serial *serial); 50 51 static int option_send_setup(struct usb_serial_port *port); 51 52 static void option_instat_callback(struct urb *urb); 52 53 ··· 151 150 #define HUAWEI_PRODUCT_E14AC 0x14AC 152 151 #define HUAWEI_PRODUCT_K3806 0x14AE 153 152 #define HUAWEI_PRODUCT_K4605 0x14C6 153 + #define HUAWEI_PRODUCT_K5005 0x14C8 154 154 #define HUAWEI_PRODUCT_K3770 0x14C9 155 155 #define HUAWEI_PRODUCT_K3771 0x14CA 156 156 #define HUAWEI_PRODUCT_K4510 0x14CB ··· 427 425 #define SAMSUNG_VENDOR_ID 0x04e8 428 426 #define SAMSUNG_PRODUCT_GT_B3730 0x6889 429 427 430 - /* YUGA products www.yuga-info.com*/ 428 + /* YUGA products www.yuga-info.com gavin.kx@qq.com */ 431 429 #define YUGA_VENDOR_ID 0x257A 432 430 #define YUGA_PRODUCT_CEM600 0x1601 433 431 #define YUGA_PRODUCT_CEM610 0x1602 ··· 444 442 #define YUGA_PRODUCT_CEU516 0x160C 445 443 #define YUGA_PRODUCT_CEU528 0x160D 446 444 #define YUGA_PRODUCT_CEU526 0x160F 445 + #define YUGA_PRODUCT_CEU881 0x161F 446 + #define YUGA_PRODUCT_CEU882 0x162F 447 447 448 448 #define YUGA_PRODUCT_CWM600 0x2601 449 449 #define YUGA_PRODUCT_CWM610 0x2602 ··· 461 457 #define YUGA_PRODUCT_CWU518 0x260B 462 458 #define YUGA_PRODUCT_CWU516 0x260C 463 459 #define YUGA_PRODUCT_CWU528 0x260D 460 + #define YUGA_PRODUCT_CWU581 0x260E 464 461 #define YUGA_PRODUCT_CWU526 0x260F 462 + #define YUGA_PRODUCT_CWU582 0x261F 463 + #define YUGA_PRODUCT_CWU583 0x262F 465 464 466 - #define YUGA_PRODUCT_CLM600 0x2601 467 - #define YUGA_PRODUCT_CLM610 0x2602 468 - #define YUGA_PRODUCT_CLM500 0x2603 469 - #define YUGA_PRODUCT_CLM510 0x2604 470 - #define YUGA_PRODUCT_CLM800 0x2605 471 - #define YUGA_PRODUCT_CLM900 0x2606 465 + #define YUGA_PRODUCT_CLM600 0x3601 466 + #define YUGA_PRODUCT_CLM610 0x3602 467 + #define YUGA_PRODUCT_CLM500 0x3603 468 + #define YUGA_PRODUCT_CLM510 0x3604 469 + 
#define YUGA_PRODUCT_CLM800 0x3605 470 + #define YUGA_PRODUCT_CLM900 0x3606 472 471 473 - #define YUGA_PRODUCT_CLU718 0x2607 474 - #define YUGA_PRODUCT_CLU716 0x2608 475 - #define YUGA_PRODUCT_CLU728 0x2609 476 - #define YUGA_PRODUCT_CLU726 0x260A 477 - #define YUGA_PRODUCT_CLU518 0x260B 478 - #define YUGA_PRODUCT_CLU516 0x260C 479 - #define YUGA_PRODUCT_CLU528 0x260D 480 - #define YUGA_PRODUCT_CLU526 0x260F 472 + #define YUGA_PRODUCT_CLU718 0x3607 473 + #define YUGA_PRODUCT_CLU716 0x3608 474 + #define YUGA_PRODUCT_CLU728 0x3609 475 + #define YUGA_PRODUCT_CLU726 0x360A 476 + #define YUGA_PRODUCT_CLU518 0x360B 477 + #define YUGA_PRODUCT_CLU516 0x360C 478 + #define YUGA_PRODUCT_CLU528 0x360D 479 + #define YUGA_PRODUCT_CLU526 0x360F 481 480 482 481 /* Viettel products */ 483 482 #define VIETTEL_VENDOR_ID 0x2262 ··· 673 666 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) }, 674 667 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), 675 668 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, 669 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) }, 670 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) }, 671 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) }, 672 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) }, 673 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) }, 676 674 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, 677 675 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, 678 676 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, ··· 1221 1209 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, 1222 1210 { USB_DEVICE(YUGA_VENDOR_ID, 
YUGA_PRODUCT_CLU528) }, 1223 1211 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, 1212 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) }, 1213 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) }, 1214 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) }, 1215 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) }, 1216 + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) }, 1224 1217 { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) }, 1225 1218 { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) }, 1226 1219 { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */ ··· 1262 1245 .ioctl = usb_wwan_ioctl, 1263 1246 .attach = usb_wwan_startup, 1264 1247 .disconnect = usb_wwan_disconnect, 1265 - .release = usb_wwan_release, 1248 + .release = option_release, 1266 1249 .read_int_callback = option_instat_callback, 1267 1250 #ifdef CONFIG_PM 1268 1251 .suspend = usb_wwan_suspend, ··· 1275 1258 }; 1276 1259 1277 1260 static bool debug; 1278 - 1279 - /* per port private data */ 1280 - 1281 - #define N_IN_URB 4 1282 - #define N_OUT_URB 4 1283 - #define IN_BUFLEN 4096 1284 - #define OUT_BUFLEN 4096 1285 - 1286 - struct option_port_private { 1287 - /* Input endpoints and buffer for this port */ 1288 - struct urb *in_urbs[N_IN_URB]; 1289 - u8 *in_buffer[N_IN_URB]; 1290 - /* Output endpoints and buffer for this port */ 1291 - struct urb *out_urbs[N_OUT_URB]; 1292 - u8 *out_buffer[N_OUT_URB]; 1293 - unsigned long out_busy; /* Bit vector of URBs in use */ 1294 - int opened; 1295 - struct usb_anchor delayed; 1296 - 1297 - /* Settings for the port */ 1298 - int rts_state; /* Handshaking pins (outputs) */ 1299 - int dtr_state; 1300 - int cts_state; /* Handshaking pins (inputs) */ 1301 - int dsr_state; 1302 - int dcd_state; 1303 - int ri_state; 1304 - 1305 - unsigned long tx_start_time[N_OUT_URB]; 1306 - }; 1307 1261 1308 1262 module_usb_serial_driver(serial_drivers, option_ids); 1309 1263 
··· 1344 1356 return 0; 1345 1357 } 1346 1358 1359 + static void option_release(struct usb_serial *serial) 1360 + { 1361 + struct usb_wwan_intf_private *priv = usb_get_serial_data(serial); 1362 + 1363 + usb_wwan_release(serial); 1364 + 1365 + kfree(priv); 1366 + } 1367 + 1347 1368 static void option_instat_callback(struct urb *urb) 1348 1369 { 1349 1370 int err; 1350 1371 int status = urb->status; 1351 1372 struct usb_serial_port *port = urb->context; 1352 - struct option_port_private *portdata = usb_get_serial_port_data(port); 1373 + struct usb_wwan_port_private *portdata = 1374 + usb_get_serial_port_data(port); 1353 1375 1354 1376 dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); 1355 1377 ··· 1419 1421 struct usb_serial *serial = port->serial; 1420 1422 struct usb_wwan_intf_private *intfdata = 1421 1423 (struct usb_wwan_intf_private *) serial->private; 1422 - struct option_port_private *portdata; 1424 + struct usb_wwan_port_private *portdata; 1423 1425 int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber; 1424 1426 int val = 0; 1425 1427
+6
drivers/usb/serial/qcserial.c
··· 105 105 {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */ 106 106 {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */ 107 107 {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ 108 + {USB_DEVICE(0x1199, 0x9010)}, /* Sierra Wireless Gobi 3000 QDL */ 109 + {USB_DEVICE(0x1199, 0x9012)}, /* Sierra Wireless Gobi 3000 QDL */ 108 110 {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ 111 + {USB_DEVICE(0x1199, 0x9014)}, /* Sierra Wireless Gobi 3000 QDL */ 112 + {USB_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ 113 + {USB_DEVICE(0x1199, 0x9018)}, /* Sierra Wireless Gobi 3000 QDL */ 114 + {USB_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ 109 115 {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */ 110 116 {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */ 111 117 { } /* Terminating entry */
+4
drivers/usb/serial/sierra.c
··· 294 294 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ 295 295 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 296 296 }, 297 + /* AT&T Direct IP LTE modems */ 298 + { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF), 299 + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 300 + }, 297 301 { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ 298 302 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 299 303 },
+7 -5
drivers/usb/serial/usb-serial.c
··· 659 659 static struct usb_serial_driver *search_serial_device( 660 660 struct usb_interface *iface) 661 661 { 662 - const struct usb_device_id *id; 662 + const struct usb_device_id *id = NULL; 663 663 struct usb_serial_driver *drv; 664 + struct usb_driver *driver = to_usb_driver(iface->dev.driver); 664 665 665 666 /* Check if the usb id matches a known device */ 666 667 list_for_each_entry(drv, &usb_serial_driver_list, driver_list) { 667 - id = get_iface_id(drv, iface); 668 + if (drv->usb_driver == driver) 669 + id = get_iface_id(drv, iface); 668 670 if (id) 669 671 return drv; 670 672 } ··· 757 755 758 756 if (retval) { 759 757 dbg("sub driver rejected device"); 760 - kfree(serial); 758 + usb_serial_put(serial); 761 759 module_put(type->driver.owner); 762 760 return retval; 763 761 } ··· 829 827 */ 830 828 if (num_bulk_in == 0 || num_bulk_out == 0) { 831 829 dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); 832 - kfree(serial); 830 + usb_serial_put(serial); 833 831 module_put(type->driver.owner); 834 832 return -ENODEV; 835 833 } ··· 843 841 if (num_ports == 0) { 844 842 dev_err(&interface->dev, 845 843 "Generic device with no bulk out, not allowed.\n"); 846 - kfree(serial); 844 + usb_serial_put(serial); 847 845 module_put(type->driver.owner); 848 846 return -EIO; 849 847 }
+7
drivers/usb/storage/unusual_devs.h
··· 1107 1107 USB_SC_RBC, USB_PR_BULK, NULL, 1108 1108 0 ), 1109 1109 1110 + /* Feiya QDI U2 DISK, reported by Hans de Goede <hdegoede@redhat.com> */ 1111 + UNUSUAL_DEV( 0x090c, 0x1000, 0x0000, 0xffff, 1112 + "Feiya", 1113 + "QDI U2 DISK", 1114 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1115 + US_FL_NO_READ_CAPACITY_16 ), 1116 + 1110 1117 /* aeb */ 1111 1118 UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff, 1112 1119 "Feiya",
+1 -1
drivers/video/backlight/Kconfig
··· 88 88 89 89 config LCD_TOSA 90 90 tristate "Sharp SL-6000 LCD Driver" 91 - depends on SPI && MACH_TOSA 91 + depends on I2C && SPI && MACH_TOSA 92 92 help 93 93 If you have an Sharp SL-6000 Zaurus say Y to enable a driver 94 94 for its LCD.
+1 -1
drivers/video/backlight/ili9320.c
··· 263 263 264 264 EXPORT_SYMBOL_GPL(ili9320_probe_spi); 265 265 266 - int __devexit ili9320_remove(struct ili9320 *ili) 266 + int ili9320_remove(struct ili9320 *ili) 267 267 { 268 268 ili9320_power(ili, FB_BLANK_POWERDOWN); 269 269
+2 -4
drivers/video/bfin_adv7393fb.c
··· 353 353 354 354 static int 355 355 adv7393_write_proc(struct file *file, const char __user * buffer, 356 - unsigned long count, void *data) 356 + size_t count, void *data) 357 357 { 358 358 struct adv7393fb_device *fbdev = data; 359 - char line[8]; 360 359 unsigned int val; 361 360 int ret; 362 361 363 - ret = copy_from_user(line, buffer, count); 362 + ret = kstrtouint_from_user(buffer, count, 0, &val); 364 363 if (ret) 365 364 return -EFAULT; 366 365 367 - val = simple_strtoul(line, NULL, 0); 368 366 adv7393_write(fbdev->client, val >> 8, val & 0xff); 369 367 370 368 return count;
+1 -1
drivers/video/broadsheetfb.c
··· 1211 1211 1212 1212 static struct platform_driver broadsheetfb_driver = { 1213 1213 .probe = broadsheetfb_probe, 1214 - .remove = broadsheetfb_remove, 1214 + .remove = __devexit_p(broadsheetfb_remove), 1215 1215 .driver = { 1216 1216 .owner = THIS_MODULE, 1217 1217 .name = "broadsheetfb",
+14
drivers/video/console/Kconfig
··· 224 224 big letters. It fits between the sun 12x22 and the normal 8x16 font. 225 225 If other fonts are too big or too small for you, say Y, otherwise say N. 226 226 227 + config FONT_AUTOSELECT 228 + def_bool y 229 + depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE || STI_CONSOLE || USB_SISUSBVGA_CON 230 + depends on !FONT_8x8 231 + depends on !FONT_6x11 232 + depends on !FONT_7x14 233 + depends on !FONT_PEARL_8x8 234 + depends on !FONT_ACORN_8x8 235 + depends on !FONT_MINI_4x6 236 + depends on !FONT_SUN8x16 237 + depends on !FONT_SUN12x22 238 + depends on !FONT_10x18 239 + select FONT_8x16 240 + 227 241 endmenu 228 242
+1 -1
drivers/video/mbx/mbxfb.c
··· 1045 1045 1046 1046 static struct platform_driver mbxfb_driver = { 1047 1047 .probe = mbxfb_probe, 1048 - .remove = mbxfb_remove, 1048 + .remove = __devexit_p(mbxfb_remove), 1049 1049 .suspend = mbxfb_suspend, 1050 1050 .resume = mbxfb_resume, 1051 1051 .driver = {
+1 -1
drivers/video/omap2/displays/panel-taal.c
··· 526 526 { 527 527 struct omap_dss_device *dssdev = to_dss_device(dev); 528 528 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 529 - u8 errors; 529 + u8 errors = 0; 530 530 int r; 531 531 532 532 mutex_lock(&td->lock);
+1 -2
drivers/video/omap2/dss/core.c
··· 194 194 static inline void dss_uninitialize_debugfs(void) 195 195 { 196 196 } 197 - static inline int dss_debugfs_create_file(const char *name, 198 - void (*write)(struct seq_file *)) 197 + int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)) 199 198 { 200 199 return 0; 201 200 }
+1 -1
drivers/video/omap2/dss/dsi.c
··· 3724 3724 /* CLKIN4DDR = 16 * TXBYTECLKHS */ 3725 3725 tlp_avail = thsbyte_clk * (blank - trans_lp); 3726 3726 3727 - ttxclkesc = tdsi_fclk / lp_clk_div; 3727 + ttxclkesc = tdsi_fclk * lp_clk_div; 3728 3728 3729 3729 lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc - 3730 3730 26) / 16;
+1 -1
drivers/video/omap2/dss/dss.c
··· 731 731 DSSDBG("dss_runtime_put\n"); 732 732 733 733 r = pm_runtime_put_sync(&dss.pdev->dev); 734 - WARN_ON(r < 0); 734 + WARN_ON(r < 0 && r != -EBUSY); 735 735 } 736 736 737 737 /* DEBUGFS */
+9 -3
drivers/video/s3c-fb.c
··· 361 361 result = (unsigned int)tmp / 1000; 362 362 363 363 dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", 364 - pixclk, clk, result, clk / result); 364 + pixclk, clk, result, result ? clk / result : clk); 365 365 366 366 return result; 367 367 } ··· 1348 1348 writel(0, regs + VIDOSD_A(win, sfb->variant)); 1349 1349 writel(0, regs + VIDOSD_B(win, sfb->variant)); 1350 1350 writel(0, regs + VIDOSD_C(win, sfb->variant)); 1351 - reg = readl(regs + SHADOWCON); 1352 - writel(reg & ~SHADOWCON_WINx_PROTECT(win), regs + SHADOWCON); 1351 + 1352 + if (sfb->variant.has_shadowcon) { 1353 + reg = readl(sfb->regs + SHADOWCON); 1354 + reg &= ~(SHADOWCON_WINx_PROTECT(win) | 1355 + SHADOWCON_CHx_ENABLE(win) | 1356 + SHADOWCON_CHx_LOCAL_ENABLE(win)); 1357 + writel(reg, sfb->regs + SHADOWCON); 1358 + } 1353 1359 } 1354 1360 1355 1361 static int __devinit s3c_fb_probe(struct platform_device *pdev)
+5 -5
drivers/video/savage/savagefb_driver.c
··· 1351 1351 /* following part not present in X11 driver */ 1352 1352 cr67 = vga_in8(0x3d5, par) & 0xf; 1353 1353 vga_out8(0x3d5, 0x50 | cr67, par); 1354 - udelay(10000); 1354 + mdelay(10); 1355 1355 vga_out8(0x3d4, 0x67, par); 1356 1356 /* end of part */ 1357 1357 vga_out8(0x3d5, reg->CR67 & ~0x0c, par); ··· 1904 1904 vga_out8(0x3d4, 0x66, par); 1905 1905 cr66 = vga_in8(0x3d5, par); 1906 1906 vga_out8(0x3d5, cr66 | 0x02, par); 1907 - udelay(10000); 1907 + mdelay(10); 1908 1908 1909 1909 vga_out8(0x3d4, 0x66, par); 1910 1910 vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */ 1911 - udelay(10000); 1911 + mdelay(10); 1912 1912 1913 1913 1914 1914 /* ··· 1918 1918 vga_out8(0x3d4, 0x3f, par); 1919 1919 cr3f = vga_in8(0x3d5, par); 1920 1920 vga_out8(0x3d5, cr3f | 0x08, par); 1921 - udelay(10000); 1921 + mdelay(10); 1922 1922 1923 1923 vga_out8(0x3d4, 0x3f, par); 1924 1924 vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */ 1925 - udelay(10000); 1925 + mdelay(10); 1926 1926 1927 1927 /* Savage ramdac speeds */ 1928 1928 par->numClocks = 4;
+9
drivers/xen/events.c
··· 827 827 handle_edge_irq, "event"); 828 828 829 829 xen_irq_info_evtchn_init(irq, evtchn); 830 + } else { 831 + struct irq_info *info = info_for_irq(irq); 832 + WARN_ON(info == NULL || info->type != IRQT_EVTCHN); 830 833 } 831 834 832 835 out: ··· 865 862 xen_irq_info_ipi_init(cpu, irq, evtchn, ipi); 866 863 867 864 bind_evtchn_to_cpu(evtchn, cpu); 865 + } else { 866 + struct irq_info *info = info_for_irq(irq); 867 + WARN_ON(info == NULL || info->type != IRQT_IPI); 868 868 } 869 869 870 870 out: ··· 945 939 xen_irq_info_virq_init(cpu, irq, evtchn, virq); 946 940 947 941 bind_evtchn_to_cpu(evtchn, cpu); 942 + } else { 943 + struct irq_info *info = info_for_irq(irq); 944 + WARN_ON(info == NULL || info->type != IRQT_VIRQ); 948 945 } 949 946 950 947 out:
+1 -1
drivers/xen/pci.c
··· 59 59 60 60 #ifdef CONFIG_ACPI 61 61 handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); 62 - if (!handle) 62 + if (!handle && pci_dev->bus->bridge) 63 63 handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); 64 64 #ifdef CONFIG_PCI_IOV 65 65 if (!handle && pci_dev->is_virtfn)
+5 -12
fs/btrfs/backref.c
··· 179 179 180 180 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, 181 181 struct ulist *parents, int level, 182 - struct btrfs_key *key, u64 wanted_disk_byte, 182 + struct btrfs_key *key, u64 time_seq, 183 + u64 wanted_disk_byte, 183 184 const u64 *extent_item_pos) 184 185 { 185 186 int ret; ··· 213 212 */ 214 213 while (1) { 215 214 eie = NULL; 216 - ret = btrfs_next_leaf(root, path); 215 + ret = btrfs_next_old_leaf(root, path, time_seq); 217 216 if (ret < 0) 218 217 return ret; 219 218 if (ret) ··· 295 294 goto out; 296 295 } 297 296 298 - if (level == 0) { 299 - if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) { 300 - ret = btrfs_next_leaf(root, path); 301 - if (ret) 302 - goto out; 303 - eb = path->nodes[0]; 304 - } 305 - 297 + if (level == 0) 306 298 btrfs_item_key_to_cpu(eb, &key, path->slots[0]); 307 - } 308 299 309 - ret = add_all_parents(root, path, parents, level, &key, 300 + ret = add_all_parents(root, path, parents, level, &key, time_seq, 310 301 ref->wanted_disk_byte, extent_item_pos); 311 302 out: 312 303 btrfs_free_path(path);
+1
fs/btrfs/btrfs_inode.h
··· 37 37 #define BTRFS_INODE_IN_DEFRAG 3 38 38 #define BTRFS_INODE_DELALLOC_META_RESERVED 4 39 39 #define BTRFS_INODE_HAS_ORPHAN_ITEM 5 40 + #define BTRFS_INODE_HAS_ASYNC_EXTENT 6 40 41 41 42 /* in memory btrfs inode */ 42 43 struct btrfs_inode {
+9 -7
fs/btrfs/check-integrity.c
··· 93 93 #include "print-tree.h" 94 94 #include "locking.h" 95 95 #include "check-integrity.h" 96 + #include "rcu-string.h" 96 97 97 98 #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000 98 99 #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000 ··· 844 843 superblock_tmp->never_written = 0; 845 844 superblock_tmp->mirror_num = 1 + superblock_mirror_num; 846 845 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) 847 - printk(KERN_INFO "New initial S-block (bdev %p, %s)" 848 - " @%llu (%s/%llu/%d)\n", 849 - superblock_bdev, device->name, 850 - (unsigned long long)dev_bytenr, 851 - dev_state->name, 852 - (unsigned long long)dev_bytenr, 853 - superblock_mirror_num); 846 + printk_in_rcu(KERN_INFO "New initial S-block (bdev %p, %s)" 847 + " @%llu (%s/%llu/%d)\n", 848 + superblock_bdev, 849 + rcu_str_deref(device->name), 850 + (unsigned long long)dev_bytenr, 851 + dev_state->name, 852 + (unsigned long long)dev_bytenr, 853 + superblock_mirror_num); 854 854 list_add(&superblock_tmp->all_blocks_node, 855 855 &state->all_blocks_list); 856 856 btrfsic_block_hashtable_add(superblock_tmp,
+64 -24
fs/btrfs/ctree.c
··· 467 467 return 0; 468 468 } 469 469 470 + /* 471 + * This allocates memory and gets a tree modification sequence number when 472 + * needed. 473 + * 474 + * Returns 0 when no sequence number is needed, < 0 on error. 475 + * Returns 1 when a sequence number was added. In this case, 476 + * fs_info->tree_mod_seq_lock was acquired and must be released by the caller 477 + * after inserting into the rb tree. 478 + */ 470 479 static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags, 471 480 struct tree_mod_elem **tm_ret) 472 481 { ··· 500 491 */ 501 492 kfree(tm); 502 493 seq = 0; 494 + spin_unlock(&fs_info->tree_mod_seq_lock); 503 495 } else { 504 496 __get_tree_mod_seq(fs_info, &tm->elem); 505 497 seq = tm->elem.seq; 506 498 } 507 - spin_unlock(&fs_info->tree_mod_seq_lock); 508 499 509 500 return seq; 510 501 } ··· 530 521 tm->slot = slot; 531 522 tm->generation = btrfs_node_ptr_generation(eb, slot); 532 523 533 - return __tree_mod_log_insert(fs_info, tm); 524 + ret = __tree_mod_log_insert(fs_info, tm); 525 + spin_unlock(&fs_info->tree_mod_seq_lock); 526 + return ret; 534 527 } 535 528 536 529 static noinline int ··· 570 559 tm->move.nr_items = nr_items; 571 560 tm->op = MOD_LOG_MOVE_KEYS; 572 561 573 - return __tree_mod_log_insert(fs_info, tm); 562 + ret = __tree_mod_log_insert(fs_info, tm); 563 + spin_unlock(&fs_info->tree_mod_seq_lock); 564 + return ret; 574 565 } 575 566 576 567 static noinline int ··· 593 580 tm->generation = btrfs_header_generation(old_root); 594 581 tm->op = MOD_LOG_ROOT_REPLACE; 595 582 596 - return __tree_mod_log_insert(fs_info, tm); 583 + ret = __tree_mod_log_insert(fs_info, tm); 584 + spin_unlock(&fs_info->tree_mod_seq_lock); 585 + return ret; 597 586 } 598 587 599 588 static struct tree_mod_elem * ··· 1038 1023 looped = 1; 1039 1024 } 1040 1025 1026 + /* if there's no old root to return, return what we found instead */ 1027 + if (!found) 1028 + found = tm; 1029 + 1041 1030 return found; 1042 1031 } 1043 1032 ··· 1162 
1143 return eb_rewin; 1163 1144 } 1164 1145 1146 + /* 1147 + * get_old_root() rewinds the state of @root's root node to the given @time_seq 1148 + * value. If there are no changes, the current root->root_node is returned. If 1149 + * anything changed in between, there's a fresh buffer allocated on which the 1150 + * rewind operations are done. In any case, the returned buffer is read locked. 1151 + * Returns NULL on error (with no locks held). 1152 + */ 1165 1153 static inline struct extent_buffer * 1166 1154 get_old_root(struct btrfs_root *root, u64 time_seq) 1167 1155 { 1168 1156 struct tree_mod_elem *tm; 1169 1157 struct extent_buffer *eb; 1170 - struct tree_mod_root *old_root; 1171 - u64 old_generation; 1158 + struct tree_mod_root *old_root = NULL; 1159 + u64 old_generation = 0; 1160 + u64 logical; 1172 1161 1162 + eb = btrfs_read_lock_root_node(root); 1173 1163 tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq); 1174 1164 if (!tm) 1175 1165 return root->node; 1176 1166 1177 - old_root = &tm->old_root; 1178 - old_generation = tm->generation; 1167 + if (tm->op == MOD_LOG_ROOT_REPLACE) { 1168 + old_root = &tm->old_root; 1169 + old_generation = tm->generation; 1170 + logical = old_root->logical; 1171 + } else { 1172 + logical = root->node->start; 1173 + } 1179 1174 1180 - tm = tree_mod_log_search(root->fs_info, old_root->logical, time_seq); 1175 + tm = tree_mod_log_search(root->fs_info, logical, time_seq); 1181 1176 /* 1182 1177 * there was an item in the log when __tree_mod_log_oldest_root 1183 1178 * returned. 
this one must not go away, because the time_seq passed to ··· 1199 1166 */ 1200 1167 BUG_ON(!tm); 1201 1168 1202 - if (old_root->logical == root->node->start) { 1203 - /* there are logged operations for the current root */ 1204 - eb = btrfs_clone_extent_buffer(root->node); 1205 - } else { 1206 - /* there's a root replace operation for the current root */ 1169 + if (old_root) 1207 1170 eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT, 1208 1171 root->nodesize); 1172 + else 1173 + eb = btrfs_clone_extent_buffer(root->node); 1174 + btrfs_tree_read_unlock(root->node); 1175 + free_extent_buffer(root->node); 1176 + if (!eb) 1177 + return NULL; 1178 + btrfs_tree_read_lock(eb); 1179 + if (old_root) { 1209 1180 btrfs_set_header_bytenr(eb, eb->start); 1210 1181 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); 1211 1182 btrfs_set_header_owner(eb, root->root_key.objectid); 1183 + btrfs_set_header_level(eb, old_root->level); 1184 + btrfs_set_header_generation(eb, old_generation); 1212 1185 } 1213 - if (!eb) 1214 - return NULL; 1215 - btrfs_set_header_level(eb, old_root->level); 1216 - btrfs_set_header_generation(eb, old_generation); 1217 1186 __tree_mod_log_rewind(eb, time_seq, tm); 1187 + extent_buffer_get(eb); 1218 1188 1219 1189 return eb; 1220 1190 } ··· 1686 1650 BTRFS_NODEPTRS_PER_BLOCK(root) / 4) 1687 1651 return 0; 1688 1652 1689 - btrfs_header_nritems(mid); 1690 - 1691 1653 left = read_node_slot(root, parent, pslot - 1); 1692 1654 if (left) { 1693 1655 btrfs_tree_lock(left); ··· 1715 1681 wret = push_node_left(trans, root, left, mid, 1); 1716 1682 if (wret < 0) 1717 1683 ret = wret; 1718 - btrfs_header_nritems(mid); 1719 1684 } 1720 1685 1721 1686 /* ··· 2648 2615 2649 2616 again: 2650 2617 b = get_old_root(root, time_seq); 2651 - extent_buffer_get(b); 2652 2618 level = btrfs_header_level(b); 2653 - btrfs_tree_read_lock(b); 2654 2619 p->locks[level] = BTRFS_READ_LOCK; 2655 2620 2656 2621 while (b) { ··· 5032 5001 */ 5033 5002 int 
btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) 5034 5003 { 5004 + return btrfs_next_old_leaf(root, path, 0); 5005 + } 5006 + 5007 + int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 5008 + u64 time_seq) 5009 + { 5035 5010 int slot; 5036 5011 int level; 5037 5012 struct extent_buffer *c; ··· 5062 5025 path->keep_locks = 1; 5063 5026 path->leave_spinning = 1; 5064 5027 5065 - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5028 + if (time_seq) 5029 + ret = btrfs_search_old_slot(root, &key, path, time_seq); 5030 + else 5031 + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5066 5032 path->keep_locks = 0; 5067 5033 5068 5034 if (ret < 0)
+2
fs/btrfs/ctree.h
··· 2753 2753 } 2754 2754 2755 2755 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); 2756 + int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 2757 + u64 time_seq); 2756 2758 static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) 2757 2759 { 2758 2760 ++p->slots[0];
+18
fs/btrfs/delayed-inode.c
··· 1879 1879 } 1880 1880 } 1881 1881 } 1882 + 1883 + void btrfs_destroy_delayed_inodes(struct btrfs_root *root) 1884 + { 1885 + struct btrfs_delayed_root *delayed_root; 1886 + struct btrfs_delayed_node *curr_node, *prev_node; 1887 + 1888 + delayed_root = btrfs_get_delayed_root(root); 1889 + 1890 + curr_node = btrfs_first_delayed_node(delayed_root); 1891 + while (curr_node) { 1892 + __btrfs_kill_delayed_node(curr_node); 1893 + 1894 + prev_node = curr_node; 1895 + curr_node = btrfs_next_delayed_node(curr_node); 1896 + btrfs_release_delayed_node(prev_node); 1897 + } 1898 + } 1899 +
+3
fs/btrfs/delayed-inode.h
··· 124 124 /* Used for drop dead root */ 125 125 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); 126 126 127 + /* Used for clean the transaction */ 128 + void btrfs_destroy_delayed_inodes(struct btrfs_root *root); 129 + 127 130 /* Used for readdir() */ 128 131 void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, 129 132 struct list_head *del_list);
+48 -28
fs/btrfs/disk-io.c
··· 44 44 #include "free-space-cache.h" 45 45 #include "inode-map.h" 46 46 #include "check-integrity.h" 47 + #include "rcu-string.h" 47 48 48 49 static struct extent_io_ops btree_extent_io_ops; 49 50 static void end_workqueue_fn(struct btrfs_work *work); ··· 2119 2118 2120 2119 features = btrfs_super_incompat_flags(disk_super); 2121 2120 features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; 2122 - if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO) 2121 + if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO) 2123 2122 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; 2124 2123 2125 2124 /* ··· 2576 2575 struct btrfs_device *device = (struct btrfs_device *) 2577 2576 bh->b_private; 2578 2577 2579 - printk_ratelimited(KERN_WARNING "lost page write due to " 2580 - "I/O error on %s\n", device->name); 2578 + printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to " 2579 + "I/O error on %s\n", 2580 + rcu_str_deref(device->name)); 2581 2581 /* note, we dont' set_buffer_write_io_error because we have 2582 2582 * our own ways of dealing with the IO errors 2583 2583 */ ··· 2751 2749 wait_for_completion(&device->flush_wait); 2752 2750 2753 2751 if (bio_flagged(bio, BIO_EOPNOTSUPP)) { 2754 - printk("btrfs: disabling barriers on dev %s\n", 2755 - device->name); 2752 + printk_in_rcu("btrfs: disabling barriers on dev %s\n", 2753 + rcu_str_deref(device->name)); 2756 2754 device->nobarriers = 1; 2757 2755 } 2758 2756 if (!bio_flagged(bio, BIO_UPTODATE)) { ··· 3402 3400 3403 3401 delayed_refs = &trans->delayed_refs; 3404 3402 3405 - again: 3406 3403 spin_lock(&delayed_refs->lock); 3407 3404 if (delayed_refs->num_entries == 0) { 3408 3405 spin_unlock(&delayed_refs->lock); ··· 3409 3408 return ret; 3410 3409 } 3411 3410 3412 - node = rb_first(&delayed_refs->root); 3413 - while (node) { 3411 + while ((node = rb_first(&delayed_refs->root)) != NULL) { 3414 3412 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); 3415 - node = rb_next(node); 3416 - 3417 - 
ref->in_tree = 0; 3418 - rb_erase(&ref->rb_node, &delayed_refs->root); 3419 - delayed_refs->num_entries--; 3420 3413 3421 3414 atomic_set(&ref->refs, 1); 3422 3415 if (btrfs_delayed_ref_is_head(ref)) { 3423 3416 struct btrfs_delayed_ref_head *head; 3424 3417 3425 3418 head = btrfs_delayed_node_to_head(ref); 3426 - spin_unlock(&delayed_refs->lock); 3427 - mutex_lock(&head->mutex); 3419 + if (!mutex_trylock(&head->mutex)) { 3420 + atomic_inc(&ref->refs); 3421 + spin_unlock(&delayed_refs->lock); 3422 + 3423 + /* Need to wait for the delayed ref to run */ 3424 + mutex_lock(&head->mutex); 3425 + mutex_unlock(&head->mutex); 3426 + btrfs_put_delayed_ref(ref); 3427 + 3428 + continue; 3429 + } 3430 + 3428 3431 kfree(head->extent_op); 3429 3432 delayed_refs->num_heads--; 3430 3433 if (list_empty(&head->cluster)) 3431 3434 delayed_refs->num_heads_ready--; 3432 3435 list_del_init(&head->cluster); 3433 - mutex_unlock(&head->mutex); 3434 - btrfs_put_delayed_ref(ref); 3435 - goto again; 3436 3436 } 3437 + ref->in_tree = 0; 3438 + rb_erase(&ref->rb_node, &delayed_refs->root); 3439 + delayed_refs->num_entries--; 3440 + 3437 3441 spin_unlock(&delayed_refs->lock); 3438 3442 btrfs_put_delayed_ref(ref); 3439 3443 ··· 3526 3520 &(&BTRFS_I(page->mapping->host)->io_tree)->buffer, 3527 3521 offset >> PAGE_CACHE_SHIFT); 3528 3522 spin_unlock(&dirty_pages->buffer_lock); 3529 - if (eb) { 3523 + if (eb) 3530 3524 ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY, 3531 3525 &eb->bflags); 3532 - atomic_set(&eb->refs, 1); 3533 - } 3534 3526 if (PageWriteback(page)) 3535 3527 end_page_writeback(page); 3536 3528 ··· 3542 3538 spin_unlock_irq(&page->mapping->tree_lock); 3543 3539 } 3544 3540 3545 - page->mapping->a_ops->invalidatepage(page, 0); 3546 3541 unlock_page(page); 3542 + page_cache_release(page); 3547 3543 } 3548 3544 } 3549 3545 ··· 3557 3553 u64 start; 3558 3554 u64 end; 3559 3555 int ret; 3556 + bool loop = true; 3560 3557 3561 3558 unpin = pinned_extents; 3559 + again: 3562 3560 while (1) { 
3563 3561 ret = find_first_extent_bit(unpin, 0, &start, &end, 3564 3562 EXTENT_DIRTY); ··· 3578 3572 cond_resched(); 3579 3573 } 3580 3574 3575 + if (loop) { 3576 + if (unpin == &root->fs_info->freed_extents[0]) 3577 + unpin = &root->fs_info->freed_extents[1]; 3578 + else 3579 + unpin = &root->fs_info->freed_extents[0]; 3580 + loop = false; 3581 + goto again; 3582 + } 3583 + 3581 3584 return 0; 3582 3585 } 3583 3586 ··· 3600 3585 /* FIXME: cleanup wait for commit */ 3601 3586 cur_trans->in_commit = 1; 3602 3587 cur_trans->blocked = 1; 3603 - if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) 3604 - wake_up(&root->fs_info->transaction_blocked_wait); 3588 + wake_up(&root->fs_info->transaction_blocked_wait); 3605 3589 3606 3590 cur_trans->blocked = 0; 3607 - if (waitqueue_active(&root->fs_info->transaction_wait)) 3608 - wake_up(&root->fs_info->transaction_wait); 3591 + wake_up(&root->fs_info->transaction_wait); 3609 3592 3610 3593 cur_trans->commit_done = 1; 3611 - if (waitqueue_active(&cur_trans->commit_wait)) 3612 - wake_up(&cur_trans->commit_wait); 3594 + wake_up(&cur_trans->commit_wait); 3595 + 3596 + btrfs_destroy_delayed_inodes(root); 3597 + btrfs_assert_delayed_root_empty(root); 3613 3598 3614 3599 btrfs_destroy_pending_snapshots(cur_trans); 3615 3600 3616 3601 btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages, 3617 3602 EXTENT_DIRTY); 3603 + btrfs_destroy_pinned_extent(root, 3604 + root->fs_info->pinned_extents); 3618 3605 3619 3606 /* 3620 3607 memset(cur_trans, 0, sizeof(*cur_trans)); ··· 3664 3647 t->commit_done = 1; 3665 3648 if (waitqueue_active(&t->commit_wait)) 3666 3649 wake_up(&t->commit_wait); 3650 + 3651 + btrfs_destroy_delayed_inodes(root); 3652 + btrfs_assert_delayed_root_empty(root); 3667 3653 3668 3654 btrfs_destroy_pending_snapshots(t); 3669 3655
+4 -3
fs/btrfs/extent_io.c
··· 20 20 #include "volumes.h" 21 21 #include "check-integrity.h" 22 22 #include "locking.h" 23 + #include "rcu-string.h" 23 24 24 25 static struct kmem_cache *extent_state_cache; 25 26 static struct kmem_cache *extent_buffer_cache; ··· 1918 1917 return -EIO; 1919 1918 } 1920 1919 1921 - printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s " 1922 - "sector %llu)\n", page->mapping->host->i_ino, start, 1923 - dev->name, sector); 1920 + printk_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu " 1921 + "(dev %s sector %llu)\n", page->mapping->host->i_ino, 1922 + start, rcu_str_deref(dev->name), sector); 1924 1923 1925 1924 bio_put(bio); 1926 1925 return 0;
+58 -15
fs/btrfs/inode.c
··· 830 830 if (IS_ERR(trans)) { 831 831 extent_clear_unlock_delalloc(inode, 832 832 &BTRFS_I(inode)->io_tree, 833 - start, end, NULL, 833 + start, end, locked_page, 834 834 EXTENT_CLEAR_UNLOCK_PAGE | 835 835 EXTENT_CLEAR_UNLOCK | 836 836 EXTENT_CLEAR_DELALLOC | ··· 963 963 out_unlock: 964 964 extent_clear_unlock_delalloc(inode, 965 965 &BTRFS_I(inode)->io_tree, 966 - start, end, NULL, 966 + start, end, locked_page, 967 967 EXTENT_CLEAR_UNLOCK_PAGE | 968 968 EXTENT_CLEAR_UNLOCK | 969 969 EXTENT_CLEAR_DELALLOC | ··· 986 986 compress_file_range(async_cow->inode, async_cow->locked_page, 987 987 async_cow->start, async_cow->end, async_cow, 988 988 &num_added); 989 - if (num_added == 0) 989 + if (num_added == 0) { 990 + iput(async_cow->inode); 990 991 async_cow->inode = NULL; 992 + } 991 993 } 992 994 993 995 /* ··· 1022 1020 { 1023 1021 struct async_cow *async_cow; 1024 1022 async_cow = container_of(work, struct async_cow, work); 1023 + if (async_cow->inode) 1024 + iput(async_cow->inode); 1025 1025 kfree(async_cow); 1026 1026 } 1027 1027 ··· 1042 1038 while (start < end) { 1043 1039 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); 1044 1040 BUG_ON(!async_cow); /* -ENOMEM */ 1045 - async_cow->inode = inode; 1041 + async_cow->inode = igrab(inode); 1046 1042 async_cow->root = root; 1047 1043 async_cow->locked_page = locked_page; 1048 1044 async_cow->start = start; ··· 1140 1136 u64 ino = btrfs_ino(inode); 1141 1137 1142 1138 path = btrfs_alloc_path(); 1143 - if (!path) 1139 + if (!path) { 1140 + extent_clear_unlock_delalloc(inode, 1141 + &BTRFS_I(inode)->io_tree, 1142 + start, end, locked_page, 1143 + EXTENT_CLEAR_UNLOCK_PAGE | 1144 + EXTENT_CLEAR_UNLOCK | 1145 + EXTENT_CLEAR_DELALLOC | 1146 + EXTENT_CLEAR_DIRTY | 1147 + EXTENT_SET_WRITEBACK | 1148 + EXTENT_END_WRITEBACK); 1144 1149 return -ENOMEM; 1150 + } 1145 1151 1146 1152 nolock = btrfs_is_free_space_inode(root, inode); 1147 1153 ··· 1161 1147 trans = btrfs_join_transaction(root); 1162 1148 1163 1149 if 
(IS_ERR(trans)) { 1150 + extent_clear_unlock_delalloc(inode, 1151 + &BTRFS_I(inode)->io_tree, 1152 + start, end, locked_page, 1153 + EXTENT_CLEAR_UNLOCK_PAGE | 1154 + EXTENT_CLEAR_UNLOCK | 1155 + EXTENT_CLEAR_DELALLOC | 1156 + EXTENT_CLEAR_DIRTY | 1157 + EXTENT_SET_WRITEBACK | 1158 + EXTENT_END_WRITEBACK); 1164 1159 btrfs_free_path(path); 1165 1160 return PTR_ERR(trans); 1166 1161 } ··· 1350 1327 } 1351 1328 btrfs_release_path(path); 1352 1329 1353 - if (cur_offset <= end && cow_start == (u64)-1) 1330 + if (cur_offset <= end && cow_start == (u64)-1) { 1354 1331 cow_start = cur_offset; 1332 + cur_offset = end; 1333 + } 1334 + 1355 1335 if (cow_start != (u64)-1) { 1356 1336 ret = cow_file_range(inode, locked_page, cow_start, end, 1357 1337 page_started, nr_written, 1); ··· 1373 1347 if (!ret) 1374 1348 ret = err; 1375 1349 1350 + if (ret && cur_offset < end) 1351 + extent_clear_unlock_delalloc(inode, 1352 + &BTRFS_I(inode)->io_tree, 1353 + cur_offset, end, locked_page, 1354 + EXTENT_CLEAR_UNLOCK_PAGE | 1355 + EXTENT_CLEAR_UNLOCK | 1356 + EXTENT_CLEAR_DELALLOC | 1357 + EXTENT_CLEAR_DIRTY | 1358 + EXTENT_SET_WRITEBACK | 1359 + EXTENT_END_WRITEBACK); 1360 + 1376 1361 btrfs_free_path(path); 1377 1362 return ret; 1378 1363 } ··· 1398 1361 int ret; 1399 1362 struct btrfs_root *root = BTRFS_I(inode)->root; 1400 1363 1401 - if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) 1364 + if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) { 1402 1365 ret = run_delalloc_nocow(inode, locked_page, start, end, 1403 1366 page_started, 1, nr_written); 1404 - else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) 1367 + } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) { 1405 1368 ret = run_delalloc_nocow(inode, locked_page, start, end, 1406 1369 page_started, 0, nr_written); 1407 - else if (!btrfs_test_opt(root, COMPRESS) && 1408 - !(BTRFS_I(inode)->force_compress) && 1409 - !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) 1370 + } else if (!btrfs_test_opt(root, COMPRESS) && 1371 + 
!(BTRFS_I(inode)->force_compress) && 1372 + !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) { 1410 1373 ret = cow_file_range(inode, locked_page, start, end, 1411 1374 page_started, nr_written, 1); 1412 - else 1375 + } else { 1376 + set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 1377 + &BTRFS_I(inode)->runtime_flags); 1413 1378 ret = cow_file_range_async(inode, locked_page, start, end, 1414 1379 page_started, nr_written); 1380 + } 1415 1381 return ret; 1416 1382 } 1417 1383 ··· 7094 7054 else 7095 7055 b_inode->flags &= ~BTRFS_INODE_NODATACOW; 7096 7056 7097 - if (b_dir->flags & BTRFS_INODE_COMPRESS) 7057 + if (b_dir->flags & BTRFS_INODE_COMPRESS) { 7098 7058 b_inode->flags |= BTRFS_INODE_COMPRESS; 7099 - else 7100 - b_inode->flags &= ~BTRFS_INODE_COMPRESS; 7059 + b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS; 7060 + } else { 7061 + b_inode->flags &= ~(BTRFS_INODE_COMPRESS | 7062 + BTRFS_INODE_NOCOMPRESS); 7063 + } 7101 7064 } 7102 7065 7103 7066 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+69 -53
fs/btrfs/ioctl.c
··· 52 52 #include "locking.h" 53 53 #include "inode-map.h" 54 54 #include "backref.h" 55 + #include "rcu-string.h" 55 56 56 57 /* Mask out flags that are inappropriate for the given type of inode. */ 57 58 static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) ··· 786 785 return -ENOENT; 787 786 } 788 787 789 - /* 790 - * Validaty check of prev em and next em: 791 - * 1) no prev/next em 792 - * 2) prev/next em is an hole/inline extent 793 - */ 794 - static int check_adjacent_extents(struct inode *inode, struct extent_map *em) 788 + static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start) 795 789 { 796 790 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 797 - struct extent_map *prev = NULL, *next = NULL; 798 - int ret = 0; 799 - 800 - read_lock(&em_tree->lock); 801 - prev = lookup_extent_mapping(em_tree, em->start - 1, (u64)-1); 802 - next = lookup_extent_mapping(em_tree, em->start + em->len, (u64)-1); 803 - read_unlock(&em_tree->lock); 804 - 805 - if ((!prev || prev->block_start >= EXTENT_MAP_LAST_BYTE) && 806 - (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)) 807 - ret = 1; 808 - free_extent_map(prev); 809 - free_extent_map(next); 810 - 811 - return ret; 812 - } 813 - 814 - static int should_defrag_range(struct inode *inode, u64 start, u64 len, 815 - int thresh, u64 *last_len, u64 *skip, 816 - u64 *defrag_end) 817 - { 818 791 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 819 - struct extent_map *em = NULL; 820 - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 821 - int ret = 1; 822 - 823 - /* 824 - * make sure that once we start defragging an extent, we keep on 825 - * defragging it 826 - */ 827 - if (start < *defrag_end) 828 - return 1; 829 - 830 - *skip = 0; 792 + struct extent_map *em; 793 + u64 len = PAGE_CACHE_SIZE; 831 794 832 795 /* 833 796 * hopefully we have this extent in the tree already, try without ··· 808 843 unlock_extent(io_tree, start, start + len - 1); 809 844 810 
845 if (IS_ERR(em)) 811 - return 0; 846 + return NULL; 812 847 } 848 + 849 + return em; 850 + } 851 + 852 + static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em) 853 + { 854 + struct extent_map *next; 855 + bool ret = true; 856 + 857 + /* this is the last extent */ 858 + if (em->start + em->len >= i_size_read(inode)) 859 + return false; 860 + 861 + next = defrag_lookup_extent(inode, em->start + em->len); 862 + if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE) 863 + ret = false; 864 + 865 + free_extent_map(next); 866 + return ret; 867 + } 868 + 869 + static int should_defrag_range(struct inode *inode, u64 start, int thresh, 870 + u64 *last_len, u64 *skip, u64 *defrag_end) 871 + { 872 + struct extent_map *em; 873 + int ret = 1; 874 + bool next_mergeable = true; 875 + 876 + /* 877 + * make sure that once we start defragging an extent, we keep on 878 + * defragging it 879 + */ 880 + if (start < *defrag_end) 881 + return 1; 882 + 883 + *skip = 0; 884 + 885 + em = defrag_lookup_extent(inode, start); 886 + if (!em) 887 + return 0; 813 888 814 889 /* this will cover holes, and inline extents */ 815 890 if (em->block_start >= EXTENT_MAP_LAST_BYTE) { ··· 857 852 goto out; 858 853 } 859 854 860 - /* If we have nothing to merge with us, just skip. */ 861 - if (check_adjacent_extents(inode, em)) { 862 - ret = 0; 863 - goto out; 864 - } 855 + next_mergeable = defrag_check_next_extent(inode, em); 865 856 866 857 /* 867 - * we hit a real extent, if it is big don't bother defragging it again 858 + * we hit a real extent, if it is big or the next extent is not a 859 + * real extent, don't bother defragging it 868 860 */ 869 - if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh) 861 + if ((*last_len == 0 || *last_len >= thresh) && 862 + (em->len >= thresh || !next_mergeable)) 870 863 ret = 0; 871 - 872 864 out: 873 865 /* 874 866 * last_len ends up being a counter of how many bytes we've defragged. 
··· 1144 1142 break; 1145 1143 1146 1144 if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, 1147 - PAGE_CACHE_SIZE, extent_thresh, 1148 - &last_len, &skip, &defrag_end)) { 1145 + extent_thresh, &last_len, &skip, 1146 + &defrag_end)) { 1149 1147 unsigned long next; 1150 1148 /* 1151 1149 * the should_defrag function tells us how much to skip ··· 1306 1304 ret = -EINVAL; 1307 1305 goto out_free; 1308 1306 } 1307 + if (device->fs_devices && device->fs_devices->seeding) { 1308 + printk(KERN_INFO "btrfs: resizer unable to apply on " 1309 + "seeding device %llu\n", 1310 + (unsigned long long)devid); 1311 + ret = -EINVAL; 1312 + goto out_free; 1313 + } 1314 + 1309 1315 if (!strcmp(sizestr, "max")) 1310 1316 new_size = device->bdev->bd_inode->i_size; 1311 1317 else { ··· 1355 1345 do_div(new_size, root->sectorsize); 1356 1346 new_size *= root->sectorsize; 1357 1347 1358 - printk(KERN_INFO "btrfs: new size for %s is %llu\n", 1359 - device->name, (unsigned long long)new_size); 1348 + printk_in_rcu(KERN_INFO "btrfs: new size for %s is %llu\n", 1349 + rcu_str_deref(device->name), 1350 + (unsigned long long)new_size); 1360 1351 1361 1352 if (new_size > old_size) { 1362 1353 trans = btrfs_start_transaction(root, 0); ··· 2275 2264 di_args->total_bytes = dev->total_bytes; 2276 2265 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); 2277 2266 if (dev->name) { 2278 - strncpy(di_args->path, dev->name, sizeof(di_args->path)); 2267 + struct rcu_string *name; 2268 + 2269 + rcu_read_lock(); 2270 + name = rcu_dereference(dev->name); 2271 + strncpy(di_args->path, name->str, sizeof(di_args->path)); 2272 + rcu_read_unlock(); 2279 2273 di_args->path[sizeof(di_args->path) - 1] = 0; 2280 2274 } else { 2281 2275 di_args->path[0] = '\0';
+21 -1
fs/btrfs/ordered-data.c
··· 627 627 /* start IO across the range first to instantiate any delalloc 628 628 * extents 629 629 */ 630 - filemap_write_and_wait_range(inode->i_mapping, start, orig_end); 630 + filemap_fdatawrite_range(inode->i_mapping, start, orig_end); 631 + 632 + /* 633 + * So with compression we will find and lock a dirty page and clear the 634 + * first one as dirty, setup an async extent, and immediately return 635 + * with the entire range locked but with nobody actually marked with 636 + * writeback. So we can't just filemap_write_and_wait_range() and 637 + * expect it to work since it will just kick off a thread to do the 638 + * actual work. So we need to call filemap_fdatawrite_range _again_ 639 + * since it will wait on the page lock, which won't be unlocked until 640 + * after the pages have been marked as writeback and so we're good to go 641 + * from there. We have to do this otherwise we'll miss the ordered 642 + * extents and that results in badness. Please Josef, do not think you 643 + * know better and pull this out at some point in the future, it is 644 + * right and you are wrong. 645 + */ 646 + if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, 647 + &BTRFS_I(inode)->runtime_flags)) 648 + filemap_fdatawrite_range(inode->i_mapping, start, orig_end); 649 + 650 + filemap_fdatawait_range(inode->i_mapping, start, orig_end); 631 651 632 652 end = orig_end; 633 653 found = 0;
+56
fs/btrfs/rcu-string.h
··· 1 + /* 2 + * Copyright (C) 2012 Red Hat. All rights reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public 6 + * License v2 as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 + * General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public 14 + * License along with this program; if not, write to the 15 + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 + * Boston, MA 021110-1307, USA. 17 + */ 18 + 19 + struct rcu_string { 20 + struct rcu_head rcu; 21 + char str[0]; 22 + }; 23 + 24 + static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask) 25 + { 26 + size_t len = strlen(src) + 1; 27 + struct rcu_string *ret = kzalloc(sizeof(struct rcu_string) + 28 + (len * sizeof(char)), mask); 29 + if (!ret) 30 + return ret; 31 + strncpy(ret->str, src, len); 32 + return ret; 33 + } 34 + 35 + static inline void rcu_string_free(struct rcu_string *str) 36 + { 37 + if (str) 38 + kfree_rcu(str, rcu); 39 + } 40 + 41 + #define printk_in_rcu(fmt, ...) do { \ 42 + rcu_read_lock(); \ 43 + printk(fmt, __VA_ARGS__); \ 44 + rcu_read_unlock(); \ 45 + } while (0) 46 + 47 + #define printk_ratelimited_in_rcu(fmt, ...) do { \ 48 + rcu_read_lock(); \ 49 + printk_ratelimited(fmt, __VA_ARGS__); \ 50 + rcu_read_unlock(); \ 51 + } while (0) 52 + 53 + #define rcu_str_deref(rcu_str) ({ \ 54 + struct rcu_string *__str = rcu_dereference(rcu_str); \ 55 + __str->str; \ 56 + })
+18 -12
fs/btrfs/scrub.c
··· 26 26 #include "backref.h" 27 27 #include "extent_io.h" 28 28 #include "check-integrity.h" 29 + #include "rcu-string.h" 29 30 30 31 /* 31 32 * This is only the first step towards a full-features scrub. It reads all ··· 321 320 * hold all of the paths here 322 321 */ 323 322 for (i = 0; i < ipath->fspath->elem_cnt; ++i) 324 - printk(KERN_WARNING "btrfs: %s at logical %llu on dev " 323 + printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev " 325 324 "%s, sector %llu, root %llu, inode %llu, offset %llu, " 326 325 "length %llu, links %u (path: %s)\n", swarn->errstr, 327 - swarn->logical, swarn->dev->name, 326 + swarn->logical, rcu_str_deref(swarn->dev->name), 328 327 (unsigned long long)swarn->sector, root, inum, offset, 329 328 min(isize - offset, (u64)PAGE_SIZE), nlink, 330 329 (char *)(unsigned long)ipath->fspath->val[i]); ··· 333 332 return 0; 334 333 335 334 err: 336 - printk(KERN_WARNING "btrfs: %s at logical %llu on dev " 335 + printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev " 337 336 "%s, sector %llu, root %llu, inode %llu, offset %llu: path " 338 337 "resolving failed with ret=%d\n", swarn->errstr, 339 - swarn->logical, swarn->dev->name, 338 + swarn->logical, rcu_str_deref(swarn->dev->name), 340 339 (unsigned long long)swarn->sector, root, inum, offset, ret); 341 340 342 341 free_ipath(ipath); ··· 391 390 do { 392 391 ret = tree_backref_for_extent(&ptr, eb, ei, item_size, 393 392 &ref_root, &ref_level); 394 - printk(KERN_WARNING 393 + printk_in_rcu(KERN_WARNING 395 394 "btrfs: %s at logical %llu on dev %s, " 396 395 "sector %llu: metadata %s (level %d) in tree " 397 - "%llu\n", errstr, swarn.logical, dev->name, 396 + "%llu\n", errstr, swarn.logical, 397 + rcu_str_deref(dev->name), 398 398 (unsigned long long)swarn.sector, 399 399 ref_level ? "node" : "leaf", 400 400 ret < 0 ? 
-1 : ref_level, ··· 582 580 spin_lock(&sdev->stat_lock); 583 581 ++sdev->stat.uncorrectable_errors; 584 582 spin_unlock(&sdev->stat_lock); 585 - printk_ratelimited(KERN_ERR 583 + 584 + printk_ratelimited_in_rcu(KERN_ERR 586 585 "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n", 587 - (unsigned long long)fixup->logical, sdev->dev->name); 586 + (unsigned long long)fixup->logical, 587 + rcu_str_deref(sdev->dev->name)); 588 588 } 589 589 590 590 btrfs_free_path(path); ··· 940 936 spin_lock(&sdev->stat_lock); 941 937 sdev->stat.corrected_errors++; 942 938 spin_unlock(&sdev->stat_lock); 943 - printk_ratelimited(KERN_ERR 939 + printk_ratelimited_in_rcu(KERN_ERR 944 940 "btrfs: fixed up error at logical %llu on dev %s\n", 945 - (unsigned long long)logical, sdev->dev->name); 941 + (unsigned long long)logical, 942 + rcu_str_deref(sdev->dev->name)); 946 943 } 947 944 } else { 948 945 did_not_correct_error: 949 946 spin_lock(&sdev->stat_lock); 950 947 sdev->stat.uncorrectable_errors++; 951 948 spin_unlock(&sdev->stat_lock); 952 - printk_ratelimited(KERN_ERR 949 + printk_ratelimited_in_rcu(KERN_ERR 953 950 "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n", 954 - (unsigned long long)logical, sdev->dev->name); 951 + (unsigned long long)logical, 952 + rcu_str_deref(sdev->dev->name)); 955 953 } 956 954 957 955 out:
+33
fs/btrfs/super.c
··· 54 54 #include "version.h" 55 55 #include "export.h" 56 56 #include "compression.h" 57 + #include "rcu-string.h" 57 58 58 59 #define CREATE_TRACE_POINTS 59 60 #include <trace/events/btrfs.h> ··· 1483 1482 "error %d\n", btrfs_ino(inode), ret); 1484 1483 } 1485 1484 1485 + static int btrfs_show_devname(struct seq_file *m, struct dentry *root) 1486 + { 1487 + struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); 1488 + struct btrfs_fs_devices *cur_devices; 1489 + struct btrfs_device *dev, *first_dev = NULL; 1490 + struct list_head *head; 1491 + struct rcu_string *name; 1492 + 1493 + mutex_lock(&fs_info->fs_devices->device_list_mutex); 1494 + cur_devices = fs_info->fs_devices; 1495 + while (cur_devices) { 1496 + head = &cur_devices->devices; 1497 + list_for_each_entry(dev, head, dev_list) { 1498 + if (!first_dev || dev->devid < first_dev->devid) 1499 + first_dev = dev; 1500 + } 1501 + cur_devices = cur_devices->seed; 1502 + } 1503 + 1504 + if (first_dev) { 1505 + rcu_read_lock(); 1506 + name = rcu_dereference(first_dev->name); 1507 + seq_escape(m, name->str, " \t\n\\"); 1508 + rcu_read_unlock(); 1509 + } else { 1510 + WARN_ON(1); 1511 + } 1512 + mutex_unlock(&fs_info->fs_devices->device_list_mutex); 1513 + return 0; 1514 + } 1515 + 1486 1516 static const struct super_operations btrfs_super_ops = { 1487 1517 .drop_inode = btrfs_drop_inode, 1488 1518 .evict_inode = btrfs_evict_inode, 1489 1519 .put_super = btrfs_put_super, 1490 1520 .sync_fs = btrfs_sync_fs, 1491 1521 .show_options = btrfs_show_options, 1522 + .show_devname = btrfs_show_devname, 1492 1523 .write_inode = btrfs_write_inode, 1493 1524 .dirty_inode = btrfs_fs_dirty_inode, 1494 1525 .alloc_inode = btrfs_alloc_inode,
+12 -2
fs/btrfs/transaction.c
··· 100 100 kmem_cache_free(btrfs_transaction_cachep, cur_trans); 101 101 cur_trans = fs_info->running_transaction; 102 102 goto loop; 103 + } else if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { 104 + spin_unlock(&root->fs_info->trans_lock); 105 + kmem_cache_free(btrfs_transaction_cachep, cur_trans); 106 + return -EROFS; 103 107 } 104 108 105 109 atomic_set(&cur_trans->num_writers, 1); ··· 1217 1213 1218 1214 1219 1215 static void cleanup_transaction(struct btrfs_trans_handle *trans, 1220 - struct btrfs_root *root) 1216 + struct btrfs_root *root, int err) 1221 1217 { 1222 1218 struct btrfs_transaction *cur_trans = trans->transaction; 1223 1219 1224 1220 WARN_ON(trans->use_count > 1); 1225 1221 1222 + btrfs_abort_transaction(trans, root, err); 1223 + 1226 1224 spin_lock(&root->fs_info->trans_lock); 1227 1225 list_del_init(&cur_trans->list); 1226 + if (cur_trans == root->fs_info->running_transaction) { 1227 + root->fs_info->running_transaction = NULL; 1228 + root->fs_info->trans_no_join = 0; 1229 + } 1228 1230 spin_unlock(&root->fs_info->trans_lock); 1229 1231 1230 1232 btrfs_cleanup_one_transaction(trans->transaction, root); ··· 1536 1526 // WARN_ON(1); 1537 1527 if (current->journal_info == trans) 1538 1528 current->journal_info = NULL; 1539 - cleanup_transaction(trans, root); 1529 + cleanup_transaction(trans, root, ret); 1540 1530 1541 1531 return ret; 1542 1532 }
+58 -34
fs/btrfs/volumes.c
··· 35 35 #include "volumes.h" 36 36 #include "async-thread.h" 37 37 #include "check-integrity.h" 38 + #include "rcu-string.h" 38 39 39 40 static int init_first_rw_device(struct btrfs_trans_handle *trans, 40 41 struct btrfs_root *root, ··· 65 64 device = list_entry(fs_devices->devices.next, 66 65 struct btrfs_device, dev_list); 67 66 list_del(&device->dev_list); 68 - kfree(device->name); 67 + rcu_string_free(device->name); 69 68 kfree(device); 70 69 } 71 70 kfree(fs_devices); ··· 335 334 { 336 335 struct btrfs_device *device; 337 336 struct btrfs_fs_devices *fs_devices; 337 + struct rcu_string *name; 338 338 u64 found_transid = btrfs_super_generation(disk_super); 339 - char *name; 340 339 341 340 fs_devices = find_fsid(disk_super->fsid); 342 341 if (!fs_devices) { ··· 370 369 memcpy(device->uuid, disk_super->dev_item.uuid, 371 370 BTRFS_UUID_SIZE); 372 371 spin_lock_init(&device->io_lock); 373 - device->name = kstrdup(path, GFP_NOFS); 374 - if (!device->name) { 372 + 373 + name = rcu_string_strdup(path, GFP_NOFS); 374 + if (!name) { 375 375 kfree(device); 376 376 return -ENOMEM; 377 377 } 378 + rcu_assign_pointer(device->name, name); 378 379 INIT_LIST_HEAD(&device->dev_alloc_list); 379 380 380 381 /* init readahead state */ ··· 393 390 394 391 device->fs_devices = fs_devices; 395 392 fs_devices->num_devices++; 396 - } else if (!device->name || strcmp(device->name, path)) { 397 - name = kstrdup(path, GFP_NOFS); 393 + } else if (!device->name || strcmp(device->name->str, path)) { 394 + name = rcu_string_strdup(path, GFP_NOFS); 398 395 if (!name) 399 396 return -ENOMEM; 400 - kfree(device->name); 401 - device->name = name; 397 + rcu_string_free(device->name); 398 + rcu_assign_pointer(device->name, name); 402 399 if (device->missing) { 403 400 fs_devices->missing_devices--; 404 401 device->missing = 0; ··· 433 430 434 431 /* We have held the volume lock, it is safe to get the devices. 
*/ 435 432 list_for_each_entry(orig_dev, &orig->devices, dev_list) { 433 + struct rcu_string *name; 434 + 436 435 device = kzalloc(sizeof(*device), GFP_NOFS); 437 436 if (!device) 438 437 goto error; 439 438 440 - device->name = kstrdup(orig_dev->name, GFP_NOFS); 441 - if (!device->name) { 439 + /* 440 + * This is ok to do without rcu read locked because we hold the 441 + * uuid mutex so nothing we touch in here is going to disappear. 442 + */ 443 + name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS); 444 + if (!name) { 442 445 kfree(device); 443 446 goto error; 444 447 } 448 + rcu_assign_pointer(device->name, name); 445 449 446 450 device->devid = orig_dev->devid; 447 451 device->work.func = pending_bios_fn; ··· 501 491 } 502 492 list_del_init(&device->dev_list); 503 493 fs_devices->num_devices--; 504 - kfree(device->name); 494 + rcu_string_free(device->name); 505 495 kfree(device); 506 496 } 507 497 ··· 526 516 if (device->bdev) 527 517 blkdev_put(device->bdev, device->mode); 528 518 529 - kfree(device->name); 519 + rcu_string_free(device->name); 530 520 kfree(device); 531 521 } 532 522 ··· 550 540 mutex_lock(&fs_devices->device_list_mutex); 551 541 list_for_each_entry(device, &fs_devices->devices, dev_list) { 552 542 struct btrfs_device *new_device; 543 + struct rcu_string *name; 553 544 554 545 if (device->bdev) 555 546 fs_devices->open_devices--; ··· 566 555 new_device = kmalloc(sizeof(*new_device), GFP_NOFS); 567 556 BUG_ON(!new_device); /* -ENOMEM */ 568 557 memcpy(new_device, device, sizeof(*new_device)); 569 - new_device->name = kstrdup(device->name, GFP_NOFS); 570 - BUG_ON(device->name && !new_device->name); /* -ENOMEM */ 558 + 559 + /* Safe because we are under uuid_mutex */ 560 + name = rcu_string_strdup(device->name->str, GFP_NOFS); 561 + BUG_ON(device->name && !name); /* -ENOMEM */ 562 + rcu_assign_pointer(new_device->name, name); 571 563 new_device->bdev = NULL; 572 564 new_device->writeable = 0; 573 565 new_device->in_fs_metadata = 0; ··· 635 
621 if (!device->name) 636 622 continue; 637 623 638 - bdev = blkdev_get_by_path(device->name, flags, holder); 624 + bdev = blkdev_get_by_path(device->name->str, flags, holder); 639 625 if (IS_ERR(bdev)) { 640 - printk(KERN_INFO "open %s failed\n", device->name); 626 + printk(KERN_INFO "open %s failed\n", device->name->str); 641 627 goto error; 642 628 } 643 629 filemap_write_and_wait(bdev->bd_inode->i_mapping); ··· 1646 1632 struct block_device *bdev; 1647 1633 struct list_head *devices; 1648 1634 struct super_block *sb = root->fs_info->sb; 1635 + struct rcu_string *name; 1649 1636 u64 total_bytes; 1650 1637 int seeding_dev = 0; 1651 1638 int ret = 0; ··· 1686 1671 goto error; 1687 1672 } 1688 1673 1689 - device->name = kstrdup(device_path, GFP_NOFS); 1690 - if (!device->name) { 1674 + name = rcu_string_strdup(device_path, GFP_NOFS); 1675 + if (!name) { 1691 1676 kfree(device); 1692 1677 ret = -ENOMEM; 1693 1678 goto error; 1694 1679 } 1680 + rcu_assign_pointer(device->name, name); 1695 1681 1696 1682 ret = find_next_devid(root, &device->devid); 1697 1683 if (ret) { 1698 - kfree(device->name); 1684 + rcu_string_free(device->name); 1699 1685 kfree(device); 1700 1686 goto error; 1701 1687 } 1702 1688 1703 1689 trans = btrfs_start_transaction(root, 0); 1704 1690 if (IS_ERR(trans)) { 1705 - kfree(device->name); 1691 + rcu_string_free(device->name); 1706 1692 kfree(device); 1707 1693 ret = PTR_ERR(trans); 1708 1694 goto error; ··· 1812 1796 unlock_chunks(root); 1813 1797 btrfs_abort_transaction(trans, root, ret); 1814 1798 btrfs_end_transaction(trans, root); 1815 - kfree(device->name); 1799 + rcu_string_free(device->name); 1816 1800 kfree(device); 1817 1801 error: 1818 1802 blkdev_put(bdev, FMODE_EXCL); ··· 4220 4204 bio->bi_sector = bbio->stripes[dev_nr].physical >> 9; 4221 4205 dev = bbio->stripes[dev_nr].dev; 4222 4206 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) { 4207 + #ifdef DEBUG 4208 + struct rcu_string *name; 4209 + 4210 + rcu_read_lock(); 4211 + 
name = rcu_dereference(dev->name); 4223 4212 pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu " 4224 4213 "(%s id %llu), size=%u\n", rw, 4225 4214 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev, 4226 - dev->name, dev->devid, bio->bi_size); 4215 + name->str, dev->devid, bio->bi_size); 4216 + rcu_read_unlock(); 4217 + #endif 4227 4218 bio->bi_bdev = dev->bdev; 4228 4219 if (async_submit) 4229 4220 schedule_bio(root, dev, rw, bio); ··· 4717 4694 key.offset = device->devid; 4718 4695 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); 4719 4696 if (ret) { 4720 - printk(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n", 4721 - device->name, (unsigned long long)device->devid); 4697 + printk_in_rcu(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n", 4698 + rcu_str_deref(device->name), 4699 + (unsigned long long)device->devid); 4722 4700 __btrfs_reset_dev_stats(device); 4723 4701 device->dev_stats_valid = 1; 4724 4702 btrfs_release_path(path); ··· 4771 4747 BUG_ON(!path); 4772 4748 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); 4773 4749 if (ret < 0) { 4774 - printk(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n", 4775 - ret, device->name); 4750 + printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n", 4751 + ret, rcu_str_deref(device->name)); 4776 4752 goto out; 4777 4753 } 4778 4754 ··· 4781 4757 /* need to delete old one and insert a new one */ 4782 4758 ret = btrfs_del_item(trans, dev_root, path); 4783 4759 if (ret != 0) { 4784 - printk(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n", 4785 - device->name, ret); 4760 + printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n", 4761 + rcu_str_deref(device->name), ret); 4786 4762 goto out; 4787 4763 } 4788 4764 ret = 1; ··· 4794 4770 ret = 
btrfs_insert_empty_item(trans, dev_root, path, 4795 4771 &key, sizeof(*ptr)); 4796 4772 if (ret < 0) { 4797 - printk(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n", 4798 - device->name, ret); 4773 + printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n", 4774 + rcu_str_deref(device->name), ret); 4799 4775 goto out; 4800 4776 } 4801 4777 } ··· 4847 4823 { 4848 4824 if (!dev->dev_stats_valid) 4849 4825 return; 4850 - printk_ratelimited(KERN_ERR 4826 + printk_ratelimited_in_rcu(KERN_ERR 4851 4827 "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", 4852 - dev->name, 4828 + rcu_str_deref(dev->name), 4853 4829 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 4854 4830 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 4855 4831 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), ··· 4861 4837 4862 4838 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) 4863 4839 { 4864 - printk(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", 4865 - dev->name, 4840 + printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", 4841 + rcu_str_deref(dev->name), 4866 4842 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), 4867 4843 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), 4868 4844 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
+1 -1
fs/btrfs/volumes.h
··· 58 58 /* the mode sent to blkdev_get */ 59 59 fmode_t mode; 60 60 61 - char *name; 61 + struct rcu_string *name; 62 62 63 63 /* the internal btrfs device id */ 64 64 u64 devid;
+10 -6
fs/dcache.c
··· 683 683 /** 684 684 * d_find_alias - grab a hashed alias of inode 685 685 * @inode: inode in question 686 + * @want_discon: flag, used by d_splice_alias, to request 687 + * that only a DISCONNECTED alias be returned. 686 688 * 687 689 * If inode has a hashed alias, or is a directory and has any alias, 688 690 * acquire the reference to alias and return it. Otherwise return NULL. ··· 693 691 * of a filesystem. 694 692 * 695 693 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer 696 - * any other hashed alias over that. 694 + * any other hashed alias over that one unless @want_discon is set, 695 + * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. 697 696 */ 698 - static struct dentry *__d_find_alias(struct inode *inode) 697 + static struct dentry *__d_find_alias(struct inode *inode, int want_discon) 699 698 { 700 699 struct dentry *alias, *discon_alias; 701 700 ··· 708 705 if (IS_ROOT(alias) && 709 706 (alias->d_flags & DCACHE_DISCONNECTED)) { 710 707 discon_alias = alias; 711 - } else { 708 + } else if (!want_discon) { 712 709 __dget_dlock(alias); 713 710 spin_unlock(&alias->d_lock); 714 711 return alias; ··· 739 736 740 737 if (!list_empty(&inode->i_dentry)) { 741 738 spin_lock(&inode->i_lock); 742 - de = __d_find_alias(inode); 739 + de = __d_find_alias(inode, 0); 743 740 spin_unlock(&inode->i_lock); 744 741 } 745 742 return de; ··· 1650 1647 1651 1648 if (inode && S_ISDIR(inode->i_mode)) { 1652 1649 spin_lock(&inode->i_lock); 1653 - new = __d_find_any_alias(inode); 1650 + new = __d_find_alias(inode, 1); 1654 1651 if (new) { 1652 + BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); 1655 1653 spin_unlock(&inode->i_lock); 1656 1654 security_d_instantiate(new, inode); 1657 1655 d_move(new, dentry); ··· 2482 2478 struct dentry *alias; 2483 2479 2484 2480 /* Does an aliased dentry already exist? 
*/ 2485 - alias = __d_find_alias(inode); 2481 + alias = __d_find_alias(inode, 0); 2486 2482 if (alias) { 2487 2483 actual = alias; 2488 2484 write_seqlock(&rename_lock);
+1 -1
fs/exofs/sys.c
··· 109 109 static struct kobj_type uuid_ktype = { 110 110 }; 111 111 112 - void exofs_sysfs_dbg_print() 112 + void exofs_sysfs_dbg_print(void) 113 113 { 114 114 #ifdef CONFIG_EXOFS_DEBUG 115 115 struct kobject *k_name, *k_tmp;
+4 -4
fs/ext4/balloc.c
··· 90 90 * unusual file system layouts. 91 91 */ 92 92 if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) { 93 - block_cluster = EXT4_B2C(sbi, (start - 94 - ext4_block_bitmap(sb, gdp))); 93 + block_cluster = EXT4_B2C(sbi, 94 + ext4_block_bitmap(sb, gdp) - start); 95 95 if (block_cluster < num_clusters) 96 96 block_cluster = -1; 97 97 else if (block_cluster == num_clusters) { ··· 102 102 103 103 if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) { 104 104 inode_cluster = EXT4_B2C(sbi, 105 - start - ext4_inode_bitmap(sb, gdp)); 105 + ext4_inode_bitmap(sb, gdp) - start); 106 106 if (inode_cluster < num_clusters) 107 107 inode_cluster = -1; 108 108 else if (inode_cluster == num_clusters) { ··· 114 114 itbl_blk = ext4_inode_table(sb, gdp); 115 115 for (i = 0; i < sbi->s_itb_per_group; i++) { 116 116 if (ext4_block_in_group(sb, itbl_blk + i, block_group)) { 117 - c = EXT4_B2C(sbi, start - itbl_blk + i); 117 + c = EXT4_B2C(sbi, itbl_blk + i - start); 118 118 if ((c < num_clusters) || (c == inode_cluster) || 119 119 (c == block_cluster) || (c == itbl_cluster)) 120 120 continue;
-1
fs/ext4/ioctl.c
··· 123 123 else 124 124 ext4_clear_inode_flag(inode, i); 125 125 } 126 - ei->i_flags = flags; 127 126 128 127 ext4_set_inode_flags(inode); 129 128 inode->i_ctime = ext4_current_time(inode);
+1
fs/fs-writeback.c
··· 664 664 /* Wait for I_SYNC. This function drops i_lock... */ 665 665 inode_sleep_on_writeback(inode); 666 666 /* Inode may be gone, start again */ 667 + spin_lock(&wb->list_lock); 667 668 continue; 668 669 } 669 670 inode->i_state |= I_SYNC;
+5 -6
fs/nfs/callback.c
··· 17 17 #include <linux/kthread.h> 18 18 #include <linux/sunrpc/svcauth_gss.h> 19 19 #include <linux/sunrpc/bc_xprt.h> 20 - #include <linux/nsproxy.h> 21 20 22 21 #include <net/inet_sock.h> 23 22 ··· 106 107 { 107 108 int ret; 108 109 109 - ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET, 110 + ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET, 110 111 nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); 111 112 if (ret <= 0) 112 113 goto out_err; ··· 114 115 dprintk("NFS: Callback listener port = %u (af %u)\n", 115 116 nfs_callback_tcpport, PF_INET); 116 117 117 - ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET6, 118 + ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET6, 118 119 nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); 119 120 if (ret > 0) { 120 121 nfs_callback_tcpport6 = ret; ··· 183 184 * fore channel connection. 184 185 * Returns the input port (0) and sets the svc_serv bc_xprt on success 185 186 */ 186 - ret = svc_create_xprt(serv, "tcp-bc", xprt->xprt_net, PF_INET, 0, 187 + ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0, 187 188 SVC_SOCK_ANONYMOUS); 188 189 if (ret < 0) { 189 190 rqstp = ERR_PTR(ret); ··· 253 254 char svc_name[12]; 254 255 int ret = 0; 255 256 int minorversion_setup; 256 - struct net *net = current->nsproxy->net_ns; 257 + struct net *net = &init_net; 257 258 258 259 mutex_lock(&nfs_callback_mutex); 259 260 if (cb_info->users++ || cb_info->task != NULL) { ··· 329 330 cb_info->users--; 330 331 if (cb_info->users == 0 && cb_info->task != NULL) { 331 332 kthread_stop(cb_info->task); 332 - svc_shutdown_net(cb_info->serv, current->nsproxy->net_ns); 333 + svc_shutdown_net(cb_info->serv, &init_net); 333 334 svc_exit_thread(cb_info->rqst); 334 335 cb_info->serv = NULL; 335 336 cb_info->rqst = NULL;
+4 -4
fs/nfs/callback_xdr.c
··· 455 455 args->csa_nrclists = ntohl(*p++); 456 456 args->csa_rclists = NULL; 457 457 if (args->csa_nrclists) { 458 - args->csa_rclists = kmalloc(args->csa_nrclists * 459 - sizeof(*args->csa_rclists), 460 - GFP_KERNEL); 458 + args->csa_rclists = kmalloc_array(args->csa_nrclists, 459 + sizeof(*args->csa_rclists), 460 + GFP_KERNEL); 461 461 if (unlikely(args->csa_rclists == NULL)) 462 462 goto out; 463 463 ··· 696 696 const struct cb_sequenceres *res) 697 697 { 698 698 __be32 *p; 699 - unsigned status = res->csr_status; 699 + __be32 status = res->csr_status; 700 700 701 701 if (unlikely(status != 0)) 702 702 goto out;
-2
fs/nfs/client.c
··· 544 544 545 545 smp_rmb(); 546 546 547 - BUG_ON(clp->cl_cons_state != NFS_CS_READY); 548 - 549 547 dprintk("<-- %s found nfs_client %p for %s\n", 550 548 __func__, clp, cl_init->hostname ?: ""); 551 549 return clp;
+4 -4
fs/nfs/direct.c
··· 523 523 nfs_list_remove_request(req); 524 524 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) { 525 525 /* Note the rewrite will go through mds */ 526 - kref_get(&req->wb_kref); 527 526 nfs_mark_request_commit(req, NULL, &cinfo); 528 - } 527 + } else 528 + nfs_release_request(req); 529 529 nfs_unlock_and_release_request(req); 530 530 } 531 531 ··· 716 716 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) 717 717 bit = NFS_IOHDR_NEED_RESCHED; 718 718 else if (dreq->flags == 0) { 719 - memcpy(&dreq->verf, &req->wb_verf, 719 + memcpy(&dreq->verf, hdr->verf, 720 720 sizeof(dreq->verf)); 721 721 bit = NFS_IOHDR_NEED_COMMIT; 722 722 dreq->flags = NFS_ODIRECT_DO_COMMIT; 723 723 } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) { 724 - if (memcmp(&dreq->verf, &req->wb_verf, sizeof(dreq->verf))) { 724 + if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) { 725 725 dreq->flags = NFS_ODIRECT_RESCHED_WRITES; 726 726 bit = NFS_IOHDR_NEED_RESCHED; 727 727 } else
+1 -1
fs/nfs/nfs4_fs.h
··· 295 295 296 296 extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[]; 297 297 298 - extern const u32 nfs4_fattr_bitmap[2]; 298 + extern const u32 nfs4_fattr_bitmap[3]; 299 299 extern const u32 nfs4_statfs_bitmap[2]; 300 300 extern const u32 nfs4_pathconf_bitmap[2]; 301 301 extern const u32 nfs4_fsinfo_bitmap[3];
+34 -8
fs/nfs/nfs4proc.c
··· 105 105 return -EINVAL; 106 106 case -NFS4ERR_SHARE_DENIED: 107 107 return -EACCES; 108 + case -NFS4ERR_MINOR_VERS_MISMATCH: 109 + return -EPROTONOSUPPORT; 108 110 default: 109 111 dprintk("%s could not handle NFSv4 error %d\n", 110 112 __func__, -err); ··· 118 116 /* 119 117 * This is our standard bitmap for GETATTR requests. 120 118 */ 121 - const u32 nfs4_fattr_bitmap[2] = { 119 + const u32 nfs4_fattr_bitmap[3] = { 122 120 FATTR4_WORD0_TYPE 123 121 | FATTR4_WORD0_CHANGE 124 122 | FATTR4_WORD0_SIZE ··· 133 131 | FATTR4_WORD1_TIME_ACCESS 134 132 | FATTR4_WORD1_TIME_METADATA 135 133 | FATTR4_WORD1_TIME_MODIFY 134 + }; 135 + 136 + static const u32 nfs4_pnfs_open_bitmap[3] = { 137 + FATTR4_WORD0_TYPE 138 + | FATTR4_WORD0_CHANGE 139 + | FATTR4_WORD0_SIZE 140 + | FATTR4_WORD0_FSID 141 + | FATTR4_WORD0_FILEID, 142 + FATTR4_WORD1_MODE 143 + | FATTR4_WORD1_NUMLINKS 144 + | FATTR4_WORD1_OWNER 145 + | FATTR4_WORD1_OWNER_GROUP 146 + | FATTR4_WORD1_RAWDEV 147 + | FATTR4_WORD1_SPACE_USED 148 + | FATTR4_WORD1_TIME_ACCESS 149 + | FATTR4_WORD1_TIME_METADATA 150 + | FATTR4_WORD1_TIME_MODIFY, 151 + FATTR4_WORD2_MDSTHRESHOLD 136 152 }; 137 153 138 154 const u32 nfs4_statfs_bitmap[2] = { ··· 864 844 p->o_arg.name = &dentry->d_name; 865 845 p->o_arg.server = server; 866 846 p->o_arg.bitmask = server->attr_bitmask; 847 + p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; 867 848 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; 868 849 if (attrs != NULL && attrs->ia_valid != 0) { 869 850 __be32 verf[2]; ··· 1841 1820 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 1842 1821 if (!opendata->f_attr.mdsthreshold) 1843 1822 goto err_opendata_put; 1823 + opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 1844 1824 } 1845 1825 if (dentry->d_inode != NULL) 1846 1826 opendata->state = nfs4_get_open_state(dentry->d_inode, sp); ··· 1902 1880 struct nfs4_state *res; 1903 1881 int status; 1904 1882 1883 + fmode &= FMODE_READ|FMODE_WRITE; 1905 1884 do { 1906 1885 status = _nfs4_do_open(dir, 
dentry, fmode, flags, sattr, cred, 1907 1886 &res, ctx_th); ··· 2549 2526 2550 2527 nfs_fattr_init(fattr); 2551 2528 2529 + /* Deal with open(O_TRUNC) */ 2530 + if (sattr->ia_valid & ATTR_OPEN) 2531 + sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); 2532 + 2533 + /* Optimization: if the end result is no change, don't RPC */ 2534 + if ((sattr->ia_valid & ~(ATTR_FILE)) == 0) 2535 + return 0; 2536 + 2552 2537 /* Search for an existing open(O_WRITE) file */ 2553 2538 if (sattr->ia_valid & ATTR_FILE) { 2554 2539 struct nfs_open_context *ctx; ··· 2567 2536 state = ctx->state; 2568 2537 } 2569 2538 } 2570 - 2571 - /* Deal with open(O_TRUNC) */ 2572 - if (sattr->ia_valid & ATTR_OPEN) 2573 - sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); 2574 2539 2575 2540 status = nfs4_do_setattr(inode, cred, fattr, sattr, state); 2576 2541 if (status == 0) ··· 5302 5275 5303 5276 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5304 5277 if (status) 5305 - pr_warn("NFS: Got error %d from the server %s on " 5278 + dprintk("NFS: Got error %d from the server %s on " 5306 5279 "DESTROY_CLIENTID.", status, clp->cl_hostname); 5307 5280 return status; 5308 5281 } ··· 5773 5746 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5774 5747 5775 5748 if (status) 5776 - printk(KERN_WARNING 5777 - "NFS: Got error %d from the server on DESTROY_SESSION. " 5749 + dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 5778 5750 "Session has been destroyed regardless...\n", status); 5779 5751 5780 5752 dprintk("<-- nfs4_proc_destroy_session\n");
+12 -10
fs/nfs/nfs4state.c
··· 244 244 return nfs4_wait_on_slot_tbl(&ses->fc_slot_table); 245 245 } 246 246 247 + static void nfs41_finish_session_reset(struct nfs_client *clp) 248 + { 249 + clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 250 + clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); 251 + /* create_session negotiated new slot table */ 252 + clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); 253 + clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); 254 + nfs41_setup_state_renewal(clp); 255 + } 256 + 247 257 int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) 248 258 { 249 259 int status; ··· 269 259 status = nfs4_proc_create_session(clp, cred); 270 260 if (status != 0) 271 261 goto out; 272 - clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 273 - nfs41_setup_state_renewal(clp); 262 + nfs41_finish_session_reset(clp); 274 263 nfs_mark_client_ready(clp, NFS_CS_READY); 275 264 out: 276 265 return status; ··· 1781 1772 status = nfs4_handle_reclaim_lease_error(clp, status); 1782 1773 goto out; 1783 1774 } 1784 - clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); 1785 - /* create_session negotiated new slot table */ 1786 - clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); 1787 - clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); 1775 + nfs41_finish_session_reset(clp); 1788 1776 dprintk("%s: session reset was successful for server %s!\n", 1789 1777 __func__, clp->cl_hostname); 1790 - 1791 - /* Let the state manager reestablish state */ 1792 - if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) 1793 - nfs41_setup_state_renewal(clp); 1794 1778 out: 1795 1779 if (cred) 1796 1780 put_rpccred(cred);
+10 -5
fs/nfs/nfs4xdr.c
··· 1198 1198 } 1199 1199 1200 1200 static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask, 1201 + const u32 *open_bitmap, 1201 1202 struct compound_hdr *hdr) 1202 1203 { 1203 1204 encode_getattr_three(xdr, 1204 - bitmask[0] & nfs4_fattr_bitmap[0], 1205 - bitmask[1] & nfs4_fattr_bitmap[1], 1206 - bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD, 1205 + bitmask[0] & open_bitmap[0], 1206 + bitmask[1] & open_bitmap[1], 1207 + bitmask[2] & open_bitmap[2], 1207 1208 hdr); 1208 1209 } 1209 1210 ··· 2222 2221 encode_putfh(xdr, args->fh, &hdr); 2223 2222 encode_open(xdr, args, &hdr); 2224 2223 encode_getfh(xdr, &hdr); 2225 - encode_getfattr_open(xdr, args->bitmask, &hdr); 2224 + encode_getfattr_open(xdr, args->bitmask, args->open_bitmap, &hdr); 2226 2225 encode_nops(&hdr); 2227 2226 } 2228 2227 ··· 4360 4359 4361 4360 if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U))) 4362 4361 return -EIO; 4363 - if (likely(bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD)) { 4362 + if (bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD) { 4363 + /* Did the server return an unrequested attribute? */ 4364 + if (unlikely(res == NULL)) 4365 + return -EREMOTEIO; 4364 4366 p = xdr_inline_decode(xdr, 4); 4365 4367 if (unlikely(!p)) 4366 4368 goto out_overflow; ··· 4376 4372 __func__); 4377 4373 4378 4374 status = decode_first_threshold_item4(xdr, res); 4375 + bitmap[2] &= ~FATTR4_WORD2_MDSTHRESHOLD; 4379 4376 } 4380 4377 return status; 4381 4378 out_overflow:
+1 -1
fs/nfs/pnfs.h
··· 365 365 pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src, 366 366 struct nfs_server *nfss) 367 367 { 368 - return (dst && src && src->bm != 0 && 368 + return (dst && src && src->bm != 0 && nfss->pnfs_curr_ld && 369 369 nfss->pnfs_curr_ld->id == src->l_type); 370 370 } 371 371
+1 -1
fs/nfs/proc.c
··· 651 651 /* Emulate the eof flag, which isn't normally needed in NFSv2 652 652 * as it is guaranteed to always return the file attributes 653 653 */ 654 - if (data->args.offset + data->args.count >= data->res.fattr->size) 654 + if (data->args.offset + data->res.count >= data->res.fattr->size) 655 655 data->res.eof = 1; 656 656 } 657 657 return 0;
+3
fs/nfs/super.c
··· 1867 1867 if (data == NULL) 1868 1868 goto out_no_data; 1869 1869 1870 + args->version = NFS_DEFAULT_VERSION; 1870 1871 switch (data->version) { 1871 1872 case 1: 1872 1873 data->namlen = 0; ··· 2637 2636 2638 2637 if (data == NULL) 2639 2638 goto out_no_data; 2639 + 2640 + args->version = 4; 2640 2641 2641 2642 switch (data->version) { 2642 2643 case 1:
+4 -3
fs/nfs/write.c
··· 80 80 INIT_LIST_HEAD(&hdr->rpc_list); 81 81 spin_lock_init(&hdr->lock); 82 82 atomic_set(&hdr->refcnt, 0); 83 + hdr->verf = &p->verf; 83 84 } 84 85 return p; 85 86 } ··· 620 619 goto next; 621 620 } 622 621 if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) { 622 + memcpy(&req->wb_verf, hdr->verf, sizeof(req->wb_verf)); 623 623 nfs_mark_request_commit(req, hdr->lseg, &cinfo); 624 624 goto next; 625 625 } ··· 1257 1255 struct nfs_write_data *data = calldata; 1258 1256 struct nfs_pgio_header *hdr = data->header; 1259 1257 int status = data->task.tk_status; 1260 - struct nfs_page *req = hdr->req; 1261 1258 1262 1259 if ((status >= 0) && nfs_write_need_commit(data)) { 1263 1260 spin_lock(&hdr->lock); 1264 1261 if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) 1265 1262 ; /* Do nothing */ 1266 1263 else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) 1267 - memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); 1268 - else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) 1264 + memcpy(hdr->verf, &data->verf, sizeof(*hdr->verf)); 1265 + else if (memcmp(hdr->verf, &data->verf, sizeof(*hdr->verf))) 1269 1266 set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags); 1270 1267 spin_unlock(&hdr->lock); 1271 1268 }
+2 -2
fs/nfsd/nfs4state.c
··· 900 900 struct nfsd4_session *ses; 901 901 int mem; 902 902 903 - BUG_ON(!spin_is_locked(&client_lock)); 903 + lockdep_assert_held(&client_lock); 904 904 ses = container_of(kref, struct nfsd4_session, se_ref); 905 905 nfsd4_del_conns(ses); 906 906 spin_lock(&nfsd_drc_lock); ··· 1080 1080 static inline void 1081 1081 free_client(struct nfs4_client *clp) 1082 1082 { 1083 - BUG_ON(!spin_is_locked(&client_lock)); 1083 + lockdep_assert_held(&client_lock); 1084 1084 while (!list_empty(&clp->cl_sessions)) { 1085 1085 struct nfsd4_session *ses; 1086 1086 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
+10 -2
fs/ubifs/debug.c
··· 2918 2918 struct dentry *dent; 2919 2919 struct ubifs_debug_info *d = c->dbg; 2920 2920 2921 + if (!IS_ENABLED(DEBUG_FS)) 2922 + return 0; 2923 + 2921 2924 n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME, 2922 2925 c->vi.ubi_num, c->vi.vol_id); 2923 2926 if (n == UBIFS_DFS_DIR_LEN) { ··· 3013 3010 */ 3014 3011 void dbg_debugfs_exit_fs(struct ubifs_info *c) 3015 3012 { 3016 - debugfs_remove_recursive(c->dbg->dfs_dir); 3013 + if (IS_ENABLED(DEBUG_FS)) 3014 + debugfs_remove_recursive(c->dbg->dfs_dir); 3017 3015 } 3018 3016 3019 3017 struct ubifs_global_debug_info ubifs_dbg; ··· 3099 3095 const char *fname; 3100 3096 struct dentry *dent; 3101 3097 3098 + if (!IS_ENABLED(DEBUG_FS)) 3099 + return 0; 3100 + 3102 3101 fname = "ubifs"; 3103 3102 dent = debugfs_create_dir(fname, NULL); 3104 3103 if (IS_ERR_OR_NULL(dent)) ··· 3166 3159 */ 3167 3160 void dbg_debugfs_exit(void) 3168 3161 { 3169 - debugfs_remove_recursive(dfs_rootdir); 3162 + if (IS_ENABLED(DEBUG_FS)) 3163 + debugfs_remove_recursive(dfs_rootdir); 3170 3164 } 3171 3165 3172 3166 /**
+2 -2
include/acpi/acpi_bus.h
··· 440 440 441 441 #else /* CONFIG_ACPI */ 442 442 443 - static int register_acpi_bus_type(struct acpi_bus_type *bus) { return 0; } 444 - static int unregister_acpi_bus_type(struct acpi_bus_type *bus) { return 0; } 443 + static inline int register_acpi_bus_type(void *bus) { return 0; } 444 + static inline int unregister_acpi_bus_type(void *bus) { return 0; } 445 445 446 446 #endif /* CONFIG_ACPI */ 447 447
+1
include/asm-generic/bug.h
··· 2 2 #define _ASM_GENERIC_BUG_H 3 3 4 4 #include <linux/compiler.h> 5 + #include <linux/kernel.h> 5 6 6 7 #ifdef CONFIG_BUG 7 8
+1 -1
include/drm/drm_crtc.h
··· 54 54 struct drm_object_properties *properties; 55 55 }; 56 56 57 - #define DRM_OBJECT_MAX_PROPERTY 16 57 + #define DRM_OBJECT_MAX_PROPERTY 24 58 58 struct drm_object_properties { 59 59 int count; 60 60 uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
+15 -2
include/drm/drm_pciids.h
··· 181 181 {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 182 182 {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 183 183 {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 184 + {0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 184 185 {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 185 186 {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 186 187 {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ ··· 199 198 {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 200 199 {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 201 200 {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 201 + {0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 202 202 {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 203 203 {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 204 204 {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ ··· 231 229 {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 232 230 {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 233 231 {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 232 + {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 234 233 {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 235 234 {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 236 - {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 237 - {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 235 + {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 236 + {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 238 237 {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 239 238 {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 240 239 {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ ··· 534 531 {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 535 532 {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ 536 533 {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ 534 + {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ 537 535 {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 538 536 {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 539 537 {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ ··· 554 550 {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 555 551 {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 556 552 {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 553 + {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 557 554 {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 558 555 {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 559 556 {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ ··· 566 561 {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 567 562 {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 568 563 {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 564 + {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 565 + {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 566 + {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 567 + {0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 568 + {0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 569 569 {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 570 570 {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 571 571 {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 572 572 {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 573 573 {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 574 + {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 575 + {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 576 + {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 574 577 {0, 0, 0} 575 578 576 579 #define r128_PCI_IDS \
+3 -1
include/drm/exynos_drm.h
··· 64 64 * A structure for mapping buffer. 65 65 * 66 66 * @handle: a handle to gem object created. 67 + * @pad: just padding to be 64-bit aligned. 67 68 * @size: memory size to be mapped. 68 69 * @mapped: having user virtual address mmaped. 69 70 * - this variable would be filled by exynos gem module ··· 73 72 */ 74 73 struct drm_exynos_gem_mmap { 75 74 unsigned int handle; 76 - unsigned int size; 75 + unsigned int pad; 76 + uint64_t size; 77 77 uint64_t mapped; 78 78 }; 79 79
+41
include/linux/i2c-mux-pinctrl.h
··· 1 + /* 2 + * i2c-mux-pinctrl platform data 3 + * 4 + * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + #ifndef _LINUX_I2C_MUX_PINCTRL_H 20 + #define _LINUX_I2C_MUX_PINCTRL_H 21 + 22 + /** 23 + * struct i2c_mux_pinctrl_platform_data - Platform data for i2c-mux-pinctrl 24 + * @parent_bus_num: Parent I2C bus number 25 + * @base_bus_num: Base I2C bus number for the child busses. 0 for dynamic. 26 + * @bus_count: Number of child busses. Also the number of elements in 27 + * @pinctrl_states 28 + * @pinctrl_states: The names of the pinctrl state to select for each child bus 29 + * @pinctrl_state_idle: The pinctrl state to select when no child bus is being 30 + * accessed. If NULL, the most recently used pinctrl state will be left 31 + * selected. 32 + */ 33 + struct i2c_mux_pinctrl_platform_data { 34 + int parent_bus_num; 35 + int base_bus_num; 36 + int bus_count; 37 + const char **pinctrl_states; 38 + const char *pinctrl_state_idle; 39 + }; 40 + 41 + #endif
+5 -5
include/linux/moduleparam.h
··· 128 128 * The ops can have NULL set or get functions. 129 129 */ 130 130 #define module_param_cb(name, ops, arg, perm) \ 131 - __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, 0) 131 + __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1) 132 132 133 133 /** 134 134 * <level>_param_cb - general callback for a module/cmdline parameter ··· 192 192 { (void *)set, (void *)get }; \ 193 193 __module_param_call(MODULE_PARAM_PREFIX, \ 194 194 name, &__param_ops_##name, arg, \ 195 - (perm) + sizeof(__check_old_set_param(set))*0, 0) 195 + (perm) + sizeof(__check_old_set_param(set))*0, -1) 196 196 197 197 /* We don't get oldget: it's often a new-style param_get_uint, etc. */ 198 198 static inline int ··· 272 272 */ 273 273 #define core_param(name, var, type, perm) \ 274 274 param_check_##type(name, &(var)); \ 275 - __module_param_call("", name, &param_ops_##type, &var, perm, 0) 275 + __module_param_call("", name, &param_ops_##type, &var, perm, -1) 276 276 #endif /* !MODULE */ 277 277 278 278 /** ··· 290 290 = { len, string }; \ 291 291 __module_param_call(MODULE_PARAM_PREFIX, name, \ 292 292 &param_ops_string, \ 293 - .str = &__param_string_##name, perm, 0); \ 293 + .str = &__param_string_##name, perm, -1); \ 294 294 __MODULE_PARM_TYPE(name, "string") 295 295 296 296 /** ··· 432 432 __module_param_call(MODULE_PARAM_PREFIX, name, \ 433 433 &param_array_ops, \ 434 434 .arr = &__param_arr_##name, \ 435 - perm, 0); \ 435 + perm, -1); \ 436 436 __MODULE_PARM_TYPE(name, "array of " #type) 437 437 438 438 extern struct kernel_param_ops param_array_ops;
+5
include/linux/netfilter/xt_HMARK.h
··· 27 27 __u16 src; 28 28 __u16 dst; 29 29 } p16; 30 + struct { 31 + __be16 src; 32 + __be16 dst; 33 + } b16; 30 34 __u32 v32; 35 + __be32 b32; 31 36 }; 32 37 33 38 struct xt_hmark_info {
+3
include/linux/nfs_xdr.h
··· 348 348 const struct qstr * name; 349 349 const struct nfs_server *server; /* Needed for ID mapping */ 350 350 const u32 * bitmask; 351 + const u32 * open_bitmap; 351 352 __u32 claim; 352 353 struct nfs4_sequence_args seq_args; 353 354 }; ··· 1237 1236 struct list_head rpc_list; 1238 1237 atomic_t refcnt; 1239 1238 struct nfs_page *req; 1239 + struct nfs_writeverf *verf; 1240 1240 struct pnfs_layout_segment *lseg; 1241 1241 loff_t io_start; 1242 1242 const struct rpc_call_ops *mds_ops; ··· 1275 1273 struct nfs_write_header { 1276 1274 struct nfs_pgio_header header; 1277 1275 struct nfs_write_data rpc_data; 1276 + struct nfs_writeverf verf; 1278 1277 }; 1279 1278 1280 1279 struct nfs_mds_commit_info {
+2
include/linux/pci.h
··· 176 176 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, 177 177 /* Provide indication device is assigned by a Virtual Machine Manager */ 178 178 PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4, 179 + /* Device causes system crash if in D3 during S3 sleep */ 180 + PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8, 179 181 }; 180 182 181 183 enum pci_irq_reroute_variant {
+2 -2
include/linux/perf_event.h
··· 555 555 PERF_RECORD_MAX, /* non-ABI */ 556 556 }; 557 557 558 + #define PERF_MAX_STACK_DEPTH 127 559 + 558 560 enum perf_callchain_context { 559 561 PERF_CONTEXT_HV = (__u64)-32, 560 562 PERF_CONTEXT_KERNEL = (__u64)-128, ··· 610 608 #include <linux/atomic.h> 611 609 #include <linux/sysfs.h> 612 610 #include <asm/local.h> 613 - 614 - #define PERF_MAX_STACK_DEPTH 255 615 611 616 612 struct perf_callchain_entry { 617 613 __u64 nr;
+6 -4
include/linux/prctl.h
··· 127 127 #define PR_SET_PTRACER 0x59616d61 128 128 # define PR_SET_PTRACER_ANY ((unsigned long)-1) 129 129 130 - #define PR_SET_CHILD_SUBREAPER 36 131 - #define PR_GET_CHILD_SUBREAPER 37 130 + #define PR_SET_CHILD_SUBREAPER 36 131 + #define PR_GET_CHILD_SUBREAPER 37 132 132 133 133 /* 134 134 * If no_new_privs is set, then operations that grant new privileges (i.e. ··· 142 142 * asking selinux for a specific new context (e.g. with runcon) will result 143 143 * in execve returning -EPERM. 144 144 */ 145 - #define PR_SET_NO_NEW_PRIVS 38 146 - #define PR_GET_NO_NEW_PRIVS 39 145 + #define PR_SET_NO_NEW_PRIVS 38 146 + #define PR_GET_NO_NEW_PRIVS 39 147 + 148 + #define PR_GET_TID_ADDRESS 40 147 149 148 150 #endif /* _LINUX_PRCTL_H */
+4 -1
include/linux/radix-tree.h
··· 368 368 iter->index++; 369 369 if (likely(*slot)) 370 370 return slot; 371 - if (flags & RADIX_TREE_ITER_CONTIG) 371 + if (flags & RADIX_TREE_ITER_CONTIG) { 372 + /* forbid switching to the next chunk */ 373 + iter->next_index = 0; 372 374 break; 375 + } 373 376 } 374 377 } 375 378 return NULL;
+4 -2
include/linux/rcutiny.h
··· 87 87 88 88 #ifdef CONFIG_TINY_RCU 89 89 90 - static inline int rcu_needs_cpu(int cpu) 90 + static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) 91 91 { 92 + *delta_jiffies = ULONG_MAX; 92 93 return 0; 93 94 } 94 95 ··· 97 96 98 97 int rcu_preempt_needs_cpu(void); 99 98 100 - static inline int rcu_needs_cpu(int cpu) 99 + static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) 101 100 { 101 + *delta_jiffies = ULONG_MAX; 102 102 return rcu_preempt_needs_cpu(); 103 103 } 104 104
+1 -1
include/linux/rcutree.h
··· 32 32 33 33 extern void rcu_init(void); 34 34 extern void rcu_note_context_switch(int cpu); 35 - extern int rcu_needs_cpu(int cpu); 35 + extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies); 36 36 extern void rcu_cpu_stall_reset(void); 37 37 38 38 /*
+12
include/linux/sched.h
··· 439 439 /* leave room for more dump flags */ 440 440 #define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ 441 441 #define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ 442 + #define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ 442 443 443 444 #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) 444 445 ··· 877 876 * Number of busy cpus in this group. 878 877 */ 879 878 atomic_t nr_busy_cpus; 879 + 880 + unsigned long cpumask[0]; /* iteration mask */ 880 881 }; 881 882 882 883 struct sched_group { ··· 901 898 static inline struct cpumask *sched_group_cpus(struct sched_group *sg) 902 899 { 903 900 return to_cpumask(sg->cpumask); 901 + } 902 + 903 + /* 904 + * cpumask masking which cpus in the group are allowed to iterate up the domain 905 + * tree. 906 + */ 907 + static inline struct cpumask *sched_group_mask(struct sched_group *sg) 908 + { 909 + return to_cpumask(sg->sgp->cpumask); 904 910 } 905 911 906 912 /**
+5 -3
include/linux/swapops.h
··· 9 9 * get good packing density in that tree, so the index should be dense in 10 10 * the low-order bits. 11 11 * 12 - * We arrange the `type' and `offset' fields so that `type' is at the five 12 + * We arrange the `type' and `offset' fields so that `type' is at the seven 13 13 * high-order bits of the swp_entry_t and `offset' is right-aligned in the 14 - * remaining bits. 14 + * remaining bits. Although `type' itself needs only five bits, we allow for 15 + * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry(). 15 16 * 16 17 * swp_entry_t's are *never* stored anywhere in their arch-dependent format. 17 18 */ 18 - #define SWP_TYPE_SHIFT(e) (sizeof(e.val) * 8 - MAX_SWAPFILES_SHIFT) 19 + #define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \ 20 + (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT)) 19 21 #define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1) 20 22 21 23 /*
+10 -10
include/linux/tcp.h
··· 69 69 #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) 70 70 71 71 enum { 72 - TCP_FLAG_CWR = __cpu_to_be32(0x00800000), 73 - TCP_FLAG_ECE = __cpu_to_be32(0x00400000), 74 - TCP_FLAG_URG = __cpu_to_be32(0x00200000), 75 - TCP_FLAG_ACK = __cpu_to_be32(0x00100000), 76 - TCP_FLAG_PSH = __cpu_to_be32(0x00080000), 77 - TCP_FLAG_RST = __cpu_to_be32(0x00040000), 78 - TCP_FLAG_SYN = __cpu_to_be32(0x00020000), 79 - TCP_FLAG_FIN = __cpu_to_be32(0x00010000), 80 - TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000), 81 - TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000) 72 + TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000), 73 + TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000), 74 + TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000), 75 + TCP_FLAG_ACK = __constant_cpu_to_be32(0x00100000), 76 + TCP_FLAG_PSH = __constant_cpu_to_be32(0x00080000), 77 + TCP_FLAG_RST = __constant_cpu_to_be32(0x00040000), 78 + TCP_FLAG_SYN = __constant_cpu_to_be32(0x00020000), 79 + TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000), 80 + TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000), 81 + TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000) 82 82 }; 83 83 84 84 /*
-2
include/linux/usb/hcd.h
··· 126 126 unsigned wireless:1; /* Wireless USB HCD */ 127 127 unsigned authorized_default:1; 128 128 unsigned has_tt:1; /* Integrated TT in root hub */ 129 - unsigned broken_pci_sleep:1; /* Don't put the 130 - controller in PCI-D3 for system sleep */ 131 129 132 130 unsigned int irq; /* irq allocated */ 133 131 void __iomem *regs; /* device memory/io */
+7
include/linux/vga_switcheroo.h
··· 12 12 enum vga_switcheroo_state { 13 13 VGA_SWITCHEROO_OFF, 14 14 VGA_SWITCHEROO_ON, 15 + /* below are referred only from vga_switcheroo_get_client_state() */ 16 + VGA_SWITCHEROO_INIT, 17 + VGA_SWITCHEROO_NOT_FOUND, 15 18 }; 16 19 17 20 enum vga_switcheroo_client_id { ··· 53 50 54 51 int vga_switcheroo_process_delayed_switch(void); 55 52 53 + int vga_switcheroo_get_client_state(struct pci_dev *dev); 54 + 56 55 #else 57 56 58 57 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} ··· 67 62 int id, bool active) { return 0; } 68 63 static inline void vga_switcheroo_unregister_handler(void) {} 69 64 static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } 65 + static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } 66 + 70 67 71 68 #endif
+4 -1
include/net/inetpeer.h
··· 40 40 u32 pmtu_orig; 41 41 u32 pmtu_learned; 42 42 struct inetpeer_addr_base redirect_learned; 43 - struct list_head gc_list; 43 + union { 44 + struct list_head gc_list; 45 + struct rcu_head gc_rcu; 46 + }; 44 47 /* 45 48 * Once inet_peer is queued for deletion (refcnt == -1), following fields 46 49 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
+1 -1
include/net/route.h
··· 130 130 { 131 131 struct flowi4 fl4 = { 132 132 .flowi4_oif = oif, 133 + .flowi4_tos = tos, 133 134 .daddr = daddr, 134 135 .saddr = saddr, 135 - .flowi4_tos = tos, 136 136 }; 137 137 return ip_route_output_key(net, &fl4); 138 138 }
+5 -2
include/net/sch_generic.h
··· 220 220 221 221 struct qdisc_skb_cb { 222 222 unsigned int pkt_len; 223 - unsigned char data[24]; 223 + u16 bond_queue_mapping; 224 + u16 _pad; 225 + unsigned char data[20]; 224 226 }; 225 227 226 228 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) 227 229 { 228 230 struct qdisc_skb_cb *qcb; 229 - BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz); 231 + 232 + BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz); 230 233 BUILD_BUG_ON(sizeof(qcb->data) < sz); 231 234 } 232 235
+1
include/target/target_core_fabric.h
··· 47 47 */ 48 48 int (*check_stop_free)(struct se_cmd *); 49 49 void (*release_cmd)(struct se_cmd *); 50 + void (*put_session)(struct se_session *); 50 51 /* 51 52 * Called with spin_lock_bh(struct se_portal_group->session_lock held. 52 53 */
+1
include/trace/events/rcu.h
··· 289 289 * "In holdoff": Nothing to do, holding off after unsuccessful attempt. 290 290 * "Begin holdoff": Attempt failed, don't retry until next jiffy. 291 291 * "Dyntick with callbacks": Entering dyntick-idle despite callbacks. 292 + * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks. 292 293 * "More callbacks": Still more callbacks, try again to clear them out. 293 294 * "Callbacks drained": All callbacks processed, off to dyntick idle! 294 295 * "Timer": Timer fired to cause CPU to continue processing callbacks.
+2 -7
init/main.c
··· 508 508 parse_early_param(); 509 509 parse_args("Booting kernel", static_command_line, __start___param, 510 510 __stop___param - __start___param, 511 - 0, 0, &unknown_bootoption); 511 + -1, -1, &unknown_bootoption); 512 512 513 513 jump_label_init(); 514 514 ··· 755 755 { 756 756 int level; 757 757 758 - for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) { 759 - pr_info("initlevel:%d=%s, %d registered initcalls\n", 760 - level, initcall_level_names[level], 761 - (int) (initcall_levels[level+1] 762 - - initcall_levels[level])); 758 + for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) 763 759 do_initcall_level(level); 764 - } 765 760 } 766 761 767 762 /*
+12
ipc/shm.c
··· 393 393 return sfd->file->f_op->fsync(sfd->file, start, end, datasync); 394 394 } 395 395 396 + static long shm_fallocate(struct file *file, int mode, loff_t offset, 397 + loff_t len) 398 + { 399 + struct shm_file_data *sfd = shm_file_data(file); 400 + 401 + if (!sfd->file->f_op->fallocate) 402 + return -EOPNOTSUPP; 403 + return sfd->file->f_op->fallocate(file, mode, offset, len); 404 + } 405 + 396 406 static unsigned long shm_get_unmapped_area(struct file *file, 397 407 unsigned long addr, unsigned long len, unsigned long pgoff, 398 408 unsigned long flags) ··· 420 410 .get_unmapped_area = shm_get_unmapped_area, 421 411 #endif 422 412 .llseek = noop_llseek, 413 + .fallocate = shm_fallocate, 423 414 }; 424 415 425 416 static const struct file_operations shm_file_operations_huge = { ··· 429 418 .release = shm_release, 430 419 .get_unmapped_area = shm_get_unmapped_area, 431 420 .llseek = noop_llseek, 421 + .fallocate = shm_fallocate, 432 422 }; 433 423 434 424 int is_file_shm_hugepages(struct file *file)
+14 -3
kernel/cgroup.c
··· 896 896 mutex_unlock(&cgroup_mutex); 897 897 898 898 /* 899 - * Drop the active superblock reference that we took when we 900 - * created the cgroup 899 + * We want to drop the active superblock reference from the 900 + * cgroup creation after all the dentry refs are gone - 901 + * kill_sb gets mighty unhappy otherwise. Mark 902 + * dentry->d_fsdata with cgroup_diput() to tell 903 + * cgroup_d_release() to call deactivate_super(). 901 904 */ 902 - deactivate_super(cgrp->root->sb); 905 + dentry->d_fsdata = cgroup_diput; 903 906 904 907 /* 905 908 * if we're getting rid of the cgroup, refcount should ensure ··· 926 923 static int cgroup_delete(const struct dentry *d) 927 924 { 928 925 return 1; 926 + } 927 + 928 + static void cgroup_d_release(struct dentry *dentry) 929 + { 930 + /* did cgroup_diput() tell me to deactivate super? */ 931 + if (dentry->d_fsdata == cgroup_diput) 932 + deactivate_super(dentry->d_sb); 929 933 } 930 934 931 935 static void remove_dir(struct dentry *d) ··· 1542 1532 static const struct dentry_operations cgroup_dops = { 1543 1533 .d_iput = cgroup_diput, 1544 1534 .d_delete = cgroup_delete, 1535 + .d_release = cgroup_d_release, 1545 1536 }; 1546 1537 1547 1538 struct inode *inode =
-1
kernel/events/core.c
··· 3181 3181 event = event->group_leader; 3182 3182 3183 3183 perf_event_for_each_child(event, func); 3184 - func(event); 3185 3184 list_for_each_entry(sibling, &event->sibling_list, group_entry) 3186 3185 perf_event_for_each_child(sibling, func); 3187 3186 mutex_unlock(&ctx->mutex);
+3 -3
kernel/panic.c
··· 27 27 #define PANIC_TIMER_STEP 100 28 28 #define PANIC_BLINK_SPD 18 29 29 30 - int panic_on_oops; 30 + int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE; 31 31 static unsigned long tainted_mask; 32 32 static int pause_on_oops; 33 33 static int pause_on_oops_flag; ··· 108 108 */ 109 109 crash_kexec(NULL); 110 110 111 - kmsg_dump(KMSG_DUMP_PANIC); 112 - 113 111 /* 114 112 * Note smp_send_stop is the usual smp shutdown function, which 115 113 * unfortunately means it may not be hardened to work in a panic 116 114 * situation. 117 115 */ 118 116 smp_send_stop(); 117 + 118 + kmsg_dump(KMSG_DUMP_PANIC); 119 119 120 120 atomic_notifier_call_chain(&panic_notifier_list, 0, buf); 121 121
+2
kernel/rcutree.c
··· 1397 1397 rdp->qlen_lazy += rsp->qlen_lazy; 1398 1398 rdp->qlen += rsp->qlen; 1399 1399 rdp->n_cbs_adopted += rsp->qlen; 1400 + if (rsp->qlen_lazy != rsp->qlen) 1401 + rcu_idle_count_callbacks_posted(); 1400 1402 rsp->qlen_lazy = 0; 1401 1403 rsp->qlen = 0; 1402 1404
+14
kernel/rcutree.h
··· 84 84 /* Process level is worth LLONG_MAX/2. */ 85 85 int dynticks_nmi_nesting; /* Track NMI nesting level. */ 86 86 atomic_t dynticks; /* Even value for idle, else odd. */ 87 + #ifdef CONFIG_RCU_FAST_NO_HZ 88 + int dyntick_drain; /* Prepare-for-idle state variable. */ 89 + unsigned long dyntick_holdoff; 90 + /* No retries for the jiffy of failure. */ 91 + struct timer_list idle_gp_timer; 92 + /* Wake up CPU sleeping with callbacks. */ 93 + unsigned long idle_gp_timer_expires; 94 + /* When to wake up CPU (for repost). */ 95 + bool idle_first_pass; /* First pass of attempt to go idle? */ 96 + unsigned long nonlazy_posted; 97 + /* # times non-lazy CBs posted to CPU. */ 98 + unsigned long nonlazy_posted_snap; 99 + /* idle-period nonlazy_posted snapshot. */ 100 + #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ 87 101 }; 88 102 89 103 /* RCU's kthread states for tracing. */
+88 -77
kernel/rcutree_plugin.h
··· 1886 1886 * Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs 1887 1887 * any flavor of RCU. 1888 1888 */ 1889 - int rcu_needs_cpu(int cpu) 1889 + int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) 1890 1890 { 1891 + *delta_jiffies = ULONG_MAX; 1891 1892 return rcu_cpu_has_callbacks(cpu); 1892 1893 } 1893 1894 ··· 1963 1962 #define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */ 1964 1963 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */ 1965 1964 1966 - /* Loop counter for rcu_prepare_for_idle(). */ 1967 - static DEFINE_PER_CPU(int, rcu_dyntick_drain); 1968 - /* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */ 1969 - static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); 1970 - /* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */ 1971 - static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer); 1972 - /* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */ 1973 - static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires); 1974 - /* Enable special processing on first attempt to enter dyntick-idle mode. */ 1975 - static DEFINE_PER_CPU(bool, rcu_idle_first_pass); 1976 - /* Running count of non-lazy callbacks posted, never decremented. */ 1977 - static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted); 1978 - /* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */ 1979 - static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap); 1980 - 1981 - /* 1982 - * Allow the CPU to enter dyntick-idle mode if either: (1) There are no 1983 - * callbacks on this CPU, (2) this CPU has not yet attempted to enter 1984 - * dyntick-idle mode, or (3) this CPU is in the process of attempting to 1985 - * enter dyntick-idle mode. Otherwise, if we have recently tried and failed 1986 - * to enter dyntick-idle mode, we refuse to try to enter it. 
After all, 1987 - * it is better to incur scheduling-clock interrupts than to spin 1988 - * continuously for the same time duration! 1989 - */ 1990 - int rcu_needs_cpu(int cpu) 1991 - { 1992 - /* Flag a new idle sojourn to the idle-entry state machine. */ 1993 - per_cpu(rcu_idle_first_pass, cpu) = 1; 1994 - /* If no callbacks, RCU doesn't need the CPU. */ 1995 - if (!rcu_cpu_has_callbacks(cpu)) 1996 - return 0; 1997 - /* Otherwise, RCU needs the CPU only if it recently tried and failed. */ 1998 - return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies; 1999 - } 2000 - 2001 1965 /* 2002 1966 * Does the specified flavor of RCU have non-lazy callbacks pending on 2003 1967 * the specified CPU? Both RCU flavor and CPU are specified by the ··· 2006 2040 } 2007 2041 2008 2042 /* 2043 + * Allow the CPU to enter dyntick-idle mode if either: (1) There are no 2044 + * callbacks on this CPU, (2) this CPU has not yet attempted to enter 2045 + * dyntick-idle mode, or (3) this CPU is in the process of attempting to 2046 + * enter dyntick-idle mode. Otherwise, if we have recently tried and failed 2047 + * to enter dyntick-idle mode, we refuse to try to enter it. After all, 2048 + * it is better to incur scheduling-clock interrupts than to spin 2049 + * continuously for the same time duration! 2050 + * 2051 + * The delta_jiffies argument is used to store the time when RCU is 2052 + * going to need the CPU again if it still has callbacks. The reason 2053 + * for this is that rcu_prepare_for_idle() might need to post a timer, 2054 + * but if so, it will do so after tick_nohz_stop_sched_tick() has set 2055 + * the wakeup time for this CPU. This means that RCU's timer can be 2056 + * delayed until the wakeup time, which defeats the purpose of posting 2057 + * a timer. 2058 + */ 2059 + int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) 2060 + { 2061 + struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); 2062 + 2063 + /* Flag a new idle sojourn to the idle-entry state machine. 
*/ 2064 + rdtp->idle_first_pass = 1; 2065 + /* If no callbacks, RCU doesn't need the CPU. */ 2066 + if (!rcu_cpu_has_callbacks(cpu)) { 2067 + *delta_jiffies = ULONG_MAX; 2068 + return 0; 2069 + } 2070 + if (rdtp->dyntick_holdoff == jiffies) { 2071 + /* RCU recently tried and failed, so don't try again. */ 2072 + *delta_jiffies = 1; 2073 + return 1; 2074 + } 2075 + /* Set up for the possibility that RCU will post a timer. */ 2076 + if (rcu_cpu_has_nonlazy_callbacks(cpu)) 2077 + *delta_jiffies = RCU_IDLE_GP_DELAY; 2078 + else 2079 + *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY; 2080 + return 0; 2081 + } 2082 + 2083 + /* 2009 2084 * Handler for smp_call_function_single(). The only point of this 2010 2085 * handler is to wake the CPU up, so the handler does only tracing. 2011 2086 */ ··· 2082 2075 */ 2083 2076 static void rcu_prepare_for_idle_init(int cpu) 2084 2077 { 2085 - per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; 2086 - setup_timer(&per_cpu(rcu_idle_gp_timer, cpu), 2087 - rcu_idle_gp_timer_func, cpu); 2088 - per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1; 2089 - per_cpu(rcu_idle_first_pass, cpu) = 1; 2078 + struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); 2079 + 2080 + rdtp->dyntick_holdoff = jiffies - 1; 2081 + setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu); 2082 + rdtp->idle_gp_timer_expires = jiffies - 1; 2083 + rdtp->idle_first_pass = 1; 2090 2084 } 2091 2085 2092 2086 /* 2093 2087 * Clean up for exit from idle. Because we are exiting from idle, there 2094 - * is no longer any point to rcu_idle_gp_timer, so cancel it. This will 2088 + * is no longer any point to ->idle_gp_timer, so cancel it. This will 2095 2089 * do nothing if this timer is not active, so just cancel it unconditionally. 
2096 2090 */ 2097 2091 static void rcu_cleanup_after_idle(int cpu) 2098 2092 { 2099 - del_timer(&per_cpu(rcu_idle_gp_timer, cpu)); 2093 + struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); 2094 + 2095 + del_timer(&rdtp->idle_gp_timer); 2100 2096 trace_rcu_prep_idle("Cleanup after idle"); 2101 2097 } 2102 2098 ··· 2118 2108 * Because it is not legal to invoke rcu_process_callbacks() with irqs 2119 2109 * disabled, we do one pass of force_quiescent_state(), then do a 2120 2110 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked 2121 - * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. 2111 + * later. The ->dyntick_drain field controls the sequencing. 2122 2112 * 2123 2113 * The caller must have disabled interrupts. 2124 2114 */ 2125 2115 static void rcu_prepare_for_idle(int cpu) 2126 2116 { 2127 2117 struct timer_list *tp; 2118 + struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); 2128 2119 2129 2120 /* 2130 2121 * If this is an idle re-entry, for example, due to use of 2131 2122 * RCU_NONIDLE() or the new idle-loop tracing API within the idle 2132 2123 * loop, then don't take any state-machine actions, unless the 2133 2124 * momentary exit from idle queued additional non-lazy callbacks. 2134 - * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks 2125 + * Instead, repost the ->idle_gp_timer if this CPU has callbacks 2135 2126 * pending. 
2136 2127 */ 2137 - if (!per_cpu(rcu_idle_first_pass, cpu) && 2138 - (per_cpu(rcu_nonlazy_posted, cpu) == 2139 - per_cpu(rcu_nonlazy_posted_snap, cpu))) { 2128 + if (!rdtp->idle_first_pass && 2129 + (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) { 2140 2130 if (rcu_cpu_has_callbacks(cpu)) { 2141 - tp = &per_cpu(rcu_idle_gp_timer, cpu); 2142 - mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu)); 2131 + tp = &rdtp->idle_gp_timer; 2132 + mod_timer_pinned(tp, rdtp->idle_gp_timer_expires); 2143 2133 } 2144 2134 return; 2145 2135 } 2146 - per_cpu(rcu_idle_first_pass, cpu) = 0; 2147 - per_cpu(rcu_nonlazy_posted_snap, cpu) = 2148 - per_cpu(rcu_nonlazy_posted, cpu) - 1; 2136 + rdtp->idle_first_pass = 0; 2137 + rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1; 2149 2138 2150 2139 /* 2151 2140 * If there are no callbacks on this CPU, enter dyntick-idle mode. 2152 2141 * Also reset state to avoid prejudicing later attempts. 2153 2142 */ 2154 2143 if (!rcu_cpu_has_callbacks(cpu)) { 2155 - per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; 2156 - per_cpu(rcu_dyntick_drain, cpu) = 0; 2144 + rdtp->dyntick_holdoff = jiffies - 1; 2145 + rdtp->dyntick_drain = 0; 2157 2146 trace_rcu_prep_idle("No callbacks"); 2158 2147 return; 2159 2148 } ··· 2161 2152 * If in holdoff mode, just return. We will presumably have 2162 2153 * refrained from disabling the scheduling-clock tick. 2163 2154 */ 2164 - if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) { 2155 + if (rdtp->dyntick_holdoff == jiffies) { 2165 2156 trace_rcu_prep_idle("In holdoff"); 2166 2157 return; 2167 2158 } 2168 2159 2169 - /* Check and update the rcu_dyntick_drain sequencing. */ 2170 - if (per_cpu(rcu_dyntick_drain, cpu) <= 0) { 2160 + /* Check and update the ->dyntick_drain sequencing. */ 2161 + if (rdtp->dyntick_drain <= 0) { 2171 2162 /* First time through, initialize the counter. 
*/ 2172 - per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES; 2173 - } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES && 2163 + rdtp->dyntick_drain = RCU_IDLE_FLUSHES; 2164 + } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES && 2174 2165 !rcu_pending(cpu) && 2175 2166 !local_softirq_pending()) { 2176 2167 /* Can we go dyntick-idle despite still having callbacks? */ 2177 - trace_rcu_prep_idle("Dyntick with callbacks"); 2178 - per_cpu(rcu_dyntick_drain, cpu) = 0; 2179 - per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; 2180 - if (rcu_cpu_has_nonlazy_callbacks(cpu)) 2181 - per_cpu(rcu_idle_gp_timer_expires, cpu) = 2168 + rdtp->dyntick_drain = 0; 2169 + rdtp->dyntick_holdoff = jiffies; 2170 + if (rcu_cpu_has_nonlazy_callbacks(cpu)) { 2171 + trace_rcu_prep_idle("Dyntick with callbacks"); 2172 + rdtp->idle_gp_timer_expires = 2182 2173 jiffies + RCU_IDLE_GP_DELAY; 2183 - else 2184 - per_cpu(rcu_idle_gp_timer_expires, cpu) = 2174 + } else { 2175 + rdtp->idle_gp_timer_expires = 2185 2176 jiffies + RCU_IDLE_LAZY_GP_DELAY; 2186 - tp = &per_cpu(rcu_idle_gp_timer, cpu); 2187 - mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu)); 2188 - per_cpu(rcu_nonlazy_posted_snap, cpu) = 2189 - per_cpu(rcu_nonlazy_posted, cpu); 2177 + trace_rcu_prep_idle("Dyntick with lazy callbacks"); 2178 + } 2179 + tp = &rdtp->idle_gp_timer; 2180 + mod_timer_pinned(tp, rdtp->idle_gp_timer_expires); 2181 + rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; 2190 2182 return; /* Nothing more to do immediately. */ 2191 - } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) { 2183 + } else if (--(rdtp->dyntick_drain) <= 0) { 2192 2184 /* We have hit the limit, so time to give up. */ 2193 - per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; 2185 + rdtp->dyntick_holdoff = jiffies; 2194 2186 trace_rcu_prep_idle("Begin holdoff"); 2195 2187 invoke_rcu_core(); /* Force the CPU out of dyntick-idle. 
*/ 2196 2188 return; ··· 2237 2227 */ 2238 2228 static void rcu_idle_count_callbacks_posted(void) 2239 2229 { 2240 - __this_cpu_add(rcu_nonlazy_posted, 1); 2230 + __this_cpu_add(rcu_dynticks.nonlazy_posted, 1); 2241 2231 } 2242 2232 2243 2233 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ ··· 2248 2238 2249 2239 static void print_cpu_stall_fast_no_hz(char *cp, int cpu) 2250 2240 { 2251 - struct timer_list *tltp = &per_cpu(rcu_idle_gp_timer, cpu); 2241 + struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); 2242 + struct timer_list *tltp = &rdtp->idle_gp_timer; 2252 2243 2253 2244 sprintf(cp, "drain=%d %c timer=%lu", 2254 - per_cpu(rcu_dyntick_drain, cpu), 2255 - per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.', 2245 + rdtp->dyntick_drain, 2246 + rdtp->dyntick_holdoff == jiffies ? 'H' : '.', 2256 2247 timer_pending(tltp) ? tltp->expires - jiffies : -1); 2257 2248 } 2258 2249
+152 -35
kernel/sched/core.c
··· 5556 5556 5557 5557 #ifdef CONFIG_SCHED_DEBUG 5558 5558 5559 - static __read_mostly int sched_domain_debug_enabled; 5559 + static __read_mostly int sched_debug_enabled; 5560 5560 5561 - static int __init sched_domain_debug_setup(char *str) 5561 + static int __init sched_debug_setup(char *str) 5562 5562 { 5563 - sched_domain_debug_enabled = 1; 5563 + sched_debug_enabled = 1; 5564 5564 5565 5565 return 0; 5566 5566 } 5567 - early_param("sched_debug", sched_domain_debug_setup); 5567 + early_param("sched_debug", sched_debug_setup); 5568 + 5569 + static inline bool sched_debug(void) 5570 + { 5571 + return sched_debug_enabled; 5572 + } 5568 5573 5569 5574 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 5570 5575 struct cpumask *groupmask) ··· 5609 5604 break; 5610 5605 } 5611 5606 5612 - if (!group->sgp->power) { 5607 + /* 5608 + * Even though we initialize ->power to something semi-sane, 5609 + * we leave power_orig unset. This allows us to detect if 5610 + * domain iteration is still funny without causing /0 traps. 5611 + */ 5612 + if (!group->sgp->power_orig) { 5613 5613 printk(KERN_CONT "\n"); 5614 5614 printk(KERN_ERR "ERROR: domain->cpu_power not " 5615 5615 "set\n"); ··· 5662 5652 { 5663 5653 int level = 0; 5664 5654 5665 - if (!sched_domain_debug_enabled) 5655 + if (!sched_debug_enabled) 5666 5656 return; 5667 5657 5668 5658 if (!sd) { ··· 5683 5673 } 5684 5674 #else /* !CONFIG_SCHED_DEBUG */ 5685 5675 # define sched_domain_debug(sd, cpu) do { } while (0) 5676 + static inline bool sched_debug(void) 5677 + { 5678 + return false; 5679 + } 5686 5680 #endif /* CONFIG_SCHED_DEBUG */ 5687 5681 5688 5682 static int sd_degenerate(struct sched_domain *sd) ··· 6008 5994 struct sd_data data; 6009 5995 }; 6010 5996 5997 + /* 5998 + * Build an iteration mask that can exclude certain CPUs from the upwards 5999 + * domain traversal. 
6000 + * 6001 + * Asymmetric node setups can result in situations where the domain tree is of 6002 + * unequal depth, make sure to skip domains that already cover the entire 6003 + * range. 6004 + * 6005 + * In that case build_sched_domains() will have terminated the iteration early 6006 + * and our sibling sd spans will be empty. Domains should always include the 6007 + * cpu they're built on, so check that. 6008 + * 6009 + */ 6010 + static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) 6011 + { 6012 + const struct cpumask *span = sched_domain_span(sd); 6013 + struct sd_data *sdd = sd->private; 6014 + struct sched_domain *sibling; 6015 + int i; 6016 + 6017 + for_each_cpu(i, span) { 6018 + sibling = *per_cpu_ptr(sdd->sd, i); 6019 + if (!cpumask_test_cpu(i, sched_domain_span(sibling))) 6020 + continue; 6021 + 6022 + cpumask_set_cpu(i, sched_group_mask(sg)); 6023 + } 6024 + } 6025 + 6026 + /* 6027 + * Return the canonical balance cpu for this group, this is the first cpu 6028 + * of this group that's also in the iteration mask. 6029 + */ 6030 + int group_balance_cpu(struct sched_group *sg) 6031 + { 6032 + return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg)); 6033 + } 6034 + 6011 6035 static int 6012 6036 build_overlap_sched_groups(struct sched_domain *sd, int cpu) 6013 6037 { ··· 6064 6012 if (cpumask_test_cpu(i, covered)) 6065 6013 continue; 6066 6014 6015 + child = *per_cpu_ptr(sdd->sd, i); 6016 + 6017 + /* See the comment near build_group_mask(). 
*/ 6018 + if (!cpumask_test_cpu(i, sched_domain_span(child))) 6019 + continue; 6020 + 6067 6021 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 6068 6022 GFP_KERNEL, cpu_to_node(cpu)); 6069 6023 ··· 6077 6019 goto fail; 6078 6020 6079 6021 sg_span = sched_group_cpus(sg); 6080 - 6081 - child = *per_cpu_ptr(sdd->sd, i); 6082 6022 if (child->child) { 6083 6023 child = child->child; 6084 6024 cpumask_copy(sg_span, sched_domain_span(child)); ··· 6086 6030 cpumask_or(covered, covered, sg_span); 6087 6031 6088 6032 sg->sgp = *per_cpu_ptr(sdd->sgp, i); 6089 - atomic_inc(&sg->sgp->ref); 6033 + if (atomic_inc_return(&sg->sgp->ref) == 1) 6034 + build_group_mask(sd, sg); 6090 6035 6036 + /* 6037 + * Initialize sgp->power such that even if we mess up the 6038 + * domains and no possible iteration will get us here, we won't 6039 + * die on a /0 trap. 6040 + */ 6041 + sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span); 6042 + 6043 + /* 6044 + * Make sure the first group of this domain contains the 6045 + * canonical balance cpu. Otherwise the sched_domain iteration 6046 + * breaks. See update_sg_lb_stats(). 
6047 + */ 6091 6048 if ((!groups && cpumask_test_cpu(cpu, sg_span)) || 6092 - cpumask_first(sg_span) == cpu) { 6093 - WARN_ON_ONCE(!cpumask_test_cpu(cpu, sg_span)); 6049 + group_balance_cpu(sg) == cpu) 6094 6050 groups = sg; 6095 - } 6096 6051 6097 6052 if (!first) 6098 6053 first = sg; ··· 6176 6109 6177 6110 cpumask_clear(sched_group_cpus(sg)); 6178 6111 sg->sgp->power = 0; 6112 + cpumask_setall(sched_group_mask(sg)); 6179 6113 6180 6114 for_each_cpu(j, span) { 6181 6115 if (get_group(j, sdd, NULL) != group) ··· 6218 6150 sg = sg->next; 6219 6151 } while (sg != sd->groups); 6220 6152 6221 - if (cpu != group_first_cpu(sg)) 6153 + if (cpu != group_balance_cpu(sg)) 6222 6154 return; 6223 6155 6224 6156 update_group_power(sd, cpu); ··· 6268 6200 6269 6201 static int __init setup_relax_domain_level(char *str) 6270 6202 { 6271 - unsigned long val; 6272 - 6273 - val = simple_strtoul(str, NULL, 0); 6274 - if (val < sched_domain_level_max) 6275 - default_relax_domain_level = val; 6203 + if (kstrtoint(str, 0, &default_relax_domain_level)) 6204 + pr_warn("Unable to set relax_domain_level\n"); 6276 6205 6277 6206 return 1; 6278 6207 } ··· 6379 6314 #ifdef CONFIG_NUMA 6380 6315 6381 6316 static int sched_domains_numa_levels; 6382 - static int sched_domains_numa_scale; 6383 6317 static int *sched_domains_numa_distance; 6384 6318 static struct cpumask ***sched_domains_numa_masks; 6385 6319 static int sched_domains_curr_level; 6386 6320 6387 6321 static inline int sd_local_flags(int level) 6388 6322 { 6389 - if (sched_domains_numa_distance[level] > REMOTE_DISTANCE) 6323 + if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE) 6390 6324 return 0; 6391 6325 6392 6326 return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE; ··· 6443 6379 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; 6444 6380 } 6445 6381 6382 + static void sched_numa_warn(const char *str) 6383 + { 6384 + static int done = false; 6385 + int i,j; 6386 + 6387 + if (done) 6388 + 
return; 6389 + 6390 + done = true; 6391 + 6392 + printk(KERN_WARNING "ERROR: %s\n\n", str); 6393 + 6394 + for (i = 0; i < nr_node_ids; i++) { 6395 + printk(KERN_WARNING " "); 6396 + for (j = 0; j < nr_node_ids; j++) 6397 + printk(KERN_CONT "%02d ", node_distance(i,j)); 6398 + printk(KERN_CONT "\n"); 6399 + } 6400 + printk(KERN_WARNING "\n"); 6401 + } 6402 + 6403 + static bool find_numa_distance(int distance) 6404 + { 6405 + int i; 6406 + 6407 + if (distance == node_distance(0, 0)) 6408 + return true; 6409 + 6410 + for (i = 0; i < sched_domains_numa_levels; i++) { 6411 + if (sched_domains_numa_distance[i] == distance) 6412 + return true; 6413 + } 6414 + 6415 + return false; 6416 + } 6417 + 6446 6418 static void sched_init_numa(void) 6447 6419 { 6448 6420 int next_distance, curr_distance = node_distance(0, 0); ··· 6486 6386 int level = 0; 6487 6387 int i, j, k; 6488 6388 6489 - sched_domains_numa_scale = curr_distance; 6490 6389 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); 6491 6390 if (!sched_domains_numa_distance) 6492 6391 return; ··· 6496 6397 * 6497 6398 * Assumes node_distance(0,j) includes all distances in 6498 6399 * node_distance(i,j) in order to avoid cubic time. 
6499 - * 6500 - * XXX: could be optimized to O(n log n) by using sort() 6501 6400 */ 6502 6401 next_distance = curr_distance; 6503 6402 for (i = 0; i < nr_node_ids; i++) { 6504 6403 for (j = 0; j < nr_node_ids; j++) { 6505 - int distance = node_distance(0, j); 6506 - if (distance > curr_distance && 6507 - (distance < next_distance || 6508 - next_distance == curr_distance)) 6509 - next_distance = distance; 6404 + for (k = 0; k < nr_node_ids; k++) { 6405 + int distance = node_distance(i, k); 6406 + 6407 + if (distance > curr_distance && 6408 + (distance < next_distance || 6409 + next_distance == curr_distance)) 6410 + next_distance = distance; 6411 + 6412 + /* 6413 + * While not a strong assumption it would be nice to know 6414 + * about cases where if node A is connected to B, B is not 6415 + * equally connected to A. 6416 + */ 6417 + if (sched_debug() && node_distance(k, i) != distance) 6418 + sched_numa_warn("Node-distance not symmetric"); 6419 + 6420 + if (sched_debug() && i && !find_numa_distance(distance)) 6421 + sched_numa_warn("Node-0 not representative"); 6422 + } 6423 + if (next_distance != curr_distance) { 6424 + sched_domains_numa_distance[level++] = next_distance; 6425 + sched_domains_numa_levels = level; 6426 + curr_distance = next_distance; 6427 + } else break; 6510 6428 } 6511 - if (next_distance != curr_distance) { 6512 - sched_domains_numa_distance[level++] = next_distance; 6513 - sched_domains_numa_levels = level; 6514 - curr_distance = next_distance; 6515 - } else break; 6429 + 6430 + /* 6431 + * In case of sched_debug() we verify the above assumption. 
6432 + */ 6433 + if (!sched_debug()) 6434 + break; 6516 6435 } 6517 6436 /* 6518 6437 * 'level' contains the number of unique distances, excluding the ··· 6642 6525 6643 6526 *per_cpu_ptr(sdd->sg, j) = sg; 6644 6527 6645 - sgp = kzalloc_node(sizeof(struct sched_group_power), 6528 + sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(), 6646 6529 GFP_KERNEL, cpu_to_node(j)); 6647 6530 if (!sgp) 6648 6531 return -ENOMEM; ··· 6695 6578 if (!sd) 6696 6579 return child; 6697 6580 6698 - set_domain_attribute(sd, attr); 6699 6581 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); 6700 6582 if (child) { 6701 6583 sd->level = child->level + 1; ··· 6702 6586 child->parent = sd; 6703 6587 } 6704 6588 sd->child = child; 6589 + set_domain_attribute(sd, attr); 6705 6590 6706 6591 return sd; 6707 6592 }
+10 -19
kernel/sched/fair.c
··· 3602 3602 } while (group != child->groups); 3603 3603 } 3604 3604 3605 - sdg->sgp->power = power; 3605 + sdg->sgp->power_orig = sdg->sgp->power = power; 3606 3606 } 3607 3607 3608 3608 /* ··· 3632 3632 3633 3633 /** 3634 3634 * update_sg_lb_stats - Update sched_group's statistics for load balancing. 3635 - * @sd: The sched_domain whose statistics are to be updated. 3635 + * @env: The load balancing environment. 3636 3636 * @group: sched_group whose statistics are to be updated. 3637 3637 * @load_idx: Load index of sched_domain of this_cpu for load calc. 3638 3638 * @local_group: Does group contain this_cpu. ··· 3652 3652 int i; 3653 3653 3654 3654 if (local_group) 3655 - balance_cpu = group_first_cpu(group); 3655 + balance_cpu = group_balance_cpu(group); 3656 3656 3657 3657 /* Tally up the load of all CPUs in the group */ 3658 3658 max_cpu_load = 0; ··· 3667 3667 3668 3668 /* Bias balancing toward cpus of our domain */ 3669 3669 if (local_group) { 3670 - if (idle_cpu(i) && !first_idle_cpu) { 3670 + if (idle_cpu(i) && !first_idle_cpu && 3671 + cpumask_test_cpu(i, sched_group_mask(group))) { 3671 3672 first_idle_cpu = 1; 3672 3673 balance_cpu = i; 3673 3674 } ··· 3742 3741 3743 3742 /** 3744 3743 * update_sd_pick_busiest - return 1 on busiest group 3745 - * @sd: sched_domain whose statistics are to be checked 3744 + * @env: The load balancing environment. 3746 3745 * @sds: sched_domain statistics 3747 3746 * @sg: sched_group candidate to be checked for being the busiest 3748 3747 * @sgs: sched_group statistics 3749 - * @this_cpu: the current cpu 3750 3748 * 3751 3749 * Determine if @sg is a busier group than the previously selected 3752 3750 * busiest group. ··· 3783 3783 3784 3784 /** 3785 3785 * update_sd_lb_stats - Update sched_domain's statistics for load balancing. 3786 - * @sd: sched_domain whose statistics are to be updated. 3787 - * @this_cpu: Cpu for which load balance is currently performed. 
3788 - * @idle: Idle status of this_cpu 3786 + * @env: The load balancing environment. 3789 3787 * @cpus: Set of cpus considered for load balancing. 3790 3788 * @balance: Should we balance. 3791 3789 * @sds: variable to hold the statistics for this sched_domain. ··· 3872 3874 * Returns 1 when packing is required and a task should be moved to 3873 3875 * this CPU. The amount of the imbalance is returned in *imbalance. 3874 3876 * 3875 - * @sd: The sched_domain whose packing is to be checked. 3877 + * @env: The load balancing environment. 3876 3878 * @sds: Statistics of the sched_domain which is to be packed 3877 - * @this_cpu: The cpu at whose sched_domain we're performing load-balance. 3878 - * @imbalance: returns amount of imbalanced due to packing. 3879 3879 */ 3880 3880 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) 3881 3881 { ··· 3899 3903 * fix_small_imbalance - Calculate the minor imbalance that exists 3900 3904 * amongst the groups of a sched_domain, during 3901 3905 * load balancing. 3906 + * @env: The load balancing environment. 3902 3907 * @sds: Statistics of the sched_domain whose imbalance is to be calculated. 3903 - * @this_cpu: The cpu at whose sched_domain we're performing load-balance. 3904 - * @imbalance: Variable to store the imbalance. 3905 3908 */ 3906 3909 static inline 3907 3910 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) ··· 4043 4048 * Also calculates the amount of weighted load which should be moved 4044 4049 * to restore balance. 4045 4050 * 4046 - * @sd: The sched_domain whose busiest group is to be returned. 4047 - * @this_cpu: The cpu for which load balancing is currently being performed. 4048 - * @imbalance: Variable which stores amount of weighted load which should 4049 - * be moved to restore balance/put a group to idle. 4050 - * @idle: The idle status of this_cpu. 4051 + * @env: The load balancing environment. 
4051 4052 * @cpus: The set of CPUs under consideration for load-balancing. 4052 4053 * @balance: Pointer to a variable indicating if this_cpu 4053 4054 * is the appropriate cpu to perform load balancing at this_level.
+1 -1
kernel/sched/rt.c
··· 1562 1562 task_running(rq, task) || 1563 1563 !task->on_rq)) { 1564 1564 1565 - raw_spin_unlock(&lowest_rq->lock); 1565 + double_unlock_balance(rq, lowest_rq); 1566 1566 lowest_rq = NULL; 1567 1567 break; 1568 1568 }
+2
kernel/sched/sched.h
··· 526 526 DECLARE_PER_CPU(struct sched_domain *, sd_llc); 527 527 DECLARE_PER_CPU(int, sd_llc_id); 528 528 529 + extern int group_balance_cpu(struct sched_group *sg); 530 + 529 531 #endif /* CONFIG_SMP */ 530 532 531 533 #include "stats.h"
+33 -27
kernel/sys.c
··· 1786 1786 } 1787 1787 1788 1788 #ifdef CONFIG_CHECKPOINT_RESTORE 1789 - static bool vma_flags_mismatch(struct vm_area_struct *vma, 1790 - unsigned long required, 1791 - unsigned long banned) 1792 - { 1793 - return (vma->vm_flags & required) != required || 1794 - (vma->vm_flags & banned); 1795 - } 1796 - 1797 1789 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) 1798 1790 { 1791 + struct vm_area_struct *vma; 1799 1792 struct file *exe_file; 1800 1793 struct dentry *dentry; 1801 1794 int err; 1802 - 1803 - /* 1804 - * Setting new mm::exe_file is only allowed when no VM_EXECUTABLE vma's 1805 - * remain. So perform a quick test first. 1806 - */ 1807 - if (mm->num_exe_file_vmas) 1808 - return -EBUSY; 1809 1795 1810 1796 exe_file = fget(fd); 1811 1797 if (!exe_file) ··· 1813 1827 if (err) 1814 1828 goto exit; 1815 1829 1830 + down_write(&mm->mmap_sem); 1831 + 1832 + /* 1833 + * Forbid mm->exe_file change if there are mapped other files. 1834 + */ 1835 + err = -EBUSY; 1836 + for (vma = mm->mmap; vma; vma = vma->vm_next) { 1837 + if (vma->vm_file && !path_equal(&vma->vm_file->f_path, 1838 + &exe_file->f_path)) 1839 + goto exit_unlock; 1840 + } 1841 + 1816 1842 /* 1817 1843 * The symlink can be changed only once, just to disallow arbitrary 1818 1844 * transitions malicious software might bring in. This means one 1819 1845 * could make a snapshot over all processes running and monitor 1820 1846 * /proc/pid/exe changes to notice unusual activity if needed. 
1821 1847 */ 1822 - down_write(&mm->mmap_sem); 1823 - if (likely(!mm->exe_file)) 1824 - set_mm_exe_file(mm, exe_file); 1825 - else 1826 - err = -EBUSY; 1848 + err = -EPERM; 1849 + if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags)) 1850 + goto exit_unlock; 1851 + 1852 + set_mm_exe_file(mm, exe_file); 1853 + exit_unlock: 1827 1854 up_write(&mm->mmap_sem); 1828 1855 1829 1856 exit: ··· 1861 1862 if (opt == PR_SET_MM_EXE_FILE) 1862 1863 return prctl_set_mm_exe_file(mm, (unsigned int)addr); 1863 1864 1864 - if (addr >= TASK_SIZE) 1865 + if (addr >= TASK_SIZE || addr < mmap_min_addr) 1865 1866 return -EINVAL; 1866 1867 1867 1868 error = -EINVAL; ··· 1923 1924 error = -EFAULT; 1924 1925 goto out; 1925 1926 } 1926 - #ifdef CONFIG_STACK_GROWSUP 1927 - if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSUP, 0)) 1928 - #else 1929 - if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSDOWN, 0)) 1930 - #endif 1931 - goto out; 1932 1927 if (opt == PR_SET_MM_START_STACK) 1933 1928 mm->start_stack = addr; 1934 1929 else if (opt == PR_SET_MM_ARG_START) ··· 1974 1981 up_read(&mm->mmap_sem); 1975 1982 return error; 1976 1983 } 1984 + 1985 + static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) 1986 + { 1987 + return put_user(me->clear_child_tid, tid_addr); 1988 + } 1989 + 1977 1990 #else /* CONFIG_CHECKPOINT_RESTORE */ 1978 1991 static int prctl_set_mm(int opt, unsigned long addr, 1979 1992 unsigned long arg4, unsigned long arg5) 1993 + { 1994 + return -EINVAL; 1995 + } 1996 + static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) 1980 1997 { 1981 1998 return -EINVAL; 1982 1999 } ··· 2127 2124 else 2128 2125 return -EINVAL; 2129 2126 break; 2127 + case PR_GET_TID_ADDRESS: 2128 + error = prctl_get_tid_address(me, (int __user **)arg2); 2129 + break; 2130 2130 default: 2131 2131 return -EINVAL; 2132 2132 }
+6 -1
kernel/time/tick-sched.c
··· 274 274 static void tick_nohz_stop_sched_tick(struct tick_sched *ts) 275 275 { 276 276 unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; 277 + unsigned long rcu_delta_jiffies; 277 278 ktime_t last_update, expires, now; 278 279 struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; 279 280 u64 time_delta; ··· 323 322 time_delta = timekeeping_max_deferment(); 324 323 } while (read_seqretry(&xtime_lock, seq)); 325 324 326 - if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) || 325 + if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) || 327 326 arch_needs_cpu(cpu)) { 328 327 next_jiffies = last_jiffies + 1; 329 328 delta_jiffies = 1; ··· 331 330 /* Get the next timer wheel timer */ 332 331 next_jiffies = get_next_timer_interrupt(last_jiffies); 333 332 delta_jiffies = next_jiffies - last_jiffies; 333 + if (rcu_delta_jiffies < delta_jiffies) { 334 + next_jiffies = last_jiffies + rcu_delta_jiffies; 335 + delta_jiffies = rcu_delta_jiffies; 336 + } 334 337 } 335 338 /* 336 339 * Do not stop the tick, if we are only one off
+2
kernel/time/timekeeping.c
··· 962 962 timekeeper.xtime.tv_sec++; 963 963 leap = second_overflow(timekeeper.xtime.tv_sec); 964 964 timekeeper.xtime.tv_sec += leap; 965 + timekeeper.wall_to_monotonic.tv_sec -= leap; 965 966 } 966 967 967 968 /* Accumulate raw time */ ··· 1078 1077 timekeeper.xtime.tv_sec++; 1079 1078 leap = second_overflow(timekeeper.xtime.tv_sec); 1080 1079 timekeeper.xtime.tv_sec += leap; 1080 + timekeeper.wall_to_monotonic.tv_sec -= leap; 1081 1081 } 1082 1082 1083 1083 timekeeping_update(false);
+1 -1
kernel/trace/trace.c
··· 371 371 void tracing_off(void) 372 372 { 373 373 if (global_trace.buffer) 374 - ring_buffer_record_on(global_trace.buffer); 374 + ring_buffer_record_off(global_trace.buffer); 375 375 /* 376 376 * This flag is only looked at when buffers haven't been 377 377 * allocated yet. We don't really care about the race
+18 -1
kernel/watchdog.c
··· 372 372 373 373 374 374 #ifdef CONFIG_HARDLOCKUP_DETECTOR 375 + /* 376 + * People like the simple clean cpu node info on boot. 377 + * Reduce the watchdog noise by only printing messages 378 + * that are different from what cpu0 displayed. 379 + */ 380 + static unsigned long cpu0_err; 381 + 375 382 static int watchdog_nmi_enable(int cpu) 376 383 { 377 384 struct perf_event_attr *wd_attr; ··· 397 390 398 391 /* Try to register using hardware perf events */ 399 392 event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); 393 + 394 + /* save cpu0 error for future comparision */ 395 + if (cpu == 0 && IS_ERR(event)) 396 + cpu0_err = PTR_ERR(event); 397 + 400 398 if (!IS_ERR(event)) { 401 - pr_info("enabled, takes one hw-pmu counter.\n"); 399 + /* only print for cpu0 or different than cpu0 */ 400 + if (cpu == 0 || cpu0_err) 401 + pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n"); 402 402 goto out_save; 403 403 } 404 404 405 + /* skip displaying the same error again */ 406 + if (cpu > 0 && (PTR_ERR(event) == cpu0_err)) 407 + return PTR_ERR(event); 405 408 406 409 /* vary the KERN level based on the returned errno */ 407 410 if (PTR_ERR(event) == -EOPNOTSUPP)
+20
lib/Kconfig.debug
··· 241 241 default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC 242 242 default 1 if BOOTPARAM_SOFTLOCKUP_PANIC 243 243 244 + config PANIC_ON_OOPS 245 + bool "Panic on Oops" if EXPERT 246 + default n 247 + help 248 + Say Y here to enable the kernel to panic when it oopses. This 249 + has the same effect as setting oops=panic on the kernel command 250 + line. 251 + 252 + This feature is useful to ensure that the kernel does not do 253 + anything erroneous after an oops which could result in data 254 + corruption or other issues. 255 + 256 + Say N if unsure. 257 + 258 + config PANIC_ON_OOPS_VALUE 259 + int 260 + range 0 1 261 + default 0 if !PANIC_ON_OOPS 262 + default 1 if PANIC_ON_OOPS 263 + 244 264 config DETECT_HUNG_TASK 245 265 bool "Detect Hung Tasks" 246 266 depends on DEBUG_KERNEL
+3 -2
lib/btree.c
··· 319 319 320 320 if (head->height == 0) 321 321 return NULL; 322 - retry: 323 322 longcpy(key, __key, geo->keylen); 323 + retry: 324 324 dec_key(geo, key); 325 325 326 326 node = head->node; ··· 351 351 } 352 352 miss: 353 353 if (retry_key) { 354 - __key = retry_key; 354 + longcpy(key, retry_key, geo->keylen); 355 355 retry_key = NULL; 356 356 goto retry; 357 357 } ··· 509 509 int btree_insert(struct btree_head *head, struct btree_geo *geo, 510 510 unsigned long *key, void *val, gfp_t gfp) 511 511 { 512 + BUG_ON(!val); 512 513 return btree_insert_level(head, geo, key, val, 1, gfp); 513 514 } 514 515 EXPORT_SYMBOL_GPL(btree_insert);
+3
lib/radix-tree.c
··· 686 686 * during iterating; it can be zero only at the beginning. 687 687 * And we cannot overflow iter->next_index in a single step, 688 688 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. 689 + * 690 + * This condition also used by radix_tree_next_slot() to stop 691 + * contiguous iterating, and forbid swithing to the next chunk. 689 692 */ 690 693 index = iter->next_index; 691 694 if (!index && iter->index)
+4 -3
lib/raid6/recov.c
··· 22 22 #include <linux/raid/pq.h> 23 23 24 24 /* Recover two failed data blocks. */ 25 - void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, int failb, 26 - void **ptrs) 25 + static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, 26 + int failb, void **ptrs) 27 27 { 28 28 u8 *p, *q, *dp, *dq; 29 29 u8 px, qx, db; ··· 66 66 } 67 67 68 68 /* Recover failure of one data block plus the P block */ 69 - void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, void **ptrs) 69 + static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, 70 + void **ptrs) 70 71 { 71 72 u8 *p, *q, *dq; 72 73 const u8 *qmul; /* Q multiplier table */
+4 -3
lib/raid6/recov_ssse3.c
··· 19 19 boot_cpu_has(X86_FEATURE_SSSE3); 20 20 } 21 21 22 - void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, int failb, 23 - void **ptrs) 22 + static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, 23 + int failb, void **ptrs) 24 24 { 25 25 u8 *p, *q, *dp, *dq; 26 26 const u8 *pbmul; /* P multiplier table for B data */ ··· 194 194 } 195 195 196 196 197 - void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, void **ptrs) 197 + static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, 198 + void **ptrs) 198 199 { 199 200 u8 *p, *q, *dq; 200 201 const u8 *qmul; /* Q multiplier table */
+1 -1
lib/spinlock_debug.c
··· 118 118 /* lockup suspected: */ 119 119 if (print_once) { 120 120 print_once = 0; 121 - spin_dump(lock, "lockup"); 121 + spin_dump(lock, "lockup suspected"); 122 122 #ifdef CONFIG_SMP 123 123 trigger_all_cpu_backtrace(); 124 124 #endif
+20
mm/memblock.c
··· 867 867 return memblock_search(&memblock.memory, addr) != -1; 868 868 } 869 869 870 + /** 871 + * memblock_is_region_memory - check if a region is a subset of memory 872 + * @base: base of region to check 873 + * @size: size of region to check 874 + * 875 + * Check if the region [@base, @base+@size) is a subset of a memory block. 876 + * 877 + * RETURNS: 878 + * 0 if false, non-zero if true 879 + */ 870 880 int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) 871 881 { 872 882 int idx = memblock_search(&memblock.memory, base); ··· 889 879 memblock.memory.regions[idx].size) >= end; 890 880 } 891 881 882 + /** 883 + * memblock_is_region_reserved - check if a region intersects reserved memory 884 + * @base: base of region to check 885 + * @size: size of region to check 886 + * 887 + * Check if the region [@base, @base+@size) intersects a reserved memory block. 888 + * 889 + * RETURNS: 890 + * 0 if false, non-zero if true 891 + */ 892 892 int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) 893 893 { 894 894 memblock_cap_size(base, &size);
+2 -2
mm/oom_kill.c
··· 183 183 unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, 184 184 const nodemask_t *nodemask, unsigned long totalpages) 185 185 { 186 - unsigned long points; 186 + long points; 187 187 188 188 if (oom_unkillable_task(p, memcg, nodemask)) 189 189 return 0; ··· 223 223 * Never return 0 for an eligible task regardless of the root bonus and 224 224 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here). 225 225 */ 226 - return points ? points : 1; 226 + return points > 0 ? points : 1; 227 227 } 228 228 229 229 /*
+37 -20
mm/shmem.c
··· 683 683 mutex_lock(&shmem_swaplist_mutex); 684 684 /* 685 685 * We needed to drop mutex to make that restrictive page 686 - * allocation; but the inode might already be freed by now, 687 - * and we cannot refer to inode or mapping or info to check. 688 - * However, we do hold page lock on the PageSwapCache page, 689 - * so can check if that still has our reference remaining. 686 + * allocation, but the inode might have been freed while we 687 + * dropped it: although a racing shmem_evict_inode() cannot 688 + * complete without emptying the radix_tree, our page lock 689 + * on this swapcache page is not enough to prevent that - 690 + * free_swap_and_cache() of our swap entry will only 691 + * trylock_page(), removing swap from radix_tree whatever. 692 + * 693 + * We must not proceed to shmem_add_to_page_cache() if the 694 + * inode has been freed, but of course we cannot rely on 695 + * inode or mapping or info to check that. However, we can 696 + * safely check if our swap entry is still in use (and here 697 + * it can't have got reused for another page): if it's still 698 + * in use, then the inode cannot have been freed yet, and we 699 + * can safely proceed (if it's no longer in use, that tells 700 + * nothing about the inode, but we don't need to unuse swap). 690 701 */ 691 702 if (!page_swapcount(*pagep)) 692 703 error = -ENOENT; ··· 741 730 742 731 /* 743 732 * There's a faint possibility that swap page was replaced before 744 - * caller locked it: it will come back later with the right page. 733 + * caller locked it: caller will come back later with the right page. 
745 734 */ 746 - if (unlikely(!PageSwapCache(page))) 735 + if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) 747 736 goto out; 748 737 749 738 /* ··· 1006 995 newpage = shmem_alloc_page(gfp, info, index); 1007 996 if (!newpage) 1008 997 return -ENOMEM; 1009 - VM_BUG_ON(shmem_should_replace_page(newpage, gfp)); 1010 998 1011 - *pagep = newpage; 1012 999 page_cache_get(newpage); 1013 1000 copy_highpage(newpage, oldpage); 1001 + flush_dcache_page(newpage); 1014 1002 1015 - VM_BUG_ON(!PageLocked(oldpage)); 1016 1003 __set_page_locked(newpage); 1017 - VM_BUG_ON(!PageUptodate(oldpage)); 1018 1004 SetPageUptodate(newpage); 1019 - VM_BUG_ON(!PageSwapBacked(oldpage)); 1020 1005 SetPageSwapBacked(newpage); 1021 - VM_BUG_ON(!swap_index); 1022 1006 set_page_private(newpage, swap_index); 1023 - VM_BUG_ON(!PageSwapCache(oldpage)); 1024 1007 SetPageSwapCache(newpage); 1025 1008 1026 1009 /* ··· 1024 1019 spin_lock_irq(&swap_mapping->tree_lock); 1025 1020 error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, 1026 1021 newpage); 1027 - __inc_zone_page_state(newpage, NR_FILE_PAGES); 1028 - __dec_zone_page_state(oldpage, NR_FILE_PAGES); 1022 + if (!error) { 1023 + __inc_zone_page_state(newpage, NR_FILE_PAGES); 1024 + __dec_zone_page_state(oldpage, NR_FILE_PAGES); 1025 + } 1029 1026 spin_unlock_irq(&swap_mapping->tree_lock); 1030 - BUG_ON(error); 1031 1027 1032 - mem_cgroup_replace_page_cache(oldpage, newpage); 1033 - lru_cache_add_anon(newpage); 1028 + if (unlikely(error)) { 1029 + /* 1030 + * Is this possible? I think not, now that our callers check 1031 + * both PageSwapCache and page_private after getting page lock; 1032 + * but be defensive. Reverse old to newpage for clear and free. 
1033 + */ 1034 + oldpage = newpage; 1035 + } else { 1036 + mem_cgroup_replace_page_cache(oldpage, newpage); 1037 + lru_cache_add_anon(newpage); 1038 + *pagep = newpage; 1039 + } 1034 1040 1035 1041 ClearPageSwapCache(oldpage); 1036 1042 set_page_private(oldpage, 0); ··· 1049 1033 unlock_page(oldpage); 1050 1034 page_cache_release(oldpage); 1051 1035 page_cache_release(oldpage); 1052 - return 0; 1036 + return error; 1053 1037 } 1054 1038 1055 1039 /* ··· 1123 1107 1124 1108 /* We have to do this with page locked to prevent races */ 1125 1109 lock_page(page); 1126 - if (!PageSwapCache(page) || page->mapping) { 1110 + if (!PageSwapCache(page) || page_private(page) != swap.val || 1111 + page->mapping) { 1127 1112 error = -EEXIST; /* try again */ 1128 1113 goto failed; 1129 1114 }
+4 -8
mm/swapfile.c
··· 1916 1916 1917 1917 /* 1918 1918 * Find out how many pages are allowed for a single swap 1919 - * device. There are three limiting factors: 1) the number 1919 + * device. There are two limiting factors: 1) the number 1920 1920 * of bits for the swap offset in the swp_entry_t type, and 1921 1921 * 2) the number of bits in the swap pte as defined by the 1922 - * the different architectures, and 3) the number of free bits 1923 - * in an exceptional radix_tree entry. In order to find the 1922 + * different architectures. In order to find the 1924 1923 * largest possible bit mask, a swap entry with swap type 0 1925 1924 * and swap offset ~0UL is created, encoded to a swap pte, 1926 1925 * decoded to a swp_entry_t again, and finally the swap 1927 1926 * offset is extracted. This will mask all the bits from 1928 1927 * the initial ~0UL mask that can't be encoded in either 1929 1928 * the swp_entry_t or the architecture definition of a 1930 - * swap pte. Then the same is done for a radix_tree entry. 1929 + * swap pte. 1931 1930 */ 1932 1931 maxpages = swp_offset(pte_to_swp_entry( 1933 - swp_entry_to_pte(swp_entry(0, ~0UL)))); 1934 - maxpages = swp_offset(radix_to_swp_entry( 1935 - swp_to_radix_entry(swp_entry(0, maxpages)))) + 1; 1936 - 1932 + swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; 1937 1933 if (maxpages > swap_header->info.last_page) { 1938 1934 maxpages = swap_header->info.last_page + 1; 1939 1935 /* p->max is an unsigned int: don't overflow it */
+1 -3
net/appletalk/ddp.c
··· 1208 1208 if (addr->sat_addr.s_node == ATADDR_BCAST && 1209 1209 !sock_flag(sk, SOCK_BROADCAST)) { 1210 1210 #if 1 1211 - printk(KERN_WARNING "%s is broken and did not set " 1212 - "SO_BROADCAST. It will break when 2.2 is " 1213 - "released.\n", 1211 + pr_warn("atalk_connect: %s is broken and did not set SO_BROADCAST.\n", 1214 1212 current->comm); 1215 1213 #else 1216 1214 return -EACCES;
+1 -1
net/bluetooth/af_bluetooth.c
··· 210 210 } 211 211 212 212 if (sk->sk_state == BT_CONNECTED || !newsock || 213 - test_bit(BT_DEFER_SETUP, &bt_sk(parent)->flags)) { 213 + test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) { 214 214 bt_accept_unlink(sk); 215 215 if (newsock) 216 216 sock_graft(sk, newsock);
+33 -69
net/core/drop_monitor.c
··· 36 36 #define TRACE_ON 1 37 37 #define TRACE_OFF 0 38 38 39 - static void send_dm_alert(struct work_struct *unused); 40 - 41 - 42 39 /* 43 40 * Globals, our netlink socket pointer 44 41 * and the work handle that will send up ··· 45 48 static DEFINE_MUTEX(trace_state_mutex); 46 49 47 50 struct per_cpu_dm_data { 48 - struct work_struct dm_alert_work; 49 - struct sk_buff __rcu *skb; 50 - atomic_t dm_hit_count; 51 - struct timer_list send_timer; 52 - int cpu; 51 + spinlock_t lock; 52 + struct sk_buff *skb; 53 + struct work_struct dm_alert_work; 54 + struct timer_list send_timer; 53 55 }; 54 56 55 57 struct dm_hw_stat_delta { ··· 74 78 static unsigned long dm_hw_check_delta = 2*HZ; 75 79 static LIST_HEAD(hw_stats_list); 76 80 77 - static void reset_per_cpu_data(struct per_cpu_dm_data *data) 81 + static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data) 78 82 { 79 83 size_t al; 80 84 struct net_dm_alert_msg *msg; 81 85 struct nlattr *nla; 82 86 struct sk_buff *skb; 83 - struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1); 87 + unsigned long flags; 84 88 85 89 al = sizeof(struct net_dm_alert_msg); 86 90 al += dm_hit_limit * sizeof(struct net_dm_drop_point); ··· 95 99 sizeof(struct net_dm_alert_msg)); 96 100 msg = nla_data(nla); 97 101 memset(msg, 0, al); 98 - } else 99 - schedule_work_on(data->cpu, &data->dm_alert_work); 100 - 101 - /* 102 - * Don't need to lock this, since we are guaranteed to only 103 - * run this on a single cpu at a time. 104 - * Note also that we only update data->skb if the old and new skb 105 - * pointers don't match. This ensures that we don't continually call 106 - * synchornize_rcu if we repeatedly fail to alloc a new netlink message. 
107 - */ 108 - if (skb != oskb) { 109 - rcu_assign_pointer(data->skb, skb); 110 - 111 - synchronize_rcu(); 112 - 113 - atomic_set(&data->dm_hit_count, dm_hit_limit); 102 + } else { 103 + mod_timer(&data->send_timer, jiffies + HZ / 10); 114 104 } 115 105 106 + spin_lock_irqsave(&data->lock, flags); 107 + swap(data->skb, skb); 108 + spin_unlock_irqrestore(&data->lock, flags); 109 + 110 + return skb; 116 111 } 117 112 118 - static void send_dm_alert(struct work_struct *unused) 113 + static void send_dm_alert(struct work_struct *work) 119 114 { 120 115 struct sk_buff *skb; 121 - struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); 116 + struct per_cpu_dm_data *data; 122 117 123 - WARN_ON_ONCE(data->cpu != smp_processor_id()); 118 + data = container_of(work, struct per_cpu_dm_data, dm_alert_work); 124 119 125 - /* 126 - * Grab the skb we're about to send 127 - */ 128 - skb = rcu_dereference_protected(data->skb, 1); 120 + skb = reset_per_cpu_data(data); 129 121 130 - /* 131 - * Replace it with a new one 132 - */ 133 - reset_per_cpu_data(data); 134 - 135 - /* 136 - * Ship it! 137 - */ 138 122 if (skb) 139 123 genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); 140 - 141 - put_cpu_var(dm_cpu_data); 142 124 } 143 125 144 126 /* 145 127 * This is the timer function to delay the sending of an alert 146 128 * in the event that more drops will arrive during the 147 - * hysteresis period. Note that it operates under the timer interrupt 148 - * so we don't need to disable preemption here 129 + * hysteresis period. 
149 130 */ 150 - static void sched_send_work(unsigned long unused) 131 + static void sched_send_work(unsigned long _data) 151 132 { 152 - struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); 133 + struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data; 153 134 154 - schedule_work_on(smp_processor_id(), &data->dm_alert_work); 155 - 156 - put_cpu_var(dm_cpu_data); 135 + schedule_work(&data->dm_alert_work); 157 136 } 158 137 159 138 static void trace_drop_common(struct sk_buff *skb, void *location) ··· 138 167 struct nlattr *nla; 139 168 int i; 140 169 struct sk_buff *dskb; 141 - struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); 170 + struct per_cpu_dm_data *data; 171 + unsigned long flags; 142 172 143 - 144 - rcu_read_lock(); 145 - dskb = rcu_dereference(data->skb); 173 + local_irq_save(flags); 174 + data = &__get_cpu_var(dm_cpu_data); 175 + spin_lock(&data->lock); 176 + dskb = data->skb; 146 177 147 178 if (!dskb) 148 179 goto out; 149 - 150 - if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) { 151 - /* 152 - * we're already at zero, discard this hit 153 - */ 154 - goto out; 155 - } 156 180 157 181 nlh = (struct nlmsghdr *)dskb->data; 158 182 nla = genlmsg_data(nlmsg_data(nlh)); ··· 155 189 for (i = 0; i < msg->entries; i++) { 156 190 if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { 157 191 msg->points[i].count++; 158 - atomic_inc(&data->dm_hit_count); 159 192 goto out; 160 193 } 161 194 } 162 - 195 + if (msg->entries == dm_hit_limit) 196 + goto out; 163 197 /* 164 198 * We need to create a new entry 165 199 */ ··· 171 205 172 206 if (!timer_pending(&data->send_timer)) { 173 207 data->send_timer.expires = jiffies + dm_delay * HZ; 174 - add_timer_on(&data->send_timer, smp_processor_id()); 208 + add_timer(&data->send_timer); 175 209 } 176 210 177 211 out: 178 - rcu_read_unlock(); 179 - put_cpu_var(dm_cpu_data); 180 - return; 212 + spin_unlock_irqrestore(&data->lock, flags); 181 213 } 182 214 183 215 static void 
trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location) ··· 382 418 383 419 for_each_possible_cpu(cpu) { 384 420 data = &per_cpu(dm_cpu_data, cpu); 385 - data->cpu = cpu; 386 421 INIT_WORK(&data->dm_alert_work, send_dm_alert); 387 422 init_timer(&data->send_timer); 388 - data->send_timer.data = cpu; 423 + data->send_timer.data = (unsigned long)data; 389 424 data->send_timer.function = sched_send_work; 425 + spin_lock_init(&data->lock); 390 426 reset_per_cpu_data(data); 391 427 } 392 428
+2 -2
net/core/filter.c
··· 616 616 /** 617 617 * sk_unattached_filter_create - create an unattached filter 618 618 * @fprog: the filter program 619 - * @sk: the socket to use 619 + * @pfp: the unattached filter that is created 620 620 * 621 - * Create a filter independent ofr any socket. We first run some 621 + * Create a filter independent of any socket. We first run some 622 622 * sanity checks on it to make sure it does not explode on us later. 623 623 * If an error occurs or there is insufficient memory for the filter 624 624 * a negative errno code is returned. On success the return is zero.
+6 -8
net/core/neighbour.c
··· 2219 2219 rcu_read_lock_bh(); 2220 2220 nht = rcu_dereference_bh(tbl->nht); 2221 2221 2222 - for (h = 0; h < (1 << nht->hash_shift); h++) { 2223 - if (h < s_h) 2224 - continue; 2222 + for (h = s_h; h < (1 << nht->hash_shift); h++) { 2225 2223 if (h > s_h) 2226 2224 s_idx = 0; 2227 2225 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; ··· 2258 2260 2259 2261 read_lock_bh(&tbl->lock); 2260 2262 2261 - for (h = 0; h <= PNEIGH_HASHMASK; h++) { 2262 - if (h < s_h) 2263 - continue; 2263 + for (h = s_h; h <= PNEIGH_HASHMASK; h++) { 2264 2264 if (h > s_h) 2265 2265 s_idx = 0; 2266 2266 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { ··· 2293 2297 struct neigh_table *tbl; 2294 2298 int t, family, s_t; 2295 2299 int proxy = 0; 2296 - int err = 0; 2300 + int err; 2297 2301 2298 2302 read_lock(&neigh_tbl_lock); 2299 2303 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; ··· 2307 2311 2308 2312 s_t = cb->args[0]; 2309 2313 2310 - for (tbl = neigh_tables, t = 0; tbl && (err >= 0); 2314 + for (tbl = neigh_tables, t = 0; tbl; 2311 2315 tbl = tbl->next, t++) { 2312 2316 if (t < s_t || (family && tbl->family != family)) 2313 2317 continue; ··· 2318 2322 err = pneigh_dump_table(tbl, skb, cb); 2319 2323 else 2320 2324 err = neigh_dump_table(tbl, skb, cb); 2325 + if (err < 0) 2326 + break; 2321 2327 } 2322 2328 read_unlock(&neigh_tbl_lock); 2323 2329
+6 -5
net/core/netpoll.c
··· 362 362 363 363 void netpoll_send_udp(struct netpoll *np, const char *msg, int len) 364 364 { 365 - int total_len, eth_len, ip_len, udp_len; 365 + int total_len, ip_len, udp_len; 366 366 struct sk_buff *skb; 367 367 struct udphdr *udph; 368 368 struct iphdr *iph; 369 369 struct ethhdr *eth; 370 370 371 371 udp_len = len + sizeof(*udph); 372 - ip_len = eth_len = udp_len + sizeof(*iph); 373 - total_len = eth_len + ETH_HLEN + NET_IP_ALIGN; 372 + ip_len = udp_len + sizeof(*iph); 373 + total_len = ip_len + LL_RESERVED_SPACE(np->dev); 374 374 375 - skb = find_skb(np, total_len, total_len - len); 375 + skb = find_skb(np, total_len + np->dev->needed_tailroom, 376 + total_len - len); 376 377 if (!skb) 377 378 return; 378 379 379 380 skb_copy_to_linear_data(skb, msg, len); 380 - skb->len += len; 381 + skb_put(skb, len); 381 382 382 383 skb_push(skb, sizeof(*udph)); 383 384 skb_reset_transport_header(skb);
+1 -1
net/core/skbuff.c
··· 3361 3361 * @to: prior buffer 3362 3362 * @from: buffer to add 3363 3363 * @fragstolen: pointer to boolean 3364 - * 3364 + * @delta_truesize: how much more was allocated than was requested 3365 3365 */ 3366 3366 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 3367 3367 bool *fragstolen, int *delta_truesize)
+12 -4
net/ipv4/inetpeer.c
··· 560 560 } 561 561 EXPORT_SYMBOL(inet_peer_xrlim_allow); 562 562 563 + static void inetpeer_inval_rcu(struct rcu_head *head) 564 + { 565 + struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu); 566 + 567 + spin_lock_bh(&gc_lock); 568 + list_add_tail(&p->gc_list, &gc_list); 569 + spin_unlock_bh(&gc_lock); 570 + 571 + schedule_delayed_work(&gc_work, gc_delay); 572 + } 573 + 563 574 void inetpeer_invalidate_tree(int family) 564 575 { 565 576 struct inet_peer *old, *new, *prev; ··· 587 576 prev = cmpxchg(&base->root, old, new); 588 577 if (prev == old) { 589 578 base->total = 0; 590 - spin_lock(&gc_lock); 591 - list_add_tail(&prev->gc_list, &gc_list); 592 - spin_unlock(&gc_lock); 593 - schedule_delayed_work(&gc_work, gc_delay); 579 + call_rcu(&prev->gc_rcu, inetpeer_inval_rcu); 594 580 } 595 581 596 582 out:
+1
net/ipv4/ip_forward.c
··· 44 44 struct ip_options *opt = &(IPCB(skb)->opt); 45 45 46 46 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); 47 + IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len); 47 48 48 49 if (unlikely(opt->optlen)) 49 50 ip_forward_options(skb);
+1
net/ipv4/ipmr.c
··· 1574 1574 struct ip_options *opt = &(IPCB(skb)->opt); 1575 1575 1576 1576 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); 1577 + IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len); 1577 1578 1578 1579 if (unlikely(opt->optlen)) 1579 1580 ip_forward_options(skb);
+1 -1
net/ipv6/ip6_fib.c
··· 1561 1561 neigh_flags = neigh->flags; 1562 1562 neigh_release(neigh); 1563 1563 } 1564 - if (neigh_flags & NTF_ROUTER) { 1564 + if (!(neigh_flags & NTF_ROUTER)) { 1565 1565 RT6_TRACE("purging route %p via non-router but gateway\n", 1566 1566 rt); 1567 1567 return -1;
+1
net/ipv6/ip6_output.c
··· 526 526 hdr->hop_limit--; 527 527 528 528 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); 529 + IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); 529 530 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, 530 531 ip6_forward_finish); 531 532
+2
net/ipv6/ip6mr.c
··· 1886 1886 { 1887 1887 IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), 1888 1888 IPSTATS_MIB_OUTFORWDATAGRAMS); 1889 + IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), 1890 + IPSTATS_MIB_OUTOCTETS, skb->len); 1889 1891 return dst_output(skb); 1890 1892 } 1891 1893
+2
net/l2tp/l2tp_eth.c
··· 162 162 if (dev) { 163 163 unregister_netdev(dev); 164 164 spriv->dev = NULL; 165 + module_put(THIS_MODULE); 165 166 } 166 167 } 167 168 } ··· 250 249 if (rc < 0) 251 250 goto out_del_dev; 252 251 252 + __module_get(THIS_MODULE); 253 253 /* Must be done after register_netdev() */ 254 254 strlcpy(session->ifname, dev->name, IFNAMSIZ); 255 255
+6 -3
net/l2tp/l2tp_ip.c
··· 464 464 sk->sk_bound_dev_if); 465 465 if (IS_ERR(rt)) 466 466 goto no_route; 467 - if (connected) 467 + if (connected) { 468 468 sk_setup_caps(sk, &rt->dst); 469 - else 470 - dst_release(&rt->dst); /* safe since we hold rcu_read_lock */ 469 + } else { 470 + skb_dst_set(skb, &rt->dst); 471 + goto xmit; 472 + } 471 473 } 472 474 473 475 /* We dont need to clone dst here, it is guaranteed to not disappear. ··· 477 475 */ 478 476 skb_dst_set_noref(skb, &rt->dst); 479 477 478 + xmit: 480 479 /* Queue the packet to IP for output */ 481 480 rc = ip_queue_xmit(skb, &inet->cork.fl); 482 481 rcu_read_unlock();
+6 -1
net/mac80211/agg-rx.c
··· 145 145 struct tid_ampdu_rx *tid_rx; 146 146 unsigned long timeout; 147 147 148 + rcu_read_lock(); 148 149 tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]); 149 - if (!tid_rx) 150 + if (!tid_rx) { 151 + rcu_read_unlock(); 150 152 return; 153 + } 151 154 152 155 timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout); 153 156 if (time_is_after_jiffies(timeout)) { 154 157 mod_timer(&tid_rx->session_timer, timeout); 158 + rcu_read_unlock(); 155 159 return; 156 160 } 161 + rcu_read_unlock(); 157 162 158 163 #ifdef CONFIG_MAC80211_HT_DEBUG 159 164 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
+3 -3
net/mac80211/cfg.c
··· 533 533 sinfo.filled = 0; 534 534 sta_set_sinfo(sta, &sinfo); 535 535 536 - if (sinfo.filled | STATION_INFO_TX_BITRATE) 536 + if (sinfo.filled & STATION_INFO_TX_BITRATE) 537 537 data[i] = 100000 * 538 538 cfg80211_calculate_bitrate(&sinfo.txrate); 539 539 i++; 540 - if (sinfo.filled | STATION_INFO_RX_BITRATE) 540 + if (sinfo.filled & STATION_INFO_RX_BITRATE) 541 541 data[i] = 100000 * 542 542 cfg80211_calculate_bitrate(&sinfo.rxrate); 543 543 i++; 544 544 545 - if (sinfo.filled | STATION_INFO_SIGNAL_AVG) 545 + if (sinfo.filled & STATION_INFO_SIGNAL_AVG) 546 546 data[i] = (u8)sinfo.signal_avg; 547 547 i++; 548 548 } else {
+12
net/mac80211/iface.c
··· 637 637 ieee80211_configure_filter(local); 638 638 break; 639 639 default: 640 + mutex_lock(&local->mtx); 641 + if (local->hw_roc_dev == sdata->dev && 642 + local->hw_roc_channel) { 643 + /* ignore return value since this is racy */ 644 + drv_cancel_remain_on_channel(local); 645 + ieee80211_queue_work(&local->hw, &local->hw_roc_done); 646 + } 647 + mutex_unlock(&local->mtx); 648 + 649 + flush_work(&local->hw_roc_start); 650 + flush_work(&local->hw_roc_done); 651 + 640 652 flush_work(&sdata->work); 641 653 /* 642 654 * When we get here, the interface is marked down.
+28 -10
net/mac80211/mlme.c
··· 1220 1220 sdata->vif.bss_conf.qos = true; 1221 1221 } 1222 1222 1223 + static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata) 1224 + { 1225 + lockdep_assert_held(&sdata->local->mtx); 1226 + 1227 + sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | 1228 + IEEE80211_STA_BEACON_POLL); 1229 + ieee80211_run_deferred_scan(sdata->local); 1230 + } 1231 + 1232 + static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata) 1233 + { 1234 + mutex_lock(&sdata->local->mtx); 1235 + __ieee80211_stop_poll(sdata); 1236 + mutex_unlock(&sdata->local->mtx); 1237 + } 1238 + 1223 1239 static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, 1224 1240 u16 capab, bool erp_valid, u8 erp) 1225 1241 { ··· 1301 1285 sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE; 1302 1286 1303 1287 /* just to be sure */ 1304 - sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | 1305 - IEEE80211_STA_BEACON_POLL); 1288 + ieee80211_stop_poll(sdata); 1306 1289 1307 1290 ieee80211_led_assoc(local, 1); 1308 1291 ··· 1471 1456 return; 1472 1457 } 1473 1458 1474 - ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | 1475 - IEEE80211_STA_BEACON_POLL); 1459 + __ieee80211_stop_poll(sdata); 1476 1460 1477 1461 mutex_lock(&local->iflist_mtx); 1478 1462 ieee80211_recalc_ps(local, -1); ··· 1491 1477 round_jiffies_up(jiffies + 1492 1478 IEEE80211_CONNECTION_IDLE_TIME)); 1493 1479 out: 1494 - ieee80211_run_deferred_scan(local); 1495 1480 mutex_unlock(&local->mtx); 1496 1481 } 1497 1482 ··· 2421 2408 net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n", 2422 2409 sdata->name); 2423 2410 #endif 2411 + mutex_lock(&local->mtx); 2424 2412 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; 2413 + ieee80211_run_deferred_scan(local); 2414 + mutex_unlock(&local->mtx); 2415 + 2425 2416 mutex_lock(&local->iflist_mtx); 2426 2417 ieee80211_recalc_ps(local, -1); 2427 2418 mutex_unlock(&local->iflist_mtx); ··· 2612 2595 struct ieee80211_if_managed *ifmgd = 
&sdata->u.mgd; 2613 2596 u8 frame_buf[DEAUTH_DISASSOC_LEN]; 2614 2597 2615 - ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | 2616 - IEEE80211_STA_BEACON_POLL); 2598 + ieee80211_stop_poll(sdata); 2617 2599 2618 2600 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, 2619 2601 false, frame_buf); ··· 2890 2874 u32 flags; 2891 2875 2892 2876 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 2893 - sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL | 2894 - IEEE80211_STA_CONNECTION_POLL); 2877 + __ieee80211_stop_poll(sdata); 2895 2878 2896 2879 /* let's probe the connection once */ 2897 2880 flags = sdata->local->hw.flags; ··· 2959 2944 if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running)) 2960 2945 add_timer(&ifmgd->chswitch_timer); 2961 2946 ieee80211_sta_reset_beacon_monitor(sdata); 2947 + 2948 + mutex_lock(&sdata->local->mtx); 2962 2949 ieee80211_restart_sta_timer(sdata); 2950 + mutex_unlock(&sdata->local->mtx); 2963 2951 } 2964 2952 #endif 2965 2953 ··· 3124 3106 } 3125 3107 3126 3108 local->oper_channel = cbss->channel; 3127 - ieee80211_hw_config(local, 0); 3109 + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 3128 3110 3129 3111 if (!have_sta) { 3130 3112 u32 rates = 0, basic_rates = 0;
+16
net/mac80211/offchannel.c
··· 234 234 return; 235 235 } 236 236 237 + /* was never transmitted */ 238 + if (local->hw_roc_skb) { 239 + u64 cookie; 240 + 241 + cookie = local->hw_roc_cookie ^ 2; 242 + 243 + cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie, 244 + local->hw_roc_skb->data, 245 + local->hw_roc_skb->len, false, 246 + GFP_KERNEL); 247 + 248 + kfree_skb(local->hw_roc_skb); 249 + local->hw_roc_skb = NULL; 250 + local->hw_roc_skb_for_status = NULL; 251 + } 252 + 237 253 if (!local->hw_roc_for_tx) 238 254 cfg80211_remain_on_channel_expired(local->hw_roc_dev, 239 255 local->hw_roc_cookie,
+2 -2
net/mac80211/sta_info.c
··· 378 378 /* make the station visible */ 379 379 sta_info_hash_add(local, sta); 380 380 381 - list_add(&sta->list, &local->sta_list); 381 + list_add_rcu(&sta->list, &local->sta_list); 382 382 383 383 set_sta_flag(sta, WLAN_STA_INSERTED); 384 384 ··· 688 688 if (ret) 689 689 return ret; 690 690 691 - list_del(&sta->list); 691 + list_del_rcu(&sta->list); 692 692 693 693 mutex_lock(&local->key_mtx); 694 694 for (i = 0; i < NUM_DEFAULT_KEYS; i++)
+6 -3
net/mac80211/tx.c
··· 1737 1737 __le16 fc; 1738 1738 struct ieee80211_hdr hdr; 1739 1739 struct ieee80211s_hdr mesh_hdr __maybe_unused; 1740 - struct mesh_path __maybe_unused *mppath = NULL; 1740 + struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL; 1741 1741 const u8 *encaps_data; 1742 1742 int encaps_len, skip_header_bytes; 1743 1743 int nh_pos, h_pos; ··· 1803 1803 goto fail; 1804 1804 } 1805 1805 rcu_read_lock(); 1806 - if (!is_multicast_ether_addr(skb->data)) 1807 - mppath = mpp_path_lookup(skb->data, sdata); 1806 + if (!is_multicast_ether_addr(skb->data)) { 1807 + mpath = mesh_path_lookup(skb->data, sdata); 1808 + if (!mpath) 1809 + mppath = mpp_path_lookup(skb->data, sdata); 1810 + } 1808 1811 1809 1812 /* 1810 1813 * Use address extension if it is a packet from
+1 -1
net/mac80211/util.c
··· 1271 1271 enum ieee80211_sta_state state; 1272 1272 1273 1273 for (state = IEEE80211_STA_NOTEXIST; 1274 - state < sta->sta_state - 1; state++) 1274 + state < sta->sta_state; state++) 1275 1275 WARN_ON(drv_sta_state(local, sta->sdata, sta, 1276 1276 state, state + 1)); 1277 1277 }
+2 -3
net/netfilter/nf_conntrack_h323_main.c
··· 270 270 return 0; 271 271 272 272 /* RTP port is even */ 273 - port &= htons(~1); 274 - rtp_port = port; 275 - rtcp_port = htons(ntohs(port) + 1); 273 + rtp_port = port & ~htons(1); 274 + rtcp_port = port | htons(1); 276 275 277 276 /* Create expect for RTP */ 278 277 if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
+41 -31
net/netfilter/xt_HMARK.c
··· 32 32 MODULE_ALIAS("ip6t_HMARK"); 33 33 34 34 struct hmark_tuple { 35 - u32 src; 36 - u32 dst; 35 + __be32 src; 36 + __be32 dst; 37 37 union hmark_ports uports; 38 - uint8_t proto; 38 + u8 proto; 39 39 }; 40 40 41 - static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask) 41 + static inline __be32 hmark_addr6_mask(const __be32 *addr32, const __be32 *mask) 42 42 { 43 43 return (addr32[0] & mask[0]) ^ 44 44 (addr32[1] & mask[1]) ^ ··· 46 46 (addr32[3] & mask[3]); 47 47 } 48 48 49 - static inline u32 50 - hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask) 49 + static inline __be32 50 + hmark_addr_mask(int l3num, const __be32 *addr32, const __be32 *mask) 51 51 { 52 52 switch (l3num) { 53 53 case AF_INET: ··· 56 56 return hmark_addr6_mask(addr32, mask); 57 57 } 58 58 return 0; 59 + } 60 + 61 + static inline void hmark_swap_ports(union hmark_ports *uports, 62 + const struct xt_hmark_info *info) 63 + { 64 + union hmark_ports hp; 65 + u16 src, dst; 66 + 67 + hp.b32 = (uports->b32 & info->port_mask.b32) | info->port_set.b32; 68 + src = ntohs(hp.b16.src); 69 + dst = ntohs(hp.b16.dst); 70 + 71 + if (dst > src) 72 + uports->v32 = (dst << 16) | src; 73 + else 74 + uports->v32 = (src << 16) | dst; 59 75 } 60 76 61 77 static int ··· 90 74 otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; 91 75 rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; 92 76 93 - t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all, 94 - info->src_mask.all); 95 - t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all, 96 - info->dst_mask.all); 77 + t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6, 78 + info->src_mask.ip6); 79 + t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6, 80 + info->dst_mask.ip6); 97 81 98 82 if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) 99 83 return 0; 100 84 101 85 t->proto = nf_ct_protonum(ct); 102 86 if (t->proto != IPPROTO_ICMP) { 103 - t->uports.p16.src = otuple->src.u.all; 104 - 
t->uports.p16.dst = rtuple->src.u.all; 105 - t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | 106 - info->port_set.v32; 107 - if (t->uports.p16.dst < t->uports.p16.src) 108 - swap(t->uports.p16.dst, t->uports.p16.src); 87 + t->uports.b16.src = otuple->src.u.all; 88 + t->uports.b16.dst = rtuple->src.u.all; 89 + hmark_swap_ports(&t->uports, info); 109 90 } 110 91 111 92 return 0; ··· 111 98 #endif 112 99 } 113 100 101 + /* This hash function is endian independent, to ensure consistent hashing if 102 + * the cluster is composed of big and little endian systems. */ 114 103 static inline u32 115 104 hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) 116 105 { 117 106 u32 hash; 107 + u32 src = ntohl(t->src); 108 + u32 dst = ntohl(t->dst); 118 109 119 - if (t->dst < t->src) 120 - swap(t->src, t->dst); 110 + if (dst < src) 111 + swap(src, dst); 121 112 122 - hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd); 113 + hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd); 123 114 hash = hash ^ (t->proto & info->proto_mask); 124 115 125 116 return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; ··· 143 126 if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0) 144 127 return; 145 128 146 - t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | 147 - info->port_set.v32; 148 - 149 - if (t->uports.p16.dst < t->uports.p16.src) 150 - swap(t->uports.p16.dst, t->uports.p16.src); 129 + hmark_swap_ports(&t->uports, info); 151 130 } 152 131 153 132 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) ··· 191 178 return -1; 192 179 } 193 180 noicmp: 194 - t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all); 195 - t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all); 181 + t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6); 182 + t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6); 196 183 197 184 if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) 198 185 return 0; ··· 268 
255 } 269 256 } 270 257 271 - t->src = (__force u32) ip->saddr; 272 - t->dst = (__force u32) ip->daddr; 273 - 274 - t->src &= info->src_mask.ip; 275 - t->dst &= info->dst_mask.ip; 258 + t->src = ip->saddr & info->src_mask.ip; 259 + t->dst = ip->daddr & info->dst_mask.ip; 276 260 277 261 if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) 278 262 return 0;
+3
net/nfc/llcp/sock.c
··· 292 292 293 293 pr_debug("%p\n", sk); 294 294 295 + if (llcp_sock == NULL) 296 + return -EBADFD; 297 + 295 298 addr->sa_family = AF_NFC; 296 299 *len = sizeof(struct sockaddr_nfc_llcp); 297 300
+6 -6
net/sunrpc/rpc_pipe.c
··· 71 71 msg->errno = err; 72 72 destroy_msg(msg); 73 73 } while (!list_empty(head)); 74 - wake_up(waitq); 74 + 75 + if (waitq) 76 + wake_up(waitq); 75 77 } 76 78 77 79 static void ··· 93 91 } 94 92 dentry = dget(pipe->dentry); 95 93 spin_unlock(&pipe->lock); 96 - if (dentry) { 97 - rpc_purge_list(&RPC_I(dentry->d_inode)->waitq, 98 - &free_list, destroy_msg, -ETIMEDOUT); 99 - dput(dentry); 100 - } 94 + rpc_purge_list(dentry ? &RPC_I(dentry->d_inode)->waitq : NULL, 95 + &free_list, destroy_msg, -ETIMEDOUT); 96 + dput(dentry); 101 97 } 102 98 103 99 ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
+2 -1
net/sunrpc/svc.c
··· 1374 1374 sizeof(req->rq_snd_buf)); 1375 1375 return bc_send(req); 1376 1376 } else { 1377 - /* Nothing to do to drop request */ 1377 + /* drop request */ 1378 + xprt_free_bc_request(req); 1378 1379 return 0; 1379 1380 } 1380 1381 }
+5 -1
net/wireless/ibss.c
··· 42 42 cfg80211_hold_bss(bss_from_pub(bss)); 43 43 wdev->current_bss = bss_from_pub(bss); 44 44 45 + wdev->sme_state = CFG80211_SME_CONNECTED; 45 46 cfg80211_upload_connect_keys(wdev); 46 47 47 48 nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, ··· 61 60 struct cfg80211_event *ev; 62 61 unsigned long flags; 63 62 64 - CFG80211_DEV_WARN_ON(!wdev->ssid_len); 63 + CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); 65 64 66 65 ev = kzalloc(sizeof(*ev), gfp); 67 66 if (!ev) ··· 116 115 #ifdef CONFIG_CFG80211_WEXT 117 116 wdev->wext.ibss.channel = params->channel; 118 117 #endif 118 + wdev->sme_state = CFG80211_SME_CONNECTING; 119 119 err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); 120 120 if (err) { 121 121 wdev->connect_keys = NULL; 122 + wdev->sme_state = CFG80211_SME_IDLE; 122 123 return err; 123 124 } 124 125 ··· 172 169 } 173 170 174 171 wdev->current_bss = NULL; 172 + wdev->sme_state = CFG80211_SME_IDLE; 175 173 wdev->ssid_len = 0; 176 174 #ifdef CONFIG_CFG80211_WEXT 177 175 if (!nowext)
+18 -1
net/wireless/util.c
··· 935 935 enum nl80211_iftype iftype) 936 936 { 937 937 struct wireless_dev *wdev_iter; 938 + u32 used_iftypes = BIT(iftype); 938 939 int num[NUM_NL80211_IFTYPES]; 939 940 int total = 1; 940 941 int i, j; ··· 962 961 963 962 num[wdev_iter->iftype]++; 964 963 total++; 964 + used_iftypes |= BIT(wdev_iter->iftype); 965 965 } 966 966 mutex_unlock(&rdev->devlist_mtx); 967 967 ··· 972 970 for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { 973 971 const struct ieee80211_iface_combination *c; 974 972 struct ieee80211_iface_limit *limits; 973 + u32 all_iftypes = 0; 975 974 976 975 c = &rdev->wiphy.iface_combinations[i]; 977 976 ··· 987 984 if (rdev->wiphy.software_iftypes & BIT(iftype)) 988 985 continue; 989 986 for (j = 0; j < c->n_limits; j++) { 987 + all_iftypes |= limits[j].types; 990 988 if (!(limits[j].types & BIT(iftype))) 991 989 continue; 992 990 if (limits[j].max < num[iftype]) ··· 995 991 limits[j].max -= num[iftype]; 996 992 } 997 993 } 998 - /* yay, it fits */ 994 + 995 + /* 996 + * Finally check that all iftypes that we're currently 997 + * using are actually part of this combination. If they 998 + * aren't then we can't use this combination and have 999 + * to continue to the next. 1000 + */ 1001 + if ((all_iftypes & used_iftypes) != used_iftypes) 1002 + goto cont; 1003 + 1004 + /* 1005 + * This combination covered all interface types and 1006 + * supported the requested numbers, so we're good. 1007 + */ 999 1008 kfree(limits); 1000 1009 return 0; 1001 1010 cont:
+5 -3
sound/core/compress_offload.c
··· 502 502 if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING) 503 503 return -EPERM; 504 504 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH); 505 - if (!retval) { 505 + if (!retval) 506 506 stream->runtime->state = SNDRV_PCM_STATE_PAUSED; 507 - wake_up(&stream->runtime->sleep); 508 - } 509 507 return retval; 510 508 } 511 509 ··· 542 544 if (!retval) { 543 545 stream->runtime->state = SNDRV_PCM_STATE_SETUP; 544 546 wake_up(&stream->runtime->sleep); 547 + stream->runtime->hw_pointer = 0; 548 + stream->runtime->app_pointer = 0; 549 + stream->runtime->total_bytes_available = 0; 550 + stream->runtime->total_bytes_transferred = 0; 545 551 } 546 552 return retval; 547 553 }
+10 -7
sound/pci/hda/hda_intel.c
··· 2484 2484 static int DELAYED_INIT_MARK azx_first_init(struct azx *chip); 2485 2485 static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip); 2486 2486 2487 + #ifdef SUPPORT_VGA_SWITCHEROO 2487 2488 static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci); 2488 2489 2489 - #ifdef SUPPORT_VGA_SWITCHEROO 2490 2490 static void azx_vs_set_state(struct pci_dev *pci, 2491 2491 enum vga_switcheroo_state state) 2492 2492 { ··· 2578 2578 #else 2579 2579 #define init_vga_switcheroo(chip) /* NOP */ 2580 2580 #define register_vga_switcheroo(chip) 0 2581 + #define check_hdmi_disabled(pci) false 2581 2582 #endif /* SUPPORT_VGA_SWITCHER */ 2582 2583 2583 2584 /* ··· 2639 2638 return azx_free(device->device_data); 2640 2639 } 2641 2640 2641 + #ifdef SUPPORT_VGA_SWITCHEROO 2642 2642 /* 2643 2643 * Check of disabled HDMI controller by vga-switcheroo 2644 2644 */ ··· 2672 2670 struct pci_dev *p = get_bound_vga(pci); 2673 2671 2674 2672 if (p) { 2675 - if (vga_default_device() && p != vga_default_device()) 2673 + if (vga_switcheroo_get_client_state(p) == VGA_SWITCHEROO_OFF) 2676 2674 vga_inactive = true; 2677 2675 pci_dev_put(p); 2678 2676 } 2679 2677 return vga_inactive; 2680 2678 } 2679 + #endif /* SUPPORT_VGA_SWITCHEROO */ 2681 2680 2682 2681 /* 2683 2682 * white/black-listing for position_fix ··· 3354 3351 { PCI_DEVICE(0x6549, 0x1200), 3355 3352 .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT }, 3356 3353 /* Creative X-Fi (CA0110-IBG) */ 3354 + /* CTHDA chips */ 3355 + { PCI_DEVICE(0x1102, 0x0010), 3356 + .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, 3357 + { PCI_DEVICE(0x1102, 0x0012), 3358 + .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, 3357 3359 #if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE) 3358 3360 /* the following entry conflicts with snd-ctxfi driver, 3359 3361 * as ctxfi driver mutates from HD-audio to native mode with ··· 3375 3367 .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND | 3376 
3368 AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB }, 3377 3369 #endif 3378 - /* CTHDA chips */ 3379 - { PCI_DEVICE(0x1102, 0x0010), 3380 - .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, 3381 - { PCI_DEVICE(0x1102, 0x0012), 3382 - .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, 3383 3370 /* Vortex86MX */ 3384 3371 { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC }, 3385 3372 /* VMware HDAudio */
+1 -1
sound/pci/hda/patch_conexant.c
··· 4061 4061 static int cx_auto_init(struct hda_codec *codec) 4062 4062 { 4063 4063 struct conexant_spec *spec = codec->spec; 4064 - /*snd_hda_sequence_write(codec, cx_auto_init_verbs);*/ 4064 + snd_hda_gen_apply_verbs(codec); 4065 4065 cx_auto_init_output(codec); 4066 4066 cx_auto_init_input(codec); 4067 4067 cx_auto_init_digital(codec);
+10
sound/pci/hda/patch_realtek.c
··· 1896 1896 alc_fix_pll(codec); 1897 1897 alc_auto_init_amp(codec, spec->init_amp); 1898 1898 1899 + snd_hda_gen_apply_verbs(codec); 1899 1900 alc_init_special_input_src(codec); 1900 1901 alc_auto_init_std(codec); 1901 1902 ··· 6440 6439 ALC662_FIXUP_ASUS_MODE7, 6441 6440 ALC662_FIXUP_ASUS_MODE8, 6442 6441 ALC662_FIXUP_NO_JACK_DETECT, 6442 + ALC662_FIXUP_ZOTAC_Z68, 6443 6443 }; 6444 6444 6445 6445 static const struct alc_fixup alc662_fixups[] = { ··· 6590 6588 .type = ALC_FIXUP_FUNC, 6591 6589 .v.func = alc_fixup_no_jack_detect, 6592 6590 }, 6591 + [ALC662_FIXUP_ZOTAC_Z68] = { 6592 + .type = ALC_FIXUP_PINS, 6593 + .v.pins = (const struct alc_pincfg[]) { 6594 + { 0x1b, 0x02214020 }, /* Front HP */ 6595 + { } 6596 + } 6597 + }, 6593 6598 }; 6594 6599 6595 6600 static const struct snd_pci_quirk alc662_fixup_tbl[] = { ··· 6610 6601 SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), 6611 6602 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), 6612 6603 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), 6604 + SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68), 6613 6605 SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), 6614 6606 6615 6607 #if 0
+21 -38
sound/soc/codecs/wm2000.c
··· 99 99 } 100 100 101 101 static int wm2000_poll_bit(struct i2c_client *i2c, 102 - unsigned int reg, u8 mask, int timeout) 102 + unsigned int reg, u8 mask) 103 103 { 104 + int timeout = 4000; 104 105 int val; 105 106 106 107 val = wm2000_read(i2c, reg); ··· 120 119 static int wm2000_power_up(struct i2c_client *i2c, int analogue) 121 120 { 122 121 struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); 123 - int ret, timeout; 122 + int ret; 124 123 125 124 BUG_ON(wm2000->anc_mode != ANC_OFF); 126 125 ··· 141 140 142 141 /* Wait for ANC engine to become ready */ 143 142 if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, 144 - WM2000_ANC_ENG_IDLE, 1)) { 143 + WM2000_ANC_ENG_IDLE)) { 145 144 dev_err(&i2c->dev, "ANC engine failed to reset\n"); 146 145 return -ETIMEDOUT; 147 146 } 148 147 149 148 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, 150 - WM2000_STATUS_BOOT_COMPLETE, 1)) { 149 + WM2000_STATUS_BOOT_COMPLETE)) { 151 150 dev_err(&i2c->dev, "ANC engine failed to initialise\n"); 152 151 return -ETIMEDOUT; 153 152 } ··· 174 173 dev_dbg(&i2c->dev, "Download complete\n"); 175 174 176 175 if (analogue) { 177 - timeout = 248; 178 - wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4); 176 + wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4); 179 177 180 178 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 181 179 WM2000_MODE_ANA_SEQ_INCLUDE | 182 180 WM2000_MODE_MOUSE_ENABLE | 183 181 WM2000_MODE_THERMAL_ENABLE); 184 182 } else { 185 - timeout = 10; 186 - 187 183 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 188 184 WM2000_MODE_MOUSE_ENABLE | 189 185 WM2000_MODE_THERMAL_ENABLE); ··· 199 201 wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); 200 202 201 203 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, 202 - WM2000_STATUS_MOUSE_ACTIVE, timeout)) { 203 - dev_err(&i2c->dev, "Timed out waiting for device after %dms\n", 204 - timeout * 10); 204 + WM2000_STATUS_MOUSE_ACTIVE)) { 205 + dev_err(&i2c->dev, "Timed out waiting for device\n"); 205 206 return 
-ETIMEDOUT; 206 207 } 207 208 ··· 215 218 static int wm2000_power_down(struct i2c_client *i2c, int analogue) 216 219 { 217 220 struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); 218 - int timeout; 219 221 220 222 if (analogue) { 221 - timeout = 248; 222 - wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4); 223 + wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4); 223 224 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 224 225 WM2000_MODE_ANA_SEQ_INCLUDE | 225 226 WM2000_MODE_POWER_DOWN); 226 227 } else { 227 - timeout = 10; 228 228 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 229 229 WM2000_MODE_POWER_DOWN); 230 230 } 231 231 232 232 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, 233 - WM2000_STATUS_POWER_DOWN_COMPLETE, timeout)) { 233 + WM2000_STATUS_POWER_DOWN_COMPLETE)) { 234 234 dev_err(&i2c->dev, "Timeout waiting for ANC power down\n"); 235 235 return -ETIMEDOUT; 236 236 } 237 237 238 238 if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, 239 - WM2000_ANC_ENG_IDLE, 1)) { 239 + WM2000_ANC_ENG_IDLE)) { 240 240 dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); 241 241 return -ETIMEDOUT; 242 242 } ··· 262 268 } 263 269 264 270 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, 265 - WM2000_STATUS_ANC_DISABLED, 10)) { 271 + WM2000_STATUS_ANC_DISABLED)) { 266 272 dev_err(&i2c->dev, "Timeout waiting for ANC disable\n"); 267 273 return -ETIMEDOUT; 268 274 } 269 275 270 276 if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, 271 - WM2000_ANC_ENG_IDLE, 1)) { 277 + WM2000_ANC_ENG_IDLE)) { 272 278 dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); 273 279 return -ETIMEDOUT; 274 280 } ··· 305 311 wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); 306 312 307 313 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, 308 - WM2000_STATUS_MOUSE_ACTIVE, 10)) { 314 + WM2000_STATUS_MOUSE_ACTIVE)) { 309 315 dev_err(&i2c->dev, "Timed out waiting for MOUSE\n"); 310 316 return -ETIMEDOUT; 311 317 } ··· 319 325 static int wm2000_enter_standby(struct 
i2c_client *i2c, int analogue) 320 326 { 321 327 struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); 322 - int timeout; 323 328 324 329 BUG_ON(wm2000->anc_mode != ANC_ACTIVE); 325 330 326 331 if (analogue) { 327 - timeout = 248; 328 - wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4); 332 + wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4); 329 333 330 334 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 331 335 WM2000_MODE_ANA_SEQ_INCLUDE | 332 336 WM2000_MODE_THERMAL_ENABLE | 333 337 WM2000_MODE_STANDBY_ENTRY); 334 338 } else { 335 - timeout = 10; 336 - 337 339 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 338 340 WM2000_MODE_THERMAL_ENABLE | 339 341 WM2000_MODE_STANDBY_ENTRY); 340 342 } 341 343 342 344 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, 343 - WM2000_STATUS_ANC_DISABLED, timeout)) { 345 + WM2000_STATUS_ANC_DISABLED)) { 344 346 dev_err(&i2c->dev, 345 347 "Timed out waiting for ANC disable after 1ms\n"); 346 348 return -ETIMEDOUT; 347 349 } 348 350 349 - if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE, 350 - 1)) { 351 + if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE)) { 351 352 dev_err(&i2c->dev, 352 - "Timed out waiting for standby after %dms\n", 353 - timeout * 10); 353 + "Timed out waiting for standby\n"); 354 354 return -ETIMEDOUT; 355 355 } 356 356 ··· 362 374 static int wm2000_exit_standby(struct i2c_client *i2c, int analogue) 363 375 { 364 376 struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); 365 - int timeout; 366 377 367 378 BUG_ON(wm2000->anc_mode != ANC_STANDBY); 368 379 369 380 wm2000_write(i2c, WM2000_REG_SYS_CTL1, 0); 370 381 371 382 if (analogue) { 372 - timeout = 248; 373 - wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4); 383 + wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4); 374 384 375 385 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 376 386 WM2000_MODE_ANA_SEQ_INCLUDE | 377 387 WM2000_MODE_THERMAL_ENABLE | 378 388 WM2000_MODE_MOUSE_ENABLE); 379 389 } 
else { 380 - timeout = 10; 381 - 382 390 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 383 391 WM2000_MODE_THERMAL_ENABLE | 384 392 WM2000_MODE_MOUSE_ENABLE); ··· 384 400 wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); 385 401 386 402 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, 387 - WM2000_STATUS_MOUSE_ACTIVE, timeout)) { 388 - dev_err(&i2c->dev, "Timed out waiting for MOUSE after %dms\n", 389 - timeout * 10); 403 + WM2000_STATUS_MOUSE_ACTIVE)) { 404 + dev_err(&i2c->dev, "Timed out waiting for MOUSE\n"); 390 405 return -ETIMEDOUT; 391 406 } 392 407
+66 -39
sound/soc/codecs/wm8994.c
··· 46 46 #define WM8994_NUM_DRC 3 47 47 #define WM8994_NUM_EQ 3 48 48 49 + static struct { 50 + unsigned int reg; 51 + unsigned int mask; 52 + } wm8994_vu_bits[] = { 53 + { WM8994_LEFT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU }, 54 + { WM8994_RIGHT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU }, 55 + { WM8994_LEFT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU }, 56 + { WM8994_RIGHT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU }, 57 + { WM8994_SPEAKER_VOLUME_LEFT, WM8994_SPKOUT_VU }, 58 + { WM8994_SPEAKER_VOLUME_RIGHT, WM8994_SPKOUT_VU }, 59 + { WM8994_LEFT_OUTPUT_VOLUME, WM8994_HPOUT1_VU }, 60 + { WM8994_RIGHT_OUTPUT_VOLUME, WM8994_HPOUT1_VU }, 61 + { WM8994_LEFT_OPGA_VOLUME, WM8994_MIXOUT_VU }, 62 + { WM8994_RIGHT_OPGA_VOLUME, WM8994_MIXOUT_VU }, 63 + 64 + { WM8994_AIF1_DAC1_LEFT_VOLUME, WM8994_AIF1DAC1_VU }, 65 + { WM8994_AIF1_DAC1_RIGHT_VOLUME, WM8994_AIF1DAC1_VU }, 66 + { WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU }, 67 + { WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU }, 68 + { WM8994_AIF2_DAC_LEFT_VOLUME, WM8994_AIF2DAC_VU }, 69 + { WM8994_AIF2_DAC_RIGHT_VOLUME, WM8994_AIF2DAC_VU }, 70 + { WM8994_AIF1_ADC1_LEFT_VOLUME, WM8994_AIF1ADC1_VU }, 71 + { WM8994_AIF1_ADC1_RIGHT_VOLUME, WM8994_AIF1ADC1_VU }, 72 + { WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU }, 73 + { WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU }, 74 + { WM8994_AIF2_ADC_LEFT_VOLUME, WM8994_AIF2ADC_VU }, 75 + { WM8994_AIF2_ADC_RIGHT_VOLUME, WM8994_AIF1ADC2_VU }, 76 + { WM8994_DAC1_LEFT_VOLUME, WM8994_DAC1_VU }, 77 + { WM8994_DAC1_RIGHT_VOLUME, WM8994_DAC1_VU }, 78 + { WM8994_DAC2_LEFT_VOLUME, WM8994_DAC2_VU }, 79 + { WM8994_DAC2_RIGHT_VOLUME, WM8994_DAC2_VU }, 80 + }; 81 + 49 82 static int wm8994_drc_base[] = { 50 83 WM8994_AIF1_DRC1_1, 51 84 WM8994_AIF1_DRC2_1, ··· 1022 989 struct snd_soc_codec *codec = w->codec; 1023 990 struct wm8994 *control = codec->control_data; 1024 991 int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA; 992 + int i; 1025 993 int dac; 1026 994 int adc; 1027 995 int val; ··· 1081 
1047 WM8994_AIF1DAC2L_ENA); 1082 1048 break; 1083 1049 1050 + case SND_SOC_DAPM_POST_PMU: 1051 + for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) 1052 + snd_soc_write(codec, wm8994_vu_bits[i].reg, 1053 + snd_soc_read(codec, 1054 + wm8994_vu_bits[i].reg)); 1055 + break; 1056 + 1084 1057 case SND_SOC_DAPM_PRE_PMD: 1085 1058 case SND_SOC_DAPM_POST_PMD: 1086 1059 snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, ··· 1113 1072 struct snd_kcontrol *kcontrol, int event) 1114 1073 { 1115 1074 struct snd_soc_codec *codec = w->codec; 1075 + int i; 1116 1076 int dac; 1117 1077 int adc; 1118 1078 int val; ··· 1162 1120 WM8994_AIF2DACR_ENA, 1163 1121 WM8994_AIF2DACL_ENA | 1164 1122 WM8994_AIF2DACR_ENA); 1123 + break; 1124 + 1125 + case SND_SOC_DAPM_POST_PMU: 1126 + for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) 1127 + snd_soc_write(codec, wm8994_vu_bits[i].reg, 1128 + snd_soc_read(codec, 1129 + wm8994_vu_bits[i].reg)); 1165 1130 break; 1166 1131 1167 1132 case SND_SOC_DAPM_PRE_PMD: ··· 1239 1190 switch (event) { 1240 1191 case SND_SOC_DAPM_PRE_PMU: 1241 1192 if (wm8994->aif1clk_enable) { 1242 - aif1clk_ev(w, kcontrol, event); 1193 + aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU); 1243 1194 snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, 1244 1195 WM8994_AIF1CLK_ENA_MASK, 1245 1196 WM8994_AIF1CLK_ENA); 1197 + aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU); 1246 1198 wm8994->aif1clk_enable = 0; 1247 1199 } 1248 1200 if (wm8994->aif2clk_enable) { 1249 - aif2clk_ev(w, kcontrol, event); 1201 + aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU); 1250 1202 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, 1251 1203 WM8994_AIF2CLK_ENA_MASK, 1252 1204 WM8994_AIF2CLK_ENA); 1205 + aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU); 1253 1206 wm8994->aif2clk_enable = 0; 1254 1207 } 1255 1208 break; ··· 1272 1221 switch (event) { 1273 1222 case SND_SOC_DAPM_POST_PMD: 1274 1223 if (wm8994->aif1clk_disable) { 1224 + aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD); 1275 1225 
snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, 1276 1226 WM8994_AIF1CLK_ENA_MASK, 0); 1277 - aif1clk_ev(w, kcontrol, event); 1227 + aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD); 1278 1228 wm8994->aif1clk_disable = 0; 1279 1229 } 1280 1230 if (wm8994->aif2clk_disable) { 1231 + aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD); 1281 1232 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, 1282 1233 WM8994_AIF2CLK_ENA_MASK, 0); 1283 - aif2clk_ev(w, kcontrol, event); 1234 + aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD); 1284 1235 wm8994->aif2clk_disable = 0; 1285 1236 } 1286 1237 break; ··· 1580 1527 1581 1528 static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = { 1582 1529 SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev, 1583 - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), 1530 + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | 1531 + SND_SOC_DAPM_PRE_PMD), 1584 1532 SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev, 1585 - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), 1533 + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | 1534 + SND_SOC_DAPM_PRE_PMD), 1586 1535 SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0), 1587 1536 SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0, 1588 1537 left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)), ··· 3934 3879 3935 3880 pm_runtime_put(codec->dev); 3936 3881 3937 - /* Latch volume updates (right only; we always do left then right). 
*/ 3938 - snd_soc_update_bits(codec, WM8994_AIF1_DAC1_LEFT_VOLUME, 3939 - WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU); 3940 - snd_soc_update_bits(codec, WM8994_AIF1_DAC1_RIGHT_VOLUME, 3941 - WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU); 3942 - snd_soc_update_bits(codec, WM8994_AIF1_DAC2_LEFT_VOLUME, 3943 - WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU); 3944 - snd_soc_update_bits(codec, WM8994_AIF1_DAC2_RIGHT_VOLUME, 3945 - WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU); 3946 - snd_soc_update_bits(codec, WM8994_AIF2_DAC_LEFT_VOLUME, 3947 - WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU); 3948 - snd_soc_update_bits(codec, WM8994_AIF2_DAC_RIGHT_VOLUME, 3949 - WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU); 3950 - snd_soc_update_bits(codec, WM8994_AIF1_ADC1_LEFT_VOLUME, 3951 - WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU); 3952 - snd_soc_update_bits(codec, WM8994_AIF1_ADC1_RIGHT_VOLUME, 3953 - WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU); 3954 - snd_soc_update_bits(codec, WM8994_AIF1_ADC2_LEFT_VOLUME, 3955 - WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU); 3956 - snd_soc_update_bits(codec, WM8994_AIF1_ADC2_RIGHT_VOLUME, 3957 - WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU); 3958 - snd_soc_update_bits(codec, WM8994_AIF2_ADC_LEFT_VOLUME, 3959 - WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU); 3960 - snd_soc_update_bits(codec, WM8994_AIF2_ADC_RIGHT_VOLUME, 3961 - WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU); 3962 - snd_soc_update_bits(codec, WM8994_DAC1_LEFT_VOLUME, 3963 - WM8994_DAC1_VU, WM8994_DAC1_VU); 3964 - snd_soc_update_bits(codec, WM8994_DAC1_RIGHT_VOLUME, 3965 - WM8994_DAC1_VU, WM8994_DAC1_VU); 3966 - snd_soc_update_bits(codec, WM8994_DAC2_LEFT_VOLUME, 3967 - WM8994_DAC2_VU, WM8994_DAC2_VU); 3968 - snd_soc_update_bits(codec, WM8994_DAC2_RIGHT_VOLUME, 3969 - WM8994_DAC2_VU, WM8994_DAC2_VU); 3882 + /* Latch volume update bits */ 3883 + for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) 3884 + snd_soc_update_bits(codec, wm8994_vu_bits[i].reg, 3885 + wm8994_vu_bits[i].mask, 3886 + wm8994_vu_bits[i].mask); 3970 3887 3971 3888 /* Set the low bit of 
the 3D stereo depth so TLV matches */ 3972 3889 snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_2,
+8
sound/soc/fsl/imx-audmux.c
··· 26 26 #include <linux/of_device.h> 27 27 #include <linux/platform_device.h> 28 28 #include <linux/slab.h> 29 + #include <linux/pinctrl/consumer.h> 29 30 30 31 #include "imx-audmux.h" 31 32 ··· 250 249 static int __devinit imx_audmux_probe(struct platform_device *pdev) 251 250 { 252 251 struct resource *res; 252 + struct pinctrl *pinctrl; 253 253 const struct of_device_id *of_id = 254 254 of_match_device(imx_audmux_dt_ids, &pdev->dev); 255 255 ··· 258 256 audmux_base = devm_request_and_ioremap(&pdev->dev, res); 259 257 if (!audmux_base) 260 258 return -EADDRNOTAVAIL; 259 + 260 + pinctrl = devm_pinctrl_get_select_default(&pdev->dev); 261 + if (IS_ERR(pinctrl)) { 262 + dev_err(&pdev->dev, "setup pinctrl failed!"); 263 + return PTR_ERR(pinctrl); 264 + } 261 265 262 266 audmux_clk = clk_get(&pdev->dev, "audmux"); 263 267 if (IS_ERR(audmux_clk)) {
+2 -2
sound/soc/soc-dapm.c
··· 913 913 /* do we need to add this widget to the list ? */ 914 914 if (list) { 915 915 int err; 916 - err = dapm_list_add_widget(list, path->sink); 916 + err = dapm_list_add_widget(list, path->source); 917 917 if (err < 0) { 918 918 dev_err(widget->dapm->dev, "could not add widget %s\n", 919 919 widget->name); ··· 954 954 if (stream == SNDRV_PCM_STREAM_PLAYBACK) 955 955 paths = is_connected_output_ep(dai->playback_widget, list); 956 956 else 957 - paths = is_connected_input_ep(dai->playback_widget, list); 957 + paths = is_connected_input_ep(dai->capture_widget, list); 958 958 959 959 trace_snd_soc_dapm_connected(paths, stream); 960 960 dapm_clear_walk(&card->dapm);
+6
sound/soc/soc-pcm.c
··· 794 794 for (i = 0; i < card->num_links; i++) { 795 795 be = &card->rtd[i]; 796 796 797 + if (!be->dai_link->no_pcm) 798 + continue; 799 + 797 800 if (be->cpu_dai->playback_widget == widget || 798 801 be->codec_dai->playback_widget == widget) 799 802 return be; ··· 805 802 806 803 for (i = 0; i < card->num_links; i++) { 807 804 be = &card->rtd[i]; 805 + 806 + if (!be->dai_link->no_pcm) 807 + continue; 808 808 809 809 if (be->cpu_dai->capture_widget == widget || 810 810 be->codec_dai->capture_widget == widget)
+1
sound/soc/tegra/tegra30_ahub.c
··· 629 629 MODULE_DESCRIPTION("Tegra30 AHUB driver"); 630 630 MODULE_LICENSE("GPL v2"); 631 631 MODULE_ALIAS("platform:" DRV_NAME); 632 + MODULE_DEVICE_TABLE(of, tegra30_ahub_of_match);
+1
sound/usb/card.h
··· 119 119 unsigned long unlink_mask; /* bitmask of unlinked urbs */ 120 120 121 121 /* data and sync endpoints for this stream */ 122 + unsigned int ep_num; /* the endpoint number */ 122 123 struct snd_usb_endpoint *data_endpoint; 123 124 struct snd_usb_endpoint *sync_endpoint; 124 125 unsigned long flags;
+3 -4
sound/usb/stream.c
··· 97 97 subs->formats |= fp->formats; 98 98 subs->num_formats++; 99 99 subs->fmt_type = fp->fmt_type; 100 + subs->ep_num = fp->endpoint; 100 101 } 101 102 102 103 /* ··· 120 119 if (as->fmt_type != fp->fmt_type) 121 120 continue; 122 121 subs = &as->substream[stream]; 123 - if (!subs->data_endpoint) 124 - continue; 125 - if (subs->data_endpoint->ep_num == fp->endpoint) { 122 + if (subs->ep_num == fp->endpoint) { 126 123 list_add_tail(&fp->list, &subs->fmt_list); 127 124 subs->num_formats++; 128 125 subs->formats |= fp->formats; ··· 133 134 if (as->fmt_type != fp->fmt_type) 134 135 continue; 135 136 subs = &as->substream[stream]; 136 - if (subs->data_endpoint) 137 + if (subs->ep_num) 137 138 continue; 138 139 err = snd_pcm_new_stream(as->pcm, stream, 1); 139 140 if (err < 0)
+2
tools/perf/MANIFEST
··· 1 1 tools/perf 2 + tools/scripts 3 + tools/lib/traceevent 2 4 include/linux/const.h 3 5 include/linux/perf_event.h 4 6 include/linux/rbtree.h
+2 -2
tools/perf/builtin-report.c
··· 152 152 153 153 if (symbol_conf.use_callchain) { 154 154 err = callchain_append(he->callchain, 155 - &evsel->hists.callchain_cursor, 155 + &callchain_cursor, 156 156 sample->period); 157 157 if (err) 158 158 return err; ··· 162 162 * so we don't allocated the extra space needed because the stdio 163 163 * code will not use it. 164 164 */ 165 - if (al->sym != NULL && use_browser > 0) { 165 + if (he->ms.sym != NULL && use_browser > 0) { 166 166 struct annotation *notes = symbol__annotation(he->ms.sym); 167 167 168 168 assert(evsel != NULL);
+4 -4
tools/perf/builtin-stat.c
··· 1129 1129 return 0; 1130 1130 1131 1131 if (!evsel_list->nr_entries) { 1132 - if (perf_evlist__add_attrs_array(evsel_list, default_attrs) < 0) 1132 + if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0) 1133 1133 return -1; 1134 1134 } 1135 1135 ··· 1139 1139 return 0; 1140 1140 1141 1141 /* Append detailed run extra attributes: */ 1142 - if (perf_evlist__add_attrs_array(evsel_list, detailed_attrs) < 0) 1142 + if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) 1143 1143 return -1; 1144 1144 1145 1145 if (detailed_run < 2) 1146 1146 return 0; 1147 1147 1148 1148 /* Append very detailed run extra attributes: */ 1149 - if (perf_evlist__add_attrs_array(evsel_list, very_detailed_attrs) < 0) 1149 + if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) 1150 1150 return -1; 1151 1151 1152 1152 if (detailed_run < 3) 1153 1153 return 0; 1154 1154 1155 1155 /* Append very, very detailed run extra attributes: */ 1156 - return perf_evlist__add_attrs_array(evsel_list, very_very_detailed_attrs); 1156 + return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); 1157 1157 } 1158 1158 1159 1159 int cmd_stat(int argc, const char **argv, const char *prefix __used)
+1 -1
tools/perf/builtin-top.c
··· 787 787 } 788 788 789 789 if (symbol_conf.use_callchain) { 790 - err = callchain_append(he->callchain, &evsel->hists.callchain_cursor, 790 + err = callchain_append(he->callchain, &callchain_cursor, 791 791 sample->period); 792 792 if (err) 793 793 return;
+4 -3
tools/perf/design.txt
··· 409 409 prctl. When a counter is disabled, it doesn't count or generate 410 410 events but does continue to exist and maintain its count value. 411 411 412 - An individual counter or counter group can be enabled with 412 + An individual counter can be enabled with 413 413 414 - ioctl(fd, PERF_EVENT_IOC_ENABLE); 414 + ioctl(fd, PERF_EVENT_IOC_ENABLE, 0); 415 415 416 416 or disabled with 417 417 418 - ioctl(fd, PERF_EVENT_IOC_DISABLE); 418 + ioctl(fd, PERF_EVENT_IOC_DISABLE, 0); 419 419 420 + For a counter group, pass PERF_IOC_FLAG_GROUP as the third argument. 420 421 Enabling or disabling the leader of a group enables or disables the 421 422 whole group; that is, while the group leader is disabled, none of the 422 423 counters in the group will count. Enabling or disabling a member of a
+1 -1
tools/perf/ui/browsers/annotate.c
··· 668 668 "q/ESC/CTRL+C Exit\n\n" 669 669 "-> Go to target\n" 670 670 "<- Exit\n" 671 - "h Cycle thru hottest instructions\n" 671 + "H Cycle thru hottest instructions\n" 672 672 "j Toggle showing jump to target arrows\n" 673 673 "J Toggle showing number of jump sources on targets\n" 674 674 "n Search next string\n"
+1 -1
tools/perf/util/PERF-VERSION-GEN
··· 12 12 # First check if there is a .git to get the version from git describe 13 13 # otherwise try to get the version from the kernel makefile 14 14 if test -d ../../.git -o -f ../../.git && 15 - VN=$(git describe --abbrev=4 HEAD 2>/dev/null) && 15 + VN=$(git describe --match 'v[0-9].[0-9]*' --abbrev=4 HEAD 2>/dev/null) && 16 16 case "$VN" in 17 17 *$LF*) (exit 1) ;; 18 18 v[0-9]*)
+2
tools/perf/util/callchain.c
··· 18 18 #include "util.h" 19 19 #include "callchain.h" 20 20 21 + __thread struct callchain_cursor callchain_cursor; 22 + 21 23 bool ip_callchain__valid(struct ip_callchain *chain, 22 24 const union perf_event *event) 23 25 {
+2
tools/perf/util/callchain.h
··· 76 76 struct callchain_cursor_node *curr; 77 77 }; 78 78 79 + extern __thread struct callchain_cursor callchain_cursor; 80 + 79 81 static inline void callchain_init(struct callchain_root *root) 80 82 { 81 83 INIT_LIST_HEAD(&root->node.siblings);
+15 -2
tools/perf/util/evlist.c
··· 159 159 return -1; 160 160 } 161 161 162 + int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, 163 + struct perf_event_attr *attrs, size_t nr_attrs) 164 + { 165 + size_t i; 166 + 167 + for (i = 0; i < nr_attrs; i++) 168 + event_attr_init(attrs + i); 169 + 170 + return perf_evlist__add_attrs(evlist, attrs, nr_attrs); 171 + } 172 + 162 173 static int trace_event__id(const char *evname) 163 174 { 164 175 char *filename, *colon; ··· 274 263 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 275 264 list_for_each_entry(pos, &evlist->entries, node) { 276 265 for (thread = 0; thread < evlist->threads->nr; thread++) 277 - ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE); 266 + ioctl(FD(pos, cpu, thread), 267 + PERF_EVENT_IOC_DISABLE, 0); 278 268 } 279 269 } 280 270 } ··· 288 276 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 289 277 list_for_each_entry(pos, &evlist->entries, node) { 290 278 for (thread = 0; thread < evlist->threads->nr; thread++) 291 - ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE); 279 + ioctl(FD(pos, cpu, thread), 280 + PERF_EVENT_IOC_ENABLE, 0); 292 281 } 293 282 } 294 283 }
+4
tools/perf/util/evlist.h
··· 54 54 int perf_evlist__add_default(struct perf_evlist *evlist); 55 55 int perf_evlist__add_attrs(struct perf_evlist *evlist, 56 56 struct perf_event_attr *attrs, size_t nr_attrs); 57 + int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, 58 + struct perf_event_attr *attrs, size_t nr_attrs); 57 59 int perf_evlist__add_tracepoints(struct perf_evlist *evlist, 58 60 const char *tracepoints[], size_t nr_tracepoints); 59 61 int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, ··· 64 62 65 63 #define perf_evlist__add_attrs_array(evlist, array) \ 66 64 perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array)) 65 + #define perf_evlist__add_default_attrs(evlist, array) \ 66 + __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array)) 67 67 68 68 #define perf_evlist__add_tracepoints_array(evlist, array) \ 69 69 perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array))
+22 -7
tools/perf/util/evsel.c
··· 494 494 } 495 495 496 496 static int perf_event__parse_id_sample(const union perf_event *event, u64 type, 497 - struct perf_sample *sample) 497 + struct perf_sample *sample, 498 + bool swapped) 498 499 { 499 500 const u64 *array = event->sample.array; 501 + union u64_swap u; 500 502 501 503 array += ((event->header.size - 502 504 sizeof(event->header)) / sizeof(u64)) - 1; 503 505 504 506 if (type & PERF_SAMPLE_CPU) { 505 - u32 *p = (u32 *)array; 506 - sample->cpu = *p; 507 + u.val64 = *array; 508 + if (swapped) { 509 + /* undo swap of u64, then swap on individual u32s */ 510 + u.val64 = bswap_64(u.val64); 511 + u.val32[0] = bswap_32(u.val32[0]); 512 + } 513 + 514 + sample->cpu = u.val32[0]; 507 515 array--; 508 516 } 509 517 ··· 531 523 } 532 524 533 525 if (type & PERF_SAMPLE_TID) { 534 - u32 *p = (u32 *)array; 535 - sample->pid = p[0]; 536 - sample->tid = p[1]; 526 + u.val64 = *array; 527 + if (swapped) { 528 + /* undo swap of u64, then swap on individual u32s */ 529 + u.val64 = bswap_64(u.val64); 530 + u.val32[0] = bswap_32(u.val32[0]); 531 + u.val32[1] = bswap_32(u.val32[1]); 532 + } 533 + 534 + sample->pid = u.val32[0]; 535 + sample->tid = u.val32[1]; 537 536 } 538 537 539 538 return 0; ··· 577 562 if (event->header.type != PERF_RECORD_SAMPLE) { 578 563 if (!sample_id_all) 579 564 return 0; 580 - return perf_event__parse_id_sample(event, type, data); 565 + return perf_event__parse_id_sample(event, type, data, swapped); 581 566 } 582 567 583 568 array = event->sample.array;
+4 -3
tools/perf/util/hist.c
··· 378 378 * collapse the histogram 379 379 */ 380 380 381 - static bool hists__collapse_insert_entry(struct hists *hists, 381 + static bool hists__collapse_insert_entry(struct hists *hists __used, 382 382 struct rb_root *root, 383 383 struct hist_entry *he) 384 384 { ··· 397 397 iter->period += he->period; 398 398 iter->nr_events += he->nr_events; 399 399 if (symbol_conf.use_callchain) { 400 - callchain_cursor_reset(&hists->callchain_cursor); 401 - callchain_merge(&hists->callchain_cursor, iter->callchain, 400 + callchain_cursor_reset(&callchain_cursor); 401 + callchain_merge(&callchain_cursor, 402 + iter->callchain, 402 403 he->callchain); 403 404 } 404 405 hist_entry__free(he);
-2
tools/perf/util/hist.h
··· 67 67 struct events_stats stats; 68 68 u64 event_stream; 69 69 u16 col_len[HISTC_NR_COLS]; 70 - /* Best would be to reuse the session callchain cursor */ 71 - struct callchain_cursor callchain_cursor; 72 70 }; 73 71 74 72 struct hist_entry *__hists__add_entry(struct hists *self,
+4
tools/perf/util/pager.c
··· 57 57 } 58 58 if (!pager) 59 59 pager = getenv("PAGER"); 60 + if (!pager) { 61 + if (!access("/usr/bin/pager", X_OK)) 62 + pager = "/usr/bin/pager"; 63 + } 60 64 if (!pager) 61 65 pager = "less"; 62 66 else if (!*pager || !strcmp(pager, "cat"))
+2 -6
tools/perf/util/probe-event.c
··· 2164 2164 2165 2165 error: 2166 2166 if (kfd >= 0) { 2167 - if (namelist) 2168 - strlist__delete(namelist); 2169 - 2167 + strlist__delete(namelist); 2170 2168 close(kfd); 2171 2169 } 2172 2170 2173 2171 if (ufd >= 0) { 2174 - if (unamelist) 2175 - strlist__delete(unamelist); 2176 - 2172 + strlist__delete(unamelist); 2177 2173 close(ufd); 2178 2174 } 2179 2175
+76 -21
tools/perf/util/session.c
··· 288 288 return bi; 289 289 } 290 290 291 - int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, 291 + int machine__resolve_callchain(struct machine *self, 292 + struct perf_evsel *evsel __used, 292 293 struct thread *thread, 293 294 struct ip_callchain *chain, 294 295 struct symbol **parent) ··· 298 297 unsigned int i; 299 298 int err; 300 299 301 - callchain_cursor_reset(&evsel->hists.callchain_cursor); 300 + callchain_cursor_reset(&callchain_cursor); 301 + 302 + if (chain->nr > PERF_MAX_STACK_DEPTH) { 303 + pr_warning("corrupted callchain. skipping...\n"); 304 + return 0; 305 + } 302 306 303 307 for (i = 0; i < chain->nr; i++) { 304 308 u64 ip; ··· 323 317 case PERF_CONTEXT_USER: 324 318 cpumode = PERF_RECORD_MISC_USER; break; 325 319 default: 326 - break; 320 + pr_debug("invalid callchain context: " 321 + "%"PRId64"\n", (s64) ip); 322 + /* 323 + * It seems the callchain is corrupted. 324 + * Discard all. 325 + */ 326 + callchain_cursor_reset(&callchain_cursor); 327 + return 0; 327 328 } 328 329 continue; 329 330 } ··· 346 333 break; 347 334 } 348 335 349 - err = callchain_cursor_append(&evsel->hists.callchain_cursor, 336 + err = callchain_cursor_append(&callchain_cursor, 350 337 ip, al.map, al.sym); 351 338 if (err) 352 339 return err; ··· 454 441 } 455 442 } 456 443 457 - static void perf_event__all64_swap(union perf_event *event) 444 + static void swap_sample_id_all(union perf_event *event, void *data) 445 + { 446 + void *end = (void *) event + event->header.size; 447 + int size = end - data; 448 + 449 + BUG_ON(size % sizeof(u64)); 450 + mem_bswap_64(data, size); 451 + } 452 + 453 + static void perf_event__all64_swap(union perf_event *event, 454 + bool sample_id_all __used) 458 455 { 459 456 struct perf_event_header *hdr = &event->header; 460 457 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); 461 458 } 462 459 463 - static void perf_event__comm_swap(union perf_event *event) 460 + static void perf_event__comm_swap(union 
perf_event *event, bool sample_id_all) 464 461 { 465 462 event->comm.pid = bswap_32(event->comm.pid); 466 463 event->comm.tid = bswap_32(event->comm.tid); 464 + 465 + if (sample_id_all) { 466 + void *data = &event->comm.comm; 467 + 468 + data += ALIGN(strlen(data) + 1, sizeof(u64)); 469 + swap_sample_id_all(event, data); 470 + } 467 471 } 468 472 469 - static void perf_event__mmap_swap(union perf_event *event) 473 + static void perf_event__mmap_swap(union perf_event *event, 474 + bool sample_id_all) 470 475 { 471 476 event->mmap.pid = bswap_32(event->mmap.pid); 472 477 event->mmap.tid = bswap_32(event->mmap.tid); 473 478 event->mmap.start = bswap_64(event->mmap.start); 474 479 event->mmap.len = bswap_64(event->mmap.len); 475 480 event->mmap.pgoff = bswap_64(event->mmap.pgoff); 481 + 482 + if (sample_id_all) { 483 + void *data = &event->mmap.filename; 484 + 485 + data += ALIGN(strlen(data) + 1, sizeof(u64)); 486 + swap_sample_id_all(event, data); 487 + } 476 488 } 477 489 478 - static void perf_event__task_swap(union perf_event *event) 490 + static void perf_event__task_swap(union perf_event *event, bool sample_id_all) 479 491 { 480 492 event->fork.pid = bswap_32(event->fork.pid); 481 493 event->fork.tid = bswap_32(event->fork.tid); 482 494 event->fork.ppid = bswap_32(event->fork.ppid); 483 495 event->fork.ptid = bswap_32(event->fork.ptid); 484 496 event->fork.time = bswap_64(event->fork.time); 497 + 498 + if (sample_id_all) 499 + swap_sample_id_all(event, &event->fork + 1); 485 500 } 486 501 487 - static void perf_event__read_swap(union perf_event *event) 502 + static void perf_event__read_swap(union perf_event *event, bool sample_id_all) 488 503 { 489 504 event->read.pid = bswap_32(event->read.pid); 490 505 event->read.tid = bswap_32(event->read.tid); ··· 520 479 event->read.time_enabled = bswap_64(event->read.time_enabled); 521 480 event->read.time_running = bswap_64(event->read.time_running); 522 481 event->read.id = bswap_64(event->read.id); 482 + 483 + if 
(sample_id_all) 484 + swap_sample_id_all(event, &event->read + 1); 523 485 } 524 486 525 487 static u8 revbyte(u8 b) ··· 574 530 swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64)); 575 531 } 576 532 577 - static void perf_event__hdr_attr_swap(union perf_event *event) 533 + static void perf_event__hdr_attr_swap(union perf_event *event, 534 + bool sample_id_all __used) 578 535 { 579 536 size_t size; 580 537 ··· 586 541 mem_bswap_64(event->attr.id, size); 587 542 } 588 543 589 - static void perf_event__event_type_swap(union perf_event *event) 544 + static void perf_event__event_type_swap(union perf_event *event, 545 + bool sample_id_all __used) 590 546 { 591 547 event->event_type.event_type.event_id = 592 548 bswap_64(event->event_type.event_type.event_id); 593 549 } 594 550 595 - static void perf_event__tracing_data_swap(union perf_event *event) 551 + static void perf_event__tracing_data_swap(union perf_event *event, 552 + bool sample_id_all __used) 596 553 { 597 554 event->tracing_data.size = bswap_32(event->tracing_data.size); 598 555 } 599 556 600 - typedef void (*perf_event__swap_op)(union perf_event *event); 557 + typedef void (*perf_event__swap_op)(union perf_event *event, 558 + bool sample_id_all); 601 559 602 560 static perf_event__swap_op perf_event__swap_ops[] = { 603 561 [PERF_RECORD_MMAP] = perf_event__mmap_swap, ··· 1034 986 } 1035 987 } 1036 988 989 + static void event_swap(union perf_event *event, bool sample_id_all) 990 + { 991 + perf_event__swap_op swap; 992 + 993 + swap = perf_event__swap_ops[event->header.type]; 994 + if (swap) 995 + swap(event, sample_id_all); 996 + } 997 + 1037 998 static int perf_session__process_event(struct perf_session *session, 1038 999 union perf_event *event, 1039 1000 struct perf_tool *tool, ··· 1051 994 struct perf_sample sample; 1052 995 int ret; 1053 996 1054 - if (session->header.needs_swap && 1055 - perf_event__swap_ops[event->header.type]) 1056 - perf_event__swap_ops[event->header.type](event); 997 + if 
(session->header.needs_swap) 998 + event_swap(event, session->sample_id_all); 1057 999 1058 1000 if (event->header.type >= PERF_RECORD_HEADER_MAX) 1059 1001 return -EINVAL; ··· 1484 1428 int print_sym, int print_dso, int print_symoffset) 1485 1429 { 1486 1430 struct addr_location al; 1487 - struct callchain_cursor *cursor = &evsel->hists.callchain_cursor; 1488 1431 struct callchain_cursor_node *node; 1489 1432 1490 1433 if (perf_event__preprocess_sample(event, machine, &al, sample, ··· 1501 1446 error("Failed to resolve callchain. Skipping\n"); 1502 1447 return; 1503 1448 } 1504 - callchain_cursor_commit(cursor); 1449 + callchain_cursor_commit(&callchain_cursor); 1505 1450 1506 1451 while (1) { 1507 - node = callchain_cursor_current(cursor); 1452 + node = callchain_cursor_current(&callchain_cursor); 1508 1453 if (!node) 1509 1454 break; 1510 1455 ··· 1515 1460 } 1516 1461 if (print_dso) { 1517 1462 printf(" ("); 1518 - map__fprintf_dsoname(al.map, stdout); 1463 + map__fprintf_dsoname(node->map, stdout); 1519 1464 printf(")"); 1520 1465 } 1521 1466 printf("\n"); 1522 1467 1523 - callchain_cursor_advance(cursor); 1468 + callchain_cursor_advance(&callchain_cursor); 1524 1469 } 1525 1470 1526 1471 } else {
+36 -2
tools/perf/util/symbol.c
··· 323 323 dso->sorted_by_name = 0; 324 324 dso->has_build_id = 0; 325 325 dso->kernel = DSO_TYPE_USER; 326 + dso->needs_swap = DSO_SWAP__UNSET; 326 327 INIT_LIST_HEAD(&dso->node); 327 328 } 328 329 ··· 1157 1156 return -1; 1158 1157 } 1159 1158 1159 + static int dso__swap_init(struct dso *dso, unsigned char eidata) 1160 + { 1161 + static unsigned int const endian = 1; 1162 + 1163 + dso->needs_swap = DSO_SWAP__NO; 1164 + 1165 + switch (eidata) { 1166 + case ELFDATA2LSB: 1167 + /* We are big endian, DSO is little endian. */ 1168 + if (*(unsigned char const *)&endian != 1) 1169 + dso->needs_swap = DSO_SWAP__YES; 1170 + break; 1171 + 1172 + case ELFDATA2MSB: 1173 + /* We are little endian, DSO is big endian. */ 1174 + if (*(unsigned char const *)&endian != 0) 1175 + dso->needs_swap = DSO_SWAP__YES; 1176 + break; 1177 + 1178 + default: 1179 + pr_err("unrecognized DSO data encoding %d\n", eidata); 1180 + return -EINVAL; 1181 + } 1182 + 1183 + return 0; 1184 + } 1185 + 1160 1186 static int dso__load_sym(struct dso *dso, struct map *map, const char *name, 1161 1187 int fd, symbol_filter_t filter, int kmodule, 1162 1188 int want_symtab) ··· 1214 1186 pr_debug("%s: cannot get elf header.\n", __func__); 1215 1187 goto out_elf_end; 1216 1188 } 1189 + 1190 + if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) 1191 + goto out_elf_end; 1217 1192 1218 1193 /* Always reject images with a mismatched build-id: */ 1219 1194 if (dso->has_build_id) { ··· 1303 1272 if (opdsec && sym.st_shndx == opdidx) { 1304 1273 u32 offset = sym.st_value - opdshdr.sh_addr; 1305 1274 u64 *opd = opddata->d_buf + offset; 1306 - sym.st_value = *opd; 1275 + sym.st_value = DSO__SWAP(dso, u64, *opd); 1307 1276 sym.st_shndx = elf_addr_to_index(elf, sym.st_value); 1308 1277 } 1309 1278 ··· 2817 2786 2818 2787 struct map *dso__new_map(const char *name) 2819 2788 { 2789 + struct map *map = NULL; 2820 2790 struct dso *dso = dso__new(name); 2821 - struct map *map = map__new2(0, dso, MAP__FUNCTION); 2791 + 2792 + if 
(dso) 2793 + map = map__new2(0, dso, MAP__FUNCTION); 2822 2794 2823 2795 return map; 2824 2796 }
+30
tools/perf/util/symbol.h
··· 9 9 #include <linux/list.h> 10 10 #include <linux/rbtree.h> 11 11 #include <stdio.h> 12 + #include <byteswap.h> 12 13 13 14 #ifdef HAVE_CPLUS_DEMANGLE 14 15 extern char *cplus_demangle(const char *, int); ··· 161 160 DSO_TYPE_GUEST_KERNEL 162 161 }; 163 162 163 + enum dso_swap_type { 164 + DSO_SWAP__UNSET, 165 + DSO_SWAP__NO, 166 + DSO_SWAP__YES, 167 + }; 168 + 164 169 struct dso { 165 170 struct list_head node; 166 171 struct rb_root symbols[MAP__NR_TYPES]; 167 172 struct rb_root symbol_names[MAP__NR_TYPES]; 168 173 enum dso_kernel_type kernel; 174 + enum dso_swap_type needs_swap; 169 175 u8 adjust_symbols:1; 170 176 u8 has_build_id:1; 171 177 u8 hit:1; ··· 189 181 u16 short_name_len; 190 182 char name[0]; 191 183 }; 184 + 185 + #define DSO__SWAP(dso, type, val) \ 186 + ({ \ 187 + type ____r = val; \ 188 + BUG_ON(dso->needs_swap == DSO_SWAP__UNSET); \ 189 + if (dso->needs_swap == DSO_SWAP__YES) { \ 190 + switch (sizeof(____r)) { \ 191 + case 2: \ 192 + ____r = bswap_16(val); \ 193 + break; \ 194 + case 4: \ 195 + ____r = bswap_32(val); \ 196 + break; \ 197 + case 8: \ 198 + ____r = bswap_64(val); \ 199 + break; \ 200 + default: \ 201 + BUG_ON(1); \ 202 + } \ 203 + } \ 204 + ____r; \ 205 + }) 192 206 193 207 struct dso *dso__new(const char *name); 194 208 void dso__delete(struct dso *dso);
+28 -2
tools/power/x86/turbostat/turbostat.c
··· 73 73 char *progname; 74 74 75 75 int num_cpus; 76 - cpu_set_t *cpu_mask; 77 - size_t cpu_mask_size; 76 + cpu_set_t *cpu_present_set, *cpu_mask; 77 + size_t cpu_present_setsize, cpu_mask_size; 78 78 79 79 struct counters { 80 80 unsigned long long tsc; /* per thread */ ··· 103 103 struct timeval tv_odd; 104 104 struct timeval tv_delta; 105 105 106 + int mark_cpu_present(int pkg, int core, int cpu) 107 + { 108 + CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set); 109 + return 0; 110 + } 111 + 106 112 /* 107 113 * cpu_mask_init(ncpus) 108 114 * ··· 124 118 } 125 119 cpu_mask_size = CPU_ALLOC_SIZE(ncpus); 126 120 CPU_ZERO_S(cpu_mask_size, cpu_mask); 121 + 122 + /* 123 + * Allocate and initialize cpu_present_set 124 + */ 125 + cpu_present_set = CPU_ALLOC(ncpus); 126 + if (cpu_present_set == NULL) { 127 + perror("CPU_ALLOC"); 128 + exit(3); 129 + } 130 + cpu_present_setsize = CPU_ALLOC_SIZE(ncpus); 131 + CPU_ZERO_S(cpu_present_setsize, cpu_present_set); 132 + for_all_cpus(mark_cpu_present); 127 133 } 128 134 129 135 void cpu_mask_uninit() ··· 143 125 CPU_FREE(cpu_mask); 144 126 cpu_mask = NULL; 145 127 cpu_mask_size = 0; 128 + CPU_FREE(cpu_present_set); 129 + cpu_present_set = NULL; 130 + cpu_present_setsize = 0; 146 131 } 147 132 148 133 int cpu_migrate(int cpu) ··· 933 912 switch (model) { 934 913 case 0x2A: 935 914 case 0x2D: 915 + case 0x3A: /* IVB */ 916 + case 0x3D: /* IVB Xeon */ 936 917 return 1; 937 918 } 938 919 return 0; ··· 1070 1047 int retval; 1071 1048 pid_t child_pid; 1072 1049 get_counters(cnt_even); 1050 + 1051 + /* clear affinity side-effect of get_counters() */ 1052 + sched_setaffinity(0, cpu_present_setsize, cpu_present_set); 1073 1053 gettimeofday(&tv_even, (struct timezone *)NULL); 1074 1054 1075 1055 child_pid = fork();
+1
virt/kvm/irq_comm.c
··· 332 332 */ 333 333 hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link) 334 334 if (ei->type == KVM_IRQ_ROUTING_MSI || 335 + ue->type == KVM_IRQ_ROUTING_MSI || 335 336 ue->u.irqchip.irqchip == ei->irqchip.irqchip) 336 337 return r; 337 338