Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge remote-tracking branch 'origin/master' into next

(Merge in order to get the PCIe mps/mrrs code fixes)

+3500 -2346
+13
Documentation/ABI/testing/sysfs-class-scsi_host
··· 1 + What: /sys/class/scsi_host/hostX/isci_id 2 + Date: June 2011 3 + Contact: Dave Jiang <dave.jiang@intel.com> 4 + Description: 5 + This file contains the enumerated host ID for the Intel 6 + SCU controller. The Intel(R) C600 Series Chipset SATA/SAS 7 + Storage Control Unit embeds up to two 4-port controllers in 8 + a single PCI device. The controllers are enumerated in order 9 + which usually means the lowest number scsi_host corresponds 10 + with the first controller, but this association is not 11 + guaranteed. The 'isci_id' attribute unambiguously identifies 12 + the controller index: '0' for the first controller, 13 + '1' for the second.
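The isci_id attribute lends itself to a small user-space helper; below is an illustrative sketch (not part of the patch) that reads the index for a given scsi_host, assuming only the sysfs path documented above:

/* Illustrative sketch: read the isci_id attribute for a given scsi_host.
 * Only the sysfs path documented above is assumed; error handling is
 * minimal for brevity. Returns the controller index (0 or 1), or -1. */
#include <stdio.h>

static int read_isci_id(int host)
{
        char path[64];
        FILE *f;
        int id = -1;

        snprintf(path, sizeof(path),
                 "/sys/class/scsi_host/host%d/isci_id", host);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (fscanf(f, "%d", &id) != 1)
                id = -1;
        fclose(f);
        return id;      /* '0' for the first controller, '1' for the second */
}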
+19 -19
Documentation/DocBook/media/v4l/controls.xml
··· 1455 1455 </row> 1456 1456 1457 1457 <row><entry></entry></row> 1458 - <row> 1458 + <row id="v4l2-mpeg-video-h264-vui-sar-idc"> 1459 1459 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC</constant>&nbsp;</entry> 1460 1460 <entry>enum&nbsp;v4l2_mpeg_video_h264_vui_sar_idc</entry> 1461 1461 </row> ··· 1561 1561 </row> 1562 1562 1563 1563 <row><entry></entry></row> 1564 - <row> 1564 + <row id="v4l2-mpeg-video-h264-level"> 1565 1565 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LEVEL</constant>&nbsp;</entry> 1566 1566 <entry>enum&nbsp;v4l2_mpeg_video_h264_level</entry> 1567 1567 </row> ··· 1641 1641 </row> 1642 1642 1643 1643 <row><entry></entry></row> 1644 - <row> 1644 + <row id="v4l2-mpeg-video-mpeg4-level"> 1645 1645 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL</constant>&nbsp;</entry> 1646 1646 <entry>enum&nbsp;v4l2_mpeg_video_mpeg4_level</entry> 1647 1647 </row> ··· 1689 1689 </row> 1690 1690 1691 1691 <row><entry></entry></row> 1692 - <row> 1692 + <row id="v4l2-mpeg-video-h264-profile"> 1693 1693 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_PROFILE</constant>&nbsp;</entry> 1694 - <entry>enum&nbsp;v4l2_mpeg_h264_profile</entry> 1694 + <entry>enum&nbsp;v4l2_mpeg_video_h264_profile</entry> 1695 1695 </row> 1696 1696 <row><entry spanname="descr">The profile information for H264. 1697 1697 Applicable to the H264 encoder. ··· 1774 1774 </row> 1775 1775 1776 1776 <row><entry></entry></row> 1777 - <row> 1777 + <row id="v4l2-mpeg-video-mpeg4-profile"> 1778 1778 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE</constant>&nbsp;</entry> 1779 - <entry>enum&nbsp;v4l2_mpeg_mpeg4_profile</entry> 1779 + <entry>enum&nbsp;v4l2_mpeg_video_mpeg4_profile</entry> 1780 1780 </row> 1781 1781 <row><entry spanname="descr">The profile information for MPEG4. 1782 1782 Applicable to the MPEG4 encoder. ··· 1820 1820 </row> 1821 1821 1822 1822 <row><entry></entry></row> 1823 - <row> 1823 + <row id="v4l2-mpeg-video-multi-slice-mode"> 1824 1824 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE</constant>&nbsp;</entry> 1825 - <entry>enum&nbsp;v4l2_mpeg_multi_slice_mode</entry> 1825 + <entry>enum&nbsp;v4l2_mpeg_video_multi_slice_mode</entry> 1826 1826 </row> 1827 1827 <row><entry spanname="descr">Determines how the encoder should handle division of frame into slices. 1828 1828 Applicable to the encoder. ··· 1868 1868 </row> 1869 1869 1870 1870 <row><entry></entry></row> 1871 - <row> 1871 + <row id="v4l2-mpeg-video-h264-loop-filter-mode"> 1872 1872 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE</constant>&nbsp;</entry> 1873 - <entry>enum&nbsp;v4l2_mpeg_h264_loop_filter_mode</entry> 1873 + <entry>enum&nbsp;v4l2_mpeg_video_h264_loop_filter_mode</entry> 1874 1874 </row> 1875 1875 <row><entry spanname="descr">Loop filter mode for H264 encoder. 1876 1876 Possible values are:</entry> ··· 1913 1913 </row> 1914 1914 1915 1915 <row><entry></entry></row> 1916 - <row> 1916 + <row id="v4l2-mpeg-video-h264-entropy-mode"> 1917 1917 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE</constant>&nbsp;</entry> 1918 - <entry>enum&nbsp;v4l2_mpeg_h264_symbol_mode</entry> 1918 + <entry>enum&nbsp;v4l2_mpeg_video_h264_entropy_mode</entry> 1919 1919 </row> 1920 1920 <row><entry spanname="descr">Entropy coding mode for H264 - CABAC/CAVALC. 1921 1921 Applicable to the H264 encoder. 
··· 2140 2140 </row> 2141 2141 2142 2142 <row><entry></entry></row> 2143 - <row> 2143 + <row id="v4l2-mpeg-video-header-mode"> 2144 2144 <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_HEADER_MODE</constant>&nbsp;</entry> 2145 - <entry>enum&nbsp;v4l2_mpeg_header_mode</entry> 2145 + <entry>enum&nbsp;v4l2_mpeg_video_header_mode</entry> 2146 2146 </row> 2147 2147 <row><entry spanname="descr">Determines whether the header is returned as the first buffer or is 2148 2148 it returned together with the first frame. Applicable to encoders. ··· 2320 2320 Applicable to the H264 encoder.</entry> 2321 2321 </row> 2322 2322 <row><entry></entry></row> 2323 - <row> 2323 + <row id="v4l2-mpeg-mfc51-video-frame-skip-mode"> 2324 2324 <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE</constant>&nbsp;</entry> 2325 - <entry>enum&nbsp;v4l2_mpeg_mfc51_frame_skip_mode</entry> 2325 + <entry>enum&nbsp;v4l2_mpeg_mfc51_video_frame_skip_mode</entry> 2326 2326 </row> 2327 2327 <row><entry spanname="descr"> 2328 2328 Indicates in what conditions the encoder should skip frames. If encoding a frame would cause the encoded stream to be larger then ··· 2361 2361 </entry> 2362 2362 </row> 2363 2363 <row><entry></entry></row> 2364 - <row> 2364 + <row id="v4l2-mpeg-mfc51-video-force-frame-type"> 2365 2365 <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE</constant>&nbsp;</entry> 2366 - <entry>enum&nbsp;v4l2_mpeg_mfc51_force_frame_type</entry> 2366 + <entry>enum&nbsp;v4l2_mpeg_mfc51_video_force_frame_type</entry> 2367 2367 </row> 2368 2368 <row><entry spanname="descr">Force a frame type for the next queued buffer. Applicable to encoders. 2369 2369 Possible values are:</entry>
+1 -84
Documentation/cgroups/memory.txt
··· 380 380 381 381 5.2 stat file 382 382 383 - 5.2.1 memory.stat file includes following statistics 383 + memory.stat file includes following statistics 384 384 385 385 # per-memory cgroup local status 386 386 cache - # of bytes of page cache memory. ··· 437 437 (Note: file and shmem may be shared among other cgroups. In that case, 438 438 file_mapped is accounted only when the memory cgroup is owner of page 439 439 cache.) 440 - 441 - 5.2.2 memory.vmscan_stat 442 - 443 - memory.vmscan_stat includes statistics information for memory scanning and 444 - freeing, reclaiming. The statistics shows memory scanning information since 445 - memory cgroup creation and can be reset to 0 by writing 0 as 446 - 447 - #echo 0 > ../memory.vmscan_stat 448 - 449 - This file contains following statistics. 450 - 451 - [param]_[file_or_anon]_pages_by_[reason]_[under_heararchy] 452 - [param]_elapsed_ns_by_[reason]_[under_hierarchy] 453 - 454 - For example, 455 - 456 - scanned_file_pages_by_limit indicates the number of scanned 457 - file pages at vmscan. 458 - 459 - Now, 3 parameters are supported 460 - 461 - scanned - the number of pages scanned by vmscan 462 - rotated - the number of pages activated at vmscan 463 - freed - the number of pages freed by vmscan 464 - 465 - If "rotated" is high against scanned/freed, the memcg seems busy. 466 - 467 - Now, 2 reason are supported 468 - 469 - limit - the memory cgroup's limit 470 - system - global memory pressure + softlimit 471 - (global memory pressure not under softlimit is not handled now) 472 - 473 - When under_hierarchy is added in the tail, the number indicates the 474 - total memcg scan of its children and itself. 475 - 476 - elapsed_ns is a elapsed time in nanosecond. This may include sleep time 477 - and not indicates CPU usage. So, please take this as just showing 478 - latency. 479 - 480 - Here is an example. 
481 - 482 - # cat /cgroup/memory/A/memory.vmscan_stat 483 - scanned_pages_by_limit 9471864 484 - scanned_anon_pages_by_limit 6640629 485 - scanned_file_pages_by_limit 2831235 486 - rotated_pages_by_limit 4243974 487 - rotated_anon_pages_by_limit 3971968 488 - rotated_file_pages_by_limit 272006 489 - freed_pages_by_limit 2318492 490 - freed_anon_pages_by_limit 962052 491 - freed_file_pages_by_limit 1356440 492 - elapsed_ns_by_limit 351386416101 493 - scanned_pages_by_system 0 494 - scanned_anon_pages_by_system 0 495 - scanned_file_pages_by_system 0 496 - rotated_pages_by_system 0 497 - rotated_anon_pages_by_system 0 498 - rotated_file_pages_by_system 0 499 - freed_pages_by_system 0 500 - freed_anon_pages_by_system 0 501 - freed_file_pages_by_system 0 502 - elapsed_ns_by_system 0 503 - scanned_pages_by_limit_under_hierarchy 9471864 504 - scanned_anon_pages_by_limit_under_hierarchy 6640629 505 - scanned_file_pages_by_limit_under_hierarchy 2831235 506 - rotated_pages_by_limit_under_hierarchy 4243974 507 - rotated_anon_pages_by_limit_under_hierarchy 3971968 508 - rotated_file_pages_by_limit_under_hierarchy 272006 509 - freed_pages_by_limit_under_hierarchy 2318492 510 - freed_anon_pages_by_limit_under_hierarchy 962052 511 - freed_file_pages_by_limit_under_hierarchy 1356440 512 - elapsed_ns_by_limit_under_hierarchy 351386416101 513 - scanned_pages_by_system_under_hierarchy 0 514 - scanned_anon_pages_by_system_under_hierarchy 0 515 - scanned_file_pages_by_system_under_hierarchy 0 516 - rotated_pages_by_system_under_hierarchy 0 517 - rotated_anon_pages_by_system_under_hierarchy 0 518 - rotated_file_pages_by_system_under_hierarchy 0 519 - freed_pages_by_system_under_hierarchy 0 520 - freed_anon_pages_by_system_under_hierarchy 0 521 - freed_file_pages_by_system_under_hierarchy 0 522 - elapsed_ns_by_system_under_hierarchy 0 523 440 524 441 5.3 swappiness 525 442
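Since memory.stat is a flat list of "name value" pairs, a monitoring tool can pull a single counter with a few lines of C; this sketch assumes the /cgroup/memory mount point used by the examples in this document:

/* Sketch: fetch one counter (e.g. "cache") from a memory cgroup's
 * memory.stat. The /cgroup/memory mount point is an assumption taken
 * from this document's examples; adjust for the local hierarchy. */
#include <stdio.h>
#include <string.h>

static long long memcg_read_stat(const char *cgroup, const char *key)
{
        char path[256], name[64];
        long long val;
        FILE *f;

        snprintf(path, sizeof(path),
                 "/cgroup/memory/%s/memory.stat", cgroup);
        f = fopen(path, "r");
        if (!f)
                return -1;
        while (fscanf(f, "%63s %lld", name, &val) == 2) {
                if (!strcmp(name, key)) {
                        fclose(f);
                        return val;
                }
        }
        fclose(f);
        return -1;
}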
+8
Documentation/feature-removal-schedule.txt
··· 592 592 interface that was used by acer-wmi driver. It will replaced by 593 593 information log when acer-wmi initial. 594 594 Who: Lee, Chun-Yi <jlee@novell.com> 595 + 596 + ---------------------------- 597 + What: The XFS nodelaylog mount option 598 + When: 3.3 599 + Why: The delaylog mode that has been the default since 2.6.39 has proven 600 + stable, and the old code is in the way of additional improvements in 601 + the log code. 602 + Who: Christoph Hellwig <hch@lst.de>
+7
Documentation/hwmon/max16065
··· 62 62 the devices explicitly. Please see Documentation/i2c/instantiating-devices for 63 63 details. 64 64 65 + WARNING: Do not access chip registers using the i2cdump command, and do not use 66 + any of the i2ctools commands on a command register (0xa5 to 0xac). The chips 67 + supported by this driver interpret any access to a command register (including 68 + read commands) as a request to execute the command in question. This may result in 69 + power loss, board resets, and/or Flash corruption. Worst case, your board may 70 + turn into a brick. 71 + 65 72 66 73 Sysfs entries 67 74 -------------
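Because the chip treats even reads in the 0xa5-0xac window as commands, any user-space tool that touches these devices over raw I2C might want a guard like the following hypothetical helper (the register range is the one given in the warning above):

/* Hypothetical guard: the MAX16065 executes a command on any access to
 * registers 0xa5..0xac, including reads, so refuse raw access there. */
static int max16065_reg_is_safe(unsigned int reg)
{
        return reg < 0xa5 || reg > 0xac;
}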
+2
Documentation/ioctl/ioctl-number.txt
··· 319 319 <mailto:thomas@winischhofer.net> 320 320 0xF4 00-1F video/mbxfb.h mbxfb 321 321 <mailto:raph@8d.com> 322 + 0xF6 all LTTng Linux Trace Toolkit Next Generation 323 + <mailto:mathieu.desnoyers@efficios.com> 322 324 0xFD all linux/dm-ioctl.h
+6 -3
Documentation/kernel-parameters.txt
··· 2086 2086 Override pmtimer IOPort with a hex value. 2087 2087 e.g. pmtmr=0x508 2088 2088 2089 - pnp.debug [PNP] 2090 - Enable PNP debug messages. This depends on the 2091 - CONFIG_PNP_DEBUG_MESSAGES option. 2089 + pnp.debug=1 [PNP] 2090 + Enable PNP debug messages (depends on the 2091 + CONFIG_PNP_DEBUG_MESSAGES option). Change at run-time 2092 + via /sys/module/pnp/parameters/debug. We always show 2093 + current resource usage; turning this on also shows 2094 + possible settings and some assignment information. 2092 2095 2093 2096 pnpacpi= [ACPI] 2094 2097 { off }
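The parameter can also be flipped at run time through the sysfs file named above; a minimal sketch in C (equivalent to echoing 1 or 0 into the file, assuming CONFIG_PNP_DEBUG_MESSAGES is set and the caller has the needed privileges):

/* Sketch: toggle PNP debug messages at run time via
 * /sys/module/pnp/parameters/debug. */
#include <stdio.h>

static int pnp_set_debug(int on)
{
        FILE *f = fopen("/sys/module/pnp/parameters/debug", "w");

        if (!f)
                return -1;
        fprintf(f, "%d\n", on ? 1 : 0);
        return fclose(f);
}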
+2 -1
Documentation/networking/dmfe.txt
··· 1 + Note: This driver doesn't have a maintainer. 2 + 1 3 Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver for Linux. 2 4 3 5 This program is free software; you can redistribute it and/or ··· 57 55 Authors: 58 56 59 57 Sten Wang <sten_wang@davicom.com.tw > : Original Author 60 - Tobias Ringstrom <tori@unhappy.mine.nu> : Current Maintainer 61 58 62 59 Contributors: 63 60
+18 -5
MAINTAINERS
··· 1278 1278 ATLX ETHERNET DRIVERS 1279 1279 M: Jay Cliburn <jcliburn@gmail.com> 1280 1280 M: Chris Snook <chris.snook@gmail.com> 1281 - M: Jie Yang <jie.yang@atheros.com> 1282 1281 L: netdev@vger.kernel.org 1283 1282 W: http://sourceforge.net/projects/atl1 1284 1283 W: http://atl1.sourceforge.net ··· 1573 1574 1574 1575 BROCADE BNA 10 GIGABIT ETHERNET DRIVER 1575 1576 M: Rasesh Mody <rmody@brocade.com> 1576 - M: Debashis Dutt <ddutt@brocade.com> 1577 1577 L: netdev@vger.kernel.org 1578 1578 S: Supported 1579 1579 F: drivers/net/bna/ ··· 1756 1758 1757 1759 CISCO VIC ETHERNET NIC DRIVER 1758 1760 M: Christian Benvenuti <benve@cisco.com> 1759 - M: Vasanthy Kolluri <vkolluri@cisco.com> 1760 1761 M: Roopa Prabhu <roprabhu@cisco.com> 1761 1762 M: David Wang <dwang2@cisco.com> 1762 1763 S: Supported ··· 3259 3262 F: drivers/input/input-mt.c 3260 3263 K: \b(ABS|SYN)_MT_ 3261 3264 3265 + INTEL C600 SERIES SAS CONTROLLER DRIVER 3266 + M: Intel SCU Linux support <intel-linux-scu@intel.com> 3267 + M: Dan Williams <dan.j.williams@intel.com> 3268 + M: Dave Jiang <dave.jiang@intel.com> 3269 + M: Ed Nadolski <edmund.nadolski@intel.com> 3270 + L: linux-scsi@vger.kernel.org 3271 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci.git 3272 + S: Maintained 3273 + F: drivers/scsi/isci/ 3274 + F: firmware/isci/ 3275 + 3262 3276 INTEL IDLE DRIVER 3263 3277 M: Len Brown <lenb@kernel.org> 3264 3278 L: linux-pm@lists.linux-foundation.org ··· 4412 4404 L: coreteam@netfilter.org 4413 4405 W: http://www.netfilter.org/ 4414 4406 W: http://www.iptables.org/ 4415 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git 4407 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git 4408 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git 4416 4409 S: Supported 4417 4410 F: include/linux/netfilter* 4418 4411 F: include/linux/netfilter/ ··· 4783 4774 4784 4775 OSD LIBRARY and FILESYSTEM 4785 4776 M: Boaz Harrosh <bharrosh@panasas.com> 4786 - M: Benny Halevy <bhalevy@panasas.com> 4777 + M: Benny Halevy <bhalevy@tonian.com> 4787 4778 L: osd-dev@open-osd.org 4788 4779 W: http://open-osd.org 4789 4780 T: git git://git.open-osd.org/open-osd.git ··· 7209 7200 S: Supported 7210 7201 F: Documentation/hwmon/wm83?? 7211 7202 F: drivers/leds/leds-wm83*.c 7203 + F: drivers/input/misc/wm831x-on.c 7204 + F: drivers/input/touchscreen/wm831x-ts.c 7205 + F: drivers/input/touchscreen/wm97*.c 7212 7206 F: drivers/mfd/wm8*.c 7213 7207 F: drivers/power/wm83*.c 7214 7208 F: drivers/rtc/rtc-wm83*.c ··· 7221 7209 F: include/linux/mfd/wm831x/ 7222 7210 F: include/linux/mfd/wm8350/ 7223 7211 F: include/linux/mfd/wm8400* 7212 + F: include/linux/wm97xx.h 7224 7213 F: include/sound/wm????.h 7225 7214 F: sound/soc/codecs/wm* 7226 7215
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 1 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc4 4 + EXTRAVERSION = -rc6 5 5 NAME = "Divemaster Edition" 6 6 7 7 # *DOCUMENTATION*
+1 -1
arch/alpha/Kconfig
··· 51 51 def_bool y 52 52 53 53 config GENERIC_GPIO 54 - def_bool y 54 + bool 55 55 56 56 config ZONE_DMA 57 57 bool
+7 -2
arch/arm/include/asm/hardware/cache-l2x0.h
··· 45 45 #define L2X0_CLEAN_INV_LINE_PA 0x7F0 46 46 #define L2X0_CLEAN_INV_LINE_IDX 0x7F8 47 47 #define L2X0_CLEAN_INV_WAY 0x7FC 48 - #define L2X0_LOCKDOWN_WAY_D 0x900 49 - #define L2X0_LOCKDOWN_WAY_I 0x904 48 + /* 49 + * The lockdown registers repeat 8 times for L310, the L210 has only one 50 + * D and one I lockdown register at 0x0900 and 0x0904. 51 + */ 52 + #define L2X0_LOCKDOWN_WAY_D_BASE 0x900 53 + #define L2X0_LOCKDOWN_WAY_I_BASE 0x904 54 + #define L2X0_LOCKDOWN_STRIDE 0x08 50 55 #define L2X0_TEST_OPERATION 0xF00 51 56 #define L2X0_LINE_DATA 0xF10 52 57 #define L2X0_LINE_TAG 0xF30
-1
arch/arm/mach-cns3xxx/include/mach/entry-macro.S
··· 8 8 * published by the Free Software Foundation. 9 9 */ 10 10 11 - #include <mach/hardware.h> 12 11 #include <asm/hardware/entry-macro-gic.S> 13 12 14 13 .macro disable_fiq
-1
arch/arm/mach-cns3xxx/include/mach/system.h
··· 13 13 14 14 #include <linux/io.h> 15 15 #include <asm/proc-fns.h> 16 - #include <mach/hardware.h> 17 16 18 17 static inline void arch_idle(void) 19 18 {
-1
arch/arm/mach-cns3xxx/include/mach/uncompress.h
··· 8 8 */ 9 9 10 10 #include <asm/mach-types.h> 11 - #include <mach/hardware.h> 12 11 #include <mach/cns3xxx.h> 13 12 14 13 #define AMBA_UART_DR(base) (*(volatile unsigned char *)((base) + 0x00))
+1 -1
arch/arm/mach-cns3xxx/pcie.c
··· 49 49 return &cns3xxx_pcie[root->domain]; 50 50 } 51 51 52 - static struct cns3xxx_pcie *pdev_to_cnspci(struct pci_dev *dev) 52 + static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev) 53 53 { 54 54 return sysdata_to_cnspci(dev->sysdata); 55 55 }
+28
arch/arm/mach-davinci/board-da850-evm.c
··· 115 115 }, 116 116 }; 117 117 118 + #ifdef CONFIG_MTD 119 + static void da850_evm_m25p80_notify_add(struct mtd_info *mtd) 120 + { 121 + char *mac_addr = davinci_soc_info.emac_pdata->mac_addr; 122 + size_t retlen; 123 + 124 + if (!strcmp(mtd->name, "MAC-Address")) { 125 + mtd->read(mtd, 0, ETH_ALEN, &retlen, mac_addr); 126 + if (retlen == ETH_ALEN) 127 + pr_info("Read MAC addr from SPI Flash: %pM\n", 128 + mac_addr); 129 + } 130 + } 131 + 132 + static struct mtd_notifier da850evm_spi_notifier = { 133 + .add = da850_evm_m25p80_notify_add, 134 + }; 135 + 136 + static void da850_evm_setup_mac_addr(void) 137 + { 138 + register_mtd_user(&da850evm_spi_notifier); 139 + } 140 + #else 141 + static void da850_evm_setup_mac_addr(void) { } 142 + #endif 143 + 118 144 static struct mtd_partition da850_evm_norflash_partition[] = { 119 145 { 120 146 .name = "bootloaders + env", ··· 1270 1244 if (ret) 1271 1245 pr_warning("da850_evm_init: sata registration failed: %d\n", 1272 1246 ret); 1247 + 1248 + da850_evm_setup_mac_addr(); 1273 1249 } 1274 1250 1275 1251 #ifdef CONFIG_SERIAL_8250_CONSOLE
+1 -1
arch/arm/mach-davinci/include/mach/psc.h
··· 243 243 #define PSC_STATE_DISABLE 2 244 244 #define PSC_STATE_ENABLE 3 245 245 246 - #define MDSTAT_STATE_MASK 0x1f 246 + #define MDSTAT_STATE_MASK 0x3f 247 247 #define MDCTL_FORCE BIT(31) 248 248 249 249 #ifndef __ASSEMBLER__
+5 -1
arch/arm/mach-davinci/sleep.S
··· 217 217 ENDPROC(davinci_ddr_psc_config) 218 218 219 219 CACHE_FLUSH: 220 - .word arm926_flush_kern_cache_all 220 + #ifdef CONFIG_CPU_V6 221 + .word v6_flush_kern_cache_all 222 + #else 223 + .word arm926_flush_kern_cache_all 224 + #endif 221 225 222 226 ENTRY(davinci_cpu_suspend_sz) 223 227 .word . - davinci_cpu_suspend
+3 -3
arch/arm/mach-integrator/integrator_ap.c
··· 337 337 static void integrator_clocksource_init(u32 khz) 338 338 { 339 339 void __iomem *base = (void __iomem *)TIMER2_VA_BASE; 340 - u32 ctrl = TIMER_CTRL_ENABLE; 340 + u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC; 341 341 342 342 if (khz >= 1500) { 343 343 khz /= 16; 344 - ctrl = TIMER_CTRL_DIV16; 344 + ctrl |= TIMER_CTRL_DIV16; 345 345 } 346 346 347 - writel(ctrl, base + TIMER_CTRL); 348 347 writel(0xffff, base + TIMER_LOAD); 348 + writel(ctrl, base + TIMER_CTRL); 349 349 350 350 clocksource_mmio_init(base + TIMER_VALUE, "timer2", 351 351 khz * 1000, 200, 16, clocksource_mmio_readl_down);
+2
arch/arm/mach-omap2/clock3xxx_data.c
··· 3078 3078 .name = "gpt12_fck", 3079 3079 .ops = &clkops_null, 3080 3080 .parent = &secure_32k_fck, 3081 + .clkdm_name = "wkup_clkdm", 3081 3082 .recalc = &followparent_recalc, 3082 3083 }; 3083 3084 ··· 3086 3085 .name = "wdt1_fck", 3087 3086 .ops = &clkops_null, 3088 3087 .parent = &secure_32k_fck, 3088 + .clkdm_name = "wkup_clkdm", 3089 3089 .recalc = &followparent_recalc, 3090 3090 }; 3091 3091
+9 -1
arch/arm/mach-omap2/clock44xx_data.c
··· 3376 3376 } else if (cpu_is_omap446x()) { 3377 3377 cpu_mask = RATE_IN_4460; 3378 3378 cpu_clkflg = CK_446X; 3379 + } else { 3380 + return 0; 3379 3381 } 3380 3382 3381 3383 clk_init(&omap2_clk_functions); 3382 - omap2_clk_disable_clkdm_control(); 3384 + 3385 + /* 3386 + * Must stay commented until all OMAP SoC drivers are 3387 + * converted to runtime PM, or drivers may start crashing 3388 + * 3389 + * omap2_clk_disable_clkdm_control(); 3390 + */ 3383 3391 3384 3392 for (c = omap44xx_clks; c < omap44xx_clks + ARRAY_SIZE(omap44xx_clks); 3385 3393 c++)
+2
arch/arm/mach-omap2/clockdomain.c
··· 747 747 spin_lock_irqsave(&clkdm->lock, flags); 748 748 clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED; 749 749 ret = arch_clkdm->clkdm_wakeup(clkdm); 750 + ret |= pwrdm_state_switch(clkdm->pwrdm.ptr); 750 751 spin_unlock_irqrestore(&clkdm->lock, flags); 751 752 return ret; 752 753 } ··· 819 818 spin_lock_irqsave(&clkdm->lock, flags); 820 819 clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED; 821 820 arch_clkdm->clkdm_deny_idle(clkdm); 821 + pwrdm_state_switch(clkdm->pwrdm.ptr); 822 822 spin_unlock_irqrestore(&clkdm->lock, flags); 823 823 } 824 824
+1
arch/arm/mach-omap2/omap_hwmod_2430_data.c
··· 192 192 .pa_end = OMAP243X_HS_BASE + SZ_4K - 1, 193 193 .flags = ADDR_TYPE_RT 194 194 }, 195 + { } 195 196 }; 196 197 197 198 /* l4_core ->usbhsotg interface */
-2
arch/arm/mach-omap2/pm.c
··· 130 130 } else { 131 131 hwsup = clkdm_in_hwsup(pwrdm->pwrdm_clkdms[0]); 132 132 clkdm_wakeup(pwrdm->pwrdm_clkdms[0]); 133 - pwrdm_wait_transition(pwrdm); 134 133 sleep_switch = FORCEWAKEUP_SWITCH; 135 134 } 136 135 } ··· 155 156 return ret; 156 157 } 157 158 158 - pwrdm_wait_transition(pwrdm); 159 159 pwrdm_state_switch(pwrdm); 160 160 err: 161 161 return ret;
+16 -9
arch/arm/mach-omap2/powerdomain.c
··· 195 195 196 196 /** 197 197 * pwrdm_init - set up the powerdomain layer 198 - * @pwrdm_list: array of struct powerdomain pointers to register 198 + * @pwrdms: array of struct powerdomain pointers to register 199 199 * @custom_funcs: func pointers for arch specific implementations 200 200 * 201 - * Loop through the array of powerdomains @pwrdm_list, registering all 202 - * that are available on the current CPU. If pwrdm_list is supplied 203 - * and not null, all of the referenced powerdomains will be 204 - * registered. No return value. XXX pwrdm_list is not really a 205 - * "list"; it is an array. Rename appropriately. 201 + * Loop through the array of powerdomains @pwrdms, registering all 202 + * that are available on the current CPU. Also, program all 203 + * powerdomain target state as ON; this is to prevent domains from 204 + * hitting low power states (if bootloader has target states set to 205 + * something other than ON) and potentially even losing context while 206 + * PM is not fully initialized. The PM late init code can then program 207 + * the desired target state for all the power domains. No return 208 + * value. 206 209 */ 207 - void pwrdm_init(struct powerdomain **pwrdm_list, struct pwrdm_ops *custom_funcs) 210 + void pwrdm_init(struct powerdomain **pwrdms, struct pwrdm_ops *custom_funcs) 208 211 { 209 212 struct powerdomain **p = NULL; 213 + struct powerdomain *temp_p; 210 214 211 215 if (!custom_funcs) 212 216 WARN(1, "powerdomain: No custom pwrdm functions registered\n"); 213 217 else 214 218 arch_pwrdm = custom_funcs; 215 219 216 - if (pwrdm_list) { 217 - for (p = pwrdm_list; *p; p++) 220 + if (pwrdms) { 221 + for (p = pwrdms; *p; p++) 218 222 _pwrdm_register(*p); 219 223 } 224 + 225 + list_for_each_entry(temp_p, &pwrdm_list, node) 226 + pwrdm_set_next_pwrst(temp_p, PWRDM_POWER_ON); 220 227 } 221 228 222 229 /**
+1
arch/arm/mach-prima2/clock.c
··· 481 481 482 482 static struct of_device_id clkc_ids[] = { 483 483 { .compatible = "sirf,prima2-clkc" }, 484 + {}, 484 485 }; 485 486 486 487 void __init sirfsoc_of_clk_init(void)
+1
arch/arm/mach-prima2/irq.c
··· 51 51 52 52 static struct of_device_id intc_ids[] = { 53 53 { .compatible = "sirf,prima2-intc" }, 54 + {}, 54 55 }; 55 56 56 57 void __init sirfsoc_of_irq_init(void)
+1
arch/arm/mach-prima2/rstc.c
··· 19 19 20 20 static struct of_device_id rstc_ids[] = { 21 21 { .compatible = "sirf,prima2-rstc" }, 22 + {}, 22 23 }; 23 24 24 25 static int __init sirfsoc_of_rstc_init(void)
+1
arch/arm/mach-prima2/timer.c
··· 190 190 191 191 static struct of_device_id timer_ids[] = { 192 192 { .compatible = "sirf,prima2-tick" }, 193 + {}, 193 194 }; 194 195 195 196 static void __init sirfsoc_of_timer_map(void)
+1 -1
arch/arm/mm/abort-macro.S
··· 17 17 cmp \tmp, # 0x5600 @ Is it ldrsb? 18 18 orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes 19 19 tst \tmp, #1 << 11 @ L = 0 -> write 20 - orreq \psr, \psr, #1 << 11 @ yes. 20 + orreq \fsr, \fsr, #1 << 11 @ yes. 21 21 b do_DataAbort 22 22 not_thumb: 23 23 .endm
+21
arch/arm/mm/cache-l2x0.c
··· 277 277 spin_unlock_irqrestore(&l2x0_lock, flags); 278 278 } 279 279 280 + static void __init l2x0_unlock(__u32 cache_id) 281 + { 282 + int lockregs; 283 + int i; 284 + 285 + if (cache_id == L2X0_CACHE_ID_PART_L310) 286 + lockregs = 8; 287 + else 288 + /* L210 and unknown types */ 289 + lockregs = 1; 290 + 291 + for (i = 0; i < lockregs; i++) { 292 + writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + 293 + i * L2X0_LOCKDOWN_STRIDE); 294 + writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE + 295 + i * L2X0_LOCKDOWN_STRIDE); 296 + } 297 + } 298 + 280 299 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) 281 300 { 282 301 __u32 aux; ··· 347 328 * accessing the below registers will fault. 348 329 */ 349 330 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { 331 + /* Make sure that I&D is not locked down when starting */ 332 + l2x0_unlock(cache_id); 350 333 351 334 /* l2x0 controller is disabled */ 352 335 writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
+1 -1
arch/arm/mm/init.c
··· 298 298 #ifdef CONFIG_HAVE_ARCH_PFN_VALID 299 299 int pfn_valid(unsigned long pfn) 300 300 { 301 - return memblock_is_memory(pfn << PAGE_SHIFT); 301 + return memblock_is_memory(__pfn_to_phys(pfn)); 302 302 } 303 303 EXPORT_SYMBOL(pfn_valid); 304 304 #endif
+3
arch/arm/plat-omap/omap_device.c
··· 615 615 616 616 return pm_generic_resume_noirq(dev); 617 617 } 618 + #else 619 + #define _od_suspend_noirq NULL 620 + #define _od_resume_noirq NULL 618 621 #endif 619 622 620 623 static struct dev_pm_domain omap_device_pm_domain = {
+57 -2
arch/openrisc/include/asm/dma-mapping.h
··· 31 31 32 32 #define DMA_ERROR_CODE (~(dma_addr_t)0x0) 33 33 34 - int dma_mapping_error(struct device *dev, dma_addr_t dma_addr); 35 34 36 35 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 37 36 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) ··· 46 47 void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle, 47 48 size_t size, enum dma_data_direction dir, 48 49 struct dma_attrs *attrs); 50 + int or1k_map_sg(struct device *dev, struct scatterlist *sg, 51 + int nents, enum dma_data_direction dir, 52 + struct dma_attrs *attrs); 53 + void or1k_unmap_sg(struct device *dev, struct scatterlist *sg, 54 + int nents, enum dma_data_direction dir, 55 + struct dma_attrs *attrs); 49 56 void or1k_sync_single_for_cpu(struct device *dev, 50 57 dma_addr_t dma_handle, size_t size, 51 58 enum dma_data_direction dir); ··· 103 98 debug_dma_unmap_page(dev, addr, size, dir, true); 104 99 } 105 100 101 + static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, 102 + int nents, enum dma_data_direction dir) 103 + { 104 + int i, ents; 105 + struct scatterlist *s; 106 + 107 + for_each_sg(sg, s, nents, i) 108 + kmemcheck_mark_initialized(sg_virt(s), s->length); 109 + BUG_ON(!valid_dma_direction(dir)); 110 + ents = or1k_map_sg(dev, sg, nents, dir, NULL); 111 + debug_dma_map_sg(dev, sg, nents, ents, dir); 112 + 113 + return ents; 114 + } 115 + 116 + static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, 117 + int nents, enum dma_data_direction dir) 118 + { 119 + BUG_ON(!valid_dma_direction(dir)); 120 + debug_dma_unmap_sg(dev, sg, nents, dir); 121 + or1k_unmap_sg(dev, sg, nents, dir, NULL); 122 + } 123 + 124 + static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, 125 + size_t offset, size_t size, 126 + enum dma_data_direction dir) 127 + { 128 + dma_addr_t addr; 129 + 130 + kmemcheck_mark_initialized(page_address(page) + offset, size); 131 + BUG_ON(!valid_dma_direction(dir)); 132 + addr = or1k_map_page(dev, page, offset, size, dir, NULL); 133 + debug_dma_map_page(dev, page, offset, size, dir, addr, false); 134 + 135 + return addr; 136 + } 137 + 138 + static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, 139 + size_t size, enum dma_data_direction dir) 140 + { 141 + BUG_ON(!valid_dma_direction(dir)); 142 + or1k_unmap_page(dev, addr, size, dir, NULL); 143 + debug_dma_unmap_page(dev, addr, size, dir, true); 144 + } 145 + 106 146 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, 107 147 size_t size, 108 148 enum dma_data_direction dir) ··· 169 119 static inline int dma_supported(struct device *dev, u64 dma_mask) 170 120 { 171 121 /* Support 32 bit DMA mask exclusively */ 172 - return dma_mask == 0xffffffffULL; 122 + return dma_mask == DMA_BIT_MASK(32); 123 + } 124 + 125 + static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 126 + { 127 + return 0; 173 128 } 174 129 175 130 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+1 -6
arch/openrisc/include/asm/sigcontext.h
··· 23 23 24 24 /* This struct is saved by setup_frame in signal.c, to keep the current 25 25 context while a signal handler is executed. It's restored by sys_sigreturn. 26 - 27 - To keep things simple, we use pt_regs here even though normally you just 28 - specify the list of regs to save. Then we can use copy_from_user on the 29 - entire regs instead of a bunch of get_user's as well... 30 26 */ 31 27 32 28 struct sigcontext { 33 - struct pt_regs regs; /* needs to be first */ 29 + struct user_regs_struct regs; /* needs to be first */ 34 30 unsigned long oldmask; 35 - unsigned long usp; /* usp before stacking this gunk on it */ 36 31 }; 37 32 38 33 #endif /* __ASM_OPENRISC_SIGCONTEXT_H */
+27 -1
arch/openrisc/kernel/dma.c
··· 154 154 /* Nothing special to do here... */ 155 155 } 156 156 157 + int or1k_map_sg(struct device *dev, struct scatterlist *sg, 158 + int nents, enum dma_data_direction dir, 159 + struct dma_attrs *attrs) 160 + { 161 + struct scatterlist *s; 162 + int i; 163 + 164 + for_each_sg(sg, s, nents, i) { 165 + s->dma_address = or1k_map_page(dev, sg_page(s), s->offset, 166 + s->length, dir, NULL); 167 + } 168 + 169 + return nents; 170 + } 171 + 172 + void or1k_unmap_sg(struct device *dev, struct scatterlist *sg, 173 + int nents, enum dma_data_direction dir, 174 + struct dma_attrs *attrs) 175 + { 176 + struct scatterlist *s; 177 + int i; 178 + 179 + for_each_sg(sg, s, nents, i) { 180 + or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL); 181 + } 182 + } 183 + 157 184 void or1k_sync_single_for_cpu(struct device *dev, 158 185 dma_addr_t dma_handle, size_t size, 159 186 enum dma_data_direction dir) ··· 214 187 215 188 return 0; 216 189 } 217 - 218 190 fs_initcall(dma_init);
+11 -18
arch/openrisc/kernel/signal.c
··· 52 52 static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc) 53 53 { 54 54 unsigned int err = 0; 55 - unsigned long old_usp; 56 55 57 56 /* Alwys make any pending restarted system call return -EINTR */ 58 57 current_thread_info()->restart_block.fn = do_no_restart_syscall; 59 58 60 - /* restore the regs from &sc->regs (same as sc, since regs is first) 59 + /* 60 + * Restore the regs from &sc->regs. 61 61 * (sc is already checked for VERIFY_READ since the sigframe was 62 62 * checked in sys_sigreturn previously) 63 63 */ 64 - 65 - if (__copy_from_user(regs, sc, sizeof(struct pt_regs))) 64 + if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long))) 65 + goto badframe; 66 + if (__copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long))) 67 + goto badframe; 68 + if (__copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long))) 66 69 goto badframe; 67 70 68 71 /* make sure the SM-bit is cleared so user-mode cannot fool us */ 69 72 regs->sr &= ~SPR_SR_SM; 70 - 71 - /* restore the old USP as it was before we stacked the sc etc. 72 - * (we cannot just pop the sigcontext since we aligned the sp and 73 - * stuff after pushing it) 74 - */ 75 - 76 - err |= __get_user(old_usp, &sc->usp); 77 - 78 - regs->sp = old_usp; 79 73 80 74 /* TODO: the other ports use regs->orig_XX to disable syscall checks 81 75 * after this completes, but we don't use that mechanism. maybe we can ··· 131 137 unsigned long mask) 132 138 { 133 139 int err = 0; 134 - unsigned long usp = regs->sp; 135 140 136 - /* copy the regs. they are first in sc so we can use sc directly */ 141 + /* copy the regs */ 137 142 138 - err |= __copy_to_user(sc, regs, sizeof(struct pt_regs)); 143 + err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long)); 144 + err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long)); 145 + err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long)); 139 146 140 147 /* then some other stuff */ 141 148 142 149 err |= __put_user(mask, &sc->oldmask); 143 - 144 - err |= __put_user(usp, &sc->usp); 145 150 146 151 return err; 147 152 }
+1 -1
arch/powerpc/boot/dts/p1023rds.dts
··· 387 387 #size-cells = <1>; 388 388 compatible = "cfi-flash"; 389 389 reg = <0x0 0x0 0x02000000>; 390 - bank-width = <1>; 390 + bank-width = <2>; 391 391 device-width = <1>; 392 392 partition@0 { 393 393 label = "ramdisk";
+1
arch/powerpc/configs/85xx/p1023rds_defconfig
··· 171 171 CONFIG_CRYPTO_SHA512=y 172 172 CONFIG_CRYPTO_AES=y 173 173 # CONFIG_CRYPTO_ANSI_CPRNG is not set 174 + CONFIG_CRYPTO_DEV_FSL_CAAM=y
+1
arch/powerpc/configs/corenet32_smp_defconfig
··· 185 185 CONFIG_CRYPTO_SHA512=y 186 186 CONFIG_CRYPTO_AES=y 187 187 # CONFIG_CRYPTO_ANSI_CPRNG is not set 188 + CONFIG_CRYPTO_DEV_FSL_CAAM=y
+4 -1
arch/powerpc/configs/corenet64_smp_defconfig
··· 100 100 CONFIG_SYSCTL_SYSCALL_CHECK=y 101 101 CONFIG_VIRQ_DEBUG=y 102 102 CONFIG_CRYPTO_PCBC=m 103 + CONFIG_CRYPTO_SHA256=y 104 + CONFIG_CRYPTO_SHA512=y 105 + CONFIG_CRYPTO_AES=y 103 106 # CONFIG_CRYPTO_ANSI_CPRNG is not set 104 - CONFIG_CRYPTO_DEV_TALITOS=y 107 + CONFIG_CRYPTO_DEV_FSL_CAAM=y
+1
arch/powerpc/configs/mpc85xx_defconfig
··· 139 139 CONFIG_SND_INTEL8X0=y 140 140 # CONFIG_SND_PPC is not set 141 141 # CONFIG_SND_USB is not set 142 + CONFIG_SND_SOC=y 142 143 CONFIG_HID_A4TECH=y 143 144 CONFIG_HID_APPLE=y 144 145 CONFIG_HID_BELKIN=y
+1
arch/powerpc/configs/mpc85xx_smp_defconfig
··· 140 140 CONFIG_SND_INTEL8X0=y 141 141 # CONFIG_SND_PPC is not set 142 142 # CONFIG_SND_USB is not set 143 + CONFIG_SND_SOC=y 143 144 CONFIG_HID_A4TECH=y 144 145 CONFIG_HID_APPLE=y 145 146 CONFIG_HID_BELKIN=y
+4
arch/um/Kconfig.x86
··· 10 10 bool 11 11 default n 12 12 13 + config CMPXCHG_DOUBLE 14 + bool 15 + default n 16 + 13 17 source "arch/x86/Kconfig.cpu" 14 18 15 19 endmenu
+1 -1
arch/um/Makefile
··· 41 41 KBUILD_CFLAGS += $(CFLAGS) $(CFLAGS-y) -D__arch_um__ -DSUBARCH=\"$(SUBARCH)\" \ 42 42 $(ARCH_INCLUDE) $(MODE_INCLUDE) -Dvmap=kernel_vmap \ 43 43 -Din6addr_loopback=kernel_in6addr_loopback \ 44 - -Din6addr_any=kernel_in6addr_any 44 + -Din6addr_any=kernel_in6addr_any -Dstrrchr=kernel_strrchr 45 45 46 46 KBUILD_AFLAGS += $(ARCH_INCLUDE) 47 47
+36 -25
arch/um/drivers/line.c
··· 399 399 * is done under a spinlock. Checking whether the device is in use is 400 400 * line->tty->count > 1, also under the spinlock. 401 401 * 402 - * tty->count serves to decide whether the device should be enabled or 403 - * disabled on the host. If it's equal to 1, then we are doing the 402 + * line->count serves to decide whether the device should be enabled or 403 + * disabled on the host. If it's equal to 0, then we are doing the 404 404 * first open or last close. Otherwise, open and close just return. 405 405 */ 406 406 ··· 414 414 goto out_unlock; 415 415 416 416 err = 0; 417 - if (tty->count > 1) 417 + if (line->count++) 418 418 goto out_unlock; 419 419 420 - spin_unlock(&line->count_lock); 421 - 420 + BUG_ON(tty->driver_data); 422 421 tty->driver_data = line; 423 422 line->tty = tty; 424 423 424 + spin_unlock(&line->count_lock); 425 425 err = enable_chan(line); 426 - if (err) 426 + if (err) /* line_close() will be called by our caller */ 427 427 return err; 428 428 429 429 INIT_DELAYED_WORK(&line->task, line_timer_cb); ··· 436 436 chan_window_size(&line->chan_list, &tty->winsize.ws_row, 437 437 &tty->winsize.ws_col); 438 438 439 - return err; 439 + return 0; 440 440 441 441 out_unlock: 442 442 spin_unlock(&line->count_lock); ··· 460 460 flush_buffer(line); 461 461 462 462 spin_lock(&line->count_lock); 463 - if (!line->valid) 464 - goto out_unlock; 463 + BUG_ON(!line->valid); 465 464 466 - if (tty->count > 1) 465 + if (--line->count) 467 466 goto out_unlock; 468 - 469 - spin_unlock(&line->count_lock); 470 467 471 468 line->tty = NULL; 472 469 tty->driver_data = NULL; 470 + 471 + spin_unlock(&line->count_lock); 473 472 474 473 if (line->sigio) { 475 474 unregister_winch(tty); ··· 497 498 498 499 spin_lock(&line->count_lock); 499 500 500 - if (line->tty != NULL) { 501 + if (line->count) { 501 502 *error_out = "Device is already open"; 502 503 goto out; 503 504 } ··· 721 722 int pid; 722 723 struct tty_struct *tty; 723 724 unsigned long stack; 725 + struct work_struct work; 724 726 }; 725 727 726 - static void free_winch(struct winch *winch, int free_irq_ok) 728 + static void __free_winch(struct work_struct *work) 727 729 { 728 - if (free_irq_ok) 729 - free_irq(WINCH_IRQ, winch); 730 - 731 - list_del(&winch->list); 730 + struct winch *winch = container_of(work, struct winch, work); 731 + free_irq(WINCH_IRQ, winch); 732 732 733 733 if (winch->pid != -1) 734 734 os_kill_process(winch->pid, 1); 735 - if (winch->fd != -1) 736 - os_close_file(winch->fd); 737 735 if (winch->stack != 0) 738 736 free_stack(winch->stack, 0); 739 737 kfree(winch); 738 + } 739 + 740 + static void free_winch(struct winch *winch) 741 + { 742 + int fd = winch->fd; 743 + winch->fd = -1; 744 + if (fd != -1) 745 + os_close_file(fd); 746 + list_del(&winch->list); 747 + __free_winch(&winch->work); 740 748 } 741 749 742 750 static irqreturn_t winch_interrupt(int irq, void *data) ··· 751 745 struct winch *winch = data; 752 746 struct tty_struct *tty; 753 747 struct line *line; 748 + int fd = winch->fd; 754 749 int err; 755 750 char c; 756 751 757 - if (winch->fd != -1) { 758 - err = generic_read(winch->fd, &c, NULL); 752 + if (fd != -1) { 753 + err = generic_read(fd, &c, NULL); 759 754 if (err < 0) { 760 755 if (err != -EAGAIN) { 756 + winch->fd = -1; 757 + list_del(&winch->list); 758 + os_close_file(fd); 761 759 printk(KERN_ERR "winch_interrupt : " 762 760 "read failed, errno = %d\n", -err); 763 761 printk(KERN_ERR "fd %d is losing SIGWINCH " 764 762 "support\n", winch->tty_fd); 765 - free_winch(winch, 0); 763 + 
INIT_WORK(&winch->work, __free_winch); 764 + schedule_work(&winch->work); 766 765 return IRQ_HANDLED; 767 766 } 768 767 goto out; ··· 839 828 list_for_each_safe(ele, next, &winch_handlers) { 840 829 winch = list_entry(ele, struct winch, list); 841 830 if (winch->tty == tty) { 842 - free_winch(winch, 1); 831 + free_winch(winch); 843 832 break; 844 833 } 845 834 } ··· 855 844 856 845 list_for_each_safe(ele, next, &winch_handlers) { 857 846 winch = list_entry(ele, struct winch, list); 858 - free_winch(winch, 1); 847 + free_winch(winch); 859 848 } 860 849 861 850 spin_unlock(&winch_handler_lock);
+1
arch/um/drivers/xterm.c
··· 123 123 err = -errno; 124 124 printk(UM_KERN_ERR "xterm_open : unlink failed, errno = %d\n", 125 125 errno); 126 + close(fd); 126 127 return err; 127 128 } 128 129 close(fd);
-4
arch/um/include/asm/ptrace-generic.h
··· 42 42 unsigned long addr, unsigned long data); 43 43 extern unsigned long getreg(struct task_struct *child, int regno); 44 44 extern int putreg(struct task_struct *child, int regno, unsigned long value); 45 - extern int get_fpregs(struct user_i387_struct __user *buf, 46 - struct task_struct *child); 47 - extern int set_fpregs(struct user_i387_struct __user *buf, 48 - struct task_struct *child); 49 45 50 46 extern int arch_copy_tls(struct task_struct *new); 51 47 extern void clear_flushed_tls(struct task_struct *task);
+1
arch/um/include/shared/line.h
··· 33 33 struct line { 34 34 struct tty_struct *tty; 35 35 spinlock_t count_lock; 36 + unsigned long count; 36 37 int valid; 37 38 38 39 char *init_str;
+1 -1
arch/um/include/shared/registers.h
··· 16 16 extern int save_registers(int pid, struct uml_pt_regs *regs); 17 17 extern int restore_registers(int pid, struct uml_pt_regs *regs); 18 18 extern int init_registers(int pid); 19 - extern void get_safe_registers(unsigned long *regs); 19 + extern void get_safe_registers(unsigned long *regs, unsigned long *fp_regs); 20 20 extern unsigned long get_thread_reg(int reg, jmp_buf *buf); 21 21 extern int get_fp_registers(int pid, unsigned long *regs); 22 22 extern int put_fp_registers(int pid, unsigned long *regs);
+1 -1
arch/um/kernel/process.c
··· 202 202 arch_copy_thread(&current->thread.arch, &p->thread.arch); 203 203 } 204 204 else { 205 - get_safe_registers(p->thread.regs.regs.gp); 205 + get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp); 206 206 p->thread.request.u.thread = current->thread.request.u.thread; 207 207 handler = new_thread_handler; 208 208 }
-28
arch/um/kernel/ptrace.c
··· 50 50 void __user *vp = p; 51 51 52 52 switch (request) { 53 - /* read word at location addr. */ 54 - case PTRACE_PEEKTEXT: 55 - case PTRACE_PEEKDATA: 56 - ret = generic_ptrace_peekdata(child, addr, data); 57 - break; 58 - 59 53 /* read the word at location addr in the USER area. */ 60 54 case PTRACE_PEEKUSR: 61 55 ret = peek_user(child, addr, data); 62 - break; 63 - 64 - /* write the word at location addr. */ 65 - case PTRACE_POKETEXT: 66 - case PTRACE_POKEDATA: 67 - ret = generic_ptrace_pokedata(child, addr, data); 68 56 break; 69 57 70 58 /* write the word at location addr in the USER area */ ··· 95 107 break; 96 108 } 97 109 #endif 98 - #ifdef PTRACE_GETFPREGS 99 - case PTRACE_GETFPREGS: /* Get the child FPU state. */ 100 - ret = get_fpregs(vp, child); 101 - break; 102 - #endif 103 - #ifdef PTRACE_SETFPREGS 104 - case PTRACE_SETFPREGS: /* Set the child FPU state. */ 105 - ret = set_fpregs(vp, child); 106 - break; 107 - #endif 108 110 case PTRACE_GET_THREAD_AREA: 109 111 ret = ptrace_get_thread_area(child, addr, vp); 110 112 break; ··· 131 153 ret = -EIO; 132 154 break; 133 155 } 134 - #endif 135 - #ifdef PTRACE_ARCH_PRCTL 136 - case PTRACE_ARCH_PRCTL: 137 - /* XXX Calls ptrace on the host - needs some SMP thinking */ 138 - ret = arch_prctl(child, data, (void __user *) addr); 139 - break; 140 156 #endif 141 157 default: 142 158 ret = ptrace_request(child, request, addr, data);
+8 -1
arch/um/os-Linux/registers.c
··· 8 8 #include <string.h> 9 9 #include <sys/ptrace.h> 10 10 #include "sysdep/ptrace.h" 11 + #include "sysdep/ptrace_user.h" 12 + #include "registers.h" 11 13 12 14 int save_registers(int pid, struct uml_pt_regs *regs) 13 15 { ··· 34 32 /* This is set once at boot time and not changed thereafter */ 35 33 36 34 static unsigned long exec_regs[MAX_REG_NR]; 35 + static unsigned long exec_fp_regs[FP_SIZE]; 37 36 38 37 int init_registers(int pid) 39 38 { ··· 45 42 return -errno; 46 43 47 44 arch_init_registers(pid); 45 + get_fp_registers(pid, exec_fp_regs); 48 46 return 0; 49 47 } 50 48 51 - void get_safe_registers(unsigned long *regs) 49 + void get_safe_registers(unsigned long *regs, unsigned long *fp_regs) 52 50 { 53 51 memcpy(regs, exec_regs, sizeof(exec_regs)); 52 + 53 + if (fp_regs) 54 + memcpy(fp_regs, exec_fp_regs, sizeof(exec_fp_regs)); 54 55 }
+1 -1
arch/um/os-Linux/skas/mem.c
··· 39 39 40 40 static int __init init_syscall_regs(void) 41 41 { 42 - get_safe_registers(syscall_regs); 42 + get_safe_registers(syscall_regs, NULL); 43 43 syscall_regs[REGS_IP_INDEX] = STUB_CODE + 44 44 ((unsigned long) &batch_syscall_stub - 45 45 (unsigned long) &__syscall_stub_start);
+18 -1
arch/um/os-Linux/skas/process.c
··· 373 373 if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp)) 374 374 fatal_sigsegv(); 375 375 376 + if (put_fp_registers(pid, regs->fp)) 377 + fatal_sigsegv(); 378 + 376 379 /* Now we set local_using_sysemu to be used for one loop */ 377 380 local_using_sysemu = get_using_sysemu(); 378 381 ··· 398 395 regs->is_user = 1; 399 396 if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) { 400 397 printk(UM_KERN_ERR "userspace - PTRACE_GETREGS failed, " 398 + "errno = %d\n", errno); 399 + fatal_sigsegv(); 400 + } 401 + 402 + if (get_fp_registers(pid, regs->fp)) { 403 + printk(UM_KERN_ERR "userspace - get_fp_registers failed, " 401 404 "errno = %d\n", errno); 402 405 fatal_sigsegv(); 403 406 } ··· 466 457 } 467 458 468 459 static unsigned long thread_regs[MAX_REG_NR]; 460 + static unsigned long thread_fp_regs[FP_SIZE]; 469 461 470 462 static int __init init_thread_regs(void) 471 463 { 472 - get_safe_registers(thread_regs); 464 + get_safe_registers(thread_regs, thread_fp_regs); 473 465 /* Set parent's instruction pointer to start of clone-stub */ 474 466 thread_regs[REGS_IP_INDEX] = STUB_CODE + 475 467 (unsigned long) stub_clone_handler - ··· 510 500 err = -errno; 511 501 printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_SETREGS " 512 502 "failed, pid = %d, errno = %d\n", pid, -err); 503 + return err; 504 + } 505 + 506 + err = put_fp_registers(pid, thread_fp_regs); 507 + if (err < 0) { 508 + printk(UM_KERN_ERR "copy_context_skas0 : put_fp_registers " 509 + "failed, pid = %d, err = %d\n", pid, err); 513 510 return err; 514 511 } 515 512
-5
arch/um/sys-i386/asm/ptrace.h
··· 42 42 */ 43 43 struct user_desc; 44 44 45 - extern int get_fpxregs(struct user_fxsr_struct __user *buf, 46 - struct task_struct *child); 47 - extern int set_fpxregs(struct user_fxsr_struct __user *buf, 48 - struct task_struct *tsk); 49 - 50 45 extern int ptrace_get_thread_area(struct task_struct *child, int idx, 51 46 struct user_desc __user *user_desc); 52 47
+23 -5
arch/um/sys-i386/ptrace.c
··· 145 145 return put_user(tmp, (unsigned long __user *) data); 146 146 } 147 147 148 - int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 148 + static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 149 149 { 150 150 int err, n, cpu = ((struct thread_info *) child->stack)->cpu; 151 151 struct user_i387_struct fpregs; ··· 161 161 return n; 162 162 } 163 163 164 - int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 164 + static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 165 165 { 166 166 int n, cpu = ((struct thread_info *) child->stack)->cpu; 167 167 struct user_i387_struct fpregs; ··· 174 174 (unsigned long *) &fpregs); 175 175 } 176 176 177 - int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) 177 + static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) 178 178 { 179 179 int err, n, cpu = ((struct thread_info *) child->stack)->cpu; 180 180 struct user_fxsr_struct fpregs; ··· 190 190 return n; 191 191 } 192 192 193 - int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) 193 + static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) 194 194 { 195 195 int n, cpu = ((struct thread_info *) child->stack)->cpu; 196 196 struct user_fxsr_struct fpregs; ··· 206 206 long subarch_ptrace(struct task_struct *child, long request, 207 207 unsigned long addr, unsigned long data) 208 208 { 209 - return -EIO; 209 + int ret = -EIO; 210 + void __user *datap = (void __user *) data; 211 + switch (request) { 212 + case PTRACE_GETFPREGS: /* Get the child FPU state. */ 213 + ret = get_fpregs(datap, child); 214 + break; 215 + case PTRACE_SETFPREGS: /* Set the child FPU state. */ 216 + ret = set_fpregs(datap, child); 217 + break; 218 + case PTRACE_GETFPXREGS: /* Get the child FPU state. */ 219 + ret = get_fpxregs(datap, child); 220 + break; 221 + case PTRACE_SETFPXREGS: /* Set the child FPU state. */ 222 + ret = set_fpxregs(datap, child); 223 + break; 224 + default: 225 + ret = -EIO; 226 + } 227 + return ret; 210 228 }
+1
arch/um/sys-i386/shared/sysdep/ptrace.h
··· 53 53 54 54 struct uml_pt_regs { 55 55 unsigned long gp[MAX_REG_NR]; 56 + unsigned long fp[HOST_FPX_SIZE]; 56 57 struct faultinfo faultinfo; 57 58 long syscall; 58 59 int is_user;
+8 -4
arch/um/sys-x86_64/ptrace.c
··· 145 145 return instr == 0x050f; 146 146 } 147 147 148 - int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 148 + static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 149 149 { 150 150 int err, n, cpu = ((struct thread_info *) child->stack)->cpu; 151 151 long fpregs[HOST_FP_SIZE]; ··· 162 162 return n; 163 163 } 164 164 165 - int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 165 + static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 166 166 { 167 167 int n, cpu = ((struct thread_info *) child->stack)->cpu; 168 168 long fpregs[HOST_FP_SIZE]; ··· 182 182 void __user *datap = (void __user *) data; 183 183 184 184 switch (request) { 185 - case PTRACE_GETFPXREGS: /* Get the child FPU state. */ 185 + case PTRACE_GETFPREGS: /* Get the child FPU state. */ 186 186 ret = get_fpregs(datap, child); 187 187 break; 188 - case PTRACE_SETFPXREGS: /* Set the child FPU state. */ 188 + case PTRACE_SETFPREGS: /* Set the child FPU state. */ 189 189 ret = set_fpregs(datap, child); 190 + break; 191 + case PTRACE_ARCH_PRCTL: 192 + /* XXX Calls ptrace on the host - needs some SMP thinking */ 193 + ret = arch_prctl(child, data, (void __user *) addr); 190 194 break; 191 195 } 192 196
+1
arch/um/sys-x86_64/shared/sysdep/ptrace.h
··· 85 85 86 86 struct uml_pt_regs { 87 87 unsigned long gp[MAX_REG_NR]; 88 + unsigned long fp[HOST_FP_SIZE]; 88 89 struct faultinfo faultinfo; 89 90 long syscall; 90 91 int is_user;
-1
arch/x86/include/asm/alternative-asm.h
··· 16 16 #endif 17 17 18 18 .macro altinstruction_entry orig alt feature orig_len alt_len 19 - .align 8 20 19 .long \orig - . 21 20 .long \alt - . 22 21 .word \feature
-4
arch/x86/include/asm/alternative.h
··· 48 48 u16 cpuid; /* cpuid bit set for replacement */ 49 49 u8 instrlen; /* length of original instruction */ 50 50 u8 replacementlen; /* length of new instruction, <= instrlen */ 51 - #ifdef CONFIG_X86_64 52 - u32 pad2; 53 - #endif 54 51 }; 55 52 56 53 extern void alternative_instructions(void); ··· 80 83 \ 81 84 "661:\n\t" oldinstr "\n662:\n" \ 82 85 ".section .altinstructions,\"a\"\n" \ 83 - _ASM_ALIGN "\n" \ 84 86 " .long 661b - .\n" /* label */ \ 85 87 " .long 663f - .\n" /* new instruction */ \ 86 88 " .word " __stringify(feature) "\n" /* feature bit */ \
-2
arch/x86/include/asm/cpufeature.h
··· 332 332 asm goto("1: jmp %l[t_no]\n" 333 333 "2:\n" 334 334 ".section .altinstructions,\"a\"\n" 335 - _ASM_ALIGN "\n" 336 335 " .long 1b - .\n" 337 336 " .long 0\n" /* no replacement */ 338 337 " .word %P0\n" /* feature bit */ ··· 349 350 asm volatile("1: movb $0,%0\n" 350 351 "2:\n" 351 352 ".section .altinstructions,\"a\"\n" 352 - _ASM_ALIGN "\n" 353 353 " .long 1b - .\n" 354 354 " .long 3f - .\n" 355 355 " .word %P1\n" /* feature bit */
+1 -1
arch/x86/include/asm/pvclock.h
··· 44 44 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) ); 45 45 #elif defined(__x86_64__) 46 46 __asm__ ( 47 - "mul %[mul_frac] ; shrd $32, %[hi], %[lo]" 47 + "mulq %[mul_frac] ; shrd $32, %[hi], %[lo]" 48 48 : [lo]"=a"(product), 49 49 [hi]"=d"(tmp) 50 50 : "0"(delta),
+3
arch/x86/kernel/cpu/perf_event.c
··· 1900 1900 1901 1901 perf_callchain_store(entry, regs->ip); 1902 1902 1903 + if (!current->mm) 1904 + return; 1905 + 1903 1906 if (perf_callchain_user32(regs, entry)) 1904 1907 return; 1905 1908
+7 -2
arch/x86/pci/acpi.c
··· 365 365 */ 366 366 if (bus) { 367 367 struct pci_bus *child; 368 - list_for_each_entry(child, &bus->children, node) 369 - pcie_bus_configure_settings(child, child->self->pcie_mpss); 368 + list_for_each_entry(child, &bus->children, node) { 369 + struct pci_dev *self = child->self; 370 + if (!self) 371 + continue; 372 + 373 + pcie_bus_configure_settings(child, self->pcie_mpss); 374 + } 370 375 } 371 376 372 377 if (!bus)
+2 -4
arch/x86/xen/mmu.c
··· 1721 1721 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES; 1722 1722 } 1723 1723 #ifdef CONFIG_X86_32 1724 - if ((machine_to_phys_mapping + machine_to_phys_nr) 1725 - < machine_to_phys_mapping) 1726 - machine_to_phys_nr = (unsigned long *)NULL 1727 - - machine_to_phys_mapping; 1724 + WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1)) 1725 + < machine_to_phys_mapping); 1728 1726 #endif 1729 1727 } 1730 1728
+21
arch/x86/xen/setup.c
··· 184 184 PFN_UP(start_pci), PFN_DOWN(last)); 185 185 return identity; 186 186 } 187 + 188 + static unsigned long __init xen_get_max_pages(void) 189 + { 190 + unsigned long max_pages = MAX_DOMAIN_PAGES; 191 + domid_t domid = DOMID_SELF; 192 + int ret; 193 + 194 + ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid); 195 + if (ret > 0) 196 + max_pages = ret; 197 + return min(max_pages, MAX_DOMAIN_PAGES); 198 + } 199 + 187 200 /** 188 201 * machine_specific_memory_setup - Hook for machine specific memory setup. 189 202 **/ ··· 304 291 "XEN START INFO"); 305 292 306 293 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); 294 + 295 + extra_limit = xen_get_max_pages(); 296 + if (max_pfn + extra_pages > extra_limit) { 297 + if (extra_limit > max_pfn) 298 + extra_pages = extra_limit - max_pfn; 299 + else 300 + extra_pages = 0; 301 + } 307 302 308 303 extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820); 309 304
+10 -1
arch/x86/xen/smp.c
··· 32 32 #include <xen/page.h> 33 33 #include <xen/events.h> 34 34 35 + #include <xen/hvc-console.h> 35 36 #include "xen-ops.h" 36 37 #include "mmu.h" 37 38 ··· 208 207 unsigned cpu; 209 208 unsigned int i; 210 209 210 + if (skip_ioapic_setup) { 211 + char *m = (max_cpus == 0) ? 212 + "The nosmp parameter is incompatible with Xen; " \ 213 + "use Xen dom0_max_vcpus=1 parameter" : 214 + "The noapic parameter is incompatible with Xen"; 215 + 216 + xen_raw_printk(m); 217 + panic(m); 218 + } 211 219 xen_init_lock_cpu(0); 212 220 213 221 smp_store_cpu_info(0); ··· 532 522 WARN_ON(xen_smp_intr_init(0)); 533 523 534 524 xen_init_lock_cpu(0); 535 - xen_init_spinlocks(); 536 525 } 537 526 538 527 static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
+3 -2
arch/x86/xen/time.c
··· 168 168 struct pvclock_vcpu_time_info *src; 169 169 cycle_t ret; 170 170 171 - src = &get_cpu_var(xen_vcpu)->time; 171 + preempt_disable_notrace(); 172 + src = &__get_cpu_var(xen_vcpu)->time; 172 173 ret = pvclock_clocksource_read(src); 173 - put_cpu_var(xen_vcpu); 174 + preempt_enable_notrace(); 174 175 return ret; 175 176 } 176 177
+5 -3
arch/x86/xen/xen-asm_32.S
··· 113 113 114 114 /* 115 115 * If there's something pending, mask events again so we can 116 - * jump back into xen_hypervisor_callback 116 + * jump back into xen_hypervisor_callback. Otherwise do not 117 + * touch XEN_vcpu_info_mask. 117 118 */ 118 - sete XEN_vcpu_info_mask(%eax) 119 + jne 1f 120 + movb $1, XEN_vcpu_info_mask(%eax) 119 121 120 - popl %eax 122 + 1: popl %eax 121 123 122 124 /* 123 125 * From this point on the registers are restored and the stack
+1 -1
drivers/acpi/acpica/acconfig.h
··· 121 121 122 122 /* Maximum sleep allowed via Sleep() operator */ 123 123 124 - #define ACPI_MAX_SLEEP 20000 /* Two seconds */ 124 + #define ACPI_MAX_SLEEP 2000 /* Two seconds */ 125 125 126 126 /****************************************************************************** 127 127 *
+1
drivers/acpi/apei/Kconfig
··· 13 13 bool "APEI Generic Hardware Error Source" 14 14 depends on ACPI_APEI && X86 15 15 select ACPI_HED 16 + select IRQ_WORK 16 17 select LLIST 17 18 select GENERIC_ALLOCATOR 18 19 help
+1 -1
drivers/acpi/apei/apei-base.c
··· 618 618 }; 619 619 620 620 capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; 621 - capbuf[OSC_SUPPORT_TYPE] = 0; 621 + capbuf[OSC_SUPPORT_TYPE] = 1; 622 622 capbuf[OSC_CONTROL_TYPE] = 0; 623 623 624 624 if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
+1 -4
drivers/base/regmap/regmap.c
··· 168 168 map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL); 169 169 if (map->work_buf == NULL) { 170 170 ret = -ENOMEM; 171 - goto err_bus; 171 + goto err_map; 172 172 } 173 173 174 174 return map; 175 175 176 - err_bus: 177 - module_put(map->bus->owner); 178 176 err_map: 179 177 kfree(map); 180 178 err: ··· 186 188 void regmap_exit(struct regmap *map) 187 189 { 188 190 kfree(map->work_buf); 189 - module_put(map->bus->owner); 190 191 kfree(map); 191 192 } 192 193 EXPORT_SYMBOL_GPL(regmap_exit);
+3
drivers/cpufreq/pcc-cpufreq.c
··· 261 261 pr = per_cpu(processors, cpu); 262 262 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); 263 263 264 + if (!pr) 265 + return -ENODEV; 266 + 264 267 status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer); 265 268 if (ACPI_FAILURE(status)) 266 269 return -ENODEV;
+29 -13
drivers/dma/ste_dma40.c
··· 174 174 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a 175 175 * transfer and call client callback. 176 176 * @client: Cliented owned descriptor list. 177 + * @pending_queue: Submitted jobs, to be issued by issue_pending() 177 178 * @active: Active descriptor. 178 179 * @queue: Queued jobs. 180 + * @prepare_queue: Prepared jobs. 179 181 * @dma_cfg: The client configuration of this dma channel. 180 182 * @configured: whether the dma_cfg configuration is valid 181 183 * @base: Pointer to the device instance struct. ··· 205 203 struct list_head pending_queue; 206 204 struct list_head active; 207 205 struct list_head queue; 206 + struct list_head prepare_queue; 208 207 struct stedma40_chan_cfg dma_cfg; 209 208 bool configured; 210 209 struct d40_base *base; ··· 480 477 481 478 list_for_each_entry_safe(d, _d, &d40c->client, node) 482 479 if (async_tx_test_ack(&d->txd)) { 483 - d40_pool_lli_free(d40c, d); 484 480 d40_desc_remove(d); 485 481 desc = d; 486 482 memset(desc, 0, sizeof(*desc)); ··· 646 644 return d; 647 645 } 648 646 647 + /* remove desc from current queue and add it to the pending_queue */ 649 648 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) 650 649 { 650 + d40_desc_remove(desc); 651 + desc->is_in_client_list = false; 651 652 list_add_tail(&desc->node, &d40c->pending_queue); 652 653 } 653 654 ··· 808 803 static void d40_term_all(struct d40_chan *d40c) 809 804 { 810 805 struct d40_desc *d40d; 806 + struct d40_desc *_d; 811 807 812 808 /* Release active descriptors */ 813 809 while ((d40d = d40_first_active_get(d40c))) { ··· 827 821 d40_desc_remove(d40d); 828 822 d40_desc_free(d40c, d40d); 829 823 } 824 + 825 + /* Release client owned descriptors */ 826 + if (!list_empty(&d40c->client)) 827 + list_for_each_entry_safe(d40d, _d, &d40c->client, node) { 828 + d40_desc_remove(d40d); 829 + d40_desc_free(d40c, d40d); 830 + } 831 + 832 + /* Release descriptors in prepare queue */ 833 + if (!list_empty(&d40c->prepare_queue)) 834 + list_for_each_entry_safe(d40d, _d, 835 + &d40c->prepare_queue, node) { 836 + d40_desc_remove(d40d); 837 + d40_desc_free(d40c, d40d); 838 + } 830 839 831 840 d40c->pending_tx = 0; 832 841 d40c->busy = false; ··· 1229 1208 1230 1209 if (!d40d->cyclic) { 1231 1210 if (async_tx_test_ack(&d40d->txd)) { 1232 - d40_pool_lli_free(d40c, d40d); 1233 1211 d40_desc_remove(d40d); 1234 1212 d40_desc_free(d40c, d40d); 1235 1213 } else { ··· 1615 1595 u32 event; 1616 1596 struct d40_phy_res *phy = d40c->phy_chan; 1617 1597 bool is_src; 1618 - struct d40_desc *d; 1619 - struct d40_desc *_d; 1620 - 1621 1598 1622 1599 /* Terminate all queued and active transfers */ 1623 1600 d40_term_all(d40c); 1624 - 1625 - /* Release client owned descriptors */ 1626 - if (!list_empty(&d40c->client)) 1627 - list_for_each_entry_safe(d, _d, &d40c->client, node) { 1628 - d40_pool_lli_free(d40c, d); 1629 - d40_desc_remove(d); 1630 - d40_desc_free(d40c, d); 1631 - } 1632 1601 1633 1602 if (phy == NULL) { 1634 1603 chan_err(d40c, "phy == null\n"); ··· 1919 1910 chan_is_logical(chan) ? 
"log" : "phy", ret); 1920 1911 goto err; 1921 1912 } 1913 + 1914 + /* 1915 + * add descriptor to the prepare queue in order to be able 1916 + * to free them later in terminate_all 1917 + */ 1918 + list_add_tail(&desc->node, &chan->prepare_queue); 1922 1919 1923 1920 spin_unlock_irqrestore(&chan->lock, flags); 1924 1921 ··· 2415 2400 INIT_LIST_HEAD(&d40c->queue); 2416 2401 INIT_LIST_HEAD(&d40c->pending_queue); 2417 2402 INIT_LIST_HEAD(&d40c->client); 2403 + INIT_LIST_HEAD(&d40c->prepare_queue); 2418 2404 2419 2405 tasklet_init(&d40c->tasklet, dma_tasklet, 2420 2406 (unsigned long) d40c);
+3
drivers/firewire/ohci.c
··· 290 290 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, 291 291 QUIRK_CYCLE_TIMER}, 292 292 293 + {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID, 294 + QUIRK_NO_MSI}, 295 + 293 296 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, 294 297 QUIRK_CYCLE_TIMER}, 295 298
+5 -10
drivers/gpio/gpio-generic.c
··· 351 351 return 0; 352 352 } 353 353 354 - int __devexit bgpio_remove(struct bgpio_chip *bgc) 354 + int bgpio_remove(struct bgpio_chip *bgc) 355 355 { 356 356 int err = gpiochip_remove(&bgc->gc); 357 357 ··· 361 361 } 362 362 EXPORT_SYMBOL_GPL(bgpio_remove); 363 363 364 - int __devinit bgpio_init(struct bgpio_chip *bgc, 365 - struct device *dev, 366 - unsigned long sz, 367 - void __iomem *dat, 368 - void __iomem *set, 369 - void __iomem *clr, 370 - void __iomem *dirout, 371 - void __iomem *dirin, 372 - bool big_endian) 364 + int bgpio_init(struct bgpio_chip *bgc, struct device *dev, 365 + unsigned long sz, void __iomem *dat, void __iomem *set, 366 + void __iomem *clr, void __iomem *dirout, void __iomem *dirin, 367 + bool big_endian) 373 368 { 374 369 int ret; 375 370
-1
drivers/gpu/drm/drm_fb_helper.c
··· 256 256 { 257 257 printk(KERN_ERR "panic occurred, switching back to text console\n"); 258 258 return drm_fb_helper_force_kernel_mode(); 259 - return 0; 260 259 } 261 260 EXPORT_SYMBOL(drm_fb_helper_panic); 262 261
+2 -1
drivers/gpu/drm/nouveau/nouveau_fence.c
··· 530 530 nouveau_gpuobj_ref(NULL, &obj); 531 531 if (ret) 532 532 return ret; 533 - } else { 533 + } else 534 + if (USE_SEMA(dev)) { 534 535 /* map fence bo into channel's vm */ 535 536 ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm, 536 537 &chan->fence.vma);
+5 -2
drivers/gpu/drm/nouveau/nouveau_sgdma.c
··· 37 37 return -ENOMEM; 38 38 39 39 nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL); 40 - if (!nvbe->ttm_alloced) 40 + if (!nvbe->ttm_alloced) { 41 + kfree(nvbe->pages); 42 + nvbe->pages = NULL; 41 43 return -ENOMEM; 44 + } 42 45 43 46 nvbe->nr_pages = 0; 44 47 while (num_pages--) { ··· 129 126 130 127 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { 131 128 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); 132 - dma_offset += NV_CTXDMA_PAGE_SIZE; 129 + offset_l += NV_CTXDMA_PAGE_SIZE; 133 130 } 134 131 } 135 132
+13 -2
drivers/gpu/drm/nouveau/nv04_crtc.c
··· 781 781 struct drm_device *dev = crtc->dev; 782 782 struct drm_nouveau_private *dev_priv = dev->dev_private; 783 783 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 784 - struct drm_framebuffer *drm_fb = nv_crtc->base.fb; 785 - struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); 784 + struct drm_framebuffer *drm_fb; 785 + struct nouveau_framebuffer *fb; 786 786 int arb_burst, arb_lwm; 787 787 int ret; 788 + 789 + NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 790 + 791 + /* no fb bound */ 792 + if (!atomic && !crtc->fb) { 793 + NV_DEBUG_KMS(dev, "No FB bound\n"); 794 + return 0; 795 + } 796 + 788 797 789 798 /* If atomic, we want to switch to the fb we were passed, so 790 799 * now we update pointers to do that. (We don't pin; just ··· 803 794 drm_fb = passed_fb; 804 795 fb = nouveau_framebuffer(passed_fb); 805 796 } else { 797 + drm_fb = crtc->fb; 798 + fb = nouveau_framebuffer(crtc->fb); 806 799 /* If not atomic, we can go ahead and pin, and unpin the 807 800 * old fb we were passed. 808 801 */
+10 -2
drivers/gpu/drm/nouveau/nv50_crtc.c
··· 519 519 struct drm_device *dev = nv_crtc->base.dev; 520 520 struct drm_nouveau_private *dev_priv = dev->dev_private; 521 521 struct nouveau_channel *evo = nv50_display(dev)->master; 522 - struct drm_framebuffer *drm_fb = nv_crtc->base.fb; 523 - struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); 522 + struct drm_framebuffer *drm_fb; 523 + struct nouveau_framebuffer *fb; 524 524 int ret; 525 525 526 526 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 527 + 528 + /* no fb bound */ 529 + if (!atomic && !crtc->fb) { 530 + NV_DEBUG_KMS(dev, "No FB bound\n"); 531 + return 0; 532 + } 527 533 528 534 /* If atomic, we want to switch to the fb we were passed, so 529 535 * now we update pointers to do that. (We don't pin; just ··· 539 533 drm_fb = passed_fb; 540 534 fb = nouveau_framebuffer(passed_fb); 541 535 } else { 536 + drm_fb = crtc->fb; 537 + fb = nouveau_framebuffer(crtc->fb); 542 538 /* If not atomic, we can go ahead and pin, and unpin the 543 539 * old fb we were passed. 544 540 */
+35 -6
drivers/gpu/drm/radeon/evergreen.c
··· 41 41 void evergreen_fini(struct radeon_device *rdev); 42 42 static void evergreen_pcie_gen2_enable(struct radeon_device *rdev); 43 43 44 + void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) 45 + { 46 + u16 ctl, v; 47 + int cap, err; 48 + 49 + cap = pci_pcie_cap(rdev->pdev); 50 + if (!cap) 51 + return; 52 + 53 + err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl); 54 + if (err) 55 + return; 56 + 57 + v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12; 58 + 59 + /* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it 60 + * to avoid hangs or performance issues 61 + */ 62 + if ((v == 0) || (v == 6) || (v == 7)) { 63 + ctl &= ~PCI_EXP_DEVCTL_READRQ; 64 + ctl |= (2 << 12); 65 + pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl); 66 + } 67 + } 68 + 44 69 void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) 45 70 { 46 71 /* enable the pflip int */ ··· 1404 1379 /* Initialize the ring buffer's read and write pointers */ 1405 1380 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 1406 1381 WREG32(CP_RB_RPTR_WR, 0); 1407 - WREG32(CP_RB_WPTR, 0); 1382 + rdev->cp.wptr = 0; 1383 + WREG32(CP_RB_WPTR, rdev->cp.wptr); 1408 1384 1409 1385 /* set the wb address wether it's enabled or not */ 1410 1386 WREG32(CP_RB_RPTR_ADDR, ··· 1427 1401 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 1428 1402 1429 1403 rdev->cp.rptr = RREG32(CP_RB_RPTR); 1430 - rdev->cp.wptr = RREG32(CP_RB_WPTR); 1431 1404 1432 1405 evergreen_cp_start(rdev); 1433 1406 rdev->cp.ready = true; ··· 1887 1862 } 1888 1863 1889 1864 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 1865 + 1866 + evergreen_fix_pci_max_read_req_size(rdev); 1890 1867 1891 1868 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; 1892 1869 ··· 3171 3144 } 3172 3145 3173 3146 int evergreen_copy_blit(struct radeon_device *rdev, 3174 - uint64_t src_offset, uint64_t dst_offset, 3175 - unsigned num_pages, struct radeon_fence *fence) 3147 + uint64_t src_offset, 3148 + uint64_t dst_offset, 3149 + unsigned num_gpu_pages, 3150 + struct radeon_fence *fence) 3176 3151 { 3177 3152 int r; 3178 3153 3179 3154 mutex_lock(&rdev->r600_blit.mutex); 3180 3155 rdev->r600_blit.vb_ib = NULL; 3181 - r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); 3156 + r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE); 3182 3157 if (r) { 3183 3158 if (rdev->r600_blit.vb_ib) 3184 3159 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); 3185 3160 mutex_unlock(&rdev->r600_blit.mutex); 3186 3161 return r; 3187 3162 } 3188 - evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); 3163 + evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE); 3189 3164 evergreen_blit_done_copy(rdev, fence); 3190 3165 mutex_unlock(&rdev->r600_blit.mutex); 3191 3166 return 0;
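On the magic numbers in evergreen_fix_pci_max_read_req_size() (my reading of the PCIe spec, not text from the changelog): DEVCTL bits 14:12 encode the maximum read request size as 128 << v bytes, so encodings 0..5 span 128..4096 bytes and 6..7 are reserved; forcing v = 2 therefore selects 512 bytes. The hunk also rejects v = 0 (128 bytes), which is valid per spec but evidently problematic on this hardware. A sketch of the decode:

    #include <linux/pci_regs.h>     /* PCI_EXP_DEVCTL_READRQ == 0x7000 */

    /* Decode the READRQ field of a PCIe Device Control register value. */
    static unsigned int mrrs_bytes(u16 devctl)
    {
            unsigned int v = (devctl & PCI_EXP_DEVCTL_READRQ) >> 12;

            return 128U << v;       /* v=2 -> 512 bytes; v=6,7 are reserved */
    }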
+9 -6
drivers/gpu/drm/radeon/ni.c
··· 39 39 extern void evergreen_mc_program(struct radeon_device *rdev); 40 40 extern void evergreen_irq_suspend(struct radeon_device *rdev); 41 41 extern int evergreen_mc_init(struct radeon_device *rdev); 42 + extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev); 42 43 43 44 #define EVERGREEN_PFP_UCODE_SIZE 1120 44 45 #define EVERGREEN_PM4_UCODE_SIZE 1376 ··· 670 669 671 670 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 672 671 672 + evergreen_fix_pci_max_read_req_size(rdev); 673 + 673 674 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 674 675 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 675 676 ··· 1187 1184 1188 1185 /* Initialize the ring buffer's read and write pointers */ 1189 1186 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); 1190 - WREG32(CP_RB0_WPTR, 0); 1187 + rdev->cp.wptr = 0; 1188 + WREG32(CP_RB0_WPTR, rdev->cp.wptr); 1191 1189 1192 1190 /* set the wb address wether it's enabled or not */ 1193 1191 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); ··· 1208 1204 WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8); 1209 1205 1210 1206 rdev->cp.rptr = RREG32(CP_RB0_RPTR); 1211 - rdev->cp.wptr = RREG32(CP_RB0_WPTR); 1212 1207 1213 1208 /* ring1 - compute only */ 1214 1209 /* Set ring buffer size */ ··· 1220 1217 1221 1218 /* Initialize the ring buffer's read and write pointers */ 1222 1219 WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); 1223 - WREG32(CP_RB1_WPTR, 0); 1220 + rdev->cp1.wptr = 0; 1221 + WREG32(CP_RB1_WPTR, rdev->cp1.wptr); 1224 1222 1225 1223 /* set the wb address wether it's enabled or not */ 1226 1224 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); ··· 1233 1229 WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8); 1234 1230 1235 1231 rdev->cp1.rptr = RREG32(CP_RB1_RPTR); 1236 - rdev->cp1.wptr = RREG32(CP_RB1_WPTR); 1237 1232 1238 1233 /* ring2 - compute only */ 1239 1234 /* Set ring buffer size */ ··· 1245 1242 1246 1243 /* Initialize the ring buffer's read and write pointers */ 1247 1244 WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); 1248 - WREG32(CP_RB2_WPTR, 0); 1245 + rdev->cp2.wptr = 0; 1246 + WREG32(CP_RB2_WPTR, rdev->cp2.wptr); 1249 1247 1250 1248 /* set the wb address wether it's enabled or not */ 1251 1249 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); ··· 1258 1254 WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8); 1259 1255 1260 1256 rdev->cp2.rptr = RREG32(CP_RB2_RPTR); 1261 - rdev->cp2.wptr = RREG32(CP_RB2_WPTR); 1262 1257 1263 1258 /* start the rings */ 1264 1259 cayman_cp_start(rdev);
+10 -12
drivers/gpu/drm/radeon/r100.c
··· 721 721 int r100_copy_blit(struct radeon_device *rdev, 722 722 uint64_t src_offset, 723 723 uint64_t dst_offset, 724 - unsigned num_pages, 724 + unsigned num_gpu_pages, 725 725 struct radeon_fence *fence) 726 726 { 727 727 uint32_t cur_pages; 728 - uint32_t stride_bytes = PAGE_SIZE; 728 + uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE; 729 729 uint32_t pitch; 730 730 uint32_t stride_pixels; 731 731 unsigned ndw; ··· 737 737 /* radeon pitch is /64 */ 738 738 pitch = stride_bytes / 64; 739 739 stride_pixels = stride_bytes / 4; 740 - num_loops = DIV_ROUND_UP(num_pages, 8191); 740 + num_loops = DIV_ROUND_UP(num_gpu_pages, 8191); 741 741 742 742 /* Ask for enough room for blit + flush + fence */ 743 743 ndw = 64 + (10 * num_loops); ··· 746 746 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); 747 747 return -EINVAL; 748 748 } 749 - while (num_pages > 0) { 750 - cur_pages = num_pages; 749 + while (num_gpu_pages > 0) { 750 + cur_pages = num_gpu_pages; 751 751 if (cur_pages > 8191) { 752 752 cur_pages = 8191; 753 753 } 754 - num_pages -= cur_pages; 754 + num_gpu_pages -= cur_pages; 755 755 756 756 /* pages are in Y direction - height 757 757 page width in X direction - width */ ··· 773 773 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); 774 774 radeon_ring_write(rdev, 0); 775 775 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); 776 - radeon_ring_write(rdev, num_pages); 777 - radeon_ring_write(rdev, num_pages); 776 + radeon_ring_write(rdev, cur_pages); 777 + radeon_ring_write(rdev, cur_pages); 778 778 radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); 779 779 } 780 780 radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); ··· 990 990 /* Force read & write ptr to 0 */ 991 991 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); 992 992 WREG32(RADEON_CP_RB_RPTR_WR, 0); 993 - WREG32(RADEON_CP_RB_WPTR, 0); 993 + rdev->cp.wptr = 0; 994 + WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); 994 995 995 996 /* set the wb address whether it's enabled or not */ 996 997 WREG32(R_00070C_CP_RB_RPTR_ADDR, ··· 1008 1007 WREG32(RADEON_CP_RB_CNTL, tmp); 1009 1008 udelay(10); 1010 1009 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); 1011 - rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); 1012 - /* protect against crazy HW on resume */ 1013 - rdev->cp.wptr &= rdev->cp.ptr_mask; 1014 1010 /* Set cp mode to bus mastering & enable cp*/ 1015 1011 WREG32(RADEON_CP_CSQ_MODE, 1016 1012 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
+2 -2
drivers/gpu/drm/radeon/r200.c
··· 84 84 int r200_copy_dma(struct radeon_device *rdev, 85 85 uint64_t src_offset, 86 86 uint64_t dst_offset, 87 - unsigned num_pages, 87 + unsigned num_gpu_pages, 88 88 struct radeon_fence *fence) 89 89 { 90 90 uint32_t size; ··· 93 93 int r = 0; 94 94 95 95 /* radeon pitch is /64 */ 96 - size = num_pages << PAGE_SHIFT; 96 + size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT; 97 97 num_loops = DIV_ROUND_UP(size, 0x1FFFFF); 98 98 r = radeon_ring_lock(rdev, num_loops * 4 + 64); 99 99 if (r) {
+8 -6
drivers/gpu/drm/radeon/r600.c
··· 2209 2209 /* Initialize the ring buffer's read and write pointers */ 2210 2210 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 2211 2211 WREG32(CP_RB_RPTR_WR, 0); 2212 - WREG32(CP_RB_WPTR, 0); 2212 + rdev->cp.wptr = 0; 2213 + WREG32(CP_RB_WPTR, rdev->cp.wptr); 2213 2214 2214 2215 /* set the wb address whether it's enabled or not */ 2215 2216 WREG32(CP_RB_RPTR_ADDR, ··· 2232 2231 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 2233 2232 2234 2233 rdev->cp.rptr = RREG32(CP_RB_RPTR); 2235 - rdev->cp.wptr = RREG32(CP_RB_WPTR); 2236 2234 2237 2235 r600_cp_start(rdev); 2238 2236 rdev->cp.ready = true; ··· 2353 2353 } 2354 2354 2355 2355 int r600_copy_blit(struct radeon_device *rdev, 2356 - uint64_t src_offset, uint64_t dst_offset, 2357 - unsigned num_pages, struct radeon_fence *fence) 2356 + uint64_t src_offset, 2357 + uint64_t dst_offset, 2358 + unsigned num_gpu_pages, 2359 + struct radeon_fence *fence) 2358 2360 { 2359 2361 int r; 2360 2362 2361 2363 mutex_lock(&rdev->r600_blit.mutex); 2362 2364 rdev->r600_blit.vb_ib = NULL; 2363 - r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); 2365 + r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE); 2364 2366 if (r) { 2365 2367 if (rdev->r600_blit.vb_ib) 2366 2368 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); 2367 2369 mutex_unlock(&rdev->r600_blit.mutex); 2368 2370 return r; 2369 2371 } 2370 - r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); 2372 + r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE); 2371 2373 r600_blit_done_copy(rdev, fence); 2372 2374 mutex_unlock(&rdev->r600_blit.mutex); 2373 2375 return 0;
+4 -3
drivers/gpu/drm/radeon/radeon.h
··· 322 322 323 323 #define RADEON_GPU_PAGE_SIZE 4096 324 324 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) 325 + #define RADEON_GPU_PAGE_SHIFT 12 325 326 326 327 struct radeon_gart { 327 328 dma_addr_t table_addr; ··· 915 914 int (*copy_blit)(struct radeon_device *rdev, 916 915 uint64_t src_offset, 917 916 uint64_t dst_offset, 918 - unsigned num_pages, 917 + unsigned num_gpu_pages, 919 918 struct radeon_fence *fence); 920 919 int (*copy_dma)(struct radeon_device *rdev, 921 920 uint64_t src_offset, 922 921 uint64_t dst_offset, 923 - unsigned num_pages, 922 + unsigned num_gpu_pages, 924 923 struct radeon_fence *fence); 925 924 int (*copy)(struct radeon_device *rdev, 926 925 uint64_t src_offset, 927 926 uint64_t dst_offset, 928 - unsigned num_pages, 927 + unsigned num_gpu_pages, 929 928 struct radeon_fence *fence); 930 929 uint32_t (*get_engine_clock)(struct radeon_device *rdev); 931 930 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
+4 -4
drivers/gpu/drm/radeon/radeon_asic.h
··· 75 75 int r100_copy_blit(struct radeon_device *rdev, 76 76 uint64_t src_offset, 77 77 uint64_t dst_offset, 78 - unsigned num_pages, 78 + unsigned num_gpu_pages, 79 79 struct radeon_fence *fence); 80 80 int r100_set_surface_reg(struct radeon_device *rdev, int reg, 81 81 uint32_t tiling_flags, uint32_t pitch, ··· 143 143 extern int r200_copy_dma(struct radeon_device *rdev, 144 144 uint64_t src_offset, 145 145 uint64_t dst_offset, 146 - unsigned num_pages, 146 + unsigned num_gpu_pages, 147 147 struct radeon_fence *fence); 148 148 void r200_set_safe_registers(struct radeon_device *rdev); 149 149 ··· 311 311 int r600_ring_test(struct radeon_device *rdev); 312 312 int r600_copy_blit(struct radeon_device *rdev, 313 313 uint64_t src_offset, uint64_t dst_offset, 314 - unsigned num_pages, struct radeon_fence *fence); 314 + unsigned num_gpu_pages, struct radeon_fence *fence); 315 315 void r600_hpd_init(struct radeon_device *rdev); 316 316 void r600_hpd_fini(struct radeon_device *rdev); 317 317 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); ··· 403 403 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 404 404 int evergreen_copy_blit(struct radeon_device *rdev, 405 405 uint64_t src_offset, uint64_t dst_offset, 406 - unsigned num_pages, struct radeon_fence *fence); 406 + unsigned num_gpu_pages, struct radeon_fence *fence); 407 407 void evergreen_hpd_init(struct radeon_device *rdev); 408 408 void evergreen_hpd_fini(struct radeon_device *rdev); 409 409 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+3
drivers/gpu/drm/radeon/radeon_clocks.c
··· 219 219 } else { 220 220 DRM_INFO("Using generic clock info\n"); 221 221 222 + /* may need to be per card */ 223 + rdev->clock.max_pixel_clock = 35000; 224 + 222 225 if (rdev->flags & RADEON_IS_IGP) { 223 226 p1pll->reference_freq = 1432; 224 227 p2pll->reference_freq = 1432;
+24 -13
drivers/gpu/drm/radeon/radeon_connectors.c
··· 1297 1297 if (!radeon_dig_connector->edp_on) 1298 1298 atombios_set_edp_panel_power(connector, 1299 1299 ATOM_TRANSMITTER_ACTION_POWER_OFF); 1300 - } else { 1301 - /* need to setup ddc on the bridge */ 1302 - if (radeon_connector_encoder_is_dp_bridge(connector)) { 1300 + } else if (radeon_connector_encoder_is_dp_bridge(connector)) { 1301 + /* DP bridges are always DP */ 1302 + radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; 1303 + /* get the DPCD from the bridge */ 1304 + radeon_dp_getdpcd(radeon_connector); 1305 + 1306 + if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) 1307 + ret = connector_status_connected; 1308 + else { 1309 + /* need to setup ddc on the bridge */ 1303 1310 if (encoder) 1304 1311 radeon_atom_ext_encoder_setup_ddc(encoder); 1312 + if (radeon_ddc_probe(radeon_connector, 1313 + radeon_connector->requires_extended_probe)) 1314 + ret = connector_status_connected; 1305 1315 } 1316 + 1317 + if ((ret == connector_status_disconnected) && 1318 + radeon_connector->dac_load_detect) { 1319 + struct drm_encoder *encoder = radeon_best_single_encoder(connector); 1320 + struct drm_encoder_helper_funcs *encoder_funcs; 1321 + if (encoder) { 1322 + encoder_funcs = encoder->helper_private; 1323 + ret = encoder_funcs->detect(encoder, connector); 1324 + } 1325 + } 1326 + } else { 1306 1327 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); 1307 1328 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { 1308 1329 ret = connector_status_connected; ··· 1337 1316 if (radeon_ddc_probe(radeon_connector, 1338 1317 radeon_connector->requires_extended_probe)) 1339 1318 ret = connector_status_connected; 1340 - } 1341 - } 1342 - 1343 - if ((ret == connector_status_disconnected) && 1344 - radeon_connector->dac_load_detect) { 1345 - struct drm_encoder *encoder = radeon_best_single_encoder(connector); 1346 - struct drm_encoder_helper_funcs *encoder_funcs; 1347 - if (encoder) { 1348 - encoder_funcs = encoder->helper_private; 1349 - ret = encoder_funcs->detect(encoder, connector); 1350 1319 } 1351 1320 } 1352 1321 }
+13 -8
drivers/gpu/drm/radeon/radeon_display.c
··· 473 473 spin_lock_irqsave(&dev->event_lock, flags); 474 474 radeon_crtc->unpin_work = NULL; 475 475 unlock_free: 476 - drm_gem_object_unreference_unlocked(old_radeon_fb->obj); 477 476 spin_unlock_irqrestore(&dev->event_lock, flags); 477 + drm_gem_object_unreference_unlocked(old_radeon_fb->obj); 478 478 radeon_fence_unref(&work->fence); 479 479 kfree(work); 480 480 ··· 707 707 radeon_router_select_ddc_port(radeon_connector); 708 708 709 709 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 710 - (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { 710 + (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) || 711 + radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) { 711 712 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 713 + 712 714 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || 713 715 dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) 714 - radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); 715 - } 716 - if (!radeon_connector->ddc_bus) 717 - return -1; 718 - if (!radeon_connector->edid) { 719 - radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 716 + radeon_connector->edid = drm_get_edid(&radeon_connector->base, 717 + &dig->dp_i2c_bus->adapter); 718 + else if (radeon_connector->ddc_bus && !radeon_connector->edid) 719 + radeon_connector->edid = drm_get_edid(&radeon_connector->base, 720 + &radeon_connector->ddc_bus->adapter); 721 + } else { 722 + if (radeon_connector->ddc_bus && !radeon_connector->edid) 723 + radeon_connector->edid = drm_get_edid(&radeon_connector->base, 724 + &radeon_connector->ddc_bus->adapter); 720 725 } 721 726 722 727 if (!radeon_connector->edid) {
+6 -1
drivers/gpu/drm/radeon/radeon_ttm.c
··· 277 277 DRM_ERROR("Trying to move memory with CP turned off.\n"); 278 278 return -EINVAL; 279 279 } 280 - r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); 280 + 281 + BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); 282 + 283 + r = radeon_copy(rdev, old_start, new_start, 284 + new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ 285 + fence); 281 286 /* FIXME: handle copy error */ 282 287 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, 283 288 evict, no_wait_reserve, no_wait_gpu, new_mem);
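The conversion factor here matters because TTM's num_pages counts CPU pages while the radeon copy hooks now count fixed 4 KiB GPU pages: on a 4 KiB-page x86 kernel the factor is 1, but with 64 KiB pages each CPU page covers 16 GPU pages. A worked sketch (helper name mine):

    #define RADEON_GPU_PAGE_SIZE 4096

    static unsigned long to_gpu_pages(unsigned long cpu_pages,
                                      unsigned long cpu_page_size)
    {
            /* the BUILD_BUG_ON in the hunk guarantees PAGE_SIZE is a
             * multiple of 4096, so this division is exact */
            return cpu_pages * (cpu_page_size / RADEON_GPU_PAGE_SIZE);
    }
    /* to_gpu_pages(10, 4096) == 10; to_gpu_pages(10, 65536) == 160 */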
+2 -1
drivers/gpu/drm/ttm/ttm_bo.c
··· 394 394 395 395 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { 396 396 if (bo->ttm == NULL) { 397 - ret = ttm_bo_add_ttm(bo, false); 397 + bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); 398 + ret = ttm_bo_add_ttm(bo, zero); 398 399 if (ret) 399 400 goto out_err; 400 401 }
+1
drivers/hid/hid-ids.h
··· 277 277 #define USB_DEVICE_ID_PENPOWER 0x00f4 278 278 279 279 #define USB_VENDOR_ID_GREENASIA 0x0e8f 280 + #define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013 280 281 281 282 #define USB_VENDOR_ID_GRETAGMACBETH 0x0971 282 283 #define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005
+55 -11
drivers/hid/hid-magicmouse.c
··· 81 81 #define NO_TOUCHES -1 82 82 #define SINGLE_TOUCH_UP -2 83 83 84 + /* Touch surface information. Dimension is in hundredths of a mm, min and max 85 + * are in units. */ 86 + #define MOUSE_DIMENSION_X (float)9056 87 + #define MOUSE_MIN_X -1100 88 + #define MOUSE_MAX_X 1258 89 + #define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100)) 90 + #define MOUSE_DIMENSION_Y (float)5152 91 + #define MOUSE_MIN_Y -1589 92 + #define MOUSE_MAX_Y 2047 93 + #define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100)) 94 + 95 + #define TRACKPAD_DIMENSION_X (float)13000 96 + #define TRACKPAD_MIN_X -2909 97 + #define TRACKPAD_MAX_X 3167 98 + #define TRACKPAD_RES_X \ 99 + ((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100)) 100 + #define TRACKPAD_DIMENSION_Y (float)11000 101 + #define TRACKPAD_MIN_Y -2456 102 + #define TRACKPAD_MAX_Y 2565 103 + #define TRACKPAD_RES_Y \ 104 + ((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100)) 105 + 84 106 /** 85 107 * struct magicmouse_sc - Tracks Magic Mouse-specific data. 86 108 * @input: Input device through which we report events. ··· 428 406 * inverse of the reported Y. 429 407 */ 430 408 if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { 431 - input_set_abs_params(input, ABS_MT_POSITION_X, -1100, 432 - 1358, 4, 0); 433 - input_set_abs_params(input, ABS_MT_POSITION_Y, -1589, 434 - 2047, 4, 0); 409 + input_set_abs_params(input, ABS_MT_POSITION_X, 410 + MOUSE_MIN_X, MOUSE_MAX_X, 4, 0); 411 + input_set_abs_params(input, ABS_MT_POSITION_Y, 412 + MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0); 413 + 414 + input_abs_set_res(input, ABS_MT_POSITION_X, 415 + MOUSE_RES_X); 416 + input_abs_set_res(input, ABS_MT_POSITION_Y, 417 + MOUSE_RES_Y); 435 418 } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ 436 - input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0); 437 - input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0); 438 - input_set_abs_params(input, ABS_MT_POSITION_X, -2909, 439 - 3167, 4, 0); 440 - input_set_abs_params(input, ABS_MT_POSITION_Y, -2456, 441 - 2565, 4, 0); 419 + input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X, 420 + TRACKPAD_MAX_X, 4, 0); 421 + input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y, 422 + TRACKPAD_MAX_Y, 4, 0); 423 + input_set_abs_params(input, ABS_MT_POSITION_X, 424 + TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0); 425 + input_set_abs_params(input, ABS_MT_POSITION_Y, 426 + TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0); 427 + 428 + input_abs_set_res(input, ABS_X, TRACKPAD_RES_X); 429 + input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y); 430 + input_abs_set_res(input, ABS_MT_POSITION_X, 431 + TRACKPAD_RES_X); 432 + input_abs_set_res(input, ABS_MT_POSITION_Y, 433 + TRACKPAD_RES_Y); 442 434 } 443 435 444 436 input_set_events_per_packet(input, 60); ··· 537 501 } 538 502 report->size = 6; 539 503 504 + /* 505 + * Some devices respond with 'invalid report id' when the feature 506 + * report switching them into multitouch mode is sent. 507 + * 508 + * This results in -EIO from the _raw low-level transport callback, 509 + * but there seems to be no other way of switching the mode. 510 + * Thus the super-ugly hacky success check below. 511 + */ 540 512 ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature), 541 513 HID_FEATURE_REPORT); 542 - if (ret != sizeof(feature)) { 514 + if (ret != -EIO && ret != sizeof(feature)) { 543 515 hid_err(hdev, "unable to request touch data (%d)\n", ret); 544 516 goto err_stop_hw; 545 517 }
+12 -12
drivers/hid/hid-wacom.c
··· 353 353 if (ret) { 354 354 hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n", 355 355 ret); 356 - /* 357 - * battery attribute is not critical for the tablet, but if it 358 - * failed then there is no need to create ac attribute 359 - */ 360 - goto move_on; 356 + goto err_battery; 361 357 } 362 358 363 359 wdata->ac.properties = wacom_ac_props; ··· 367 371 if (ret) { 368 372 hid_warn(hdev, 369 373 "can't create ac battery attribute, err: %d\n", ret); 370 - /* 371 - * ac attribute is not critical for the tablet, but if it 372 - * failed then we don't want to battery attribute to exist 373 - */ 374 - power_supply_unregister(&wdata->battery); 374 + goto err_ac; 375 375 } 376 - 377 - move_on: 378 376 #endif 379 377 hidinput = list_entry(hdev->inputs.next, struct hid_input, list); 380 378 input = hidinput->input; 379 + 380 + __set_bit(INPUT_PROP_POINTER, input->propbit); 381 381 382 382 /* Basics */ 383 383 input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL); ··· 408 416 409 417 return 0; 410 418 419 + #ifdef CONFIG_HID_WACOM_POWER_SUPPLY 420 + err_ac: 421 + power_supply_unregister(&wdata->battery); 422 + err_battery: 423 + device_remove_file(&hdev->dev, &dev_attr_speed); 424 + hid_hw_stop(hdev); 425 + #endif 411 426 err_free: 412 427 kfree(wdata); 413 428 return ret; ··· 425 426 #ifdef CONFIG_HID_WACOM_POWER_SUPPLY 426 427 struct wacom_data *wdata = hid_get_drvdata(hdev); 427 428 #endif 429 + device_remove_file(&hdev->dev, &dev_attr_speed); 428 430 hid_hw_stop(hdev); 429 431 430 432 #ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+1
drivers/hid/usbhid/hid-quirks.c
··· 47 47 { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL }, 48 48 49 49 { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, 50 + { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT }, 50 51 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, 51 52 { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, 52 53 { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
+6 -1
drivers/hwmon/coretemp.c
··· 601 601 err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx); 602 602 if (!err) { 603 603 tdata->attr_size += MAX_THRESH_ATTRS; 604 - tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000; 604 + tdata->tmin = tdata->tjmax - 605 + ((eax & THERM_MASK_THRESHOLD0) >> 606 + THERM_SHIFT_THRESHOLD0) * 1000; 607 + tdata->ttarget = tdata->tjmax - 608 + ((eax & THERM_MASK_THRESHOLD1) >> 609 + THERM_SHIFT_THRESHOLD1) * 1000; 605 610 } 606 611 607 612 pdata->core_data[attr_no] = tdata;
+1 -1
drivers/hwmon/max16065.c
··· 124 124 125 125 static inline int ADC_TO_CURR(int adc, int gain) 126 126 { 127 - return adc * 1400000 / gain * 255; 127 + return adc * 1400000 / (gain * 255); 128 128 } 129 129 130 130 /*
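The max16065 change is pure operator precedence: * and / associate left to right in C, so the old expression parsed as ((adc * 1400000) / gain) * 255, roughly 255 * 255 times the intended value (and prone to integer overflow). A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
            int adc = 1, gain = 2;

            int before = adc * 1400000 / gain * 255;   /* ((adc*1400000)/gain)*255 */
            int after  = adc * 1400000 / (gain * 255); /* intended grouping */

            printf("%d vs %d\n", before, after);       /* 178500000 vs 2745 */
            return 0;
    }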
+8 -1
drivers/hwmon/pmbus/pmbus_core.c
··· 978 978 struct pmbus_limit_attr { 979 979 u16 reg; /* Limit register */ 980 980 bool update; /* True if register needs updates */ 981 + bool low; /* True if low limit; for limits with compare 982 + functions only */ 981 983 const char *attr; /* Attribute name */ 982 984 const char *alarm; /* Alarm attribute name */ 983 985 u32 sbit; /* Alarm attribute status bit */ ··· 1031 1029 if (attr->compare) { 1032 1030 pmbus_add_boolean_cmp(data, name, 1033 1031 l->alarm, index, 1034 - cbase, cindex, 1032 + l->low ? cindex : cbase, 1033 + l->low ? cbase : cindex, 1035 1034 attr->sbase + page, l->sbit); 1036 1035 } else { 1037 1036 pmbus_add_boolean_reg(data, name, ··· 1369 1366 static const struct pmbus_limit_attr temp_limit_attrs[] = { 1370 1367 { 1371 1368 .reg = PMBUS_UT_WARN_LIMIT, 1369 + .low = true, 1372 1370 .attr = "min", 1373 1371 .alarm = "min_alarm", 1374 1372 .sbit = PB_TEMP_UT_WARNING, 1375 1373 }, { 1376 1374 .reg = PMBUS_UT_FAULT_LIMIT, 1375 + .low = true, 1377 1376 .attr = "lcrit", 1378 1377 .alarm = "lcrit_alarm", 1379 1378 .sbit = PB_TEMP_UT_FAULT, ··· 1404 1399 static const struct pmbus_limit_attr temp_limit_attrs23[] = { 1405 1400 { 1406 1401 .reg = PMBUS_UT_WARN_LIMIT, 1402 + .low = true, 1407 1403 .attr = "min", 1408 1404 .alarm = "min_alarm", 1409 1405 .sbit = PB_TEMP_UT_WARNING, 1410 1406 }, { 1411 1407 .reg = PMBUS_UT_FAULT_LIMIT, 1408 + .low = true, 1412 1409 .attr = "lcrit", 1413 1410 .alarm = "lcrit_alarm", 1414 1411 .sbit = PB_TEMP_UT_FAULT,
+2 -4
drivers/hwmon/pmbus/ucd9000.c
··· 141 141 block_buffer[ret] = '\0'; 142 142 dev_info(&client->dev, "Device ID %s\n", block_buffer); 143 143 144 - mid = NULL; 145 - for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) { 146 - mid = &ucd9000_id[i]; 144 + for (mid = ucd9000_id; mid->name[0]; mid++) { 147 145 if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) 148 146 break; 149 147 } 150 - if (!mid || !strlen(mid->name)) { 148 + if (!mid->name[0]) { 151 149 dev_err(&client->dev, "Unsupported device\n"); 152 150 return -ENODEV; 153 151 }
+2 -4
drivers/hwmon/pmbus/ucd9200.c
··· 68 68 block_buffer[ret] = '\0'; 69 69 dev_info(&client->dev, "Device ID %s\n", block_buffer); 70 70 71 - mid = NULL; 72 - for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) { 73 - mid = &ucd9200_id[i]; 71 + for (mid = ucd9200_id; mid->name[0]; mid++) { 74 72 if (!strncasecmp(mid->name, block_buffer, strlen(mid->name))) 75 73 break; 76 74 } 77 - if (!mid || !strlen(mid->name)) { 75 + if (!mid->name[0]) { 78 76 dev_err(&client->dev, "Unsupported device\n"); 79 77 return -ENODEV; 80 78 }
+4 -1
drivers/i2c/busses/i2c-pxa-pci.c
··· 109 109 return -EINVAL; 110 110 } 111 111 sds = kzalloc(sizeof(*sds), GFP_KERNEL); 112 - if (!sds) 112 + if (!sds) { 113 + ret = -ENOMEM; 113 114 goto err_mem; 115 + } 114 116 115 117 for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) { 116 118 sds->pdev[i] = add_i2c_device(dev, i); 117 119 if (IS_ERR(sds->pdev[i])) { 120 + ret = PTR_ERR(sds->pdev[i]); 118 121 while (--i >= 0) 119 122 platform_device_unregister(sds->pdev[i]); 120 123 goto err_dev_add;
+44 -14
drivers/i2c/busses/i2c-tegra.c
··· 270 270 271 271 /* Rounds down to not include partial word at the end of buf */ 272 272 words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD; 273 - if (words_to_transfer > tx_fifo_avail) 274 - words_to_transfer = tx_fifo_avail; 275 273 276 - i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); 274 + /* It's very common to have < 4 bytes, so optimize that case. */ 275 + if (words_to_transfer) { 276 + if (words_to_transfer > tx_fifo_avail) 277 + words_to_transfer = tx_fifo_avail; 277 278 278 - buf += words_to_transfer * BYTES_PER_FIFO_WORD; 279 + /* 280 + * Update state before writing to FIFO. If this causes us 281 + * to finish writing all bytes (AKA buf_remaining goes to 0) we 282 + * have a potential for an interrupt (PACKET_XFER_COMPLETE is 283 + * not maskable). We need to make sure that the isr sees 284 + * buf_remaining as 0 and doesn't call us back re-entrantly. 285 + */ 286 + buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD; 287 + tx_fifo_avail -= words_to_transfer; 288 + i2c_dev->msg_buf_remaining = buf_remaining; 289 + i2c_dev->msg_buf = buf + 290 + words_to_transfer * BYTES_PER_FIFO_WORD; 291 + barrier(); 292 + 293 + i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); 294 + 295 + buf += words_to_transfer * BYTES_PER_FIFO_WORD; 296 + } 281 297 282 298 /* 283 299 * If there is a partial word at the end of buf, handle it manually to ··· 303 287 if (tx_fifo_avail > 0 && buf_remaining > 0) { 304 288 BUG_ON(buf_remaining > 3); 305 289 memcpy(&val, buf, buf_remaining); 290 + 291 + /* Again update before writing to FIFO to make sure isr sees. */ 292 + i2c_dev->msg_buf_remaining = 0; 293 + i2c_dev->msg_buf = NULL; 294 + barrier(); 295 + 306 296 i2c_writel(i2c_dev, val, I2C_TX_FIFO); 307 - buf_remaining = 0; 308 - tx_fifo_avail--; 309 297 } 310 298 311 - BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0); 312 - i2c_dev->msg_buf_remaining = buf_remaining; 313 - i2c_dev->msg_buf = buf; 314 299 return 0; 315 300 } 316 301 ··· 428 411 tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ); 429 412 } 430 413 431 - if ((status & I2C_INT_PACKET_XFER_COMPLETE) && 432 - !i2c_dev->msg_buf_remaining) 414 + if (status & I2C_INT_PACKET_XFER_COMPLETE) { 415 + BUG_ON(i2c_dev->msg_buf_remaining); 433 416 complete(&i2c_dev->msg_complete); 417 + } 434 418 435 419 i2c_writel(i2c_dev, status, I2C_INT_STATUS); 436 420 if (i2c_dev->is_dvc) ··· 549 531 550 532 static u32 tegra_i2c_func(struct i2c_adapter *adap) 551 533 { 552 - return I2C_FUNC_I2C; 534 + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 553 535 } 554 536 555 537 static const struct i2c_algorithm tegra_i2c_algo = { ··· 737 719 } 738 720 #endif 739 721 722 + #if defined(CONFIG_OF) 723 + /* Match table for of_platform binding */ 724 + static const struct of_device_id tegra_i2c_of_match[] __devinitconst = { 725 + { .compatible = "nvidia,tegra20-i2c", }, 726 + {}, 727 + }; 728 + MODULE_DEVICE_TABLE(of, tegra_i2c_of_match); 729 + #else 730 + #define tegra_i2c_of_match NULL 731 + #endif 732 + 740 733 static struct platform_driver tegra_i2c_driver = { 741 734 .probe = tegra_i2c_probe, 742 735 .remove = tegra_i2c_remove, ··· 758 729 .driver = { 759 730 .name = "tegra-i2c", 760 731 .owner = THIS_MODULE, 732 + .of_match_table = tegra_i2c_of_match, 761 733 }, 762 734 }; 763 735
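The tegra reordering is an instance of a publish-then-kick rule (my gloss, not the changelog): when an MMIO write can complete the transfer and raise a non-maskable interrupt, every shared field the ISR reads must already be consistent, with a compiler barrier so the stores cannot be reordered past the FIFO write. In miniature, with hypothetical names:

    /* Hypothetical device state; only the ordering is the point. */
    struct xfer_state { size_t remaining; const u8 *buf; };

    static void publish_then_kick(struct xfer_state *st,
                                  void __iomem *fifo, u32 word)
    {
            st->remaining -= 4;     /* 1. state the ISR will read            */
            st->buf       += 4;
            barrier();              /* 2. stores must not sink past the MMIO */
            writel(word, fifo);     /* 3. may immediately raise the IRQ      */
    }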
-1
drivers/input/keyboard/adp5588-keys.c
··· 668 668 MODULE_LICENSE("GPL"); 669 669 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 670 670 MODULE_DESCRIPTION("ADP5588/87 Keypad driver"); 671 - MODULE_ALIAS("platform:adp5588-keys");
+1 -1
drivers/input/misc/cm109.c
··· 475 475 le16_to_cpu(dev->ctl_req->wIndex), 476 476 dev->ctl_data, 477 477 USB_PKT_LEN, USB_CTRL_SET_TIMEOUT); 478 - if (error && error != EINTR) 478 + if (error < 0 && error != -EINTR) 479 479 err("%s: usb_control_msg() failed %d", __func__, error); 480 480 } 481 481
+20
drivers/input/mouse/bcm5974.c
··· 67 67 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 68 68 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 69 69 #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 70 + /* MacbookAir4,1 (unibody, July 2011) */ 71 + #define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249 72 + #define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a 73 + #define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b 70 74 /* MacbookAir4,2 (unibody, July 2011) */ 71 75 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c 72 76 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d ··· 116 112 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), 117 113 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), 118 114 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), 115 + /* MacbookAir4,1 */ 116 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI), 117 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO), 118 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS), 119 119 /* MacbookAir4,2 */ 120 120 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI), 121 121 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO), ··· 341 333 { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, 342 334 { DIM_X, DIM_X / SN_COORD, -4750, 5280 }, 343 335 { DIM_Y, DIM_Y / SN_COORD, -150, 6730 } 336 + }, 337 + { 338 + USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI, 339 + USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO, 340 + USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS, 341 + HAS_INTEGRATED_BUTTON, 342 + 0x84, sizeof(struct bt_data), 343 + 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 344 + { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, 345 + { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, 346 + { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, 347 + { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } 344 348 }, 345 349 {} 346 350 };
-14
drivers/input/tablet/wacom_sys.c
··· 229 229 get_unaligned_le16(&report[i + 3]); 230 230 i += 4; 231 231 } 232 - } else if (usage == WCM_DIGITIZER) { 233 - /* max pressure isn't reported 234 - features->pressure_max = (unsigned short) 235 - (report[i+4] << 8 | report[i + 3]); 236 - */ 237 - features->pressure_max = 255; 238 - i += 4; 239 232 } 240 233 break; 241 234 ··· 283 290 case HID_USAGE_STYLUS: 284 291 pen = 1; 285 292 i++; 286 - break; 287 - 288 - case HID_USAGE_UNDEFINED: 289 - if (usage == WCM_DESKTOP && finger) /* capacity */ 290 - features->pressure_max = 291 - get_unaligned_le16(&report[i + 3]); 292 - i += 4; 293 293 break; 294 294 } 295 295 break;
+35 -10
drivers/input/tablet/wacom_wac.c
··· 800 800 int i; 801 801 802 802 for (i = 0; i < 2; i++) { 803 - int p = data[9 * i + 2]; 804 - bool touch = p && !wacom->shared->stylus_in_proximity; 803 + int offset = (data[1] & 0x80) ? (8 * i) : (9 * i); 804 + bool touch = data[offset + 3] & 0x80; 805 805 806 - input_mt_slot(input, i); 807 - input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); 808 806 /* 809 807 * Touch events need to be disabled while stylus is 810 808 * in proximity because user's hand is resting on touchpad 811 809 * and sending unwanted events. User expects tablet buttons 812 810 * to continue working though. 813 811 */ 812 + touch = touch && !wacom->shared->stylus_in_proximity; 813 + 814 + input_mt_slot(input, i); 815 + input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); 814 816 if (touch) { 815 - int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff; 816 - int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff; 817 + int x = get_unaligned_be16(&data[offset + 3]) & 0x7ff; 818 + int y = get_unaligned_be16(&data[offset + 5]) & 0x7ff; 817 819 if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) { 818 820 x <<= 5; 819 821 y <<= 5; 820 822 } 821 - input_report_abs(input, ABS_MT_PRESSURE, p); 822 823 input_report_abs(input, ABS_MT_POSITION_X, x); 823 824 input_report_abs(input, ABS_MT_POSITION_Y, y); 824 825 } ··· 1057 1056 features->x_fuzz, 0); 1058 1057 input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 1059 1058 features->y_fuzz, 0); 1060 - input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 1061 - features->pressure_fuzz, 0); 1062 1059 1063 1060 if (features->device_type == BTN_TOOL_PEN) { 1061 + input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 1062 + features->pressure_fuzz, 0); 1063 + 1064 1064 /* penabled devices have fixed resolution for each model */ 1065 1065 input_abs_set_res(input_dev, ABS_X, features->x_resolution); 1066 1066 input_abs_set_res(input_dev, ABS_Y, features->y_resolution); ··· 1100 1098 __set_bit(BTN_TOOL_MOUSE, input_dev->keybit); 1101 1099 __set_bit(BTN_STYLUS, input_dev->keybit); 1102 1100 __set_bit(BTN_STYLUS2, input_dev->keybit); 1101 + 1102 + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); 1103 1103 break; 1104 1104 1105 1105 case WACOM_21UX2: ··· 1130 1126 } 1131 1127 1132 1128 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1129 + 1130 + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); 1131 + 1133 1132 wacom_setup_cintiq(wacom_wac); 1134 1133 break; 1135 1134 ··· 1157 1150 /* fall through */ 1158 1151 1159 1152 case INTUOS: 1153 + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); 1154 + 1160 1155 wacom_setup_intuos(wacom_wac); 1161 1156 break; 1162 1157 ··· 1174 1165 1175 1166 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1176 1167 wacom_setup_intuos(wacom_wac); 1168 + 1169 + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); 1177 1170 break; 1178 1171 1179 1172 case TABLETPC2FG: ··· 1194 1183 case TABLETPC: 1195 1184 __clear_bit(ABS_MISC, input_dev->absbit); 1196 1185 1186 + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); 1187 + 1197 1188 if (features->device_type != BTN_TOOL_PEN) 1198 1189 break; /* no need to process stylus stuff */ 1199 1190 1200 1191 /* fall through */ 1201 1192 1202 1193 case PL: 1203 - case PTU: 1204 1194 case DTU: 1205 1195 __set_bit(BTN_TOOL_PEN, input_dev->keybit); 1196 + __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); 1206 1197 __set_bit(BTN_STYLUS, input_dev->keybit); 1198 + __set_bit(BTN_STYLUS2, input_dev->keybit); 1199 + 1200 + __set_bit(INPUT_PROP_DIRECT, 
input_dev->propbit); 1201 + break; 1202 + 1203 + case PTU: 1207 1204 __set_bit(BTN_STYLUS2, input_dev->keybit); 1208 1205 /* fall through */ 1209 1206 1210 1207 case PENPARTNER: 1208 + __set_bit(BTN_TOOL_PEN, input_dev->keybit); 1211 1209 __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); 1210 + __set_bit(BTN_STYLUS, input_dev->keybit); 1211 + 1212 + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); 1212 1213 break; 1213 1214 1214 1215 case BAMBOO_PT: 1215 1216 __clear_bit(ABS_MISC, input_dev->absbit); 1217 + 1218 + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); 1216 1219 1217 1220 if (features->device_type == BTN_TOOL_DOUBLETAP) { 1218 1221 __set_bit(BTN_LEFT, input_dev->keybit);
+2
drivers/input/touchscreen/wacom_w8001.c
··· 383 383 dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); 384 384 strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name)); 385 385 386 + __set_bit(INPUT_PROP_DIRECT, dev->propbit); 387 + 386 388 /* penabled? */ 387 389 error = w8001_command(w8001, W8001_CMD_QUERY, true); 388 390 if (!error) {
+10 -8
drivers/iommu/amd_iommu.c
··· 605 605 * Writes the command to the IOMMUs command buffer and informs the 606 606 * hardware about the new command. 607 607 */ 608 - static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) 608 + static int iommu_queue_command_sync(struct amd_iommu *iommu, 609 + struct iommu_cmd *cmd, 610 + bool sync) 609 611 { 610 612 u32 left, tail, head, next_tail; 611 613 unsigned long flags; ··· 641 639 copy_cmd_to_buffer(iommu, cmd, tail); 642 640 643 641 /* We need to sync now to make sure all commands are processed */ 644 - iommu->need_sync = true; 642 + iommu->need_sync = sync; 645 643 646 644 spin_unlock_irqrestore(&iommu->lock, flags); 647 645 648 646 return 0; 647 + } 648 + 649 + static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) 650 + { 651 + return iommu_queue_command_sync(iommu, cmd, true); 649 652 } 650 653 651 654 /* ··· 668 661 669 662 build_completion_wait(&cmd, (u64)&sem); 670 663 671 - ret = iommu_queue_command(iommu, &cmd); 664 + ret = iommu_queue_command_sync(iommu, &cmd, false); 672 665 if (ret) 673 666 return ret; 674 667 ··· 847 840 static void domain_flush_devices(struct protection_domain *domain) 848 841 { 849 842 struct iommu_dev_data *dev_data; 850 - unsigned long flags; 851 - 852 - spin_lock_irqsave(&domain->lock, flags); 853 843 854 844 list_for_each_entry(dev_data, &domain->dev_list, list) 855 845 device_flush_dte(dev_data); 856 - 857 - spin_unlock_irqrestore(&domain->lock, flags); 858 846 } 859 847 860 848 /****************************************************************************
+1 -1
drivers/iommu/dmar.c
··· 1388 1388 return ret; 1389 1389 } 1390 1390 1391 - ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu); 1391 + ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu); 1392 1392 if (ret) 1393 1393 printk(KERN_ERR "IOMMU: can't request irq\n"); 1394 1394 return ret;
+2
drivers/leds/ledtrig-timer.c
··· 41 41 42 42 if (count == size) { 43 43 led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off); 44 + led_cdev->blink_delay_on = state; 44 45 ret = count; 45 46 } 46 47 ··· 70 69 71 70 if (count == size) { 72 71 led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state); 72 + led_cdev->blink_delay_off = state; 73 73 ret = count; 74 74 } 75 75
+1 -1
drivers/md/linear.h
··· 10 10 11 11 struct linear_private_data 12 12 { 13 + struct rcu_head rcu; 13 14 sector_t array_sectors; 14 15 dev_info_t disks[0]; 15 - struct rcu_head rcu; 16 16 }; 17 17 18 18
+23 -5
drivers/md/md.c
··· 848 848 bio->bi_end_io = super_written; 849 849 850 850 atomic_inc(&mddev->pending_writes); 851 - submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio); 851 + submit_bio(WRITE_FLUSH_FUA, bio); 852 852 } 853 853 854 854 void md_super_wait(mddev_t *mddev) ··· 1138 1138 ret = 0; 1139 1139 } 1140 1140 rdev->sectors = rdev->sb_start; 1141 + /* Limit to 4TB as metadata cannot record more than that */ 1142 + if (rdev->sectors >= (2ULL << 32)) 1143 + rdev->sectors = (2ULL << 32) - 2; 1141 1144 1142 - if (rdev->sectors < sb->size * 2 && sb->level > 1) 1145 + if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) 1143 1146 /* "this cannot possibly happen" ... */ 1144 1147 ret = -EINVAL; 1145 1148 ··· 1176 1173 mddev->clevel[0] = 0; 1177 1174 mddev->layout = sb->layout; 1178 1175 mddev->raid_disks = sb->raid_disks; 1179 - mddev->dev_sectors = sb->size * 2; 1176 + mddev->dev_sectors = ((sector_t)sb->size) * 2; 1180 1177 mddev->events = ev1; 1181 1178 mddev->bitmap_info.offset = 0; 1182 1179 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; ··· 1418 1415 rdev->sb_start = calc_dev_sboffset(rdev); 1419 1416 if (!num_sectors || num_sectors > rdev->sb_start) 1420 1417 num_sectors = rdev->sb_start; 1418 + /* Limit to 4TB as metadata cannot record more than that. 1419 + * 4TB == 2^32 KB, or 2*2^32 sectors. 1420 + */ 1421 + if (num_sectors >= (2ULL << 32)) 1422 + num_sectors = (2ULL << 32) - 2; 1421 1423 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1422 1424 rdev->sb_page); 1423 1425 md_super_wait(rdev->mddev); ··· 1745 1737 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); 1746 1738 sb->level = cpu_to_le32(mddev->level); 1747 1739 sb->layout = cpu_to_le32(mddev->layout); 1740 + 1741 + if (test_bit(WriteMostly, &rdev->flags)) 1742 + sb->devflags |= WriteMostly1; 1743 + else 1744 + sb->devflags &= ~WriteMostly1; 1748 1745 1749 1746 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { 1750 1747 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); ··· 2574 2561 int err = -EINVAL; 2575 2562 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2576 2563 md_error(rdev->mddev, rdev); 2577 - err = 0; 2564 + if (test_bit(Faulty, &rdev->flags)) 2565 + err = 0; 2566 + else 2567 + err = -EBUSY; 2578 2568 } else if (cmd_match(buf, "remove")) { 2579 2569 if (rdev->raid_disk >= 0) 2580 2570 err = -EBUSY; ··· 2600 2584 err = 0; 2601 2585 } else if (cmd_match(buf, "-blocked")) { 2602 2586 if (!test_bit(Faulty, &rdev->flags) && 2603 - test_bit(BlockedBadBlocks, &rdev->flags)) { 2587 + rdev->badblocks.unacked_exist) { 2604 2588 /* metadata handler doesn't understand badblocks, 2605 2589 * so we need to fail the device 2606 2590 */ ··· 5999 5983 return -ENODEV; 6000 5984 6001 5985 md_error(mddev, rdev); 5986 + if (!test_bit(Faulty, &rdev->flags)) 5987 + return -EBUSY; 6002 5988 return 0; 6003 5989 } 6004 5990
+9 -5
drivers/md/raid1.c
··· 1099 1099 bio_list_add(&conf->pending_bio_list, mbio); 1100 1100 spin_unlock_irqrestore(&conf->device_lock, flags); 1101 1101 } 1102 - r1_bio_write_done(r1_bio); 1103 - 1104 - /* In case raid1d snuck in to freeze_array */ 1105 - wake_up(&conf->wait_barrier); 1106 - 1102 + /* Mustn't call r1_bio_write_done before this next test, 1103 + * as it could result in the bio being freed. 1104 + */ 1107 1105 if (sectors_handled < (bio->bi_size >> 9)) { 1106 + r1_bio_write_done(r1_bio); 1108 1107 /* We need another r1_bio. It has already been counted 1109 1108 * in bio->bi_phys_segments 1110 1109 */ ··· 1115 1116 r1_bio->sector = bio->bi_sector + sectors_handled; 1116 1117 goto retry_write; 1117 1118 } 1119 + 1120 + r1_bio_write_done(r1_bio); 1121 + 1122 + /* In case raid1d snuck in to freeze_array */ 1123 + wake_up(&conf->wait_barrier); 1118 1124 1119 1125 if (do_sync || !bitmap || !plugged) 1120 1126 md_wakeup_thread(mddev->thread);
+24 -23
drivers/md/raid10.c
··· 337 337 md_write_end(r10_bio->mddev); 338 338 } 339 339 340 + static void one_write_done(r10bio_t *r10_bio) 341 + { 342 + if (atomic_dec_and_test(&r10_bio->remaining)) { 343 + if (test_bit(R10BIO_WriteError, &r10_bio->state)) 344 + reschedule_retry(r10_bio); 345 + else { 346 + close_write(r10_bio); 347 + if (test_bit(R10BIO_MadeGood, &r10_bio->state)) 348 + reschedule_retry(r10_bio); 349 + else 350 + raid_end_bio_io(r10_bio); 351 + } 352 + } 353 + } 354 + 340 355 static void raid10_end_write_request(struct bio *bio, int error) 341 356 { 342 357 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); ··· 402 387 * Let's see if all mirrored write operations have finished 403 388 * already. 404 389 */ 405 - if (atomic_dec_and_test(&r10_bio->remaining)) { 406 - if (test_bit(R10BIO_WriteError, &r10_bio->state)) 407 - reschedule_retry(r10_bio); 408 - else { 409 - close_write(r10_bio); 410 - if (test_bit(R10BIO_MadeGood, &r10_bio->state)) 411 - reschedule_retry(r10_bio); 412 - else 413 - raid_end_bio_io(r10_bio); 414 - } 415 - } 390 + one_write_done(r10_bio); 416 391 if (dec_rdev) 417 392 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); 418 393 } ··· 1132 1127 spin_unlock_irqrestore(&conf->device_lock, flags); 1133 1128 } 1134 1129 1135 - if (atomic_dec_and_test(&r10_bio->remaining)) { 1136 - /* This matches the end of raid10_end_write_request() */ 1137 - bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, 1138 - r10_bio->sectors, 1139 - !test_bit(R10BIO_Degraded, &r10_bio->state), 1140 - 0); 1141 - md_write_end(mddev); 1142 - raid_end_bio_io(r10_bio); 1143 - } 1144 - 1145 - /* In case raid10d snuck in to freeze_array */ 1146 - wake_up(&conf->wait_barrier); 1130 + /* Don't remove the bias on 'remaining' (one_write_done) until 1131 + * after checking if we need to go around again. 1132 + */ 1147 1133 1148 1134 if (sectors_handled < (bio->bi_size >> 9)) { 1135 + one_write_done(r10_bio); 1149 1136 /* We need another r10_bio. It has already been counted 1150 1137 * in bio->bi_phys_segments. 1151 1138 */ ··· 1151 1154 r10_bio->state = 0; 1152 1155 goto retry_write; 1153 1156 } 1157 + one_write_done(r10_bio); 1158 + 1159 + /* In case raid10d snuck in to freeze_array */ 1160 + wake_up(&conf->wait_barrier); 1154 1161 1155 1162 if (do_sync || !mddev->bitmap || !plugged) 1156 1163 md_wakeup_thread(mddev->thread);
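The comment about the bias on 'remaining' refers to a common refcount idiom (my description, not the changelog): the counter starts at 1 on behalf of the submitter so it cannot reach zero, and free the r10_bio, while the code may still 'goto retry_write'; the final one_write_done() drops that bias last. Schematically:

    /* Illustrative fragment; setup and error paths elided. */
    atomic_set(&r10_bio->remaining, 1);        /* submitter's bias          */
    for (i = 0; i < copies; i++)
            atomic_inc(&r10_bio->remaining);   /* one per mirrored write    */
    /* ... may loop back to retry_write with the bias still held ... */
    one_write_done(r10_bio);                   /* drop the bias; the last
                                                  decrement finishes the bio */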
+1 -1
drivers/md/raid5.c
··· 3336 3336 3337 3337 finish: 3338 3338 /* wait for this device to become unblocked */ 3339 - if (unlikely(s.blocked_rdev)) 3339 + if (conf->mddev->external && unlikely(s.blocked_rdev)) 3340 3340 md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); 3341 3341 3342 3342 if (s.handle_bad_blocks)
+4 -22
drivers/media/dvb/dvb-usb/vp7045.c
··· 224 224 static int vp7045_usb_probe(struct usb_interface *intf, 225 225 const struct usb_device_id *id) 226 226 { 227 - struct dvb_usb_device *d; 228 - int ret = dvb_usb_device_init(intf, &vp7045_properties, 229 - THIS_MODULE, &d, adapter_nr); 230 - if (ret) 231 - return ret; 232 - 233 - d->priv = kmalloc(20, GFP_KERNEL); 234 - if (!d->priv) { 235 - dvb_usb_device_exit(intf); 236 - return -ENOMEM; 237 - } 238 - 239 - return ret; 240 - } 241 - 242 - static void vp7045_usb_disconnect(struct usb_interface *intf) 243 - { 244 - struct dvb_usb_device *d = usb_get_intfdata(intf); 245 - kfree(d->priv); 246 - dvb_usb_device_exit(intf); 227 + return dvb_usb_device_init(intf, &vp7045_properties, 228 + THIS_MODULE, NULL, adapter_nr); 247 229 } 248 230 249 231 static struct usb_device_id vp7045_usb_table [] = { ··· 240 258 static struct dvb_usb_device_properties vp7045_properties = { 241 259 .usb_ctrl = CYPRESS_FX2, 242 260 .firmware = "dvb-usb-vp7045-01.fw", 243 - .size_of_priv = sizeof(u8 *), 261 + .size_of_priv = 20, 244 262 245 263 .num_adapters = 1, 246 264 .adapter = { ··· 287 305 static struct usb_driver vp7045_usb_driver = { 288 306 .name = "dvb_usb_vp7045", 289 307 .probe = vp7045_usb_probe, 290 - .disconnect = vp7045_usb_disconnect, 308 + .disconnect = dvb_usb_device_exit, 291 309 .id_table = vp7045_usb_table, 292 310 }; 293 311
+8 -37
drivers/media/rc/nuvoton-cir.c
··· 618 618 static void nvt_process_rx_ir_data(struct nvt_dev *nvt) 619 619 { 620 620 DEFINE_IR_RAW_EVENT(rawir); 621 - unsigned int count; 622 621 u32 carrier; 623 622 u8 sample; 624 623 int i; ··· 630 631 if (nvt->carrier_detect_enabled) 631 632 carrier = nvt_rx_carrier_detect(nvt); 632 633 633 - count = nvt->pkts; 634 - nvt_dbg_verbose("Processing buffer of len %d", count); 634 + nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts); 635 635 636 636 init_ir_raw_event(&rawir); 637 637 638 - for (i = 0; i < count; i++) { 639 - nvt->pkts--; 638 + for (i = 0; i < nvt->pkts; i++) { 640 639 sample = nvt->buf[i]; 641 640 642 641 rawir.pulse = ((sample & BUF_PULSE_BIT) != 0); 643 642 rawir.duration = US_TO_NS((sample & BUF_LEN_MASK) 644 643 * SAMPLE_PERIOD); 645 644 646 - if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) { 647 - if (nvt->rawir.pulse == rawir.pulse) 648 - nvt->rawir.duration += rawir.duration; 649 - else { 650 - nvt->rawir.duration = rawir.duration; 651 - nvt->rawir.pulse = rawir.pulse; 652 - } 653 - continue; 654 - } 645 + nvt_dbg("Storing %s with duration %d", 646 + rawir.pulse ? "pulse" : "space", rawir.duration); 655 647 656 - rawir.duration += nvt->rawir.duration; 657 - 658 - init_ir_raw_event(&nvt->rawir); 659 - nvt->rawir.duration = 0; 660 - nvt->rawir.pulse = rawir.pulse; 661 - 662 - if (sample == BUF_PULSE_BIT) 663 - rawir.pulse = false; 664 - 665 - if (rawir.duration) { 666 - nvt_dbg("Storing %s with duration %d", 667 - rawir.pulse ? "pulse" : "space", 668 - rawir.duration); 669 - 670 - ir_raw_event_store_with_filter(nvt->rdev, &rawir); 671 - } 648 + ir_raw_event_store_with_filter(nvt->rdev, &rawir); 672 649 673 650 /* 674 651 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE 675 652 * indicates end of IR signal, but new data incoming. In both 676 653 * cases, it means we're ready to call ir_raw_event_handle 677 654 */ 678 - if ((sample == BUF_PULSE_BIT) && nvt->pkts) { 655 + if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) { 679 656 nvt_dbg("Calling ir_raw_event_handle (signal end)\n"); 680 657 ir_raw_event_handle(nvt->rdev); 681 658 } 682 659 } 683 660 661 + nvt->pkts = 0; 662 + 684 663 nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n"); 685 664 ir_raw_event_handle(nvt->rdev); 686 - 687 - if (nvt->pkts) { 688 - nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts); 689 - nvt->pkts = 0; 690 - } 691 665 692 666 nvt_dbg_verbose("%s done", __func__); 693 667 } ··· 1020 1048 1021 1049 spin_lock_init(&nvt->nvt_lock); 1022 1050 spin_lock_init(&nvt->tx.lock); 1023 - init_ir_raw_event(&nvt->rawir); 1024 1051 1025 1052 ret = -EBUSY; 1026 1053 /* now claim resources */
-1
drivers/media/rc/nuvoton-cir.h
··· 67 67 struct nvt_dev { 68 68 struct pnp_dev *pdev; 69 69 struct rc_dev *rdev; 70 - struct ir_raw_event rawir; 71 70 72 71 spinlock_t nvt_lock; 73 72
+10 -12
drivers/media/video/gspca/ov519.c
··· 2858 2858 case 0x60: 2859 2859 PDEBUG(D_PROBE, "Sensor is a OV7660"); 2860 2860 sd->sensor = SEN_OV7660; 2861 - sd->invert_led = 0; 2862 2861 break; 2863 2862 default: 2864 2863 PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low); ··· 3336 3337 case BRIDGE_OV519: 3337 3338 cam->cam_mode = ov519_vga_mode; 3338 3339 cam->nmodes = ARRAY_SIZE(ov519_vga_mode); 3339 - sd->invert_led = !sd->invert_led; 3340 3340 break; 3341 3341 case BRIDGE_OVFX2: 3342 3342 cam->cam_mode = ov519_vga_mode; ··· 5003 5005 /* -- module initialisation -- */ 5004 5006 static const struct usb_device_id device_table[] = { 5005 5007 {USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF }, 5006 - {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 }, 5007 - {USB_DEVICE(0x041e, 0x405f), 5008 + {USB_DEVICE(0x041e, 0x4052), 5008 5009 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, 5010 + {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 }, 5009 5011 {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 }, 5010 5012 {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 }, 5011 - {USB_DEVICE(0x041e, 0x4064), 5012 - .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, 5013 + {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 }, 5013 5014 {USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 }, 5014 - {USB_DEVICE(0x041e, 0x4068), 5015 + {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 }, 5016 + {USB_DEVICE(0x045e, 0x028c), 5015 5017 .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, 5016 - {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 }, 5017 5018 {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 }, 5018 - {USB_DEVICE(0x054c, 0x0155), 5019 - .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, 5019 + {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 }, 5020 5020 {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 }, 5021 5021 {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 }, 5022 - {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 }, 5023 - {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 }, 5022 + {USB_DEVICE(0x05a9, 0x0519), 5023 + .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, 5024 + {USB_DEVICE(0x05a9, 0x0530), 5025 + .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, 5024 5026 {USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 }, 5025 5027 {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 }, 5026 5028 {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 },
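The ov519 change consolidates the LED-polarity quirk into the device table: instead of flipping invert_led in probe and again during sensor detection, each entry's driver_info carries the final answer. A reduced sketch of the idiom, with hypothetical flag values:

        #include <stdio.h>

        #define BRIDGE_MASK       0x07
        #define BRIDGE_INVERT_LED 0x08  /* quirk bit OR'd into driver_info */

        struct usb_id {
                unsigned short vendor, product;
                unsigned long driver_info;
        };

        static const struct usb_id table[] = {
                { 0x05a9, 0x0519, 0x02 | BRIDGE_INVERT_LED },   /* quirky device */
                { 0x05a9, 0x2800, 0x03 },                       /* normal device */
        };

        int main(void)
        {
                for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                        unsigned bridge = table[i].driver_info & BRIDGE_MASK;
                        int invert = !!(table[i].driver_info & BRIDGE_INVERT_LED);

                        printf("%04x:%04x bridge=%u invert_led=%d\n",
                               table[i].vendor, table[i].product, bridge, invert);
                }
                return 0;
        }

Keeping the quirk next to the device ID means adding a board is a one-line table edit rather than another conditional in probe.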
+5 -1
drivers/media/video/gspca/sonixj.c
··· 2386 2386 reg_w1(gspca_dev, 0x01, 0x22); 2387 2387 msleep(100); 2388 2388 reg01 = SCL_SEL_OD | S_PDN_INV; 2389 - reg17 &= MCK_SIZE_MASK; 2389 + reg17 &= ~MCK_SIZE_MASK; 2390 2390 reg17 |= 0x04; /* clock / 4 */ 2391 2391 break; 2392 2392 } ··· 2532 2532 if (!mode) { /* if 640x480 */ 2533 2533 reg17 &= ~MCK_SIZE_MASK; 2534 2534 reg17 |= 0x04; /* clock / 4 */ 2535 + } else { 2536 + reg01 &= ~SYS_SEL_48M; /* 24 MHz clock */ 2537 + reg17 &= ~MCK_SIZE_MASK; 2538 + reg17 |= 0x02; /* clock / 2 */ 2535 2539 } 2536 2540 break; 2537 2541 case SENSOR_OV7630:
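The one-character fix above ('reg17 &= MCK_SIZE_MASK' vs 'reg17 &= ~MCK_SIZE_MASK') is the classic read-modify-write slip: clearing a field takes the complement of its mask, otherwise every bit outside the field is wiped instead. A tiny self-checking illustration, with an illustrative mask value:

        #include <assert.h>
        #include <stdint.h>

        #define MCK_SIZE_MASK 0x07      /* low three bits select the divider */

        int main(void)
        {
                uint8_t reg = 0xe1;

                /* correct: clear the field, keep the rest, set the new value */
                uint8_t good = (uint8_t)((reg & ~MCK_SIZE_MASK) | 0x04);
                assert(good == 0xe4);

                /* buggy: keeps only the field and destroys the other bits */
                uint8_t bad = (uint8_t)((reg & MCK_SIZE_MASK) | 0x04);
                assert(bad == 0x05);
                return 0;
        }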
+1 -1
drivers/media/video/pwc/pwc-v4l.c
··· 338 338 if (pdev->restore_factory) 339 339 pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE; 340 340 341 - if (!pdev->features & FEATURE_MOTOR_PANTILT) 341 + if (!(pdev->features & FEATURE_MOTOR_PANTILT)) 342 342 return hdl->error; 343 343 344 344 /* Motor pan / tilt / reset */
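The pwc fix is a precedence bug worth spelling out: '!' binds tighter than '&', so '!pdev->features & FEATURE_MOTOR_PANTILT' evaluates as '(!pdev->features) & FLAG' and is 0 whenever any feature bit at all is set, making the early return unreachable. A minimal demonstration with an illustrative flag value:

        #include <assert.h>

        #define FEATURE_MOTOR_PANTILT 0x02      /* illustrative value */

        int main(void)
        {
                unsigned features = 0x01;       /* pan/tilt bit is NOT set */

                /* buggy form: (!features) & FLAG == 0, the test never fires */
                assert((!features & FEATURE_MOTOR_PANTILT) == 0);

                /* fixed form: correctly detects the missing feature bit */
                assert(!(features & FEATURE_MOTOR_PANTILT));
                return 0;
        }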
+2
drivers/media/video/via-camera.c
··· 1332 1332 struct pci_bus *pbus = pci_find_bus(0, 0); 1333 1333 u8 cbyte; 1334 1334 1335 + if (!pbus) 1336 + return false; 1335 1337 pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN, 1336 1338 VIACAM_SERIAL_CREG, &cbyte); 1337 1339 if ((cbyte & VIACAM_SERIAL_BIT) == 0)
+5
drivers/mfd/max8997.c
··· 135 135 max8997->dev = &i2c->dev; 136 136 max8997->i2c = i2c; 137 137 max8997->type = id->driver_data; 138 + max8997->irq = i2c->irq; 138 139 139 140 if (!pdata) 140 141 goto err; 141 142 143 + max8997->irq_base = pdata->irq_base; 144 + max8997->ono = pdata->ono; 142 145 max8997->wakeup = pdata->wakeup; 143 146 144 147 mutex_init(&max8997->iolock); ··· 154 151 i2c_set_clientdata(max8997->muic, max8997); 155 152 156 153 pm_runtime_set_active(max8997->dev); 154 + 155 + max8997_irq_init(max8997); 157 156 158 157 mfd_add_devices(max8997->dev, -1, max8997_devs, 159 158 ARRAY_SIZE(max8997_devs),
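The max8997 change is about probe ordering: the i2c irq number and the platform data fields (irq_base, ono) must be captured, and max8997_irq_init() run, before mfd_add_devices() lets child drivers probe and start consuming those IRQs. The general shape of that rule, with hypothetical names:

        #include <stdio.h>

        struct chip { int irq; int irq_base; };
        struct pdata { int irq_base; };

        static void chip_irq_init(struct chip *c)
        {
                printf("wiring irq %d, base %d\n", c->irq, c->irq_base);
        }

        static void add_children(struct chip *c)
        {
                printf("children may now claim irqs from base %d\n", c->irq_base);
        }

        static int probe(struct chip *c, const struct pdata *p, int i2c_irq)
        {
                c->irq = i2c_irq;          /* copy everything children need... */
                c->irq_base = p->irq_base;
                chip_irq_init(c);          /* ...then wire up the interrupts... */
                add_children(c);           /* ...and only then register children */
                return 0;
        }

        int main(void)
        {
                struct chip c = { 0 };
                struct pdata p = { .irq_base = 256 };

                return probe(&c, &p, 17);
        }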
+1 -1
drivers/mfd/omap-usb-host.c
··· 17 17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 18 */ 19 19 #include <linux/kernel.h> 20 + #include <linux/module.h> 20 21 #include <linux/types.h> 21 22 #include <linux/slab.h> 22 23 #include <linux/delay.h> ··· 677 676 | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF 678 677 | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE); 679 678 680 - reg |= (1 << (i + 1)); 681 679 } else 682 680 continue; 683 681
+2
drivers/mfd/tps65910-irq.c
··· 178 178 switch (tps65910_chip_id(tps65910)) { 179 179 case TPS65910: 180 180 tps65910->irq_num = TPS65910_NUM_IRQ; 181 + break; 181 182 case TPS65911: 182 183 tps65910->irq_num = TPS65911_NUM_IRQ; 184 + break; 183 185 } 184 186 185 187 /* Register with genirq */
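The two added 'break' statements close a fall-through: without them the TPS65910 case dropped into the TPS65911 case and every chip ended up with the TPS65911 IRQ count. Reduced to a self-checking example (the IRQ counts here are illustrative, not the real values):

        #include <assert.h>

        enum chip_id { TPS65910, TPS65911 };

        #define TPS65910_NUM_IRQ 13     /* illustrative counts */
        #define TPS65911_NUM_IRQ 18

        static int irq_count(enum chip_id chip)
        {
                int n = 0;

                switch (chip) {
                case TPS65910:
                        n = TPS65910_NUM_IRQ;
                        break;          /* without this, falls through */
                case TPS65911:
                        n = TPS65911_NUM_IRQ;
                        break;
                }
                return n;
        }

        int main(void)
        {
                assert(irq_count(TPS65910) == TPS65910_NUM_IRQ);
                assert(irq_count(TPS65911) == TPS65911_NUM_IRQ);
                return 0;
        }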
+4 -1
drivers/mfd/twl4030-madc.c
··· 510 510 u8 ch_msb, ch_lsb; 511 511 int ret; 512 512 513 - if (!req) 513 + if (!req || !twl4030_madc) 514 514 return -EINVAL; 515 + 515 516 mutex_lock(&twl4030_madc->lock); 516 517 if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) { 517 518 ret = -EINVAL; ··· 706 705 madc = kzalloc(sizeof(*madc), GFP_KERNEL); 707 706 if (!madc) 708 707 return -ENOMEM; 708 + 709 + madc->dev = &pdev->dev; 709 710 710 711 /* 711 712 * Phoenix provides 2 interrupt lines. The first one is connected to
+2 -2
drivers/mfd/wm8350-gpio.c
··· 37 37 return ret; 38 38 } 39 39 40 - static int gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) 40 + static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) 41 41 { 42 42 if (db == WM8350_GPIO_DEBOUNCE_ON) 43 43 return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE, ··· 210 210 goto err; 211 211 if (gpio_set_polarity(wm8350, gpio, pol)) 212 212 goto err; 213 - if (gpio_set_debounce(wm8350, gpio, debounce)) 213 + if (wm8350_gpio_set_debounce(wm8350, gpio, debounce)) 214 214 goto err; 215 215 if (gpio_set_dir(wm8350, gpio, dir)) 216 216 goto err;
+5 -7
drivers/misc/pti.c
··· 165 165 static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc, 166 166 const char *thread_name) 167 167 { 168 + /* 169 + * Since we access the comm member in current's task_struct, we only 170 + * need to be as large as what 'comm' in that structure is. 171 + */ 172 + char comm[TASK_COMM_LEN]; 168 173 struct pti_masterchannel mccontrol = {.master = CONTROL_ID, 169 174 .channel = 0}; 170 175 const char *thread_name_p; ··· 177 172 u8 control_frame[CONTROL_FRAME_LEN]; 178 173 179 174 if (!thread_name) { 180 - /* 181 - * Since we access the comm member in current's task_struct, 182 - * we only need to be as large as what 'comm' in that 183 - * structure is. 184 - */ 185 - char comm[TASK_COMM_LEN]; 186 - 187 175 if (!in_interrupt()) 188 176 get_task_comm(comm, current); 189 177 else
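The pti change hoists 'comm' out of the inner block because the old code kept a pointer to it (thread_name_p) past the closing brace, that is, past the end of the buffer's lifetime. A standalone sketch of the hazard and the fix, with a made-up thread name:

        #include <stdio.h>
        #include <string.h>

        #define TASK_COMM_LEN 16

        static void build_control_frame(const char *thread_name)
        {
                char comm[TASK_COMM_LEN];     /* function scope: outlives the if */
                const char *thread_name_p;

                if (!thread_name) {
                        /* in the old layout 'comm' was declared inside this
                         * block, so thread_name_p dangled once it closed */
                        strncpy(comm, "kworker/0:1", sizeof(comm) - 1);
                        comm[sizeof(comm) - 1] = '\0';
                        thread_name_p = comm;
                } else {
                        thread_name_p = thread_name;
                }

                printf("control frame for %s\n", thread_name_p);  /* safe now */
        }

        int main(void)
        {
                build_control_frame(NULL);
                return 0;
        }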
+31 -4
drivers/mmc/core/core.c
··· 133 133 if (mrq->done) 134 134 mrq->done(mrq); 135 135 136 - mmc_host_clk_gate(host); 136 + mmc_host_clk_release(host); 137 137 } 138 138 } 139 139 ··· 192 192 mrq->stop->mrq = mrq; 193 193 } 194 194 } 195 - mmc_host_clk_ungate(host); 195 + mmc_host_clk_hold(host); 196 196 led_trigger_event(host->led, LED_FULL); 197 197 host->ops->request(host, mrq); 198 198 } ··· 728 728 */ 729 729 void mmc_set_chip_select(struct mmc_host *host, int mode) 730 730 { 731 + mmc_host_clk_hold(host); 731 732 host->ios.chip_select = mode; 732 733 mmc_set_ios(host); 734 + mmc_host_clk_release(host); 733 735 } 734 736 735 737 /* 736 738 * Sets the host clock to the highest possible frequency that 737 739 * is below "hz". 738 740 */ 739 - void mmc_set_clock(struct mmc_host *host, unsigned int hz) 741 + static void __mmc_set_clock(struct mmc_host *host, unsigned int hz) 740 742 { 741 743 WARN_ON(hz < host->f_min); 742 744 ··· 747 745 748 746 host->ios.clock = hz; 749 747 mmc_set_ios(host); 748 + } 749 + 750 + void mmc_set_clock(struct mmc_host *host, unsigned int hz) 751 + { 752 + mmc_host_clk_hold(host); 753 + __mmc_set_clock(host, hz); 754 + mmc_host_clk_release(host); 750 755 } 751 756 752 757 #ifdef CONFIG_MMC_CLKGATE ··· 788 779 if (host->clk_old) { 789 780 BUG_ON(host->ios.clock); 790 781 /* This call will also set host->clk_gated to false */ 791 - mmc_set_clock(host, host->clk_old); 782 + __mmc_set_clock(host, host->clk_old); 792 783 } 793 784 } 794 785 ··· 816 807 */ 817 808 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) 818 809 { 810 + mmc_host_clk_hold(host); 819 811 host->ios.bus_mode = mode; 820 812 mmc_set_ios(host); 813 + mmc_host_clk_release(host); 821 814 } 822 815 823 816 /* ··· 827 816 */ 828 817 void mmc_set_bus_width(struct mmc_host *host, unsigned int width) 829 818 { 819 + mmc_host_clk_hold(host); 830 820 host->ios.bus_width = width; 831 821 mmc_set_ios(host); 822 + mmc_host_clk_release(host); 832 823 } 833 824 834 825 /** ··· 1028 1015 1029 1016 ocr &= 3 << bit; 1030 1017 1018 + mmc_host_clk_hold(host); 1031 1019 host->ios.vdd = bit; 1032 1020 mmc_set_ios(host); 1021 + mmc_host_clk_release(host); 1033 1022 } else { 1034 1023 pr_warning("%s: host doesn't support card's voltages\n", 1035 1024 mmc_hostname(host)); ··· 1078 1063 */ 1079 1064 void mmc_set_timing(struct mmc_host *host, unsigned int timing) 1080 1065 { 1066 + mmc_host_clk_hold(host); 1081 1067 host->ios.timing = timing; 1082 1068 mmc_set_ios(host); 1069 + mmc_host_clk_release(host); 1083 1070 } 1084 1071 1085 1072 /* ··· 1089 1072 */ 1090 1073 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) 1091 1074 { 1075 + mmc_host_clk_hold(host); 1092 1076 host->ios.drv_type = drv_type; 1093 1077 mmc_set_ios(host); 1078 + mmc_host_clk_release(host); 1094 1079 } 1095 1080 1096 1081 /* ··· 1109 1090 static void mmc_power_up(struct mmc_host *host) 1110 1091 { 1111 1092 int bit; 1093 + 1094 + mmc_host_clk_hold(host); 1112 1095 1113 1096 /* If ocr is set, we use it */ 1114 1097 if (host->ocr) ··· 1147 1126 * time required to reach a stable voltage. 
1148 1127 */ 1149 1128 mmc_delay(10); 1129 + 1130 + mmc_host_clk_release(host); 1150 1131 } 1151 1132 1152 1133 static void mmc_power_off(struct mmc_host *host) 1153 1134 { 1135 + mmc_host_clk_hold(host); 1136 + 1154 1137 host->ios.clock = 0; 1155 1138 host->ios.vdd = 0; 1156 1139 ··· 1172 1147 host->ios.bus_width = MMC_BUS_WIDTH_1; 1173 1148 host->ios.timing = MMC_TIMING_LEGACY; 1174 1149 mmc_set_ios(host); 1150 + 1151 + mmc_host_clk_release(host); 1175 1152 } 1176 1153 1177 1154 /*
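Besides the rename from ungate/gate to hold/release, the mmc core change establishes a discipline: every path that programs the ios settings takes a clock reference first, so the hardware clock cannot be gated mid-update. A reduced, lock-free sketch of that refcount (the real code holds host->clk_lock and defers gating to a workqueue):

        #include <assert.h>

        struct host {
                int clk_requests;       /* protected by a spinlock in-kernel */
                int clk_gated;
                unsigned bus_width;
        };

        static void clk_hold(struct host *h)
        {
                if (h->clk_requests++ == 0 && h->clk_gated)
                        h->clk_gated = 0;       /* ungate for the first user */
        }

        static void clk_release(struct host *h)
        {
                if (--h->clk_requests == 0)
                        h->clk_gated = 1;       /* gate after the last user */
        }

        static void set_bus_width(struct host *h, unsigned width)
        {
                clk_hold(h);
                h->bus_width = width;   /* clock guaranteed running here */
                clk_release(h);
        }

        int main(void)
        {
                struct host h = { .clk_gated = 1 };

                set_bus_width(&h, 4);
                assert(h.clk_gated == 1 && h.bus_width == 4);
                return 0;
        }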
+6 -6
drivers/mmc/core/host.c
··· 119 119 } 120 120 121 121 /** 122 - * mmc_host_clk_ungate - ungate hardware MCI clocks 122 + * mmc_host_clk_hold - ungate hardware MCI clocks 123 123 * @host: host to ungate. 124 124 * 125 125 * Makes sure the host ios.clock is restored to a non-zero value 126 126 * past this call. Increase clock reference count and ungate clock 127 127 * if we're the first user. 128 128 */ 129 - void mmc_host_clk_ungate(struct mmc_host *host) 129 + void mmc_host_clk_hold(struct mmc_host *host) 130 130 { 131 131 unsigned long flags; 132 132 ··· 164 164 } 165 165 166 166 /** 167 - * mmc_host_clk_gate - gate off hardware MCI clocks 167 + * mmc_host_clk_release - gate off hardware MCI clocks 168 168 * @host: host to gate. 169 169 * 170 170 * Calls the host driver with ios.clock set to zero as often as possible 171 171 * in order to gate off hardware MCI clocks. Decrease clock reference 172 172 * count and schedule disabling of clock. 173 173 */ 174 - void mmc_host_clk_gate(struct mmc_host *host) 174 + void mmc_host_clk_release(struct mmc_host *host) 175 175 { 176 176 unsigned long flags; 177 177 ··· 179 179 host->clk_requests--; 180 180 if (mmc_host_may_gate_card(host->card) && 181 181 !host->clk_requests) 182 - schedule_work(&host->clk_gate_work); 182 + queue_work(system_nrt_wq, &host->clk_gate_work); 183 183 spin_unlock_irqrestore(&host->clk_lock, flags); 184 184 } 185 185 ··· 231 231 if (cancel_work_sync(&host->clk_gate_work)) 232 232 mmc_host_clk_gate_delayed(host); 233 233 if (host->clk_gated) 234 - mmc_host_clk_ungate(host); 234 + mmc_host_clk_hold(host); 235 235 /* There should be only one user now */ 236 236 WARN_ON(host->clk_requests > 1); 237 237 }
+4 -4
drivers/mmc/core/host.h
··· 16 16 void mmc_unregister_host_class(void); 17 17 18 18 #ifdef CONFIG_MMC_CLKGATE 19 - void mmc_host_clk_ungate(struct mmc_host *host); 20 - void mmc_host_clk_gate(struct mmc_host *host); 19 + void mmc_host_clk_hold(struct mmc_host *host); 20 + void mmc_host_clk_release(struct mmc_host *host); 21 21 unsigned int mmc_host_clk_rate(struct mmc_host *host); 22 22 23 23 #else 24 - static inline void mmc_host_clk_ungate(struct mmc_host *host) 24 + static inline void mmc_host_clk_hold(struct mmc_host *host) 25 25 { 26 26 } 27 27 28 - static inline void mmc_host_clk_gate(struct mmc_host *host) 28 + static inline void mmc_host_clk_release(struct mmc_host *host) 29 29 { 30 30 } 31 31
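Note the pattern the header keeps intact through the rename: when CONFIG_MMC_CLKGATE is off, the entry points still exist as empty static inlines, so callers in core.c stay free of #ifdefs. The idiom in generic form, with placeholder names:

        struct foo;

        #ifdef CONFIG_FOO_GATING
        void foo_hold(struct foo *f);
        void foo_release(struct foo *f);
        #else
        static inline void foo_hold(struct foo *f) { (void)f; }
        static inline void foo_release(struct foo *f) { (void)f; }
        #endif

        int main(void)
        {
                foo_hold(0);    /* compiles to nothing when gating is off */
                foo_release(0);
                return 0;
        }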
+53 -28
drivers/mmc/core/sd.c
··· 469 469 return 0; 470 470 } 471 471 472 - static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) 472 + static void sd_update_bus_speed_mode(struct mmc_card *card) 473 473 { 474 - unsigned int bus_speed = 0, timing = 0; 475 - int err; 476 - 477 474 /* 478 475 * If the host doesn't support any of the UHS-I modes, fallback on 479 476 * default speed. 480 477 */ 481 478 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 482 - MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) 483 - return 0; 479 + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) { 480 + card->sd_bus_speed = 0; 481 + return; 482 + } 484 483 485 484 if ((card->host->caps & MMC_CAP_UHS_SDR104) && 486 485 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) { 487 - bus_speed = UHS_SDR104_BUS_SPEED; 488 - timing = MMC_TIMING_UHS_SDR104; 489 - card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR; 486 + card->sd_bus_speed = UHS_SDR104_BUS_SPEED; 490 487 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && 491 488 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { 492 - bus_speed = UHS_DDR50_BUS_SPEED; 493 - timing = MMC_TIMING_UHS_DDR50; 494 - card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR; 489 + card->sd_bus_speed = UHS_DDR50_BUS_SPEED; 495 490 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 496 491 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & 497 492 SD_MODE_UHS_SDR50)) { 498 - bus_speed = UHS_SDR50_BUS_SPEED; 499 - timing = MMC_TIMING_UHS_SDR50; 500 - card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR; 493 + card->sd_bus_speed = UHS_SDR50_BUS_SPEED; 501 494 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 502 495 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && 503 496 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { 504 - bus_speed = UHS_SDR25_BUS_SPEED; 505 - timing = MMC_TIMING_UHS_SDR25; 506 - card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR; 497 + card->sd_bus_speed = UHS_SDR25_BUS_SPEED; 507 498 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | 508 499 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | 509 500 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & 510 501 SD_MODE_UHS_SDR12)) { 511 - bus_speed = UHS_SDR12_BUS_SPEED; 512 - timing = MMC_TIMING_UHS_SDR12; 513 - card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; 502 + card->sd_bus_speed = UHS_SDR12_BUS_SPEED; 503 + } 504 + } 505 + 506 + static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) 507 + { 508 + int err; 509 + unsigned int timing = 0; 510 + 511 + switch (card->sd_bus_speed) { 512 + case UHS_SDR104_BUS_SPEED: 513 + timing = MMC_TIMING_UHS_SDR104; 514 + card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR; 515 + break; 516 + case UHS_DDR50_BUS_SPEED: 517 + timing = MMC_TIMING_UHS_DDR50; 518 + card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR; 519 + break; 520 + case UHS_SDR50_BUS_SPEED: 521 + timing = MMC_TIMING_UHS_SDR50; 522 + card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR; 523 + break; 524 + case UHS_SDR25_BUS_SPEED: 525 + timing = MMC_TIMING_UHS_SDR25; 526 + card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR; 527 + break; 528 + case UHS_SDR12_BUS_SPEED: 529 + timing = MMC_TIMING_UHS_SDR12; 530 + card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; 531 + break; 532 + default: 533 + return 0; 514 534 } 515 535 516 - card->sd_bus_speed = bus_speed; 517 - err = mmc_sd_switch(card, 1, 0, bus_speed, status); 536 + err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status); 518 537 if (err) 519 538 return err; 520 539 521 - if ((status[16] & 0xF) != bus_speed) 540 + if ((status[16] & 0xF) != card->sd_bus_speed) 522 541 
printk(KERN_WARNING "%s: Problem setting bus speed mode!\n", 523 542 mmc_hostname(card->host)); 524 543 else { ··· 637 618 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); 638 619 } 639 620 621 + /* 622 + * Select the bus speed mode depending on host 623 + * and card capability. 624 + */ 625 + sd_update_bus_speed_mode(card); 626 + 640 627 /* Set the driver strength for the card */ 641 628 err = sd_select_driver_type(card, status); 642 629 if (err) 643 630 goto out; 644 631 645 - /* Set bus speed mode of the card */ 646 - err = sd_set_bus_speed_mode(card, status); 632 + /* Set current limit for the card */ 633 + err = sd_set_current_limit(card, status); 647 634 if (err) 648 635 goto out; 649 636 650 - /* Set current limit for the card */ 651 - err = sd_set_current_limit(card, status); 637 + /* Set bus speed mode of the card */ 638 + err = sd_set_bus_speed_mode(card, status); 652 639 if (err) 653 640 goto out; 654 641
+1
drivers/mmc/host/sdhci-esdhc-imx.c
··· 16 16 #include <linux/err.h> 17 17 #include <linux/clk.h> 18 18 #include <linux/gpio.h> 19 + #include <linux/module.h> 19 20 #include <linux/slab.h> 20 21 #include <linux/mmc/host.h> 21 22 #include <linux/mmc/mmc.h>
+2
drivers/mmc/host/sdhci-s3c.c
··· 302 302 ctrl &= ~SDHCI_CTRL_8BITBUS; 303 303 break; 304 304 default: 305 + ctrl &= ~SDHCI_CTRL_4BITBUS; 306 + ctrl &= ~SDHCI_CTRL_8BITBUS; 305 307 break; 306 308 } 307 309
+2 -2
drivers/mmc/host/sh_mobile_sdhi.c
··· 120 120 mmc_data->hclk = clk_get_rate(priv->clk); 121 121 mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; 122 122 mmc_data->get_cd = sh_mobile_sdhi_get_cd; 123 - if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT) 124 - mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; 125 123 mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; 126 124 if (p) { 127 125 mmc_data->flags = p->tmio_flags; 126 + if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT) 127 + mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; 128 128 mmc_data->ocr_mask = p->tmio_ocr_mask; 129 129 mmc_data->capabilities |= p->tmio_caps; 130 130
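The sh_mobile_sdhi fix is another ordering bug: mmc_data->flags was tested before the 'mmc_data->flags = p->tmio_flags' assignment inside the 'if (p)' block, so the write16 hook could never be installed. The hazard in miniature, with an illustrative flag bit:

        #include <stdio.h>

        #define HAS_IDLE_WAIT 0x1               /* illustrative flag bit */

        struct mmc_cfg {
                unsigned flags;
                void (*write16_hook)(void);
        };

        static void hook(void) { puts("hook installed"); }

        static void setup(struct mmc_cfg *c, const unsigned *platform_flags)
        {
                if (platform_flags) {
                        c->flags = *platform_flags;     /* assign first... */
                        if (c->flags & HAS_IDLE_WAIT)   /* ...then test */
                                c->write16_hook = hook;
                }
        }

        int main(void)
        {
                struct mmc_cfg c = { 0 };
                unsigned pf = HAS_IDLE_WAIT;

                setup(&c, &pf);
                if (c.write16_hook)
                        c.write16_hook();
                return 0;
        }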
+1 -1
drivers/mtd/ubi/debug.h
··· 181 181 182 182 #define ubi_dbg_msg(fmt, ...) do { \ 183 183 if (0) \ 184 - pr_debug(fmt "\n", ##__VA_ARGS__); \ 184 + printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \ 185 185 } while (0) 186 186 187 187 #define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
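The 'if (0)' wrapper compiles the call away while still letting the compiler type-check the format string and its arguments. The switch back to a plain printk(KERN_DEBUG ...) plausibly avoids pr_debug(), which under dynamic debug expands to more than a bare printk and can register the call site even in dead code; treat that rationale as an inference from the diff. Reduced illustration of the type-checking trick:

        #include <stdio.h>

        #define dbg_msg(fmt, ...) do {                          \
                if (0)                                          \
                        printf(fmt "\n", ##__VA_ARGS__);        \
        } while (0)

        int main(void)
        {
                dbg_msg("checked at compile time, never emitted: %d", 42);
                return 0;
        }

A mismatched format and argument still draws a compiler warning here, which is the whole point of keeping the dead call instead of an empty macro.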
+6 -5
drivers/net/Kconfig
··· 2535 2535 source "drivers/net/stmmac/Kconfig" 2536 2536 2537 2537 config PCH_GBE 2538 - tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE" 2538 + tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" 2539 2539 depends on PCI 2540 2540 select MII 2541 2541 ---help--- ··· 2548 2548 This driver enables Gigabit Ethernet function. 2549 2549 2550 2550 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ 2551 - Output Hub), ML7223. 2552 - ML7223 IOH is for MP(Media Phone) use. 2553 - ML7223 is companion chip for Intel Atom E6xx series. 2554 - ML7223 is completely compatible for Intel EG20T PCH. 2551 + Output Hub), ML7223/ML7831. 2552 + ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general 2553 + purpose use. 2554 + ML7223/ML7831 is a companion chip for the Intel Atom E6xx series. 2555 + ML7223/ML7831 is fully compatible with the Intel EG20T PCH. 2555 2556 2556 2557 config FTGMAC100 2557 2558 tristate "Faraday FTGMAC100 Gigabit Ethernet support"
+3
drivers/net/arm/am79c961a.c
··· 308 308 struct net_device *dev = (struct net_device *)data; 309 309 struct dev_priv *priv = netdev_priv(dev); 310 310 unsigned int lnkstat, carrier; 311 + unsigned long flags; 311 312 313 + spin_lock_irqsave(&priv->chip_lock, flags); 312 314 lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; 315 + spin_unlock_irqrestore(&priv->chip_lock, flags); 313 316 carrier = netif_carrier_ok(dev); 314 317 315 318 if (lnkstat && !carrier) {
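The am79c961 timer path now takes chip_lock with IRQs saved around read_ireg(), because the chip is addressed through an index/data register pair and that two-step sequence must not interleave with the interrupt handler's own register accesses. A user-space stand-in for the locking shape (a pthread mutex in place of spin_lock_irqsave, register contents invented):

        #include <pthread.h>
        #include <stdio.h>

        static pthread_mutex_t chip_lock = PTHREAD_MUTEX_INITIALIZER;
        static unsigned chip_index;
        static unsigned chip_regs[8] = { [3] = 0x40 };  /* pretend link bit */

        static unsigned read_ireg(unsigned idx)
        {
                unsigned val;

                /* the index write and the data read form one critical
                 * section; splitting them corrupts concurrent readers */
                pthread_mutex_lock(&chip_lock);
                chip_index = idx;
                val = chip_regs[chip_index];
                pthread_mutex_unlock(&chip_lock);
                return val;
        }

        int main(void)
        {
                printf("link status: 0x%02x\n", read_ireg(3));
                return 0;
        }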
+91 -31
drivers/net/bnx2x/bnx2x.h
··· 315 315 u32 raw; 316 316 }; 317 317 318 + /* dropless fc FW/HW related params */ 319 + #define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512) 320 + #define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \ 321 + ETH_MAX_AGGREGATION_QUEUES_E1 :\ 322 + ETH_MAX_AGGREGATION_QUEUES_E1H_E2) 323 + #define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp)) 324 + #define FW_PREFETCH_CNT 16 325 + #define DROPLESS_FC_HEADROOM 100 318 326 319 327 /* MC hsi */ 320 328 #define BCM_PAGE_SHIFT 12 ··· 339 331 /* SGE ring related macros */ 340 332 #define NUM_RX_SGE_PAGES 2 341 333 #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) 342 - #define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) 334 + #define NEXT_PAGE_SGE_DESC_CNT 2 335 + #define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT) 343 336 /* RX_SGE_CNT is promised to be a power of 2 */ 344 337 #define RX_SGE_MASK (RX_SGE_CNT - 1) 345 338 #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) 346 339 #define MAX_RX_SGE (NUM_RX_SGE - 1) 347 340 #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ 348 - (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) 341 + (MAX_RX_SGE_CNT - 1)) ? \ 342 + (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \ 343 + (x) + 1) 349 344 #define RX_SGE(x) ((x) & MAX_RX_SGE) 345 + 346 + /* 347 + * Number of required SGEs is the sum of two: 348 + * 1. Number of possible opened aggregations (next packet for 349 + * these aggregations will probably consume SGE immidiatelly) 350 + * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only 351 + * after placement on BD for new TPA aggregation) 352 + * 353 + * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page 354 + */ 355 + #define NUM_SGE_REQ (MAX_AGG_QS(bp) + \ 356 + (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2) 357 + #define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \ 358 + MAX_RX_SGE_CNT) 359 + #define SGE_TH_LO(bp) (NUM_SGE_REQ + \ 360 + NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT) 361 + #define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM) 350 362 351 363 /* Manipulate a bit vector defined as an array of u64 */ 352 364 ··· 579 551 580 552 #define NUM_TX_RINGS 16 581 553 #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) 582 - #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) 554 + #define NEXT_PAGE_TX_DESC_CNT 1 555 + #define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT) 583 556 #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) 584 557 #define MAX_TX_BD (NUM_TX_BD - 1) 585 558 #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) 586 559 #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ 587 - (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) 560 + (MAX_TX_DESC_CNT - 1)) ? 
\ 561 + (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \ 562 + (x) + 1) 588 563 #define TX_BD(x) ((x) & MAX_TX_BD) 589 564 #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) 590 565 591 566 /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ 592 567 #define NUM_RX_RINGS 8 593 568 #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) 594 - #define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) 569 + #define NEXT_PAGE_RX_DESC_CNT 2 570 + #define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT) 595 571 #define RX_DESC_MASK (RX_DESC_CNT - 1) 596 572 #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) 597 573 #define MAX_RX_BD (NUM_RX_BD - 1) 598 574 #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) 599 - #define MIN_RX_AVAIL 128 575 + 576 + /* dropless fc calculations for BDs 577 + * 578 + * Number of BDs should as number of buffers in BRB: 579 + * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT 580 + * "next" elements on each page 581 + */ 582 + #define NUM_BD_REQ BRB_SIZE(bp) 583 + #define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \ 584 + MAX_RX_DESC_CNT) 585 + #define BD_TH_LO(bp) (NUM_BD_REQ + \ 586 + NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \ 587 + FW_DROP_LEVEL(bp)) 588 + #define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM) 589 + 590 + #define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128) 600 591 601 592 #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ 602 593 ETH_MIN_RX_CQES_WITH_TPA_E1 : \ ··· 626 579 MIN_RX_AVAIL)) 627 580 628 581 #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ 629 - (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) 582 + (MAX_RX_DESC_CNT - 1)) ? \ 583 + (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \ 584 + (x) + 1) 630 585 #define RX_BD(x) ((x) & MAX_RX_BD) 631 586 632 587 /* ··· 638 589 #define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) 639 590 #define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) 640 591 #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) 641 - #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) 592 + #define NEXT_PAGE_RCQ_DESC_CNT 1 593 + #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT) 642 594 #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) 643 595 #define MAX_RCQ_BD (NUM_RCQ_BD - 1) 644 596 #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) 645 597 #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ 646 - (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) 598 + (MAX_RCQ_DESC_CNT - 1)) ? 
\ 599 + (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \ 600 + (x) + 1) 647 601 #define RCQ_BD(x) ((x) & MAX_RCQ_BD) 602 + 603 + /* dropless fc calculations for RCQs 604 + * 605 + * Number of RCQs should be as number of buffers in BRB: 606 + * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT 607 + * "next" elements on each page 608 + */ 609 + #define NUM_RCQ_REQ BRB_SIZE(bp) 610 + #define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \ 611 + MAX_RCQ_DESC_CNT) 612 + #define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \ 613 + NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \ 614 + FW_DROP_LEVEL(bp)) 615 + #define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) 648 616 649 617 650 618 /* This is needed for determining of last_max */ ··· 751 685 #define FP_CSB_FUNC_OFF \ 752 686 offsetof(struct cstorm_status_block_c, func) 753 687 754 - #define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */ 755 - /* (HC_INDEX_U_TOE_RX_CQ_CONS) */ 756 - #define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */ 757 - /* (HC_INDEX_U_ETH_RX_CQ_CONS) */ 758 - #define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */ 759 - /* (HC_INDEX_U_ETH_RX_BD_CONS) */ 688 + #define HC_INDEX_ETH_RX_CQ_CONS 1 760 689 761 - #define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */ 762 - /* (HC_INDEX_C_TOE_TX_CQ_CONS) */ 763 - #define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */ 764 - /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ 765 - #define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */ 766 - /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ 767 - #define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */ 768 - /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ 690 + #define HC_INDEX_OOO_TX_CQ_CONS 4 691 + 692 + #define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 693 + 694 + #define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 695 + 696 + #define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 769 697 770 698 #define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 771 - 772 699 773 700 #define BNX2X_RX_SB_INDEX \ 774 701 (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) ··· 1159 1100 #define BP_PORT(bp) (bp->pfid & 1) 1160 1101 #define BP_FUNC(bp) (bp->pfid) 1161 1102 #define BP_ABS_FUNC(bp) (bp->pf_num) 1162 - #define BP_E1HVN(bp) (bp->pfid >> 1) 1163 - #define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/ 1164 - #define BP_L_ID(bp) (BP_E1HVN(bp) << 2) 1165 - #define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ 1166 - BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) 1103 + #define BP_VN(bp) ((bp)->pfid >> 1) 1104 + #define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4) 1105 + #define BP_L_ID(bp) (BP_VN(bp) << 2) 1106 + #define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\ 1107 + (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) 1108 + #define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp)) 1167 1109 1168 1110 struct net_device *dev; 1169 1111 struct pci_dev *pdev; ··· 1827 1767 1828 1768 #define MAX_DMAE_C_PER_PORT 8 1829 1769 #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 1830 - BP_E1HVN(bp)) 1770 + BP_VN(bp)) 1831 1771 #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 1832 1772 E1HVN_MAX) 1833 1773 ··· 1853 1793 1854 1794 /* must be used on a CID before placing it on a HW ring */ 1855 1795 #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ 1856 - (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ 1796 + (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \ 1857 1797 (x)) 1858 1798 1859 1799 #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
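To make the new dropless-FC threshold macros concrete, here is the arithmetic worked through once. Every constant below is an illustrative stand-in (BRB_SIZE 512 as for a non-E3 chip, 64 aggregation queues, MAX_SPQ_PENDING assumed to be 8, and MAX_RX_SGE_CNT derived from a 4 KiB page of 8-byte SGEs minus the two next-page entries); consult the real headers before trusting the exact numbers:

        #include <stdio.h>

        int main(void)
        {
                int brb_size = 512;             /* BRB buffers, non-E3 (assumed) */
                int max_agg_qs = 64;            /* TPA aggregation queues (assumed) */
                int max_spq_pending = 8;        /* assumed */
                int headroom = 100;             /* DROPLESS_FC_HEADROOM */
                int next_page_sge = 2;          /* NEXT_PAGE_SGE_DESC_CNT */
                int max_rx_sge_cnt = 4096 / 8 - next_page_sge;  /* 510 */

                int fw_drop_level = 3 + max_spq_pending + max_agg_qs;
                int num_sge_req = max_agg_qs + (brb_size - max_agg_qs) / 2;
                int num_sge_pg = (num_sge_req + max_rx_sge_cnt - 1) / max_rx_sge_cnt;
                int sge_th_lo = num_sge_req + num_sge_pg * next_page_sge;
                int sge_th_hi = sge_th_lo + headroom;

                /* with these inputs: FW_DROP_LEVEL=75 SGE_TH_LO=290 SGE_TH_HI=390 */
                printf("FW_DROP_LEVEL=%d SGE_TH_LO=%d SGE_TH_HI=%d\n",
                       fw_drop_level, sge_th_lo, sge_th_hi);
                return 0;
        }

The low threshold is the worst-case demand (open aggregations plus half the remaining BRB blocks, plus next-page descriptors); the high threshold simply adds the fixed headroom before flow control releases.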
+14 -13
drivers/net/bnx2x/bnx2x_cmn.c
··· 987 987 void bnx2x_init_rx_rings(struct bnx2x *bp) 988 988 { 989 989 int func = BP_FUNC(bp); 990 - int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : 991 - ETH_MAX_AGGREGATION_QUEUES_E1H_E2; 992 990 u16 ring_prod; 993 991 int i, j; 994 992 ··· 999 1001 1000 1002 if (!fp->disable_tpa) { 1001 1003 /* Fill the per-aggregtion pool */ 1002 - for (i = 0; i < max_agg_queues; i++) { 1004 + for (i = 0; i < MAX_AGG_QS(bp); i++) { 1003 1005 struct bnx2x_agg_info *tpa_info = 1004 1006 &fp->tpa_info[i]; 1005 1007 struct sw_rx_bd *first_buf = ··· 1039 1041 bnx2x_free_rx_sge_range(bp, fp, 1040 1042 ring_prod); 1041 1043 bnx2x_free_tpa_pool(bp, fp, 1042 - max_agg_queues); 1044 + MAX_AGG_QS(bp)); 1043 1045 fp->disable_tpa = 1; 1044 1046 ring_prod = 0; 1045 1047 break; ··· 1135 1137 bnx2x_free_rx_bds(fp); 1136 1138 1137 1139 if (!fp->disable_tpa) 1138 - bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? 1139 - ETH_MAX_AGGREGATION_QUEUES_E1 : 1140 - ETH_MAX_AGGREGATION_QUEUES_E1H_E2); 1140 + bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); 1141 1141 } 1142 1142 } 1143 1143 ··· 3091 3095 struct bnx2x_fastpath *fp = &bp->fp[index]; 3092 3096 int ring_size = 0; 3093 3097 u8 cos; 3098 + int rx_ring_size = 0; 3094 3099 3095 3100 /* if rx_ring_size specified - use it */ 3096 - int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : 3097 - MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); 3101 + if (!bp->rx_ring_size) { 3098 3102 3099 - /* allocate at least number of buffers required by FW */ 3100 - rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : 3101 - MIN_RX_SIZE_TPA, 3102 - rx_ring_size); 3103 + rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); 3104 + 3105 + /* allocate at least number of buffers required by FW */ 3106 + rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : 3107 + MIN_RX_SIZE_TPA, rx_ring_size); 3108 + 3109 + bp->rx_ring_size = rx_ring_size; 3110 + } else 3111 + rx_ring_size = bp->rx_ring_size; 3103 3112 3104 3113 /* Common */ 3105 3114 sb = &bnx2x_fp(bp, index, status_blk);
+41 -7
drivers/net/bnx2x/bnx2x_ethtool.c
··· 363 363 } 364 364 365 365 /* advertise the requested speed and duplex if supported */ 366 - cmd->advertising &= bp->port.supported[cfg_idx]; 366 + if (cmd->advertising & ~(bp->port.supported[cfg_idx])) { 367 + DP(NETIF_MSG_LINK, "Advertisement parameters " 368 + "are not supported\n"); 369 + return -EINVAL; 370 + } 367 371 368 372 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; 369 - bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL; 370 - bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg | 373 + bp->link_params.req_duplex[cfg_idx] = cmd->duplex; 374 + bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg | 371 375 cmd->advertising); 376 + if (cmd->advertising) { 372 377 378 + bp->link_params.speed_cap_mask[cfg_idx] = 0; 379 + if (cmd->advertising & ADVERTISED_10baseT_Half) { 380 + bp->link_params.speed_cap_mask[cfg_idx] |= 381 + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF; 382 + } 383 + if (cmd->advertising & ADVERTISED_10baseT_Full) 384 + bp->link_params.speed_cap_mask[cfg_idx] |= 385 + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL; 386 + 387 + if (cmd->advertising & ADVERTISED_100baseT_Full) 388 + bp->link_params.speed_cap_mask[cfg_idx] |= 389 + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL; 390 + 391 + if (cmd->advertising & ADVERTISED_100baseT_Half) { 392 + bp->link_params.speed_cap_mask[cfg_idx] |= 393 + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF; 394 + } 395 + if (cmd->advertising & ADVERTISED_1000baseT_Half) { 396 + bp->link_params.speed_cap_mask[cfg_idx] |= 397 + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; 398 + } 399 + if (cmd->advertising & (ADVERTISED_1000baseT_Full | 400 + ADVERTISED_1000baseKX_Full)) 401 + bp->link_params.speed_cap_mask[cfg_idx] |= 402 + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; 403 + 404 + if (cmd->advertising & (ADVERTISED_10000baseT_Full | 405 + ADVERTISED_10000baseKX4_Full | 406 + ADVERTISED_10000baseKR_Full)) 407 + bp->link_params.speed_cap_mask[cfg_idx] |= 408 + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; 409 + } 373 410 } else { /* forced speed */ 374 411 /* advertise the requested speed and duplex if supported */ 375 412 switch (speed) { ··· 1347 1310 if (bp->rx_ring_size) 1348 1311 ering->rx_pending = bp->rx_ring_size; 1349 1312 else 1350 - if (bp->state == BNX2X_STATE_OPEN && bp->num_queues) 1351 - ering->rx_pending = MAX_RX_AVAIL/bp->num_queues; 1352 - else 1353 - ering->rx_pending = MAX_RX_AVAIL; 1313 + ering->rx_pending = MAX_RX_AVAIL; 1354 1314 1355 1315 ering->rx_mini_pending = 0; 1356 1316 ering->rx_jumbo_pending = 0;
+23 -23
drivers/net/bnx2x/bnx2x_link.c
··· 778 778 { 779 779 u32 nig_reg_adress_crd_weight = 0; 780 780 u32 pbf_reg_adress_crd_weight = 0; 781 - /* Calculate and set BW for this COS*/ 782 - const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw; 783 - const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw; 781 + /* Calculate and set BW for this COS - use 1 instead of 0 for BW */ 782 + const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw; 783 + const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw; 784 784 785 785 switch (cos_entry) { 786 786 case 0: ··· 852 852 /* Calculate total BW requested */ 853 853 for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { 854 854 if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { 855 - 856 - if (0 == ets_params->cos[cos_idx].params.bw_params.bw) { 857 - DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW" 858 - "was set to 0\n"); 859 - return -EINVAL; 855 + *total_bw += 856 + ets_params->cos[cos_idx].params.bw_params.bw; 860 857 } 861 - *total_bw += 862 - ets_params->cos[cos_idx].params.bw_params.bw; 863 - } 864 858 } 865 859 866 - /*Check taotl BW is valid */ 860 + /* Check total BW is valid */ 867 861 if ((100 != *total_bw) || (0 == *total_bw)) { 868 862 if (0 == *total_bw) { 869 863 DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW" ··· 1720 1726 1721 1727 /* Check loopback mode */ 1722 1728 if (lb) 1723 - val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK; 1729 + val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK; 1724 1730 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); 1725 1731 bnx2x_set_xumac_nig(params, 1726 1732 ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); ··· 3623 3629 /* Advertised speeds */ 3624 3630 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3625 3631 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); 3632 + 3633 + /* Advertised and set FEC (Forward Error Correction) */ 3634 + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3635 + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2, 3636 + (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY | 3637 + MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ)); 3626 3638 3627 3639 /* Enable CL37 BAM */ 3628 3640 if (REG_RD(bp, params->shmem_base + ··· 5924 5924 (tmp | EMAC_LED_OVERRIDE)); 5925 5925 /* 5926 5926 * return here without enabling traffic 5927 - * LED blink andsetting rate in ON mode. 5927 + * LED blink and setting rate in ON mode. 5928 5928 * In oper mode, enabling LED blink 5929 5929 * and setting rate is needed. 
5930 5930 */ ··· 5936 5936 * This is a work-around for HW issue found when link 5937 5937 * is up in CL73 5938 5938 */ 5939 - REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); 5939 + if ((!CHIP_IS_E3(bp)) || 5940 + (CHIP_IS_E3(bp) && 5941 + mode == LED_MODE_ON)) 5942 + REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); 5943 + 5940 5944 if (CHIP_IS_E1x(bp) || 5941 5945 CHIP_IS_E2(bp) || 5942 5946 (mode == LED_MODE_ON)) ··· 10642 10638 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, 10643 10639 .addr = 0xff, 10644 10640 .def_md_devad = 0, 10645 - .flags = (FLAGS_HW_LOCK_REQUIRED | 10646 - FLAGS_TX_ERROR_CHECK), 10641 + .flags = FLAGS_HW_LOCK_REQUIRED, 10647 10642 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10648 10643 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10649 10644 .mdio_ctrl = 0, ··· 10768 10765 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, 10769 10766 .addr = 0xff, 10770 10767 .def_md_devad = 0, 10771 - .flags = (FLAGS_INIT_XGXS_FIRST | 10772 - FLAGS_TX_ERROR_CHECK), 10768 + .flags = FLAGS_INIT_XGXS_FIRST, 10773 10769 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10774 10770 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10775 10771 .mdio_ctrl = 0, ··· 10799 10797 .addr = 0xff, 10800 10798 .def_md_devad = 0, 10801 10799 .flags = (FLAGS_HW_LOCK_REQUIRED | 10802 - FLAGS_INIT_XGXS_FIRST | 10803 - FLAGS_TX_ERROR_CHECK), 10800 + FLAGS_INIT_XGXS_FIRST), 10804 10801 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10805 10802 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10806 10803 .mdio_ctrl = 0, ··· 10830 10829 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 10831 10830 .addr = 0xff, 10832 10831 .def_md_devad = 0, 10833 - .flags = (FLAGS_FAN_FAILURE_DET_REQ | 10834 - FLAGS_TX_ERROR_CHECK), 10832 + .flags = FLAGS_FAN_FAILURE_DET_REQ, 10835 10833 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10836 10834 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10837 10835 .mdio_ctrl = 0,
+115 -47
drivers/net/bnx2x/bnx2x_main.c
··· 407 407 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); 408 408 409 409 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); 410 - opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) | 411 - (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); 410 + opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) | 411 + (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); 412 412 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); 413 413 414 414 #ifdef __BIG_ENDIAN ··· 1419 1419 if (!CHIP_IS_E1(bp)) { 1420 1420 /* init leading/trailing edge */ 1421 1421 if (IS_MF(bp)) { 1422 - val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); 1422 + val = (0xee0f | (1 << (BP_VN(bp) + 4))); 1423 1423 if (bp->port.pmf) 1424 1424 /* enable nig and gpio3 attention */ 1425 1425 val |= 0x1100; ··· 1471 1471 1472 1472 /* init leading/trailing edge */ 1473 1473 if (IS_MF(bp)) { 1474 - val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); 1474 + val = (0xee0f | (1 << (BP_VN(bp) + 4))); 1475 1475 if (bp->port.pmf) 1476 1476 /* enable nig and gpio3 attention */ 1477 1477 val |= 0x1100; ··· 2287 2287 int vn; 2288 2288 2289 2289 bp->vn_weight_sum = 0; 2290 - for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2290 + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2291 2291 u32 vn_cfg = bp->mf_config[vn]; 2292 2292 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2293 2293 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; ··· 2320 2320 CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2321 2321 } 2322 2322 2323 + /* returns func by VN for current port */ 2324 + static inline int func_by_vn(struct bnx2x *bp, int vn) 2325 + { 2326 + return 2 * vn + BP_PORT(bp); 2327 + } 2328 + 2323 2329 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) 2324 2330 { 2325 2331 struct rate_shaping_vars_per_vn m_rs_vn; 2326 2332 struct fairness_vars_per_vn m_fair_vn; 2327 2333 u32 vn_cfg = bp->mf_config[vn]; 2328 - int func = 2*vn + BP_PORT(bp); 2334 + int func = func_by_vn(bp, vn); 2329 2335 u16 vn_min_rate, vn_max_rate; 2330 2336 int i; 2331 2337 ··· 2428 2422 * 2429 2423 * and there are 2 functions per port 2430 2424 */ 2431 - for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2425 + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2432 2426 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); 2433 2427 2434 2428 if (func >= E1H_FUNC_MAX) ··· 2460 2454 2461 2455 /* calculate and set min-max rate for each vn */ 2462 2456 if (bp->port.pmf) 2463 - for (vn = VN_0; vn < E1HVN_MAX; vn++) 2457 + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) 2464 2458 bnx2x_init_vn_minmax(bp, vn); 2465 2459 2466 2460 /* always enable rate shaping and fairness */ ··· 2479 2473 2480 2474 static inline void bnx2x_link_sync_notify(struct bnx2x *bp) 2481 2475 { 2482 - int port = BP_PORT(bp); 2483 2476 int func; 2484 2477 int vn; 2485 2478 2486 2479 /* Set the attention towards other drivers on the same port */ 2487 - for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2488 - if (vn == BP_E1HVN(bp)) 2480 + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2481 + if (vn == BP_VN(bp)) 2489 2482 continue; 2490 2483 2491 - func = ((vn << 1) | port); 2484 + func = func_by_vn(bp, vn); 2492 2485 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + 2493 2486 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); 2494 2487 } ··· 2582 2577 bnx2x_dcbx_pmf_update(bp); 2583 2578 2584 2579 /* enable nig attention */ 2585 - val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); 2580 + val = (0xff0f | (1 << (BP_VN(bp) + 4))); 2586 2581 if (bp->common.int_block == INT_BLOCK_HC) { 2587 2582 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); 2588 2583 REG_WR(bp, HC_REG_LEADING_EDGE_0 + 
port*8, val); ··· 2761 2756 u16 tpa_agg_size = 0; 2762 2757 2763 2758 if (!fp->disable_tpa) { 2764 - pause->sge_th_hi = 250; 2765 - pause->sge_th_lo = 150; 2759 + pause->sge_th_lo = SGE_TH_LO(bp); 2760 + pause->sge_th_hi = SGE_TH_HI(bp); 2761 + 2762 + /* validate SGE ring has enough to cross high threshold */ 2763 + WARN_ON(bp->dropless_fc && 2764 + pause->sge_th_hi + FW_PREFETCH_CNT > 2765 + MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); 2766 + 2766 2767 tpa_agg_size = min_t(u32, 2767 2768 (min_t(u32, 8, MAX_SKB_FRAGS) * 2768 2769 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); ··· 2782 2771 2783 2772 /* pause - not for e1 */ 2784 2773 if (!CHIP_IS_E1(bp)) { 2785 - pause->bd_th_hi = 350; 2786 - pause->bd_th_lo = 250; 2787 - pause->rcq_th_hi = 350; 2788 - pause->rcq_th_lo = 250; 2774 + pause->bd_th_lo = BD_TH_LO(bp); 2775 + pause->bd_th_hi = BD_TH_HI(bp); 2776 + 2777 + pause->rcq_th_lo = RCQ_TH_LO(bp); 2778 + pause->rcq_th_hi = RCQ_TH_HI(bp); 2779 + /* 2780 + * validate that rings have enough entries to cross 2781 + * high thresholds 2782 + */ 2783 + WARN_ON(bp->dropless_fc && 2784 + pause->bd_th_hi + FW_PREFETCH_CNT > 2785 + bp->rx_ring_size); 2786 + WARN_ON(bp->dropless_fc && 2787 + pause->rcq_th_hi + FW_PREFETCH_CNT > 2788 + NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT); 2789 2789 2790 2790 pause->pri_map = 1; 2791 2791 } ··· 2824 2802 * For PF Clients it should be the maximum avaliable number. 2825 2803 * VF driver(s) may want to define it to a smaller value. 2826 2804 */ 2827 - rxq_init->max_tpa_queues = 2828 - (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : 2829 - ETH_MAX_AGGREGATION_QUEUES_E1H_E2); 2805 + rxq_init->max_tpa_queues = MAX_AGG_QS(bp); 2830 2806 2831 2807 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; 2832 2808 rxq_init->fw_sb_id = fp->fw_sb_id; ··· 4828 4808 hc_sm->time_to_expire = 0xFFFFFFFF; 4829 4809 } 4830 4810 4811 + 4812 + /* allocates state machine ids. 
*/ 4813 + static inline 4814 + void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) 4815 + { 4816 + /* zero out state machine indices */ 4817 + /* rx indices */ 4818 + index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 4819 + 4820 + /* tx indices */ 4821 + index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 4822 + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 4823 + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 4824 + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 4825 + 4826 + /* map indices */ 4827 + /* rx indices */ 4828 + index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 4829 + SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 4830 + 4831 + /* tx indices */ 4832 + index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 4833 + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 4834 + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 4835 + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 4836 + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 4837 + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 4838 + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 4839 + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 4840 + } 4841 + 4831 4842 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, 4832 4843 u8 vf_valid, int fw_sb_id, int igu_sb_id) 4833 4844 { ··· 4890 4839 hc_sm_p = sb_data_e2.common.state_machine; 4891 4840 sb_data_p = (u32 *)&sb_data_e2; 4892 4841 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 4842 + bnx2x_map_sb_state_machines(sb_data_e2.index_data); 4893 4843 } else { 4894 4844 memset(&sb_data_e1x, 0, 4895 4845 sizeof(struct hc_status_block_data_e1x)); ··· 4905 4853 hc_sm_p = sb_data_e1x.common.state_machine; 4906 4854 sb_data_p = (u32 *)&sb_data_e1x; 4907 4855 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 4856 + bnx2x_map_sb_state_machines(sb_data_e1x.index_data); 4908 4857 } 4909 4858 4910 4859 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], ··· 5855 5802 * take the UNDI lock to protect undi_unload flow from accessing 5856 5803 * registers while we're resetting the chip 5857 5804 */ 5858 - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 5805 + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 5859 5806 5860 5807 bnx2x_reset_common(bp); 5861 5808 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); ··· 5867 5814 } 5868 5815 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); 5869 5816 5870 - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 5817 + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 5871 5818 5872 5819 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); 5873 5820 ··· 6724 6671 if (CHIP_MODE_IS_4_PORT(bp)) 6725 6672 dsb_idx = BP_FUNC(bp); 6726 6673 else 6727 - dsb_idx = BP_E1HVN(bp); 6674 + dsb_idx = BP_VN(bp); 6728 6675 6729 6676 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? 
6730 6677 IGU_BC_BASE_DSB_PROD + dsb_idx : 6731 6678 IGU_NORM_BASE_DSB_PROD + dsb_idx); 6732 6679 6680 + /* 6681 + * igu prods come in chunks of E1HVN_MAX (4) - 6682 + * does not matters what is the current chip mode 6683 + */ 6733 6684 for (i = 0; i < (num_segs * E1HVN_MAX); 6734 6685 i += E1HVN_MAX) { 6735 6686 addr = IGU_REG_PROD_CONS_MEMORY + ··· 7627 7570 u32 val; 7628 7571 /* The mac address is written to entries 1-4 to 7629 7572 preserve entry 0 which is used by the PMF */ 7630 - u8 entry = (BP_E1HVN(bp) + 1)*8; 7573 + u8 entry = (BP_VN(bp) + 1)*8; 7631 7574 7632 7575 val = (mac_addr[0] << 8) | mac_addr[1]; 7633 7576 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); ··· 8603 8546 /* Check if there is any driver already loaded */ 8604 8547 val = REG_RD(bp, MISC_REG_UNPREPARED); 8605 8548 if (val == 0x1) { 8606 - /* Check if it is the UNDI driver 8549 + 8550 + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 8551 + /* 8552 + * Check if it is the UNDI driver 8607 8553 * UNDI driver initializes CID offset for normal bell to 0x7 8608 8554 */ 8609 - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 8610 8555 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 8611 8556 if (val == 0x7) { 8612 8557 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; ··· 8645 8586 8646 8587 bnx2x_fw_command(bp, reset_code, 0); 8647 8588 } 8648 - 8649 - /* now it's safe to release the lock */ 8650 - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 8651 8589 8652 8590 bnx2x_undi_int_disable(bp); 8653 8591 port = BP_PORT(bp); ··· 8695 8639 bp->fw_seq = 8696 8640 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & 8697 8641 DRV_MSG_SEQ_NUMBER_MASK); 8698 - } else 8699 - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 8642 + } 8643 + 8644 + /* now it's safe to release the lock */ 8645 + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 8700 8646 } 8701 8647 } 8702 8648 ··· 8835 8777 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) 8836 8778 { 8837 8779 int pfid = BP_FUNC(bp); 8838 - int vn = BP_E1HVN(bp); 8839 8780 int igu_sb_id; 8840 8781 u32 val; 8841 8782 u8 fid, igu_sb_cnt = 0; 8842 8783 8843 8784 bp->igu_base_sb = 0xff; 8844 8785 if (CHIP_INT_MODE_IS_BC(bp)) { 8786 + int vn = BP_VN(bp); 8845 8787 igu_sb_cnt = bp->igu_sb_cnt; 8846 8788 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? 
pfid : vn) * 8847 8789 FP_SB_MAX_E1x; ··· 9474 9416 bp->igu_base_sb = 0; 9475 9417 } else { 9476 9418 bp->common.int_block = INT_BLOCK_IGU; 9419 + 9420 + /* do not allow device reset during IGU info preocessing */ 9421 + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 9422 + 9477 9423 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 9478 9424 9479 9425 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { ··· 9509 9447 9510 9448 bnx2x_get_igu_cam_info(bp); 9511 9449 9450 + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 9512 9451 } 9513 9452 9514 9453 /* ··· 9536 9473 9537 9474 bp->mf_ov = 0; 9538 9475 bp->mf_mode = 0; 9539 - vn = BP_E1HVN(bp); 9476 + vn = BP_VN(bp); 9540 9477 9541 9478 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { 9542 9479 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", ··· 9655 9592 9656 9593 /* port info */ 9657 9594 bnx2x_get_port_hwinfo(bp); 9658 - 9659 - if (!BP_NOMCP(bp)) { 9660 - bp->fw_seq = 9661 - (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 9662 - DRV_MSG_SEQ_NUMBER_MASK); 9663 - BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 9664 - } 9665 9595 9666 9596 /* Get MAC addresses */ 9667 9597 bnx2x_get_mac_hwinfo(bp); ··· 9820 9764 /* need to reset chip if undi was active */ 9821 9765 if (!BP_NOMCP(bp)) 9822 9766 bnx2x_undi_unload(bp); 9767 + 9768 + /* init fw_seq after undi_unload! */ 9769 + if (!BP_NOMCP(bp)) { 9770 + bp->fw_seq = 9771 + (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 9772 + DRV_MSG_SEQ_NUMBER_MASK); 9773 + BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 9774 + } 9823 9775 9824 9776 if (CHIP_REV_IS_FPGA(bp)) 9825 9777 dev_err(&bp->pdev->dev, "FPGA detected\n"); ··· 10323 10259 /* clean indirect addresses */ 10324 10260 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 10325 10261 PCICFG_VENDOR_ID_OFFSET); 10326 - /* Clean the following indirect addresses for all functions since it 10262 + /* 10263 + * Clean the following indirect addresses for all functions since it 10327 10264 * is not used by the driver. 10328 10265 */ 10329 10266 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); 10330 10267 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); 10331 10268 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); 10332 10269 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); 10333 - REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); 10334 - REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); 10335 - REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); 10336 - REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); 10270 + 10271 + if (CHIP_IS_E1x(bp)) { 10272 + REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); 10273 + REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); 10274 + REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); 10275 + REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); 10276 + } 10337 10277 10338 10278 /* 10339 10279 * Enable internal target-read (in case we are probed after PF FLR).
+5 -2
drivers/net/bnx2x/bnx2x_reg.h
··· 5320 5320 #define XCM_REG_XX_OVFL_EVNT_ID 0x20058 5321 5321 #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) 5322 5322 #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) 5323 - #define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3) 5323 + #define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2) 5324 5324 #define XMAC_CTRL_REG_RX_EN (0x1<<1) 5325 5325 #define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) 5326 5326 #define XMAC_CTRL_REG_TX_EN (0x1<<0) ··· 5766 5766 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 5767 5767 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 5768 5768 #define HW_LOCK_RESOURCE_SPIO 2 5769 - #define HW_LOCK_RESOURCE_UNDI 5 5769 + #define HW_LOCK_RESOURCE_RESET 5 5770 5770 #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) 5771 5771 #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) 5772 5772 #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) ··· 6853 6853 #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 6854 6854 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 6855 6855 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 6856 + #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12 6857 + #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000 6858 + #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000 6856 6859 #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96 6857 6860 #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 6858 6861 #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
+4 -3
drivers/net/bnx2x/bnx2x_stats.c
··· 710 710 break; 711 711 712 712 case MAC_TYPE_NONE: /* unreached */ 713 - BNX2X_ERR("stats updated by DMAE but no MAC active\n"); 713 + DP(BNX2X_MSG_STATS, 714 + "stats updated by DMAE but no MAC active\n"); 714 715 return -1; 715 716 716 717 default: /* unreached */ ··· 1392 1391 1393 1392 static void bnx2x_func_stats_base_init(struct bnx2x *bp) 1394 1393 { 1395 - int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX; 1394 + int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX; 1396 1395 u32 func_stx; 1397 1396 1398 1397 /* sanity */ ··· 1405 1404 func_stx = bp->func_stx; 1406 1405 1407 1406 for (vn = VN_0; vn < vn_max; vn++) { 1408 - int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn; 1407 + int mb_idx = BP_FW_MB_IDX_VN(bp, vn); 1409 1408 1410 1409 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); 1411 1410 bnx2x_func_stats_init(bp);
+1
drivers/net/can/ti_hecc.c
··· 46 46 #include <linux/skbuff.h> 47 47 #include <linux/platform_device.h> 48 48 #include <linux/clk.h> 49 + #include <linux/io.h> 49 50 50 51 #include <linux/can/dev.h> 51 52 #include <linux/can/error.h>
+6
drivers/net/e1000/e1000_hw.c
··· 4026 4026 checksum += eeprom_data; 4027 4027 } 4028 4028 4029 + #ifdef CONFIG_PARISC 4030 + /* This is a signature and not a checksum on HP c8000 */ 4031 + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) 4032 + return E1000_SUCCESS; 4033 + 4034 + #endif 4029 4035 if (checksum == (u16) EEPROM_SUM) 4030 4036 return E1000_SUCCESS; 4031 4037 else {
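The e1000 check above relies on the EEPROM rule that the 16-bit words up to and including the checksum word must sum (mod 2^16) to EEPROM_SUM (0xBABA); on the HP c8000 the last word is a signature instead, hence the PARISC escape. A standalone sketch of that validation (the function name and parameters are illustrative):

    #include <stdint.h>
    #include <stdbool.h>

    #define EEPROM_SUM 0xBABA   /* expected word sum, from the e1000 headers */

    static bool eeprom_checksum_ok(const uint16_t *words, int nwords,
                                   uint16_t subsys_vendor)
    {
        uint16_t sum = 0;
        for (int i = 0; i < nwords; i++)
            sum += words[i];            /* wraps mod 2^16, as in the driver */
        if (subsys_vendor == 0x103C && words[nwords - 1] == 0x16d6)
            return true;                /* HP c8000: signature, not checksum */
        return sum == (uint16_t)EEPROM_SUM;
    }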
+31 -17
drivers/net/ibmveth.c
··· 757 757 struct ibmveth_adapter *adapter = netdev_priv(dev); 758 758 unsigned long set_attr, clr_attr, ret_attr; 759 759 unsigned long set_attr6, clr_attr6; 760 - long ret, ret6; 760 + long ret, ret4, ret6; 761 761 int rc1 = 0, rc2 = 0; 762 762 int restart = 0; 763 763 ··· 770 770 771 771 set_attr = 0; 772 772 clr_attr = 0; 773 + set_attr6 = 0; 774 + clr_attr6 = 0; 773 775 774 776 if (data) { 775 777 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; ··· 786 784 if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && 787 785 !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && 788 786 (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { 789 - ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr, 787 + ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr, 790 788 set_attr, &ret_attr); 791 789 792 - if (ret != H_SUCCESS) { 790 + if (ret4 != H_SUCCESS) { 793 791 netdev_err(dev, "unable to change IPv4 checksum " 794 792 "offload settings. %d rc=%ld\n", 795 - data, ret); 793 + data, ret4); 796 794 797 - ret = h_illan_attributes(adapter->vdev->unit_address, 798 - set_attr, clr_attr, &ret_attr); 795 + h_illan_attributes(adapter->vdev->unit_address, 796 + set_attr, clr_attr, &ret_attr); 797 + 798 + if (data == 1) 799 + dev->features &= ~NETIF_F_IP_CSUM; 800 + 799 801 } else { 800 802 adapter->fw_ipv4_csum_support = data; 801 803 } ··· 810 804 if (ret6 != H_SUCCESS) { 811 805 netdev_err(dev, "unable to change IPv6 checksum " 812 806 "offload settings. %d rc=%ld\n", 813 - data, ret); 807 + data, ret6); 814 808 815 - ret = h_illan_attributes(adapter->vdev->unit_address, 816 - set_attr6, clr_attr6, 817 - &ret_attr); 809 + h_illan_attributes(adapter->vdev->unit_address, 810 + set_attr6, clr_attr6, &ret_attr); 811 + 812 + if (data == 1) 813 + dev->features &= ~NETIF_F_IPV6_CSUM; 814 + 818 815 } else 819 816 adapter->fw_ipv6_csum_support = data; 820 817 821 - if (ret != H_SUCCESS || ret6 != H_SUCCESS) 818 + if (ret4 == H_SUCCESS || ret6 == H_SUCCESS) 822 819 adapter->rx_csum = data; 823 820 else 824 821 rc1 = -EIO; ··· 939 930 union ibmveth_buf_desc descs[6]; 940 931 int last, i; 941 932 int force_bounce = 0; 933 + dma_addr_t dma_addr; 942 934 943 935 /* 944 936 * veth handles a maximum of 6 segments including the header, so ··· 1004 994 } 1005 995 1006 996 /* Map the header */ 1007 - descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data, 1008 - skb_headlen(skb), 1009 - DMA_TO_DEVICE); 1010 - if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address)) 997 + dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, 998 + skb_headlen(skb), DMA_TO_DEVICE); 999 + if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) 1011 1000 goto map_failed; 1012 1001 1013 1002 descs[0].fields.flags_len = desc_flags | skb_headlen(skb); 1003 + descs[0].fields.address = dma_addr; 1014 1004 1015 1005 /* Map the frags */ 1016 1006 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1017 - unsigned long dma_addr; 1018 1007 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1019 1008 1020 1009 dma_addr = dma_map_page(&adapter->vdev->dev, frag->page, ··· 1035 1026 netdev->stats.tx_bytes += skb->len; 1036 1027 } 1037 1028 1038 - for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++) 1029 + dma_unmap_single(&adapter->vdev->dev, 1030 + descs[0].fields.address, 1031 + descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK, 1032 + DMA_TO_DEVICE); 1033 + 1034 + for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) 1039 1035 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, 1040 1036 descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, 1041 1037 DMA_TO_DEVICE);
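The ibmveth teardown hunk above restores map/unmap symmetry: descriptor 0 was created with dma_map_single() while the fragments use dma_map_page(), so each must be released with the matching unmap call. A kernel-style sketch of just that rule (the descriptor union and LEN_MASK below are illustrative):

    #include <linux/dma-mapping.h>

    union buf_desc {                    /* illustrative descriptor layout */
        struct { u32 flags_len; dma_addr_t address; } fields;
    };
    #define LEN_MASK 0x00ffffff

    static void unmap_tx_descs(struct device *dev, union buf_desc *descs,
                               int nfrags)
    {
        int i;

        /* the header was mapped with dma_map_single() */
        dma_unmap_single(dev, descs[0].fields.address,
                         descs[0].fields.flags_len & LEN_MASK, DMA_TO_DEVICE);

        /* the fragments were mapped with dma_map_page() */
        for (i = 1; i < nfrags + 1; i++)
            dma_unmap_page(dev, descs[i].fields.address,
                           descs[i].fields.flags_len & LEN_MASK,
                           DMA_TO_DEVICE);
    }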
+10 -2
drivers/net/pch_gbe/pch_gbe.h
··· 127 127 128 128 /* Reset */ 129 129 #define PCH_GBE_ALL_RST 0x80000000 /* All reset */ 130 - #define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */ 131 - #define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */ 130 + #define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */ 131 + #define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */ 132 132 133 133 /* TCP/IP Accelerator Control */ 134 134 #define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */ ··· 275 275 /* DMA Control */ 276 276 #define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */ 277 277 #define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */ 278 + 279 + /* RX DMA STATUS */ 280 + #define PCH_GBE_IDLE_CHECK 0xFFFFFFFE 278 281 279 282 /* Wake On LAN Status */ 280 283 #define PCH_GBE_WLS_BR 0x00000008 /* Broadcas Address */ ··· 474 471 struct pch_gbe_buffer { 475 472 struct sk_buff *skb; 476 473 dma_addr_t dma; 474 + unsigned char *rx_buffer; 477 475 unsigned long time_stamp; 478 476 u16 length; 479 477 bool mapped; ··· 515 511 struct pch_gbe_rx_ring { 516 512 struct pch_gbe_rx_desc *desc; 517 513 dma_addr_t dma; 514 + unsigned char *rx_buff_pool; 515 + dma_addr_t rx_buff_pool_logic; 516 + unsigned int rx_buff_pool_size; 518 517 unsigned int size; 519 518 unsigned int count; 520 519 unsigned int next_to_use; ··· 629 622 unsigned long rx_buffer_len; 630 623 unsigned long tx_queue_len; 631 624 bool have_msi; 625 + bool rx_stop_flag; 632 626 }; 633 627 634 628 extern const char pch_driver_version[];
+192 -108
drivers/net/pch_gbe/pch_gbe_main.c
··· 20 20 21 21 #include "pch_gbe.h" 22 22 #include "pch_gbe_api.h" 23 - #include <linux/prefetch.h> 24 23 25 24 #define DRV_VERSION "1.00" 26 25 const char pch_driver_version[] = DRV_VERSION; ··· 33 34 #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ 34 35 #define PCH_GBE_COPYBREAK_DEFAULT 256 35 36 #define PCH_GBE_PCI_BAR 1 37 + #define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */ 36 38 37 39 /* Macros for ML7223 */ 38 40 #define PCI_VENDOR_ID_ROHM 0x10db 39 41 #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 42 + 43 + /* Macros for ML7831 */ 44 + #define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802 40 45 41 46 #define PCH_GBE_TX_WEIGHT 64 42 47 #define PCH_GBE_RX_WEIGHT 64 ··· 55 52 ) 56 53 57 54 /* Ethertype field values */ 55 + #define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880 58 56 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318 59 57 #define PCH_GBE_FRAME_SIZE_2048 2048 60 58 #define PCH_GBE_FRAME_SIZE_4096 4096 ··· 87 83 #define PCH_GBE_INT_ENABLE_MASK ( \ 88 84 PCH_GBE_INT_RX_DMA_CMPLT | \ 89 85 PCH_GBE_INT_RX_DSC_EMP | \ 86 + PCH_GBE_INT_RX_FIFO_ERR | \ 90 87 PCH_GBE_INT_WOL_DET | \ 91 88 PCH_GBE_INT_TX_CMPLT \ 92 89 ) 93 90 91 + #define PCH_GBE_INT_DISABLE_ALL 0 94 92 95 93 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; 96 94 ··· 144 138 if (!tmp) 145 139 pr_err("Error: busy bit is not cleared\n"); 146 140 } 141 + 142 + /** 143 + * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context 144 + * @reg: Pointer of register 145 + * @bit: Busy bit 146 + */ 147 + static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit) 148 + { 149 + u32 tmp; 150 + int ret = -1; 151 + /* wait busy */ 152 + tmp = 20; 153 + while ((ioread32(reg) & bit) && --tmp) 154 + udelay(5); 155 + if (!tmp) 156 + pr_err("Error: busy bit is not cleared\n"); 157 + else 158 + ret = 0; 159 + return ret; 160 + } 161 + 147 162 /** 148 163 * pch_gbe_mac_mar_set - Set MAC address register 149 164 * @hw: Pointer to the HW structure ··· 212 185 #endif 213 186 pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST); 214 187 /* Setup the receive address */ 188 + pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); 189 + return; 190 + } 191 + 192 + static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw) 193 + { 194 + /* Read the MAC address. and store to the private data */ 195 + pch_gbe_mac_read_mac_addr(hw); 196 + iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET); 197 + pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST); 198 + /* Setup the MAC address */ 215 199 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); 216 200 return; 217 201 } ··· 709 671 710 672 tcpip = ioread32(&hw->reg->TCPIP_ACC); 711 673 712 - if (netdev->features & NETIF_F_RXCSUM) { 713 - tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF; 714 - tcpip |= PCH_GBE_RX_TCPIPACC_EN; 715 - } else { 716 - tcpip |= PCH_GBE_RX_TCPIPACC_OFF; 717 - tcpip &= ~PCH_GBE_RX_TCPIPACC_EN; 718 - } 674 + tcpip |= PCH_GBE_RX_TCPIPACC_OFF; 675 + tcpip &= ~PCH_GBE_RX_TCPIPACC_EN; 719 676 iowrite32(tcpip, &hw->reg->TCPIP_ACC); 720 677 return; 721 678 } ··· 750 717 iowrite32(rdba, &hw->reg->RX_DSC_BASE); 751 718 iowrite32(rdlen, &hw->reg->RX_DSC_SIZE); 752 719 iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P); 753 - 754 - /* Enables Receive DMA */ 755 - rxdma = ioread32(&hw->reg->DMA_CTRL); 756 - rxdma |= PCH_GBE_RX_DMA_EN; 757 - iowrite32(rxdma, &hw->reg->DMA_CTRL); 758 - /* Enables Receive */ 759 - iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); 760 720 } 761 721 762 722 /** ··· 1123 1097 spin_unlock_irqrestore(&adapter->stats_lock, flags); 1124 1098 } 1125 1099 1100 + static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter) 1101 + { 1102 + struct pch_gbe_hw *hw = &adapter->hw; 1103 + u32 rxdma; 1104 + u16 value; 1105 + int ret; 1106 + 1107 + /* Disable Receive DMA */ 1108 + rxdma = ioread32(&hw->reg->DMA_CTRL); 1109 + rxdma &= ~PCH_GBE_RX_DMA_EN; 1110 + iowrite32(rxdma, &hw->reg->DMA_CTRL); 1111 + /* Wait Rx DMA BUS is IDLE */ 1112 + ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK); 1113 + if (ret) { 1114 + /* Disable Bus master */ 1115 + pci_read_config_word(adapter->pdev, PCI_COMMAND, &value); 1116 + value &= ~PCI_COMMAND_MASTER; 1117 + pci_write_config_word(adapter->pdev, PCI_COMMAND, value); 1118 + /* Stop Receive */ 1119 + pch_gbe_mac_reset_rx(hw); 1120 + /* Enable Bus master */ 1121 + value |= PCI_COMMAND_MASTER; 1122 + pci_write_config_word(adapter->pdev, PCI_COMMAND, value); 1123 + } else { 1124 + /* Stop Receive */ 1125 + pch_gbe_mac_reset_rx(hw); 1126 + } 1127 + } 1128 + 1129 + static void pch_gbe_start_receive(struct pch_gbe_hw *hw) 1130 + { 1131 + u32 rxdma; 1132 + 1133 + /* Enables Receive DMA */ 1134 + rxdma = ioread32(&hw->reg->DMA_CTRL); 1135 + rxdma |= PCH_GBE_RX_DMA_EN; 1136 + iowrite32(rxdma, &hw->reg->DMA_CTRL); 1137 + /* Enables Receive */ 1138 + iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); 1139 + return; 1140 + } 1141 + 1126 1142 /** 1127 1143 * pch_gbe_intr - Interrupt Handler 1128 1144 * @irq: Interrupt number ··· 1191 1123 if (int_st & PCH_GBE_INT_RX_FRAME_ERR) 1192 1124 adapter->stats.intr_rx_frame_err_count++; 1193 1125 if (int_st & PCH_GBE_INT_RX_FIFO_ERR) 1194 - adapter->stats.intr_rx_fifo_err_count++; 1126 + if (!adapter->rx_stop_flag) { 1127 + adapter->stats.intr_rx_fifo_err_count++; 1128 + pr_debug("Rx fifo over run\n"); 1129 + adapter->rx_stop_flag = true; 1130 + int_en = ioread32(&hw->reg->INT_EN); 1131 + iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), 1132 + &hw->reg->INT_EN); 1133 + pch_gbe_stop_receive(adapter); 1134 + } 1195 1135 if (int_st & PCH_GBE_INT_RX_DMA_ERR) 1196 1136 adapter->stats.intr_rx_dma_err_count++; 1197 1137 if (int_st & PCH_GBE_INT_TX_FIFO_ERR) ··· 1211 1135 /* When Rx descriptor is empty */ 1212 1136 if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) { 1213 1137 adapter->stats.intr_rx_dsc_empty_count++; 1214 1136 - pr_err("Rx descriptor is empty\n"); 1138 + pr_debug("Rx descriptor is empty\n"); 1215 1139 int_en = ioread32(&hw->reg->INT_EN); 1216 1140 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN); 1217 1141 if (hw->mac.tx_fc_enable) { ··· 1261 1185 unsigned int i; 1262 1186 unsigned int bufsz; 1263 1187 1264 - bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN; 1188 + bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; 1265 1189 i = rx_ring->next_to_use; 1266 1190 1267 1191 while ((cleaned_count--)) { 1268 1192 buffer_info = &rx_ring->buffer_info[i]; 1269 - skb = buffer_info->skb; 1270 - if (skb) { 1271 - skb_trim(skb, 0); 1272 - } else { 1273 - skb = netdev_alloc_skb(netdev, bufsz); 1274 - if (unlikely(!skb)) { 1275 - /* Better luck next round */ 1276 - adapter->stats.rx_alloc_buff_failed++; 1277 - break; 1278 - } 1279 - /* 64byte align */ 1280 - skb_reserve(skb, PCH_GBE_DMA_ALIGN); 1281 - 1282 - buffer_info->skb = skb; 1283 - buffer_info->length = adapter->rx_buffer_len; 1193 + skb = netdev_alloc_skb(netdev, bufsz); 1194 + if (unlikely(!skb)) { 1195 + /* Better luck next round */ 1196 + adapter->stats.rx_alloc_buff_failed++; 1197 + break; 1284 1198 } 1199 + /* align */ 1200 + skb_reserve(skb, NET_IP_ALIGN); 1201 + buffer_info->skb = skb; 1202 + 1285 1203 buffer_info->dma = dma_map_single(&pdev->dev, 1286 - skb->data, 1204 + buffer_info->rx_buffer, 1287 1205 buffer_info->length, 1288 1206 DMA_FROM_DEVICE); 1289 1207 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { ··· 1308 1238 &hw->reg->RX_DSC_SW_P); 1309 1239 } 1310 1240 return; 1241 + } 1242 + 1243 + static int 1244 + pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter, 1245 + struct pch_gbe_rx_ring *rx_ring, int cleaned_count) 1246 + { 1247 + struct pci_dev *pdev = adapter->pdev; 1248 + struct pch_gbe_buffer *buffer_info; 1249 + unsigned int i; 1250 + unsigned int bufsz; 1251 + unsigned int size; 1252 + 1253 + bufsz = adapter->rx_buffer_len; 1254 + 1255 + size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; 1256 + rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size, 1257 + &rx_ring->rx_buff_pool_logic, 1258 + GFP_KERNEL); 1259 + if (!rx_ring->rx_buff_pool) { 1260 + pr_err("Unable to allocate memory for the receive pool buffer\n"); 1261 + return -ENOMEM; 1262 + } 1263 + memset(rx_ring->rx_buff_pool, 0, size); 1264 + rx_ring->rx_buff_pool_size = size; 1265 + for (i = 0; i < rx_ring->count; i++) { 1266 + buffer_info = &rx_ring->buffer_info[i]; 1267 + buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i; 1268 + buffer_info->length = bufsz; 1269 + } 1270 + return 0; 1311 1271 } 1312 1272 1313 1273 /** ··· 1480 1380 unsigned int i; 1481 1381 unsigned int cleaned_count = 0; 1482 1382 bool cleaned = false; 1483 - struct sk_buff *skb, *new_skb; 1383 + struct sk_buff *skb; 1484 1384 u8 dma_status; 1485 1385 u16 gbec_status; 1486 1386 u32 tcp_ip_status; ··· 1501 1401 rx_desc->gbec_status = DSC_INIT16; 1502 1402 buffer_info = &rx_ring->buffer_info[i]; 1503 1403 skb = buffer_info->skb; 1404 + buffer_info->skb = NULL; 1504 1405 1505 1406 /* unmap dma */ 1506 1407 dma_unmap_single(&pdev->dev, buffer_info->dma, 1507 1408 buffer_info->length, DMA_FROM_DEVICE); 1508 1409 buffer_info->mapped = false; 1509 - /* Prefetch the packet */ 1510 - prefetch(skb->data); 1511 1410 1512 1411 pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x " 1513 1412 "TCP:0x%08x] BufInf = 0x%p\n", ··· 1526 1427 pr_err("Receive CRC Error\n"); 1527 1428 } else { 1528 1429 /* get receive length */ 1529 - /* length convert[-3] */ 1530 - length = (rx_desc->rx_words_eob) - 3; 1430 + /* length convert[-3], length includes FCS length */ 1431 + length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN; 1432 + if (rx_desc->rx_words_eob & 0x02) 1433 + length = length - 4; 1434 + /* 1435 + * buffer_info->rx_buffer: [Header:14][payload] 1436 + * skb->data: [Reserve:2][Header:14][payload] 1437 + */ 1438 + memcpy(skb->data, buffer_info->rx_buffer, length); 1531 1439 1532 - /* Decide the data conversion method */ 1533 - if (!(netdev->features & NETIF_F_RXCSUM)) { 1534 - /* [Header:14][payload] */ 1535 - if (NET_IP_ALIGN) { 1536 - /* Because alignment differs, 1537 - * the new_skb is newly allocated, 1538 - * and data is copied to new_skb.*/ 1539 - new_skb = netdev_alloc_skb(netdev, 1540 - length + NET_IP_ALIGN); 1541 - if (!new_skb) { 1542 - /* dorrop error */ 1543 - pr_err("New skb allocation " 1544 - "Error\n"); 1545 - goto dorrop; 1546 - } 1547 - skb_reserve(new_skb, NET_IP_ALIGN); 1548 - memcpy(new_skb->data, skb->data, 1549 - length); 1550 - skb = new_skb; 1551 - } else { 1552 - /* DMA buffer is used as SKB as it is.*/ 1553 - buffer_info->skb = NULL; 1554 - } 1555 - } else { 1556 - /* [Header:14][padding:2][payload] */ 1557 - /* The length includes padding length */ 1558 - length = length - PCH_GBE_DMA_PADDING; 1559 - if ((length < copybreak) || 1560 - (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) { 1561 - /* Because alignment differs, 1562 - * the new_skb is newly allocated, 1563 - * and data is copied to new_skb. 1564 - * Padding data is deleted 1565 - * at the time of a copy.*/ 1566 - new_skb = netdev_alloc_skb(netdev, 1567 - length + NET_IP_ALIGN); 1568 - if (!new_skb) { 1569 - /* dorrop error */ 1570 - pr_err("New skb allocation " 1571 - "Error\n"); 1572 - goto dorrop; 1573 - } 1574 - skb_reserve(new_skb, NET_IP_ALIGN); 1575 - memcpy(new_skb->data, skb->data, 1576 - ETH_HLEN); 1577 - memcpy(&new_skb->data[ETH_HLEN], 1578 - &skb->data[ETH_HLEN + 1579 - PCH_GBE_DMA_PADDING], 1580 - length - ETH_HLEN); 1581 - skb = new_skb; 1582 - } else { 1583 - /* Padding data is deleted 1584 - * by moving header data.*/ 1585 - memmove(&skb->data[PCH_GBE_DMA_PADDING], 1586 - &skb->data[0], ETH_HLEN); 1587 - skb_reserve(skb, NET_IP_ALIGN); 1588 - buffer_info->skb = NULL; 1589 - } 1590 - } 1591 - /* The length includes FCS length */ 1592 - length = length - ETH_FCS_LEN; 1593 1440 /* update status of driver */ 1594 1441 adapter->stats.rx_bytes += length; 1595 1442 adapter->stats.rx_packets++; ··· 1554 1509 pr_debug("Receive skb->ip_summed: %d length: %d\n", 1555 1510 skb->ip_summed, length); 1556 1511 } 1557 - dorrop: 1558 1512 /* return some buffers to hardware, one at a time is too slow */ 1559 1513 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) { 1560 1514 pch_gbe_alloc_rx_buffers(adapter, rx_ring, ··· 1758 1714 pr_err("Error: can't bring device up\n"); 1759 1715 return err; 1760 1716 } 1717 + err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count); 1718 + if (err) { 1719 + pr_err("Error: can't bring device up\n"); 1720 + return err; 1721 + } 1761 1722 pch_gbe_alloc_tx_buffers(adapter, tx_ring); 1762 1723 pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); 1763 1724 adapter->tx_queue_len = netdev->tx_queue_len; 1725 + pch_gbe_start_receive(&adapter->hw); 1764 1726 1765 1727 mod_timer(&adapter->watchdog_timer, jiffies); 1766 1728 ··· 1784 1734 void pch_gbe_down(struct pch_gbe_adapter *adapter) 1785 1735 { 1786 1736 struct net_device *netdev = adapter->netdev; 1737 + struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; 1787 1738 1788 1739 /* signal that we're down so the interrupt handler does not 1789 1740 * reschedule our watchdog timer */ ··· 1803 1752 pch_gbe_reset(adapter); 1804 1753 pch_gbe_clean_tx_ring(adapter, adapter->tx_ring); 1805 1754 pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); 1755 + 1756 + pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size, 1757 + rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic); 1758 + rx_ring->rx_buff_pool_logic = 0; 1759 + rx_ring->rx_buff_pool_size = 0; 1760 + rx_ring->rx_buff_pool = NULL; 1806 1761 } 1807 1762 1808 1763 /** ··· 2061 2004 { 2062 2005 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 2063 2006 int max_frame; 2007 + unsigned long old_rx_buffer_len = adapter->rx_buffer_len; 2008 + int err; 2064 2009 2065 2010 max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 2066 2011 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || ··· 2077 2018 else if (max_frame <= PCH_GBE_FRAME_SIZE_8192) 2078 2019 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192; 2079 2020 else 2080 - adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE; 2081 - netdev->mtu = new_mtu; 2082 - adapter->hw.mac.max_frame_size = max_frame; 2021 + adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE; 2083 2022 2084 - if (netif_running(netdev)) 2085 - pch_gbe_reinit_locked(adapter); 2086 - else 2023 + if (netif_running(netdev)) { 2024 + pch_gbe_down(adapter); 2025 + err = pch_gbe_up(adapter); 2026 + if (err) { 2027 + adapter->rx_buffer_len = old_rx_buffer_len; 2028 + pch_gbe_up(adapter); 2029 + return -ENOMEM; 2030 + } else { 2031 + netdev->mtu = new_mtu; 2032 + adapter->hw.mac.max_frame_size = max_frame; 2033 + } 2034 + } else { 2087 2035 pch_gbe_reset(adapter); 2036 + netdev->mtu = new_mtu; 2037 + adapter->hw.mac.max_frame_size = max_frame; 2038 + } 2088 2039 2089 2040 pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", 2090 2041 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, ··· 2172 2103 int work_done = 0; 2173 2104 bool poll_end_flag = false; 2174 2105 bool cleaned = false; 2106 + u32 int_en; 2175 2107 2176 2108 pr_debug("budget : %d\n", budget); 2177 2109 ··· 2180 2110 if (!netif_carrier_ok(netdev)) { 2181 2111 poll_end_flag = true; 2182 2112 } else { 2183 - cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); 2184 2113 pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); 2114 + if (adapter->rx_stop_flag) { 2115 + adapter->rx_stop_flag = false; 2116 + pch_gbe_start_receive(&adapter->hw); 2117 + int_en = ioread32(&adapter->hw.reg->INT_EN); 2118 + iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), 2119 + &adapter->hw.reg->INT_EN); 2120 + } 2121 + cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); 2185 2122 2186 2123 if (cleaned) 2187 2124 work_done = budget; ··· 2524 2447 }, 2525 2448 {.vendor = PCI_VENDOR_ID_ROHM, 2526 2449 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE, 2450 + .subvendor = PCI_ANY_ID, 2451 + .subdevice = PCI_ANY_ID, 2452 + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), 2453 + .class_mask = (0xFFFF00) 2454 + }, 2455 + {.vendor = PCI_VENDOR_ID_ROHM, 2456 + .device = PCI_DEVICE_ID_ROHM_ML7831_GBE, 2527 2457 .subvendor = PCI_ANY_ID, 2528 2458 .subdevice = PCI_ANY_ID, 2529 2459 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
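Taken together, the pch_gbe additions above implement a stop/drain/restart cycle for RX FIFO overruns: the interrupt handler masks the error source and quiesces the receiver, and the NAPI poll brings it back once the ring is drained. The control flow, reduced to a skeleton (every name below is hypothetical, not driver API):

    #include <stdbool.h>

    struct adapter { bool rx_stop_flag; };
    enum { RX_FIFO_ERR = 1 };

    /* hypothetical helpers standing in for the driver's register ops */
    extern void mask_irq(struct adapter *ad, int bit);
    extern void unmask_irq(struct adapter *ad, int bit);
    extern void stop_rx(struct adapter *ad);  /* disable DMA, wait idle, reset RX MAC */
    extern void start_rx(struct adapter *ad); /* re-enable RX DMA and MAC */
    extern int clean_rx(struct adapter *ad, int budget);

    static void on_rx_fifo_overrun(struct adapter *ad)  /* interrupt context */
    {
        if (ad->rx_stop_flag)
            return;                    /* already quiescing */
        ad->rx_stop_flag = true;
        mask_irq(ad, RX_FIFO_ERR);     /* stop the interrupt storm */
        stop_rx(ad);
    }

    static int poll(struct adapter *ad, int budget)     /* NAPI context */
    {
        int done = clean_rx(ad, budget);

        if (ad->rx_stop_flag) {
            ad->rx_stop_flag = false;
            start_rx(ad);
            unmask_irq(ad, RX_FIFO_ERR);  /* listen for the next overrun */
        }
        return done;
    }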
+2 -16
drivers/net/sfc/efx.c
··· 1050 1050 { 1051 1051 struct pci_dev *pci_dev = efx->pci_dev; 1052 1052 dma_addr_t dma_mask = efx->type->max_dma_mask; 1053 - bool use_wc; 1054 1053 int rc; 1055 1054 1056 1055 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); ··· 1100 1101 rc = -EIO; 1101 1102 goto fail3; 1102 1103 } 1103 - 1104 - /* bug22643: If SR-IOV is enabled then tx push over a write combined 1105 - * mapping is unsafe. We need to disable write combining in this case. 1106 - * MSI is unsupported when SR-IOV is enabled, and the firmware will 1107 - * have removed the MSI capability. So write combining is safe if 1108 - * there is an MSI capability. 1109 - */ 1110 - use_wc = (!EFX_WORKAROUND_22643(efx) || 1111 - pci_find_capability(pci_dev, PCI_CAP_ID_MSI)); 1112 - if (use_wc) 1113 - efx->membase = ioremap_wc(efx->membase_phys, 1114 - efx->type->mem_map_size); 1115 - else 1116 - efx->membase = ioremap_nocache(efx->membase_phys, 1117 - efx->type->mem_map_size); 1104 + efx->membase = ioremap_nocache(efx->membase_phys, 1105 + efx->type->mem_map_size); 1118 1106 if (!efx->membase) { 1119 1107 netif_err(efx, probe, efx->net_dev, 1120 1108 "could not map memory BAR at %llx+%x\n",
-6
drivers/net/sfc/io.h
··· 103 103 _efx_writed(efx, value->u32[2], reg + 8); 104 104 _efx_writed(efx, value->u32[3], reg + 12); 105 105 #endif 106 - wmb(); 107 106 mmiowb(); 108 107 spin_unlock_irqrestore(&efx->biu_lock, flags); 109 108 } ··· 125 126 __raw_writel((__force u32)value->u32[0], membase + addr); 126 127 __raw_writel((__force u32)value->u32[1], membase + addr + 4); 127 128 #endif 128 - wmb(); 129 129 mmiowb(); 130 130 spin_unlock_irqrestore(&efx->biu_lock, flags); 131 131 } ··· 139 141 140 142 /* No lock required */ 141 143 _efx_writed(efx, value->u32[0], reg); 142 - wmb(); 143 144 } 144 145 145 146 /* Read a 128-bit CSR, locking as appropriate. */ ··· 149 152 150 153 spin_lock_irqsave(&efx->biu_lock, flags); 151 154 value->u32[0] = _efx_readd(efx, reg + 0); 152 - rmb(); 153 155 value->u32[1] = _efx_readd(efx, reg + 4); 154 156 value->u32[2] = _efx_readd(efx, reg + 8); 155 157 value->u32[3] = _efx_readd(efx, reg + 12); ··· 171 175 value->u64[0] = (__force __le64)__raw_readq(membase + addr); 172 176 #else 173 177 value->u32[0] = (__force __le32)__raw_readl(membase + addr); 174 - rmb(); 175 178 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); 176 179 #endif 177 180 spin_unlock_irqrestore(&efx->biu_lock, flags); ··· 244 249 _efx_writed(efx, value->u32[2], reg + 8); 245 250 _efx_writed(efx, value->u32[3], reg + 12); 246 251 #endif 247 - wmb(); 248 252 } 249 253 #define efx_writeo_page(efx, value, reg, page) \ 250 254 _efx_writeo_page(efx, value, \
+17 -29
drivers/net/sfc/mcdi.c
··· 50 50 return &nic_data->mcdi; 51 51 } 52 52 53 - static inline void 54 - efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) 55 - { 56 - struct siena_nic_data *nic_data = efx->nic_data; 57 - value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); 58 - } 59 - 60 - static inline void 61 - efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) 62 - { 63 - struct siena_nic_data *nic_data = efx->nic_data; 64 - __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); 65 - } 66 - 67 53 void efx_mcdi_init(struct efx_nic *efx) 68 54 { 69 55 struct efx_mcdi_iface *mcdi; ··· 70 84 const u8 *inbuf, size_t inlen) 71 85 { 72 86 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 73 - unsigned pdu = MCDI_PDU(efx); 74 - unsigned doorbell = MCDI_DOORBELL(efx); 87 + unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 88 + unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); 75 89 unsigned int i; 76 90 efx_dword_t hdr; 77 91 u32 xflags, seqno; ··· 92 106 MCDI_HEADER_SEQ, seqno, 93 107 MCDI_HEADER_XFLAGS, xflags); 94 108 95 - efx_mcdi_writed(efx, &hdr, pdu); 109 + efx_writed(efx, &hdr, pdu); 96 110 97 111 for (i = 0; i < inlen; i += 4) 98 - efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), 99 - pdu + 4 + i); 112 + _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); 113 + 114 + /* Ensure the payload is written out before the header */ 115 + wmb(); 100 116 101 117 /* ring the doorbell with a distinctive value */ 102 - EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); 103 - efx_mcdi_writed(efx, &hdr, doorbell); 118 + _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); 104 119 } 105 120 106 121 static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) 107 122 { 108 123 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 109 - unsigned int pdu = MCDI_PDU(efx); 124 + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 110 125 int i; 111 126 112 127 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 113 128 BUG_ON(outlen & 3 || outlen >= 0x100); 114 129 115 130 for (i = 0; i < outlen; i += 4) 116 - efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); 131 + *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); 117 132 118 133 static int efx_mcdi_poll(struct efx_nic *efx) ··· 122 135 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 123 136 unsigned int time, finish; 124 137 unsigned int respseq, respcmd, error; 125 - unsigned int pdu = MCDI_PDU(efx); 138 + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 126 139 unsigned int rc, spins; 127 140 efx_dword_t reg; 128 141 ··· 148 161 149 162 time = get_seconds(); 150 163 151 - efx_mcdi_readd(efx, &reg, pdu); 164 + rmb(); 165 + efx_readd(efx, &reg, pdu); 152 166 153 167 /* All 1's indicates that shared memory is in reset (and is 154 168 * not a valid header). Wait for it to come out reset before ··· 176 188 respseq, mcdi->seqno); 177 189 rc = EIO; 178 190 } else if (error) { 179 - efx_mcdi_readd(efx, &reg, pdu + 4); 191 + efx_readd(efx, &reg, pdu + 4); 180 192 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { 181 193 #define TRANSLATE_ERROR(name) \ 182 194 case MC_CMD_ERR_ ## name: \ ··· 210 222 /* Test and clear MC-rebooted flag for this port/function */ 211 223 int efx_mcdi_poll_reboot(struct efx_nic *efx) 212 224 { 213 - unsigned int addr = MCDI_REBOOT_FLAG(efx); 225 + unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); 214 226 efx_dword_t reg; 215 227 uint32_t value; 216 228 217 229 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 218 230 return false; 219 231 220 - efx_mcdi_readd(efx, &reg, addr); 232 + efx_readd(efx, &reg, addr); 221 233 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); 222 234 223 235 if (value == 0) 224 236 return 0; 225 237 226 238 EFX_ZERO_DWORD(reg); 227 - efx_mcdi_writed(efx, &reg, addr); 239 + efx_writed(efx, &reg, addr); 228 240 229 241 if (value == MC_STATUS_DWORD_ASSERT) 230 242 return -EINTR;
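The MCDI rework above leans on one ordering rule: every header and payload word must reach shared memory before the doorbell write that tells the MC to look, which is what the inserted wmb() guarantees between the raw register writes. In isolation (a sketch; the function name and offsets are illustrative, while __raw_writel()/wmb() are the kernel primitives used above):

    #include <linux/io.h>
    #include <linux/types.h>

    static void mcdi_post(void __iomem *pdu, void __iomem *doorbell,
                          const u32 *words, unsigned int n)
    {
        unsigned int i;

        for (i = 0; i < n; i++)
            __raw_writel(words[i], pdu + 4 * i);  /* stage header + payload */
        wmb();                    /* payload must be visible before doorbell */
        __raw_writel(0x45789abc, doorbell);       /* distinctive doorbell value */
    }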
-7
drivers/net/sfc/nic.c
··· 1936 1936 1937 1937 size = min_t(size_t, table->step, 16); 1938 1938 1939 - if (table->offset >= efx->type->mem_map_size) { 1940 - /* No longer mapped; return dummy data */ 1941 - memcpy(buf, "\xde\xc0\xad\xde", 4); 1942 - buf += table->rows * size; 1943 - continue; 1944 - } 1945 - 1946 1939 for (i = 0; i < table->rows; i++) { 1947 1940 switch (table->step) { 1948 1941 case 4: /* 32-bit register or SRAM */
-2
drivers/net/sfc/nic.h
··· 143 143 /** 144 144 * struct siena_nic_data - Siena NIC state 145 145 * @mcdi: Management-Controller-to-Driver Interface 146 - * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable. 147 146 * @wol_filter_id: Wake-on-LAN packet filter id 148 147 */ 149 148 struct siena_nic_data { 150 149 struct efx_mcdi_iface mcdi; 151 - void __iomem *mcdi_smem; 152 150 int wol_filter_id; 153 151 }; 154 152
+4 -21
drivers/net/sfc/siena.c
··· 250 250 efx_reado(efx, &reg, FR_AZ_CS_DEBUG); 251 251 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 252 252 253 - /* Initialise MCDI */ 254 - nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + 255 - FR_CZ_MC_TREG_SMEM, 256 - FR_CZ_MC_TREG_SMEM_STEP * 257 - FR_CZ_MC_TREG_SMEM_ROWS); 258 - if (!nic_data->mcdi_smem) { 259 - netif_err(efx, probe, efx->net_dev, 260 - "could not map MCDI at %llx+%x\n", 261 - (unsigned long long)efx->membase_phys + 262 - FR_CZ_MC_TREG_SMEM, 263 - FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); 264 - rc = -ENOMEM; 265 - goto fail1; 266 - } 267 253 efx_mcdi_init(efx); 268 254 269 255 /* Recover from a failed assertion before probing */ 270 256 rc = efx_mcdi_handle_assertion(efx); 271 257 if (rc) 272 - goto fail2; 258 + goto fail1; 273 259 274 260 /* Let the BMC know that the driver is now in charge of link and 275 261 * filter settings. We must do this before we reset the NIC */ ··· 310 324 fail3: 311 325 efx_mcdi_drv_attach(efx, false, NULL); 312 326 fail2: 313 - iounmap(nic_data->mcdi_smem); 314 327 fail1: 315 328 kfree(efx->nic_data); 316 329 return rc; ··· 389 404 390 405 static void siena_remove_nic(struct efx_nic *efx) 391 406 { 392 - struct siena_nic_data *nic_data = efx->nic_data; 393 - 394 407 efx_nic_free_buffer(efx, &efx->irq_status); 395 408 396 409 siena_reset_hw(efx, RESET_TYPE_ALL); ··· 398 415 efx_mcdi_drv_attach(efx, false, NULL); 399 416 400 417 /* Tear down the private nic state */ 401 - iounmap(nic_data->mcdi_smem); 402 - kfree(nic_data); 418 + kfree(efx->nic_data); 403 419 efx->nic_data = NULL; 404 420 } 405 421 ··· 638 656 .default_mac_ops = &efx_mcdi_mac_operations, 639 657 640 658 .revision = EFX_REV_SIENA_A0, 641 - .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ 659 + .mem_map_size = (FR_CZ_MC_TREG_SMEM + 660 + FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), 642 661 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, 643 662 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, 644 663 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
-2
drivers/net/sfc/workarounds.h
··· 38 38 #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS 39 39 /* Legacy interrupt storm when interrupt fifo fills */ 40 40 #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA 41 - /* Write combining and sriov=enabled are incompatible */ 42 - #define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA 43 41 44 42 /* Spurious parity errors in TSORT buffers */ 45 43 #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
+5
drivers/net/usb/ipheth.c
··· 59 59 #define USB_PRODUCT_IPHONE_3G 0x1292 60 60 #define USB_PRODUCT_IPHONE_3GS 0x1294 61 61 #define USB_PRODUCT_IPHONE_4 0x1297 62 + #define USB_PRODUCT_IPHONE_4_VZW 0x129c 62 63 63 64 #define IPHETH_USBINTF_CLASS 255 64 65 #define IPHETH_USBINTF_SUBCLASS 253 ··· 97 96 IPHETH_USBINTF_PROTO) }, 98 97 { USB_DEVICE_AND_INTERFACE_INFO( 99 98 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, 99 + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 100 + IPHETH_USBINTF_PROTO) }, 101 + { USB_DEVICE_AND_INTERFACE_INFO( 102 + USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW, 100 103 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 101 104 IPHETH_USBINTF_PROTO) }, 102 105 { }
+2 -1
drivers/net/wireless/ath/ath9k/ar9002_calib.c
··· 41 41 case ADC_DC_CAL: 42 42 /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */ 43 43 if (!IS_CHAN_B(chan) && 44 - !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) 44 + !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) && 45 + IS_CHAN_HT20(chan))) 45 46 supported = true; 46 47 break; 47 48 }
+1 -1
drivers/net/wireless/ath/ath9k/ar9003_phy.c
··· 671 671 REG_WRITE_ARRAY(&ah->iniModesAdditional, 672 672 modesIndex, regWrites); 673 673 674 - if (AR_SREV_9300(ah)) 674 + if (AR_SREV_9330(ah)) 675 675 REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites); 676 676 677 677 if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
+6
drivers/net/wireless/ath/ath9k/main.c
··· 2303 2303 mutex_lock(&sc->mutex); 2304 2304 cancel_delayed_work_sync(&sc->tx_complete_work); 2305 2305 2306 + if (ah->ah_flags & AH_UNPLUGGED) { 2307 + ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n"); 2308 + mutex_unlock(&sc->mutex); 2309 + return; 2310 + } 2311 + 2306 2312 if (sc->sc_flags & SC_OP_INVALID) { 2307 2313 ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); 2308 2314 mutex_unlock(&sc->mutex);
+8 -5
drivers/net/wireless/iwlegacy/iwl-3945-rs.c
··· 822 822 823 823 out: 824 824 825 - rs_sta->last_txrate_idx = index; 826 - if (sband->band == IEEE80211_BAND_5GHZ) 827 - info->control.rates[0].idx = rs_sta->last_txrate_idx - 828 - IWL_FIRST_OFDM_RATE; 829 - else 825 + if (sband->band == IEEE80211_BAND_5GHZ) { 826 + if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE)) 827 + index = IWL_FIRST_OFDM_RATE; 828 + rs_sta->last_txrate_idx = index; 829 + info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE; 830 + } else { 831 + rs_sta->last_txrate_idx = index; 830 832 info->control.rates[0].idx = rs_sta->last_txrate_idx; 833 + } 831 834 832 835 IWL_DEBUG_RATE(priv, "leave: %d\n", index); 833 836 }
+1 -1
drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
··· 167 167 168 168 memset(&cmd, 0, sizeof(cmd)); 169 169 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 170 - memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib)); 170 + memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib)); 171 171 if (!(cmd.radio_sensor_offset)) 172 172 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; 173 173
+2
drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
··· 771 771 cmd = txq->cmd[cmd_index]; 772 772 meta = &txq->meta[cmd_index]; 773 773 774 + txq->time_stamp = jiffies; 775 + 774 776 iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); 775 777 776 778 /* Input error checking is done when commands are added to queue. */
+8
drivers/net/wireless/rtlwifi/core.c
··· 610 610 611 611 mac->link_state = MAC80211_NOLINK; 612 612 memset(mac->bssid, 0, 6); 613 + 614 + /* reset sec info */ 615 + rtl_cam_reset_sec_info(hw); 616 + 617 + rtl_cam_reset_all_entry(hw); 613 618 mac->vendor = PEER_UNKNOWN; 614 619 615 620 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, ··· 1068 1063 *or clear all entry here. 1069 1064 */ 1070 1065 rtl_cam_delete_one_entry(hw, mac_addr, key_idx); 1066 + 1067 + rtl_cam_reset_sec_info(hw); 1068 + 1071 1069 break; 1072 1070 default: 1073 1071 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+6 -5
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
··· 549 549 (tcb_desc->rts_use_shortpreamble ? 1 : 0) 550 550 : (tcb_desc->rts_use_shortgi ? 1 : 0))); 551 551 if (mac->bw_40) { 552 - if (tcb_desc->packet_bw) { 552 + if (rate_flag & IEEE80211_TX_RC_DUP_DATA) { 553 553 SET_TX_DESC_DATA_BW(txdesc, 1); 554 554 SET_TX_DESC_DATA_SC(txdesc, 3); 555 + } else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){ 556 + SET_TX_DESC_DATA_BW(txdesc, 1); 557 + SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc); 555 558 } else { 556 559 SET_TX_DESC_DATA_BW(txdesc, 0); 557 - if (rate_flag & IEEE80211_TX_RC_DUP_DATA) 558 - SET_TX_DESC_DATA_SC(txdesc, 559 - mac->cur_40_prime_sc); 560 - } 560 + SET_TX_DESC_DATA_SC(txdesc, 0); 561 + } 561 562 } else { 562 563 SET_TX_DESC_DATA_BW(txdesc, 0); 563 564 SET_TX_DESC_DATA_SC(txdesc, 0);
+3 -1
drivers/pci/hotplug/pcihp_slot.c
··· 169 169 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) 170 170 return; 171 171 172 - pcie_bus_configure_settings(dev->bus, dev->bus->self->pcie_mpss); 172 + if (dev->bus && dev->bus->self) 173 + pcie_bus_configure_settings(dev->bus, 174 + dev->bus->self->pcie_mpss); 173 175 174 176 memset(&hpp, 0, sizeof(hpp)); 175 177 ret = pci_get_hp_params(dev, &hpp);
+1 -1
drivers/pci/pci.c
··· 77 77 unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; 78 78 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; 79 79 80 - enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE; 80 + enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; 81 81 82 82 /* 83 83 * The default CLS is used if arch didn't set CLS explicitly and not
+24 -23
drivers/pci/probe.c
··· 1351 1351 * will occur as normal. 1352 1352 */ 1353 1353 if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) || 1354 - dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)) 1354 + (dev->bus->self && 1355 + dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT))) 1355 1356 *smpss = 0; 1356 1357 1357 1358 if (*smpss > dev->pcie_mpss) ··· 1397 1396 1398 1397 static void pcie_write_mrrs(struct pci_dev *dev, int mps) 1399 1398 { 1400 - int rc, mrrs; 1399 + int rc, mrrs, dev_mpss; 1401 1400 1402 - if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { 1403 - int dev_mpss = 128 << dev->pcie_mpss; 1401 + /* In the "safe" case, do not configure the MRRS. There appear to be 1402 + * issues with setting MRRS to 0 on a number of devices. 1403 + */ 1404 1404 1405 - /* For Max performance, the MRRS must be set to the largest 1406 - * supported value. However, it cannot be configured larger 1407 - * than the MPS the device or the bus can support. This assumes 1408 - * that the largest MRRS available on the device cannot be 1409 - * smaller than the device MPSS. 1410 - */ 1411 - mrrs = mps < dev_mpss ? mps : dev_mpss; 1412 - } else 1413 - /* In the "safe" case, configure the MRRS for fairness on the 1414 - * bus by making all devices have the same size 1415 - */ 1416 - mrrs = mps; 1405 + if (pcie_bus_config != PCIE_BUS_PERFORMANCE) 1406 + return; 1417 1407 1408 + dev_mpss = 128 << dev->pcie_mpss; 1409 + 1410 + /* For Max performance, the MRRS must be set to the largest supported 1411 + * value. However, it cannot be configured larger than the MPS the 1412 + * device or the bus can support. This assumes that the largest MRRS 1413 + * available on the device cannot be smaller than the device MPSS. 1414 + */ 1415 + mrrs = min(mps, dev_mpss); 1418 1416 1419 1417 /* MRRS is a R/W register. Invalid values can be written, but a 1420 - * subsiquent read will verify if the value is acceptable or not. 1418 + * subsequent read will verify if the value is acceptable or not. 1421 1419 * If the MRRS value provided is not acceptable (e.g., too large), 1422 1420 * shrink the value until it is acceptable to the HW. 1423 1421 */ 1424 1422 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { 1423 + dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value" 1424 + " to %d. If any issues are encountered, please try " 1425 + "running with pci=pcie_bus_safe\n", mrrs); 1425 1426 rc = pcie_set_readrq(dev, mrrs); 1426 1427 if (rc) 1427 - dev_err(&dev->dev, "Failed attempting to set the MRRS\n"); 1428 + dev_err(&dev->dev, 1429 + "Failed attempting to set the MRRS\n"); 1428 1430 1429 1431 mrrs /= 2; 1430 1432 } ··· 1440 1436 if (!pci_is_pcie(dev)) 1441 1437 return 0; 1442 1438 1443 - dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", 1439 + dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", 1444 1440 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev)); 1445 1441 1446 1442 pcie_write_mps(dev, mps); 1447 1443 pcie_write_mrrs(dev, mps); 1448 1444 1449 - dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", 1445 + dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", 1450 1446 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev)); 1451 1447 1452 1448 return 0; ··· 1459 1455 void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) 1460 1456 { 1461 1457 u8 smpss = mpss; 1462 - 1463 - if (!bus->self) 1464 - return; 1465 1458 1466 1459 if (!pci_is_pcie(bus->self)) 1467 1460 return;
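The rewritten pcie_write_mrrs() above negotiates by halving: write a candidate MRRS, read back what the hardware actually kept, and shrink until the value sticks or falls below the 128-byte floor. The same loop as a standalone sketch (the set/get callbacks are stand-ins for pcie_set_readrq()/pcie_get_readrq()):

    /* Returns the accepted read-request size, or -1 if none fits. */
    static int negotiate_mrrs(int (*set)(int), int (*get)(void), int mrrs)
    {
        while (mrrs >= 128) {
            if (set(mrrs) == 0 && get() == mrrs)
                return mrrs;       /* the hardware kept this value */
            mrrs /= 2;             /* too large for this device: halve */
        }
        return -1;
    }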
+8 -8
drivers/rtc/rtc-ep93xx.c
··· 36 36 */ 37 37 struct ep93xx_rtc { 38 38 void __iomem *mmio_base; 39 + struct rtc_device *rtc; 39 40 }; 40 41 41 42 static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload, ··· 131 130 { 132 131 struct ep93xx_rtc *ep93xx_rtc; 133 132 struct resource *res; 134 - struct rtc_device *rtc; 135 133 int err; 136 134 137 135 ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL); ··· 151 151 return -ENXIO; 152 152 153 153 pdev->dev.platform_data = ep93xx_rtc; 154 - platform_set_drvdata(pdev, rtc); 154 + platform_set_drvdata(pdev, ep93xx_rtc); 155 155 156 - rtc = rtc_device_register(pdev->name, 156 + ep93xx_rtc->rtc = rtc_device_register(pdev->name, 157 157 &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); 158 - if (IS_ERR(rtc)) { 159 - err = PTR_ERR(rtc); 158 + if (IS_ERR(ep93xx_rtc->rtc)) { 159 + err = PTR_ERR(ep93xx_rtc->rtc); 160 160 goto exit; 161 161 } 162 162 ··· 167 167 return 0; 168 168 169 169 fail: 170 - rtc_device_unregister(rtc); 170 + rtc_device_unregister(ep93xx_rtc->rtc); 171 171 exit: 172 172 platform_set_drvdata(pdev, NULL); 173 173 pdev->dev.platform_data = NULL; ··· 176 176 177 177 static int __exit ep93xx_rtc_remove(struct platform_device *pdev) 178 178 { 179 - struct rtc_device *rtc = platform_get_drvdata(pdev); 179 + struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev); 180 180 181 181 sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); 182 182 platform_set_drvdata(pdev, NULL); 183 - rtc_device_unregister(rtc); 183 + rtc_device_unregister(ep93xx_rtc->rtc); 184 184 pdev->dev.platform_data = NULL; 185 185 186 186 return 0;
+1
drivers/rtc/rtc-imxdi.c
··· 35 35 #include <linux/module.h> 36 36 #include <linux/platform_device.h> 37 37 #include <linux/rtc.h> 38 + #include <linux/sched.h> 38 39 #include <linux/workqueue.h> 39 40 40 41 /* DryIce Register Definitions */
+2
drivers/rtc/rtc-lib.c
··· 85 85 time -= tm->tm_hour * 3600; 86 86 tm->tm_min = time / 60; 87 87 tm->tm_sec = time - tm->tm_min * 60; 88 + 89 + tm->tm_isdst = 0; 88 90 } 89 91 EXPORT_SYMBOL(rtc_time_to_tm); 90 92
+26
drivers/rtc/rtc-s3c.c
··· 51 51 52 52 static DEFINE_SPINLOCK(s3c_rtc_pie_lock); 53 53 54 + static void s3c_rtc_alarm_clk_enable(bool enable) 55 + { 56 + static DEFINE_SPINLOCK(s3c_rtc_alarm_clk_lock); 57 + static bool alarm_clk_enabled; 58 + unsigned long irq_flags; 59 + 60 + spin_lock_irqsave(&s3c_rtc_alarm_clk_lock, irq_flags); 61 + if (enable) { 62 + if (!alarm_clk_enabled) { 63 + clk_enable(rtc_clk); 64 + alarm_clk_enabled = true; 65 + } 66 + } else { 67 + if (alarm_clk_enabled) { 68 + clk_disable(rtc_clk); 69 + alarm_clk_enabled = false; 70 + } 71 + } 72 + spin_unlock_irqrestore(&s3c_rtc_alarm_clk_lock, irq_flags); 73 + } 74 + 54 75 /* IRQ Handlers */ 55 76 56 77 static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) ··· 85 64 writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP); 86 65 87 66 clk_disable(rtc_clk); 67 + 68 + s3c_rtc_alarm_clk_enable(false); 69 + 88 70 return IRQ_HANDLED; 89 71 } 90 72 ··· 120 96 121 97 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); 122 98 clk_disable(rtc_clk); 99 + 100 + s3c_rtc_alarm_clk_enable(enabled); 123 101 124 102 return 0; 125 103 }
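s3c_rtc_alarm_clk_enable() above is a guarded gate: the spinlock plus a bool make repeated enables (or disables) idempotent, so the clock's enable count can never be pushed twice in the same direction. A userspace analogue with a pthread spinlock (clk_on()/clk_off() are hypothetical):

    #include <pthread.h>
    #include <stdbool.h>

    extern void clk_on(void), clk_off(void);  /* hypothetical clock ops */

    static pthread_spinlock_t gate_lock;      /* pthread_spin_init() before use */
    static bool clk_enabled;

    static void alarm_clk_enable(bool enable)
    {
        pthread_spin_lock(&gate_lock);
        if (enable && !clk_enabled) {
            clk_on();                 /* only the first enable acts */
            clk_enabled = true;
        } else if (!enable && clk_enabled) {
            clk_off();                /* only the first disable acts */
            clk_enabled = false;
        }
        pthread_spin_unlock(&gate_lock);
    }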
+25 -37
drivers/rtc/rtc-twl.c
··· 362 362 int res; 363 363 u8 rd_reg; 364 364 365 - #ifdef CONFIG_LOCKDEP 366 - /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which 367 - * we don't want and can't tolerate. Although it might be 368 - * friendlier not to borrow this thread context... 369 - */ 370 - local_irq_enable(); 371 - #endif 372 - 373 365 res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); 374 366 if (res) 375 367 goto out; ··· 420 428 static int __devinit twl_rtc_probe(struct platform_device *pdev) 421 429 { 422 430 struct rtc_device *rtc; 423 - int ret = 0; 431 + int ret = -EINVAL; 424 432 int irq = platform_get_irq(pdev, 0); 425 433 u8 rd_reg; 426 434 427 435 if (irq <= 0) 428 - return -EINVAL; 429 - 430 - rtc = rtc_device_register(pdev->name, 431 - &pdev->dev, &twl_rtc_ops, THIS_MODULE); 432 - if (IS_ERR(rtc)) { 433 - ret = PTR_ERR(rtc); 434 - dev_err(&pdev->dev, "can't register RTC device, err %ld\n", 435 - PTR_ERR(rtc)); 436 - goto out0; 437 - 438 - } 439 - 440 - platform_set_drvdata(pdev, rtc); 436 + goto out1; 441 437 442 438 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); 443 439 if (ret < 0) ··· 442 462 if (ret < 0) 443 463 goto out1; 444 464 445 - ret = request_irq(irq, twl_rtc_interrupt, 446 - IRQF_TRIGGER_RISING, 447 - dev_name(&rtc->dev), rtc); 448 - if (ret < 0) { 449 - dev_err(&pdev->dev, "IRQ is not free.\n"); 450 - goto out1; 451 - } 452 - 453 465 if (twl_class_is_6030()) { 454 466 twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK, 455 467 REG_INT_MSK_LINE_A); ··· 452 480 /* Check RTC module status, Enable if it is off */ 453 481 ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG); 454 482 if (ret < 0) 455 - goto out2; 483 + goto out1; 456 484 457 485 if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) { 458 486 dev_info(&pdev->dev, "Enabling TWL-RTC.\n"); 459 487 rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M; 460 488 ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG); 461 489 if (ret < 0) 462 - goto out2; 490 + goto out1; 463 491 } 464 492 465 493 /* init cached IRQ enable bits */ 466 494 ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); 467 495 if (ret < 0) 468 - goto out2; 496 + goto out1; 469 497 470 - return ret; 498 + rtc = rtc_device_register(pdev->name, 499 + &pdev->dev, &twl_rtc_ops, THIS_MODULE); 500 + if (IS_ERR(rtc)) { 501 + ret = PTR_ERR(rtc); 502 + dev_err(&pdev->dev, "can't register RTC device, err %ld\n", 503 + PTR_ERR(rtc)); 504 + goto out1; 505 + } 506 + 507 + ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt, 508 + IRQF_TRIGGER_RISING, 509 + dev_name(&rtc->dev), rtc); 510 + if (ret < 0) { 511 + dev_err(&pdev->dev, "IRQ is not free.\n"); 512 + goto out2; 513 + } 514 + 515 + platform_set_drvdata(pdev, rtc); 516 + return 0; 471 517 472 518 out2: 473 - free_irq(irq, rtc); 474 - out1: 475 519 rtc_device_unregister(rtc); 476 - out0: 520 + out1: 477 521 return ret; 478 522 } 479 523
+1 -1
drivers/scsi/bnx2i/bnx2i_hwi.c
··· 563 563 nopout_wqe->itt = ((u16)task->itt | 564 564 (ISCSI_TASK_TYPE_MPATH << 565 565 ISCSI_TMF_REQUEST_TYPE_SHIFT)); 566 - nopout_wqe->ttt = nopout_hdr->ttt; 566 + nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt); 567 567 nopout_wqe->flags = 0; 568 568 if (!unsol) 569 569 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
+8 -5
drivers/scsi/fcoe/fcoe.c
··· 432 432 u8 flogi_maddr[ETH_ALEN]; 433 433 const struct net_device_ops *ops; 434 434 435 + rtnl_lock(); 436 + 435 437 /* 436 438 * Don't listen for Ethernet packets anymore. 437 439 * synchronize_net() ensures that the packet handlers are not running ··· 462 460 FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE" 463 461 " specific feature for LLD.\n"); 464 462 } 463 + 464 + rtnl_unlock(); 465 465 466 466 /* Release the self-reference taken during fcoe_interface_create() */ 467 467 fcoe_interface_put(fcoe); ··· 1955 1951 fcoe_if_destroy(port->lport); 1956 1952 1957 1953 /* Do not tear down the fcoe interface for NPIV port */ 1958 - if (!npiv) { 1959 - rtnl_lock(); 1954 + if (!npiv) 1960 1955 fcoe_interface_cleanup(fcoe); 1961 - rtnl_unlock(); 1962 - } 1963 1956 1964 1957 mutex_unlock(&fcoe_config_mutex); 1965 1958 } ··· 2010 2009 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", 2011 2010 netdev->name); 2012 2011 rc = -EIO; 2012 + rtnl_unlock(); 2013 2013 fcoe_interface_cleanup(fcoe); 2014 - goto out_nodev; 2014 + goto out_nortnl; 2015 2015 } 2016 2016 2017 2017 /* Make this the "master" N_Port */ ··· 2029 2027 2030 2028 out_nodev: 2031 2029 rtnl_unlock(); 2030 + out_nortnl: 2032 2031 mutex_unlock(&fcoe_config_mutex); 2033 2032 return rc; 2034 2033 }
+37 -20
drivers/scsi/hpsa.c
··· 676 676 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); 677 677 removed[*nremoved] = h->dev[entry]; 678 678 (*nremoved)++; 679 + 680 + /* 681 + * New physical devices won't have target/lun assigned yet 682 + * so we need to preserve the values in the slot we are replacing. 683 + */ 684 + if (new_entry->target == -1) { 685 + new_entry->target = h->dev[entry]->target; 686 + new_entry->lun = h->dev[entry]->lun; 687 + } 688 + 679 689 h->dev[entry] = new_entry; 680 690 added[*nadded] = new_entry; 681 691 (*nadded)++; ··· 1558 1548 } 1559 1549 1560 1550 static int hpsa_update_device_info(struct ctlr_info *h, 1561 - unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) 1551 + unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, 1552 + unsigned char *is_OBDR_device) 1562 1553 { 1563 - #define OBDR_TAPE_INQ_SIZE 49 1554 + 1555 + #define OBDR_SIG_OFFSET 43 1556 + #define OBDR_TAPE_SIG "$DR-10" 1557 + #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) 1558 + #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) 1559 + 1564 1560 unsigned char *inq_buff; 1561 + unsigned char *obdr_sig; 1565 1562 1566 1563 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 1567 1564 if (!inq_buff) ··· 1599 1582 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); 1600 1583 else 1601 1584 this_device->raid_level = RAID_UNKNOWN; 1585 + 1586 + if (is_OBDR_device) { 1587 + /* See if this is a One-Button-Disaster-Recovery device 1588 + * by looking for "$DR-10" at offset 43 in inquiry data. 1589 + */ 1590 + obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; 1591 + *is_OBDR_device = (this_device->devtype == TYPE_ROM && 1592 + strncmp(obdr_sig, OBDR_TAPE_SIG, 1593 + OBDR_SIG_LEN) == 0); 1594 + } 1602 1595 1603 1596 kfree(inq_buff); 1604 1597 return 0; ··· 1743 1716 return 0; 1744 1717 } 1745 1718 1746 - if (hpsa_update_device_info(h, scsi3addr, this_device)) 1719 + if (hpsa_update_device_info(h, scsi3addr, this_device, NULL)) 1747 1720 return 0; 1748 1721 (*nmsa2xxx_enclosures)++; 1749 1722 hpsa_set_bus_target_lun(this_device, bus, target, 0); ··· 1835 1808 */ 1836 1809 struct ReportLUNdata *physdev_list = NULL; 1837 1810 struct ReportLUNdata *logdev_list = NULL; 1838 - unsigned char *inq_buff = NULL; 1839 1811 u32 nphysicals = 0; 1840 1812 u32 nlogicals = 0; 1841 1813 u32 ndev_allocated = 0; ··· 1850 1824 GFP_KERNEL); 1851 1825 physdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1852 1826 logdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1853 - inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 1854 1827 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 1855 1828 1856 - if (!currentsd || !physdev_list || !logdev_list || 1857 - !inq_buff || !tmpdevice) { 1829 + if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { 1858 1830 dev_err(&h->pdev->dev, "out of memory\n"); 1859 1831 goto out; 1860 1832 } ··· 1887 1863 /* adjust our table of devices */ 1888 1864 nmsa2xxx_enclosures = 0; 1889 1865 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 1890 - u8 *lunaddrbytes; 1866 + u8 *lunaddrbytes, is_OBDR = 0; 1891 1867 1892 1868 /* Figure out where the LUN ID info is coming from */ 1893 1869 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, ··· 1898 1874 continue; 1899 1875 1900 1876 /* Get device type, vendor, model, device id */ 1901 - if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice)) 1877 + if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, 1878 + &is_OBDR)) 1902 1879 continue; /* skip it if we can't talk to it. */ 1903 1880 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, 1904 1881 tmpdevice); ··· 1923 1898 hpsa_set_bus_target_lun(this_device, bus, target, lun); 1924 1899 1925 1900 switch (this_device->devtype) { 1926 - case TYPE_ROM: { 1901 + case TYPE_ROM: 1927 1902 /* We don't *really* support actual CD-ROM devices, 1928 1903 * just "One Button Disaster Recovery" tape drive 1929 1904 * which temporarily pretends to be a CD-ROM drive. ··· 1931 1906 * device by checking for "$DR-10" in bytes 43-48 of 1932 1907 * the inquiry data. 1933 1908 */ 1934 - char obdr_sig[7]; 1935 - #define OBDR_TAPE_SIG "$DR-10" 1936 - strncpy(obdr_sig, &inq_buff[43], 6); 1937 - obdr_sig[6] = '\0'; 1938 - if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) 1939 - /* Not OBDR device, ignore it. */ 1940 - break; 1941 - } 1942 - ncurrent++; 1909 + if (is_OBDR) 1910 + ncurrent++; 1943 1911 break; 1944 1912 case TYPE_DISK: 1945 1913 if (i < nphysicals) ··· 1965 1947 for (i = 0; i < ndev_allocated; i++) 1966 1948 kfree(currentsd[i]); 1967 1949 kfree(currentsd); 1968 - kfree(inq_buff); 1969 1950 kfree(physdev_list); 1970 1951 kfree(logdev_list); 1971 1952 }
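The hpsa change above folds OBDR detection into the inquiry path: a device counts as a One-Button-Disaster-Recovery tape when the reported type is TYPE_ROM and bytes 43-48 of its INQUIRY data carry the "$DR-10" signature. The test in standalone form (the function name is illustrative; TYPE_ROM is 0x05 in SCSI):

    #include <stdbool.h>
    #include <string.h>

    #define OBDR_SIG_OFFSET 43
    #define OBDR_TAPE_SIG "$DR-10"
    #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)

    static bool is_obdr_device(const unsigned char *inq, size_t inq_len,
                               int devtype)
    {
        if (inq_len < OBDR_SIG_OFFSET + OBDR_SIG_LEN)
            return false;      /* inquiry data too short for the signature */
        return devtype == 0x05 /* TYPE_ROM */ &&
               memcmp(inq + OBDR_SIG_OFFSET, OBDR_TAPE_SIG, OBDR_SIG_LEN) == 0;
    }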
+12 -1
drivers/scsi/isci/host.c
··· 531 531 break; 532 532 533 533 case SCU_COMPLETION_TYPE_EVENT: 534 + sci_controller_event_completion(ihost, ent); 535 + break; 536 + 534 537 case SCU_COMPLETION_TYPE_NOTIFY: { 535 538 event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) << 536 539 (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); ··· 1094 1091 struct isci_request *request; 1095 1092 struct isci_request *next_request; 1096 1093 struct sas_task *task; 1094 + u16 active; 1097 1095 1098 1096 INIT_LIST_HEAD(&completed_request_list); 1099 1097 INIT_LIST_HEAD(&errored_request_list); ··· 1185 1181 } 1186 1182 } 1187 1183 1184 + /* the coalescence timeout doubles at each encoding step, so 1185 + * update it based on the ilog2 value of the outstanding requests 1186 + */ 1187 + active = isci_tci_active(ihost); 1188 + writel(SMU_ICC_GEN_VAL(NUMBER, active) | 1189 + SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)), 1190 + &ihost->smu_registers->interrupt_coalesce_control); 1188 1191 } 1189 1192 1190 1193 /** ··· 1482 1471 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1483 1472 1484 1473 /* set the default interrupt coalescence number and timeout value. */ 1485 - sci_controller_set_interrupt_coalescence(ihost, 0x10, 250); 1474 + sci_controller_set_interrupt_coalescence(ihost, 0, 0); 1486 1475 1487 1476 static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
+3
drivers/scsi/isci/host.h
··· 369 369 #define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1)) 370 370 #define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) 371 371 372 + /* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */ 373 + #define ISCI_COALESCE_BASE 9 374 + 372 375 /* expander attached sata devices require 3 rnc slots */ 373 376 static inline int sci_remote_device_node_count(struct isci_remote_device *idev) 374 377 {
+28 -19
drivers/scsi/isci/init.c
··· 59 59 #include <linux/firmware.h> 60 60 #include <linux/efi.h> 61 61 #include <asm/string.h> 62 + #include <scsi/scsi_host.h> 62 63 #include "isci.h" 63 64 #include "task.h" 64 65 #include "probe_roms.h" 66 + 67 + #define MAJ 1 68 + #define MIN 0 69 + #define BUILD 0 70 + #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 71 + __stringify(BUILD) 72 + 73 + MODULE_VERSION(DRV_VERSION); 65 74 66 75 static struct scsi_transport_template *isci_transport_template; 67 76 ··· 122 113 module_param(max_concurr_spinup, byte, 0); 123 114 MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); 124 115 116 + static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) 117 + { 118 + struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); 119 + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); 120 + struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); 121 + 122 + return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); 123 + } 124 + 125 + static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); 126 + 127 + struct device_attribute *isci_host_attrs[] = { 128 + &dev_attr_isci_id, 129 + NULL 130 + }; 131 + 125 132 static struct scsi_host_template isci_sht = { 126 133 127 134 .module = THIS_MODULE, ··· 163 138 .slave_alloc = sas_slave_alloc, 164 139 .target_destroy = sas_target_destroy, 165 140 .ioctl = sas_ioctl, 141 + .shost_attrs = isci_host_attrs, 166 142 }; 167 143 168 144 static struct sas_domain_function_template isci_transport_ops = { ··· 258 232 return 0; 259 233 } 260 234 261 - static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) 262 - { 263 - struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); 264 - struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); 265 - struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); 266 - 267 - return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); 268 - } 269 - 270 - static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); 271 - 272 235 static void isci_unregister(struct isci_host *isci_host) 273 236 { 274 237 struct Scsi_Host *shost; ··· 266 251 return; 267 252 268 253 shost = isci_host->shost; 269 - device_remove_file(&shost->shost_dev, &dev_attr_isci_id); 270 254 271 255 sas_unregister_ha(&isci_host->sas_ha); 272 256 ··· 429 415 if (err) 430 416 goto err_shost_remove; 431 417 432 - err = device_create_file(&shost->shost_dev, &dev_attr_isci_id); 433 - if (err) 434 - goto err_unregister_ha; 435 - 436 418 return isci_host; 437 419 438 - err_unregister_ha: 439 - sas_unregister_ha(&(isci_host->sas_ha)); 440 420 err_shost_remove: 441 421 scsi_remove_host(shost); 442 422 err_shost: ··· 548 540 { 549 541 int err; 550 542 551 - pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME); 543 + pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n", 544 + DRV_NAME, DRV_VERSION); 552 545 553 546 isci_transport_template = sas_domain_attach_transport(&isci_transport_ops); 554 547 if (!isci_transport_template)
+13
drivers/scsi/isci/phy.c
··· 104 104 u32 parity_count = 0; 105 105 u32 llctl, link_rate; 106 106 u32 clksm_value = 0; 107 + u32 sp_timeouts = 0; 107 108 108 109 iphy->link_layer_registers = reg; 109 110 ··· 211 210 } 212 211 llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); 213 212 writel(llctl, &iphy->link_layer_registers->link_layer_control); 213 + 214 + sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts); 215 + 216 + /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */ 217 + sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); 218 + 219 + /* Set RATE_CHANGE timeout value to 0x3B (59us). This ensures SCU can 220 + * lock with 3Gb drive when SCU max rate is set to 1.5Gb. 221 + */ 222 + sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); 223 + 224 + writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts); 214 225 215 226 if (is_a2(ihost->pdev)) { 216 227 /* Program the max ARB time for the PHY to 700us so we inter-operate with
+12
drivers/scsi/isci/registers.h
··· 1299 1299 #define SCU_AFE_XCVRCR_OFFSET 0x00DC 1300 1300 #define SCU_AFE_LUTCR_OFFSET 0x00E0 1301 1301 1302 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT (0UL) 1303 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK (0x000000FFUL) 1304 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT (8UL) 1305 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK (0x0000FF00UL) 1306 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT (16UL) 1307 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK (0x00FF0000UL) 1308 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT (24UL) 1309 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK (0xFF000000UL) 1310 + 1311 + #define SCU_SAS_PHYTOV_GEN_VAL(name, value) \ 1312 + SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value) 1313 + 1302 1314 #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0) 1303 1315 #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003) 1304 1316 #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0)
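The new RATE_CHANGE shift/mask pair feeds the SCU_SAS_PHYTOV_GEN_VAL() wrapper used in the phy.c hunk above. SCU_GEN_VALUE() itself is defined elsewhere in registers.h; assuming it is the driver's usual shift-and-mask generator, the expansion looks like this (a sketch, not the verbatim definition):

    /* Assumed shape of SCU_GEN_VALUE (not shown in this diff) */
    #define SCU_GEN_VALUE(name, value) \
            (((value) << name ## _SHIFT) & name ## _MASK)

    /*
     * SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B)
     *   -> (0x3B << 24) & 0xFF000000 == 0x3B000000,
     * i.e. the timeout value lands in bits 31:24 of sas_phy_timeouts.
     */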
+16 -14
drivers/scsi/isci/request.c
··· 732 732 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 733 733 return SCI_SUCCESS; 734 734 case SCI_REQ_TASK_WAIT_TC_RESP: 735 + /* The task frame was already confirmed to have been 736 + * sent by the SCU HW. Since the state machine is 737 + * now only waiting for the task response itself, 738 + * abort the request and complete it immediately 739 + * and don't wait for the task response. 740 + */ 735 741 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 736 742 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 737 743 return SCI_SUCCESS; 738 744 case SCI_REQ_ABORTING: 739 - sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 740 - return SCI_SUCCESS; 745 + /* If a request has a termination requested twice, return 746 + * a failure indication, since HW confirmation of the first 747 + * abort is still outstanding. 748 + */ 741 749 case SCI_REQ_COMPLETED: 742 750 default: 743 751 dev_warn(&ireq->owning_controller->pdev->dev, ··· 2407 2399 } 2408 2400 } 2409 2401 2410 - static void isci_request_process_stp_response(struct sas_task *task, 2411 - void *response_buffer) 2402 + static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) 2412 2403 { 2413 - struct dev_to_host_fis *d2h_reg_fis = response_buffer; 2414 2404 struct task_status_struct *ts = &task->task_status; 2415 2405 struct ata_task_resp *resp = (void *)&ts->buf[0]; 2416 2406 2417 - resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6)); 2418 - memcpy(&resp->ending_fis[0], response_buffer + 16, 24); 2407 + resp->frame_len = sizeof(*fis); 2408 + memcpy(resp->ending_fis, fis, sizeof(*fis)); 2419 2409 ts->buf_valid_size = sizeof(*resp); 2420 2410 2421 - /** 2422 - * If the device fault bit is set in the status register, then 2411 + /* If the device fault bit is set in the status register, then 2423 2412 * set the sense data and return. 2424 2413 */ 2425 - if (d2h_reg_fis->status & ATA_DF) 2414 + if (fis->status & ATA_DF) 2426 2415 ts->stat = SAS_PROTO_RESPONSE; 2427 2416 else 2428 2417 ts->stat = SAM_STAT_GOOD; ··· 2433 2428 { 2434 2429 struct sas_task *task = isci_request_access_task(request); 2435 2430 struct ssp_response_iu *resp_iu; 2436 - void *resp_buf; 2437 2431 unsigned long task_flags; 2438 2432 struct isci_remote_device *idev = isci_lookup_device(task->dev); 2439 2433 enum service_response response = SAS_TASK_UNDELIVERED; ··· 2569 2565 task); 2570 2566 2571 2567 if (sas_protocol_ata(task->task_proto)) { 2572 - resp_buf = &request->stp.rsp; 2573 - isci_request_process_stp_response(task, 2574 - resp_buf); 2568 + isci_process_stp_response(task, &request->stp.rsp); 2575 2569 } else if (SAS_PROTOCOL_SSP == task->task_proto) { 2576 2570 2577 2571 /* crack the iu response buffer. */
+1 -1
drivers/scsi/isci/unsolicited_frame_control.c
··· 72 72 */ 73 73 buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE; 74 74 header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header); 75 - size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t); 75 + size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]); 76 76 77 77 /* 78 78 * The Unsolicited Frame buffers are set at the start of the UF
+1 -1
drivers/scsi/isci/unsolicited_frame_control.h
··· 214 214 * starting address of the UF address table. 215 215 * 64-bit pointers are required by the hardware. 216 216 */ 217 - dma_addr_t *array; 217 + u64 *array; 218 218 219 219 /** 220 220 * This field specifies the physical address location for the UF
+41 -18
drivers/scsi/libfc/fc_exch.c
··· 494 494 */ 495 495 error = lport->tt.frame_send(lport, fp); 496 496 497 + if (fh->fh_type == FC_TYPE_BLS) 498 + return error; 499 + 497 500 /* 498 501 * Update the exchange and sequence flags, 499 502 * assuming all frames for the sequence have been sent. ··· 578 575 } 579 576 580 577 /** 581 - * fc_seq_exch_abort() - Abort an exchange and sequence 582 - * @req_sp: The sequence to be aborted 578 + * fc_exch_abort_locked() - Abort an exchange 579 + * @ep: The exchange to be aborted 583 580 * @timer_msec: The period of time to wait before aborting 584 581 * 585 - * Generally called because of a timeout or an abort from the upper layer. 582 + * Locking notes: Called with exch lock held 583 + * 584 + * Return value: 0 on success else error code 586 585 */ 587 - static int fc_seq_exch_abort(const struct fc_seq *req_sp, 588 - unsigned int timer_msec) 586 + static int fc_exch_abort_locked(struct fc_exch *ep, 587 + unsigned int timer_msec) 589 588 { 590 589 struct fc_seq *sp; 591 - struct fc_exch *ep; 592 590 struct fc_frame *fp; 593 591 int error; 594 592 595 - ep = fc_seq_exch(req_sp); 596 - 597 - spin_lock_bh(&ep->ex_lock); 598 593 if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) || 599 - ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) { 600 - spin_unlock_bh(&ep->ex_lock); 594 + ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) 601 595 return -ENXIO; 602 - } 603 596 604 597 /* 605 598 * Send the abort on a new sequence if possible. 606 599 */ 607 600 sp = fc_seq_start_next_locked(&ep->seq); 608 - if (!sp) { 609 - spin_unlock_bh(&ep->ex_lock); 601 + if (!sp) 610 602 return -ENOMEM; 611 - } 612 603 613 604 ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL; 614 605 if (timer_msec) 615 606 fc_exch_timer_set_locked(ep, timer_msec); 616 - spin_unlock_bh(&ep->ex_lock); 617 607 618 608 /* 619 609 * If not logged into the fabric, don't send ABTS but leave ··· 625 629 error = fc_seq_send(ep->lp, sp, fp); 626 630 } else 627 631 error = -ENOBUFS; 632 + return error; 633 + } 634 + 635 + /** 636 + * fc_seq_exch_abort() - Abort an exchange and sequence 637 + * @req_sp: The sequence to be aborted 638 + * @timer_msec: The period of time to wait before aborting 639 + * 640 + * Generally called because of a timeout or an abort from the upper layer. 
641 + * 642 + * Return value: 0 on success else error code 643 + */ 644 + static int fc_seq_exch_abort(const struct fc_seq *req_sp, 645 + unsigned int timer_msec) 646 + { 647 + struct fc_exch *ep; 648 + int error; 649 + 650 + ep = fc_seq_exch(req_sp); 651 + spin_lock_bh(&ep->ex_lock); 652 + error = fc_exch_abort_locked(ep, timer_msec); 653 + spin_unlock_bh(&ep->ex_lock); 628 654 return error; 629 655 } 630 656 ··· 1733 1715 int rc = 1; 1734 1716 1735 1717 spin_lock_bh(&ep->ex_lock); 1718 + fc_exch_abort_locked(ep, 0); 1736 1719 ep->state |= FC_EX_RST_CLEANUP; 1737 1720 if (cancel_delayed_work(&ep->timeout_work)) 1738 1721 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ ··· 1981 1962 struct fc_exch *ep; 1982 1963 struct fc_seq *sp = NULL; 1983 1964 struct fc_frame_header *fh; 1965 + struct fc_fcp_pkt *fsp = NULL; 1984 1966 int rc = 1; 1985 1967 1986 1968 ep = fc_exch_alloc(lport, fp); ··· 2004 1984 fc_exch_setup_hdr(ep, fp, ep->f_ctl); 2005 1985 sp->cnt++; 2006 1986 2007 - if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) 1987 + if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) { 1988 + fsp = fr_fsp(fp); 2008 1989 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); 1990 + } 2009 1991 2010 1992 if (unlikely(lport->tt.frame_send(lport, fp))) 2011 1993 goto err; ··· 2021 1999 spin_unlock_bh(&ep->ex_lock); 2022 2000 return sp; 2023 2001 err: 2024 - fc_fcp_ddp_done(fr_fsp(fp)); 2002 + if (fsp) 2003 + fc_fcp_ddp_done(fsp); 2025 2004 rc = fc_exch_done_locked(ep); 2026 2005 spin_unlock_bh(&ep->ex_lock); 2027 2006 if (!rc)
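This is the common "_locked" refactor: fc_exch_reset() already holds ex_lock when it now calls fc_exch_abort_locked(), so the abort logic is hoisted into a lock-held helper while fc_seq_exch_abort() keeps the lock acquisition it always had. The skeleton of the pattern, with illustrative names (not libfc API):

    #include <linux/spinlock.h>

    struct obj {
            spinlock_t lock;
            /* ... */
    };

    /* For callers that already hold obj->lock */
    static int do_thing_locked(struct obj *o)
    {
            /* work that must run with o->lock held */
            return 0;
    }

    /* Public entry point: takes the lock, then delegates */
    static int do_thing(struct obj *o)
    {
            int err;

            spin_lock_bh(&o->lock);
            err = do_thing_locked(o);
            spin_unlock_bh(&o->lock);
            return err;
    }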
+9 -2
drivers/scsi/libfc/fc_fcp.c
··· 2019 2019 struct fc_fcp_internal *si; 2020 2020 int rc = FAILED; 2021 2021 unsigned long flags; 2022 + int rval; 2023 + 2024 + rval = fc_block_scsi_eh(sc_cmd); 2025 + if (rval) 2026 + return rval; 2022 2027 2023 2028 lport = shost_priv(sc_cmd->device->host); 2024 2029 if (lport->state != LPORT_ST_READY) ··· 2073 2068 int rc = FAILED; 2074 2069 int rval; 2075 2070 2076 - rval = fc_remote_port_chkready(rport); 2071 + rval = fc_block_scsi_eh(sc_cmd); 2077 2072 if (rval) 2078 - goto out; 2073 + return rval; 2079 2074 2080 2075 lport = shost_priv(sc_cmd->device->host); 2081 2076 ··· 2120 2115 unsigned long wait_tmo; 2121 2116 2122 2117 FC_SCSI_DBG(lport, "Resetting host\n"); 2118 + 2119 + fc_block_scsi_eh(sc_cmd); 2123 2120 2124 2121 lport->tt.lport_reset(lport); 2125 2122 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
+10 -1
drivers/scsi/libfc/fc_lport.c
···  88   88   */  89   89    90   90   #include <linux/timer.h>  91  +  #include <linux/delay.h>  91   92   #include <linux/slab.h>  92   93   #include <asm/unaligned.h>  93   94    ···  1030   1029   FCH_EVT_LIPRESET, 0);  1031   1030   fc_vports_linkchange(lport);  1032   1031   fc_lport_reset_locked(lport);  1033  -  if (lport->link_up)  1032  +  if (lport->link_up) {  1033  +  /*  1034  +  * Wait up to the resource allocation timeout before  1035  +  * re-login, since incomplete FIP exchanges from the  1036  +  * last session may collide with exchanges in the  1037  +  * new session.  1038  +  */  1039  +  msleep(lport->r_a_tov);  1034  1040   fc_lport_enter_flogi(lport);  1041  +  }  1035  1042   }  1036  1043    1037  1044   /**
+5 -2
drivers/scsi/qla2xxx/qla_attr.c
··· 1786 1786 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); 1787 1787 } 1788 1788 1789 - if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 1789 + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 1790 1790 if (ha->fw_attributes & BIT_4) { 1791 + int prot = 0; 1791 1792 vha->flags.difdix_supported = 1; 1792 1793 ql_dbg(ql_dbg_user, vha, 0x7082, 1793 1794 "Registered for DIF/DIX type 1 and 3 protection.\n"); 1795 + if (ql2xenabledif == 1) 1796 + prot = SHOST_DIX_TYPE0_PROTECTION; 1794 1797 scsi_host_set_prot(vha->host, 1795 - SHOST_DIF_TYPE1_PROTECTION 1798 + prot | SHOST_DIF_TYPE1_PROTECTION 1796 1799 | SHOST_DIF_TYPE2_PROTECTION 1797 1800 | SHOST_DIF_TYPE3_PROTECTION 1798 1801 | SHOST_DIX_TYPE1_PROTECTION
+18 -18
drivers/scsi/qla2xxx/qla_dbg.c
··· 8 8 /* 9 9 * Table for showing the current message id in use for particular level 10 10 * Change this table for addition of log/debug messages. 11 - * ----------------------------------------------------- 12 - * | Level | Last Value Used | 13 - * ----------------------------------------------------- 14 - * | Module Init and Probe | 0x0116 | 15 - * | Mailbox commands | 0x111e | 16 - * | Device Discovery | 0x2083 | 17 - * | Queue Command and IO tracing | 0x302e | 18 - * | DPC Thread | 0x401c | 19 - * | Async Events | 0x5059 | 20 - * | Timer Routines | 0x600d | 21 - * | User Space Interactions | 0x709c | 22 - * | Task Management | 0x8043 | 23 - * | AER/EEH | 0x900f | 24 - * | Virtual Port | 0xa007 | 25 - * | ISP82XX Specific | 0xb027 | 26 - * | MultiQ | 0xc00b | 27 - * | Misc | 0xd00b | 28 - * ----------------------------------------------------- 11 + * ---------------------------------------------------------------------- 12 + * | Level | Last Value Used | Holes | 13 + * ---------------------------------------------------------------------- 14 + * | Module Init and Probe | 0x0116 | | 15 + * | Mailbox commands | 0x1126 | | 16 + * | Device Discovery | 0x2083 | | 17 + * | Queue Command and IO tracing | 0x302e | 0x3008 | 18 + * | DPC Thread | 0x401c | | 19 + * | Async Events | 0x5059 | | 20 + * | Timer Routines | 0x600d | | 21 + * | User Space Interactions | 0x709d | | 22 + * | Task Management | 0x8041 | | 23 + * | AER/EEH | 0x900f | | 24 + * | Virtual Port | 0xa007 | | 25 + * | ISP82XX Specific | 0xb04f | | 26 + * | MultiQ | 0xc00b | | 27 + * | Misc | 0xd00b | | 28 + * ---------------------------------------------------------------------- 29 29 */ 30 30 31 31 #include "qla_def.h"
+2
drivers/scsi/qla2xxx/qla_def.h
··· 2529 2529 #define DT_ISP8021 BIT_14 2530 2530 #define DT_ISP_LAST (DT_ISP8021 << 1) 2531 2531 2532 + #define DT_T10_PI BIT_25 2532 2533 #define DT_IIDMA BIT_26 2533 2534 #define DT_FWI2 BIT_27 2534 2535 #define DT_ZIO_SUPPORTED BIT_28 ··· 2573 2572 #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha)) 2574 2573 #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) 2575 2574 2575 + #define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI) 2576 2576 #define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA) 2577 2577 #define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2) 2578 2578 #define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
+5
drivers/scsi/qla2xxx/qla_fw.h
··· 537 537 /* 538 538 * If DIF Error is set in comp_status, these additional fields are 539 539 * defined: 540 + * 541 + * !!! NOTE: Firmware sends expected/actual DIF data in big endian 542 + * format; but all of the "data" field gets swab32-d in the beginning 543 + * of qla2x00_status_entry(). 544 + * 540 545 * &data[10] : uint8_t report_runt_bg[2]; - computed guard 541 546 * &data[12] : uint8_t actual_dif[8]; - DIF Data received 542 547 * &data[20] : uint8_t expected_dif[8]; - DIF Data computed
-3
drivers/scsi/qla2xxx/qla_init.c
··· 3838 3838 req = vha->req; 3839 3839 rsp = req->rsp; 3840 3840 3841 - atomic_set(&vha->loop_state, LOOP_UPDATE); 3842 3841 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3843 3842 if (vha->flags.online) { 3844 3843 if (!(rval = qla2x00_fw_ready(vha))) { 3845 3844 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3846 3845 wait_time = 256; 3847 3846 do { 3848 - atomic_set(&vha->loop_state, LOOP_UPDATE); 3849 - 3850 3847 /* Issue a marker after FW becomes ready. */ 3851 3848 qla2x00_marker(vha, req, rsp, 0, 0, 3852 3849 MK_SYNC_ALL);
+29
drivers/scsi/qla2xxx/qla_inline.h
··· 102 102 fcport->d_id.b.al_pa); 103 103 } 104 104 } 105 + 106 + static inline int 107 + qla2x00_hba_err_chk_enabled(srb_t *sp) 108 + { 109 + /* 110 + * Uncomment when corresponding SCSI changes are done. 111 + * 112 + if (!sp->cmd->prot_chk) 113 + return 0; 114 + * 115 + */ 116 + 117 + switch (scsi_get_prot_op(sp->cmd)) { 118 + case SCSI_PROT_READ_STRIP: 119 + case SCSI_PROT_WRITE_INSERT: 120 + if (ql2xenablehba_err_chk >= 1) 121 + return 1; 122 + break; 123 + case SCSI_PROT_READ_PASS: 124 + case SCSI_PROT_WRITE_PASS: 125 + if (ql2xenablehba_err_chk >= 2) 126 + return 1; 127 + break; 128 + case SCSI_PROT_READ_INSERT: 129 + case SCSI_PROT_WRITE_STRIP: 130 + return 1; 131 + } 132 + return 0; 133 + }
+235 -47
drivers/scsi/qla2xxx/qla_iocb.c
··· 709 709 * 710 710 */ 711 711 static inline void 712 - qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, 712 + qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, 713 713 unsigned int protcnt) 714 714 { 715 - struct sd_dif_tuple *spt; 715 + struct scsi_cmnd *cmd = sp->cmd; 716 716 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 717 - unsigned char op = scsi_get_prot_op(cmd); 718 717 719 718 switch (scsi_get_prot_type(cmd)) { 720 - /* For TYPE 0 protection: no checking */ 721 719 case SCSI_PROT_DIF_TYPE0: 722 - pkt->ref_tag_mask[0] = 0x00; 723 - pkt->ref_tag_mask[1] = 0x00; 724 - pkt->ref_tag_mask[2] = 0x00; 725 - pkt->ref_tag_mask[3] = 0x00; 720 + /* 721 + * No check for ql2xenablehba_err_chk, as it would be an 722 + * I/O error if hba tag generation is not done. 723 + */ 724 + pkt->ref_tag = cpu_to_le32((uint32_t) 725 + (0xffffffff & scsi_get_lba(cmd))); 726 + 727 + if (!qla2x00_hba_err_chk_enabled(sp)) 728 + break; 729 + 730 + pkt->ref_tag_mask[0] = 0xff; 731 + pkt->ref_tag_mask[1] = 0xff; 732 + pkt->ref_tag_mask[2] = 0xff; 733 + pkt->ref_tag_mask[3] = 0xff; 726 734 break; 727 735 728 736 /* ··· 738 730 * match LBA in CDB + N 739 731 */ 740 732 case SCSI_PROT_DIF_TYPE2: 741 - if (!ql2xenablehba_err_chk) 742 - break; 743 - 744 - if (scsi_prot_sg_count(cmd)) { 745 - spt = page_address(sg_page(scsi_prot_sglist(cmd))) + 746 - scsi_prot_sglist(cmd)[0].offset; 747 - pkt->app_tag = swab32(spt->app_tag); 748 - pkt->app_tag_mask[0] = 0xff; 749 - pkt->app_tag_mask[1] = 0xff; 750 - } 733 + pkt->app_tag = __constant_cpu_to_le16(0); 734 + pkt->app_tag_mask[0] = 0x0; 735 + pkt->app_tag_mask[1] = 0x0; 751 736 752 737 pkt->ref_tag = cpu_to_le32((uint32_t) 753 738 (0xffffffff & scsi_get_lba(cmd))); 739 + 740 + if (!qla2x00_hba_err_chk_enabled(sp)) 741 + break; 754 742 755 743 /* enable ALL bytes of the ref tag */ 756 744 pkt->ref_tag_mask[0] = 0xff; ··· 767 763 * 16 bit app tag. 
768 764 */ 769 765 case SCSI_PROT_DIF_TYPE1: 770 - if (!ql2xenablehba_err_chk) 766 + pkt->ref_tag = cpu_to_le32((uint32_t) 767 + (0xffffffff & scsi_get_lba(cmd))); 768 + pkt->app_tag = __constant_cpu_to_le16(0); 769 + pkt->app_tag_mask[0] = 0x0; 770 + pkt->app_tag_mask[1] = 0x0; 771 + 772 + if (!qla2x00_hba_err_chk_enabled(sp)) 771 773 break; 772 774 773 - if (protcnt && (op == SCSI_PROT_WRITE_STRIP || 774 - op == SCSI_PROT_WRITE_PASS)) { 775 - spt = page_address(sg_page(scsi_prot_sglist(cmd))) + 776 - scsi_prot_sglist(cmd)[0].offset; 777 - ql_dbg(ql_dbg_io, vha, 0x3008, 778 - "LBA from user %p, lba = 0x%x for cmd=%p.\n", 779 - spt, (int)spt->ref_tag, cmd); 780 - pkt->ref_tag = swab32(spt->ref_tag); 781 - pkt->app_tag_mask[0] = 0x0; 782 - pkt->app_tag_mask[1] = 0x0; 783 - } else { 784 - pkt->ref_tag = cpu_to_le32((uint32_t) 785 - (0xffffffff & scsi_get_lba(cmd))); 786 - pkt->app_tag = __constant_cpu_to_le16(0); 787 - pkt->app_tag_mask[0] = 0x0; 788 - pkt->app_tag_mask[1] = 0x0; 789 - } 790 775 /* enable ALL bytes of the ref tag */ 791 776 pkt->ref_tag_mask[0] = 0xff; 792 777 pkt->ref_tag_mask[1] = 0xff; ··· 791 798 scsi_get_prot_type(cmd), cmd); 792 799 } 793 800 801 + struct qla2_sgx { 802 + dma_addr_t dma_addr; /* OUT */ 803 + uint32_t dma_len; /* OUT */ 794 804 805 + uint32_t tot_bytes; /* IN */ 806 + struct scatterlist *cur_sg; /* IN */ 807 + 808 + /* for book keeping, bzero on initial invocation */ 809 + uint32_t bytes_consumed; 810 + uint32_t num_bytes; 811 + uint32_t tot_partial; 812 + 813 + /* for debugging */ 814 + uint32_t num_sg; 815 + srb_t *sp; 816 + }; 817 + 818 + static int 819 + qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, 820 + uint32_t *partial) 821 + { 822 + struct scatterlist *sg; 823 + uint32_t cumulative_partial, sg_len; 824 + dma_addr_t sg_dma_addr; 825 + 826 + if (sgx->num_bytes == sgx->tot_bytes) 827 + return 0; 828 + 829 + sg = sgx->cur_sg; 830 + cumulative_partial = sgx->tot_partial; 831 + 832 + sg_dma_addr = sg_dma_address(sg); 833 + sg_len = sg_dma_len(sg); 834 + 835 + sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed; 836 + 837 + if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) { 838 + sgx->dma_len = (blk_sz - cumulative_partial); 839 + sgx->tot_partial = 0; 840 + sgx->num_bytes += blk_sz; 841 + *partial = 0; 842 + } else { 843 + sgx->dma_len = sg_len - sgx->bytes_consumed; 844 + sgx->tot_partial += sgx->dma_len; 845 + *partial = 1; 846 + } 847 + 848 + sgx->bytes_consumed += sgx->dma_len; 849 + 850 + if (sg_len == sgx->bytes_consumed) { 851 + sg = sg_next(sg); 852 + sgx->num_sg++; 853 + sgx->cur_sg = sg; 854 + sgx->bytes_consumed = 0; 855 + } 856 + 857 + return 1; 858 + } 859 + 860 + static int 861 + qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, 862 + uint32_t *dsd, uint16_t tot_dsds) 863 + { 864 + void *next_dsd; 865 + uint8_t avail_dsds = 0; 866 + uint32_t dsd_list_len; 867 + struct dsd_dma *dsd_ptr; 868 + struct scatterlist *sg_prot; 869 + uint32_t *cur_dsd = dsd; 870 + uint16_t used_dsds = tot_dsds; 871 + 872 + uint32_t prot_int; 873 + uint32_t partial; 874 + struct qla2_sgx sgx; 875 + dma_addr_t sle_dma; 876 + uint32_t sle_dma_len, tot_prot_dma_len = 0; 877 + struct scsi_cmnd *cmd = sp->cmd; 878 + 879 + prot_int = cmd->device->sector_size; 880 + 881 + memset(&sgx, 0, sizeof(struct qla2_sgx)); 882 + sgx.tot_bytes = scsi_bufflen(sp->cmd); 883 + sgx.cur_sg = scsi_sglist(sp->cmd); 884 + sgx.sp = sp; 885 + 886 + sg_prot = scsi_prot_sglist(sp->cmd); 887 + 888 + while 
(qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { 889 + 890 + sle_dma = sgx.dma_addr; 891 + sle_dma_len = sgx.dma_len; 892 + alloc_and_fill: 893 + /* Allocate additional continuation packets? */ 894 + if (avail_dsds == 0) { 895 + avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 896 + QLA_DSDS_PER_IOCB : used_dsds; 897 + dsd_list_len = (avail_dsds + 1) * 12; 898 + used_dsds -= avail_dsds; 899 + 900 + /* allocate tracking DS */ 901 + dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 902 + if (!dsd_ptr) 903 + return 1; 904 + 905 + /* allocate new list */ 906 + dsd_ptr->dsd_addr = next_dsd = 907 + dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 908 + &dsd_ptr->dsd_list_dma); 909 + 910 + if (!next_dsd) { 911 + /* 912 + * Need to cleanup only this dsd_ptr, rest 913 + * will be done by sp_free_dma() 914 + */ 915 + kfree(dsd_ptr); 916 + return 1; 917 + } 918 + 919 + list_add_tail(&dsd_ptr->list, 920 + &((struct crc_context *)sp->ctx)->dsd_list); 921 + 922 + sp->flags |= SRB_CRC_CTX_DSD_VALID; 923 + 924 + /* add new list to cmd iocb or last list */ 925 + *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 926 + *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 927 + *cur_dsd++ = dsd_list_len; 928 + cur_dsd = (uint32_t *)next_dsd; 929 + } 930 + *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 931 + *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 932 + *cur_dsd++ = cpu_to_le32(sle_dma_len); 933 + avail_dsds--; 934 + 935 + if (partial == 0) { 936 + /* Got a full protection interval */ 937 + sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len; 938 + sle_dma_len = 8; 939 + 940 + tot_prot_dma_len += sle_dma_len; 941 + if (tot_prot_dma_len == sg_dma_len(sg_prot)) { 942 + tot_prot_dma_len = 0; 943 + sg_prot = sg_next(sg_prot); 944 + } 945 + 946 + partial = 1; /* So as to not re-enter this block */ 947 + goto alloc_and_fill; 948 + } 949 + } 950 + /* Null termination */ 951 + *cur_dsd++ = 0; 952 + *cur_dsd++ = 0; 953 + *cur_dsd++ = 0; 954 + return 0; 955 + } 795 956 static int 796 957 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, 797 958 uint16_t tot_dsds) ··· 1128 981 struct scsi_cmnd *cmd; 1129 982 struct scatterlist *cur_seg; 1130 983 int sgc; 1131 - uint32_t total_bytes; 984 + uint32_t total_bytes = 0; 1132 985 uint32_t data_bytes; 1133 986 uint32_t dif_bytes; 1134 987 uint8_t bundling = 1; ··· 1170 1023 __constant_cpu_to_le16(CF_READ_DATA); 1171 1024 } 1172 1025 1173 - tot_prot_dsds = scsi_prot_sg_count(cmd); 1174 - if (!tot_prot_dsds) 1026 + if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) || 1027 + (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) || 1028 + (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) || 1029 + (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT)) 1175 1030 bundling = 0; 1176 1031 1177 1032 /* Allocate CRC context from global pool */ ··· 1196 1047 1197 1048 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); 1198 1049 1199 - qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *) 1050 + qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *) 1200 1051 &crc_ctx_pkt->ref_tag, tot_prot_dsds); 1201 1052 1202 1053 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); ··· 1225 1076 fcp_cmnd->additional_cdb_len |= 2; 1226 1077 1227 1078 int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); 1228 - host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun)); 1229 1079 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 1230 1080 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); 1231 1081 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( ··· 
1255 1107 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ 1256 1108 1257 1109 /* Compute dif len and adjust data len to incude protection */ 1258 - total_bytes = data_bytes; 1259 1110 dif_bytes = 0; 1260 1111 blk_size = cmd->device->sector_size; 1261 - if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 1262 - dif_bytes = (data_bytes / blk_size) * 8; 1263 - total_bytes += dif_bytes; 1112 + dif_bytes = (data_bytes / blk_size) * 8; 1113 + 1114 + switch (scsi_get_prot_op(sp->cmd)) { 1115 + case SCSI_PROT_READ_INSERT: 1116 + case SCSI_PROT_WRITE_STRIP: 1117 + total_bytes = data_bytes; 1118 + data_bytes += dif_bytes; 1119 + break; 1120 + 1121 + case SCSI_PROT_READ_STRIP: 1122 + case SCSI_PROT_WRITE_INSERT: 1123 + case SCSI_PROT_READ_PASS: 1124 + case SCSI_PROT_WRITE_PASS: 1125 + total_bytes = data_bytes + dif_bytes; 1126 + break; 1127 + default: 1128 + BUG(); 1264 1129 } 1265 1130 1266 - if (!ql2xenablehba_err_chk) 1131 + if (!qla2x00_hba_err_chk_enabled(sp)) 1267 1132 fw_prot_opts |= 0x10; /* Disable Guard tag checking */ 1268 1133 1269 1134 if (!bundling) { ··· 1312 1151 1313 1152 cmd_pkt->control_flags |= 1314 1153 __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); 1315 - if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, 1154 + 1155 + if (!bundling && tot_prot_dsds) { 1156 + if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, 1157 + cur_dsd, tot_dsds)) 1158 + goto crc_queuing_error; 1159 + } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, 1316 1160 (tot_dsds - tot_prot_dsds))) 1317 1161 goto crc_queuing_error; 1318 1162 ··· 1580 1414 goto queuing_error; 1581 1415 else 1582 1416 sp->flags |= SRB_DMA_VALID; 1417 + 1418 + if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1419 + (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 1420 + struct qla2_sgx sgx; 1421 + uint32_t partial; 1422 + 1423 + memset(&sgx, 0, sizeof(struct qla2_sgx)); 1424 + sgx.tot_bytes = scsi_bufflen(cmd); 1425 + sgx.cur_sg = scsi_sglist(cmd); 1426 + sgx.sp = sp; 1427 + 1428 + nseg = 0; 1429 + while (qla24xx_get_one_block_sg( 1430 + cmd->device->sector_size, &sgx, &partial)) 1431 + nseg++; 1432 + } 1583 1433 } else 1584 1434 nseg = 0; 1585 1435 ··· 1610 1428 goto queuing_error; 1611 1429 else 1612 1430 sp->flags |= SRB_CRC_PROT_DMA_VALID; 1431 + 1432 + if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1433 + (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 1434 + nseg = scsi_bufflen(cmd) / cmd->device->sector_size; 1435 + } 1613 1436 } else { 1614 1437 nseg = 0; 1615 1438 } ··· 1641 1454 /* Build header part of command packet (excluding the OPCODE). */ 1642 1455 req->current_outstanding_cmd = handle; 1643 1456 req->outstanding_cmds[handle] = sp; 1457 + sp->handle = handle; 1644 1458 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1645 1459 req->cnt -= req_cnt; 1646 1460
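qla24xx_get_one_block_sg() above is an iterator: it slices the data scatterlist into sector_size pieces that may straddle scatterlist-element boundaries, reporting partial == 1 until a full block has been emitted, which is the point where the caller interleaves the 8-byte protection tuple; the nseg pre-count near the end of the hunk reuses the same walk. The same slicing logic on a plain segment list, as a runnable userspace sketch (segment addresses and lengths invented):

    #include <stdint.h>
    #include <stdio.h>

    struct seg { uint64_t addr; uint32_t len; };

    struct walk {
            const struct seg *cur;
            uint32_t tot_bytes, num_bytes, consumed, tot_partial;
            uint64_t out_addr;
            uint32_t out_len;
    };

    static int get_one_block(uint32_t blk_sz, struct walk *w, int *partial)
    {
            if (w->num_bytes == w->tot_bytes)
                    return 0;       /* every block emitted */

            w->out_addr = w->cur->addr + w->consumed;
            if (w->tot_partial + (w->cur->len - w->consumed) >= blk_sz) {
                    /* this segment finishes the current block */
                    w->out_len = blk_sz - w->tot_partial;
                    w->tot_partial = 0;
                    w->num_bytes += blk_sz;
                    *partial = 0;
            } else {
                    /* segment exhausted before the block is full */
                    w->out_len = w->cur->len - w->consumed;
                    w->tot_partial += w->out_len;
                    *partial = 1;
            }
            w->consumed += w->out_len;
            if (w->consumed == w->cur->len) {
                    w->cur++;               /* advance to next segment */
                    w->consumed = 0;
            }
            return 1;
    }

    int main(void)
    {
            const struct seg segs[] = { { 0x1000, 700 }, { 0x9000, 324 } };
            struct walk w = { .cur = segs, .tot_bytes = 1024 };
            int partial;

            while (get_one_block(512, &w, &partial))
                    printf("piece addr=0x%llx len=%u %s\n",
                           (unsigned long long)w.out_addr, w.out_len,
                           partial ? "(block continues)" : "(block done)");
            return 0;
    }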
+87 -28
drivers/scsi/qla2xxx/qla_isr.c
··· 719 719 vha->flags.rscn_queue_overflow = 1; 720 720 } 721 721 722 - atomic_set(&vha->loop_state, LOOP_UPDATE); 723 722 atomic_set(&vha->loop_down_timer, 0); 724 723 vha->flags.management_server_logged_in = 0; 725 724 ··· 1434 1435 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 1435 1436 * to indicate to the kernel that the HBA detected error. 1436 1437 */ 1437 - static inline void 1438 + static inline int 1438 1439 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1439 1440 { 1440 1441 struct scsi_qla_host *vha = sp->fcport->vha; 1441 1442 struct scsi_cmnd *cmd = sp->cmd; 1442 - struct scsi_dif_tuple *ep = 1443 - (struct scsi_dif_tuple *)&sts24->data[20]; 1444 - struct scsi_dif_tuple *ap = 1445 - (struct scsi_dif_tuple *)&sts24->data[12]; 1443 + uint8_t *ap = &sts24->data[12]; 1444 + uint8_t *ep = &sts24->data[20]; 1446 1445 uint32_t e_ref_tag, a_ref_tag; 1447 1446 uint16_t e_app_tag, a_app_tag; 1448 1447 uint16_t e_guard, a_guard; 1449 1448 1450 - e_ref_tag = be32_to_cpu(ep->ref_tag); 1451 - a_ref_tag = be32_to_cpu(ap->ref_tag); 1452 - e_app_tag = be16_to_cpu(ep->app_tag); 1453 - a_app_tag = be16_to_cpu(ap->app_tag); 1454 - e_guard = be16_to_cpu(ep->guard); 1455 - a_guard = be16_to_cpu(ap->guard); 1449 + /* 1450 + * swab32 of the "data" field in the beginning of qla2x00_status_entry() 1451 + * would make guard field appear at offset 2 1452 + */ 1453 + a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); 1454 + a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); 1455 + a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); 1456 + e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); 1457 + e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); 1458 + e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); 1456 1459 1457 1460 ql_dbg(ql_dbg_io, vha, 0x3023, 1458 1461 "iocb(s) %p Returned STATUS.\n", sts24); ··· 1466 1465 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1467 1466 a_app_tag, e_app_tag, a_guard, e_guard); 1468 1467 1468 + /* 1469 + * Ignore sector if: 1470 + * For type 3: ref & app tag is all 'f's 1471 + * For type 0,1,2: app tag is all 'f's 1472 + */ 1473 + if ((a_app_tag == 0xffff) && 1474 + ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || 1475 + (a_ref_tag == 0xffffffff))) { 1476 + uint32_t blocks_done, resid; 1477 + sector_t lba_s = scsi_get_lba(cmd); 1478 + 1479 + /* 2TB boundary case covered automatically with this */ 1480 + blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 1481 + 1482 + resid = scsi_bufflen(cmd) - (blocks_done * 1483 + cmd->device->sector_size); 1484 + 1485 + scsi_set_resid(cmd, resid); 1486 + cmd->result = DID_OK << 16; 1487 + 1488 + /* Update protection tag */ 1489 + if (scsi_prot_sg_count(cmd)) { 1490 + uint32_t i, j = 0, k = 0, num_ent; 1491 + struct scatterlist *sg; 1492 + struct sd_dif_tuple *spt; 1493 + 1494 + /* Patch the corresponding protection tags */ 1495 + scsi_for_each_prot_sg(cmd, sg, 1496 + scsi_prot_sg_count(cmd), i) { 1497 + num_ent = sg_dma_len(sg) / 8; 1498 + if (k + num_ent < blocks_done) { 1499 + k += num_ent; 1500 + continue; 1501 + } 1502 + j = blocks_done - k - 1; 1503 + k = blocks_done; 1504 + break; 1505 + } 1506 + 1507 + if (k != blocks_done) { 1508 + qla_printk(KERN_WARNING, sp->fcport->vha->hw, 1509 + "unexpected tag values tag:lba=%x:%lx)\n", 1510 + e_ref_tag, lba_s); 1511 + return 1; 1512 + } 1513 + 1514 + spt = page_address(sg_page(sg)) + sg->offset; 1515 + spt += j; 1516 + 1517 + spt->app_tag = 0xffff; 1518 + if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 1519 + spt->ref_tag = 0xffffffff; 1520 + } 1521 + 1522 + 
return 0; 1523 + } 1524 + 1469 1525 /* check guard */ 1470 1526 if (e_guard != a_guard) { 1471 1527 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, ··· 1530 1472 set_driver_byte(cmd, DRIVER_SENSE); 1531 1473 set_host_byte(cmd, DID_ABORT); 1532 1474 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1533 - return; 1534 - } 1535 - 1536 - /* check appl tag */ 1537 - if (e_app_tag != a_app_tag) { 1538 - scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1539 - 0x10, 0x2); 1540 - set_driver_byte(cmd, DRIVER_SENSE); 1541 - set_host_byte(cmd, DID_ABORT); 1542 - cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1543 - return; 1475 + return 1; 1544 1476 } 1545 1477 1546 1478 /* check ref tag */ ··· 1540 1492 set_driver_byte(cmd, DRIVER_SENSE); 1541 1493 set_host_byte(cmd, DID_ABORT); 1542 1494 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1543 - return; 1495 + return 1; 1544 1496 } 1497 + 1498 + /* check appl tag */ 1499 + if (e_app_tag != a_app_tag) { 1500 + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1501 + 0x10, 0x2); 1502 + set_driver_byte(cmd, DRIVER_SENSE); 1503 + set_host_byte(cmd, DID_ABORT); 1504 + cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1505 + return 1; 1506 + } 1507 + 1508 + return 1; 1545 1509 } 1546 1510 1547 1511 /** ··· 1827 1767 break; 1828 1768 1829 1769 case CS_DIF_ERROR: 1830 - qla2x00_handle_dif_error(sp, sts24); 1770 + logit = qla2x00_handle_dif_error(sp, sts24); 1831 1771 break; 1832 1772 default: 1833 1773 cp->result = DID_ERROR << 16; ··· 2528 2468 goto skip_msi; 2529 2469 } 2530 2470 2531 - if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || 2532 - !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { 2471 + if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 2533 2472 ql_log(ql_log_warn, vha, 0x0035, 2534 2473 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 2535 - ha->pdev->revision, ha->fw_attributes); 2474 + ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 2536 2475 goto skip_msix; 2537 2476 } 2538 2477
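The rewritten DIF handler now returns whether to log and fail the command: when the application tag is all f's (and, for Type 3, the reference tag too), the target marked the sector "don't check", so the command completes DID_OK with a residual derived from how far the expected ref tag advanced past the starting LBA. The arithmetic, with invented example values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t lba = 0x1000;          /* first LBA of the command */
            uint32_t e_ref_tag = 0x1003;    /* expected ref tag from firmware */
            uint32_t bufflen = 8 * 512;     /* 8 blocks of 512 bytes */

            uint32_t blocks_done = e_ref_tag - lba + 1;     /* 4 blocks OK */
            uint32_t resid = bufflen - blocks_done * 512;   /* 2048 bytes left */

            printf("blocks_done=%u resid=%u\n", blocks_done, resid);
            return 0;
    }

The unsigned 32-bit subtraction is also what makes the "2TB boundary case" comment work: ref tags wrap modulo 2^32, and so does the subtraction.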
+1 -1
drivers/scsi/qla2xxx/qla_mid.c
··· 472 472 host->can_queue = base_vha->req->length + 128; 473 473 host->this_id = 255; 474 474 host->cmd_per_lun = 3; 475 - if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) 475 + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 476 476 host->max_cmd_len = 32; 477 477 else 478 478 host->max_cmd_len = MAX_CMDSZ;
+13 -12
drivers/scsi/qla2xxx/qla_nx.c
··· 2208 2208 struct qla_hw_data *ha; 2209 2209 struct rsp_que *rsp; 2210 2210 struct device_reg_82xx __iomem *reg; 2211 + unsigned long flags; 2211 2212 2212 2213 rsp = (struct rsp_que *) dev_id; 2213 2214 if (!rsp) { ··· 2219 2218 2220 2219 ha = rsp->hw; 2221 2220 reg = &ha->iobase->isp82; 2222 - spin_lock_irq(&ha->hardware_lock); 2221 + spin_lock_irqsave(&ha->hardware_lock, flags); 2223 2222 vha = pci_get_drvdata(ha->pdev); 2224 2223 qla24xx_process_response_queue(vha, rsp); 2225 2224 WRT_REG_DWORD(&reg->host_int, 0); 2226 - spin_unlock_irq(&ha->hardware_lock); 2225 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 2227 2226 return IRQ_HANDLED; 2228 2227 } 2229 2228 ··· 2839 2838 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 2840 2839 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2841 2840 2841 + /* build FCP_CMND IU */ 2842 + memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 2843 + int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); 2844 + ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; 2845 + 2846 + if (cmd->sc_data_direction == DMA_TO_DEVICE) 2847 + ctx->fcp_cmnd->additional_cdb_len |= 1; 2848 + else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 2849 + ctx->fcp_cmnd->additional_cdb_len |= 2; 2850 + 2842 2851 /* 2843 2852 * Update tagged queuing modifier -- default is TSK_SIMPLE (0). 2844 2853 */ ··· 2864 2853 break; 2865 2854 } 2866 2855 } 2867 - 2868 - /* build FCP_CMND IU */ 2869 - memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 2870 - int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); 2871 - ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; 2872 - 2873 - if (cmd->sc_data_direction == DMA_TO_DEVICE) 2874 - ctx->fcp_cmnd->additional_cdb_len |= 1; 2875 - else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 2876 - ctx->fcp_cmnd->additional_cdb_len |= 2; 2877 2856 2878 2857 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 2879 2858
+22 -8
drivers/scsi/qla2xxx/qla_os.c
···  106   106   "Maximum queue depth to report for target devices.");  107   107    108   108   /* Do not change the value of this after module load */  109  -  int ql2xenabledif = 1;  109  +  int ql2xenabledif = 0;  110   110   module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);  111   111   MODULE_PARM_DESC(ql2xenabledif,  112   112   " Enable T10-CRC-DIF "  113  -  " Default is 0 - No DIF Support. 1 - Enable it");  113  +  " Default is 0 - No DIF Support. 1 - Enable it"  114  +  ", 2 - Enable DIF for all types, except Type 0.");  114   115    115  -  int ql2xenablehba_err_chk;  116  +  int ql2xenablehba_err_chk = 2;  116   117   module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);  117   118   MODULE_PARM_DESC(ql2xenablehba_err_chk,  118  -  " Enable T10-CRC-DIF Error isolation by HBA"  119  -  " Default is 0 - Error isolation disabled, 1 - Enable it");  119  +  " Enable T10-CRC-DIF Error isolation by HBA:\n"  120  +  " Default is 2.\n"  121  +  " 0 -- Error isolation disabled\n"  122  +  " 1 -- Error isolation enabled only for DIX Type 0\n"  123  +  " 2 -- Error isolation enabled for all Types\n");  120   124    121   125   int ql2xiidmaenable=1;  122   126   module_param(ql2xiidmaenable, int, S_IRUGO);  ···  913   909   "Abort command mbx success.\n");  914   910   wait = 1;  915   911   }  912  +   913  +  spin_lock_irqsave(&ha->hardware_lock, flags);  916   914   qla2x00_sp_compl(ha, sp);  915  +  spin_unlock_irqrestore(&ha->hardware_lock, flags);  916  +   917  +  /* Did the command return during mailbox execution? */  918  +  if (ret == FAILED && !CMD_SP(cmd))  919  +  ret = SUCCESS;  917   920    918   921   /* Wait for the command to be returned. */  919   922   if (wait) {  ···  2262   2251   host->this_id = 255;  2263   2252   host->cmd_per_lun = 3;  2264   2253   host->unique_id = host->host_no;  2265  -  if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)  2254  +  if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)  2266   2255   host->max_cmd_len = 32;  2267   2256   else  2268   2257   host->max_cmd_len = MAX_CMDSZ;  ···  2389   2378   "Detected hba at address=%p.\n",  2390   2379   ha);  2391   2380    2392  -  if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {  2381  +  if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {  2393   2382   if (ha->fw_attributes & BIT_4) {  2383  +  int prot = 0;  2394   2384   base_vha->flags.difdix_supported = 1;  2395   2385   ql_dbg(ql_dbg_init, base_vha, 0x00f1,  2396   2386   "Registering for DIF/DIX type 1 and 3 protection.\n");  2387  +  if (ql2xenabledif == 1)  2388  +  prot = SHOST_DIX_TYPE0_PROTECTION;  2397   2389   scsi_host_set_prot(host,  2398  -  SHOST_DIF_TYPE1_PROTECTION  2390  +  prot | SHOST_DIF_TYPE1_PROTECTION  2399   2391   | SHOST_DIF_TYPE2_PROTECTION  2400   2392   | SHOST_DIF_TYPE3_PROTECTION  2401   2393   | SHOST_DIX_TYPE1_PROTECTION
+1 -1
drivers/scsi/qla2xxx/qla_version.h
··· 7 7 /* 8 8 * Driver version 9 9 */ 10 - #define QLA2XXX_VERSION "8.03.07.03-k" 10 + #define QLA2XXX_VERSION "8.03.07.07-k" 11 11 12 12 #define QLA_DRIVER_MAJOR_VER 8 13 13 #define QLA_DRIVER_MINOR_VER 3
+1 -1
drivers/scsi/qla4xxx/Kconfig
··· 1 1 config SCSI_QLA_ISCSI 2 2 tristate "QLogic ISP4XXX and ISP82XX host adapter family support" 3 - depends on PCI && SCSI 3 + depends on PCI && SCSI && NET 4 4 select SCSI_ISCSI_ATTRS 5 5 ---help--- 6 6 This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
+3 -1
drivers/staging/comedi/drivers/ni_labpc.c
··· 241 241 struct comedi_insn *insn, 242 242 unsigned int *data); 243 243 static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd); 244 - #ifdef CONFIG_COMEDI_PCI 244 + #ifdef CONFIG_ISA_DMA_API 245 245 static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd); 246 + #endif 247 + #ifdef CONFIG_COMEDI_PCI 246 248 static int labpc_find_device(struct comedi_device *dev, int bus, int slot); 247 249 #endif 248 250 static int labpc_dio_mem_callback(int dir, int port, int data,
+1 -1
drivers/target/iscsi/iscsi_target_parameters.c
··· 1430 1430 u8 DataSequenceInOrder = 0; 1431 1431 u8 ErrorRecoveryLevel = 0, SessionType = 0; 1432 1432 u8 IFMarker = 0, OFMarker = 0; 1433 - u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0; 1433 + u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1; 1434 1434 u32 FirstBurstLength = 0, MaxBurstLength = 0; 1435 1435 struct iscsi_param *param = NULL; 1436 1436
+21 -249
drivers/target/iscsi/iscsi_target_util.c
··· 875 875 } 876 876 877 877 /* 878 - * Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker 879 - * array counts needed for sync and steering. 880 - */ 881 - static int iscsit_determine_sync_and_steering_counts( 882 - struct iscsi_conn *conn, 883 - struct iscsi_data_count *count) 884 - { 885 - u32 length = count->data_length; 886 - u32 marker, markint; 887 - 888 - count->sync_and_steering = 1; 889 - 890 - marker = (count->type == ISCSI_RX_DATA) ? 891 - conn->of_marker : conn->if_marker; 892 - markint = (count->type == ISCSI_RX_DATA) ? 893 - (conn->conn_ops->OFMarkInt * 4) : 894 - (conn->conn_ops->IFMarkInt * 4); 895 - count->ss_iov_count = count->iov_count; 896 - 897 - while (length > 0) { 898 - if (length >= marker) { 899 - count->ss_iov_count += 3; 900 - count->ss_marker_count += 2; 901 - 902 - length -= marker; 903 - marker = markint; 904 - } else 905 - length = 0; 906 - } 907 - 908 - return 0; 909 - } 910 - 911 - /* 912 878 * Setup conn->if_marker and conn->of_marker values based upon 913 879 * the initial marker-less interval. (see iSCSI v19 A.2) 914 880 */ ··· 1256 1290 struct kvec iov; 1257 1291 u32 tx_hdr_size, data_len; 1258 1292 u32 offset = cmd->first_data_sg_off; 1259 - int tx_sent; 1293 + int tx_sent, iov_off; 1260 1294 1261 1295 send_hdr: 1262 1296 tx_hdr_size = ISCSI_HDR_LEN; ··· 1276 1310 } 1277 1311 1278 1312 data_len = cmd->tx_size - tx_hdr_size - cmd->padding; 1279 - if (conn->conn_ops->DataDigest) 1313 + /* 1314 + * Set iov_off used by padding and data digest tx_data() calls below 1315 + * in order to determine proper offset into cmd->iov_data[] 1316 + */ 1317 + if (conn->conn_ops->DataDigest) { 1280 1318 data_len -= ISCSI_CRC_LEN; 1281 - 1319 + if (cmd->padding) 1320 + iov_off = (cmd->iov_data_count - 2); 1321 + else 1322 + iov_off = (cmd->iov_data_count - 1); 1323 + } else { 1324 + iov_off = (cmd->iov_data_count - 1); 1325 + } 1282 1326 /* 1283 1327 * Perform sendpage() for each page in the scatterlist 1284 1328 */ ··· 1317 1341 1318 1342 send_padding: 1319 1343 if (cmd->padding) { 1320 - struct kvec *iov_p = 1321 - &cmd->iov_data[cmd->iov_data_count-1]; 1344 + struct kvec *iov_p = &cmd->iov_data[iov_off++]; 1322 1345 1323 1346 tx_sent = tx_data(conn, iov_p, 1, cmd->padding); 1324 1347 if (cmd->padding != tx_sent) { ··· 1331 1356 1332 1357 send_datacrc: 1333 1358 if (conn->conn_ops->DataDigest) { 1334 - struct kvec *iov_d = 1335 - &cmd->iov_data[cmd->iov_data_count]; 1359 + struct kvec *iov_d = &cmd->iov_data[iov_off]; 1336 1360 1337 1361 tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN); 1338 1362 if (ISCSI_CRC_LEN != tx_sent) { ··· 1405 1431 struct iscsi_data_count *count) 1406 1432 { 1407 1433 int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len; 1408 - u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0; 1409 - struct kvec iov[count->ss_iov_count], *iov_p; 1434 + struct kvec *iov_p; 1410 1435 struct msghdr msg; 1411 1436 1412 1437 if (!conn || !conn->sock || !conn->conn_ops) ··· 1413 1440 1414 1441 memset(&msg, 0, sizeof(struct msghdr)); 1415 1442 1416 - if (count->sync_and_steering) { 1417 - int size = 0; 1418 - u32 i, orig_iov_count = 0; 1419 - u32 orig_iov_len = 0, orig_iov_loc = 0; 1420 - u32 iov_count = 0, per_iov_bytes = 0; 1421 - u32 *rx_marker, old_rx_marker = 0; 1422 - struct kvec *iov_record; 1423 - 1424 - memset(&rx_marker_val, 0, 1425 - count->ss_marker_count * sizeof(u32)); 1426 - memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); 1427 - 1428 - iov_record = count->iov; 1429 - orig_iov_count = 
count->iov_count; 1430 - rx_marker = &conn->of_marker; 1431 - 1432 - i = 0; 1433 - size = data; 1434 - orig_iov_len = iov_record[orig_iov_loc].iov_len; 1435 - while (size > 0) { 1436 - pr_debug("rx_data: #1 orig_iov_len %u," 1437 - " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc); 1438 - pr_debug("rx_data: #2 rx_marker %u, size" 1439 - " %u\n", *rx_marker, size); 1440 - 1441 - if (orig_iov_len >= *rx_marker) { 1442 - iov[iov_count].iov_len = *rx_marker; 1443 - iov[iov_count++].iov_base = 1444 - (iov_record[orig_iov_loc].iov_base + 1445 - per_iov_bytes); 1446 - 1447 - iov[iov_count].iov_len = (MARKER_SIZE / 2); 1448 - iov[iov_count++].iov_base = 1449 - &rx_marker_val[rx_marker_iov++]; 1450 - iov[iov_count].iov_len = (MARKER_SIZE / 2); 1451 - iov[iov_count++].iov_base = 1452 - &rx_marker_val[rx_marker_iov++]; 1453 - old_rx_marker = *rx_marker; 1454 - 1455 - /* 1456 - * OFMarkInt is in 32-bit words. 1457 - */ 1458 - *rx_marker = (conn->conn_ops->OFMarkInt * 4); 1459 - size -= old_rx_marker; 1460 - orig_iov_len -= old_rx_marker; 1461 - per_iov_bytes += old_rx_marker; 1462 - 1463 - pr_debug("rx_data: #3 new_rx_marker" 1464 - " %u, size %u\n", *rx_marker, size); 1465 - } else { 1466 - iov[iov_count].iov_len = orig_iov_len; 1467 - iov[iov_count++].iov_base = 1468 - (iov_record[orig_iov_loc].iov_base + 1469 - per_iov_bytes); 1470 - 1471 - per_iov_bytes = 0; 1472 - *rx_marker -= orig_iov_len; 1473 - size -= orig_iov_len; 1474 - 1475 - if (size) 1476 - orig_iov_len = 1477 - iov_record[++orig_iov_loc].iov_len; 1478 - 1479 - pr_debug("rx_data: #4 new_rx_marker" 1480 - " %u, size %u\n", *rx_marker, size); 1481 - } 1482 - } 1483 - data += (rx_marker_iov * (MARKER_SIZE / 2)); 1484 - 1485 - iov_p = &iov[0]; 1486 - iov_len = iov_count; 1487 - 1488 - if (iov_count > count->ss_iov_count) { 1489 - pr_err("iov_count: %d, count->ss_iov_count:" 1490 - " %d\n", iov_count, count->ss_iov_count); 1491 - return -1; 1492 - } 1493 - if (rx_marker_iov > count->ss_marker_count) { 1494 - pr_err("rx_marker_iov: %d, count->ss_marker" 1495 - "_count: %d\n", rx_marker_iov, 1496 - count->ss_marker_count); 1497 - return -1; 1498 - } 1499 - } else { 1500 - iov_p = count->iov; 1501 - iov_len = count->iov_count; 1502 - } 1443 + iov_p = count->iov; 1444 + iov_len = count->iov_count; 1503 1445 1504 1446 while (total_rx < data) { 1505 1447 rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len, ··· 1429 1541 rx_loop, total_rx, data); 1430 1542 } 1431 1543 1432 - if (count->sync_and_steering) { 1433 - int j; 1434 - for (j = 0; j < rx_marker_iov; j++) { 1435 - pr_debug("rx_data: #5 j: %d, offset: %d\n", 1436 - j, rx_marker_val[j]); 1437 - conn->of_marker_offset = rx_marker_val[j]; 1438 - } 1439 - total_rx -= (rx_marker_iov * (MARKER_SIZE / 2)); 1440 - } 1441 - 1442 1544 return total_rx; 1443 1545 } 1444 1546 ··· 1437 1559 struct iscsi_data_count *count) 1438 1560 { 1439 1561 int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; 1440 - u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0; 1441 - struct kvec iov[count->ss_iov_count], *iov_p; 1562 + struct kvec *iov_p; 1442 1563 struct msghdr msg; 1443 1564 1444 1565 if (!conn || !conn->sock || !conn->conn_ops) ··· 1450 1573 1451 1574 memset(&msg, 0, sizeof(struct msghdr)); 1452 1575 1453 - if (count->sync_and_steering) { 1454 - int size = 0; 1455 - u32 i, orig_iov_count = 0; 1456 - u32 orig_iov_len = 0, orig_iov_loc = 0; 1457 - u32 iov_count = 0, per_iov_bytes = 0; 1458 - u32 *tx_marker, old_tx_marker = 0; 1459 - struct kvec *iov_record; 1460 - 1461 - 
memset(&tx_marker_val, 0, 1462 - count->ss_marker_count * sizeof(u32)); 1463 - memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); 1464 - 1465 - iov_record = count->iov; 1466 - orig_iov_count = count->iov_count; 1467 - tx_marker = &conn->if_marker; 1468 - 1469 - i = 0; 1470 - size = data; 1471 - orig_iov_len = iov_record[orig_iov_loc].iov_len; 1472 - while (size > 0) { 1473 - pr_debug("tx_data: #1 orig_iov_len %u," 1474 - " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc); 1475 - pr_debug("tx_data: #2 tx_marker %u, size" 1476 - " %u\n", *tx_marker, size); 1477 - 1478 - if (orig_iov_len >= *tx_marker) { 1479 - iov[iov_count].iov_len = *tx_marker; 1480 - iov[iov_count++].iov_base = 1481 - (iov_record[orig_iov_loc].iov_base + 1482 - per_iov_bytes); 1483 - 1484 - tx_marker_val[tx_marker_iov] = 1485 - (size - *tx_marker); 1486 - iov[iov_count].iov_len = (MARKER_SIZE / 2); 1487 - iov[iov_count++].iov_base = 1488 - &tx_marker_val[tx_marker_iov++]; 1489 - iov[iov_count].iov_len = (MARKER_SIZE / 2); 1490 - iov[iov_count++].iov_base = 1491 - &tx_marker_val[tx_marker_iov++]; 1492 - old_tx_marker = *tx_marker; 1493 - 1494 - /* 1495 - * IFMarkInt is in 32-bit words. 1496 - */ 1497 - *tx_marker = (conn->conn_ops->IFMarkInt * 4); 1498 - size -= old_tx_marker; 1499 - orig_iov_len -= old_tx_marker; 1500 - per_iov_bytes += old_tx_marker; 1501 - 1502 - pr_debug("tx_data: #3 new_tx_marker" 1503 - " %u, size %u\n", *tx_marker, size); 1504 - pr_debug("tx_data: #4 offset %u\n", 1505 - tx_marker_val[tx_marker_iov-1]); 1506 - } else { 1507 - iov[iov_count].iov_len = orig_iov_len; 1508 - iov[iov_count++].iov_base 1509 - = (iov_record[orig_iov_loc].iov_base + 1510 - per_iov_bytes); 1511 - 1512 - per_iov_bytes = 0; 1513 - *tx_marker -= orig_iov_len; 1514 - size -= orig_iov_len; 1515 - 1516 - if (size) 1517 - orig_iov_len = 1518 - iov_record[++orig_iov_loc].iov_len; 1519 - 1520 - pr_debug("tx_data: #5 new_tx_marker" 1521 - " %u, size %u\n", *tx_marker, size); 1522 - } 1523 - } 1524 - 1525 - data += (tx_marker_iov * (MARKER_SIZE / 2)); 1526 - 1527 - iov_p = &iov[0]; 1528 - iov_len = iov_count; 1529 - 1530 - if (iov_count > count->ss_iov_count) { 1531 - pr_err("iov_count: %d, count->ss_iov_count:" 1532 - " %d\n", iov_count, count->ss_iov_count); 1533 - return -1; 1534 - } 1535 - if (tx_marker_iov > count->ss_marker_count) { 1536 - pr_err("tx_marker_iov: %d, count->ss_marker" 1537 - "_count: %d\n", tx_marker_iov, 1538 - count->ss_marker_count); 1539 - return -1; 1540 - } 1541 - } else { 1542 - iov_p = count->iov; 1543 - iov_len = count->iov_count; 1544 - } 1576 + iov_p = count->iov; 1577 + iov_len = count->iov_count; 1545 1578 1546 1579 while (total_tx < data) { 1547 1580 tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, ··· 1465 1678 pr_debug("tx_loop: %d, total_tx: %d, data: %d\n", 1466 1679 tx_loop, total_tx, data); 1467 1680 } 1468 - 1469 - if (count->sync_and_steering) 1470 - total_tx -= (tx_marker_iov * (MARKER_SIZE / 2)); 1471 1681 1472 1682 return total_tx; 1473 1683 } ··· 1486 1702 c.data_length = data; 1487 1703 c.type = ISCSI_RX_DATA; 1488 1704 1489 - if (conn->conn_ops->OFMarker && 1490 - (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) { 1491 - if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) 1492 - return -1; 1493 - } 1494 - 1495 1705 return iscsit_do_rx_data(conn, &c); 1496 1706 } 1497 1707 ··· 1505 1727 c.iov_count = iov_count; 1506 1728 c.data_length = data; 1507 1729 c.type = ISCSI_TX_DATA; 1508 - 1509 - if (conn->conn_ops->IFMarker && 1510 - (conn->conn_state >= 
TARG_CONN_STATE_LOGGED_IN)) { 1511 - if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) 1512 - return -1; 1513 - } 1514 1730 1515 1731 return iscsit_do_tx_data(conn, &c); 1516 1732 }
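With the sync-and-steering marker bookkeeping deleted, iscsit_do_rx_data() and iscsit_do_tx_data() reduce to plain kernel_recvmsg()/kernel_sendmsg() loops over the caller's kvec. Simplified shape of the receive side (error paths, signal handling, and logging elided; not the verbatim remaining code):

    #include <linux/net.h>
    #include <linux/string.h>
    #include <linux/uio.h>

    static int rx_all(struct socket *sock, struct kvec *iov, int iov_len,
                      int data)
    {
            struct msghdr msg;
            int total = 0, n;

            memset(&msg, 0, sizeof(msg));
            while (total < data) {
                    n = kernel_recvmsg(sock, &msg, iov, iov_len,
                                       data - total, MSG_WAITALL);
                    if (n <= 0)
                            return -1;
                    total += n;
            }
            return total;
    }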
+33 -2
drivers/target/target_core_cdb.c
···  24   24   */  25   25    26   26   #include <linux/kernel.h>  27  +  #include <linux/ctype.h>  27   28   #include <asm/unaligned.h>  28   29   #include <scsi/scsi.h>  29   30    ···  155   154   return 0;  156   155   }  157   156    157  +  static void  158  +  target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off)  159  +  {  160  +  unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];  161  +  unsigned char *buf = buf_off;  162  +  int cnt = 0, next = 1;  163  +  /*  164  +  * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on  165  +  * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field  166  +  * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION  167  +  * to complete the payload. These are based on VPD=0x80 PRODUCT SERIAL  168  +  * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure  169  +  * per device uniqueness.  170  +  */  171  +  while (*p != '\0') {  172  +  if (cnt >= 13)  173  +  break;  174  +  if (!isxdigit(*p)) {  175  +  p++;  176  +  continue;  177  +  }  178  +  if (next != 0) {  179  +  buf[cnt++] |= hex_to_bin(*p++);  180  +  next = 0;  181  +  } else {  182  +  buf[cnt] = hex_to_bin(*p++) << 4;  183  +  next = 1;  184  +  }  185  +  }  186  +  }  187  +   158   188   /*  159   189   * Device identification VPD, for a complete list of  160   190   * DESIGNATOR TYPEs see spc4r17 Table 459.  ···  251   219   * VENDOR_SPECIFIC_IDENTIFIER and  252   220   * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION  253   221   */  254  -  buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]);  255  -  hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);  222  +  target_parse_naa_6h_vendor_specific(dev, &buf[off]);  256   223    257   224   len = 20;  258   225   off = (len + 4);
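The helper packs hex digits of the configured unit serial into nibbles, skipping any non-hex characters (for example the dashes in a UUID-style serial), and starts in the low nibble of the first byte because the high nibble already carries NAA/IEEE bits. A userspace rendition of the same packing (the serial string is invented):

    #include <ctype.h>
    #include <stdio.h>

    static int hexval(char c)
    {
            return isdigit((unsigned char)c) ? c - '0'
                                             : tolower((unsigned char)c) - 'a' + 10;
    }

    int main(void)
    {
            const char *serial = "89a0-4f87-1e3b";  /* invented example */
            unsigned char buf[16] = { 0 };
            int cnt = 0, next = 1;

            for (const char *p = serial; *p && cnt < 13; p++) {
                    if (!isxdigit((unsigned char)*p))
                            continue;                   /* skip '-' etc. */
                    if (next) {
                            buf[cnt++] |= hexval(*p);   /* low nibble */
                            next = 0;
                    } else {
                            buf[cnt] = hexval(*p) << 4; /* high nibble */
                            next = 1;
                    }
            }
            for (int i = 0; i <= cnt; i++)
                    printf("%02x", buf[i]);
            printf("\n");
            return 0;
    }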
+4 -5
drivers/target/target_core_transport.c
··· 977 977 { 978 978 struct se_device *dev = container_of(work, struct se_device, 979 979 qf_work_queue); 980 + LIST_HEAD(qf_cmd_list); 980 981 struct se_cmd *cmd, *cmd_tmp; 981 982 982 983 spin_lock_irq(&dev->qf_cmd_lock); 983 - list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) { 984 + list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 985 + spin_unlock_irq(&dev->qf_cmd_lock); 984 986 987 + list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 985 988 list_del(&cmd->se_qf_node); 986 989 atomic_dec(&dev->dev_qf_count); 987 990 smp_mb__after_atomic_dec(); 988 - spin_unlock_irq(&dev->qf_cmd_lock); 989 991 990 992 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 991 993 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, ··· 999 997 * has been added to head of queue 1000 998 */ 1001 999 transport_add_cmd_to_queue(cmd, cmd->t_state); 1002 - 1003 - spin_lock_irq(&dev->qf_cmd_lock); 1004 1000 } 1005 - spin_unlock_irq(&dev->qf_cmd_lock); 1006 1001 } 1007 1002 1008 1003 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
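Rather than dropping and retaking qf_cmd_lock around every element, which let new queue-full entries interleave with the walk, the handler now steals the entire list in one critical section with list_splice_init() and processes it unlocked. The pattern in skeleton form (names illustrative):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_item { struct list_head node; };
    struct my_dev  { spinlock_t lock; struct list_head pending; };

    static void process(struct my_item *it);    /* stand-in for real work */

    static void drain_queue(struct my_dev *dev)
    {
            LIST_HEAD(local);
            struct my_item *it, *tmp;

            spin_lock_irq(&dev->lock);
            list_splice_init(&dev->pending, &local);  /* steal whole list */
            spin_unlock_irq(&dev->lock);

            /* 'local' is private now: no lock, no re-queue races mid-walk */
            list_for_each_entry_safe(it, tmp, &local, node) {
                    list_del(&it->node);
                    process(it);
            }
    }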
+2 -10
drivers/target/tcm_fc/tcm_fc.h
··· 98 98 struct list_head list; /* linkage in ft_lport_acl tpg_list */ 99 99 struct list_head lun_list; /* head of LUNs */ 100 100 struct se_portal_group se_tpg; 101 - struct task_struct *thread; /* processing thread */ 102 - struct se_queue_obj qobj; /* queue for processing thread */ 101 + struct workqueue_struct *workqueue; 103 102 }; 104 103 105 104 struct ft_lport_acl { ··· 109 110 struct se_wwn fc_lport_wwn; 110 111 }; 111 112 112 - enum ft_cmd_state { 113 - FC_CMD_ST_NEW = 0, 114 - FC_CMD_ST_REJ 115 - }; 116 - 117 113 /* 118 114 * Commands 119 115 */ 120 116 struct ft_cmd { 121 - enum ft_cmd_state state; 122 117 u32 lun; /* LUN from request */ 123 118 struct ft_sess *sess; /* session held for cmd */ 124 119 struct fc_seq *seq; /* sequence in exchange mgr */ ··· 120 127 struct fc_frame *req_frame; 121 128 unsigned char *cdb; /* pointer to CDB inside frame */ 122 129 u32 write_data_len; /* data received on writes */ 123 - struct se_queue_req se_req; 130 + struct work_struct work; 124 131 /* Local sense buffer */ 125 132 unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER]; 126 133 u32 was_ddp_setup:1; /* Set only if ddp is setup */ ··· 170 177 /* 171 178 * other internal functions. 172 179 */ 173 - int ft_thread(void *); 174 180 void ft_recv_req(struct ft_sess *, struct fc_frame *); 175 181 struct ft_tpg *ft_lport_find_tpg(struct fc_lport *); 176 182 struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
+11 -79
drivers/target/tcm_fc/tfc_cmd.c
··· 62 62 int count; 63 63 64 64 se_cmd = &cmd->se_cmd; 65 - pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n", 66 - caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); 65 + pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n", 66 + caller, cmd, cmd->sess, cmd->seq, se_cmd); 67 67 pr_debug("%s: cmd %p cdb %p\n", 68 68 caller, cmd, cmd->cdb); 69 69 pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); ··· 88 88 } 89 89 print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE, 90 90 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); 91 - } 92 - 93 - static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) 94 - { 95 - struct ft_tpg *tpg = sess->tport->tpg; 96 - struct se_queue_obj *qobj = &tpg->qobj; 97 - unsigned long flags; 98 - 99 - qobj = &sess->tport->tpg->qobj; 100 - spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 101 - list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list); 102 - atomic_inc(&qobj->queue_cnt); 103 - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 104 - 105 - wake_up_process(tpg->thread); 106 - } 107 - 108 - static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj) 109 - { 110 - unsigned long flags; 111 - struct se_queue_req *qr; 112 - 113 - spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 114 - if (list_empty(&qobj->qobj_list)) { 115 - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 116 - return NULL; 117 - } 118 - qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list); 119 - list_del(&qr->qr_list); 120 - atomic_dec(&qobj->queue_cnt); 121 - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 122 - return container_of(qr, struct ft_cmd, se_req); 123 91 } 124 92 125 93 static void ft_free_cmd(struct ft_cmd *cmd) ··· 250 282 251 283 int ft_get_cmd_state(struct se_cmd *se_cmd) 252 284 { 253 - struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); 254 - 255 - return cmd->state; 285 + return 0; 256 286 } 257 287 258 288 int ft_is_state_remove(struct se_cmd *se_cmd) ··· 471 505 return 0; 472 506 } 473 507 508 + static void ft_send_work(struct work_struct *work); 509 + 474 510 /* 475 511 * Handle incoming FCP command. 476 512 */ ··· 491 523 goto busy; 492 524 } 493 525 cmd->req_frame = fp; /* hold frame during cmd */ 494 - ft_queue_cmd(sess, cmd); 526 + 527 + INIT_WORK(&cmd->work, ft_send_work); 528 + queue_work(sess->tport->tpg->workqueue, &cmd->work); 495 529 return; 496 530 497 531 busy: ··· 533 563 /* 534 564 * Send new command to target. 535 565 */ 536 - static void ft_send_cmd(struct ft_cmd *cmd) 566 + static void ft_send_work(struct work_struct *work) 537 567 { 568 + struct ft_cmd *cmd = container_of(work, struct ft_cmd, work); 538 569 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); 539 570 struct se_cmd *se_cmd; 540 571 struct fcp_cmnd *fcp; 541 - int data_dir; 572 + int data_dir = 0; 542 573 u32 data_len; 543 574 int task_attr; 544 575 int ret; ··· 645 674 646 675 err: 647 676 ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); 648 - } 649 - 650 - /* 651 - * Handle request in the command thread. 652 - */ 653 - static void ft_exec_req(struct ft_cmd *cmd) 654 - { 655 - pr_debug("cmd state %x\n", cmd->state); 656 - switch (cmd->state) { 657 - case FC_CMD_ST_NEW: 658 - ft_send_cmd(cmd); 659 - break; 660 - default: 661 - break; 662 - } 663 - } 664 - 665 - /* 666 - * Processing thread. 667 - * Currently one thread per tpg. 
668 - */ 669 - int ft_thread(void *arg) 670 - { 671 - struct ft_tpg *tpg = arg; 672 - struct se_queue_obj *qobj = &tpg->qobj; 673 - struct ft_cmd *cmd; 674 - 675 - while (!kthread_should_stop()) { 676 - schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); 677 - if (kthread_should_stop()) 678 - goto out; 679 - 680 - cmd = ft_dequeue_cmd(qobj); 681 - if (cmd) 682 - ft_exec_req(cmd); 683 - } 684 - 685 - out: 686 - return 0; 687 677 }
+3 -4
drivers/target/tcm_fc/tfc_conf.c
··· 327 327 tpg->index = index; 328 328 tpg->lport_acl = lacl; 329 329 INIT_LIST_HEAD(&tpg->lun_list); 330 - transport_init_queue_obj(&tpg->qobj); 331 330 332 331 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, 333 332 tpg, TRANSPORT_TPG_TYPE_NORMAL); ··· 335 336 return NULL; 336 337 } 337 338 338 - tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index); 339 - if (IS_ERR(tpg->thread)) { 339 + tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1); 340 + if (!tpg->workqueue) { 340 341 kfree(tpg); 341 342 return NULL; 342 343 } ··· 355 356 pr_debug("del tpg %s\n", 356 357 config_item_name(&tpg->se_tpg.tpg_group.cg_item)); 357 358 358 - kthread_stop(tpg->thread); 359 + destroy_workqueue(tpg->workqueue); 359 360 360 361 /* Wait for sessions to be freed thru RCU, for BUG_ON below */ 361 362 synchronize_rcu();
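The tcm_fc hunks above retire the per-tpg processing kthread and its se_queue_obj in favor of a workqueue: each ft_cmd now embeds a work_struct, the receive path queues it, and the tpg owns the queue's lifetime through alloc_workqueue()/destroy_workqueue(). A minimal sketch of the same conversion, using hypothetical my_* names rather than the driver's own structures:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    struct my_cmd {
            struct work_struct work;
            /* ... per-command state ... */
    };

    static struct workqueue_struct *my_wq;

    static void my_cmd_work(struct work_struct *work)
    {
            struct my_cmd *cmd = container_of(work, struct my_cmd, work);

            /* runs in process context, may sleep */
            kfree(cmd);
    }

    static int my_setup(void)
    {
            /* max_active = 1, as in alloc_workqueue("tcm_fc", 0, 1) above */
            my_wq = alloc_workqueue("my_wq", 0, 1);
            return my_wq ? 0 : -ENOMEM;
    }

    static void my_queue_cmd(struct my_cmd *cmd)
    {
            INIT_WORK(&cmd->work, my_cmd_work);
            queue_work(my_wq, &cmd->work);
    }

    static void my_teardown(void)
    {
            destroy_workqueue(my_wq);       /* drains pending work first */
    }

This removes the hand-rolled queue, its lock and the wakeup protocol in one move; kthread_run()/kthread_stop() become alloc_workqueue()/destroy_workqueue().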
+30 -32
drivers/target/tcm_fc/tfc_io.c
··· 219 219 if (cmd->was_ddp_setup) { 220 220 BUG_ON(!ep); 221 221 BUG_ON(!lport); 222 - } 223 - 224 - /* 225 - * Doesn't expect payload if DDP is setup. Payload 226 - * is expected to be copied directly to user buffers 227 - * due to DDP (Large Rx offload), 228 - */ 229 - buf = fc_frame_payload_get(fp, 1); 230 - if (buf) 231 - pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " 222 + /* 223 + * Since DDP (Large Rx offload) was setup for this request, 224 + * payload is expected to be copied directly to user buffers. 225 + */ 226 + buf = fc_frame_payload_get(fp, 1); 227 + if (buf) 228 + pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " 232 229 "cmd->sg_cnt 0x%x. DDP was setup" 233 230 " hence not expected to receive frame with " 234 - "payload, Frame will be dropped if " 235 - "'Sequence Initiative' bit in f_ctl is " 231 + "payload, Frame will be dropped if" 232 + "'Sequence Initiative' bit in f_ctl is" 236 233 "not set\n", __func__, ep->xid, f_ctl, 237 234 cmd->sg, cmd->sg_cnt); 238 - /* 239 - * Invalidate HW DDP context if it was setup for respective 240 - * command. Invalidation of HW DDP context is requited in both 241 - * situation (success and error). 242 - */ 243 - ft_invl_hw_context(cmd); 235 + /* 236 + * Invalidate HW DDP context if it was setup for respective 237 + * command. Invalidation of HW DDP context is requited in both 238 + * situation (success and error). 239 + */ 240 + ft_invl_hw_context(cmd); 244 241 245 - /* 246 - * If "Sequence Initiative (TSI)" bit set in f_ctl, means last 247 - * write data frame is received successfully where payload is 248 - * posted directly to user buffer and only the last frame's 249 - * header is posted in receive queue. 250 - * 251 - * If "Sequence Initiative (TSI)" bit is not set, means error 252 - * condition w.r.t. DDP, hence drop the packet and let explict 253 - * ABORTS from other end of exchange timer trigger the recovery. 254 - */ 255 - if (f_ctl & FC_FC_SEQ_INIT) 256 - goto last_frame; 257 - else 258 - goto drop; 242 + /* 243 + * If "Sequence Initiative (TSI)" bit set in f_ctl, means last 244 + * write data frame is received successfully where payload is 245 + * posted directly to user buffer and only the last frame's 246 + * header is posted in receive queue. 247 + * 248 + * If "Sequence Initiative (TSI)" bit is not set, means error 249 + * condition w.r.t. DDP, hence drop the packet and let explict 250 + * ABORTS from other end of exchange timer trigger the recovery. 251 + */ 252 + if (f_ctl & FC_FC_SEQ_INIT) 253 + goto last_frame; 254 + else 255 + goto drop; 256 + } 259 257 260 258 rel_off = ntohl(fh->fh_parm_offset); 261 259 frame_len = fr_len(fp);
+2 -2
drivers/tty/serial/crisv10.c
··· 4450 4450 4451 4451 #if defined(CONFIG_ETRAX_RS485) 4452 4452 #if defined(CONFIG_ETRAX_RS485_ON_PA) 4453 - if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit, 4453 + if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit, 4454 4454 rs485_pa_bit)) { 4455 4455 printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " 4456 4456 "RS485 pin\n"); ··· 4459 4459 } 4460 4460 #endif 4461 4461 #if defined(CONFIG_ETRAX_RS485_ON_PORT_G) 4462 - if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit, 4462 + if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit, 4463 4463 rs485_port_g_bit)) { 4464 4464 printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " 4465 4465 "RS485 pin\n");
+1 -1
drivers/usb/host/xhci-hub.c
··· 761 761 memset(buf, 0, retval); 762 762 status = 0; 763 763 764 - mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC; 764 + mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC; 765 765 766 766 spin_lock_irqsave(&xhci->lock, flags); 767 767 /* For each port, did anything change? If so, set that bit in buf. */
+19
drivers/usb/host/xhci-ring.c
··· 1934 1934 int status = -EINPROGRESS; 1935 1935 struct urb_priv *urb_priv; 1936 1936 struct xhci_ep_ctx *ep_ctx; 1937 + struct list_head *tmp; 1937 1938 u32 trb_comp_code; 1938 1939 int ret = 0; 1940 + int td_num = 0; 1939 1941 1940 1942 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1941 1943 xdev = xhci->devs[slot_id]; ··· 1957 1955 xhci_err(xhci, "ERROR Transfer event for disabled endpoint " 1958 1956 "or incorrect stream ring\n"); 1959 1957 return -ENODEV; 1958 + } 1959 + 1960 + /* Count current td numbers if ep->skip is set */ 1961 + if (ep->skip) { 1962 + list_for_each(tmp, &ep_ring->td_list) 1963 + td_num++; 1960 1964 } 1961 1965 1962 1966 event_dma = le64_to_cpu(event->buffer); ··· 2076 2068 goto cleanup; 2077 2069 } 2078 2070 2071 + /* We've skipped all the TDs on the ep ring when ep->skip set */ 2072 + if (ep->skip && td_num == 0) { 2073 + ep->skip = false; 2074 + xhci_dbg(xhci, "All tds on the ep_ring skipped. " 2075 + "Clear skip flag.\n"); 2076 + ret = 0; 2077 + goto cleanup; 2078 + } 2079 + 2079 2080 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); 2081 + if (ep->skip) 2082 + td_num--; 2080 2083 2081 2084 /* Is this a TRB in the currently executing TD? */ 2082 2085 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
+1 -1
drivers/video/backlight/backlight.c
··· 19 19 #include <asm/backlight.h> 20 20 #endif 21 21 22 - static const char const *backlight_types[] = { 22 + static const char *const backlight_types[] = { 23 23 [BACKLIGHT_RAW] = "raw", 24 24 [BACKLIGHT_PLATFORM] = "platform", 25 25 [BACKLIGHT_FIRMWARE] = "firmware",
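The one-character backlight fix is worth spelling out: in C a duplicated qualifier such as 'const char const *' still applies both consts to the pointed-to chars (C99 6.7.3), so the array elements themselves stayed writable; 'const char *const' is what makes each element a read-only pointer to read-only data. A standalone illustration (user-space C, hypothetical names):

    #include <stdio.h>

    /* duplicate const: same as "const char *", gcc warns about it */
    static const char const *bad[]  = { "raw", "platform" };
    /* element pointers are now read-only */
    static const char *const good[] = { "raw", "platform" };

    int main(void)
    {
            bad[0] = "firmware";          /* accepted: elements are mutable */
            /* good[0] = "firmware"; */   /* error: assignment of read-only location */
            printf("%s %s\n", bad[0], good[1]);
            return 0;
    }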
+20 -20
drivers/xen/events.c
··· 54 54 * This lock protects updates to the following mapping and reference-count 55 55 * arrays. The lock does not need to be acquired to read the mapping tables. 56 56 */ 57 - static DEFINE_SPINLOCK(irq_mapping_update_lock); 57 + static DEFINE_MUTEX(irq_mapping_update_lock); 58 58 59 59 static LIST_HEAD(xen_irq_list_head); 60 60 ··· 631 631 int irq = -1; 632 632 struct physdev_irq irq_op; 633 633 634 - spin_lock(&irq_mapping_update_lock); 634 + mutex_lock(&irq_mapping_update_lock); 635 635 636 636 irq = find_irq_by_gsi(gsi); 637 637 if (irq != -1) { ··· 684 684 handle_edge_irq, name); 685 685 686 686 out: 687 - spin_unlock(&irq_mapping_update_lock); 687 + mutex_unlock(&irq_mapping_update_lock); 688 688 689 689 return irq; 690 690 } ··· 710 710 { 711 711 int irq, ret; 712 712 713 - spin_lock(&irq_mapping_update_lock); 713 + mutex_lock(&irq_mapping_update_lock); 714 714 715 715 irq = xen_allocate_irq_dynamic(); 716 716 if (irq == -1) ··· 724 724 if (ret < 0) 725 725 goto error_irq; 726 726 out: 727 - spin_unlock(&irq_mapping_update_lock); 727 + mutex_unlock(&irq_mapping_update_lock); 728 728 return irq; 729 729 error_irq: 730 - spin_unlock(&irq_mapping_update_lock); 730 + mutex_unlock(&irq_mapping_update_lock); 731 731 xen_free_irq(irq); 732 732 return -1; 733 733 } ··· 740 740 struct irq_info *info = info_for_irq(irq); 741 741 int rc = -ENOENT; 742 742 743 - spin_lock(&irq_mapping_update_lock); 743 + mutex_lock(&irq_mapping_update_lock); 744 744 745 745 desc = irq_to_desc(irq); 746 746 if (!desc) ··· 766 766 xen_free_irq(irq); 767 767 768 768 out: 769 - spin_unlock(&irq_mapping_update_lock); 769 + mutex_unlock(&irq_mapping_update_lock); 770 770 return rc; 771 771 } 772 772 ··· 776 776 777 777 struct irq_info *info; 778 778 779 - spin_lock(&irq_mapping_update_lock); 779 + mutex_lock(&irq_mapping_update_lock); 780 780 781 781 list_for_each_entry(info, &xen_irq_list_head, list) { 782 782 if (info == NULL || info->type != IRQT_PIRQ) ··· 787 787 } 788 788 irq = -1; 789 789 out: 790 - spin_unlock(&irq_mapping_update_lock); 790 + mutex_unlock(&irq_mapping_update_lock); 791 791 792 792 return irq; 793 793 } ··· 802 802 { 803 803 int irq; 804 804 805 - spin_lock(&irq_mapping_update_lock); 805 + mutex_lock(&irq_mapping_update_lock); 806 806 807 807 irq = evtchn_to_irq[evtchn]; 808 808 ··· 818 818 } 819 819 820 820 out: 821 - spin_unlock(&irq_mapping_update_lock); 821 + mutex_unlock(&irq_mapping_update_lock); 822 822 823 823 return irq; 824 824 } ··· 829 829 struct evtchn_bind_ipi bind_ipi; 830 830 int evtchn, irq; 831 831 832 - spin_lock(&irq_mapping_update_lock); 832 + mutex_lock(&irq_mapping_update_lock); 833 833 834 834 irq = per_cpu(ipi_to_irq, cpu)[ipi]; 835 835 ··· 853 853 } 854 854 855 855 out: 856 - spin_unlock(&irq_mapping_update_lock); 856 + mutex_unlock(&irq_mapping_update_lock); 857 857 return irq; 858 858 } 859 859 ··· 878 878 struct evtchn_bind_virq bind_virq; 879 879 int evtchn, irq; 880 880 881 - spin_lock(&irq_mapping_update_lock); 881 + mutex_lock(&irq_mapping_update_lock); 882 882 883 883 irq = per_cpu(virq_to_irq, cpu)[virq]; 884 884 ··· 903 903 } 904 904 905 905 out: 906 - spin_unlock(&irq_mapping_update_lock); 906 + mutex_unlock(&irq_mapping_update_lock); 907 907 908 908 return irq; 909 909 } ··· 913 913 struct evtchn_close close; 914 914 int evtchn = evtchn_from_irq(irq); 915 915 916 - spin_lock(&irq_mapping_update_lock); 916 + mutex_lock(&irq_mapping_update_lock); 917 917 918 918 if (VALID_EVTCHN(evtchn)) { 919 919 close.port = evtchn; ··· 943 943 944 944 
xen_free_irq(irq); 945 945 946 - spin_unlock(&irq_mapping_update_lock); 946 + mutex_unlock(&irq_mapping_update_lock); 947 947 } 948 948 949 949 int bind_evtchn_to_irqhandler(unsigned int evtchn, ··· 1279 1279 will also be masked. */ 1280 1280 disable_irq(irq); 1281 1281 1282 - spin_lock(&irq_mapping_update_lock); 1282 + mutex_lock(&irq_mapping_update_lock); 1283 1283 1284 1284 /* After resume the irq<->evtchn mappings are all cleared out */ 1285 1285 BUG_ON(evtchn_to_irq[evtchn] != -1); ··· 1289 1289 1290 1290 xen_irq_info_evtchn_init(irq, evtchn); 1291 1291 1292 - spin_unlock(&irq_mapping_update_lock); 1292 + mutex_unlock(&irq_mapping_update_lock); 1293 1293 1294 1294 /* new event channels are always bound to cpu 0 */ 1295 1295 irq_set_affinity(irq, cpumask_of(0));
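The xen/events conversion above is mechanical but the motivation matters: irq_mapping_update_lock now covers sections that can sleep (irq descriptor allocation among them), which a spinlock forbids, while lockless readers of the mapping tables are unaffected, as the retained comment says. The pattern, reduced to a sketch with hypothetical names:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(map_lock);      /* was: static DEFINE_SPINLOCK(map_lock) */

    static void update_mapping(void)
    {
            mutex_lock(&map_lock);      /* may sleep; never call from irq context */
            /* ... update tables, call allocators that may sleep ... */
            mutex_unlock(&map_lock);
    }

Every lock/unlock site has to be converted in the same patch, which the type change itself enforces: spin_lock() on a struct mutex will not compile.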
+4 -2
fs/9p/v9fs_vfs.h
··· 54 54 55 55 struct inode *v9fs_alloc_inode(struct super_block *sb); 56 56 void v9fs_destroy_inode(struct inode *inode); 57 - struct inode *v9fs_get_inode(struct super_block *sb, int mode); 57 + struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t); 58 58 int v9fs_init_inode(struct v9fs_session_info *v9ses, 59 - struct inode *inode, int mode); 59 + struct inode *inode, int mode, dev_t); 60 60 void v9fs_evict_inode(struct inode *inode); 61 61 ino_t v9fs_qid2ino(struct p9_qid *qid); 62 62 void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *); ··· 83 83 v9inode->cache_validity |= V9FS_INO_INVALID_ATTR; 84 84 return; 85 85 } 86 + 87 + int v9fs_open_to_dotl_flags(int flags); 86 88 #endif
+28 -8
fs/9p/vfs_file.c
··· 65 65 v9inode = V9FS_I(inode); 66 66 v9ses = v9fs_inode2v9ses(inode); 67 67 if (v9fs_proto_dotl(v9ses)) 68 - omode = file->f_flags; 68 + omode = v9fs_open_to_dotl_flags(file->f_flags); 69 69 else 70 70 omode = v9fs_uflags2omode(file->f_flags, 71 71 v9fs_proto_dotu(v9ses)); ··· 169 169 170 170 /* convert posix lock to p9 tlock args */ 171 171 memset(&flock, 0, sizeof(flock)); 172 - flock.type = fl->fl_type; 172 + /* map the lock type */ 173 + switch (fl->fl_type) { 174 + case F_RDLCK: 175 + flock.type = P9_LOCK_TYPE_RDLCK; 176 + break; 177 + case F_WRLCK: 178 + flock.type = P9_LOCK_TYPE_WRLCK; 179 + break; 180 + case F_UNLCK: 181 + flock.type = P9_LOCK_TYPE_UNLCK; 182 + break; 183 + } 173 184 flock.start = fl->fl_start; 174 185 if (fl->fl_end == OFFSET_MAX) 175 186 flock.length = 0; ··· 256 245 257 246 /* convert posix lock to p9 tgetlock args */ 258 247 memset(&glock, 0, sizeof(glock)); 259 - glock.type = fl->fl_type; 248 + glock.type = P9_LOCK_TYPE_UNLCK; 260 249 glock.start = fl->fl_start; 261 250 if (fl->fl_end == OFFSET_MAX) 262 251 glock.length = 0; ··· 268 257 res = p9_client_getlock_dotl(fid, &glock); 269 258 if (res < 0) 270 259 return res; 271 - if (glock.type != F_UNLCK) { 272 - fl->fl_type = glock.type; 260 + /* map 9p lock type to os lock type */ 261 + switch (glock.type) { 262 + case P9_LOCK_TYPE_RDLCK: 263 + fl->fl_type = F_RDLCK; 264 + break; 265 + case P9_LOCK_TYPE_WRLCK: 266 + fl->fl_type = F_WRLCK; 267 + break; 268 + case P9_LOCK_TYPE_UNLCK: 269 + fl->fl_type = F_UNLCK; 270 + break; 271 + } 272 + if (glock.type != P9_LOCK_TYPE_UNLCK) { 273 273 fl->fl_start = glock.start; 274 274 if (glock.length == 0) 275 275 fl->fl_end = OFFSET_MAX; 276 276 else 277 277 fl->fl_end = glock.start + glock.length - 1; 278 278 fl->fl_pid = glock.proc_id; 279 - } else 280 - fl->fl_type = F_UNLCK; 281 - 279 + } 282 280 return res; 283 281 } 284 282
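After the vfs_file.c hunks, POSIX lock types are translated to the protocol's P9_LOCK_TYPE_* constants in both directions instead of being sent raw, and F_GETLK now seeds the query with P9_LOCK_TYPE_UNLCK and maps the reply back. From user space nothing changes: an ordinary fcntl() byte-range lock exercises the whole path. A small demo, assuming a file on a mounted 9p filesystem (the mount path is hypothetical):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct flock fl = {
                    .l_type   = F_WRLCK,  /* client maps this to P9_LOCK_TYPE_WRLCK */
                    .l_whence = SEEK_SET,
                    .l_start  = 0,
                    .l_len    = 0,        /* 0 = to EOF, sent as length 0 */
            };
            int fd = open("/mnt/9p/testfile", O_RDWR | O_CREAT, 0644);

            if (fd < 0 || fcntl(fd, F_SETLKW, &fl) < 0) {
                    perror("lock");
                    return 1;
            }
            fl.l_type = F_UNLCK;          /* and this to P9_LOCK_TYPE_UNLCK */
            fcntl(fd, F_SETLK, &fl);
            close(fd);
            return 0;
    }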
+87 -52
fs/9p/vfs_inode.c
··· 95 95 /** 96 96 * p9mode2unixmode- convert plan9 mode bits to unix mode bits 97 97 * @v9ses: v9fs session information 98 - * @mode: mode to convert 98 + * @stat: p9_wstat from which mode need to be derived 99 + * @rdev: major number, minor number in case of device files. 99 100 * 100 101 */ 101 - 102 - static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode) 102 + static int p9mode2unixmode(struct v9fs_session_info *v9ses, 103 + struct p9_wstat *stat, dev_t *rdev) 103 104 { 104 105 int res; 106 + int mode = stat->mode; 105 107 106 - res = mode & 0777; 108 + res = mode & S_IALLUGO; 109 + *rdev = 0; 107 110 108 111 if ((mode & P9_DMDIR) == P9_DMDIR) 109 112 res |= S_IFDIR; ··· 119 116 && (v9ses->nodev == 0)) 120 117 res |= S_IFIFO; 121 118 else if ((mode & P9_DMDEVICE) && (v9fs_proto_dotu(v9ses)) 122 - && (v9ses->nodev == 0)) 123 - res |= S_IFBLK; 124 - else 119 + && (v9ses->nodev == 0)) { 120 + char type = 0, ext[32]; 121 + int major = -1, minor = -1; 122 + 123 + strncpy(ext, stat->extension, sizeof(ext)); 124 + sscanf(ext, "%c %u %u", &type, &major, &minor); 125 + switch (type) { 126 + case 'c': 127 + res |= S_IFCHR; 128 + break; 129 + case 'b': 130 + res |= S_IFBLK; 131 + break; 132 + default: 133 + P9_DPRINTK(P9_DEBUG_ERROR, 134 + "Unknown special type %c %s\n", type, 135 + stat->extension); 136 + }; 137 + *rdev = MKDEV(major, minor); 138 + } else 125 139 res |= S_IFREG; 126 140 127 141 if (v9fs_proto_dotu(v9ses)) { ··· 151 131 if ((mode & P9_DMSETVTX) == P9_DMSETVTX) 152 132 res |= S_ISVTX; 153 133 } 154 - 155 134 return res; 156 135 } 157 136 ··· 261 242 } 262 243 263 244 int v9fs_init_inode(struct v9fs_session_info *v9ses, 264 - struct inode *inode, int mode) 245 + struct inode *inode, int mode, dev_t rdev) 265 246 { 266 247 int err = 0; 267 248 268 249 inode_init_owner(inode, NULL, mode); 269 250 inode->i_blocks = 0; 270 - inode->i_rdev = 0; 251 + inode->i_rdev = rdev; 271 252 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 272 253 inode->i_mapping->a_ops = &v9fs_addr_operations; 273 254 ··· 354 335 * 355 336 */ 356 337 357 - struct inode *v9fs_get_inode(struct super_block *sb, int mode) 338 + struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t rdev) 358 339 { 359 340 int err; 360 341 struct inode *inode; ··· 367 348 P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n"); 368 349 return ERR_PTR(-ENOMEM); 369 350 } 370 - err = v9fs_init_inode(v9ses, inode, mode); 351 + err = v9fs_init_inode(v9ses, inode, mode, rdev); 371 352 if (err) { 372 353 iput(inode); 373 354 return ERR_PTR(err); ··· 454 435 static int v9fs_test_inode(struct inode *inode, void *data) 455 436 { 456 437 int umode; 438 + dev_t rdev; 457 439 struct v9fs_inode *v9inode = V9FS_I(inode); 458 440 struct p9_wstat *st = (struct p9_wstat *)data; 459 441 struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); 460 442 461 - umode = p9mode2unixmode(v9ses, st->mode); 443 + umode = p9mode2unixmode(v9ses, st, &rdev); 462 444 /* don't match inode of different type */ 463 445 if ((inode->i_mode & S_IFMT) != (umode & S_IFMT)) 464 446 return 0; ··· 493 473 struct p9_wstat *st, 494 474 int new) 495 475 { 476 + dev_t rdev; 496 477 int retval, umode; 497 478 unsigned long i_ino; 498 479 struct inode *inode; ··· 517 496 * later. 
518 497 */ 519 498 inode->i_ino = i_ino; 520 - umode = p9mode2unixmode(v9ses, st->mode); 521 - retval = v9fs_init_inode(v9ses, inode, umode); 499 + umode = p9mode2unixmode(v9ses, st, &rdev); 500 + retval = v9fs_init_inode(v9ses, inode, umode, rdev); 522 501 if (retval) 523 502 goto error; 524 503 ··· 553 532 } 554 533 555 534 /** 535 + * v9fs_at_to_dotl_flags- convert Linux specific AT flags to 536 + * plan 9 AT flag. 537 + * @flags: flags to convert 538 + */ 539 + static int v9fs_at_to_dotl_flags(int flags) 540 + { 541 + int rflags = 0; 542 + if (flags & AT_REMOVEDIR) 543 + rflags |= P9_DOTL_AT_REMOVEDIR; 544 + return rflags; 545 + } 546 + 547 + /** 556 548 * v9fs_remove - helper function to remove files and directories 557 549 * @dir: directory inode that is being deleted 558 550 * @dentry: dentry that is being deleted ··· 592 558 return retval; 593 559 } 594 560 if (v9fs_proto_dotl(v9ses)) 595 - retval = p9_client_unlinkat(dfid, dentry->d_name.name, flags); 561 + retval = p9_client_unlinkat(dfid, dentry->d_name.name, 562 + v9fs_at_to_dotl_flags(flags)); 596 563 if (retval == -EOPNOTSUPP) { 597 564 /* Try the one based on path */ 598 565 v9fid = v9fs_fid_clone(dentry); ··· 680 645 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); 681 646 goto error; 682 647 } 683 - d_instantiate(dentry, inode); 684 648 err = v9fs_fid_add(dentry, fid); 685 649 if (err < 0) 686 650 goto error; 687 - 651 + d_instantiate(dentry, inode); 688 652 return ofid; 689 - 690 653 error: 691 654 if (ofid) 692 655 p9_client_clunk(ofid); ··· 825 792 struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, 826 793 struct nameidata *nameidata) 827 794 { 795 + struct dentry *res; 828 796 struct super_block *sb; 829 797 struct v9fs_session_info *v9ses; 830 798 struct p9_fid *dfid, *fid; ··· 857 823 858 824 return ERR_PTR(result); 859 825 } 860 - 861 - inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb); 826 + /* 827 + * Make sure we don't use a wrong inode due to parallel 828 + * unlink. For cached mode create calls request for new 829 + * inode. But with cache disabled, lookup should do this. 830 + */ 831 + if (v9ses->cache) 832 + inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb); 833 + else 834 + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); 862 835 if (IS_ERR(inode)) { 863 836 result = PTR_ERR(inode); 864 837 inode = NULL; 865 838 goto error; 866 839 } 867 - 868 840 result = v9fs_fid_add(dentry, fid); 869 841 if (result < 0) 870 842 goto error_iput; 871 - 872 843 inst_out: 873 - d_add(dentry, inode); 874 - return NULL; 875 - 844 + /* 845 + * If we had a rename on the server and a parallel lookup 846 + * for the new name, then make sure we instantiate with 847 + * the new name. ie look up for a/b, while on server somebody 848 + * moved b under k and client parallely did a lookup for 849 + * k/b. 
850 + */ 851 + res = d_materialise_unique(dentry, inode); 852 + if (!IS_ERR(res)) 853 + return res; 854 + result = PTR_ERR(res); 876 855 error_iput: 877 856 iput(inode); 878 857 error: ··· 1049 1002 return PTR_ERR(st); 1050 1003 1051 1004 v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb); 1052 - generic_fillattr(dentry->d_inode, stat); 1005 + generic_fillattr(dentry->d_inode, stat); 1053 1006 1054 1007 p9stat_free(st); 1055 1008 kfree(st); ··· 1133 1086 v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, 1134 1087 struct super_block *sb) 1135 1088 { 1089 + mode_t mode; 1136 1090 char ext[32]; 1137 1091 char tag_name[14]; 1138 1092 unsigned int i_nlink; ··· 1169 1121 inode->i_nlink = i_nlink; 1170 1122 } 1171 1123 } 1172 - inode->i_mode = p9mode2unixmode(v9ses, stat->mode); 1173 - if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) { 1174 - char type = 0; 1175 - int major = -1; 1176 - int minor = -1; 1177 - 1178 - strncpy(ext, stat->extension, sizeof(ext)); 1179 - sscanf(ext, "%c %u %u", &type, &major, &minor); 1180 - switch (type) { 1181 - case 'c': 1182 - inode->i_mode &= ~S_IFBLK; 1183 - inode->i_mode |= S_IFCHR; 1184 - break; 1185 - case 'b': 1186 - break; 1187 - default: 1188 - P9_DPRINTK(P9_DEBUG_ERROR, 1189 - "Unknown special type %c %s\n", type, 1190 - stat->extension); 1191 - }; 1192 - inode->i_rdev = MKDEV(major, minor); 1193 - init_special_inode(inode, inode->i_mode, inode->i_rdev); 1194 - } else 1195 - inode->i_rdev = 0; 1196 - 1124 + mode = stat->mode & S_IALLUGO; 1125 + mode |= inode->i_mode & ~S_IALLUGO; 1126 + inode->i_mode = mode; 1197 1127 i_size_write(inode, stat->length); 1198 1128 1199 1129 /* not real number of blocks, but 512 byte ones ... */ ··· 1437 1411 1438 1412 int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode) 1439 1413 { 1414 + int umode; 1415 + dev_t rdev; 1440 1416 loff_t i_size; 1441 1417 struct p9_wstat *st; 1442 1418 struct v9fs_session_info *v9ses; ··· 1447 1419 st = p9_client_stat(fid); 1448 1420 if (IS_ERR(st)) 1449 1421 return PTR_ERR(st); 1422 + /* 1423 + * Don't update inode if the file type is different 1424 + */ 1425 + umode = p9mode2unixmode(v9ses, st, &rdev); 1426 + if ((inode->i_mode & S_IFMT) != (umode & S_IFMT)) 1427 + goto out; 1450 1428 1451 1429 spin_lock(&inode->i_lock); 1452 1430 /* ··· 1464 1430 if (v9ses->cache) 1465 1431 inode->i_size = i_size; 1466 1432 spin_unlock(&inode->i_lock); 1433 + out: 1467 1434 p9stat_free(st); 1468 1435 kfree(st); 1469 1436 return 0;
+73 -13
fs/9p/vfs_inode_dotl.c
··· 153 153 * later. 154 154 */ 155 155 inode->i_ino = i_ino; 156 - retval = v9fs_init_inode(v9ses, inode, st->st_mode); 156 + retval = v9fs_init_inode(v9ses, inode, 157 + st->st_mode, new_decode_dev(st->st_rdev)); 157 158 if (retval) 158 159 goto error; 159 160 ··· 189 188 inode = v9fs_qid_iget_dotl(sb, &st->qid, fid, st, new); 190 189 kfree(st); 191 190 return inode; 191 + } 192 + 193 + struct dotl_openflag_map { 194 + int open_flag; 195 + int dotl_flag; 196 + }; 197 + 198 + static int v9fs_mapped_dotl_flags(int flags) 199 + { 200 + int i; 201 + int rflags = 0; 202 + struct dotl_openflag_map dotl_oflag_map[] = { 203 + { O_CREAT, P9_DOTL_CREATE }, 204 + { O_EXCL, P9_DOTL_EXCL }, 205 + { O_NOCTTY, P9_DOTL_NOCTTY }, 206 + { O_TRUNC, P9_DOTL_TRUNC }, 207 + { O_APPEND, P9_DOTL_APPEND }, 208 + { O_NONBLOCK, P9_DOTL_NONBLOCK }, 209 + { O_DSYNC, P9_DOTL_DSYNC }, 210 + { FASYNC, P9_DOTL_FASYNC }, 211 + { O_DIRECT, P9_DOTL_DIRECT }, 212 + { O_LARGEFILE, P9_DOTL_LARGEFILE }, 213 + { O_DIRECTORY, P9_DOTL_DIRECTORY }, 214 + { O_NOFOLLOW, P9_DOTL_NOFOLLOW }, 215 + { O_NOATIME, P9_DOTL_NOATIME }, 216 + { O_CLOEXEC, P9_DOTL_CLOEXEC }, 217 + { O_SYNC, P9_DOTL_SYNC}, 218 + }; 219 + for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) { 220 + if (flags & dotl_oflag_map[i].open_flag) 221 + rflags |= dotl_oflag_map[i].dotl_flag; 222 + } 223 + return rflags; 224 + } 225 + 226 + /** 227 + * v9fs_open_to_dotl_flags- convert Linux specific open flags to 228 + * plan 9 open flag. 229 + * @flags: flags to convert 230 + */ 231 + int v9fs_open_to_dotl_flags(int flags) 232 + { 233 + int rflags = 0; 234 + 235 + /* 236 + * We have same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY 237 + * and P9_DOTL_NOACCESS 238 + */ 239 + rflags |= flags & O_ACCMODE; 240 + rflags |= v9fs_mapped_dotl_flags(flags); 241 + 242 + return rflags; 192 243 } 193 244 194 245 /** ··· 311 258 "Failed to get acl values in creat %d\n", err); 312 259 goto error; 313 260 } 314 - err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid); 261 + err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags), 262 + mode, gid, &qid); 315 263 if (err < 0) { 316 264 P9_DPRINTK(P9_DEBUG_VFS, 317 265 "p9_client_open_dotl failed in creat %d\n", ··· 335 281 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); 336 282 goto error; 337 283 } 338 - d_instantiate(dentry, inode); 339 284 err = v9fs_fid_add(dentry, fid); 340 285 if (err < 0) 341 286 goto error; 287 + d_instantiate(dentry, inode); 342 288 343 289 /* Now set the ACL based on the default value */ 344 290 v9fs_set_create_acl(dentry, &dacl, &pacl); ··· 457 403 err); 458 404 goto error; 459 405 } 460 - d_instantiate(dentry, inode); 461 406 err = v9fs_fid_add(dentry, fid); 462 407 if (err < 0) 463 408 goto error; 409 + d_instantiate(dentry, inode); 464 410 fid = NULL; 465 411 } else { 466 412 /* ··· 468 414 * inode with stat. 
We need to get an inode 469 415 * so that we can set the acl with dentry 470 416 */ 471 - inode = v9fs_get_inode(dir->i_sb, mode); 417 + inode = v9fs_get_inode(dir->i_sb, mode, 0); 472 418 if (IS_ERR(inode)) { 473 419 err = PTR_ERR(inode); 474 420 goto error; ··· 594 540 void 595 541 v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode) 596 542 { 543 + mode_t mode; 597 544 struct v9fs_inode *v9inode = V9FS_I(inode); 598 545 599 546 if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) { ··· 607 552 inode->i_uid = stat->st_uid; 608 553 inode->i_gid = stat->st_gid; 609 554 inode->i_nlink = stat->st_nlink; 610 - inode->i_mode = stat->st_mode; 611 - inode->i_rdev = new_decode_dev(stat->st_rdev); 612 555 613 - if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) 614 - init_special_inode(inode, inode->i_mode, inode->i_rdev); 556 + mode = stat->st_mode & S_IALLUGO; 557 + mode |= inode->i_mode & ~S_IALLUGO; 558 + inode->i_mode = mode; 615 559 616 560 i_size_write(inode, stat->st_size); 617 561 inode->i_blocks = stat->st_blocks; ··· 711 657 err); 712 658 goto error; 713 659 } 714 - d_instantiate(dentry, inode); 715 660 err = v9fs_fid_add(dentry, fid); 716 661 if (err < 0) 717 662 goto error; 663 + d_instantiate(dentry, inode); 718 664 fid = NULL; 719 665 } else { 720 666 /* Not in cached mode. No need to populate inode with stat */ 721 - inode = v9fs_get_inode(dir->i_sb, S_IFLNK); 667 + inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0); 722 668 if (IS_ERR(inode)) { 723 669 err = PTR_ERR(inode); 724 670 goto error; ··· 864 810 err); 865 811 goto error; 866 812 } 867 - d_instantiate(dentry, inode); 868 813 err = v9fs_fid_add(dentry, fid); 869 814 if (err < 0) 870 815 goto error; 816 + d_instantiate(dentry, inode); 871 817 fid = NULL; 872 818 } else { 873 819 /* 874 820 * Not in cached mode. No need to populate inode with stat. 875 821 * socket syscall returns a fd, so we need instantiate 876 822 */ 877 - inode = v9fs_get_inode(dir->i_sb, mode); 823 + inode = v9fs_get_inode(dir->i_sb, mode, rdev); 878 824 if (IS_ERR(inode)) { 879 825 err = PTR_ERR(inode); 880 826 goto error; ··· 940 886 st = p9_client_getattr_dotl(fid, P9_STATS_ALL); 941 887 if (IS_ERR(st)) 942 888 return PTR_ERR(st); 889 + /* 890 + * Don't update inode if the file type is different 891 + */ 892 + if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT)) 893 + goto out; 943 894 944 895 spin_lock(&inode->i_lock); 945 896 /* ··· 956 897 if (v9ses->cache) 957 898 inode->i_size = i_size; 958 899 spin_unlock(&inode->i_lock); 900 + out: 959 901 kfree(st); 960 902 return 0; 961 903 }
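v9fs_mapped_dotl_flags() above is a table-driven flag translation: one row per Linux O_* bit, one loop that ORs in the matching protocol bit, plus the O_ACCMODE passthrough in v9fs_open_to_dotl_flags() for the mode bits whose values already coincide. The shape is reusable whenever two bit namespaces disagree; a self-contained miniature with made-up FOO_* targets:

    #include <fcntl.h>
    #include <stdio.h>

    #define FOO_CREATE 0x01
    #define FOO_TRUNC  0x02
    #define FOO_APPEND 0x04

    static const struct { int from, to; } flag_map[] = {
            { O_CREAT,  FOO_CREATE },
            { O_TRUNC,  FOO_TRUNC  },
            { O_APPEND, FOO_APPEND },
    };

    static int map_flags(int flags)
    {
            unsigned i;
            int out = 0;

            for (i = 0; i < sizeof(flag_map) / sizeof(flag_map[0]); i++)
                    if (flags & flag_map[i].from)
                            out |= flag_map[i].to;
            return out;
    }

    int main(void)
    {
            printf("0x%x\n", map_flags(O_CREAT | O_TRUNC)); /* prints 0x3 */
            return 0;
    }

Compared with a chain of if statements, the table keeps each mapping on one line and makes omissions easy to spot in review.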
+1 -1
fs/9p/vfs_super.c
··· 149 149 else 150 150 sb->s_d_op = &v9fs_dentry_operations; 151 151 152 - inode = v9fs_get_inode(sb, S_IFDIR | mode); 152 + inode = v9fs_get_inode(sb, S_IFDIR | mode, 0); 153 153 if (IS_ERR(inode)) { 154 154 retval = PTR_ERR(inode); 155 155 goto release_sb;
+5 -2
fs/block_dev.c
··· 1429 1429 WARN_ON_ONCE(bdev->bd_holders); 1430 1430 sync_blockdev(bdev); 1431 1431 kill_bdev(bdev); 1432 + /* ->release can cause the old bdi to disappear, 1433 + * so must switch it out first 1434 + */ 1435 + bdev_inode_switch_bdi(bdev->bd_inode, 1436 + &default_backing_dev_info); 1432 1437 } 1433 1438 if (bdev->bd_contains == bdev) { 1434 1439 if (disk->fops->release) ··· 1447 1442 disk_put_part(bdev->bd_part); 1448 1443 bdev->bd_part = NULL; 1449 1444 bdev->bd_disk = NULL; 1450 - bdev_inode_switch_bdi(bdev->bd_inode, 1451 - &default_backing_dev_info); 1452 1445 if (bdev != bdev->bd_contains) 1453 1446 victim = bdev->bd_contains; 1454 1447 bdev->bd_contains = NULL;
+5 -1
fs/btrfs/btrfs_inode.h
··· 176 176 { 177 177 u64 ino = BTRFS_I(inode)->location.objectid; 178 178 179 - if (ino <= BTRFS_FIRST_FREE_OBJECTID) 179 + /* 180 + * !ino: btree_inode 181 + * type == BTRFS_ROOT_ITEM_KEY: subvol dir 182 + */ 183 + if (!ino || BTRFS_I(inode)->location.type == BTRFS_ROOT_ITEM_KEY) 180 184 ino = inode->i_ino; 181 185 return ino; 182 186 }
+3 -1
fs/btrfs/file-item.c
··· 183 183 * read from the commit root and sidestep a nasty deadlock 184 184 * between reading the free space cache and updating the csum tree. 185 185 */ 186 - if (btrfs_is_free_space_inode(root, inode)) 186 + if (btrfs_is_free_space_inode(root, inode)) { 187 187 path->search_commit_root = 1; 188 + path->skip_locking = 1; 189 + } 188 190 189 191 disk_bytenr = (u64)bio->bi_sector << 9; 190 192 if (dio)
+17 -8
fs/btrfs/file.c
··· 1075 1075 start_pos = pos & ~((u64)root->sectorsize - 1); 1076 1076 last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT; 1077 1077 1078 - if (start_pos > inode->i_size) { 1079 - err = btrfs_cont_expand(inode, i_size_read(inode), start_pos); 1080 - if (err) 1081 - return err; 1082 - } 1083 - 1084 1078 again: 1085 1079 for (i = 0; i < num_pages; i++) { 1086 1080 pages[i] = find_or_create_page(inode->i_mapping, index + i, ··· 1332 1338 struct inode *inode = fdentry(file)->d_inode; 1333 1339 struct btrfs_root *root = BTRFS_I(inode)->root; 1334 1340 loff_t *ppos = &iocb->ki_pos; 1341 + u64 start_pos; 1335 1342 ssize_t num_written = 0; 1336 1343 ssize_t err = 0; 1337 1344 size_t count, ocount; ··· 1380 1385 1381 1386 file_update_time(file); 1382 1387 BTRFS_I(inode)->sequence++; 1388 + 1389 + start_pos = round_down(pos, root->sectorsize); 1390 + if (start_pos > i_size_read(inode)) { 1391 + err = btrfs_cont_expand(inode, i_size_read(inode), start_pos); 1392 + if (err) { 1393 + mutex_unlock(&inode->i_mutex); 1394 + goto out; 1395 + } 1396 + } 1383 1397 1384 1398 if (unlikely(file->f_flags & O_DIRECT)) { 1385 1399 num_written = __btrfs_direct_write(iocb, iov, nr_segs, ··· 1817 1813 goto out; 1818 1814 case SEEK_DATA: 1819 1815 case SEEK_HOLE: 1816 + if (offset >= i_size_read(inode)) { 1817 + mutex_unlock(&inode->i_mutex); 1818 + return -ENXIO; 1819 + } 1820 + 1820 1821 ret = find_desired_extent(inode, &offset, origin); 1821 1822 if (ret) { 1822 1823 mutex_unlock(&inode->i_mutex); ··· 1830 1821 } 1831 1822 1832 1823 if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) { 1833 - ret = -EINVAL; 1824 + offset = -EINVAL; 1834 1825 goto out; 1835 1826 } 1836 1827 if (offset > inode->i_sb->s_maxbytes) { 1837 - ret = -EINVAL; 1828 + offset = -EINVAL; 1838 1829 goto out; 1839 1830 } 1840 1831
+4
fs/btrfs/free-space-cache.c
··· 190 190 struct btrfs_path *path, 191 191 struct inode *inode) 192 192 { 193 + struct btrfs_block_rsv *rsv; 193 194 loff_t oldsize; 194 195 int ret = 0; 195 196 197 + rsv = trans->block_rsv; 196 198 trans->block_rsv = root->orphan_block_rsv; 197 199 ret = btrfs_block_rsv_check(trans, root, 198 200 root->orphan_block_rsv, ··· 212 210 */ 213 211 ret = btrfs_truncate_inode_items(trans, root, inode, 214 212 0, BTRFS_EXTENT_DATA_KEY); 213 + 214 + trans->block_rsv = rsv; 215 215 if (ret) { 216 216 WARN_ON(1); 217 217 return ret;
+25 -15
fs/btrfs/inode.c
··· 1786 1786 &ordered_extent->list); 1787 1787 1788 1788 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1789 - if (!ret) { 1789 + if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 1790 1790 ret = btrfs_update_inode(trans, root, inode); 1791 1791 BUG_ON(ret); 1792 1792 } ··· 3510 3510 err = btrfs_drop_extents(trans, inode, cur_offset, 3511 3511 cur_offset + hole_size, 3512 3512 &hint_byte, 1); 3513 - if (err) 3513 + if (err) { 3514 + btrfs_end_transaction(trans, root); 3514 3515 break; 3516 + } 3515 3517 3516 3518 err = btrfs_insert_file_extent(trans, root, 3517 3519 btrfs_ino(inode), cur_offset, 0, 3518 3520 0, hole_size, 0, hole_size, 3519 3521 0, 0, 0); 3520 - if (err) 3522 + if (err) { 3523 + btrfs_end_transaction(trans, root); 3521 3524 break; 3525 + } 3522 3526 3523 3527 btrfs_drop_extent_cache(inode, hole_start, 3524 3528 last_byte - 1, 0); ··· 3956 3952 struct btrfs_root *root, int *new) 3957 3953 { 3958 3954 struct inode *inode; 3959 - int bad_inode = 0; 3960 3955 3961 3956 inode = btrfs_iget_locked(s, location->objectid, root); 3962 3957 if (!inode) ··· 3971 3968 if (new) 3972 3969 *new = 1; 3973 3970 } else { 3974 - bad_inode = 1; 3971 + unlock_new_inode(inode); 3972 + iput(inode); 3973 + inode = ERR_PTR(-ESTALE); 3975 3974 } 3976 - } 3977 - 3978 - if (bad_inode) { 3979 - iput(inode); 3980 - inode = ERR_PTR(-ESTALE); 3981 3975 } 3982 3976 3983 3977 return inode; ··· 4018 4018 memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key)); 4019 4019 kfree(dentry->d_fsdata); 4020 4020 dentry->d_fsdata = NULL; 4021 - d_clear_need_lookup(dentry); 4021 + /* This thing is hashed, drop it for now */ 4022 + d_drop(dentry); 4022 4023 } else { 4023 4024 ret = btrfs_inode_by_name(dir, dentry, &location); 4024 4025 } ··· 4086 4085 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 4087 4086 struct nameidata *nd) 4088 4087 { 4089 - return d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); 4088 + struct dentry *ret; 4089 + 4090 + ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); 4091 + if (unlikely(d_need_lookup(dentry))) { 4092 + spin_lock(&dentry->d_lock); 4093 + dentry->d_flags &= ~DCACHE_NEED_LOOKUP; 4094 + spin_unlock(&dentry->d_lock); 4095 + } 4096 + return ret; 4090 4097 } 4091 4098 4092 4099 unsigned char btrfs_filetype_table[] = { ··· 4134 4125 4135 4126 /* special case for "." */ 4136 4127 if (filp->f_pos == 0) { 4137 - over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR); 4128 + over = filldir(dirent, ".", 1, 4129 + filp->f_pos, btrfs_ino(inode), DT_DIR); 4138 4130 if (over) 4139 4131 return 0; 4140 4132 filp->f_pos = 1; ··· 4144 4134 if (filp->f_pos == 1) { 4145 4135 u64 pino = parent_ino(filp->f_path.dentry); 4146 4136 over = filldir(dirent, "..", 2, 4147 - 2, pino, DT_DIR); 4137 + filp->f_pos, pino, DT_DIR); 4148 4138 if (over) 4149 4139 return 0; 4150 4140 filp->f_pos = 2; ··· 5833 5823 5834 5824 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); 5835 5825 ret = btrfs_ordered_update_i_size(inode, 0, ordered); 5836 - if (!ret) 5826 + if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) 5837 5827 btrfs_update_inode(trans, root, inode); 5838 5828 ret = 0; 5839 5829 out_unlock:
+25 -8
fs/btrfs/ioctl.c
··· 2177 2177 if (!(src_file->f_mode & FMODE_READ)) 2178 2178 goto out_fput; 2179 2179 2180 + /* don't make the dst file partly checksummed */ 2181 + if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) != 2182 + (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) 2183 + goto out_fput; 2184 + 2180 2185 ret = -EISDIR; 2181 2186 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode)) 2182 2187 goto out_fput; ··· 2225 2220 !IS_ALIGNED(destoff, bs)) 2226 2221 goto out_unlock; 2227 2222 2223 + if (destoff > inode->i_size) { 2224 + ret = btrfs_cont_expand(inode, inode->i_size, destoff); 2225 + if (ret) 2226 + goto out_unlock; 2227 + } 2228 + 2229 + /* truncate page cache pages from target inode range */ 2230 + truncate_inode_pages_range(&inode->i_data, destoff, 2231 + PAGE_CACHE_ALIGN(destoff + len) - 1); 2232 + 2228 2233 /* do any pending delalloc/csum calc on src, one way or 2229 2234 another, and lock file content */ 2230 2235 while (1) { ··· 2250 2235 btrfs_put_ordered_extent(ordered); 2251 2236 btrfs_wait_ordered_range(src, off, len); 2252 2237 } 2253 - 2254 - /* truncate page cache pages from target inode range */ 2255 - truncate_inode_pages_range(&inode->i_data, off, 2256 - ALIGN(off + len, PAGE_CACHE_SIZE) - 1); 2257 2238 2258 2239 /* clone data */ 2259 2240 key.objectid = btrfs_ino(src); ··· 2336 2325 2337 2326 if (type == BTRFS_FILE_EXTENT_REG || 2338 2327 type == BTRFS_FILE_EXTENT_PREALLOC) { 2328 + /* 2329 + * a | --- range to clone ---| b 2330 + * | ------------- extent ------------- | 2331 + */ 2332 + 2333 + /* substract range b */ 2334 + if (key.offset + datal > off + len) 2335 + datal = off + len - key.offset; 2336 + 2337 + /* substract range a */ 2339 2338 if (off > key.offset) { 2340 2339 datao += off - key.offset; 2341 2340 datal -= off - key.offset; 2342 2341 } 2343 - 2344 - if (key.offset + datal > off + len) 2345 - datal = off + len - key.offset; 2346 2342 2347 2343 ret = btrfs_drop_extents(trans, inode, 2348 2344 new_key.offset, ··· 2447 2429 if (endoff > inode->i_size) 2448 2430 btrfs_i_size_write(inode, endoff); 2449 2431 2450 - BTRFS_I(inode)->flags = BTRFS_I(src)->flags; 2451 2432 ret = btrfs_update_inode(trans, root, inode); 2452 2433 BUG_ON(ret); 2453 2434 btrfs_end_transaction(trans, root);
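The reordered clamping in the clone ioctl deserves a worked example. Both clamps trim the extent [key.offset, key.offset + datal) to the requested range [off, off + len), but the tail test uses key.offset, so it must run before the head adjustment shrinks datal. With an extent [0, 100) and a clone range [10, 30): the fixed order gives datal = 30 then 30 - 10 = 20 bytes, while the old order gave 100 - 10 = 90 and then clamped to 30, copying 10 bytes too many. The arithmetic in isolation:

    #include <stdio.h>

    /*
     * Clamp extent [eoff, eoff + elen) to clone range [off, off + len),
     * tail ("range b") first, head ("range a") second, as in the fix.
     */
    static unsigned long long clamp_len(unsigned long long eoff,
                                        unsigned long long elen,
                                        unsigned long long off,
                                        unsigned long long len)
    {
            if (eoff + elen > off + len)    /* subtract range b */
                    elen = off + len - eoff;
            if (off > eoff)                 /* subtract range a */
                    elen -= off - eoff;
            return elen;
    }

    int main(void)
    {
            printf("%llu\n", clamp_len(0, 100, 10, 20));    /* prints 20 */
            return 0;
    }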
+4
fs/btrfs/transaction.c
··· 884 884 struct btrfs_root *tree_root = fs_info->tree_root; 885 885 struct btrfs_root *root = pending->root; 886 886 struct btrfs_root *parent_root; 887 + struct btrfs_block_rsv *rsv; 887 888 struct inode *parent_inode; 888 889 struct dentry *parent; 889 890 struct dentry *dentry; ··· 895 894 u64 index = 0; 896 895 u64 objectid; 897 896 u64 root_flags; 897 + 898 + rsv = trans->block_rsv; 898 899 899 900 new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS); 900 901 if (!new_root_item) { ··· 1005 1002 btrfs_orphan_post_snapshot(trans, pending); 1006 1003 fail: 1007 1004 kfree(new_root_item); 1005 + trans->block_rsv = rsv; 1008 1006 btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1); 1009 1007 return 0; 1010 1008 }
+9
fs/btrfs/xattr.c
··· 116 116 if (ret) 117 117 goto out; 118 118 btrfs_release_path(path); 119 + 120 + /* 121 + * remove the attribute 122 + */ 123 + if (!value) 124 + goto out; 119 125 } 120 126 121 127 again: ··· 164 158 return ret; 165 159 } 166 160 161 + /* 162 + * @value: "" makes the attribute to empty, NULL removes it 163 + */ 167 164 int __btrfs_setxattr(struct btrfs_trans_handle *trans, 168 165 struct inode *inode, const char *name, 169 166 const void *value, size_t size, int flags)
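The new comment in xattr.c pins down the convention: a value of "" (size 0) truncates the attribute to empty, a NULL value removes it, and the early "if (!value) goto out" after the delete makes removal actually stop there instead of re-inserting. From user space the two cases correspond to setxattr(2) with size 0 versus removexattr(2); error handling omitted for brevity:

    #include <sys/xattr.h>

    int main(void)
    {
            const char *path = "testfile";  /* hypothetical file on btrfs */

            setxattr(path, "user.demo", "v", 1, 0); /* create with value "v" */
            setxattr(path, "user.demo", "", 0, 0);  /* attribute stays, now empty */
            removexattr(path, "user.demo");         /* attribute is gone */
            return 0;
    }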
+1 -1
fs/ceph/mds_client.c
··· 1595 1595 r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath); 1596 1596 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, 1597 1597 *ppath); 1598 - } else if (rpath) { 1598 + } else if (rpath || rino) { 1599 1599 *ino = rino; 1600 1600 *ppath = rpath; 1601 1601 *pathlen = strlen(rpath);
+2 -2
fs/ceph/super.c
··· 813 813 fsc = create_fs_client(fsopt, opt); 814 814 if (IS_ERR(fsc)) { 815 815 res = ERR_CAST(fsc); 816 - kfree(fsopt); 817 - kfree(opt); 816 + destroy_mount_options(fsopt); 817 + ceph_destroy_options(opt); 818 818 goto out_final; 819 819 } 820 820
+1
fs/ext4/ext4.h
··· 175 175 */ 176 176 #define EXT4_IO_END_UNWRITTEN 0x0001 177 177 #define EXT4_IO_END_ERROR 0x0002 178 + #define EXT4_IO_END_QUEUED 0x0004 178 179 179 180 struct ext4_io_page { 180 181 struct page *p_page;
-3
fs/ext4/inode.c
··· 121 121 122 122 trace_ext4_evict_inode(inode); 123 123 124 - mutex_lock(&inode->i_mutex); 125 - ext4_flush_completed_IO(inode); 126 - mutex_unlock(&inode->i_mutex); 127 124 ext4_ioend_wait(inode); 128 125 129 126 if (inode->i_nlink) {
+17 -1
fs/ext4/page-io.c
··· 142 142 unsigned long flags; 143 143 int ret; 144 144 145 - mutex_lock(&inode->i_mutex); 145 + if (!mutex_trylock(&inode->i_mutex)) { 146 + /* 147 + * Requeue the work instead of waiting so that the work 148 + * items queued after this can be processed. 149 + */ 150 + queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work); 151 + /* 152 + * To prevent the ext4-dio-unwritten thread from keeping 153 + * requeueing end_io requests and occupying cpu for too long, 154 + * yield the cpu if it sees an end_io request that has already 155 + * been requeued. 156 + */ 157 + if (io->flag & EXT4_IO_END_QUEUED) 158 + yield(); 159 + io->flag |= EXT4_IO_END_QUEUED; 160 + return; 161 + } 146 162 ret = ext4_end_io_nolock(io); 147 163 if (ret < 0) { 148 164 mutex_unlock(&inode->i_mutex);
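The ext4 end_io change swaps an unconditional mutex_lock() for trylock-or-requeue so one inode's busy i_mutex cannot stall every other completion on the shared dio_unwritten workqueue; the new EXT4_IO_END_QUEUED flag makes a twice-seen item yield() so the requeue loop cannot monopolize a CPU. The skeleton of that pattern, with hypothetical names and schedule_work() standing in for the dedicated queue:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/mutex.h>
    #include <linux/sched.h>

    #define MY_IO_QUEUED 0x1

    struct my_io_end {
            struct work_struct work;
            struct mutex *lock;
            unsigned int flags;
    };

    static void my_end_io_work(struct work_struct *work)
    {
            struct my_io_end *io = container_of(work, struct my_io_end, work);

            if (!mutex_trylock(io->lock)) {
                    schedule_work(&io->work);       /* let later items proceed */
                    if (io->flags & MY_IO_QUEUED)   /* already been around once */
                            yield();
                    io->flags |= MY_IO_QUEUED;
                    return;
            }
            /* ... completion work under the mutex ... */
            mutex_unlock(io->lock);
    }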
+8 -4
fs/fuse/dev.c
··· 258 258 forget->forget_one.nlookup = nlookup; 259 259 260 260 spin_lock(&fc->lock); 261 - fc->forget_list_tail->next = forget; 262 - fc->forget_list_tail = forget; 263 - wake_up(&fc->waitq); 264 - kill_fasync(&fc->fasync, SIGIO, POLL_IN); 261 + if (fc->connected) { 262 + fc->forget_list_tail->next = forget; 263 + fc->forget_list_tail = forget; 264 + wake_up(&fc->waitq); 265 + kill_fasync(&fc->fasync, SIGIO, POLL_IN); 266 + } else { 267 + kfree(forget); 268 + } 265 269 spin_unlock(&fc->lock); 266 270 } 267 271
+3
fs/fuse/inode.c
··· 812 812 if (arg->minor >= 17) { 813 813 if (!(arg->flags & FUSE_FLOCK_LOCKS)) 814 814 fc->no_flock = 1; 815 + } else { 816 + if (!(arg->flags & FUSE_POSIX_LOCKS)) 817 + fc->no_flock = 1; 815 818 } 816 819 if (arg->flags & FUSE_ATOMIC_O_TRUNC) 817 820 fc->atomic_o_trunc = 1;
+10 -5
fs/hfsplus/super.c
··· 344 344 struct inode *root, *inode; 345 345 struct qstr str; 346 346 struct nls_table *nls = NULL; 347 + u64 last_fs_block, last_fs_page; 347 348 int err; 348 349 349 350 err = -EINVAL; ··· 400 399 if (!sbi->rsrc_clump_blocks) 401 400 sbi->rsrc_clump_blocks = 1; 402 401 403 - err = generic_check_addressable(sbi->alloc_blksz_shift, 404 - sbi->total_blocks); 405 - if (err) { 402 + err = -EFBIG; 403 + last_fs_block = sbi->total_blocks - 1; 404 + last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >> 405 + PAGE_CACHE_SHIFT; 406 + 407 + if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) || 408 + (last_fs_page > (pgoff_t)(~0ULL))) { 406 409 printk(KERN_ERR "hfs: filesystem size too large.\n"); 407 410 goto out_free_vhdr; 408 411 } ··· 530 525 out_close_ext_tree: 531 526 hfs_btree_close(sbi->ext_tree); 532 527 out_free_vhdr: 533 - kfree(sbi->s_vhdr); 534 - kfree(sbi->s_backup_vhdr); 528 + kfree(sbi->s_vhdr_buf); 529 + kfree(sbi->s_backup_vhdr_buf); 535 530 out_unload_nls: 536 531 unload_nls(sbi->nls); 537 532 unload_nls(nls);
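hfsplus now open-codes what generic_check_addressable() used to decide: the last allocation block must fit in a sector_t once rescaled to 512-byte units, and the last byte must fit in a pgoff_t page index. Shifting the sector_t limit down by (alloc_blksz_shift - 9) instead of shifting the block count up keeps the comparison itself from overflowing. The same test in isolation, with 32-bit stand-ins for a sector_t/pgoff_t configuration where it can actually fire:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t sector_t;      /* stand-in: 32-bit, no CONFIG_LBDAF */
    typedef uint32_t pgoff_t;

    #define PAGE_CACHE_SHIFT 12     /* 4 KiB pages */

    static int fs_too_large(uint64_t total_blocks, int alloc_blksz_shift)
    {
            uint64_t last_fs_block = total_blocks - 1;
            uint64_t last_fs_page  = (last_fs_block << alloc_blksz_shift)
                                            >> PAGE_CACHE_SHIFT;

            return last_fs_block > ((sector_t)~0UL >> (alloc_blksz_shift - 9)) ||
                   last_fs_page  > (pgoff_t)~0UL;
    }

    int main(void)
    {
            /* 2^32 blocks of 4 KiB = 16 TiB: too big for 32-bit sectors */
            printf("%d\n", fs_too_large(1ULL << 32, 12));   /* prints 1 */
            return 0;
    }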
+2 -2
fs/hfsplus/wrapper.c
··· 272 272 return 0; 273 273 274 274 out_free_backup_vhdr: 275 - kfree(sbi->s_backup_vhdr); 275 + kfree(sbi->s_backup_vhdr_buf); 276 276 out_free_vhdr: 277 - kfree(sbi->s_vhdr); 277 + kfree(sbi->s_vhdr_buf); 278 278 out: 279 279 return error; 280 280 }
+19 -18
fs/namei.c
··· 727 727 if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT)) 728 728 return -EISDIR; /* we actually want to stop here */ 729 729 730 - /* 731 - * We don't want to mount if someone's just doing a stat and they've 732 - * set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and 733 - * appended a '/' to the name. 730 + /* We don't want to mount if someone's just doing a stat - 731 + * unless they're stat'ing a directory and appended a '/' to 732 + * the name. 733 + * 734 + * We do, however, want to mount if someone wants to open or 735 + * create a file of any type under the mountpoint, wants to 736 + * traverse through the mountpoint or wants to open the 737 + * mounted directory. Also, autofs may mark negative dentries 738 + * as being automount points. These will need the attentions 739 + * of the daemon to instantiate them before they can be used. 734 740 */ 735 - if (!(flags & LOOKUP_FOLLOW)) { 736 - /* We do, however, want to mount if someone wants to open or 737 - * create a file of any type under the mountpoint, wants to 738 - * traverse through the mountpoint or wants to open the mounted 739 - * directory. 740 - * Also, autofs may mark negative dentries as being automount 741 - * points. These will need the attentions of the daemon to 742 - * instantiate them before they can be used. 743 - */ 744 - if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | 745 - LOOKUP_OPEN | LOOKUP_CREATE)) && 746 - path->dentry->d_inode) 747 - return -EISDIR; 748 - } 741 + if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | 742 + LOOKUP_OPEN | LOOKUP_CREATE)) && 743 + path->dentry->d_inode) 744 + return -EISDIR; 745 + 749 746 current->total_link_count++; 750 747 if (current->total_link_count >= 40) 751 748 return -ELOOP; ··· 2616 2619 if (!dir->i_op->rmdir) 2617 2620 return -EPERM; 2618 2621 2622 + dget(dentry); 2619 2623 mutex_lock(&dentry->d_inode->i_mutex); 2620 2624 2621 2625 error = -EBUSY; ··· 2637 2639 2638 2640 out: 2639 2641 mutex_unlock(&dentry->d_inode->i_mutex); 2642 + dput(dentry); 2640 2643 if (!error) 2641 2644 d_delete(dentry); 2642 2645 return error; ··· 3027 3028 if (error) 3028 3029 return error; 3029 3030 3031 + dget(new_dentry); 3030 3032 if (target) 3031 3033 mutex_lock(&target->i_mutex); 3032 3034 ··· 3048 3048 out: 3049 3049 if (target) 3050 3050 mutex_unlock(&target->i_mutex); 3051 + dput(new_dentry); 3051 3052 if (!error) 3052 3053 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) 3053 3054 d_move(old_dentry,new_dentry);
+5 -3
fs/nfs/nfs4_fs.h
··· 56 56 NFS4_SESSION_DRAINING, 57 57 }; 58 58 59 + #define NFS4_RENEW_TIMEOUT 0x01 60 + #define NFS4_RENEW_DELEGATION_CB 0x02 61 + 59 62 struct nfs4_minor_version_ops { 60 63 u32 minor_version; 61 64 ··· 228 225 }; 229 226 230 227 struct nfs4_state_maintenance_ops { 231 - int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *); 228 + int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *, unsigned); 232 229 struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *); 233 230 int (*renew_lease)(struct nfs_client *, struct rpc_cred *); 234 231 }; ··· 240 237 extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *); 241 238 extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *); 242 239 extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred); 243 - extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *); 244 - extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *); 245 240 extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *); 246 241 extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *); 247 242 extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc); ··· 350 349 extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); 351 350 extern void nfs4_schedule_lease_recovery(struct nfs_client *); 352 351 extern void nfs4_schedule_state_manager(struct nfs_client *); 352 + extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp); 353 353 extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *); 354 354 extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); 355 355 extern void nfs41_handle_recall_slot(struct nfs_client *clp);
+14 -6
fs/nfs/nfs4proc.c
··· 3374 3374 3375 3375 if (task->tk_status < 0) { 3376 3376 /* Unless we're shutting down, schedule state recovery! */ 3377 - if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0) 3377 + if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 3378 + return; 3379 + if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 3378 3380 nfs4_schedule_lease_recovery(clp); 3379 - return; 3381 + return; 3382 + } 3383 + nfs4_schedule_path_down_recovery(clp); 3380 3384 } 3381 3385 do_renew_lease(clp, timestamp); 3382 3386 } ··· 3390 3386 .rpc_release = nfs4_renew_release, 3391 3387 }; 3392 3388 3393 - int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred) 3389 + static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 3394 3390 { 3395 3391 struct rpc_message msg = { 3396 3392 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], ··· 3399 3395 }; 3400 3396 struct nfs4_renewdata *data; 3401 3397 3398 + if (renew_flags == 0) 3399 + return 0; 3402 3400 if (!atomic_inc_not_zero(&clp->cl_count)) 3403 3401 return -EIO; 3404 - data = kmalloc(sizeof(*data), GFP_KERNEL); 3402 + data = kmalloc(sizeof(*data), GFP_NOFS); 3405 3403 if (data == NULL) 3406 3404 return -ENOMEM; 3407 3405 data->client = clp; ··· 3412 3406 &nfs4_renew_ops, data); 3413 3407 } 3414 3408 3415 - int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 3409 + static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 3416 3410 { 3417 3411 struct rpc_message msg = { 3418 3412 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], ··· 5510 5504 return rpc_run_task(&task_setup_data); 5511 5505 } 5512 5506 5513 - static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred) 5507 + static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 5514 5508 { 5515 5509 struct rpc_task *task; 5516 5510 int ret = 0; 5517 5511 5512 + if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 5513 + return 0; 5518 5514 task = _nfs41_proc_sequence(clp, cred); 5519 5515 if (IS_ERR(task)) 5520 5516 ret = PTR_ERR(task);
+9 -3
fs/nfs/nfs4renewd.c
··· 60 60 struct rpc_cred *cred; 61 61 long lease; 62 62 unsigned long last, now; 63 + unsigned renew_flags = 0; 63 64 64 65 ops = clp->cl_mvops->state_renewal_ops; 65 66 dprintk("%s: start\n", __func__); ··· 73 72 last = clp->cl_last_renewal; 74 73 now = jiffies; 75 74 /* Are we close to a lease timeout? */ 76 - if (time_after(now, last + lease/3)) { 75 + if (time_after(now, last + lease/3)) 76 + renew_flags |= NFS4_RENEW_TIMEOUT; 77 + if (nfs_delegations_present(clp)) 78 + renew_flags |= NFS4_RENEW_DELEGATION_CB; 79 + 80 + if (renew_flags != 0) { 77 81 cred = ops->get_state_renewal_cred_locked(clp); 78 82 spin_unlock(&clp->cl_lock); 79 83 if (cred == NULL) { 80 - if (!nfs_delegations_present(clp)) { 84 + if (!(renew_flags & NFS4_RENEW_DELEGATION_CB)) { 81 85 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 82 86 goto out; 83 87 } 84 88 nfs_expire_all_delegations(clp); 85 89 } else { 86 90 /* Queue an asynchronous RENEW. */ 87 - ops->sched_state_renewal(clp, cred); 91 + ops->sched_state_renewal(clp, cred, renew_flags); 88 92 put_rpccred(cred); 89 93 goto out_exp; 90 94 }
+6
fs/nfs/nfs4state.c
··· 1038 1038 nfs4_schedule_state_manager(clp); 1039 1039 } 1040 1040 1041 + void nfs4_schedule_path_down_recovery(struct nfs_client *clp) 1042 + { 1043 + nfs_handle_cb_pathdown(clp); 1044 + nfs4_schedule_state_manager(clp); 1045 + } 1046 + 1041 1047 static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) 1042 1048 { 1043 1049
+20 -3
fs/nfs/super.c
··· 2035 2035 sb->s_blocksize = nfs_block_bits(server->wsize, 2036 2036 &sb->s_blocksize_bits); 2037 2037 2038 - if (server->flags & NFS_MOUNT_NOAC) 2039 - sb->s_flags |= MS_SYNCHRONOUS; 2040 - 2041 2038 sb->s_bdi = &server->backing_dev_info; 2042 2039 2043 2040 nfs_super_set_maxbytes(sb, server->maxfilesize); ··· 2246 2249 if (server->flags & NFS_MOUNT_UNSHARED) 2247 2250 compare_super = NULL; 2248 2251 2252 + /* -o noac implies -o sync */ 2253 + if (server->flags & NFS_MOUNT_NOAC) 2254 + sb_mntdata.mntflags |= MS_SYNCHRONOUS; 2255 + 2249 2256 /* Get a superblock - note that we may end up sharing one that already exists */ 2250 2257 s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata); 2251 2258 if (IS_ERR(s)) { ··· 2361 2360 2362 2361 if (server->flags & NFS_MOUNT_UNSHARED) 2363 2362 compare_super = NULL; 2363 + 2364 + /* -o noac implies -o sync */ 2365 + if (server->flags & NFS_MOUNT_NOAC) 2366 + sb_mntdata.mntflags |= MS_SYNCHRONOUS; 2364 2367 2365 2368 /* Get a superblock - note that we may end up sharing one that already exists */ 2366 2369 s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata); ··· 2632 2627 2633 2628 if (server->flags & NFS4_MOUNT_UNSHARED) 2634 2629 compare_super = NULL; 2630 + 2631 + /* -o noac implies -o sync */ 2632 + if (server->flags & NFS_MOUNT_NOAC) 2633 + sb_mntdata.mntflags |= MS_SYNCHRONOUS; 2635 2634 2636 2635 /* Get a superblock - note that we may end up sharing one that already exists */ 2637 2636 s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata); ··· 2925 2916 if (server->flags & NFS4_MOUNT_UNSHARED) 2926 2917 compare_super = NULL; 2927 2918 2919 + /* -o noac implies -o sync */ 2920 + if (server->flags & NFS_MOUNT_NOAC) 2921 + sb_mntdata.mntflags |= MS_SYNCHRONOUS; 2922 + 2928 2923 /* Get a superblock - note that we may end up sharing one that already exists */ 2929 2924 s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata); 2930 2925 if (IS_ERR(s)) { ··· 3015 3002 3016 3003 if (server->flags & NFS4_MOUNT_UNSHARED) 3017 3004 compare_super = NULL; 3005 + 3006 + /* -o noac implies -o sync */ 3007 + if (server->flags & NFS_MOUNT_NOAC) 3008 + sb_mntdata.mntflags |= MS_SYNCHRONOUS; 3018 3009 3019 3010 /* Get a superblock - note that we may end up sharing one that already exists */ 3020 3011 s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
+1 -1
fs/nfs/write.c
··· 958 958 if (!data) 959 959 goto out_bad; 960 960 data->pagevec[0] = page; 961 - nfs_write_rpcsetup(req, data, wsize, offset, desc->pg_ioflags); 961 + nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags); 962 962 list_add(&data->list, res); 963 963 requests++; 964 964 nbytes -= len;
+3 -3
fs/ubifs/debug.h
··· 335 335 #define DBGKEY(key) ((char *)(key)) 336 336 #define DBGKEY1(key) ((char *)(key)) 337 337 338 - #define ubifs_dbg_msg(fmt, ...) do { \ 339 - if (0) \ 340 - pr_debug(fmt "\n", ##__VA_ARGS__); \ 338 + #define ubifs_dbg_msg(fmt, ...) do { \ 339 + if (0) \ 340 + printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \ 341 341 } while (0) 342 342 343 343 #define dbg_dump_stack()
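The ubifs stub swaps pr_debug() back for a raw printk(KERN_DEBUG ...) inside if (0): the compiler still type-checks the format string against its arguments, but nothing is built, whereas pr_debug() under CONFIG_DYNAMIC_DEBUG would still instantiate a callsite descriptor even in dead code (that is my reading of the motivation). The trick in miniature, with printf standing in for printk:

    #include <stdio.h>

    /* compiles to nothing, but fmt/args are still type-checked */
    #define dbg_msg(fmt, ...) do {                          \
            if (0)                                          \
                    printf(fmt "\n", ##__VA_ARGS__);        \
    } while (0)

    int main(void)
    {
            dbg_msg("count %d", 42);        /* emits nothing */
            /* dbg_msg("count %d", "x");       would warn: %d vs char * */
            return 0;
    }

(##__VA_ARGS__ is the GNU extension the kernel macro also relies on.)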
+2 -1
fs/xfs/xfs_aops.c
··· 1300 1300 bool is_async) 1301 1301 { 1302 1302 struct xfs_ioend *ioend = iocb->private; 1303 + struct inode *inode = ioend->io_inode; 1303 1304 1304 1305 /* 1305 1306 * blockdev_direct_IO can return an error even after the I/O ··· 1332 1331 } 1333 1332 1334 1333 /* XXX: probably should move into the real I/O completion handler */ 1335 - inode_dio_done(ioend->io_inode); 1334 + inode_dio_done(inode); 1336 1335 } 1337 1336 1338 1337 STATIC ssize_t
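The xfs_aops.c change reads like a use-after-free fix: ioend->io_inode is copied into a local before the completion path runs, so the later inode_dio_done() call no longer reaches through an ioend that may already have been freed. The general discipline, in outline (my_* names and finish_and_maybe_free() are hypothetical):

    #include <linux/fs.h>

    struct my_ioend {
            struct inode *io_inode;
            /* ... */
    };

    extern void finish_and_maybe_free(struct my_ioend *ioend);

    static void my_dio_complete(struct my_ioend *ioend)
    {
            struct inode *inode = ioend->io_inode; /* cache before handoff */

            finish_and_maybe_free(ioend);           /* may free ioend */

            inode_dio_done(inode);                  /* safe: no ioend deref */
    }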
+11 -3
fs/xfs/xfs_iops.c
··· 70 70 } 71 71 72 72 /* 73 - * If the linux inode is valid, mark it dirty. 74 - * Used when committing a dirty inode into a transaction so that 75 - * the inode will get written back by the linux code 73 + * If the linux inode is valid, mark it dirty, else mark the dirty state 74 + * in the XFS inode to make sure we pick it up when reclaiming the inode. 76 75 */ 77 76 void 78 77 xfs_mark_inode_dirty_sync( ··· 81 82 82 83 if (!(inode->i_state & (I_WILL_FREE|I_FREEING))) 83 84 mark_inode_dirty_sync(inode); 85 + else { 86 + barrier(); 87 + ip->i_update_core = 1; 88 + } 84 89 } 85 90 86 91 void ··· 95 92 96 93 if (!(inode->i_state & (I_WILL_FREE|I_FREEING))) 97 94 mark_inode_dirty(inode); 95 + else { 96 + barrier(); 97 + ip->i_update_core = 1; 98 + } 99 + 98 100 } 99 101 100 102 /*
+11 -25
fs/xfs/xfs_super.c
··· 356 356 mp->m_flags |= XFS_MOUNT_DELAYLOG; 357 357 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { 358 358 mp->m_flags &= ~XFS_MOUNT_DELAYLOG; 359 + xfs_warn(mp, 360 + "nodelaylog is deprecated and will be removed in Linux 3.3"); 359 361 } else if (!strcmp(this_char, MNTOPT_DISCARD)) { 360 362 mp->m_flags |= XFS_MOUNT_DISCARD; 361 363 } else if (!strcmp(this_char, MNTOPT_NODISCARD)) { ··· 879 877 struct xfs_trans *tp; 880 878 int error; 881 879 882 - xfs_iunlock(ip, XFS_ILOCK_SHARED); 883 880 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); 884 881 error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); 885 - 886 882 if (error) { 887 883 xfs_trans_cancel(tp, 0); 888 - /* we need to return with the lock hold shared */ 889 - xfs_ilock(ip, XFS_ILOCK_SHARED); 890 884 return error; 891 885 } 892 886 893 887 xfs_ilock(ip, XFS_ILOCK_EXCL); 894 - 895 - /* 896 - * Note - it's possible that we might have pushed ourselves out of the 897 - * way during trans_reserve which would flush the inode. But there's 898 - * no guarantee that the inode buffer has actually gone out yet (it's 899 - * delwri). Plus the buffer could be pinned anyway if it's part of 900 - * an inode in another recent transaction. So we play it safe and 901 - * fire off the transaction anyway. 902 - */ 903 - xfs_trans_ijoin(tp, ip); 888 + xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL); 904 889 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 905 - error = xfs_trans_commit(tp, 0); 906 - xfs_ilock_demote(ip, XFS_ILOCK_EXCL); 907 - 908 - return error; 890 + return xfs_trans_commit(tp, 0); 909 891 } 910 892 911 893 STATIC int ··· 904 918 trace_xfs_write_inode(ip); 905 919 906 920 if (XFS_FORCED_SHUTDOWN(mp)) 907 - return XFS_ERROR(EIO); 921 + return -XFS_ERROR(EIO); 922 + if (!ip->i_update_core) 923 + return 0; 908 924 909 925 if (wbc->sync_mode == WB_SYNC_ALL) { 910 926 /* ··· 917 929 * of synchronous log foces dramatically. 918 930 */ 919 931 xfs_ioend_wait(ip); 920 - xfs_ilock(ip, XFS_ILOCK_SHARED); 921 - if (ip->i_update_core) { 922 - error = xfs_log_inode(ip); 923 - if (error) 924 - goto out_unlock; 925 - } 932 + error = xfs_log_inode(ip); 933 + if (error) 934 + goto out; 935 + return 0; 926 936 } else { 927 937 /* 928 938 * We make this non-blocking if the inode is contended, return
+5 -10
include/linux/basic_mmio_gpio.h
··· 63 63 return container_of(gc, struct bgpio_chip, gc); 64 64 } 65 65 66 - int __devexit bgpio_remove(struct bgpio_chip *bgc); 67 - int __devinit bgpio_init(struct bgpio_chip *bgc, 68 - struct device *dev, 69 - unsigned long sz, 70 - void __iomem *dat, 71 - void __iomem *set, 72 - void __iomem *clr, 73 - void __iomem *dirout, 74 - void __iomem *dirin, 75 - bool big_endian); 66 + int bgpio_remove(struct bgpio_chip *bgc); 67 + int bgpio_init(struct bgpio_chip *bgc, struct device *dev, 68 + unsigned long sz, void __iomem *dat, void __iomem *set, 69 + void __iomem *clr, void __iomem *dirout, void __iomem *dirin, 70 + bool big_endian); 76 71 77 72 #endif /* __BASIC_MMIO_GPIO_H */
-19
include/linux/memcontrol.h
··· 39 39 struct mem_cgroup *mem_cont, 40 40 int active, int file); 41 41 42 - struct memcg_scanrecord { 43 - struct mem_cgroup *mem; /* scanend memory cgroup */ 44 - struct mem_cgroup *root; /* scan target hierarchy root */ 45 - int context; /* scanning context (see memcontrol.c) */ 46 - unsigned long nr_scanned[2]; /* the number of scanned pages */ 47 - unsigned long nr_rotated[2]; /* the number of rotated pages */ 48 - unsigned long nr_freed[2]; /* the number of freed pages */ 49 - unsigned long elapsed; /* nsec of time elapsed while scanning */ 50 - }; 51 - 52 42 #ifdef CONFIG_CGROUP_MEM_RES_CTLR 53 43 /* 54 44 * All "charge" functions with gfp_mask should use GFP_KERNEL or ··· 116 126 mem_cgroup_get_reclaim_stat_from_page(struct page *page); 117 127 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, 118 128 struct task_struct *p); 119 - 120 - extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, 121 - gfp_t gfp_mask, bool noswap, 122 - struct memcg_scanrecord *rec); 123 - extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 124 - gfp_t gfp_mask, bool noswap, 125 - struct zone *zone, 126 - struct memcg_scanrecord *rec, 127 - unsigned long *nr_scanned); 128 129 129 130 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 130 131 extern int do_swap_account;
+1 -1
include/linux/mfd/wm8994/pdata.h
··· 26 26 struct regulator_init_data *init_data; 27 27 }; 28 28 29 - #define WM8994_CONFIGURE_GPIO 0x8000 29 + #define WM8994_CONFIGURE_GPIO 0x10000 30 30 31 31 #define WM8994_DRC_REGS 5 32 32 #define WM8994_EQ_REGS 20
+15 -9
include/linux/perf_event.h
··· 944 944 945 945 extern int perf_num_counters(void); 946 946 extern const char *perf_pmu_name(void); 947 - extern void __perf_event_task_sched_in(struct task_struct *task); 948 - extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); 947 + extern void __perf_event_task_sched_in(struct task_struct *prev, 948 + struct task_struct *task); 949 + extern void __perf_event_task_sched_out(struct task_struct *prev, 950 + struct task_struct *next); 949 951 extern int perf_event_init_task(struct task_struct *child); 950 952 extern void perf_event_exit_task(struct task_struct *child); 951 953 extern void perf_event_free_task(struct task_struct *task); ··· 1061 1059 1062 1060 extern struct jump_label_key perf_sched_events; 1063 1061 1064 - static inline void perf_event_task_sched_in(struct task_struct *task) 1062 + static inline void perf_event_task_sched_in(struct task_struct *prev, 1063 + struct task_struct *task) 1065 1064 { 1066 1065 if (static_branch(&perf_sched_events)) 1067 - __perf_event_task_sched_in(task); 1066 + __perf_event_task_sched_in(prev, task); 1068 1067 } 1069 1068 1070 - static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) 1069 + static inline void perf_event_task_sched_out(struct task_struct *prev, 1070 + struct task_struct *next) 1071 1071 { 1072 1072 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); 1073 1073 1074 - __perf_event_task_sched_out(task, next); 1074 + if (static_branch(&perf_sched_events)) 1075 + __perf_event_task_sched_out(prev, next); 1075 1076 } 1076 1077 1077 1078 extern void perf_event_mmap(struct vm_area_struct *vma); ··· 1144 1139 extern void perf_event_task_tick(void); 1145 1140 #else 1146 1141 static inline void 1147 - perf_event_task_sched_in(struct task_struct *task) { } 1142 + perf_event_task_sched_in(struct task_struct *prev, 1143 + struct task_struct *task) { } 1148 1144 static inline void 1149 - perf_event_task_sched_out(struct task_struct *task, 1150 - struct task_struct *next) { } 1145 + perf_event_task_sched_out(struct task_struct *prev, 1146 + struct task_struct *next) { } 1151 1147 static inline int perf_event_init_task(struct task_struct *child) { return 0; } 1152 1148 static inline void perf_event_exit_task(struct task_struct *child) { } 1153 1149 static inline void perf_event_free_task(struct task_struct *task) { }
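The perf_event.h hunk above shows a common kernel pattern: the hot-path hook shrinks to a cheap guarded test (static_branch(&perf_sched_events), now applied to the sched-out path as well as sched-in), while the real work lives in an out-of-line __perf_event_* function. A minimal userspace sketch of the same shape, with a plain boolean standing in for the kernel's patched jump label (all names here are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Out-of-line slow path; only reached while the feature is active. */
static void hook_task_switch_slow(int prev, int next)
{
        printf("perf hook: switch %d -> %d\n", prev, next);
}

/* Cheap guard, playing the role of static_branch(&perf_sched_events). */
static bool hook_enabled;

static inline void hook_task_switch(int prev, int next)
{
        if (hook_enabled)       /* the kernel patches a jump here instead */
                hook_task_switch_slow(prev, next);
}

int main(void)
{
        hook_task_switch(1, 2);         /* disabled: no work, no output */
        hook_enabled = true;
        hook_task_switch(2, 3);         /* enabled: slow path runs */
        return 0;
}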
+1 -1
include/linux/regulator/consumer.h
··· 123 123 const char *supply; 124 124 struct regulator *consumer; 125 125 126 - /* Internal use */ 126 + /* private: Internal use */ 127 127 int ret; 128 128 }; 129 129
+1
include/linux/skbuff.h
··· 524 524 extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); 525 525 526 526 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); 527 + extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); 527 528 extern struct sk_buff *skb_clone(struct sk_buff *skb, 528 529 gfp_t priority); 529 530 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
+2
include/linux/snmp.h
··· 231 231 LINUX_MIB_TCPDEFERACCEPTDROP, 232 232 LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */ 233 233 LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */ 234 + LINUX_MIB_TCPREQQFULLDOCOOKIES, /* TCPReqQFullDoCookies */ 235 + LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */ 234 236 __LINUX_MIB_MAX 235 237 }; 236 238
+6
include/linux/swap.h
··· 252 252 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 253 253 gfp_t gfp_mask, nodemask_t *mask); 254 254 extern int __isolate_lru_page(struct page *page, int mode, int file); 255 + extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, 256 + gfp_t gfp_mask, bool noswap); 257 + extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 258 + gfp_t gfp_mask, bool noswap, 259 + struct zone *zone, 260 + unsigned long *nr_scanned); 255 261 extern unsigned long shrink_all_memory(unsigned long nr_pages); 256 262 extern int vm_swappiness; 257 263 extern int remove_mapping(struct address_space *mapping, struct page *page);
+29
include/net/9p/9p.h
··· 288 288 P9_DMSETVTX = 0x00010000, 289 289 }; 290 290 291 + /* 9p2000.L open flags */ 292 + #define P9_DOTL_RDONLY 00000000 293 + #define P9_DOTL_WRONLY 00000001 294 + #define P9_DOTL_RDWR 00000002 295 + #define P9_DOTL_NOACCESS 00000003 296 + #define P9_DOTL_CREATE 00000100 297 + #define P9_DOTL_EXCL 00000200 298 + #define P9_DOTL_NOCTTY 00000400 299 + #define P9_DOTL_TRUNC 00001000 300 + #define P9_DOTL_APPEND 00002000 301 + #define P9_DOTL_NONBLOCK 00004000 302 + #define P9_DOTL_DSYNC 00010000 303 + #define P9_DOTL_FASYNC 00020000 304 + #define P9_DOTL_DIRECT 00040000 305 + #define P9_DOTL_LARGEFILE 00100000 306 + #define P9_DOTL_DIRECTORY 00200000 307 + #define P9_DOTL_NOFOLLOW 00400000 308 + #define P9_DOTL_NOATIME 01000000 309 + #define P9_DOTL_CLOEXEC 02000000 310 + #define P9_DOTL_SYNC 04000000 311 + 312 + /* 9p2000.L at flags */ 313 + #define P9_DOTL_AT_REMOVEDIR 0x200 314 + 315 + /* 9p2000.L lock type */ 316 + #define P9_LOCK_TYPE_RDLCK 0 317 + #define P9_LOCK_TYPE_WRLCK 1 318 + #define P9_LOCK_TYPE_UNLCK 2 319 + 291 320 /** 292 321 * enum p9_qid_t - QID types 293 322 * @P9_QTDIR: directory
+2
include/net/cfg80211.h
··· 1744 1744 * by default for perm_addr. In this case, the mask should be set to 1745 1745 * all-zeroes. In this case it is assumed that the device can handle 1746 1746 * the same number of arbitrary MAC addresses. 1747 + * @registered: protects ->resume and ->suspend sysfs callbacks against 1748 + * hardware unregistration 1747 1749 * @debugfsdir: debugfs directory used for this wiphy, will be renamed 1748 1750 * automatically on wiphy renames 1749 1751 * @dev: (virtual) struct device for this wiphy
+22 -3
include/net/flow.h
··· 7 7 #ifndef _NET_FLOW_H 8 8 #define _NET_FLOW_H 9 9 10 + #include <linux/socket.h> 10 11 #include <linux/in6.h> 11 12 #include <linux/atomic.h> 12 13 ··· 69 68 #define fl4_ipsec_spi uli.spi 70 69 #define fl4_mh_type uli.mht.type 71 70 #define fl4_gre_key uli.gre_key 72 - }; 71 + } __attribute__((__aligned__(BITS_PER_LONG/8))); 73 72 74 73 static inline void flowi4_init_output(struct flowi4 *fl4, int oif, 75 74 __u32 mark, __u8 tos, __u8 scope, ··· 113 112 #define fl6_ipsec_spi uli.spi 114 113 #define fl6_mh_type uli.mht.type 115 114 #define fl6_gre_key uli.gre_key 116 - }; 115 + } __attribute__((__aligned__(BITS_PER_LONG/8))); 117 116 118 117 struct flowidn { 119 118 struct flowi_common __fl_common; ··· 128 127 union flowi_uli uli; 129 128 #define fld_sport uli.ports.sport 130 129 #define fld_dport uli.ports.dport 131 - }; 130 + } __attribute__((__aligned__(BITS_PER_LONG/8))); 132 131 133 132 struct flowi { 134 133 union { ··· 160 159 static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn) 161 160 { 162 161 return container_of(fldn, struct flowi, u.dn); 162 + } 163 + 164 + typedef unsigned long flow_compare_t; 165 + 166 + static inline size_t flow_key_size(u16 family) 167 + { 168 + switch (family) { 169 + case AF_INET: 170 + BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t)); 171 + return sizeof(struct flowi4) / sizeof(flow_compare_t); 172 + case AF_INET6: 173 + BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t)); 174 + return sizeof(struct flowi6) / sizeof(flow_compare_t); 175 + case AF_DECnet: 176 + BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t)); 177 + return sizeof(struct flowidn) / sizeof(flow_compare_t); 178 + } 179 + return 0; 163 180 } 164 181 165 182 #define FLOW_DIR_IN 0
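flow_key_size() above lets the flow cache hash and compare only the per-family prefix of struct flowi, measured in flow_compare_t words; the BUILD_BUG_ON()s guarantee at compile time that each key is a whole number of words, which the new __aligned__(BITS_PER_LONG/8) attributes back up. A self-contained sketch of that compile-time check using C11 _Static_assert (the key layouts are invented, not the real flowi4/flowi6):

#include <stddef.h>
#include <stdio.h>

typedef unsigned long flow_compare_t;

/* Invented key layouts; the padding keeps each a whole number of
 * comparison words, mirroring what the alignment attribute ensures
 * for the real flowi structures. */
struct key4 { unsigned int saddr, daddr, ports, pad; };
struct key6 { unsigned int saddr[4], daddr[4], ports, pad; };

_Static_assert(sizeof(struct key4) % sizeof(flow_compare_t) == 0, "key4");
_Static_assert(sizeof(struct key6) % sizeof(flow_compare_t) == 0, "key6");

/* Length of the hash/compare region, in words, per address family. */
static size_t key_words(int family)
{
        switch (family) {
        case 4: return sizeof(struct key4) / sizeof(flow_compare_t);
        case 6: return sizeof(struct key6) / sizeof(flow_compare_t);
        }
        return 0;       /* unknown family: caller must bypass the cache */
}

int main(void)
{
        printf("v4: %zu words, v6: %zu words\n", key_words(4), key_words(6));
        return 0;
}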
+2 -1
include/net/request_sock.h
··· 96 96 */ 97 97 struct listen_sock { 98 98 u8 max_qlen_log; 99 - /* 3 bytes hole, try to use */ 99 + u8 synflood_warned; 100 + /* 2 bytes hole, try to use */ 100 101 int qlen; 101 102 int qlen_young; 102 103 int clock_hand;
+1
include/net/sctp/command.h
··· 109 109 SCTP_CMD_SEND_MSG, /* Send the whole user message */ 110 110 SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */ 111 111 SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues. */ 112 + SCTP_CMD_SET_ASOC, /* Restore association context */ 112 113 SCTP_CMD_LAST 113 114 } sctp_verb_t; 114 115
+21 -1
include/net/tcp.h
··· 431 431 extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; 432 432 extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 433 433 struct ip_options *opt); 434 + #ifdef CONFIG_SYN_COOKIES 434 435 extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, 435 436 __u16 *mss); 437 + #else 438 + static inline __u32 cookie_v4_init_sequence(struct sock *sk, 439 + struct sk_buff *skb, 440 + __u16 *mss) 441 + { 442 + return 0; 443 + } 444 + #endif 436 445 437 446 extern __u32 cookie_init_timestamp(struct request_sock *req); 438 447 extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *); 439 448 440 449 /* From net/ipv6/syncookies.c */ 441 450 extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); 451 + #ifdef CONFIG_SYN_COOKIES 442 452 extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, 443 453 __u16 *mss); 444 - 454 + #else 455 + static inline __u32 cookie_v6_init_sequence(struct sock *sk, 456 + struct sk_buff *skb, 457 + __u16 *mss) 458 + { 459 + return 0; 460 + } 461 + #endif 445 462 /* tcp_output.c */ 446 463 447 464 extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, ··· 477 460 extern void tcp_send_fin(struct sock *sk); 478 461 extern void tcp_send_active_reset(struct sock *sk, gfp_t priority); 479 462 extern int tcp_send_synack(struct sock *); 463 + extern int tcp_syn_flood_action(struct sock *sk, 464 + const struct sk_buff *skb, 465 + const char *proto); 480 466 extern void tcp_push_one(struct sock *, unsigned int mss_now); 481 467 extern void tcp_send_ack(struct sock *sk); 482 468 extern void tcp_send_delayed_ack(struct sock *sk);
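The tcp.h change pairs each cookie_v4/v6_init_sequence() declaration with a static inline stub that returns 0 when CONFIG_SYN_COOKIES is off, so callers no longer need #ifdef blocks of their own. The pattern in isolation (feature macro and function name invented):

#include <stdio.h>

/* #define FEATURE_COOKIES 1   <- the real function is linked in when set */

#ifdef FEATURE_COOKIES
unsigned int cookie_init_seq(unsigned int seed);        /* provided elsewhere */
#else
/* Compiled-out stub: callers may invoke it unconditionally. */
static inline unsigned int cookie_init_seq(unsigned int seed)
{
        (void)seed;
        return 0;
}
#endif

int main(void)
{
        printf("%u\n", cookie_init_seq(42));    /* 0 while the feature is off */
        return 0;
}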
+1
include/net/transp_v6.h
··· 39 39 struct sk_buff *skb); 40 40 41 41 extern int datagram_send_ctl(struct net *net, 42 + struct sock *sk, 42 43 struct msghdr *msg, 43 44 struct flowi6 *fl6, 44 45 struct ipv6_txoptions *opt,
+55 -12
kernel/events/core.c
··· 399 399 local_irq_restore(flags); 400 400 } 401 401 402 - static inline void perf_cgroup_sched_out(struct task_struct *task) 402 + static inline void perf_cgroup_sched_out(struct task_struct *task, 403 + struct task_struct *next) 403 404 { 404 - perf_cgroup_switch(task, PERF_CGROUP_SWOUT); 405 + struct perf_cgroup *cgrp1; 406 + struct perf_cgroup *cgrp2 = NULL; 407 + 408 + /* 409 + * we come here when we know perf_cgroup_events > 0 410 + */ 411 + cgrp1 = perf_cgroup_from_task(task); 412 + 413 + /* 414 + * next is NULL when called from perf_event_enable_on_exec() 415 + * that will systematically cause a cgroup_switch() 416 + */ 417 + if (next) 418 + cgrp2 = perf_cgroup_from_task(next); 419 + 420 + /* 421 + * only schedule out current cgroup events if we know 422 + * that we are switching to a different cgroup. Otherwise, 423 + * do not touch the cgroup events. 424 + */ 425 + if (cgrp1 != cgrp2) 426 + perf_cgroup_switch(task, PERF_CGROUP_SWOUT); 405 427 } 406 428 407 - static inline void perf_cgroup_sched_in(struct task_struct *task) 429 + static inline void perf_cgroup_sched_in(struct task_struct *prev, 430 + struct task_struct *task) 408 431 { 409 - perf_cgroup_switch(task, PERF_CGROUP_SWIN); 432 + struct perf_cgroup *cgrp1; 433 + struct perf_cgroup *cgrp2 = NULL; 434 + 435 + /* 436 + * we come here when we know perf_cgroup_events > 0 437 + */ 438 + cgrp1 = perf_cgroup_from_task(task); 439 + 440 + /* prev can never be NULL */ 441 + cgrp2 = perf_cgroup_from_task(prev); 442 + 443 + /* 444 + * only need to schedule in cgroup events if we are changing 445 + * cgroup during ctxsw. Cgroup events were not scheduled 446 + * out at ctxsw if that was not the case. 447 + */ 448 + if (cgrp1 != cgrp2) 449 + perf_cgroup_switch(task, PERF_CGROUP_SWIN); 410 450 } 411 451 412 452 static inline int perf_cgroup_connect(int fd, struct perf_event *event, ··· 558 518 { 559 519 } 560 520 561 - static inline void perf_cgroup_sched_out(struct task_struct *task) 521 + static inline void perf_cgroup_sched_out(struct task_struct *task, 522 + struct task_struct *next) 562 523 { 563 524 } 564 525 565 - static inline void perf_cgroup_sched_in(struct task_struct *task) 526 + static inline void perf_cgroup_sched_in(struct task_struct *prev, 527 + struct task_struct *task) 566 528 { 567 529 } 568 530 ··· 2030 1988 * cgroup event are system-wide mode only 2031 1989 */ 2032 1990 if (atomic_read(&__get_cpu_var(perf_cgroup_events))) 2033 - perf_cgroup_sched_out(task); 1991 + perf_cgroup_sched_out(task, next); 2034 1992 } 2035 1993 2036 1994 static void task_ctx_sched_out(struct perf_event_context *ctx) ··· 2195 2153 * accessing the event control register. If a NMI hits, then it will 2196 2154 * keep the event running. 2197 2155 */ 2198 - void __perf_event_task_sched_in(struct task_struct *task) 2156 + void __perf_event_task_sched_in(struct task_struct *prev, 2157 + struct task_struct *task) 2199 2158 { 2200 2159 struct perf_event_context *ctx; 2201 2160 int ctxn; ··· 2214 2171 * cgroup event are system-wide mode only 2215 2172 */ 2216 2173 if (atomic_read(&__get_cpu_var(perf_cgroup_events))) 2217 - perf_cgroup_sched_in(task); 2174 + perf_cgroup_sched_in(prev, task); 2218 2175 } 2219 2176 2220 2177 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) ··· 2470 2427 * ctxswin cgroup events which are already scheduled 2471 2428 * in.
2472 2429 */ 2473 - perf_cgroup_sched_out(current); 2430 + perf_cgroup_sched_out(current, NULL); 2474 2431 2475 2432 raw_spin_lock(&ctx->lock); 2476 2433 task_ctx_sched_out(ctx); ··· 3396 3353 } 3397 3354 3398 3355 static void calc_timer_values(struct perf_event *event, 3399 - u64 *running, 3400 - u64 *enabled) 3356 + u64 *enabled, 3357 + u64 *running) 3401 3358 { 3402 3359 u64 now, ctx_time; 3403 3360
+1 -1
kernel/irq/chip.c
··· 178 178 desc->depth = 1; 179 179 if (desc->irq_data.chip->irq_shutdown) 180 180 desc->irq_data.chip->irq_shutdown(&desc->irq_data); 181 - if (desc->irq_data.chip->irq_disable) 181 + else if (desc->irq_data.chip->irq_disable) 182 182 desc->irq_data.chip->irq_disable(&desc->irq_data); 183 183 else 184 184 desc->irq_data.chip->irq_mask(&desc->irq_data);
+27 -16
kernel/sched.c
··· 3065 3065 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 3066 3066 local_irq_disable(); 3067 3067 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ 3068 - perf_event_task_sched_in(current); 3068 + perf_event_task_sched_in(prev, current); 3069 3069 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 3070 3070 local_irq_enable(); 3071 3071 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ ··· 4279 4279 } 4280 4280 4281 4281 /* 4282 - * schedule() is the main scheduler function. 4282 + * __schedule() is the main scheduler function. 4283 4283 */ 4284 - asmlinkage void __sched schedule(void) 4284 + static void __sched __schedule(void) 4285 4285 { 4286 4286 struct task_struct *prev, *next; 4287 4287 unsigned long *switch_count; ··· 4322 4322 if (to_wakeup) 4323 4323 try_to_wake_up_local(to_wakeup); 4324 4324 } 4325 - 4326 - /* 4327 - * If we are going to sleep and we have plugged IO 4328 - * queued, make sure to submit it to avoid deadlocks. 4329 - */ 4330 - if (blk_needs_flush_plug(prev)) { 4331 - raw_spin_unlock(&rq->lock); 4332 - blk_schedule_flush_plug(prev); 4333 - raw_spin_lock(&rq->lock); 4334 - } 4335 4325 } 4336 4326 switch_count = &prev->nvcsw; 4337 4327 } ··· 4358 4368 preempt_enable_no_resched(); 4359 4369 if (need_resched()) 4360 4370 goto need_resched; 4371 + } 4372 + 4373 + static inline void sched_submit_work(struct task_struct *tsk) 4374 + { 4375 + if (!tsk->state) 4376 + return; 4377 + /* 4378 + * If we are going to sleep and we have plugged IO queued, 4379 + * make sure to submit it to avoid deadlocks. 4380 + */ 4381 + if (blk_needs_flush_plug(tsk)) 4382 + blk_schedule_flush_plug(tsk); 4383 + } 4384 + 4385 + asmlinkage void schedule(void) 4386 + { 4387 + struct task_struct *tsk = current; 4388 + 4389 + sched_submit_work(tsk); 4390 + __schedule(); 4361 4391 } 4362 4392 EXPORT_SYMBOL(schedule); 4363 4393 ··· 4445 4435 4446 4436 do { 4447 4437 add_preempt_count_notrace(PREEMPT_ACTIVE); 4448 - schedule(); 4438 + __schedule(); 4449 4439 sub_preempt_count_notrace(PREEMPT_ACTIVE); 4450 4440 4451 4441 /* ··· 4473 4463 do { 4474 4464 add_preempt_count(PREEMPT_ACTIVE); 4475 4465 local_irq_enable(); 4476 - schedule(); 4466 + __schedule(); 4477 4467 local_irq_disable(); 4478 4468 sub_preempt_count(PREEMPT_ACTIVE); 4479 4469 ··· 5598 5588 static void __cond_resched(void) 5599 5589 { 5600 5590 add_preempt_count(PREEMPT_ACTIVE); 5601 - schedule(); 5591 + __schedule(); 5602 5592 sub_preempt_count(PREEMPT_ACTIVE); 5603 5593 } 5604 5594 ··· 7453 7443 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j); 7454 7444 if (sd && (sd->flags & SD_OVERLAP)) 7455 7445 free_sched_groups(sd->groups, 0); 7446 + kfree(*per_cpu_ptr(sdd->sd, j)); 7456 7447 kfree(*per_cpu_ptr(sdd->sg, j)); 7457 7448 kfree(*per_cpu_ptr(sdd->sgp, j)); 7458 7449 }
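The sched.c rework above splits the old schedule() in two: sched_submit_work() flushes any plugged block I/O before the scheduler core runs, and __schedule() does the actual switch, so internal callers like preempt_schedule() and __cond_resched() can bypass the flush entirely. A rough userspace-flavoured sketch of that wrapper split (the task layout and names are illustrative only, not the kernel's):

#include <stdio.h>

struct task { int sleeping; int plugged_io; };

static void flush_plugged_io(struct task *t)
{
        printf("submitting %d plugged requests\n", t->plugged_io);
        t->plugged_io = 0;
}

/* Core path: no side work here, safe for preemption call sites. */
static void core_schedule(void)
{
        puts("context switch");
}

/* Public entry point: submit pending I/O before going to sleep,
 * outside any scheduler-internal locks. */
static void schedule(struct task *t)
{
        if (t->sleeping && t->plugged_io)
                flush_plugged_io(t);
        core_schedule();
}

int main(void)
{
        struct task t = { .sleeping = 1, .plugged_io = 3 };
        schedule(&t);
        return 0;
}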
+1
kernel/taskstats.c
··· 655 655 .cmd = TASKSTATS_CMD_GET, 656 656 .doit = taskstats_user_cmd, 657 657 .policy = taskstats_cmd_get_policy, 658 + .flags = GENL_ADMIN_PERM, 658 659 }; 659 660 660 661 static struct genl_ops cgroupstats_ops = {
+13 -5
kernel/time/alarmtimer.c
··· 441 441 static void alarm_timer_get(struct k_itimer *timr, 442 442 struct itimerspec *cur_setting) 443 443 { 444 + memset(cur_setting, 0, sizeof(struct itimerspec)); 445 + 444 446 cur_setting->it_interval = 445 447 ktime_to_timespec(timr->it.alarmtimer.period); 446 448 cur_setting->it_value = ··· 481 479 if (!rtcdev) 482 480 return -ENOTSUPP; 483 481 484 - /* Save old values */ 485 - old_setting->it_interval = 486 - ktime_to_timespec(timr->it.alarmtimer.period); 487 - old_setting->it_value = 488 - ktime_to_timespec(timr->it.alarmtimer.node.expires); 482 + /* 483 + * XXX HACK! Currently we can DOS a system if the interval 484 + * period on alarmtimers is too small. Cap the interval here 485 + * to 100us and solve this properly in a future patch! -jstultz 486 + */ 487 + if ((new_setting->it_interval.tv_sec == 0) && 488 + (new_setting->it_interval.tv_nsec < 100000)) 489 + new_setting->it_interval.tv_nsec = 100000; 490 + 491 + if (old_setting) 492 + alarm_timer_get(timr, old_setting); 489 493 490 494 /* If the timer was already set, cancel it */ 491 495 alarm_cancel(&timr->it.alarmtimer);
+8 -7
kernel/tsacct.c
··· 78 78 79 79 #define KB 1024 80 80 #define MB (1024*KB) 81 + #define KB_MASK (~(KB-1)) 81 82 /* 82 83 * fill in extended accounting fields 83 84 */ ··· 96 95 stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB; 97 96 mmput(mm); 98 97 } 99 - stats->read_char = p->ioac.rchar; 100 - stats->write_char = p->ioac.wchar; 101 - stats->read_syscalls = p->ioac.syscr; 102 - stats->write_syscalls = p->ioac.syscw; 98 + stats->read_char = p->ioac.rchar & KB_MASK; 99 + stats->write_char = p->ioac.wchar & KB_MASK; 100 + stats->read_syscalls = p->ioac.syscr & KB_MASK; 101 + stats->write_syscalls = p->ioac.syscw & KB_MASK; 103 102 #ifdef CONFIG_TASK_IO_ACCOUNTING 104 - stats->read_bytes = p->ioac.read_bytes; 105 - stats->write_bytes = p->ioac.write_bytes; 106 - stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes; 103 + stats->read_bytes = p->ioac.read_bytes & KB_MASK; 104 + stats->write_bytes = p->ioac.write_bytes & KB_MASK; 105 + stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK; 107 106 #else 108 107 stats->read_bytes = 0; 109 108 stats->write_bytes = 0;
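KB_MASK above is ~(KB-1), so ANDing a counter with it rounds the value down to a whole multiple of 1024, hiding the low ten bits before they are exported. Worked on one value (numbers arbitrary):

#include <stdio.h>

#define KB      1024UL
#define KB_MASK (~(KB - 1))     /* ...1111110000000000 in binary */

int main(void)
{
        unsigned long rchar = 5000;     /* 4 KiB plus 904 bytes */
        /* 5000 & KB_MASK == 4096: the sub-KiB remainder is discarded */
        printf("%lu -> %lu\n", rchar, rchar & KB_MASK);
        return 0;
}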
+6 -1
kernel/workqueue.c
··· 2412 2412 2413 2413 for_each_cwq_cpu(cpu, wq) { 2414 2414 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 2415 + bool drained; 2415 2416 2416 - if (!cwq->nr_active && list_empty(&cwq->delayed_works)) 2417 + spin_lock_irq(&cwq->gcwq->lock); 2418 + drained = !cwq->nr_active && list_empty(&cwq->delayed_works); 2419 + spin_unlock_irq(&cwq->gcwq->lock); 2420 + 2421 + if (drained) 2417 2422 continue; 2418 2423 2419 2424 if (++flush_cnt == 10 ||
+1
lib/sha1.c
··· 8 8 #include <linux/kernel.h> 9 9 #include <linux/module.h> 10 10 #include <linux/bitops.h> 11 + #include <linux/cryptohash.h> 11 12 #include <asm/unaligned.h> 12 13 13 14 /*
+4 -2
mm/filemap.c
··· 827 827 { 828 828 unsigned int i; 829 829 unsigned int ret; 830 - unsigned int nr_found; 830 + unsigned int nr_found, nr_skip; 831 831 832 832 rcu_read_lock(); 833 833 restart: 834 834 nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, 835 835 (void ***)pages, NULL, start, nr_pages); 836 836 ret = 0; 837 + nr_skip = 0; 837 838 for (i = 0; i < nr_found; i++) { 838 839 struct page *page; 839 840 repeat: ··· 857 856 * here as an exceptional entry: so skip over it - 858 857 * we only reach this from invalidate_mapping_pages(). 859 858 */ 859 + nr_skip++; 860 860 continue; 861 861 } 862 862 ··· 878 876 * If all entries were removed before we could secure them, 879 877 * try again, because callers stop trying once 0 is returned. 880 878 */ 881 - if (unlikely(!ret && nr_found)) 879 + if (unlikely(!ret && nr_found > nr_skip)) 882 880 goto restart; 883 881 rcu_read_unlock(); 884 882 return ret;
+6 -166
mm/memcontrol.c
··· 204 204 static void mem_cgroup_threshold(struct mem_cgroup *mem); 205 205 static void mem_cgroup_oom_notify(struct mem_cgroup *mem); 206 206 207 - enum { 208 - SCAN_BY_LIMIT, 209 - SCAN_BY_SYSTEM, 210 - NR_SCAN_CONTEXT, 211 - SCAN_BY_SHRINK, /* not recorded now */ 212 - }; 213 - 214 - enum { 215 - SCAN, 216 - SCAN_ANON, 217 - SCAN_FILE, 218 - ROTATE, 219 - ROTATE_ANON, 220 - ROTATE_FILE, 221 - FREED, 222 - FREED_ANON, 223 - FREED_FILE, 224 - ELAPSED, 225 - NR_SCANSTATS, 226 - }; 227 - 228 - struct scanstat { 229 - spinlock_t lock; 230 - unsigned long stats[NR_SCAN_CONTEXT][NR_SCANSTATS]; 231 - unsigned long rootstats[NR_SCAN_CONTEXT][NR_SCANSTATS]; 232 - }; 233 - 234 - const char *scanstat_string[NR_SCANSTATS] = { 235 - "scanned_pages", 236 - "scanned_anon_pages", 237 - "scanned_file_pages", 238 - "rotated_pages", 239 - "rotated_anon_pages", 240 - "rotated_file_pages", 241 - "freed_pages", 242 - "freed_anon_pages", 243 - "freed_file_pages", 244 - "elapsed_ns", 245 - }; 246 - #define SCANSTAT_WORD_LIMIT "_by_limit" 247 - #define SCANSTAT_WORD_SYSTEM "_by_system" 248 - #define SCANSTAT_WORD_HIERARCHY "_under_hierarchy" 249 - 250 - 251 207 /* 252 208 * The memory controller data structure. The memory controller controls both 253 209 * page cache and RSS per cgroup. We would eventually like to provide ··· 269 313 270 314 /* For oom notifier event fd */ 271 315 struct list_head oom_notify; 272 - /* For recording LRU-scan statistics */ 273 - struct scanstat scanstat; 316 + 274 317 /* 275 318 * Should we move charges of a task when a task is moved into this 276 319 * mem_cgroup ? And what type of charges should we move ? ··· 1633 1678 } 1634 1679 #endif 1635 1680 1636 - static void __mem_cgroup_record_scanstat(unsigned long *stats, 1637 - struct memcg_scanrecord *rec) 1638 - { 1639 - 1640 - stats[SCAN] += rec->nr_scanned[0] + rec->nr_scanned[1]; 1641 - stats[SCAN_ANON] += rec->nr_scanned[0]; 1642 - stats[SCAN_FILE] += rec->nr_scanned[1]; 1643 - 1644 - stats[ROTATE] += rec->nr_rotated[0] + rec->nr_rotated[1]; 1645 - stats[ROTATE_ANON] += rec->nr_rotated[0]; 1646 - stats[ROTATE_FILE] += rec->nr_rotated[1]; 1647 - 1648 - stats[FREED] += rec->nr_freed[0] + rec->nr_freed[1]; 1649 - stats[FREED_ANON] += rec->nr_freed[0]; 1650 - stats[FREED_FILE] += rec->nr_freed[1]; 1651 - 1652 - stats[ELAPSED] += rec->elapsed; 1653 - } 1654 - 1655 - static void mem_cgroup_record_scanstat(struct memcg_scanrecord *rec) 1656 - { 1657 - struct mem_cgroup *mem; 1658 - int context = rec->context; 1659 - 1660 - if (context >= NR_SCAN_CONTEXT) 1661 - return; 1662 - 1663 - mem = rec->mem; 1664 - spin_lock(&mem->scanstat.lock); 1665 - __mem_cgroup_record_scanstat(mem->scanstat.stats[context], rec); 1666 - spin_unlock(&mem->scanstat.lock); 1667 - 1668 - mem = rec->root; 1669 - spin_lock(&mem->scanstat.lock); 1670 - __mem_cgroup_record_scanstat(mem->scanstat.rootstats[context], rec); 1671 - spin_unlock(&mem->scanstat.lock); 1672 - } 1673 - 1674 1681 /* 1675 1682 * Scan the hierarchy if needed to reclaim memory. 
We remember the last child 1676 1683 * we reclaimed from, so that we don't end up penalizing one child extensively ··· 1657 1740 bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP; 1658 1741 bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK; 1659 1742 bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; 1660 - struct memcg_scanrecord rec; 1661 1743 unsigned long excess; 1662 - unsigned long scanned; 1744 + unsigned long nr_scanned; 1663 1745 1664 1746 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; 1665 1747 1666 1748 /* If memsw_is_minimum==1, swap-out is of-no-use. */ 1667 1749 if (!check_soft && !shrink && root_mem->memsw_is_minimum) 1668 1750 noswap = true; 1669 - 1670 - if (shrink) 1671 - rec.context = SCAN_BY_SHRINK; 1672 - else if (check_soft) 1673 - rec.context = SCAN_BY_SYSTEM; 1674 - else 1675 - rec.context = SCAN_BY_LIMIT; 1676 - 1677 - rec.root = root_mem; 1678 1751 1679 1752 while (1) { 1680 1753 victim = mem_cgroup_select_victim(root_mem); ··· 1706 1799 css_put(&victim->css); 1707 1800 continue; 1708 1801 } 1709 - rec.mem = victim; 1710 - rec.nr_scanned[0] = 0; 1711 - rec.nr_scanned[1] = 0; 1712 - rec.nr_rotated[0] = 0; 1713 - rec.nr_rotated[1] = 0; 1714 - rec.nr_freed[0] = 0; 1715 - rec.nr_freed[1] = 0; 1716 - rec.elapsed = 0; 1717 1802 /* we use swappiness of local cgroup */ 1718 1803 if (check_soft) { 1719 1804 ret = mem_cgroup_shrink_node_zone(victim, gfp_mask, 1720 - noswap, zone, &rec, &scanned); 1721 - *total_scanned += scanned; 1805 + noswap, zone, &nr_scanned); 1806 + *total_scanned += nr_scanned; 1722 1807 } else 1723 1808 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, 1724 - noswap, &rec); 1725 - mem_cgroup_record_scanstat(&rec); 1809 + noswap); 1726 1810 css_put(&victim->css); 1727 1811 /* 1728 1812 * At shrinking usage, we can't check we should stop here or ··· 3752 3854 /* try to free all pages in this cgroup */ 3753 3855 shrink = 1; 3754 3856 while (nr_retries && mem->res.usage > 0) { 3755 - struct memcg_scanrecord rec; 3756 3857 int progress; 3757 3858 3758 3859 if (signal_pending(current)) { 3759 3860 ret = -EINTR; 3760 3861 goto out; 3761 3862 } 3762 - rec.context = SCAN_BY_SHRINK; 3763 - rec.mem = mem; 3764 - rec.root = mem; 3765 3863 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, 3766 - false, &rec); 3864 + false); 3767 3865 if (!progress) { 3768 3866 nr_retries--; 3769 3867 /* maybe some writeback is necessary */ ··· 4603 4709 } 4604 4710 #endif /* CONFIG_NUMA */ 4605 4711 4606 - static int mem_cgroup_vmscan_stat_read(struct cgroup *cgrp, 4607 - struct cftype *cft, 4608 - struct cgroup_map_cb *cb) 4609 - { 4610 - struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4611 - char string[64]; 4612 - int i; 4613 - 4614 - for (i = 0; i < NR_SCANSTATS; i++) { 4615 - strcpy(string, scanstat_string[i]); 4616 - strcat(string, SCANSTAT_WORD_LIMIT); 4617 - cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_LIMIT][i]); 4618 - } 4619 - 4620 - for (i = 0; i < NR_SCANSTATS; i++) { 4621 - strcpy(string, scanstat_string[i]); 4622 - strcat(string, SCANSTAT_WORD_SYSTEM); 4623 - cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_SYSTEM][i]); 4624 - } 4625 - 4626 - for (i = 0; i < NR_SCANSTATS; i++) { 4627 - strcpy(string, scanstat_string[i]); 4628 - strcat(string, SCANSTAT_WORD_LIMIT); 4629 - strcat(string, SCANSTAT_WORD_HIERARCHY); 4630 - cb->fill(cb, string, mem->scanstat.rootstats[SCAN_BY_LIMIT][i]); 4631 - } 4632 - for (i = 0; i < NR_SCANSTATS; i++) { 4633 - strcpy(string, scanstat_string[i]); 4634 - strcat(string, 
SCANSTAT_WORD_SYSTEM); 4635 - strcat(string, SCANSTAT_WORD_HIERARCHY); 4636 - cb->fill(cb, string, mem->scanstat.rootstats[SCAN_BY_SYSTEM][i]); 4637 - } 4638 - return 0; 4639 - } 4640 - 4641 - static int mem_cgroup_reset_vmscan_stat(struct cgroup *cgrp, 4642 - unsigned int event) 4643 - { 4644 - struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4645 - 4646 - spin_lock(&mem->scanstat.lock); 4647 - memset(&mem->scanstat.stats, 0, sizeof(mem->scanstat.stats)); 4648 - memset(&mem->scanstat.rootstats, 0, sizeof(mem->scanstat.rootstats)); 4649 - spin_unlock(&mem->scanstat.lock); 4650 - return 0; 4651 - } 4652 - 4653 - 4654 4712 static struct cftype mem_cgroup_files[] = { 4655 4713 { 4656 4714 .name = "usage_in_bytes", ··· 4673 4827 .mode = S_IRUGO, 4674 4828 }, 4675 4829 #endif 4676 - { 4677 - .name = "vmscan_stat", 4678 - .read_map = mem_cgroup_vmscan_stat_read, 4679 - .trigger = mem_cgroup_reset_vmscan_stat, 4680 - }, 4681 4830 }; 4682 4831 4683 4832 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP ··· 4936 5095 atomic_set(&mem->refcnt, 1); 4937 5096 mem->move_charge_at_immigrate = 0; 4938 5097 mutex_init(&mem->thresholds_lock); 4939 - spin_lock_init(&mem->scanstat.lock); 4940 5098 return &mem->css; 4941 5099 free_out: 4942 5100 __mem_cgroup_free(mem);
+5 -4
mm/mempolicy.c
··· 636 636 struct vm_area_struct *prev; 637 637 struct vm_area_struct *vma; 638 638 int err = 0; 639 - pgoff_t pgoff; 640 639 unsigned long vmstart; 641 640 unsigned long vmend; 642 641 ··· 648 649 vmstart = max(start, vma->vm_start); 649 650 vmend = min(end, vma->vm_end); 650 651 651 - pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); 652 652 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, 653 - vma->anon_vma, vma->vm_file, pgoff, new_pol); 653 + vma->anon_vma, vma->vm_file, vma->vm_pgoff, 654 + new_pol); 654 655 if (prev) { 655 656 vma = prev; 656 657 next = vma->vm_next; ··· 1411 1412 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 1412 1413 1413 1414 if (!err && nmask) { 1414 - err = copy_from_user(bm, nm, alloc_size); 1415 + unsigned long copy_size; 1416 + copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 1417 + err = copy_from_user(bm, nm, copy_size); 1415 1418 /* ensure entire bitmap is zeroed */ 1416 1419 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 1417 1420 err |= compat_put_bitmap(nmask, bm, nr_bits);
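The compat_sys_get_mempolicy() fix above bounds the copy by the destination buffer's size with min_t() instead of trusting alloc_size, the classic guard against a caller-influenced length overrunning a fixed buffer. The guard on its own (buffer sizes invented):

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))       /* stand-in for min_t() */

int main(void)
{
        unsigned char bm[8];                    /* fixed destination */
        unsigned char src[32];                  /* caller-sized source */
        size_t alloc_size = sizeof(src);        /* may exceed sizeof(bm) */
        size_t copy_size;

        memset(src, 0xff, sizeof(src));
        /* Clamp: never copy more than the destination can hold. */
        copy_size = MIN(sizeof(bm), alloc_size);
        memcpy(bm, src, copy_size);
        printf("copied %zu of %zu bytes\n", copy_size, alloc_size);
        return 0;
}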
+1 -1
mm/slub.c
··· 2377 2377 */ 2378 2378 if (unlikely(!prior)) { 2379 2379 remove_full(s, page); 2380 - add_partial(n, page, 0); 2380 + add_partial(n, page, 1); 2381 2381 stat(s, FREE_ADD_PARTIAL); 2382 2382 } 2383 2383 }
+8
mm/vmalloc.c
··· 2140 2140 return NULL; 2141 2141 } 2142 2142 2143 + /* 2144 + * If the allocated address space is passed to a hypercall 2145 + * before being used then we cannot rely on a page fault to 2146 + * trigger an update of the page tables. So sync all the page 2147 + * tables here. 2148 + */ 2149 + vmalloc_sync_all(); 2150 + 2143 2151 return area; 2144 2152 } 2145 2153 EXPORT_SYMBOL_GPL(alloc_vm_area);
+17 -49
mm/vmscan.c
··· 105 105 106 106 /* Which cgroup do we reclaim from */ 107 107 struct mem_cgroup *mem_cgroup; 108 - struct memcg_scanrecord *memcg_record; 109 108 110 109 /* 111 110 * Nodemask of nodes allowed by the caller. If NULL, all nodes ··· 1348 1349 int file = is_file_lru(lru); 1349 1350 int numpages = hpage_nr_pages(page); 1350 1351 reclaim_stat->recent_rotated[file] += numpages; 1351 - if (!scanning_global_lru(sc)) 1352 - sc->memcg_record->nr_rotated[file] += numpages; 1353 1352 } 1354 1353 if (!pagevec_add(&pvec, page)) { 1355 1354 spin_unlock_irq(&zone->lru_lock); ··· 1391 1394 1392 1395 reclaim_stat->recent_scanned[0] += *nr_anon; 1393 1396 reclaim_stat->recent_scanned[1] += *nr_file; 1394 - if (!scanning_global_lru(sc)) { 1395 - sc->memcg_record->nr_scanned[0] += *nr_anon; 1396 - sc->memcg_record->nr_scanned[1] += *nr_file; 1397 - } 1398 1397 } 1399 1398 1400 1399 /* ··· 1504 1511 nr_reclaimed += shrink_page_list(&page_list, zone, sc); 1505 1512 } 1506 1513 1507 - if (!scanning_global_lru(sc)) 1508 - sc->memcg_record->nr_freed[file] += nr_reclaimed; 1509 - 1510 1514 local_irq_disable(); 1511 1515 if (current_is_kswapd()) 1512 1516 __count_vm_events(KSWAPD_STEAL, nr_reclaimed); ··· 1603 1613 } 1604 1614 1605 1615 reclaim_stat->recent_scanned[file] += nr_taken; 1606 - if (!scanning_global_lru(sc)) 1607 - sc->memcg_record->nr_scanned[file] += nr_taken; 1608 1616 1609 1617 __count_zone_vm_events(PGREFILL, zone, pgscanned); 1610 1618 if (file) ··· 1654 1666 * get_scan_ratio. 1655 1667 */ 1656 1668 reclaim_stat->recent_rotated[file] += nr_rotated; 1657 - if (!scanning_global_lru(sc)) 1658 - sc->memcg_record->nr_rotated[file] += nr_rotated; 1659 1669 1660 1670 move_active_pages_to_lru(zone, &l_active, 1661 1671 LRU_ACTIVE + file * LRU_FILE); ··· 1794 1808 u64 fraction[2], denominator; 1795 1809 enum lru_list l; 1796 1810 int noswap = 0; 1797 - int force_scan = 0; 1811 + bool force_scan = false; 1798 1812 unsigned long nr_force_scan[2]; 1799 1813 1800 - 1801 - anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + 1802 - zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); 1803 - file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + 1804 - zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); 1805 - 1806 - if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) { 1807 - /* kswapd does zone balancing and need to scan this zone */ 1808 - if (scanning_global_lru(sc) && current_is_kswapd()) 1809 - force_scan = 1; 1810 - /* memcg may have small limit and need to avoid priority drop */ 1811 - if (!scanning_global_lru(sc)) 1812 - force_scan = 1; 1813 - } 1814 + /* kswapd does zone balancing and needs to scan this zone */ 1815 + if (scanning_global_lru(sc) && current_is_kswapd()) 1816 + force_scan = true; 1817 + /* memcg may have small limit and need to avoid priority drop */ 1818 + if (!scanning_global_lru(sc)) 1819 + force_scan = true; 1814 1820 1815 1821 /* If we have no swap space, do not bother scanning anon pages. 
*/ 1816 1822 if (!sc->may_swap || (nr_swap_pages <= 0)) { ··· 1814 1836 nr_force_scan[1] = SWAP_CLUSTER_MAX; 1815 1837 goto out; 1816 1838 } 1839 + 1840 + anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + 1841 + zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); 1842 + file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + 1843 + zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); 1817 1844 1818 1845 if (scanning_global_lru(sc)) { 1819 1846 free = zone_page_state(zone, NR_FREE_PAGES); ··· 2251 2268 #ifdef CONFIG_CGROUP_MEM_RES_CTLR 2252 2269 2253 2270 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 2254 - gfp_t gfp_mask, bool noswap, 2255 - struct zone *zone, 2256 - struct memcg_scanrecord *rec, 2257 - unsigned long *scanned) 2271 + gfp_t gfp_mask, bool noswap, 2272 + struct zone *zone, 2273 + unsigned long *nr_scanned) 2258 2274 { 2259 2275 struct scan_control sc = { 2260 2276 .nr_scanned = 0, ··· 2263 2281 .may_swap = !noswap, 2264 2282 .order = 0, 2265 2283 .mem_cgroup = mem, 2266 - .memcg_record = rec, 2267 2284 }; 2268 - ktime_t start, end; 2269 2285 2270 2286 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2271 2287 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); ··· 2272 2292 sc.may_writepage, 2273 2293 sc.gfp_mask); 2274 2294 2275 - start = ktime_get(); 2276 2295 /* 2277 2296 * NOTE: Although we can get the priority field, using it 2278 2297 * here is not a good idea, since it limits the pages we can scan. ··· 2280 2301 * the priority and make it zero. 2281 2302 */ 2282 2303 shrink_zone(0, zone, &sc); 2283 - end = ktime_get(); 2284 - 2285 - if (rec) 2286 - rec->elapsed += ktime_to_ns(ktime_sub(end, start)); 2287 - *scanned = sc.nr_scanned; 2288 2304 2289 2305 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 2290 2306 2307 + *nr_scanned = sc.nr_scanned; 2291 2308 return sc.nr_reclaimed; 2292 2309 } 2293 2310 2294 2311 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, 2295 2312 gfp_t gfp_mask, 2296 - bool noswap, 2297 - struct memcg_scanrecord *rec) 2313 + bool noswap) 2298 2314 { 2299 2315 struct zonelist *zonelist; 2300 2316 unsigned long nr_reclaimed; 2301 - ktime_t start, end; 2302 2317 int nid; 2303 2318 struct scan_control sc = { 2304 2319 .may_writepage = !laptop_mode, ··· 2301 2328 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2302 2329 .order = 0, 2303 2330 .mem_cgroup = mem_cont, 2304 - .memcg_record = rec, 2305 2331 .nodemask = NULL, /* we don't care the placement */ 2306 2332 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2307 2333 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), ··· 2309 2337 .gfp_mask = sc.gfp_mask, 2310 2338 }; 2311 2339 2312 - start = ktime_get(); 2313 2340 /* 2314 2341 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't 2315 2342 * take care of from where we get pages. So the node where we start the ··· 2323 2352 sc.gfp_mask); 2324 2353 2325 2354 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2326 - end = ktime_get(); 2327 - if (rec) 2328 - rec->elapsed += ktime_to_ns(ktime_sub(end, start)); 2329 2355 2330 2356 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 2331 2357
+2 -2
mm/vmstat.c
··· 659 659 } 660 660 #endif 661 661 662 - #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) 662 + #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA) 663 663 #ifdef CONFIG_ZONE_DMA 664 664 #define TEXT_FOR_DMA(xx) xx "_dma", 665 665 #else ··· 788 788 789 789 #endif /* CONFIG_VM_EVENTS_COUNTERS */ 790 790 }; 791 - #endif /* CONFIG_PROC_FS || CONFIG_SYSFS */ 791 + #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */ 792 792 793 793 794 794 #ifdef CONFIG_PROC_FS
+12 -5
net/9p/trans_virtio.c
··· 263 263 { 264 264 int in, out, inp, outp; 265 265 struct virtio_chan *chan = client->trans; 266 - char *rdata = (char *)req->rc+sizeof(struct p9_fcall); 267 266 unsigned long flags; 268 267 size_t pdata_off = 0; 269 268 struct trans_rpage_info *rpinfo = NULL; ··· 345 346 * Arrange in such a way that server places header in the 346 347 * alloced memory and payload onto the user buffer. 347 348 */ 348 - inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11); 349 + inp = pack_sg_list(chan->sg, out, 350 + VIRTQUEUE_NUM, req->rc->sdata, 11); 349 351 /* 350 352 * Running executables in the filesystem may result in 351 353 * a read request with kernel buffer as opposed to user buffer. ··· 366 366 } 367 367 in += inp; 368 368 } else { 369 - in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 370 - req->rc->capacity); 369 + in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, 370 + req->rc->sdata, req->rc->capacity); 371 371 } 372 372 373 373 err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc); ··· 592 592 .close = p9_virtio_close, 593 593 .request = p9_virtio_request, 594 594 .cancel = p9_virtio_cancel, 595 - .maxsize = PAGE_SIZE*VIRTQUEUE_NUM, 595 + 596 + /* 597 + * We leave one entry for input and one entry for response 598 + * headers. We also skip one more entry to accommodate addresses 599 + * that are not at a page boundary, which can result in an extra 600 + * page in zero copy. 601 + */ 602 + .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3), 596 603 .pref = P9_TRANS_PREF_PAYLOAD_SEP, 597 604 .def = 0, 598 605 .owner = THIS_MODULE,
+1 -1
net/bridge/netfilter/Kconfig
··· 4 4 5 5 menuconfig BRIDGE_NF_EBTABLES 6 6 tristate "Ethernet Bridge tables (ebtables) support" 7 - depends on BRIDGE && BRIDGE_NETFILTER 7 + depends on BRIDGE && NETFILTER 8 8 select NETFILTER_XTABLES 9 9 help 10 10 ebtables is a general, extensible frame/packet identification
+5 -1
net/caif/caif_dev.c
··· 93 93 caifdevs = caif_device_list(dev_net(dev)); 94 94 BUG_ON(!caifdevs); 95 95 96 - caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC); 96 + caifd = kzalloc(sizeof(*caifd), GFP_KERNEL); 97 97 if (!caifd) 98 98 return NULL; 99 99 caifd->pcpu_refcnt = alloc_percpu(int); 100 + if (!caifd->pcpu_refcnt) { 101 + kfree(caifd); 102 + return NULL; 103 + } 100 104 caifd->netdev = dev; 101 105 dev_hold(dev); 102 106 return caifd;
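Besides switching to GFP_KERNEL, the caif hunk adds the unwind that was missing: if the second allocation (alloc_percpu) fails, the first is freed before returning instead of leaking. The shape of the pattern with plain malloc standing in (types invented):

#include <stdlib.h>

struct entry { int *refcnt; void *dev; };

static struct entry *entry_alloc(void)
{
        struct entry *e = calloc(1, sizeof(*e));
        if (!e)
                return NULL;
        e->refcnt = calloc(1, sizeof(*e->refcnt));
        if (!e->refcnt) {
                free(e);        /* unwind the first allocation */
                return NULL;
        }
        return e;
}

int main(void)
{
        struct entry *e = entry_alloc();
        if (e) {
                free(e->refcnt);
                free(e);
        }
        return 0;
}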
+1 -1
net/can/af_can.c
··· 857 857 struct net_device *dev; 858 858 859 859 if (stats_timer) 860 - del_timer(&can_stattimer); 860 + del_timer_sync(&can_stattimer); 861 861 862 862 can_remove_proc(); 863 863
+29 -11
net/ceph/msgpool.c
··· 7 7 8 8 #include <linux/ceph/msgpool.h> 9 9 10 - static void *alloc_fn(gfp_t gfp_mask, void *arg) 10 + static void *msgpool_alloc(gfp_t gfp_mask, void *arg) 11 11 { 12 12 struct ceph_msgpool *pool = arg; 13 - void *p; 13 + struct ceph_msg *msg; 14 14 15 - p = ceph_msg_new(0, pool->front_len, gfp_mask); 16 - if (!p) 17 - pr_err("msgpool %s alloc failed\n", pool->name); 18 - return p; 15 + msg = ceph_msg_new(0, pool->front_len, gfp_mask); 16 + if (!msg) { 17 + dout("msgpool_alloc %s failed\n", pool->name); 18 + } else { 19 + dout("msgpool_alloc %s %p\n", pool->name, msg); 20 + msg->pool = pool; 21 + } 22 + return msg; 19 23 } 20 24 21 - static void free_fn(void *element, void *arg) 25 + static void msgpool_free(void *element, void *arg) 22 26 { 23 - ceph_msg_put(element); 27 + struct ceph_msgpool *pool = arg; 28 + struct ceph_msg *msg = element; 29 + 30 + dout("msgpool_release %s %p\n", pool->name, msg); 31 + msg->pool = NULL; 32 + ceph_msg_put(msg); 24 33 } 25 34 26 35 int ceph_msgpool_init(struct ceph_msgpool *pool, 27 36 int front_len, int size, bool blocking, const char *name) 28 37 { 38 + dout("msgpool %s init\n", name); 29 39 pool->front_len = front_len; 30 - pool->pool = mempool_create(size, alloc_fn, free_fn, pool); 40 + pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool); 31 41 if (!pool->pool) 32 42 return -ENOMEM; 33 43 pool->name = name; ··· 46 36 47 37 void ceph_msgpool_destroy(struct ceph_msgpool *pool) 48 38 { 39 + dout("msgpool %s destroy\n", pool->name); 49 40 mempool_destroy(pool->pool); 50 41 } 51 42 52 43 struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, 53 44 int front_len) 54 45 { 46 + struct ceph_msg *msg; 47 + 55 48 if (front_len > pool->front_len) { 56 - pr_err("msgpool_get pool %s need front %d, pool size is %d\n", 49 + dout("msgpool_get %s need front %d, pool size is %d\n", 57 50 pool->name, front_len, pool->front_len); 58 51 WARN_ON(1); 59 52 ··· 64 51 return ceph_msg_new(0, front_len, GFP_NOFS); 65 52 } 66 53 67 - return mempool_alloc(pool->pool, GFP_NOFS); 54 + msg = mempool_alloc(pool->pool, GFP_NOFS); 55 + dout("msgpool_get %s %p\n", pool->name, msg); 56 + return msg; 68 57 } 69 58 70 59 void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg) 71 60 { 61 + dout("msgpool_put %s %p\n", pool->name, msg); 62 + 72 63 /* reset msg front_len; user may have changed it */ 73 64 msg->front.iov_len = pool->front_len; 74 65 msg->hdr.front_len = cpu_to_le32(pool->front_len); 75 66 76 67 kref_init(&msg->kref); /* retake single ref */ 68 + mempool_free(msg, pool->pool); 77 69 }
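The msgpool rework keeps the mempool_create() callback structure but tags every message with its owning pool on allocation so it can be reset and recycled on put. A toy fixed-capacity pool with the same alloc/free-callback shape, in userspace C (none of mempool's reservation guarantees; purely illustrative, all names invented):

#include <stdio.h>
#include <stdlib.h>

typedef void *(*alloc_fn)(void *arg);
typedef void (*free_fn)(void *elem, void *arg);

struct pool { alloc_fn alloc; free_fn release; void *arg; void *cached; };

struct msg { struct pool *pool; char data[64]; };

static void *msg_alloc(void *arg)
{
        struct msg *m = calloc(1, sizeof(*m));
        if (m)
                m->pool = arg;          /* remember the owning pool */
        return m;
}

static void msg_release(void *elem, void *arg)
{
        (void)arg;
        free(elem);
}

static void *pool_get(struct pool *p)
{
        void *e = p->cached ? p->cached : p->alloc(p->arg);
        p->cached = NULL;
        return e;
}

static void pool_put(struct pool *p, void *e)
{
        if (p->cached)
                p->release(e, p->arg);  /* cache occupied: really free */
        else
                p->cached = e;          /* recycle on the next get */
}

int main(void)
{
        struct pool p = { msg_alloc, msg_release, &p, NULL };
        struct msg *m = pool_get(&p);
        if (!m)
                return 1;
        printf("msg %p belongs to pool %p\n", (void *)m, (void *)m->pool);
        pool_put(&p, m);
        m = pool_get(&p);               /* the same object comes back */
        pool_put(&p, m);
        msg_release(p.cached, p.arg);
        return 0;
}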
+17 -5
net/ceph/osd_client.c
··· 685 685 put_osd(osd); 686 686 } 687 687 688 + static void remove_all_osds(struct ceph_osd_client *osdc) 689 + { 690 + dout("__remove_old_osds %p\n", osdc); 691 + mutex_lock(&osdc->request_mutex); 692 + while (!RB_EMPTY_ROOT(&osdc->osds)) { 693 + struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), 694 + struct ceph_osd, o_node); 695 + __remove_osd(osdc, osd); 696 + } 697 + mutex_unlock(&osdc->request_mutex); 698 + } 699 + 688 700 static void __move_osd_to_lru(struct ceph_osd_client *osdc, 689 701 struct ceph_osd *osd) 690 702 { ··· 713 701 list_del_init(&osd->o_osd_lru); 714 702 } 715 703 716 - static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all) 704 + static void remove_old_osds(struct ceph_osd_client *osdc) 717 705 { 718 706 struct ceph_osd *osd, *nosd; 719 707 720 708 dout("__remove_old_osds %p\n", osdc); 721 709 mutex_lock(&osdc->request_mutex); 722 710 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { 723 - if (!remove_all && time_before(jiffies, osd->lru_ttl)) 711 + if (time_before(jiffies, osd->lru_ttl)) 724 712 break; 725 713 __remove_osd(osdc, osd); 726 714 } ··· 763 751 struct rb_node *parent = NULL; 764 752 struct ceph_osd *osd = NULL; 765 753 754 + dout("__insert_osd %p osd%d\n", new, new->o_osd); 766 755 while (*p) { 767 756 parent = *p; 768 757 osd = rb_entry(parent, struct ceph_osd, o_node); ··· 1157 1144 1158 1145 dout("osds timeout\n"); 1159 1146 down_read(&osdc->map_sem); 1160 - remove_old_osds(osdc, 0); 1147 + remove_old_osds(osdc); 1161 1148 up_read(&osdc->map_sem); 1162 1149 1163 1150 schedule_delayed_work(&osdc->osds_timeout_work, ··· 1875 1862 ceph_osdmap_destroy(osdc->osdmap); 1876 1863 osdc->osdmap = NULL; 1877 1864 } 1878 - remove_old_osds(osdc, 1); 1879 - WARN_ON(!RB_EMPTY_ROOT(&osdc->osds)); 1865 + remove_all_osds(osdc); 1880 1866 mempool_destroy(osdc->req_mempool); 1881 1867 ceph_msgpool_destroy(&osdc->msgpool_op); 1882 1868 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
+8
net/core/dev.c
··· 1515 1515 */ 1516 1516 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1517 1517 { 1518 + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 1519 + if (skb_copy_ubufs(skb, GFP_ATOMIC)) { 1520 + atomic_long_inc(&dev->rx_dropped); 1521 + kfree_skb(skb); 1522 + return NET_RX_DROP; 1523 + } 1524 + } 1525 + 1518 1526 skb_orphan(skb); 1519 1527 nf_reset(skb); 1520 1528
+21 -15
net/core/flow.c
··· 30 30 struct hlist_node hlist; 31 31 struct list_head gc_list; 32 32 } u; 33 + struct net *net; 33 34 u16 family; 34 35 u8 dir; 35 36 u32 genid; ··· 173 172 174 173 static u32 flow_hash_code(struct flow_cache *fc, 175 174 struct flow_cache_percpu *fcp, 176 - const struct flowi *key) 175 + const struct flowi *key, 176 + size_t keysize) 177 177 { 178 178 const u32 *k = (const u32 *) key; 179 + const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32); 179 180 180 - return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd) 181 + return jhash2(k, length, fcp->hash_rnd) 181 182 & (flow_cache_hash_size(fc) - 1); 182 183 } 183 184 184 - typedef unsigned long flow_compare_t; 185 - 186 185 /* I hear what you're saying, use memcmp. But memcmp cannot make 187 - * important assumptions that we can here, such as alignment and 188 - * constant size. 186 + * important assumptions that we can here, such as alignment. 189 187 */ 190 - static int flow_key_compare(const struct flowi *key1, const struct flowi *key2) 188 + static int flow_key_compare(const struct flowi *key1, const struct flowi *key2, 189 + size_t keysize) 191 190 { 192 191 const flow_compare_t *k1, *k1_lim, *k2; 193 - const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t); 194 - 195 - BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t)); 196 192 197 193 k1 = (const flow_compare_t *) key1; 198 - k1_lim = k1 + n_elem; 194 + k1_lim = k1 + keysize; 199 195 200 196 k2 = (const flow_compare_t *) key2; 201 197 ··· 213 215 struct flow_cache_entry *fle, *tfle; 214 216 struct hlist_node *entry; 215 217 struct flow_cache_object *flo; 218 + size_t keysize; 216 219 unsigned int hash; 217 220 218 221 local_bh_disable(); ··· 221 222 222 223 fle = NULL; 223 224 flo = NULL; 225 + 226 + keysize = flow_key_size(family); 227 + if (!keysize) 228 + goto nocache; 229 + 224 230 /* Packet really early in init? Making flow_cache_init a 225 231 * pre-smp initcall would solve this. --RR */ 226 232 if (!fcp->hash_table) ··· 234 230 if (fcp->hash_rnd_recalc) 235 231 flow_new_hash_rnd(fc, fcp); 236 232 237 - hash = flow_hash_code(fc, fcp, key); 233 + hash = flow_hash_code(fc, fcp, key, keysize); 238 234 hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) { 239 - if (tfle->family == family && 235 + if (tfle->net == net && 236 + tfle->family == family && 240 237 tfle->dir == dir && 241 - flow_key_compare(key, &tfle->key) == 0) { 238 + flow_key_compare(key, &tfle->key, keysize) == 0) { 242 239 fle = tfle; 243 240 break; 244 241 } ··· 251 246 252 247 fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); 253 248 if (fle) { 249 + fle->net = net; 254 250 fle->family = family; 255 251 fle->dir = dir; 256 - memcpy(&fle->key, key, sizeof(*key)); 252 + memcpy(&fle->key, key, keysize * sizeof(flow_compare_t)); 257 253 fle->object = NULL; 258 254 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]); 259 255 fcp->hash_count++;
+17 -5
net/core/skbuff.c
··· 611 611 } 612 612 EXPORT_SYMBOL_GPL(skb_morph); 613 613 614 - /* skb frags copy userspace buffers to kernel */ 615 - static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 614 + /* skb_copy_ubufs - copy userspace skb frags buffers to kernel 615 + * @skb: the skb to modify 616 + * @gfp_mask: allocation priority 617 + * 618 + * This must be called on SKBTX_DEV_ZEROCOPY skb. 619 + * It will copy all frags into kernel and drop the reference 620 + * to userspace pages. 621 + * 622 + * If this function is called from an interrupt gfp_mask() must be 623 + * %GFP_ATOMIC. 624 + * 625 + * Returns 0 on success or a negative error code on failure 626 + * to allocate kernel memory to copy to. 627 + */ 628 + int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 616 629 { 617 630 int i; 618 631 int num_frags = skb_shinfo(skb)->nr_frags; ··· 665 652 skb_shinfo(skb)->frags[i - 1].page = head; 666 653 head = (struct page *)head->private; 667 654 } 655 + 656 + skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 668 657 return 0; 669 658 } 670 659 ··· 692 677 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 693 678 if (skb_copy_ubufs(skb, gfp_mask)) 694 679 return NULL; 695 - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 696 680 } 697 681 698 682 n = skb + 1; ··· 817 803 n = NULL; 818 804 goto out; 819 805 } 820 - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 821 806 } 822 807 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 823 808 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; ··· 909 896 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 910 897 if (skb_copy_ubufs(skb, gfp_mask)) 911 898 goto nofrags; 912 - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 913 899 } 914 900 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 915 901 get_page(skb_shinfo(skb)->frags[i].page);
+1 -1
net/ethernet/eth.c
··· 340 340 dev->addr_len = ETH_ALEN; 341 341 dev->tx_queue_len = 1000; /* Ethernet wants good queues */ 342 342 dev->flags = IFF_BROADCAST|IFF_MULTICAST; 343 - dev->priv_flags = IFF_TX_SKB_SHARING; 343 + dev->priv_flags |= IFF_TX_SKB_SHARING; 344 344 345 345 memset(dev->broadcast, 0xFF, ETH_ALEN); 346 346
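The one-line eth.c fix is worth spelling out: dev->priv_flags = IFF_TX_SKB_SHARING overwrote every flag already set on the device, while |= only adds the new bit. In miniature:

#include <stdio.h>

#define FLAG_A 0x1
#define FLAG_B 0x2

int main(void)
{
        unsigned int flags = FLAG_A;

        flags = FLAG_B;         /* bug: FLAG_A is silently lost */
        printf("assign: %#x\n", flags);         /* 0x2 */

        flags = FLAG_A;
        flags |= FLAG_B;        /* fix: both bits survive */
        printf("or-in:  %#x\n", flags);         /* 0x3 */
        return 0;
}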
+6 -1
net/ipv4/af_inet.c
··· 466 466 goto out; 467 467 468 468 if (addr->sin_family != AF_INET) { 469 + /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET) 470 + * only if s_addr is INADDR_ANY. 471 + */ 469 472 err = -EAFNOSUPPORT; 470 - goto out; 473 + if (addr->sin_family != AF_UNSPEC || 474 + addr->sin_addr.s_addr != htonl(INADDR_ANY)) 475 + goto out; 471 476 } 472 477 473 478 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
+9 -1
net/ipv4/fib_semantics.c
··· 142 142 }; 143 143 144 144 /* Release a nexthop info record */ 145 + static void free_fib_info_rcu(struct rcu_head *head) 146 + { 147 + struct fib_info *fi = container_of(head, struct fib_info, rcu); 148 + 149 + if (fi->fib_metrics != (u32 *) dst_default_metrics) 150 + kfree(fi->fib_metrics); 151 + kfree(fi); 152 + } 145 153 146 154 void free_fib_info(struct fib_info *fi) 147 155 { ··· 164 156 } endfor_nexthops(fi); 165 157 fib_info_cnt--; 166 158 release_net(fi->fib_net); 167 - kfree_rcu(fi, rcu); 159 + call_rcu(&fi->rcu, free_fib_info_rcu); 168 160 } 169 161 170 162 void fib_release_info(struct fib_info *fi)
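free_fib_info_rcu() above recovers the fib_info from its embedded rcu_head via container_of(), which is why the patch can also free the separately allocated metrics array, something the previous kfree_rcu() (which only frees the object itself) could not do. How container_of() resolves the outer object, in plain C with a direct callback standing in for call_rcu() and invented struct names:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct cb_head { void (*func)(struct cb_head *); };

struct info {
        int *metrics;           /* separately allocated */
        struct cb_head rcu;     /* embedded callback handle */
};

static void info_free_cb(struct cb_head *head)
{
        struct info *fi = container_of(head, struct info, rcu);
        free(fi->metrics);      /* the extra free kfree_rcu() cannot do */
        free(fi);
}

int main(void)
{
        struct info *fi = calloc(1, sizeof(*fi));
        if (!fi)
                return 1;
        fi->metrics = calloc(4, sizeof(int));
        fi->rcu.func = info_free_cb;
        fi->rcu.func(&fi->rcu); /* the "grace period expired" moment */
        puts("freed outer object and metrics");
        return 0;
}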
+5 -7
net/ipv4/netfilter/ip_queue.c
··· 218 218 return skb; 219 219 220 220 nlmsg_failure: 221 + kfree_skb(skb); 221 222 *errp = -EINVAL; 222 223 printk(KERN_ERR "ip_queue: error creating packet message\n"); 223 224 return NULL; ··· 314 313 { 315 314 struct nf_queue_entry *entry; 316 315 317 - if (vmsg->value > NF_MAX_VERDICT) 316 + if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) 318 317 return -EINVAL; 319 318 320 319 entry = ipq_find_dequeue_entry(vmsg->id); ··· 359 358 break; 360 359 361 360 case IPQM_VERDICT: 362 - if (pmsg->msg.verdict.value > NF_MAX_VERDICT) 363 - status = -EINVAL; 364 - else 365 - status = ipq_set_verdict(&pmsg->msg.verdict, 366 - len - sizeof(*pmsg)); 367 - break; 361 + status = ipq_set_verdict(&pmsg->msg.verdict, 362 + len - sizeof(*pmsg)); 363 + break; 368 364 default: 369 365 status = -EINVAL; 370 366 }
+2
net/ipv4/proc.c
··· 254 254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), 255 255 SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), 256 256 SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), 257 + SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES), 258 + SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP), 257 259 SNMP_MIB_SENTINEL 258 260 }; 259 261
+1 -1
net/ipv4/tcp_input.c
··· 1124 1124 return 0; 1125 1125 1126 1126 /* ...Then it's D-SACK, and must reside below snd_una completely */ 1127 - if (!after(end_seq, tp->snd_una)) 1127 + if (after(end_seq, tp->snd_una)) 1128 1128 return 0; 1129 1129 1130 1130 if (!before(start_seq, tp->undo_marker))
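The tcp_input.c fix flips a single negation: a D-SACK block must lie entirely below snd_una, and the inverted test accepted exactly the wrong ranges. TCP's before()/after() helpers compare 32-bit sequence numbers modulo 2^32 through a signed subtraction, so the sense of such a test is easy to invert by accident; the primitive itself:

#include <stdint.h>
#include <stdio.h>

/* Modulo-2^32 ordering, the same trick TCP uses for sequence numbers. */
static int before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}

static int after(uint32_t seq1, uint32_t seq2)
{
        return before(seq2, seq1);
}

int main(void)
{
        uint32_t a = 0xfffffff0u;       /* just before wraparound */
        uint32_t b = 0x00000010u;       /* just after wraparound */

        printf("after(b, a) = %d\n", after(b, a));      /* 1: order survives wrap */
        printf("after(a, b) = %d\n", after(a, b));      /* 0 */
        return 0;
}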
+28 -21
net/ipv4/tcp_ipv4.c
··· 808 808 kfree(inet_rsk(req)->opt); 809 809 } 810 810 811 - static void syn_flood_warning(const struct sk_buff *skb) 811 + /* 812 + * Return 1 if a syncookie should be sent 813 + */ 814 + int tcp_syn_flood_action(struct sock *sk, 815 + const struct sk_buff *skb, 816 + const char *proto) 812 817 { 813 - const char *msg; 818 + const char *msg = "Dropping request"; 819 + int want_cookie = 0; 820 + struct listen_sock *lopt; 821 + 822 + 814 823 815 824 #ifdef CONFIG_SYN_COOKIES 816 - if (sysctl_tcp_syncookies) 825 + if (sysctl_tcp_syncookies) { 817 826 msg = "Sending cookies"; 818 - else 827 + want_cookie = 1; 828 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); 829 + } else 819 830 #endif 820 - msg = "Dropping request"; 831 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); 821 832 822 - pr_info("TCP: Possible SYN flooding on port %d. %s.\n", 823 - ntohs(tcp_hdr(skb)->dest), msg); 833 + lopt = inet_csk(sk)->icsk_accept_queue.listen_opt; 834 + if (!lopt->synflood_warned) { 835 + lopt->synflood_warned = 1; 836 + pr_info("%s: Possible SYN flooding on port %d. %s. " 837 + " Check SNMP counters.\n", 838 + proto, ntohs(tcp_hdr(skb)->dest), msg); 839 + } 840 + return want_cookie; 824 841 } 842 + EXPORT_SYMBOL(tcp_syn_flood_action); 825 843 826 844 /* 827 845 * Save and compile IPv4 options into the request_sock if needed. ··· 1253 1235 __be32 saddr = ip_hdr(skb)->saddr; 1254 1236 __be32 daddr = ip_hdr(skb)->daddr; 1255 1237 __u32 isn = TCP_SKB_CB(skb)->when; 1256 - #ifdef CONFIG_SYN_COOKIES 1257 1238 int want_cookie = 0; 1258 - #else 1259 - #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */ 1260 - #endif 1261 1239 1262 1240 /* Never answer to SYNs send to broadcast or multicast */ 1263 1241 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) ··· 1264 1250 * evidently real one. 1265 1251 */ 1266 1252 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1267 - if (net_ratelimit()) 1268 - syn_flood_warning(skb); 1269 - #ifdef CONFIG_SYN_COOKIES 1270 - if (sysctl_tcp_syncookies) { 1271 - want_cookie = 1; 1272 - } else 1273 - #endif 1274 - goto drop; 1253 + want_cookie = tcp_syn_flood_action(sk, skb, "TCP"); 1254 + if (!want_cookie) 1255 + goto drop; 1275 1256 } 1276 1257 1277 1258 /* Accept backlog is full. If we have already queued enough ··· 1312 1303 while (l-- > 0) 1313 1304 *c++ ^= *hash_location++; 1314 1305 1315 - #ifdef CONFIG_SYN_COOKIES 1316 1306 want_cookie = 0; /* not our kind of cookie */ 1317 - #endif 1318 1307 tmp_ext.cookie_out_never = 0; /* false */ 1319 1308 tmp_ext.cookie_plus = tmp_opt.cookie_plus; 1320 1309 } else if (!tp->rx_opt.cookie_in_always) {
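tcp_syn_flood_action() above also replaces the rate-limited printk with a warn-once flag: the new synflood_warned byte in listen_sock ensures a single log line per listener, while the new SNMP counters keep counting every event. The warn-once shape on its own (single-threaded sketch; the kernel additionally tolerates the benign race on the u8):

#include <stdio.h>

struct listener { unsigned char synflood_warned; unsigned long drops; };

static void note_queue_full(struct listener *l, int port)
{
        l->drops++;                             /* counted every time */
        if (!l->synflood_warned) {              /* logged only once */
                l->synflood_warned = 1;
                printf("Possible SYN flooding on port %d\n", port);
        }
}

int main(void)
{
        struct listener l = { 0, 0 };

        for (int i = 0; i < 3; i++)
                note_queue_full(&l, 80);
        printf("drops=%lu\n", l.drops);         /* 3, but one warning */
        return 0;
}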
+3 -2
net/ipv6/datagram.c
··· 599 599 return 0; 600 600 } 601 601 602 - int datagram_send_ctl(struct net *net, 602 + int datagram_send_ctl(struct net *net, struct sock *sk, 603 603 struct msghdr *msg, struct flowi6 *fl6, 604 604 struct ipv6_txoptions *opt, 605 605 int *hlimit, int *tclass, int *dontfrag) ··· 658 658 659 659 if (addr_type != IPV6_ADDR_ANY) { 660 660 int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; 661 - if (!ipv6_chk_addr(net, &src_info->ipi6_addr, 661 + if (!inet_sk(sk)->transparent && 662 + !ipv6_chk_addr(net, &src_info->ipi6_addr, 662 663 strict ? dev : NULL, 0)) 663 664 err = -EINVAL; 664 665 else
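Passing the socket down lets datagram_send_ctl() honor IP transparency: on a transparent socket the IPV6_PKTINFO source address no longer has to pass the ipv6_chk_addr() local-address test, which is what a transparent proxy needs to send from a foreign address. Userspace opts in roughly as below; a minimal sketch, noting that IPV6_TRANSPARENT requires CAP_NET_ADMIN and the fallback define covers libcs that lack the constant:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef IPV6_TRANSPARENT
    #define IPV6_TRANSPARENT 75     /* from <linux/in6.h> */
    #endif

    int main(void)
    {
            int one = 1;
            int fd = socket(AF_INET6, SOCK_DGRAM, 0);

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }
            /* With this set (CAP_NET_ADMIN required), a non-local source
             * address supplied via IPV6_PKTINFO cmsg data is accepted. */
            if (setsockopt(fd, IPPROTO_IPV6, IPV6_TRANSPARENT,
                           &one, sizeof(one)) < 0)
                    perror("setsockopt(IPV6_TRANSPARENT)");
            return 0;
    }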
+4 -4
net/ipv6/ip6_flowlabel.c
··· 322 322 } 323 323 324 324 static struct ip6_flowlabel * 325 - fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval, 326 - int optlen, int *err_p) 325 + fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, 326 + char __user *optval, int optlen, int *err_p) 327 327 { 328 328 struct ip6_flowlabel *fl = NULL; 329 329 int olen; ··· 360 360 msg.msg_control = (void*)(fl->opt+1); 361 361 memset(&flowi6, 0, sizeof(flowi6)); 362 362 363 - err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk, 363 + err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, 364 364 &junk, &junk); 365 365 if (err) 366 366 goto done; ··· 528 528 if (freq.flr_label & ~IPV6_FLOWLABEL_MASK) 529 529 return -EINVAL; 530 530 531 - fl = fl_create(net, &freq, optval, optlen, &err); 531 + fl = fl_create(net, sk, &freq, optval, optlen, &err); 532 532 if (fl == NULL) 533 533 return err; 534 534 sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
+1 -1
net/ipv6/ipv6_sockglue.c
··· 475 475 msg.msg_controllen = optlen; 476 476 msg.msg_control = (void*)(opt+1); 477 477 478 - retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk, 478 + retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, 479 479 &junk); 480 480 if (retv) 481 481 goto done;
+5 -7
net/ipv6/netfilter/ip6_queue.c
··· 218 218 return skb; 219 219 220 220 nlmsg_failure: 221 + kfree_skb(skb); 221 222 *errp = -EINVAL; 222 223 printk(KERN_ERR "ip6_queue: error creating packet message\n"); 223 224 return NULL; ··· 314 313 { 315 314 struct nf_queue_entry *entry; 316 315 317 - if (vmsg->value > NF_MAX_VERDICT) 316 + if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) 318 317 return -EINVAL; 319 318 320 319 entry = ipq_find_dequeue_entry(vmsg->id); ··· 359 358 break; 360 359 361 360 case IPQM_VERDICT: 362 - if (pmsg->msg.verdict.value > NF_MAX_VERDICT) 363 - status = -EINVAL; 364 - else 365 - status = ipq_set_verdict(&pmsg->msg.verdict, 366 - len - sizeof(*pmsg)); 367 - break; 361 + status = ipq_set_verdict(&pmsg->msg.verdict, 362 + len - sizeof(*pmsg)); 363 + break; 368 364 default: 369 365 status = -EINVAL; 370 366 }
+2 -2
net/ipv6/raw.c
··· 817 817 memset(opt, 0, sizeof(struct ipv6_txoptions)); 818 818 opt->tot_len = sizeof(struct ipv6_txoptions); 819 819 820 - err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, 821 - &tclass, &dontfrag); 820 + err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 821 + &hlimit, &tclass, &dontfrag); 822 822 if (err < 0) { 823 823 fl6_sock_release(flowlabel); 824 824 return err;
+22 -11
net/ipv6/route.c
··· 104 104 struct inet_peer *peer; 105 105 u32 *p = NULL; 106 106 107 + if (!(rt->dst.flags & DST_HOST)) 108 + return NULL; 109 + 107 110 if (!rt->rt6i_peer) 108 111 rt6_bind_peer(rt, 1); 109 112 ··· 254 251 struct rt6_info *rt = (struct rt6_info *)dst; 255 252 struct inet6_dev *idev = rt->rt6i_idev; 256 253 struct inet_peer *peer = rt->rt6i_peer; 254 + 255 + if (!(rt->dst.flags & DST_HOST)) 256 + dst_destroy_metrics_generic(dst); 257 257 258 258 if (idev != NULL) { 259 259 rt->rt6i_idev = NULL; ··· 729 723 ipv6_addr_copy(&rt->rt6i_gateway, daddr); 730 724 } 731 725 732 - rt->rt6i_dst.plen = 128; 733 726 rt->rt6i_flags |= RTF_CACHE; 734 - rt->dst.flags |= DST_HOST; 735 727 736 728 #ifdef CONFIG_IPV6_SUBTREES 737 729 if (rt->rt6i_src.plen && saddr) { ··· 779 775 struct rt6_info *rt = ip6_rt_copy(ort, daddr); 780 776 781 777 if (rt) { 782 - rt->rt6i_dst.plen = 128; 783 778 rt->rt6i_flags |= RTF_CACHE; 784 - rt->dst.flags |= DST_HOST; 785 779 dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst))); 786 780 } 787 781 return rt; ··· 1080 1078 neigh = NULL; 1081 1079 } 1082 1080 1083 - rt->rt6i_idev = idev; 1081 + rt->dst.flags |= DST_HOST; 1082 + rt->dst.output = ip6_output; 1084 1083 dst_set_neighbour(&rt->dst, neigh); 1085 1084 atomic_set(&rt->dst.__refcnt, 1); 1086 - ipv6_addr_copy(&rt->rt6i_dst.addr, addr); 1087 1085 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255); 1088 - rt->dst.output = ip6_output; 1086 + 1087 + ipv6_addr_copy(&rt->rt6i_dst.addr, addr); 1088 + rt->rt6i_dst.plen = 128; 1089 + rt->rt6i_idev = idev; 1089 1090 1090 1091 spin_lock_bh(&icmp6_dst_lock); 1091 1092 rt->dst.next = icmp6_dst_gc_list; ··· 1266 1261 if (rt->rt6i_dst.plen == 128) 1267 1262 rt->dst.flags |= DST_HOST; 1268 1263 1264 + if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) { 1265 + u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); 1266 + if (!metrics) { 1267 + err = -ENOMEM; 1268 + goto out; 1269 + } 1270 + dst_init_metrics(&rt->dst, metrics, 0); 1271 + } 1269 1272 #ifdef CONFIG_IPV6_SUBTREES 1270 1273 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len); 1271 1274 rt->rt6i_src.plen = cfg->fc_src_len; ··· 1620 1607 if (on_link) 1621 1608 nrt->rt6i_flags &= ~RTF_GATEWAY; 1622 1609 1623 - nrt->rt6i_dst.plen = 128; 1624 - nrt->dst.flags |= DST_HOST; 1625 - 1626 1610 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key); 1627 1611 dst_set_neighbour(&nrt->dst, neigh_clone(neigh)); 1628 1612 ··· 1764 1754 if (rt) { 1765 1755 rt->dst.input = ort->dst.input; 1766 1756 rt->dst.output = ort->dst.output; 1757 + rt->dst.flags |= DST_HOST; 1767 1758 1768 1759 ipv6_addr_copy(&rt->rt6i_dst.addr, dest); 1769 - rt->rt6i_dst.plen = ort->rt6i_dst.plen; 1760 + rt->rt6i_dst.plen = 128; 1770 1761 dst_copy_metrics(&rt->dst, &ort->dst); 1771 1762 rt->dst.error = ort->dst.error; 1772 1763 rt->rt6i_idev = ort->rt6i_idev;
+3 -28
net/ipv6/tcp_ipv6.c
··· 531 531 return tcp_v6_send_synack(sk, req, rvp); 532 532 } 533 533 534 - static inline void syn_flood_warning(struct sk_buff *skb) 535 - { 536 - #ifdef CONFIG_SYN_COOKIES 537 - if (sysctl_tcp_syncookies) 538 - printk(KERN_INFO 539 - "TCPv6: Possible SYN flooding on port %d. " 540 - "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest)); 541 - else 542 - #endif 543 - printk(KERN_INFO 544 - "TCPv6: Possible SYN flooding on port %d. " 545 - "Dropping request.\n", ntohs(tcp_hdr(skb)->dest)); 546 - } 547 - 548 534 static void tcp_v6_reqsk_destructor(struct request_sock *req) 549 535 { 550 536 kfree_skb(inet6_rsk(req)->pktopts); ··· 1165 1179 struct tcp_sock *tp = tcp_sk(sk); 1166 1180 __u32 isn = TCP_SKB_CB(skb)->when; 1167 1181 struct dst_entry *dst = NULL; 1168 - #ifdef CONFIG_SYN_COOKIES 1169 1182 int want_cookie = 0; 1170 - #else 1171 - #define want_cookie 0 1172 - #endif 1173 1183 1174 1184 if (skb->protocol == htons(ETH_P_IP)) 1175 1185 return tcp_v4_conn_request(sk, skb); ··· 1174 1192 goto drop; 1175 1193 1176 1194 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1177 - if (net_ratelimit()) 1178 - syn_flood_warning(skb); 1179 - #ifdef CONFIG_SYN_COOKIES 1180 - if (sysctl_tcp_syncookies) 1181 - want_cookie = 1; 1182 - else 1183 - #endif 1184 - goto drop; 1195 + want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6"); 1196 + if (!want_cookie) 1197 + goto drop; 1185 1198 } 1186 1199 1187 1200 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) ··· 1226 1249 while (l-- > 0) 1227 1250 *c++ ^= *hash_location++; 1228 1251 1229 - #ifdef CONFIG_SYN_COOKIES 1230 1252 want_cookie = 0; /* not our kind of cookie */ 1231 - #endif 1232 1253 tmp_ext.cookie_out_never = 0; /* false */ 1233 1254 tmp_ext.cookie_plus = tmp_opt.cookie_plus; 1234 1255 } else if (!tp->rx_opt.cookie_in_always) {
+2 -2
net/ipv6/udp.c
··· 1090 1090 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1091 1091 opt->tot_len = sizeof(*opt); 1092 1092 1093 - err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, 1094 - &tclass, &dontfrag); 1093 + err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 1094 + &hlimit, &tclass, &dontfrag); 1095 1095 if (err < 0) { 1096 1096 fl6_sock_release(flowlabel); 1097 1097 return err;
+3 -3
net/irda/irsysctl.c
··· 40 40 extern int sysctl_fast_poll_increase; 41 41 extern char sysctl_devname[]; 42 42 extern int sysctl_max_baud_rate; 43 - extern int sysctl_min_tx_turn_time; 44 - extern int sysctl_max_tx_data_size; 45 - extern int sysctl_max_tx_window; 43 + extern unsigned int sysctl_min_tx_turn_time; 44 + extern unsigned int sysctl_max_tx_data_size; 45 + extern unsigned int sysctl_max_tx_window; 46 46 extern int sysctl_max_noreply_time; 47 47 extern int sysctl_warn_noreply_time; 48 48 extern int sysctl_lap_keepalive_time;
+3 -3
net/irda/qos.c
··· 60 60 * Default is 10us which means using the unmodified value given by the 61 61 * peer except if it's 0 (0 is likely a bug in the other stack). 62 62 */ 63 - unsigned sysctl_min_tx_turn_time = 10; 63 + unsigned int sysctl_min_tx_turn_time = 10; 64 64 /* 65 65 * Maximum data size to be used in transmission in payload of LAP frame. 66 66 * There is a bit of confusion in the IrDA spec: ··· 75 75 * bytes frames or all negotiated frame sizes, but you can use the sysctl 76 76 * to play with this value anyway. 77 77 * Jean II */ 78 - unsigned sysctl_max_tx_data_size = 2042; 78 + unsigned int sysctl_max_tx_data_size = 2042; 79 79 /* 80 80 * Maximum transmit window, i.e. number of LAP frames between turn-around. 81 81 * This allows us to override what the peer told us. Some peers are buggy and 82 82 * don't always support what they tell us. 83 83 * Jean II */ 84 - unsigned sysctl_max_tx_window = 7; 84 + unsigned int sysctl_max_tx_window = 7; 85 85 86 86 static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get); 87 87 static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
+1 -1
net/mac80211/sta_info.c
··· 665 665 BUG_ON(!sdata->bss); 666 666 667 667 atomic_dec(&sdata->bss->num_sta_ps); 668 - __sta_info_clear_tim_bit(sdata->bss, sta); 668 + sta_info_clear_tim_bit(sta); 669 669 } 670 670 671 671 local->num_sta--;
+1
net/netfilter/nf_conntrack_pptp.c
··· 364 364 break; 365 365 366 366 case PPTP_WAN_ERROR_NOTIFY: 367 + case PPTP_SET_LINK_INFO: 367 368 case PPTP_ECHO_REQUEST: 368 369 case PPTP_ECHO_REPLY: 369 370 /* I don't have to explain these ;) */
+3 -3
net/netfilter/nf_conntrack_proto_tcp.c
··· 409 409 if (opsize < 2) /* "silly options" */ 410 410 return; 411 411 if (opsize > length) 412 - break; /* don't parse partial options */ 412 + return; /* don't parse partial options */ 413 413 414 414 if (opcode == TCPOPT_SACK_PERM 415 415 && opsize == TCPOLEN_SACK_PERM) ··· 447 447 BUG_ON(ptr == NULL); 448 448 449 449 /* Fast path for timestamp-only option */ 450 - if (length == TCPOLEN_TSTAMP_ALIGNED*4 450 + if (length == TCPOLEN_TSTAMP_ALIGNED 451 451 && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24) 452 452 | (TCPOPT_NOP << 16) 453 453 | (TCPOPT_TIMESTAMP << 8) ··· 469 469 if (opsize < 2) /* "silly options" */ 470 470 return; 471 471 if (opsize > length) 472 - break; /* don't parse partial options */ 472 + return; /* don't parse partial options */ 473 473 474 474 if (opcode == TCPOPT_SACK 475 475 && opsize >= (TCPOLEN_SACK_BASE
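Two separate problems in conntrack's TCP option walk: a 'break' out of the length checks only left the switch, so the loop kept parsing from a bogus offset after a truncated option ('return' now aborts the scan), and the timestamp fast path compared against TCPOLEN_TSTAMP_ALIGNED*4 even though TCPOLEN_TSTAMP_ALIGNED is already the full padded length of 12 bytes. A standalone sketch of the corrected walk over a raw options buffer (constants mirror the kernel's; the printf stands in for the real flag-setting):

    #include <stdio.h>

    #define TCPOPT_EOL        0
    #define TCPOPT_NOP        1
    #define TCPOPT_SACK_PERM  4
    #define TCPOLEN_SACK_PERM 2

    /* Corrected option walk: any malformed or truncated option aborts
     * parsing entirely rather than resyncing at a bogus offset. */
    static void parse_options(const unsigned char *ptr, int length)
    {
            while (length > 0) {
                    int opcode = *ptr++;
                    int opsize;

                    switch (opcode) {
                    case TCPOPT_EOL:
                            return;
                    case TCPOPT_NOP:
                            length--;
                            continue;
                    default:
                            if (length < 2)
                                    return;
                            opsize = *ptr++;
                            if (opsize < 2)         /* "silly options" */
                                    return;
                            if (opsize > length)
                                    return;         /* partial option */
                            if (opcode == TCPOPT_SACK_PERM &&
                                opsize == TCPOLEN_SACK_PERM)
                                    printf("SACK permitted\n");
                            ptr += opsize - 2;
                            length -= opsize;
                    }
            }
    }

    int main(void)
    {
            /* NOP, NOP, SACK-permitted, then a truncated option:
             * kind 8 claims 10 bytes but only 2 remain, so the walk
             * stops instead of reading past the buffer. */
            const unsigned char opts[] = { 1, 1, 4, 2, 8, 10 };

            parse_options(opts, sizeof(opts));
            return 0;
    }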
+4 -5
net/netfilter/xt_rateest.c
··· 78 78 { 79 79 struct xt_rateest_match_info *info = par->matchinfo; 80 80 struct xt_rateest *est1, *est2; 81 - int ret = false; 81 + int ret = -EINVAL; 82 82 83 83 if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS | 84 84 XT_RATEEST_MATCH_REL)) != 1) ··· 101 101 if (!est1) 102 102 goto err1; 103 103 104 + est2 = NULL; 104 105 if (info->flags & XT_RATEEST_MATCH_REL) { 105 106 est2 = xt_rateest_lookup(info->name2); 106 107 if (!est2) 107 108 goto err2; 108 - } else 109 - est2 = NULL; 110 - 109 + } 111 110 112 111 info->est1 = est1; 113 112 info->est2 = est2; ··· 115 116 err2: 116 117 xt_rateest_put(est1); 117 118 err1: 118 - return -EINVAL; 119 + return ret; 119 120 } 120 121 121 122 static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
+13 -14
net/sched/cls_rsvp.h
··· 425 425 struct rsvp_filter *f, **fp; 426 426 struct rsvp_session *s, **sp; 427 427 struct tc_rsvp_pinfo *pinfo = NULL; 428 - struct nlattr *opt = tca[TCA_OPTIONS-1]; 428 + struct nlattr *opt = tca[TCA_OPTIONS]; 429 429 struct nlattr *tb[TCA_RSVP_MAX + 1]; 430 430 struct tcf_exts e; 431 431 unsigned int h1, h2; ··· 439 439 if (err < 0) 440 440 return err; 441 441 442 - err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &rsvp_ext_map); 442 + err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map); 443 443 if (err < 0) 444 444 return err; 445 445 ··· 449 449 450 450 if (f->handle != handle && handle) 451 451 goto errout2; 452 - if (tb[TCA_RSVP_CLASSID-1]) { 453 - f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); 452 + if (tb[TCA_RSVP_CLASSID]) { 453 + f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); 454 454 tcf_bind_filter(tp, &f->res, base); 455 455 } 456 456 ··· 462 462 err = -EINVAL; 463 463 if (handle) 464 464 goto errout2; 465 - if (tb[TCA_RSVP_DST-1] == NULL) 465 + if (tb[TCA_RSVP_DST] == NULL) 466 466 goto errout2; 467 467 468 468 err = -ENOBUFS; ··· 471 471 goto errout2; 472 472 473 473 h2 = 16; 474 - if (tb[TCA_RSVP_SRC-1]) { 475 - memcpy(f->src, nla_data(tb[TCA_RSVP_SRC-1]), sizeof(f->src)); 474 + if (tb[TCA_RSVP_SRC]) { 475 + memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src)); 476 476 h2 = hash_src(f->src); 477 477 } 478 - if (tb[TCA_RSVP_PINFO-1]) { 479 - pinfo = nla_data(tb[TCA_RSVP_PINFO-1]); 478 + if (tb[TCA_RSVP_PINFO]) { 479 + pinfo = nla_data(tb[TCA_RSVP_PINFO]); 480 480 f->spi = pinfo->spi; 481 481 f->tunnelhdr = pinfo->tunnelhdr; 482 482 } 483 - if (tb[TCA_RSVP_CLASSID-1]) 484 - f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); 483 + if (tb[TCA_RSVP_CLASSID]) 484 + f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); 485 485 486 - dst = nla_data(tb[TCA_RSVP_DST-1]); 486 + dst = nla_data(tb[TCA_RSVP_DST]); 487 487 h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0); 488 488 489 489 err = -ENOMEM; ··· 642 642 return -1; 643 643 } 644 644 645 - static struct tcf_proto_ops RSVP_OPS = { 646 - .next = NULL, 645 + static struct tcf_proto_ops RSVP_OPS __read_mostly = { 647 646 .kind = RSVP_ID, 648 647 .classify = rsvp_classify, 649 648 .init = rsvp_init,
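The '-1' offsets appear to be left over from the old rtattr parsing, which indexed attribute arrays by type minus one; nla_parse_nested() fills tb[] indexed directly by attribute type, so every tb[TCA_...-1] lookup here read a neighboring attribute's slot. A standalone toy model of that indexing (enum abridged from the TCA_RSVP_* list in <linux/pkt_cls.h>):

    #include <stdio.h>

    /* Toy model: nla_parse_nested() fills tb[] indexed directly by
     * attribute type. Enum abridged from <linux/pkt_cls.h>. */
    enum {
            TCA_RSVP_UNSPEC,
            TCA_RSVP_CLASSID,
            TCA_RSVP_DST,
            TCA_RSVP_SRC,
            TCA_RSVP_PINFO,
            __TCA_RSVP_MAX
    };
    #define TCA_RSVP_MAX (__TCA_RSVP_MAX - 1)

    int main(void)
    {
            const char *tb[TCA_RSVP_MAX + 1] = { 0 };

            tb[TCA_RSVP_DST] = "destination attr";   /* parsed by type */

            /* The old "- 1" indexing read the neighboring slot: */
            printf("tb[TCA_RSVP_DST - 1] = %s\n",
                   tb[TCA_RSVP_DST - 1] ? tb[TCA_RSVP_DST - 1] : "(null)");
            printf("tb[TCA_RSVP_DST]     = %s\n", tb[TCA_RSVP_DST]);
            return 0;
    }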
+5
net/sctp/sm_sideeffect.c
··· 1689 1689 case SCTP_CMD_PURGE_ASCONF_QUEUE: 1690 1690 sctp_asconf_queue_teardown(asoc); 1691 1691 break; 1692 + 1693 + case SCTP_CMD_SET_ASOC: 1694 + asoc = cmd->obj.asoc; 1695 + break; 1696 + 1692 1697 default: 1693 1698 pr_warn("Impossible command: %u, %p\n", 1694 1699 cmd->verb, cmd->obj.ptr);
+6
net/sctp/sm_statefuns.c
··· 2047 2047 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); 2048 2048 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); 2049 2049 2050 + /* Restore association pointer to provide SCTP command interpreter 2051 + * with a valid context in case it needs to manipulate 2052 + * the queues. */ 2053 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, 2054 + SCTP_ASOC((struct sctp_association *)asoc)); 2055 + 2050 2056 return retval; 2051 2057 2052 2058 nomem:
+24 -9
sound/core/pcm_lib.c
··· 1761 1761 snd_pcm_uframes_t avail = 0; 1762 1762 long wait_time, tout; 1763 1763 1764 + init_waitqueue_entry(&wait, current); 1765 + set_current_state(TASK_INTERRUPTIBLE); 1766 + add_wait_queue(&runtime->tsleep, &wait); 1767 + 1764 1768 if (runtime->no_period_wakeup) 1765 1769 wait_time = MAX_SCHEDULE_TIMEOUT; 1766 1770 else { ··· 1775 1771 } 1776 1772 wait_time = msecs_to_jiffies(wait_time * 1000); 1777 1773 } 1778 - init_waitqueue_entry(&wait, current); 1779 - add_wait_queue(&runtime->tsleep, &wait); 1774 + 1780 1775 for (;;) { 1781 1776 if (signal_pending(current)) { 1782 1777 err = -ERESTARTSYS; 1783 1778 break; 1784 1779 } 1780 + 1781 + /* 1782 + * We need to check if space became available already 1783 + * (and thus the wakeup happened already) first to close 1784 + * the race of space already having become available. 1785 + * This check must happen after been added to the waitqueue 1786 + * and having current state be INTERRUPTIBLE. 1787 + */ 1788 + if (is_playback) 1789 + avail = snd_pcm_playback_avail(runtime); 1790 + else 1791 + avail = snd_pcm_capture_avail(runtime); 1792 + if (avail >= runtime->twake) 1793 + break; 1785 1794 snd_pcm_stream_unlock_irq(substream); 1786 - tout = schedule_timeout_interruptible(wait_time); 1795 + 1796 + tout = schedule_timeout(wait_time); 1797 + 1787 1798 snd_pcm_stream_lock_irq(substream); 1799 + set_current_state(TASK_INTERRUPTIBLE); 1788 1800 switch (runtime->status->state) { 1789 1801 case SNDRV_PCM_STATE_SUSPENDED: 1790 1802 err = -ESTRPIPE; ··· 1826 1806 err = -EIO; 1827 1807 break; 1828 1808 } 1829 - if (is_playback) 1830 - avail = snd_pcm_playback_avail(runtime); 1831 - else 1832 - avail = snd_pcm_capture_avail(runtime); 1833 - if (avail >= runtime->twake) 1834 - break; 1835 1809 } 1836 1810 _endloop: 1811 + set_current_state(TASK_RUNNING); 1837 1812 remove_wait_queue(&runtime->tsleep, &wait); 1838 1813 *availp = avail; 1839 1814 return err;
+5 -1
sound/pci/hda/hda_codec.c
··· 579 579 return -1; 580 580 } 581 581 recursive++; 582 - for (i = 0; i < nums; i++) 582 + for (i = 0; i < nums; i++) { 583 + unsigned int type = get_wcaps_type(get_wcaps(codec, conn[i])); 584 + if (type == AC_WID_PIN || type == AC_WID_AUD_OUT) 585 + continue; 583 586 if (snd_hda_get_conn_index(codec, conn[i], nid, recursive) >= 0) 584 587 return i; 588 + } 585 589 return -1; 586 590 } 587 591 EXPORT_SYMBOL_HDA(snd_hda_get_conn_index);
+1 -1
sound/pci/hda/patch_cirrus.c
··· 535 535 int index, unsigned int pval, int dir, 536 536 struct snd_kcontrol **kctlp) 537 537 { 538 - char tmp[32]; 538 + char tmp[44]; 539 539 struct snd_kcontrol_new knew = 540 540 HDA_CODEC_VOLUME_IDX(tmp, index, 0, 0, HDA_OUTPUT); 541 541 knew.private_value = pval;
+2 -2
sound/soc/blackfin/bf5xx-ad193x.c
··· 103 103 .cpu_dai_name = "bfin-tdm.0", 104 104 .codec_dai_name ="ad193x-hifi", 105 105 .platform_name = "bfin-tdm-pcm-audio", 106 - .codec_name = "ad193x.5", 106 + .codec_name = "spi0.5", 107 107 .ops = &bf5xx_ad193x_ops, 108 108 }, 109 109 { ··· 112 112 .cpu_dai_name = "bfin-tdm.1", 113 113 .codec_dai_name ="ad193x-hifi", 114 114 .platform_name = "bfin-tdm-pcm-audio", 115 - .codec_name = "ad193x.5", 115 + .codec_name = "spi0.5", 116 116 .ops = &bf5xx_ad193x_ops, 117 117 }, 118 118 };
+3 -3
sound/soc/fsl/mpc5200_dma.c
··· 369 369 .pcm_free = &psc_dma_free, 370 370 }; 371 371 372 - static int mpc5200_hpcd_probe(struct of_device *op) 372 + static int mpc5200_hpcd_probe(struct platform_device *op) 373 373 { 374 374 phys_addr_t fifo; 375 375 struct psc_dma *psc_dma; ··· 487 487 return ret; 488 488 } 489 489 490 - static int mpc5200_hpcd_remove(struct of_device *op) 490 + static int mpc5200_hpcd_remove(struct platform_device *op) 491 491 { 492 492 struct psc_dma *psc_dma = dev_get_drvdata(&op->dev); 493 493 ··· 519 519 static struct platform_driver mpc5200_hpcd_of_driver = { 520 520 .probe = mpc5200_hpcd_probe, 521 521 .remove = mpc5200_hpcd_remove, 522 - .dev = { 522 + .driver = { 523 523 .owner = THIS_MODULE, 524 524 .name = "mpc5200-pcm-audio", 525 525 .of_match_table = mpc5200_hpcd_match,
-1
sound/soc/imx/imx-pcm-fiq.c
··· 240 240 241 241 static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd) 242 242 { 243 - struct snd_card *card = rtd->card->snd_card; 244 243 struct snd_soc_dai *dai = rtd->cpu_dai; 245 244 struct snd_pcm *pcm = rtd->pcm; 246 245 int ret;
+1 -1
sound/soc/kirkwood/kirkwood-i2s.c
··· 424 424 if (!priv->mem) { 425 425 dev_err(&pdev->dev, "request_mem_region failed\n"); 426 426 err = -EBUSY; 427 - goto error_alloc; 427 + goto err_alloc; 428 428 } 429 429 430 430 priv->io = ioremap(priv->mem->start, SZ_16K);
+6 -6
sound/soc/soc-cache.c
··· 203 203 rbnode = rb_entry(node, struct snd_soc_rbtree_node, node); 204 204 for (i = 0; i < rbnode->blklen; ++i) { 205 205 regtmp = rbnode->base_reg + i; 206 - WARN_ON(codec->writable_register && 207 - codec->writable_register(codec, regtmp)); 208 206 val = snd_soc_rbtree_get_register(rbnode, i); 209 207 def = snd_soc_get_cache_val(codec->reg_def_copy, i, 210 208 rbnode->word_size); 211 209 if (val == def) 212 210 continue; 211 + 212 + WARN_ON(!snd_soc_codec_writable_register(codec, regtmp)); 213 213 214 214 codec->cache_bypass = 1; 215 215 ret = snd_soc_write(codec, regtmp, val); ··· 563 563 564 564 lzo_blocks = codec->reg_cache; 565 565 for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) { 566 - WARN_ON(codec->writable_register && 567 - codec->writable_register(codec, i)); 566 + WARN_ON(!snd_soc_codec_writable_register(codec, i)); 568 567 ret = snd_soc_cache_read(codec, i, &val); 569 568 if (ret) 570 569 return ret; ··· 822 823 823 824 codec_drv = codec->driver; 824 825 for (i = 0; i < codec_drv->reg_cache_size; ++i) { 825 - WARN_ON(codec->writable_register && 826 - codec->writable_register(codec, i)); 827 826 ret = snd_soc_cache_read(codec, i, &val); 828 827 if (ret) 829 828 return ret; ··· 829 832 if (snd_soc_get_cache_val(codec->reg_def_copy, 830 833 i, codec_drv->reg_word_size) == val) 831 834 continue; 835 + 836 + WARN_ON(!snd_soc_codec_writable_register(codec, i)); 837 + 832 838 ret = snd_soc_write(codec, i, val); 833 839 if (ret) 834 840 return ret;
+2 -2
sound/soc/soc-core.c
··· 1633 1633 if (codec->readable_register) 1634 1634 return codec->readable_register(codec, reg); 1635 1635 else 1636 - return 0; 1636 + return 1; 1637 1637 } 1638 1638 EXPORT_SYMBOL_GPL(snd_soc_codec_readable_register); 1639 1639 ··· 1651 1651 if (codec->writable_register) 1652 1652 return codec->writable_register(codec, reg); 1653 1653 else 1654 - return 0; 1654 + return 1; 1655 1655 } 1656 1656 EXPORT_SYMBOL_GPL(snd_soc_codec_writable_register); 1657 1657
+1 -1
sound/soc/soc-dapm.c
··· 2763 2763 2764 2764 /** 2765 2765 * snd_soc_dapm_free - free dapm resources 2766 - * @card: SoC device 2766 + * @dapm: DAPM context 2767 2767 * 2768 2768 * Free all dapm widgets and resources. 2769 2769 */
+1 -1
sound/soc/soc-jack.c
··· 105 105 106 106 snd_soc_dapm_sync(dapm); 107 107 108 - snd_jack_report(jack->jack, status); 108 + snd_jack_report(jack->jack, jack->status); 109 109 110 110 out: 111 111 mutex_unlock(&codec->mutex);