Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'imx/devel' into next/devel

+4404 -3016
+13
Documentation/ABI/testing/sysfs-class-scsi_host
··· 1 + What: /sys/class/scsi_host/hostX/isci_id 2 + Date: June 2011 3 + Contact: Dave Jiang <dave.jiang@intel.com> 4 + Description: 5 + This file contains the enumerated host ID for the Intel 6 + SCU controller. The Intel(R) C600 Series Chipset SATA/SAS 7 + Storage Control Unit embeds up to two 4-port controllers in 8 + a single PCI device. The controllers are enumerated in order 9 + which usually means the lowest number scsi_host corresponds 10 + with the first controller, but this association is not 11 + guaranteed. The 'isci_id' attribute unambiguously identifies 12 + the controller index: '0' for the first controller, 13 + '1' for the second.
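Because the scsi_host-to-controller association is explicitly not guaranteed, tooling should read this attribute rather than rely on probe order. A minimal usage sketch (host0 is an arbitrary example host):

  # cat /sys/class/scsi_host/host0/isci_id

This prints '0' for the first embedded controller and '1' for the second.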
+1 -84
Documentation/cgroups/memory.txt
··· 380 380 381 381 5.2 stat file 382 382 383 - 5.2.1 memory.stat file includes following statistics 383 + memory.stat file includes following statistics 384 384 385 385 # per-memory cgroup local status 386 386 cache - # of bytes of page cache memory. ··· 437 437 (Note: file and shmem may be shared among other cgroups. In that case, 438 438 file_mapped is accounted only when the memory cgroup is owner of page 439 439 cache.) 440 - 441 - 5.2.2 memory.vmscan_stat 442 - 443 - memory.vmscan_stat includes statistics information for memory scanning and 444 - freeing, reclaiming. The statistics shows memory scanning information since 445 - memory cgroup creation and can be reset to 0 by writing 0 as 446 - 447 - #echo 0 > ../memory.vmscan_stat 448 - 449 - This file contains following statistics. 450 - 451 - [param]_[file_or_anon]_pages_by_[reason]_[under_heararchy] 452 - [param]_elapsed_ns_by_[reason]_[under_hierarchy] 453 - 454 - For example, 455 - 456 - scanned_file_pages_by_limit indicates the number of scanned 457 - file pages at vmscan. 458 - 459 - Now, 3 parameters are supported 460 - 461 - scanned - the number of pages scanned by vmscan 462 - rotated - the number of pages activated at vmscan 463 - freed - the number of pages freed by vmscan 464 - 465 - If "rotated" is high against scanned/freed, the memcg seems busy. 466 - 467 - Now, 2 reason are supported 468 - 469 - limit - the memory cgroup's limit 470 - system - global memory pressure + softlimit 471 - (global memory pressure not under softlimit is not handled now) 472 - 473 - When under_hierarchy is added in the tail, the number indicates the 474 - total memcg scan of its children and itself. 475 - 476 - elapsed_ns is a elapsed time in nanosecond. This may include sleep time 477 - and not indicates CPU usage. So, please take this as just showing 478 - latency. 479 - 480 - Here is an example. 
481 - 482 - # cat /cgroup/memory/A/memory.vmscan_stat 483 - scanned_pages_by_limit 9471864 484 - scanned_anon_pages_by_limit 6640629 485 - scanned_file_pages_by_limit 2831235 486 - rotated_pages_by_limit 4243974 487 - rotated_anon_pages_by_limit 3971968 488 - rotated_file_pages_by_limit 272006 489 - freed_pages_by_limit 2318492 490 - freed_anon_pages_by_limit 962052 491 - freed_file_pages_by_limit 1356440 492 - elapsed_ns_by_limit 351386416101 493 - scanned_pages_by_system 0 494 - scanned_anon_pages_by_system 0 495 - scanned_file_pages_by_system 0 496 - rotated_pages_by_system 0 497 - rotated_anon_pages_by_system 0 498 - rotated_file_pages_by_system 0 499 - freed_pages_by_system 0 500 - freed_anon_pages_by_system 0 501 - freed_file_pages_by_system 0 502 - elapsed_ns_by_system 0 503 - scanned_pages_by_limit_under_hierarchy 9471864 504 - scanned_anon_pages_by_limit_under_hierarchy 6640629 505 - scanned_file_pages_by_limit_under_hierarchy 2831235 506 - rotated_pages_by_limit_under_hierarchy 4243974 507 - rotated_anon_pages_by_limit_under_hierarchy 3971968 508 - rotated_file_pages_by_limit_under_hierarchy 272006 509 - freed_pages_by_limit_under_hierarchy 2318492 510 - freed_anon_pages_by_limit_under_hierarchy 962052 511 - freed_file_pages_by_limit_under_hierarchy 1356440 512 - elapsed_ns_by_limit_under_hierarchy 351386416101 513 - scanned_pages_by_system_under_hierarchy 0 514 - scanned_anon_pages_by_system_under_hierarchy 0 515 - scanned_file_pages_by_system_under_hierarchy 0 516 - rotated_pages_by_system_under_hierarchy 0 517 - rotated_anon_pages_by_system_under_hierarchy 0 518 - rotated_file_pages_by_system_under_hierarchy 0 519 - freed_pages_by_system_under_hierarchy 0 520 - freed_anon_pages_by_system_under_hierarchy 0 521 - freed_file_pages_by_system_under_hierarchy 0 522 - elapsed_ns_by_system_under_hierarchy 0 523 440 524 441 5.3 swappiness 525 442
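With memory.vmscan_stat removed, the remaining per-cgroup counters all come from memory.stat. A usage sketch, reusing the /cgroup/memory/A path from the removed example, to read the page-cache byte count:

  # grep ^cache /cgroup/memory/A/memory.stat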
+4 -10
Documentation/hwmon/coretemp
··· 35 35 All Sysfs entries are named with their core_id (represented here by 'X'). 36 36 tempX_input - Core temperature (in millidegrees Celsius). 37 37 tempX_max - All cooling devices should be turned on (on Core2). 38 - Initialized with IA32_THERM_INTERRUPT. When the CPU 39 - temperature reaches this temperature, an interrupt is 40 - generated and tempX_max_alarm is set. 41 - tempX_max_hyst - If the CPU temperature falls below than temperature, 42 - an interrupt is generated and tempX_max_alarm is reset. 43 - tempX_max_alarm - Set if the temperature reaches or exceeds tempX_max. 44 - Reset if the temperature drops to or below tempX_max_hyst. 45 38 tempX_crit - Maximum junction temperature (in millidegrees Celsius). 46 39 tempX_crit_alarm - Set when Out-of-spec bit is set, never clears. 47 40 Correct CPU operation is no longer guaranteed. ··· 42 49 number. For Package temp, this will be "Physical id Y", 43 50 where Y is the package number. 44 51 45 - The TjMax temperature is set to 85 degrees C if undocumented model specific 46 - register (UMSR) 0xee has bit 30 set. If not the TjMax is 100 degrees C as 47 - (sometimes) documented in processor datasheet. 52 + On CPU models which support it, TjMax is read from a model-specific register. 53 + On other models, it is set to an arbitrary value based on weak heuristics. 54 + If these heuristics don't work for you, you can pass the correct TjMax value 55 + as a module parameter (tjmax). 48 56 49 57 Appendix A. Known TjMax lists (TBD): 50 58 Some information comes from ark.intel.com
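As a usage sketch of the new TjMax handling: when the heuristics pick a wrong value, the module can be loaded with an explicit override via the tjmax parameter (the value below is illustrative and assumed to be in degrees Celsius):

  # modprobe coretemp tjmax=100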
+6 -3
Documentation/kernel-parameters.txt
··· 2086 2086 Override pmtimer IOPort with a hex value. 2087 2087 e.g. pmtmr=0x508 2088 2088 2089 - pnp.debug [PNP] 2090 - Enable PNP debug messages. This depends on the 2091 - CONFIG_PNP_DEBUG_MESSAGES option. 2089 + pnp.debug=1 [PNP] 2090 + Enable PNP debug messages (depends on the 2091 + CONFIG_PNP_DEBUG_MESSAGES option). Change at run-time 2092 + via /sys/module/pnp/parameters/debug. We always show 2093 + current resource usage; turning this on also shows 2094 + possible settings and some assignment information. 2092 2095 2093 2096 pnpacpi= [ACPI] 2094 2097 { off }
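A usage sketch of the run-time toggle described above (writing 0 turns the messages back off):

  # echo 1 > /sys/module/pnp/parameters/debug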
+2 -1
Documentation/networking/dmfe.txt
··· 1 + Note: This driver doesn't have a maintainer. 2 + 1 3 Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver for Linux. 2 4 3 5 This program is free software; you can redistribute it and/or ··· 57 55 Authors: 58 56 59 57 Sten Wang <sten_wang@davicom.com.tw > : Original Author 60 - Tobias Ringstrom <tori@unhappy.mine.nu> : Current Maintainer 61 58 62 59 Contributors: 63 60
+2 -2
Documentation/networking/ip-sysctl.txt
··· 1042 1042 The functional behaviour for certain settings is different 1043 1043 depending on whether local forwarding is enabled or not. 1044 1044 1045 - accept_ra - BOOLEAN 1045 + accept_ra - INTEGER 1046 1046 Accept Router Advertisements; autoconfigure using them. 1047 1047 1048 1048 Possible values are: ··· 1106 1106 The amount of Duplicate Address Detection probes to send. 1107 1107 Default: 1 1108 1108 1109 - forwarding - BOOLEAN 1109 + forwarding - INTEGER 1110 1110 Configure interface-specific Host/Router behaviour. 1111 1111 1112 1112 Note: It is recommended to have the same setting on all
+1 -1
Documentation/networking/scaling.txt
··· 243 243 244 244 The number of entries in the per-queue flow table are set through: 245 245 246 - /sys/class/net/<dev>/queues/tx-<n>/rps_flow_cnt 246 + /sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt 247 247 248 248 == Suggested Configuration 249 249
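A configuration sketch with the corrected rx-<n> path (device name and table size are illustrative):

  # echo 2048 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt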
+4 -3
Documentation/vm/transhuge.txt
··· 123 123 khugepaged runs usually at low frequency so while one may not want to 124 124 invoke defrag algorithms synchronously during the page faults, it 125 125 should be worth invoking defrag at least in khugepaged. However it's 126 - also possible to disable defrag in khugepaged: 126 + also possible to disable defrag in khugepaged by writing 0 or enable 127 + defrag in khugepaged by writing 1: 127 128 128 - echo yes >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag 129 - echo no >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag 129 + echo 0 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag 130 + echo 1 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag 130 131 131 132 You can also control how many pages khugepaged should scan at each 132 133 pass:
+18 -6
MAINTAINERS
··· 1278 1278 ATLX ETHERNET DRIVERS 1279 1279 M: Jay Cliburn <jcliburn@gmail.com> 1280 1280 M: Chris Snook <chris.snook@gmail.com> 1281 - M: Jie Yang <jie.yang@atheros.com> 1282 1281 L: netdev@vger.kernel.org 1283 1282 W: http://sourceforge.net/projects/atl1 1284 1283 W: http://atl1.sourceforge.net ··· 1573 1574 1574 1575 BROCADE BNA 10 GIGABIT ETHERNET DRIVER 1575 1576 M: Rasesh Mody <rmody@brocade.com> 1576 - M: Debashis Dutt <ddutt@brocade.com> 1577 1577 L: netdev@vger.kernel.org 1578 1578 S: Supported 1579 1579 F: drivers/net/bna/ ··· 1756 1758 1757 1759 CISCO VIC ETHERNET NIC DRIVER 1758 1760 M: Christian Benvenuti <benve@cisco.com> 1759 - M: Vasanthy Kolluri <vkolluri@cisco.com> 1760 1761 M: Roopa Prabhu <roprabhu@cisco.com> 1761 1762 M: David Wang <dwang2@cisco.com> 1762 1763 S: Supported ··· 3259 3262 F: drivers/input/input-mt.c 3260 3263 K: \b(ABS|SYN)_MT_ 3261 3264 3265 + INTEL C600 SERIES SAS CONTROLLER DRIVER 3266 + M: Intel SCU Linux support <intel-linux-scu@intel.com> 3267 + M: Dan Williams <dan.j.williams@intel.com> 3268 + M: Dave Jiang <dave.jiang@intel.com> 3269 + M: Ed Nadolski <edmund.nadolski@intel.com> 3270 + L: linux-scsi@vger.kernel.org 3271 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci.git 3272 + S: Maintained 3273 + F: drivers/scsi/isci/ 3274 + F: firmware/isci/ 3275 + 3262 3276 INTEL IDLE DRIVER 3263 3277 M: Len Brown <lenb@kernel.org> 3264 3278 L: linux-pm@lists.linux-foundation.org ··· 4412 4404 L: coreteam@netfilter.org 4413 4405 W: http://www.netfilter.org/ 4414 4406 W: http://www.iptables.org/ 4415 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git 4407 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git 4408 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git 4416 4409 S: Supported 4417 4410 F: include/linux/netfilter* 4418 4411 F: include/linux/netfilter/ ··· 4783 4774 4784 4775 OSD LIBRARY and FILESYSTEM 4785 4776 M: Boaz Harrosh <bharrosh@panasas.com> 4786 - M: Benny Halevy <bhalevy@panasas.com> 4777 + M: Benny Halevy <bhalevy@tonian.com> 4787 4778 L: osd-dev@open-osd.org 4788 4779 W: http://open-osd.org 4789 4780 T: git git://git.open-osd.org/open-osd.git ··· 6374 6365 F: arch/arm/mach-tegra 6375 6366 6376 6367 TEHUTI ETHERNET DRIVER 6377 - M: Alexander Indenbaum <baum@tehutinetworks.net> 6378 6368 M: Andy Gospodarek <andy@greyhouse.net> 6379 6369 L: netdev@vger.kernel.org 6380 6370 S: Supported ··· 7208 7200 S: Supported 7209 7201 F: Documentation/hwmon/wm83?? 7210 7202 F: drivers/leds/leds-wm83*.c 7203 + F: drivers/input/misc/wm831x-on.c 7204 + F: drivers/input/touchscreen/wm831x-ts.c 7205 + F: drivers/input/touchscreen/wm97*.c 7211 7206 F: drivers/mfd/wm8*.c 7212 7207 F: drivers/power/wm83*.c 7213 7208 F: drivers/rtc/rtc-wm83*.c ··· 7220 7209 F: include/linux/mfd/wm831x/ 7221 7210 F: include/linux/mfd/wm8350/ 7222 7211 F: include/linux/mfd/wm8400* 7212 + F: include/linux/wm97xx.h 7223 7213 F: include/sound/wm????.h 7224 7214 F: sound/soc/codecs/wm* 7225 7215
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 1 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc6 4 + EXTRAVERSION = -rc9 5 5 NAME = "Divemaster Edition" 6 6 7 7 # *DOCUMENTATION*
+1 -1
arch/alpha/Kconfig
··· 51 51 def_bool y 52 52 53 53 config GENERIC_GPIO 54 - def_bool y 54 + bool 55 55 56 56 config ZONE_DMA 57 57 bool
+14
arch/arm/Kconfig
··· 1284 1284 processor into full low interrupt latency mode. ARM11MPCore 1285 1285 is not affected. 1286 1286 1287 + config ARM_ERRATA_764369 1288 + bool "ARM errata: Data cache line maintenance operation by MVA may not succeed" 1289 + depends on CPU_V7 && SMP 1290 + help 1291 + This option enables the workaround for erratum 764369 1292 + affecting Cortex-A9 MPCore with two or more processors (all 1293 + current revisions). Under certain timing circumstances, a data 1294 + cache line maintenance operation by MVA targeting an Inner 1295 + Shareable memory region may fail to proceed up to either the 1296 + Point of Coherency or to the Point of Unification of the 1297 + system. This workaround adds a DSB instruction before the 1298 + relevant cache maintenance functions and sets a specific bit 1299 + in the diagnostic control register of the SCU. 1300 + 1287 1301 endmenu 1288 1302 1289 1303 source "arch/arm/common/Kconfig"
+6 -6
arch/arm/boot/dts/tegra-harmony.dts
··· 57 57 }; 58 58 59 59 sdhci@c8000200 { 60 - gpios = <&gpio 69 0>, /* cd, gpio PI5 */ 61 - <&gpio 57 0>, /* wp, gpio PH1 */ 62 - <&gpio 155 0>; /* power, gpio PT3 */ 60 + cd-gpios = <&gpio 69 0>; /* gpio PI5 */ 61 + wp-gpios = <&gpio 57 0>; /* gpio PH1 */ 62 + power-gpios = <&gpio 155 0>; /* gpio PT3 */ 63 63 }; 64 64 65 65 sdhci@c8000600 { 66 - gpios = <&gpio 58 0>, /* cd, gpio PH2 */ 67 - <&gpio 59 0>, /* wp, gpio PH3 */ 68 - <&gpio 70 0>; /* power, gpio PI6 */ 66 + cd-gpios = <&gpio 58 0>; /* gpio PH2 */ 67 + wp-gpios = <&gpio 59 0>; /* gpio PH3 */ 68 + power-gpios = <&gpio 70 0>; /* gpio PI6 */ 69 69 }; 70 70 };
+3 -3
arch/arm/boot/dts/tegra-seaboard.dts
··· 21 21 }; 22 22 23 23 sdhci@c8000400 { 24 - gpios = <&gpio 69 0>, /* cd, gpio PI5 */ 25 - <&gpio 57 0>, /* wp, gpio PH1 */ 26 - <&gpio 70 0>; /* power, gpio PI6 */ 24 + cd-gpios = <&gpio 69 0>; /* gpio PI5 */ 25 + wp-gpios = <&gpio 57 0>; /* gpio PH1 */ 26 + power-gpios = <&gpio 70 0>; /* gpio PI6 */ 27 27 }; 28 28 };
+1
arch/arm/configs/mxs_defconfig
··· 26 26 CONFIG_MACH_MX28EVK=y 27 27 CONFIG_MACH_STMP378X_DEVB=y 28 28 CONFIG_MACH_TX28=y 29 + CONFIG_MACH_M28EVK=y 29 30 # CONFIG_ARM_THUMB is not set 30 31 CONFIG_NO_HZ=y 31 32 CONFIG_HIGH_RES_TIMERS=y
+17 -17
arch/arm/include/asm/futex.h
··· 25 25 26 26 #ifdef CONFIG_SMP 27 27 28 - #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 28 + #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ 29 29 smp_mb(); \ 30 30 __asm__ __volatile__( \ 31 - "1: ldrex %1, [%2]\n" \ 31 + "1: ldrex %1, [%3]\n" \ 32 32 " " insn "\n" \ 33 - "2: strex %1, %0, [%2]\n" \ 34 - " teq %1, #0\n" \ 33 + "2: strex %2, %0, [%3]\n" \ 34 + " teq %2, #0\n" \ 35 35 " bne 1b\n" \ 36 36 " mov %0, #0\n" \ 37 - __futex_atomic_ex_table("%4") \ 38 - : "=&r" (ret), "=&r" (oldval) \ 37 + __futex_atomic_ex_table("%5") \ 38 + : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ 39 39 : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ 40 40 : "cc", "memory") 41 41 ··· 73 73 #include <linux/preempt.h> 74 74 #include <asm/domain.h> 75 75 76 - #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 76 + #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ 77 77 __asm__ __volatile__( \ 78 - "1: " T(ldr) " %1, [%2]\n" \ 78 + "1: " T(ldr) " %1, [%3]\n" \ 79 79 " " insn "\n" \ 80 - "2: " T(str) " %0, [%2]\n" \ 80 + "2: " T(str) " %0, [%3]\n" \ 81 81 " mov %0, #0\n" \ 82 - __futex_atomic_ex_table("%4") \ 83 - : "=&r" (ret), "=&r" (oldval) \ 82 + __futex_atomic_ex_table("%5") \ 83 + : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ 84 84 : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ 85 85 : "cc", "memory") 86 86 ··· 117 117 int cmp = (encoded_op >> 24) & 15; 118 118 int oparg = (encoded_op << 8) >> 20; 119 119 int cmparg = (encoded_op << 20) >> 20; 120 - int oldval = 0, ret; 120 + int oldval = 0, ret, tmp; 121 121 122 122 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) 123 123 oparg = 1 << oparg; ··· 129 129 130 130 switch (op) { 131 131 case FUTEX_OP_SET: 132 - __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg); 132 + __futex_atomic_op("mov %0, %4", ret, oldval, tmp, uaddr, oparg); 133 133 break; 134 134 case FUTEX_OP_ADD: 135 - __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg); 135 + __futex_atomic_op("add %0, %1, %4", ret, oldval, tmp, uaddr, oparg); 136 136 break; 137 137 case FUTEX_OP_OR: 138 - __futex_atomic_op("orr %0, %1, %3", ret, oldval, uaddr, oparg); 138 + __futex_atomic_op("orr %0, %1, %4", ret, oldval, tmp, uaddr, oparg); 139 139 break; 140 140 case FUTEX_OP_ANDN: 141 - __futex_atomic_op("and %0, %1, %3", ret, oldval, uaddr, ~oparg); 141 + __futex_atomic_op("and %0, %1, %4", ret, oldval, tmp, uaddr, ~oparg); 142 142 break; 143 143 case FUTEX_OP_XOR: 144 - __futex_atomic_op("eor %0, %1, %3", ret, oldval, uaddr, oparg); 144 + __futex_atomic_op("eor %0, %1, %4", ret, oldval, tmp, uaddr, oparg); 145 145 break; 146 146 default: 147 147 ret = -ENOSYS;
+2 -2
arch/arm/include/asm/unistd.h
··· 478 478 /* 479 479 * Unimplemented (or alternatively implemented) syscalls 480 480 */ 481 - #define __IGNORE_fadvise64_64 1 482 - #define __IGNORE_migrate_pages 1 481 + #define __IGNORE_fadvise64_64 482 + #define __IGNORE_migrate_pages 483 483 484 484 #endif /* __KERNEL__ */ 485 485 #endif /* __ASM_ARM_UNISTD_H */
+10
arch/arm/kernel/smp_scu.c
··· 13 13 14 14 #include <asm/smp_scu.h> 15 15 #include <asm/cacheflush.h> 16 + #include <asm/cputype.h> 16 17 17 18 #define SCU_CTRL 0x00 18 19 #define SCU_CONFIG 0x04 ··· 37 36 void __init scu_enable(void __iomem *scu_base) 38 37 { 39 38 u32 scu_ctrl; 39 + 40 + #ifdef CONFIG_ARM_ERRATA_764369 41 + /* Cortex-A9 only */ 42 + if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) { 43 + scu_ctrl = __raw_readl(scu_base + 0x30); 44 + if (!(scu_ctrl & 1)) 45 + __raw_writel(scu_ctrl | 0x1, scu_base + 0x30); 46 + } 47 + #endif 40 48 41 49 scu_ctrl = __raw_readl(scu_base + SCU_CTRL); 42 50 /* already enabled? */
+12 -3
arch/arm/kernel/vmlinux.lds.S
··· 23 23 24 24 #if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK) 25 25 #define ARM_EXIT_KEEP(x) x 26 + #define ARM_EXIT_DISCARD(x) 26 27 #else 27 28 #define ARM_EXIT_KEEP(x) 29 + #define ARM_EXIT_DISCARD(x) x 28 30 #endif 29 31 30 32 OUTPUT_ARCH(arm) ··· 41 39 SECTIONS 42 40 { 43 41 /* 42 + * XXX: The linker does not define how output sections are 43 + * assigned to input sections when there are multiple statements 44 + * matching the same input section name. There is no documented 45 + * order of matching. 46 + * 44 47 * unwind exit sections must be discarded before the rest of the 45 48 * unwind sections get included. 46 49 */ ··· 54 47 *(.ARM.extab.exit.text) 55 48 ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) 56 49 ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) 50 + ARM_EXIT_DISCARD(EXIT_TEXT) 51 + ARM_EXIT_DISCARD(EXIT_DATA) 52 + EXIT_CALL 57 53 #ifndef CONFIG_HOTPLUG 58 54 *(.ARM.exidx.devexit.text) 59 55 *(.ARM.extab.devexit.text) ··· 68 58 #ifndef CONFIG_SMP_ON_UP 69 59 *(.alt.smp.init) 70 60 #endif 61 + *(.discard) 62 + *(.discard.*) 71 63 } 72 64 73 65 #ifdef CONFIG_XIP_KERNEL ··· 291 279 292 280 STABS_DEBUG 293 281 .comment 0 : { *(.comment) } 294 - 295 - /* Default discards */ 296 - DISCARDS 297 282 } 298 283 299 284 /*
+1 -1
arch/arm/mach-dove/common.c
··· 158 158 159 159 void __init dove_spi1_init(void) 160 160 { 161 - orion_spi_init(DOVE_SPI1_PHYS_BASE, get_tclk()); 161 + orion_spi_1_init(DOVE_SPI1_PHYS_BASE, get_tclk()); 162 162 } 163 163 164 164 /*****************************************************************************
+3 -5
arch/arm/mach-exynos4/clock.c
··· 899 899 .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 28, .size = 4 }, 900 900 }, { 901 901 .clk = { 902 - .name = "sclk_cam", 903 - .devname = "exynos4-fimc.0", 902 + .name = "sclk_cam0", 904 903 .enable = exynos4_clksrc_mask_cam_ctrl, 905 904 .ctrlbit = (1 << 16), 906 905 }, ··· 908 909 .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 16, .size = 4 }, 909 910 }, { 910 911 .clk = { 911 - .name = "sclk_cam", 912 - .devname = "exynos4-fimc.1", 912 + .name = "sclk_cam1", 913 913 .enable = exynos4_clksrc_mask_cam_ctrl, 914 914 .ctrlbit = (1 << 20), 915 915 }, ··· 1158 1160 1159 1161 vpllsrc = clk_get_rate(&clk_vpllsrc.clk); 1160 1162 vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0), 1161 - __raw_readl(S5P_VPLL_CON1), pll_4650); 1163 + __raw_readl(S5P_VPLL_CON1), pll_4650c); 1162 1164 1163 1165 clk_fout_apll.ops = &exynos4_fout_apll_ops; 1164 1166 clk_fout_mpll.rate = mpll;
+9 -1
arch/arm/mach-exynos4/mct.c
··· 132 132 return ((cycle_t)hi << 32) | lo; 133 133 } 134 134 135 + static void exynos4_frc_resume(struct clocksource *cs) 136 + { 137 + exynos4_mct_frc_start(0, 0); 138 + } 139 + 135 140 struct clocksource mct_frc = { 136 141 .name = "mct-frc", 137 142 .rating = 400, 138 143 .read = exynos4_frc_read, 139 144 .mask = CLOCKSOURCE_MASK(64), 140 145 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 146 + .resume = exynos4_frc_resume, 141 147 }; 142 148 143 149 static void __init exynos4_clocksource_init(void) ··· 395 389 } 396 390 397 391 /* Setup the local clock events for a CPU */ 398 - void __cpuinit local_timer_setup(struct clock_event_device *evt) 392 + int __cpuinit local_timer_setup(struct clock_event_device *evt) 399 393 { 400 394 exynos4_mct_tick_init(evt); 395 + 396 + return 0; 401 397 } 402 398 403 399 int local_timer_ack(void)
+2
arch/arm/mach-exynos4/platsmp.c
··· 106 106 */ 107 107 spin_lock(&boot_lock); 108 108 spin_unlock(&boot_lock); 109 + 110 + set_cpu_online(cpu, true); 109 111 } 110 112 111 113 int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+6 -5
arch/arm/mach-exynos4/setup-keypad.c
··· 19 19 20 20 if (rows > 8) { 21 21 /* Set all the necessary GPX2 pins: KP_ROW[0~7] */ 22 - s3c_gpio_cfgrange_nopull(EXYNOS4_GPX2(0), 8, S3C_GPIO_SFN(3)); 22 + s3c_gpio_cfgall_range(EXYNOS4_GPX2(0), 8, S3C_GPIO_SFN(3), 23 + S3C_GPIO_PULL_UP); 23 24 24 25 /* Set all the necessary GPX3 pins: KP_ROW[8~] */ 25 - s3c_gpio_cfgrange_nopull(EXYNOS4_GPX3(0), (rows - 8), 26 - S3C_GPIO_SFN(3)); 26 + s3c_gpio_cfgall_range(EXYNOS4_GPX3(0), (rows - 8), 27 + S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP); 27 28 } else { 28 29 /* Set all the necessary GPX2 pins: KP_ROW[x] */ 29 - s3c_gpio_cfgrange_nopull(EXYNOS4_GPX2(0), rows, 30 - S3C_GPIO_SFN(3)); 30 + s3c_gpio_cfgall_range(EXYNOS4_GPX2(0), rows, S3C_GPIO_SFN(3), 31 + S3C_GPIO_PULL_UP); 31 32 } 32 33 33 34 /* Set all the necessary GPX1 pins to special-function 3: KP_COL[x] */
+2
arch/arm/mach-integrator/integrator_ap.c
··· 32 32 #include <linux/interrupt.h> 33 33 #include <linux/io.h> 34 34 #include <linux/mtd/physmap.h> 35 + #include <video/vga.h> 35 36 36 37 #include <mach/hardware.h> 37 38 #include <mach/platform.h> ··· 155 154 static void __init ap_map_io(void) 156 155 { 157 156 iotable_init(ap_io_desc, ARRAY_SIZE(ap_io_desc)); 157 + vga_base = PCI_MEMORY_VADDR; 158 158 } 159 159 160 160 #define INTEGRATOR_SC_VALID_INT 0x003fffff
-2
arch/arm/mach-integrator/pci_v3.c
··· 27 27 #include <linux/spinlock.h> 28 28 #include <linux/init.h> 29 29 #include <linux/io.h> 30 - #include <video/vga.h> 31 30 32 31 #include <mach/hardware.h> 33 32 #include <mach/platform.h> ··· 504 505 505 506 pcibios_min_io = 0x6000; 506 507 pcibios_min_mem = 0x00100000; 507 - vga_base = PCI_MEMORY_VADDR; 508 508 509 509 /* 510 510 * Hook in our fault handler for PCI errors
+1
arch/arm/mach-mx5/board-mx53_ard.c
··· 234 234 imx53_add_imx_i2c(1, &mx53_ard_i2c2_data); 235 235 imx53_add_imx_i2c(2, &mx53_ard_i2c3_data); 236 236 imx_add_gpio_keys(&ard_button_data); 237 + imx53_add_ahci_imx(); 237 238 } 238 239 239 240 static void __init mx53_ard_timer_init(void)
+1
arch/arm/mach-mx5/board-mx53_loco.c
··· 293 293 imx53_add_sdhci_esdhc_imx(2, &mx53_loco_sd3_data); 294 294 imx_add_gpio_keys(&loco_button_data); 295 295 gpio_led_register_device(-1, &mx53loco_leds_data); 296 + imx53_add_ahci_imx(); 296 297 } 297 298 298 299 static void __init mx53_loco_timer_init(void)
+16
arch/arm/mach-mx5/board-mx53_smd.c
··· 35 35 #include "devices-imx53.h" 36 36 37 37 #define SMD_FEC_PHY_RST IMX_GPIO_NR(7, 6) 38 + #define MX53_SMD_SATA_PWR_EN IMX_GPIO_NR(3, 3) 38 39 39 40 static iomux_v3_cfg_t mx53_smd_pads[] = { 40 41 MX53_PAD_CSI0_DAT10__UART1_TXD_MUX, ··· 112 111 .bitrate = 100000, 113 112 }; 114 113 114 + static inline void mx53_smd_ahci_pwr_on(void) 115 + { 116 + int ret; 117 + 118 + /* Enable SATA PWR */ 119 + ret = gpio_request_one(MX53_SMD_SATA_PWR_EN, 120 + GPIOF_DIR_OUT | GPIOF_INIT_HIGH, "ahci-sata-pwr"); 121 + if (ret) { 122 + pr_err("failed to enable SATA_PWR_EN: %d\n", ret); 123 + return; 124 + } 125 + } 126 + 115 127 static void __init mx53_smd_board_init(void) 116 128 { 117 129 imx53_soc_init(); ··· 139 125 imx53_add_sdhci_esdhc_imx(0, NULL); 140 126 imx53_add_sdhci_esdhc_imx(1, NULL); 141 127 imx53_add_sdhci_esdhc_imx(2, NULL); 128 + mx53_smd_ahci_pwr_on(); 129 + imx53_add_ahci_imx(); 142 130 } 143 131 144 132 static void __init mx53_smd_timer_init(void)
+19
arch/arm/mach-mx5/clock-mx51-mx53.c
··· 1401 1401 .secondary = &esdhc4_ipg_clk, 1402 1402 }; 1403 1403 1404 + static struct clk sata_clk = { 1405 + .parent = &ipg_clk, 1406 + .enable = _clk_max_enable, 1407 + .enable_reg = MXC_CCM_CCGR4, 1408 + .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET, 1409 + .disable = _clk_max_disable, 1410 + }; 1411 + 1412 + static struct clk ahci_phy_clk = { 1413 + .parent = &usb_phy1_clk, 1414 + }; 1415 + 1416 + static struct clk ahci_dma_clk = { 1417 + .parent = &ahb_clk, 1418 + }; 1419 + 1404 1420 DEFINE_CLOCK(mipi_esc_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG5_OFFSET, NULL, NULL, NULL, &pll2_sw_clk); 1405 1421 DEFINE_CLOCK(mipi_hsc2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG4_OFFSET, NULL, NULL, &mipi_esc_clk, &pll2_sw_clk); 1406 1422 DEFINE_CLOCK(mipi_hsc1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG3_OFFSET, NULL, NULL, &mipi_hsc2_clk, &pll2_sw_clk); ··· 1529 1513 _REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk) 1530 1514 _REGISTER_CLOCK("imx-keypad", NULL, dummy_clk) 1531 1515 _REGISTER_CLOCK("pata_imx", NULL, pata_clk) 1516 + _REGISTER_CLOCK("imx53-ahci.0", "ahci", sata_clk) 1517 + _REGISTER_CLOCK("imx53-ahci.0", "ahci_phy", ahci_phy_clk) 1518 + _REGISTER_CLOCK("imx53-ahci.0", "ahci_dma", ahci_dma_clk) 1532 1519 }; 1533 1520 1534 1521 static void clk_tree_init(void)
+2
arch/arm/mach-mx5/devices-imx53.h
··· 44 44 extern const struct imx_pata_imx_data imx53_pata_imx_data; 45 45 #define imx53_add_pata_imx() \ 46 46 imx_add_pata_imx(&imx53_pata_imx_data) 47 + 48 + extern struct platform_device *__init imx53_add_ahci_imx(void);
+17
arch/arm/mach-mxs/Kconfig
··· 70 70 select MXS_HAVE_PLATFORM_MXS_PWM 71 71 select MXS_HAVE_PLATFORM_RTC_STMP3XXX 72 72 73 + config MODULE_M28 74 + bool 75 + select SOC_IMX28 76 + select LEDS_GPIO_REGISTER 77 + select MXS_HAVE_AMBA_DUART 78 + select MXS_HAVE_PLATFORM_AUART 79 + select MXS_HAVE_PLATFORM_FEC 80 + select MXS_HAVE_PLATFORM_FLEXCAN 81 + select MXS_HAVE_PLATFORM_MXS_I2C 82 + select MXS_HAVE_PLATFORM_MXS_MMC 83 + select MXS_HAVE_PLATFORM_MXSFB 84 + select MXS_OCOTP 85 + 73 86 config MACH_TX28 74 87 bool "Ka-Ro TX28 module" 75 88 select MODULE_TX28 89 + 90 + config MACH_M28EVK 91 + bool "Support DENX M28EVK Platform" 92 + select MODULE_M28 76 93 77 94 endif
+1
arch/arm/mach-mxs/Makefile
··· 10 10 obj-$(CONFIG_MACH_STMP378X_DEVB) += mach-stmp378x_devb.o 11 11 obj-$(CONFIG_MACH_MX23EVK) += mach-mx23evk.o 12 12 obj-$(CONFIG_MACH_MX28EVK) += mach-mx28evk.o 13 + obj-$(CONFIG_MACH_M28EVK) += mach-m28evk.o 13 14 obj-$(CONFIG_MODULE_TX28) += module-tx28.o 14 15 obj-$(CONFIG_MACH_TX28) += mach-tx28.o 15 16
+11 -5
arch/arm/mach-mxs/clock-mx28.c
··· 740 740 __raw_writel(BM_CLKCTRL_CPU_INTERRUPT_WAIT, 741 741 CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_SET); 742 742 743 - /* Extra fec clock setting */ 744 - reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET); 745 - reg &= ~BM_CLKCTRL_ENET_SLEEP; 746 - reg |= BM_CLKCTRL_ENET_CLK_OUT_EN; 747 - __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET); 743 + /* 744 + * Extra fec clock setting 745 + * The DENX M28 uses an external clock source 746 + * and the clock output must not be enabled 747 + */ 748 + if (!machine_is_m28evk()) { 749 + reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET); 750 + reg &= ~BM_CLKCTRL_ENET_SLEEP; 751 + reg |= BM_CLKCTRL_ENET_CLK_OUT_EN; 752 + __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET); 753 + } 748 754 749 755 /* 750 756 * 480 MHz seems too high to be ssp clock source directly,
+1
arch/arm/mach-mxs/include/mach/mxs.h
··· 33 33 0) 34 34 #define cpu_is_mx28() ( \ 35 35 machine_is_mx28evk() || \ 36 + machine_is_m28evk() || \ 36 37 machine_is_tx28() || \ 37 38 0) 38 39
+1
arch/arm/mach-mxs/include/mach/uncompress.h
··· 63 63 mxs_duart_base = MX23_DUART_BASE_ADDR; 64 64 break; 65 65 case MACH_TYPE_MX28EVK: 66 + case MACH_TYPE_M28EVK: 66 67 case MACH_TYPE_TX28: 67 68 mxs_duart_base = MX28_DUART_BASE_ADDR; 68 69 break;
+366
arch/arm/mach-mxs/mach-m28evk.c
··· 1 + /* 2 + * Copyright (C) 2011 3 + * Stefano Babic, DENX Software Engineering, <sbabic@denx.de> 4 + * 5 + * based on: mach-mx28_evk.c 6 + * Copyright 2010 Freescale Semiconductor, Inc. All Rights Reserved. 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + */ 18 + 19 + #include <linux/delay.h> 20 + #include <linux/platform_device.h> 21 + #include <linux/gpio.h> 22 + #include <linux/leds.h> 23 + #include <linux/irq.h> 24 + #include <linux/clk.h> 25 + #include <linux/i2c.h> 26 + #include <linux/i2c/at24.h> 27 + 28 + #include <asm/mach-types.h> 29 + #include <asm/mach/arch.h> 30 + #include <asm/mach/time.h> 31 + 32 + #include <mach/common.h> 33 + #include <mach/iomux-mx28.h> 34 + 35 + #include "devices-mx28.h" 36 + 37 + #define M28EVK_GPIO_USERLED1 MXS_GPIO_NR(3, 16) 38 + #define M28EVK_GPIO_USERLED2 MXS_GPIO_NR(3, 17) 39 + 40 + #define MX28EVK_BL_ENABLE MXS_GPIO_NR(3, 18) 41 + #define M28EVK_LCD_ENABLE MXS_GPIO_NR(3, 28) 42 + 43 + #define MX28EVK_MMC0_WRITE_PROTECT MXS_GPIO_NR(2, 12) 44 + #define MX28EVK_MMC1_WRITE_PROTECT MXS_GPIO_NR(0, 28) 45 + 46 + static const iomux_cfg_t m28evk_pads[] __initconst = { 47 + /* duart */ 48 + MX28_PAD_AUART0_CTS__DUART_RX | MXS_PAD_CTRL, 49 + MX28_PAD_AUART0_RTS__DUART_TX | MXS_PAD_CTRL, 50 + 51 + /* auart0 */ 52 + MX28_PAD_AUART0_RX__AUART0_RX | MXS_PAD_CTRL, 53 + MX28_PAD_AUART0_TX__AUART0_TX | MXS_PAD_CTRL, 54 + 55 + /* auart3 */ 56 + MX28_PAD_AUART3_RX__AUART3_RX | MXS_PAD_CTRL, 57 + MX28_PAD_AUART3_TX__AUART3_TX | MXS_PAD_CTRL, 58 + MX28_PAD_AUART3_CTS__AUART3_CTS | MXS_PAD_CTRL, 59 + MX28_PAD_AUART3_RTS__AUART3_RTS | MXS_PAD_CTRL, 60 + 61 + #define MXS_PAD_FEC (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP) 62 + /* fec0 */ 63 + MX28_PAD_ENET0_MDC__ENET0_MDC | MXS_PAD_FEC, 64 + MX28_PAD_ENET0_MDIO__ENET0_MDIO | MXS_PAD_FEC, 65 + MX28_PAD_ENET0_RX_EN__ENET0_RX_EN | MXS_PAD_FEC, 66 + MX28_PAD_ENET0_RXD0__ENET0_RXD0 | MXS_PAD_FEC, 67 + MX28_PAD_ENET0_RXD1__ENET0_RXD1 | MXS_PAD_FEC, 68 + MX28_PAD_ENET0_TX_EN__ENET0_TX_EN | MXS_PAD_FEC, 69 + MX28_PAD_ENET0_TXD0__ENET0_TXD0 | MXS_PAD_FEC, 70 + MX28_PAD_ENET0_TXD1__ENET0_TXD1 | MXS_PAD_FEC, 71 + MX28_PAD_ENET_CLK__CLKCTRL_ENET | MXS_PAD_FEC, 72 + /* fec1 */ 73 + MX28_PAD_ENET0_CRS__ENET1_RX_EN | MXS_PAD_FEC, 74 + MX28_PAD_ENET0_RXD2__ENET1_RXD0 | MXS_PAD_FEC, 75 + MX28_PAD_ENET0_RXD3__ENET1_RXD1 | MXS_PAD_FEC, 76 + MX28_PAD_ENET0_COL__ENET1_TX_EN | MXS_PAD_FEC, 77 + MX28_PAD_ENET0_TXD2__ENET1_TXD0 | MXS_PAD_FEC, 78 + MX28_PAD_ENET0_TXD3__ENET1_TXD1 | MXS_PAD_FEC, 79 + 80 + /* flexcan0 */ 81 + MX28_PAD_GPMI_RDY2__CAN0_TX, 82 + MX28_PAD_GPMI_RDY3__CAN0_RX, 83 + 84 + /* flexcan1 */ 85 + MX28_PAD_GPMI_CE2N__CAN1_TX, 86 + MX28_PAD_GPMI_CE3N__CAN1_RX, 87 + 88 + /* I2C */ 89 + MX28_PAD_I2C0_SCL__I2C0_SCL, 90 + MX28_PAD_I2C0_SDA__I2C0_SDA, 91 + 92 + /* mxsfb (lcdif) */ 93 + MX28_PAD_LCD_D00__LCD_D0 | MXS_PAD_CTRL, 94 + MX28_PAD_LCD_D01__LCD_D1 | MXS_PAD_CTRL, 95 + MX28_PAD_LCD_D02__LCD_D2 | MXS_PAD_CTRL, 96 + MX28_PAD_LCD_D03__LCD_D3 | MXS_PAD_CTRL, 97 + MX28_PAD_LCD_D04__LCD_D4 | MXS_PAD_CTRL, 98 + MX28_PAD_LCD_D05__LCD_D5 | 
MXS_PAD_CTRL, 99 + MX28_PAD_LCD_D06__LCD_D6 | MXS_PAD_CTRL, 100 + MX28_PAD_LCD_D07__LCD_D7 | MXS_PAD_CTRL, 101 + MX28_PAD_LCD_D08__LCD_D8 | MXS_PAD_CTRL, 102 + MX28_PAD_LCD_D09__LCD_D9 | MXS_PAD_CTRL, 103 + MX28_PAD_LCD_D10__LCD_D10 | MXS_PAD_CTRL, 104 + MX28_PAD_LCD_D11__LCD_D11 | MXS_PAD_CTRL, 105 + MX28_PAD_LCD_D12__LCD_D12 | MXS_PAD_CTRL, 106 + MX28_PAD_LCD_D13__LCD_D13 | MXS_PAD_CTRL, 107 + MX28_PAD_LCD_D14__LCD_D14 | MXS_PAD_CTRL, 108 + MX28_PAD_LCD_D15__LCD_D15 | MXS_PAD_CTRL, 109 + MX28_PAD_LCD_D16__LCD_D16 | MXS_PAD_CTRL, 110 + MX28_PAD_LCD_D17__LCD_D17 | MXS_PAD_CTRL, 111 + MX28_PAD_LCD_D18__LCD_D18 | MXS_PAD_CTRL, 112 + MX28_PAD_LCD_D19__LCD_D19 | MXS_PAD_CTRL, 113 + MX28_PAD_LCD_D20__LCD_D20 | MXS_PAD_CTRL, 114 + MX28_PAD_LCD_D21__LCD_D21 | MXS_PAD_CTRL, 115 + MX28_PAD_LCD_D22__LCD_D22 | MXS_PAD_CTRL, 116 + MX28_PAD_LCD_D23__LCD_D23 | MXS_PAD_CTRL, 117 + 118 + MX28_PAD_LCD_ENABLE__LCD_ENABLE | MXS_PAD_CTRL, 119 + MX28_PAD_LCD_DOTCLK__LCD_DOTCLK | MXS_PAD_CTRL, 120 + 121 + /* mmc0 */ 122 + MX28_PAD_SSP0_DATA0__SSP0_D0 | 123 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 124 + MX28_PAD_SSP0_DATA1__SSP0_D1 | 125 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 126 + MX28_PAD_SSP0_DATA2__SSP0_D2 | 127 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 128 + MX28_PAD_SSP0_DATA3__SSP0_D3 | 129 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 130 + MX28_PAD_SSP0_DATA4__SSP0_D4 | 131 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 132 + MX28_PAD_SSP0_DATA5__SSP0_D5 | 133 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 134 + MX28_PAD_SSP0_DATA6__SSP0_D6 | 135 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 136 + MX28_PAD_SSP0_DATA7__SSP0_D7 | 137 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 138 + MX28_PAD_SSP0_CMD__SSP0_CMD | 139 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 140 + MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT | 141 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 142 + MX28_PAD_SSP0_SCK__SSP0_SCK | 143 + (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 144 + 145 + /* mmc1 */ 146 + MX28_PAD_GPMI_D00__SSP1_D0 | 147 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 148 + MX28_PAD_GPMI_D01__SSP1_D1 | 149 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 150 + MX28_PAD_GPMI_D02__SSP1_D2 | 151 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 152 + MX28_PAD_GPMI_D03__SSP1_D3 | 153 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 154 + MX28_PAD_GPMI_D04__SSP1_D4 | 155 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 156 + MX28_PAD_GPMI_D05__SSP1_D5 | 157 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 158 + MX28_PAD_GPMI_D06__SSP1_D6 | 159 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 160 + MX28_PAD_GPMI_D07__SSP1_D7 | 161 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 162 + MX28_PAD_GPMI_RDY1__SSP1_CMD | 163 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), 164 + MX28_PAD_GPMI_RDY0__SSP1_CARD_DETECT | 165 + (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 166 + MX28_PAD_GPMI_WRN__SSP1_SCK | 167 + (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 168 + /* write protect */ 169 + MX28_PAD_GPMI_RESETN__GPIO_0_28 | 170 + (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 171 + /* slot power enable */ 172 + MX28_PAD_PWM4__GPIO_3_29 | 173 + (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), 174 + 175 + /* led */ 176 + MX28_PAD_PWM0__GPIO_3_16 | MXS_PAD_CTRL, 177 + MX28_PAD_PWM1__GPIO_3_17 | MXS_PAD_CTRL, 178 + 179 + /* nand */ 180 + MX28_PAD_GPMI_D00__GPMI_D0 | 181 + (MXS_PAD_4MA | MXS_PAD_1V8 | MXS_PAD_NOPULL), 182 + MX28_PAD_GPMI_D01__GPMI_D1 | 183 + (MXS_PAD_4MA | MXS_PAD_1V8 | MXS_PAD_NOPULL), 184 + 
MX28_PAD_GPMI_D02__GPMI_D2 | 185 + (MXS_PAD_4MA | MXS_PAD_1V8 | MXS_PAD_NOPULL), 186 + MX28_PAD_GPMI_D03__GPMI_D3 | 187 + (MXS_PAD_4MA | MXS_PAD_1V8 | MXS_PAD_NOPULL), 188 + MX28_PAD_GPMI_D04__GPMI_D4 | 189 + (MXS_PAD_4MA | MXS_PAD_1V8 | MXS_PAD_NOPULL), 190 + MX28_PAD_GPMI_D05__GPMI_D5 | 191 + (MXS_PAD_4MA | MXS_PAD_1V8 | MXS_PAD_NOPULL), 192 + MX28_PAD_GPMI_D06__GPMI_D6 | 193 + (MXS_PAD_4MA | MXS_PAD_1V8 | MXS_PAD_NOPULL), 194 + MX28_PAD_GPMI_D07__GPMI_D7 | 195 + (MXS_PAD_4MA | MXS_PAD_1V8 | MXS_PAD_NOPULL), 196 + MX28_PAD_GPMI_CE0N__GPMI_CE0N | 197 + (MXS_PAD_4MA | MXS_PAD_1V8 | MXS_PAD_NOPULL), 198 + MX28_PAD_GPMI_RDY0__GPMI_READY0 | 199 + (MXS_PAD_4MA | MXS_PAD_1V8 | MXS_PAD_NOPULL), 200 + MX28_PAD_GPMI_RDN__GPMI_RDN | 201 + (MXS_PAD_12MA | MXS_PAD_1V8 | MXS_PAD_PULLUP), 202 + MX28_PAD_GPMI_WRN__GPMI_WRN | 203 + (MXS_PAD_12MA | MXS_PAD_1V8 | MXS_PAD_PULLUP), 204 + MX28_PAD_GPMI_ALE__GPMI_ALE | 205 + (MXS_PAD_4MA | MXS_PAD_1V8 | MXS_PAD_PULLUP), 206 + MX28_PAD_GPMI_CLE__GPMI_CLE | 207 + (MXS_PAD_4MA | MXS_PAD_1V8 | MXS_PAD_PULLUP), 208 + MX28_PAD_GPMI_RESETN__GPMI_RESETN | 209 + (MXS_PAD_12MA | MXS_PAD_1V8 | MXS_PAD_PULLUP), 210 + 211 + /* Backlight */ 212 + MX28_PAD_PWM3__GPIO_3_28 | MXS_PAD_CTRL, 213 + }; 214 + 215 + /* led */ 216 + static const struct gpio_led m28evk_leds[] __initconst = { 217 + { 218 + .name = "user-led1", 219 + .default_trigger = "heartbeat", 220 + .gpio = M28EVK_GPIO_USERLED1, 221 + }, 222 + { 223 + .name = "user-led2", 224 + .default_trigger = "heartbeat", 225 + .gpio = M28EVK_GPIO_USERLED2, 226 + }, 227 + }; 228 + 229 + static const struct gpio_led_platform_data m28evk_led_data __initconst = { 230 + .leds = m28evk_leds, 231 + .num_leds = ARRAY_SIZE(m28evk_leds), 232 + }; 233 + 234 + static struct fec_platform_data mx28_fec_pdata[] __initdata = { 235 + { 236 + /* fec0 */ 237 + .phy = PHY_INTERFACE_MODE_RMII, 238 + }, { 239 + /* fec1 */ 240 + .phy = PHY_INTERFACE_MODE_RMII, 241 + }, 242 + }; 243 + 244 + static int __init m28evk_fec_get_mac(void) 245 + { 246 + int i; 247 + u32 val; 248 + const u32 *ocotp = mxs_get_ocotp(); 249 + 250 + if (!ocotp) { 251 + pr_err("%s: timeout when reading fec mac from OCOTP\n", 252 + __func__); 253 + return -ETIMEDOUT; 254 + } 255 + 256 + /* 257 + * OCOTP only stores the last 4 octets for each mac address, 258 + * so hard-code DENX OUI (C0:E5:4E) here. 
259 + */ 260 + for (i = 0; i < 2; i++) { 261 + val = ocotp[i * 4]; 262 + mx28_fec_pdata[i].mac[0] = 0xC0; 263 + mx28_fec_pdata[i].mac[1] = 0xE5; 264 + mx28_fec_pdata[i].mac[2] = 0x4E; 265 + mx28_fec_pdata[i].mac[3] = (val >> 16) & 0xff; 266 + mx28_fec_pdata[i].mac[4] = (val >> 8) & 0xff; 267 + mx28_fec_pdata[i].mac[5] = (val >> 0) & 0xff; 268 + } 269 + 270 + return 0; 271 + } 272 + 273 + /* mxsfb (lcdif) */ 274 + static struct fb_videomode m28evk_video_modes[] = { 275 + { 276 + .name = "Ampire AM-800480R2TMQW-T01H", 277 + .refresh = 60, 278 + .xres = 800, 279 + .yres = 480, 280 + .pixclock = 30066, /* picosecond (33.26 MHz) */ 281 + .left_margin = 0, 282 + .right_margin = 256, 283 + .upper_margin = 0, 284 + .lower_margin = 45, 285 + .hsync_len = 1, 286 + .vsync_len = 1, 287 + .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT, 288 + }, 289 + }; 290 + 291 + static const struct mxsfb_platform_data m28evk_mxsfb_pdata __initconst = { 292 + .mode_list = m28evk_video_modes, 293 + .mode_count = ARRAY_SIZE(m28evk_video_modes), 294 + .default_bpp = 16, 295 + .ld_intf_width = STMLCDIF_18BIT, 296 + }; 297 + 298 + static struct at24_platform_data m28evk_eeprom = { 299 + .byte_len = 16384, 300 + .page_size = 32, 301 + .flags = AT24_FLAG_ADDR16, 302 + }; 303 + 304 + static struct i2c_board_info m28_stk5v3_i2c_boardinfo[] __initdata = { 305 + { 306 + I2C_BOARD_INFO("at24", 0x51), /* E0=1, E1=0, E2=0 */ 307 + .platform_data = &m28evk_eeprom, 308 + }, 309 + }; 310 + 311 + static struct mxs_mmc_platform_data m28evk_mmc_pdata[] __initdata = { 312 + { 313 + /* mmc0 */ 314 + .wp_gpio = MX28EVK_MMC0_WRITE_PROTECT, 315 + .flags = SLOTF_8_BIT_CAPABLE, 316 + }, { 317 + /* mmc1 */ 318 + .wp_gpio = MX28EVK_MMC1_WRITE_PROTECT, 319 + .flags = SLOTF_8_BIT_CAPABLE, 320 + }, 321 + }; 322 + 323 + static void __init m28evk_init(void) 324 + { 325 + mxs_iomux_setup_multiple_pads(m28evk_pads, ARRAY_SIZE(m28evk_pads)); 326 + 327 + mx28_add_duart(); 328 + mx28_add_auart0(); 329 + mx28_add_auart3(); 330 + 331 + if (!m28evk_fec_get_mac()) { 332 + mx28_add_fec(0, &mx28_fec_pdata[0]); 333 + mx28_add_fec(1, &mx28_fec_pdata[1]); 334 + } 335 + 336 + mx28_add_flexcan(0, NULL); 337 + mx28_add_flexcan(1, NULL); 338 + 339 + mx28_add_mxsfb(&m28evk_mxsfb_pdata); 340 + 341 + mx28_add_mxs_mmc(0, &m28evk_mmc_pdata[0]); 342 + mx28_add_mxs_mmc(1, &m28evk_mmc_pdata[1]); 343 + 344 + gpio_led_register_device(0, &m28evk_led_data); 345 + 346 + /* I2C */ 347 + mx28_add_mxs_i2c(0); 348 + i2c_register_board_info(0, m28_stk5v3_i2c_boardinfo, 349 + ARRAY_SIZE(m28_stk5v3_i2c_boardinfo)); 350 + } 351 + 352 + static void __init m28evk_timer_init(void) 353 + { 354 + mx28_clocks_init(); 355 + } 356 + 357 + static struct sys_timer m28evk_timer = { 358 + .init = m28evk_timer_init, 359 + }; 360 + 361 + MACHINE_START(M28EVK, "DENX M28 EVK") 362 + .map_io = mx28_map_io, 363 + .init_irq = mx28_init_irq, 364 + .init_machine = m28evk_init, 365 + .timer = &m28evk_timer, 366 + MACHINE_END
+1 -1
arch/arm/mach-s3c2443/clock.c
··· 128 128 unsigned long clkcon0; 129 129 130 130 clkcon0 = __raw_readl(S3C2443_CLKDIV0); 131 - clkcon0 &= S3C2443_CLKDIV0_ARMDIV_MASK; 131 + clkcon0 &= ~S3C2443_CLKDIV0_ARMDIV_MASK; 132 132 clkcon0 |= val << S3C2443_CLKDIV0_ARMDIV_SHIFT; 133 133 __raw_writel(clkcon0, S3C2443_CLKDIV0); 134 134 }
-39
arch/arm/mach-s3c64xx/mach-smdk6410.c
··· 262 262 .cols = 8, 263 263 }; 264 264 265 - static int smdk6410_backlight_init(struct device *dev) 266 - { 267 - int ret; 268 - 269 - ret = gpio_request(S3C64XX_GPF(15), "Backlight"); 270 - if (ret) { 271 - printk(KERN_ERR "failed to request GPF for PWM-OUT1\n"); 272 - return ret; 273 - } 274 - 275 - /* Configure GPIO pin with S3C64XX_GPF15_PWM_TOUT1 */ 276 - s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_SFN(2)); 277 - 278 - return 0; 279 - } 280 - 281 - static void smdk6410_backlight_exit(struct device *dev) 282 - { 283 - s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_OUTPUT); 284 - gpio_free(S3C64XX_GPF(15)); 285 - } 286 - 287 - static struct platform_pwm_backlight_data smdk6410_backlight_data = { 288 - .pwm_id = 1, 289 - .max_brightness = 255, 290 - .dft_brightness = 255, 291 - .pwm_period_ns = 78770, 292 - .init = smdk6410_backlight_init, 293 - .exit = smdk6410_backlight_exit, 294 - }; 295 - 296 - static struct platform_device smdk6410_backlight_device = { 297 - .name = "pwm-backlight", 298 - .dev = { 299 - .parent = &s3c_device_timer[1].dev, 300 - .platform_data = &smdk6410_backlight_data, 301 - }, 302 - }; 303 - 304 265 static struct map_desc smdk6410_iodesc[] = {}; 305 266 306 267 static struct platform_device *smdk6410_devices[] __initdata = {
+2 -4
arch/arm/mach-s5pv210/clock.c
··· 815 815 .reg_div = { .reg = S5P_CLK_DIV3, .shift = 20, .size = 4 }, 816 816 }, { 817 817 .clk = { 818 - .name = "sclk_cam", 819 - .devname = "s5pv210-fimc.0", 818 + .name = "sclk_cam0", 820 819 .enable = s5pv210_clk_mask0_ctrl, 821 820 .ctrlbit = (1 << 3), 822 821 }, ··· 824 825 .reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 4 }, 825 826 }, { 826 827 .clk = { 827 - .name = "sclk_cam", 828 - .devname = "s5pv210-fimc.1", 828 + .name = "sclk_cam1", 829 829 .enable = s5pv210_clk_mask0_ctrl, 830 830 .ctrlbit = (1 << 4), 831 831 },
+20
arch/arm/mm/cache-v7.S
··· 174 174 dcache_line_size r2, r3 175 175 sub r3, r2, #1 176 176 bic r12, r0, r3 177 + #ifdef CONFIG_ARM_ERRATA_764369 178 + ALT_SMP(W(dsb)) 179 + ALT_UP(W(nop)) 180 + #endif 177 181 1: 178 182 USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification 179 183 add r12, r12, r2 ··· 227 223 add r1, r0, r1 228 224 sub r3, r2, #1 229 225 bic r0, r0, r3 226 + #ifdef CONFIG_ARM_ERRATA_764369 227 + ALT_SMP(W(dsb)) 228 + ALT_UP(W(nop)) 229 + #endif 230 230 1: 231 231 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line 232 232 add r0, r0, r2 ··· 255 247 sub r3, r2, #1 256 248 tst r0, r3 257 249 bic r0, r0, r3 250 + #ifdef CONFIG_ARM_ERRATA_764369 251 + ALT_SMP(W(dsb)) 252 + ALT_UP(W(nop)) 253 + #endif 258 254 mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line 259 255 260 256 tst r1, r3 ··· 282 270 dcache_line_size r2, r3 283 271 sub r3, r2, #1 284 272 bic r0, r0, r3 273 + #ifdef CONFIG_ARM_ERRATA_764369 274 + ALT_SMP(W(dsb)) 275 + ALT_UP(W(nop)) 276 + #endif 285 277 1: 286 278 mcr p15, 0, r0, c7, c10, 1 @ clean D / U line 287 279 add r0, r0, r2 ··· 304 288 dcache_line_size r2, r3 305 289 sub r3, r2, #1 306 290 bic r0, r0, r3 291 + #ifdef CONFIG_ARM_ERRATA_764369 292 + ALT_SMP(W(dsb)) 293 + ALT_UP(W(nop)) 294 + #endif 307 295 1: 308 296 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line 309 297 add r0, r0, r2
+2
arch/arm/mm/dma-mapping.c
··· 324 324 325 325 if (addr) 326 326 *handle = pfn_to_dma(dev, page_to_pfn(page)); 327 + else 328 + __dma_free_buffer(page, size); 327 329 328 330 return addr; 329 331 }
+4
arch/arm/plat-mxc/devices/Kconfig
··· 79 79 80 80 config IMX_HAVE_PLATFORM_SPI_IMX 81 81 bool 82 + 83 + config IMX_HAVE_PLATFORM_AHCI 84 + bool 85 + default y if ARCH_MX53
+1
arch/arm/plat-mxc/devices/Makefile
··· 26 26 obj-$(CONFIG_IMX_HAVE_PLATFORM_MXC_W1) += platform-mxc_w1.o 27 27 obj-$(CONFIG_IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX) += platform-sdhci-esdhc-imx.o 28 28 obj-$(CONFIG_IMX_HAVE_PLATFORM_SPI_IMX) += platform-spi_imx.o 29 + obj-$(CONFIG_IMX_HAVE_PLATFORM_AHCI) += platform-ahci-imx.o
+156
arch/arm/plat-mxc/devices/platform-ahci-imx.c
··· 1 + /* 2 + * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved. 3 + */ 4 + 5 + /* 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + 16 + * You should have received a copy of the GNU General Public License along 17 + * with this program; if not, write to the Free Software Foundation, Inc., 18 + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 19 + */ 20 + 21 + #include <linux/io.h> 22 + #include <linux/clk.h> 23 + #include <linux/err.h> 24 + #include <linux/device.h> 25 + #include <linux/dma-mapping.h> 26 + #include <asm/sizes.h> 27 + #include <mach/hardware.h> 28 + #include <mach/devices-common.h> 29 + 30 + #define imx_ahci_imx_data_entry_single(soc, _devid) \ 31 + { \ 32 + .devid = _devid, \ 33 + .iobase = soc ## _SATA_BASE_ADDR, \ 34 + .irq = soc ## _INT_SATA, \ 35 + } 36 + 37 + #ifdef CONFIG_SOC_IMX53 38 + const struct imx_ahci_imx_data imx53_ahci_imx_data __initconst = 39 + imx_ahci_imx_data_entry_single(MX53, "imx53-ahci"); 40 + #endif 41 + 42 + enum { 43 + HOST_CAP = 0x00, 44 + HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ 45 + HOST_PORTS_IMPL = 0x0c, 46 + HOST_TIMER1MS = 0xe0, /* Timer 1-ms */ 47 + }; 48 + 49 + static struct clk *sata_clk, *sata_ref_clk; 50 + 51 + /* AHCI module Initialization, if return 0, initialization is successful. 
*/ 52 + static int imx_sata_init(struct device *dev, void __iomem *addr) 53 + { 54 + u32 tmpdata; 55 + int ret = 0; 56 + struct clk *clk; 57 + 58 + sata_clk = clk_get(dev, "ahci"); 59 + if (IS_ERR(sata_clk)) { 60 + dev_err(dev, "no sata clock.\n"); 61 + return PTR_ERR(sata_clk); 62 + } 63 + ret = clk_enable(sata_clk); 64 + if (ret) { 65 + dev_err(dev, "can't enable sata clock.\n"); 66 + goto put_sata_clk; 67 + } 68 + 69 + /* Get the AHCI SATA PHY CLK */ 70 + sata_ref_clk = clk_get(dev, "ahci_phy"); 71 + if (IS_ERR(sata_ref_clk)) { 72 + dev_err(dev, "no sata ref clock.\n"); 73 + ret = PTR_ERR(sata_ref_clk); 74 + goto release_sata_clk; 75 + } 76 + ret = clk_enable(sata_ref_clk); 77 + if (ret) { 78 + dev_err(dev, "can't enable sata ref clock.\n"); 79 + goto put_sata_ref_clk; 80 + } 81 + 82 + /* Get the AHB clock rate, and configure the TIMER1MS reg later */ 83 + clk = clk_get(dev, "ahci_dma"); 84 + if (IS_ERR(clk)) { 85 + dev_err(dev, "no dma clock.\n"); 86 + ret = PTR_ERR(clk); 87 + goto release_sata_ref_clk; 88 + } 89 + tmpdata = clk_get_rate(clk) / 1000; 90 + clk_put(clk); 91 + 92 + writel(tmpdata, addr + HOST_TIMER1MS); 93 + 94 + tmpdata = readl(addr + HOST_CAP); 95 + if (!(tmpdata & HOST_CAP_SSS)) { 96 + tmpdata |= HOST_CAP_SSS; 97 + writel(tmpdata, addr + HOST_CAP); 98 + } 99 + 100 + if (!(readl(addr + HOST_PORTS_IMPL) & 0x1)) 101 + writel((readl(addr + HOST_PORTS_IMPL) | 0x1), 102 + addr + HOST_PORTS_IMPL); 103 + 104 + return 0; 105 + 106 + release_sata_ref_clk: 107 + clk_disable(sata_ref_clk); 108 + put_sata_ref_clk: 109 + clk_put(sata_ref_clk); 110 + release_sata_clk: 111 + clk_disable(sata_clk); 112 + put_sata_clk: 113 + clk_put(sata_clk); 114 + 115 + return ret; 116 + } 117 + 118 + static void imx_sata_exit(struct device *dev) 119 + { 120 + clk_disable(sata_ref_clk); 121 + clk_put(sata_ref_clk); 122 + 123 + clk_disable(sata_clk); 124 + clk_put(sata_clk); 125 + 126 + } 127 + struct platform_device *__init imx_add_ahci_imx( 128 + const struct imx_ahci_imx_data *data, 129 + const struct ahci_platform_data *pdata) 130 + { 131 + struct resource res[] = { 132 + { 133 + .start = data->iobase, 134 + .end = data->iobase + SZ_4K - 1, 135 + .flags = IORESOURCE_MEM, 136 + }, { 137 + .start = data->irq, 138 + .end = data->irq, 139 + .flags = IORESOURCE_IRQ, 140 + }, 141 + }; 142 + 143 + return imx_add_platform_device_dmamask(data->devid, 0, 144 + res, ARRAY_SIZE(res), 145 + pdata, sizeof(*pdata), DMA_BIT_MASK(32)); 146 + } 147 + 148 + struct platform_device *__init imx53_add_ahci_imx(void) 149 + { 150 + struct ahci_platform_data pdata = { 151 + .init = imx_sata_init, 152 + .exit = imx_sata_exit, 153 + }; 154 + 155 + return imx_add_ahci_imx(&imx53_ahci_imx_data, &pdata); 156 + }
+10
arch/arm/plat-mxc/include/mach/devices-common.h
··· 309 309 struct platform_device *imx_add_imx_dma(void); 310 310 struct platform_device *imx_add_imx_sdma(char *name, 311 311 resource_size_t iobase, int irq, struct sdma_platform_data *pdata); 312 + 313 + #include <linux/ahci_platform.h> 314 + struct imx_ahci_imx_data { 315 + const char *devid; 316 + resource_size_t iobase; 317 + resource_size_t irq; 318 + }; 319 + struct platform_device *__init imx_add_ahci_imx( 320 + const struct imx_ahci_imx_data *data, 321 + const struct ahci_platform_data *pdata);
+5 -4
arch/arm/plat-s5p/irq-gpioint.c
··· 114 114 { 115 115 static int used_gpioint_groups = 0; 116 116 int group = chip->group; 117 - struct s5p_gpioint_bank *bank = NULL; 117 + struct s5p_gpioint_bank *b, *bank = NULL; 118 118 struct irq_chip_generic *gc; 119 119 struct irq_chip_type *ct; 120 120 121 121 if (used_gpioint_groups >= S5P_GPIOINT_GROUP_COUNT) 122 122 return -ENOMEM; 123 123 124 - list_for_each_entry(bank, &banks, list) { 125 - if (group >= bank->start && 126 - group < bank->start + bank->nr_groups) 124 + list_for_each_entry(b, &banks, list) { 125 + if (group >= b->start && group < b->start + b->nr_groups) { 126 + bank = b; 127 127 break; 128 + } 128 129 } 129 130 if (!bank) 130 131 return -EINVAL;
+11
arch/arm/plat-samsung/clock.c
··· 64 64 */ 65 65 DEFINE_SPINLOCK(clocks_lock); 66 66 67 + /* Global watchdog clock used by arch_wtd_reset() callback */ 68 + struct clk *s3c2410_wdtclk; 69 + static int __init s3c_wdt_reset_init(void) 70 + { 71 + s3c2410_wdtclk = clk_get(NULL, "watchdog"); 72 + if (IS_ERR(s3c2410_wdtclk)) 73 + printk(KERN_WARNING "%s: warning: cannot get watchdog clock\n", __func__); 74 + return 0; 75 + } 76 + arch_initcall(s3c_wdt_reset_init); 77 + 67 78 /* enable and disable calls for use with the clk struct */ 68 79 69 80 static int clk_null_enable(struct clk *clk, int enable)
+8
arch/arm/plat-samsung/include/plat/clock.h
··· 9 9 * published by the Free Software Foundation. 10 10 */ 11 11 12 + #ifndef __ASM_PLAT_CLOCK_H 13 + #define __ASM_PLAT_CLOCK_H __FILE__ 14 + 12 15 #include <linux/spinlock.h> 13 16 #include <linux/clkdev.h> 14 17 ··· 124 121 125 122 extern void s3c_pwmclk_init(void); 126 123 124 + /* Global watchdog clock used by arch_wtd_reset() callback */ 125 + 126 + extern struct clk *s3c2410_wdtclk; 127 + 128 + #endif /* __ASM_PLAT_CLOCK_H */
+3 -7
arch/arm/plat-samsung/include/plat/watchdog-reset.h
··· 10 10 * published by the Free Software Foundation. 11 11 */ 12 12 13 + #include <plat/clock.h> 13 14 #include <plat/regs-watchdog.h> 14 15 #include <mach/map.h> 15 16 ··· 20 19 21 20 static inline void arch_wdt_reset(void) 22 21 { 23 - struct clk *wdtclk; 24 - 25 22 printk("arch_reset: attempting watchdog reset\n"); 26 23 27 24 __raw_writel(0, S3C2410_WTCON); /* disable watchdog, to be safe */ 28 25 29 - wdtclk = clk_get(NULL, "watchdog"); 30 - if (!IS_ERR(wdtclk)) { 31 - clk_enable(wdtclk); 32 - } else 33 - printk(KERN_WARNING "%s: warning: cannot get watchdog clock\n", __func__); 26 + if (s3c2410_wdtclk) 27 + clk_enable(s3c2410_wdtclk); 34 28 35 29 /* put initial values into count and data */ 36 30 __raw_writel(0x80, S3C2410_WTCNT);
+14
arch/powerpc/platforms/powermac/pci.c
··· 561 561 .write = u4_pcie_write_config, 562 562 }; 563 563 564 + static void __devinit pmac_pci_fixup_u4_of_node(struct pci_dev *dev) 565 + { 566 + /* Apple's device-tree "hides" the root complex virtual P2P bridge 567 + * on U4. However, Linux sees it, causing the PCI <-> OF matching 568 + * code to fail to properly match devices below it. This works around 569 + * it by setting the node of the bridge to point to the PHB node, 570 + * which is not entirely correct but fixes the matching code and 571 + * doesn't break anything else. It's also the simplest possible fix. 572 + */ 573 + if (dev->dev.of_node == NULL) 574 + dev->dev.of_node = pcibios_get_phb_of_node(dev->bus); 575 + } 576 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, 0x5b, pmac_pci_fixup_u4_of_node); 577 + 564 578 #endif /* CONFIG_PPC64 */ 565 579 566 580 #ifdef CONFIG_PPC32
+2 -1
arch/s390/include/asm/elf.h
··· 188 188 #define SET_PERSONALITY(ex) \ 189 189 do { \ 190 190 if (personality(current->personality) != PER_LINUX32) \ 191 - set_personality(PER_LINUX); \ 191 + set_personality(PER_LINUX | \ 192 + (current->personality & ~PER_MASK)); \ 192 193 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ 193 194 set_thread_flag(TIF_31BIT); \ 194 195 else \
+2
arch/s390/include/asm/pgtable.h
··· 658 658 * struct gmap_struct - guest address space 659 659 * @mm: pointer to the parent mm_struct 660 660 * @table: pointer to the page directory 661 + * @asce: address space control element for gmap page table 661 662 * @crst_list: list of all crst tables used in the guest address space 662 663 */ 663 664 struct gmap { 664 665 struct list_head list; 665 666 struct mm_struct *mm; 666 667 unsigned long *table; 668 + unsigned long asce; 667 669 struct list_head crst_list; 668 670 }; 669 671
+3
arch/s390/kernel/asm-offsets.c
··· 10 10 #include <linux/sched.h> 11 11 #include <asm/vdso.h> 12 12 #include <asm/sigp.h> 13 + #include <asm/pgtable.h> 13 14 14 15 /* 15 16 * Make sure that the compiler is new enough. We want a compiler that ··· 127 126 DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack)); 128 127 DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack)); 129 128 DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack)); 129 + DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce)); 130 130 DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); 131 131 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); 132 132 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); ··· 153 151 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); 154 152 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); 155 153 DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp)); 154 + DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); 156 155 #endif /* CONFIG_32BIT */ 157 156 return 0; 158 157 }
+6
arch/s390/kernel/entry64.S
··· 1076 1076 lg %r14,__LC_THREAD_INFO # pointer thread_info struct 1077 1077 tm __TI_flags+7(%r14),_TIF_EXIT_SIE 1078 1078 jnz sie_exit 1079 + lg %r14,__LC_GMAP # get gmap pointer 1080 + ltgr %r14,%r14 1081 + jz sie_gmap 1082 + lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce 1083 + sie_gmap: 1079 1084 lg %r14,__SF_EMPTY(%r15) # get control block pointer 1080 1085 SPP __SF_EMPTY(%r15) # set guest id 1081 1086 sie 0(%r14) ··· 1088 1083 SPP __LC_CMF_HPP # set host id 1089 1084 lg %r14,__LC_THREAD_INFO # pointer thread_info struct 1090 1085 sie_exit: 1086 + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 1091 1087 ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) 1092 1088 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area 1093 1089 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+3 -2
arch/s390/kvm/kvm-s390.c
··· 123 123 124 124 switch (ext) { 125 125 case KVM_CAP_S390_PSW: 126 + case KVM_CAP_S390_GMAP: 126 127 r = 1; 127 128 break; 128 129 default: ··· 264 263 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK; 265 264 restore_fp_regs(&vcpu->arch.guest_fpregs); 266 265 restore_access_regs(vcpu->arch.guest_acrs); 266 + gmap_enable(vcpu->arch.gmap); 267 267 } 268 268 269 269 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 270 270 { 271 + gmap_disable(vcpu->arch.gmap); 271 272 save_fp_regs(&vcpu->arch.guest_fpregs); 272 273 save_access_regs(vcpu->arch.guest_acrs); 273 274 restore_fp_regs(&vcpu->arch.host_fpregs); ··· 464 461 local_irq_disable(); 465 462 kvm_guest_enter(); 466 463 local_irq_enable(); 467 - gmap_enable(vcpu->arch.gmap); 468 464 VCPU_EVENT(vcpu, 6, "entering sie flags %x", 469 465 atomic_read(&vcpu->arch.sie_block->cpuflags)); 470 466 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) { ··· 472 470 } 473 471 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", 474 472 vcpu->arch.sie_block->icptcode); 475 - gmap_disable(vcpu->arch.gmap); 476 473 local_irq_disable(); 477 474 kvm_guest_exit(); 478 475 local_irq_enable();
+6 -11
arch/s390/mm/pgtable.c
··· 160 160 table = (unsigned long *) page_to_phys(page); 161 161 crst_table_init(table, _REGION1_ENTRY_EMPTY); 162 162 gmap->table = table; 163 + gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH | 164 + _ASCE_USER_BITS | __pa(table); 163 165 list_add(&gmap->list, &mm->context.gmap_list); 164 166 return gmap; 165 167 ··· 242 240 */ 243 241 void gmap_enable(struct gmap *gmap) 244 242 { 245 - /* Load primary space page table origin. */ 246 - S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH | 247 - _ASCE_USER_BITS | __pa(gmap->table); 248 - asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) ); 249 243 S390_lowcore.gmap = (unsigned long) gmap; 250 244 } 251 245 EXPORT_SYMBOL_GPL(gmap_enable); ··· 252 254 */ 253 255 void gmap_disable(struct gmap *gmap) 254 256 { 255 - /* Load primary space page table origin. */ 256 - S390_lowcore.user_asce = 257 - gmap->mm->context.asce_bits | __pa(gmap->mm->pgd); 258 - asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) ); 259 257 S390_lowcore.gmap = 0UL; 260 258 } 261 259 EXPORT_SYMBOL_GPL(gmap_disable); ··· 303 309 /* Walk the guest addr space page table */ 304 310 table = gmap->table + (((to + off) >> 53) & 0x7ff); 305 311 if (*table & _REGION_ENTRY_INV) 306 - return 0; 312 + goto out; 307 313 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 308 314 table = table + (((to + off) >> 42) & 0x7ff); 309 315 if (*table & _REGION_ENTRY_INV) 310 - return 0; 316 + goto out; 311 317 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 312 318 table = table + (((to + off) >> 31) & 0x7ff); 313 319 if (*table & _REGION_ENTRY_INV) 314 - return 0; 320 + goto out; 315 321 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 316 322 table = table + (((to + off) >> 20) & 0x7ff); 317 323 ··· 319 325 flush |= gmap_unlink_segment(gmap, table); 320 326 *table = _SEGMENT_ENTRY_INV; 321 327 } 328 + out: 322 329 up_read(&gmap->mm->mmap_sem); 323 330 if (flush) 324 331 gmap_flush_tlb(gmap);
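The goto-out conversion in the unmap path above keeps the early exits from returning with mmap_sem still held: every invalid-table check now falls through the unlock instead of past it. A condensed kernel-style sketch of that single-exit locking shape (walk_one_level() is an illustrative stand-in, not a kernel function):

    static int unmap_one(struct rw_semaphore *sem)
    {
            int flush = 0;

            down_read(sem);
            if (!walk_one_level())  /* invalid entry: bail out... */
                    goto out;       /* ...through the unlock, not around it */
            flush = 1;
    out:
            up_read(sem);           /* every path releases the lock */
            return flush;
    }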
+2
arch/sparc/include/asm/spitfire.h
··· 43 43 #define SUN4V_CHIP_NIAGARA1 0x01 44 44 #define SUN4V_CHIP_NIAGARA2 0x02 45 45 #define SUN4V_CHIP_NIAGARA3 0x03 46 + #define SUN4V_CHIP_NIAGARA4 0x04 47 + #define SUN4V_CHIP_NIAGARA5 0x05 46 48 #define SUN4V_CHIP_UNKNOWN 0xff 47 49 48 50 #ifndef __ASSEMBLY__
+3 -1
arch/sparc/include/asm/xor_64.h
··· 66 66 ((tlb_type == hypervisor && \ 67 67 (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \ 68 68 sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || \ 69 - sun4v_chip_type == SUN4V_CHIP_NIAGARA3)) ? \ 69 + sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || \ 70 + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || \ 71 + sun4v_chip_type == SUN4V_CHIP_NIAGARA5)) ? \ 70 72 &xor_block_niagara : \ 71 73 &xor_block_VIS)
+12
arch/sparc/kernel/cpu.c
··· 481 481 sparc_pmu_type = "niagara3"; 482 482 break; 483 483 484 + case SUN4V_CHIP_NIAGARA4: 485 + sparc_cpu_type = "UltraSparc T4 (Niagara4)"; 486 + sparc_fpu_type = "UltraSparc T4 integrated FPU"; 487 + sparc_pmu_type = "niagara4"; 488 + break; 489 + 490 + case SUN4V_CHIP_NIAGARA5: 491 + sparc_cpu_type = "UltraSparc T5 (Niagara5)"; 492 + sparc_fpu_type = "UltraSparc T5 integrated FPU"; 493 + sparc_pmu_type = "niagara5"; 494 + break; 495 + 484 496 default: 485 497 printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", 486 498 prom_cpu_compatible);
+2
arch/sparc/kernel/cpumap.c
··· 325 325 case SUN4V_CHIP_NIAGARA1: 326 326 case SUN4V_CHIP_NIAGARA2: 327 327 case SUN4V_CHIP_NIAGARA3: 328 + case SUN4V_CHIP_NIAGARA4: 329 + case SUN4V_CHIP_NIAGARA5: 328 330 rover_inc_table = niagara_iterate_method; 329 331 break; 330 332 default:
+22 -3
arch/sparc/kernel/head_64.S
··· 133 133 prom_niagara_prefix: 134 134 .asciz "SUNW,UltraSPARC-T" 135 135 prom_sparc_prefix: 136 - .asciz "SPARC-T" 136 + .asciz "SPARC-" 137 137 .align 4 138 138 prom_root_compatible: 139 139 .skip 64 ··· 396 396 or %g1, %lo(prom_cpu_compatible), %g1 397 397 sethi %hi(prom_sparc_prefix), %g7 398 398 or %g7, %lo(prom_sparc_prefix), %g7 399 - mov 7, %g3 399 + mov 6, %g3 400 400 90: ldub [%g7], %g2 401 401 ldub [%g1], %g4 402 402 cmp %g2, %g4 ··· 408 408 409 409 sethi %hi(prom_cpu_compatible), %g1 410 410 or %g1, %lo(prom_cpu_compatible), %g1 411 - ldub [%g1 + 7], %g2 411 + ldub [%g1 + 6], %g2 412 + cmp %g2, 'T' 413 + be,pt %xcc, 70f 414 + cmp %g2, 'M' 415 + bne,pn %xcc, 4f 416 + nop 417 + 418 + 70: ldub [%g1 + 7], %g2 412 419 cmp %g2, '3' 413 420 be,pt %xcc, 5f 414 421 mov SUN4V_CHIP_NIAGARA3, %g4 422 + cmp %g2, '4' 423 + be,pt %xcc, 5f 424 + mov SUN4V_CHIP_NIAGARA4, %g4 425 + cmp %g2, '5' 426 + be,pt %xcc, 5f 427 + mov SUN4V_CHIP_NIAGARA5, %g4 415 428 ba,pt %xcc, 4f 416 429 nop 417 430 ··· 556 543 be,pt %xcc, niagara2_patch 557 544 nop 558 545 cmp %g1, SUN4V_CHIP_NIAGARA3 546 + be,pt %xcc, niagara2_patch 547 + nop 548 + cmp %g1, SUN4V_CHIP_NIAGARA4 549 + be,pt %xcc, niagara2_patch 550 + nop 551 + cmp %g1, SUN4V_CHIP_NIAGARA5 559 552 be,pt %xcc, niagara2_patch 560 553 nop 561 554
+1 -2
arch/sparc/kernel/process_32.c
··· 380 380 #endif 381 381 } 382 382 383 - /* Now, this task is no longer a kernel thread. */ 384 - current->thread.current_ds = USER_DS; 383 + /* This task is no longer a kernel thread. */ 385 384 if (current->thread.flags & SPARC_FLAG_KTHREAD) { 386 385 current->thread.flags &= ~SPARC_FLAG_KTHREAD; 387 386
-3
arch/sparc/kernel/process_64.c
··· 368 368 369 369 /* Clear FPU register state. */ 370 370 t->fpsaved[0] = 0; 371 - 372 - if (get_thread_current_ds() != ASI_AIUS) 373 - set_fs(USER_DS); 374 371 } 375 372 376 373 /* It's a bit more tricky when 64-bit tasks are involved... */
+1 -1
arch/sparc/kernel/setup_32.c
··· 137 137 prom_halt(); 138 138 break; 139 139 case 'p': 140 - /* Just ignore, this behavior is now the default. */ 140 + prom_early_console.flags &= ~CON_BOOT; 141 141 break; 142 142 default: 143 143 printk("Unknown boot switch (-%c)\n", c);
+13 -5
arch/sparc/kernel/setup_64.c
··· 106 106 prom_halt(); 107 107 break; 108 108 case 'p': 109 - /* Just ignore, this behavior is now the default. */ 109 + prom_early_console.flags &= ~CON_BOOT; 110 110 break; 111 111 case 'P': 112 112 /* Force UltraSPARC-III P-Cache on. */ ··· 425 425 else if (tlb_type == hypervisor) { 426 426 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || 427 427 sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || 428 - sun4v_chip_type == SUN4V_CHIP_NIAGARA3) 428 + sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || 429 + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || 430 + sun4v_chip_type == SUN4V_CHIP_NIAGARA5) 429 431 cap |= HWCAP_SPARC_BLKINIT; 430 432 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || 431 - sun4v_chip_type == SUN4V_CHIP_NIAGARA3) 433 + sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || 434 + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || 435 + sun4v_chip_type == SUN4V_CHIP_NIAGARA5) 432 436 cap |= HWCAP_SPARC_N2; 433 437 } 434 438 ··· 456 452 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1) 457 453 cap |= AV_SPARC_ASI_BLK_INIT; 458 454 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || 459 - sun4v_chip_type == SUN4V_CHIP_NIAGARA3) 455 + sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || 456 + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || 457 + sun4v_chip_type == SUN4V_CHIP_NIAGARA5) 460 458 cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | 461 459 AV_SPARC_ASI_BLK_INIT | 462 460 AV_SPARC_POPC); 463 - if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3) 461 + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || 462 + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || 463 + sun4v_chip_type == SUN4V_CHIP_NIAGARA5) 464 464 cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | 465 465 AV_SPARC_FMAF); 466 466 }
+5
arch/sparc/mm/init_64.c
··· 511 511 for (i = 0; i < prom_trans_ents; i++) 512 512 prom_trans[i].data &= ~0x0003fe0000000000UL; 513 513 } 514 + 515 + /* Force execute bit on. */ 516 + for (i = 0; i < prom_trans_ents; i++) 517 + prom_trans[i].data |= (tlb_type == hypervisor ? 518 + _PAGE_EXEC_4V : _PAGE_EXEC_4U); 514 519 } 515 520 516 521 static void __init hypervisor_tlb_lock(unsigned long vaddr,
+4
arch/um/Kconfig.x86
··· 10 10 bool 11 11 default n 12 12 13 + config CMPXCHG_DOUBLE 14 + bool 15 + default n 16 + 13 17 source "arch/x86/Kconfig.cpu" 14 18 15 19 endmenu
+1 -1
arch/um/Makefile
··· 41 41 KBUILD_CFLAGS += $(CFLAGS) $(CFLAGS-y) -D__arch_um__ -DSUBARCH=\"$(SUBARCH)\" \ 42 42 $(ARCH_INCLUDE) $(MODE_INCLUDE) -Dvmap=kernel_vmap \ 43 43 -Din6addr_loopback=kernel_in6addr_loopback \ 44 - -Din6addr_any=kernel_in6addr_any 44 + -Din6addr_any=kernel_in6addr_any -Dstrrchr=kernel_strrchr 45 45 46 46 KBUILD_AFLAGS += $(ARCH_INCLUDE) 47 47
+36 -25
arch/um/drivers/line.c
··· 399 399 * is done under a spinlock. Checking whether the device is in use is 400 400 * line->tty->count > 1, also under the spinlock. 401 401 * 402 - * tty->count serves to decide whether the device should be enabled or 403 - * disabled on the host. If it's equal to 1, then we are doing the 402 + * line->count serves to decide whether the device should be enabled or 403 + * disabled on the host. If it's equal to 0, then we are doing the 404 404 * first open or last close. Otherwise, open and close just return. 405 405 */ ··· 414 414 goto out_unlock; 415 415 416 416 err = 0; 417 - if (tty->count > 1) 417 + if (line->count++) 418 418 goto out_unlock; 419 419 420 - spin_unlock(&line->count_lock); 421 - 420 + BUG_ON(tty->driver_data); 422 421 tty->driver_data = line; 423 422 line->tty = tty; 424 423 424 + spin_unlock(&line->count_lock); 425 425 err = enable_chan(line); 426 - if (err) 426 + if (err) /* line_close() will be called by our caller */ 427 427 return err; 428 428 429 429 INIT_DELAYED_WORK(&line->task, line_timer_cb); ··· 436 436 chan_window_size(&line->chan_list, &tty->winsize.ws_row, 437 437 &tty->winsize.ws_col); 438 438 439 - return err; 439 + return 0; 440 440 441 441 out_unlock: 442 442 spin_unlock(&line->count_lock); ··· 460 460 flush_buffer(line); 461 461 462 462 spin_lock(&line->count_lock); 463 - if (!line->valid) 464 - goto out_unlock; 463 + BUG_ON(!line->valid); 465 464 466 - if (tty->count > 1) 465 + if (--line->count) 467 466 goto out_unlock; 468 - 469 - spin_unlock(&line->count_lock); 470 467 471 468 line->tty = NULL; 472 469 tty->driver_data = NULL; 470 + 471 + spin_unlock(&line->count_lock); 473 472 474 473 if (line->sigio) { 475 474 unregister_winch(tty); ··· 497 498 498 499 spin_lock(&line->count_lock); 499 500 500 - if (line->tty != NULL) { 501 + if (line->count) { 501 502 *error_out = "Device is already open"; 502 503 goto out; 503 504 } ··· 721 722 int pid; 722 723 struct tty_struct *tty; 723 724 unsigned long stack; 725 + struct work_struct work; 724 726 }; 725 727 726 - static void free_winch(struct winch *winch, int free_irq_ok) 728 + static void __free_winch(struct work_struct *work) 727 729 { 728 - if (free_irq_ok) 729 - free_irq(WINCH_IRQ, winch); 730 - 731 - list_del(&winch->list); 730 + struct winch *winch = container_of(work, struct winch, work); 731 + free_irq(WINCH_IRQ, winch); 732 732 733 733 if (winch->pid != -1) 734 734 os_kill_process(winch->pid, 1); 735 - if (winch->fd != -1) 736 - os_close_file(winch->fd); 737 735 if (winch->stack != 0) 738 736 free_stack(winch->stack, 0); 739 737 kfree(winch); 738 + } 739 + 740 + static void free_winch(struct winch *winch) 741 + { 742 + int fd = winch->fd; 743 + winch->fd = -1; 744 + if (fd != -1) 745 + os_close_file(fd); 746 + list_del(&winch->list); 747 + __free_winch(&winch->work); 740 748 } 741 749 742 750 static irqreturn_t winch_interrupt(int irq, void *data) ··· 751 745 struct winch *winch = data; 752 746 struct tty_struct *tty; 753 747 struct line *line; 748 + int fd = winch->fd; 754 749 int err; 755 750 char c; 756 751 757 - if (winch->fd != -1) { 758 - err = generic_read(winch->fd, &c, NULL); 752 + if (fd != -1) { 753 + err = generic_read(fd, &c, NULL); 759 754 if (err < 0) { 760 755 if (err != -EAGAIN) { 756 + winch->fd = -1; 757 + list_del(&winch->list); 758 + os_close_file(fd); 761 759 printk(KERN_ERR "winch_interrupt : " 762 760 "read failed, errno = %d\n", -err); 763 761 printk(KERN_ERR "fd %d is losing SIGWINCH " 764 762 "support\n", winch->tty_fd); 765 - free_winch(winch, 0); 763 + INIT_WORK(&winch->work, __free_winch); 764 + schedule_work(&winch->work); 766 765 return IRQ_HANDLED; 767 766 } 768 767 goto out; ··· 839 828 list_for_each_safe(ele, next, &winch_handlers) { 840 829 winch = list_entry(ele, struct winch, list); 841 830 if (winch->tty == tty) { 842 - free_winch(winch, 1); 831 + free_winch(winch); 843 832 break; 844 833 } 845 834 } ··· 855 844 856 845 list_for_each_safe(ele, next, &winch_handlers) { 857 846 winch = list_entry(ele, struct winch, list); 858 - free_winch(winch, 1); 847 + free_winch(winch); 859 848 } 860 849 861 850 spin_unlock(&winch_handler_lock);
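Two things are going on in the line.c hunk above: opens and closes are now tracked with line->count under count_lock instead of peeking at tty->count, and a winch whose descriptor dies inside its own interrupt handler is freed through a workqueue, because free_irq() must not be called from the handler of the IRQ it frees. A condensed sketch of that deferral pattern (fatal_read_error() is illustrative):

    static void winch_free_work(struct work_struct *work)
    {
            struct winch *winch = container_of(work, struct winch, work);

            free_irq(WINCH_IRQ, winch);     /* safe here: process context */
            kfree(winch);
    }

    static irqreturn_t winch_irq(int irq, void *data)
    {
            struct winch *winch = data;

            if (fatal_read_error(winch)) {
                    INIT_WORK(&winch->work, winch_free_work);
                    schedule_work(&winch->work);    /* defer the teardown */
            }
            return IRQ_HANDLED;
    }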
+1
arch/um/drivers/xterm.c
··· 123 123 err = -errno; 124 124 printk(UM_KERN_ERR "xterm_open : unlink failed, errno = %d\n", 125 125 errno); 126 + close(fd); 126 127 return err; 127 128 } 128 129 close(fd);
-4
arch/um/include/asm/ptrace-generic.h
··· 42 42 unsigned long addr, unsigned long data); 43 43 extern unsigned long getreg(struct task_struct *child, int regno); 44 44 extern int putreg(struct task_struct *child, int regno, unsigned long value); 45 - extern int get_fpregs(struct user_i387_struct __user *buf, 46 - struct task_struct *child); 47 - extern int set_fpregs(struct user_i387_struct __user *buf, 48 - struct task_struct *child); 49 45 50 46 extern int arch_copy_tls(struct task_struct *new); 51 47 extern void clear_flushed_tls(struct task_struct *task);
+1
arch/um/include/shared/line.h
··· 33 33 struct line { 34 34 struct tty_struct *tty; 35 35 spinlock_t count_lock; 36 + unsigned long count; 36 37 int valid; 37 38 38 39 char *init_str;
+1 -1
arch/um/include/shared/registers.h
··· 16 16 extern int save_registers(int pid, struct uml_pt_regs *regs); 17 17 extern int restore_registers(int pid, struct uml_pt_regs *regs); 18 18 extern int init_registers(int pid); 19 - extern void get_safe_registers(unsigned long *regs); 19 + extern void get_safe_registers(unsigned long *regs, unsigned long *fp_regs); 20 20 extern unsigned long get_thread_reg(int reg, jmp_buf *buf); 21 21 extern int get_fp_registers(int pid, unsigned long *regs); 22 22 extern int put_fp_registers(int pid, unsigned long *regs);
+1 -1
arch/um/kernel/process.c
··· 202 202 arch_copy_thread(&current->thread.arch, &p->thread.arch); 203 203 } 204 204 else { 205 - get_safe_registers(p->thread.regs.regs.gp); 205 + get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp); 206 206 p->thread.request.u.thread = current->thread.request.u.thread; 207 207 handler = new_thread_handler; 208 208 }
-28
arch/um/kernel/ptrace.c
··· 50 50 void __user *vp = p; 51 51 52 52 switch (request) { 53 - /* read word at location addr. */ 54 - case PTRACE_PEEKTEXT: 55 - case PTRACE_PEEKDATA: 56 - ret = generic_ptrace_peekdata(child, addr, data); 57 - break; 58 - 59 53 /* read the word at location addr in the USER area. */ 60 54 case PTRACE_PEEKUSR: 61 55 ret = peek_user(child, addr, data); 62 - break; 63 - 64 - /* write the word at location addr. */ 65 - case PTRACE_POKETEXT: 66 - case PTRACE_POKEDATA: 67 - ret = generic_ptrace_pokedata(child, addr, data); 68 56 break; 69 57 70 58 /* write the word at location addr in the USER area */ ··· 95 107 break; 96 108 } 97 109 #endif 98 - #ifdef PTRACE_GETFPREGS 99 - case PTRACE_GETFPREGS: /* Get the child FPU state. */ 100 - ret = get_fpregs(vp, child); 101 - break; 102 - #endif 103 - #ifdef PTRACE_SETFPREGS 104 - case PTRACE_SETFPREGS: /* Set the child FPU state. */ 105 - ret = set_fpregs(vp, child); 106 - break; 107 - #endif 108 110 case PTRACE_GET_THREAD_AREA: 109 111 ret = ptrace_get_thread_area(child, addr, vp); 110 112 break; ··· 131 153 ret = -EIO; 132 154 break; 133 155 } 134 - #endif 135 - #ifdef PTRACE_ARCH_PRCTL 136 - case PTRACE_ARCH_PRCTL: 137 - /* XXX Calls ptrace on the host - needs some SMP thinking */ 138 - ret = arch_prctl(child, data, (void __user *) addr); 139 - break; 140 156 #endif 141 157 default: 142 158 ret = ptrace_request(child, request, addr, data);
+8 -1
arch/um/os-Linux/registers.c
··· 8 8 #include <string.h> 9 9 #include <sys/ptrace.h> 10 10 #include "sysdep/ptrace.h" 11 + #include "sysdep/ptrace_user.h" 12 + #include "registers.h" 11 13 12 14 int save_registers(int pid, struct uml_pt_regs *regs) 13 15 { ··· 34 32 /* This is set once at boot time and not changed thereafter */ 35 33 36 34 static unsigned long exec_regs[MAX_REG_NR]; 35 + static unsigned long exec_fp_regs[FP_SIZE]; 37 36 38 37 int init_registers(int pid) 39 38 { ··· 45 42 return -errno; 46 43 47 44 arch_init_registers(pid); 45 + get_fp_registers(pid, exec_fp_regs); 48 46 return 0; 49 47 } 50 48 51 - void get_safe_registers(unsigned long *regs) 49 + void get_safe_registers(unsigned long *regs, unsigned long *fp_regs) 52 50 { 53 51 memcpy(regs, exec_regs, sizeof(exec_regs)); 52 + 53 + if (fp_regs) 54 + memcpy(fp_regs, exec_fp_regs, sizeof(exec_fp_regs)); 54 55 }
+1 -1
arch/um/os-Linux/skas/mem.c
··· 39 39 40 40 static int __init init_syscall_regs(void) 41 41 { 42 - get_safe_registers(syscall_regs); 42 + get_safe_registers(syscall_regs, NULL); 43 43 syscall_regs[REGS_IP_INDEX] = STUB_CODE + 44 44 ((unsigned long) &batch_syscall_stub - 45 45 (unsigned long) &__syscall_stub_start);
+18 -1
arch/um/os-Linux/skas/process.c
··· 373 373 if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp)) 374 374 fatal_sigsegv(); 375 375 376 + if (put_fp_registers(pid, regs->fp)) 377 + fatal_sigsegv(); 378 + 376 379 /* Now we set local_using_sysemu to be used for one loop */ 377 380 local_using_sysemu = get_using_sysemu(); 378 381 ··· 398 395 regs->is_user = 1; 399 396 if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) { 400 397 printk(UM_KERN_ERR "userspace - PTRACE_GETREGS failed, " 398 + "errno = %d\n", errno); 399 + fatal_sigsegv(); 400 + } 401 + 402 + if (get_fp_registers(pid, regs->fp)) { 403 + printk(UM_KERN_ERR "userspace - get_fp_registers failed, " 401 404 "errno = %d\n", errno); 402 405 fatal_sigsegv(); 403 406 } ··· 466 457 } 467 458 468 459 static unsigned long thread_regs[MAX_REG_NR]; 460 + static unsigned long thread_fp_regs[FP_SIZE]; 469 461 470 462 static int __init init_thread_regs(void) 471 463 { 472 - get_safe_registers(thread_regs); 464 + get_safe_registers(thread_regs, thread_fp_regs); 473 465 /* Set parent's instruction pointer to start of clone-stub */ 474 466 thread_regs[REGS_IP_INDEX] = STUB_CODE + 475 467 (unsigned long) stub_clone_handler - ··· 510 500 err = -errno; 511 501 printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_SETREGS " 512 502 "failed, pid = %d, errno = %d\n", pid, -err); 503 + return err; 504 + } 505 + 506 + err = put_fp_registers(pid, thread_fp_regs); 507 + if (err < 0) { 508 + printk(UM_KERN_ERR "copy_context_skas0 : put_fp_registers " 509 + "failed, pid = %d, err = %d\n", pid, err); 513 510 return err; 514 511 } 515 512
-5
arch/um/sys-i386/asm/ptrace.h
··· 42 42 */ 43 43 struct user_desc; 44 44 45 - extern int get_fpxregs(struct user_fxsr_struct __user *buf, 46 - struct task_struct *child); 47 - extern int set_fpxregs(struct user_fxsr_struct __user *buf, 48 - struct task_struct *tsk); 49 - 50 45 extern int ptrace_get_thread_area(struct task_struct *child, int idx, 51 46 struct user_desc __user *user_desc); 52 47
+23 -5
arch/um/sys-i386/ptrace.c
··· 145 145 return put_user(tmp, (unsigned long __user *) data); 146 146 } 147 147 148 - int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 148 + static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 149 149 { 150 150 int err, n, cpu = ((struct thread_info *) child->stack)->cpu; 151 151 struct user_i387_struct fpregs; ··· 161 161 return n; 162 162 } 163 163 164 - int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 164 + static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 165 165 { 166 166 int n, cpu = ((struct thread_info *) child->stack)->cpu; 167 167 struct user_i387_struct fpregs; ··· 174 174 (unsigned long *) &fpregs); 175 175 } 176 176 177 - int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) 177 + static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) 178 178 { 179 179 int err, n, cpu = ((struct thread_info *) child->stack)->cpu; 180 180 struct user_fxsr_struct fpregs; ··· 190 190 return n; 191 191 } 192 192 193 - int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) 193 + static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child) 194 194 { 195 195 int n, cpu = ((struct thread_info *) child->stack)->cpu; 196 196 struct user_fxsr_struct fpregs; ··· 206 206 long subarch_ptrace(struct task_struct *child, long request, 207 207 unsigned long addr, unsigned long data) 208 208 { 209 - return -EIO; 209 + int ret = -EIO; 210 + void __user *datap = (void __user *) data; 211 + switch (request) { 212 + case PTRACE_GETFPREGS: /* Get the child FPU state. */ 213 + ret = get_fpregs(datap, child); 214 + break; 215 + case PTRACE_SETFPREGS: /* Set the child FPU state. */ 216 + ret = set_fpregs(datap, child); 217 + break; 218 + case PTRACE_GETFPXREGS: /* Get the child FPU state. */ 219 + ret = get_fpxregs(datap, child); 220 + break; 221 + case PTRACE_SETFPXREGS: /* Set the child FPU state. */ 222 + ret = set_fpxregs(datap, child); 223 + break; 224 + default: 225 + ret = -EIO; 226 + } 227 + return ret; 210 228 }
+1
arch/um/sys-i386/shared/sysdep/ptrace.h
··· 53 53 54 54 struct uml_pt_regs { 55 55 unsigned long gp[MAX_REG_NR]; 56 + unsigned long fp[HOST_FPX_SIZE]; 56 57 struct faultinfo faultinfo; 57 58 long syscall; 58 59 int is_user;
+8 -4
arch/um/sys-x86_64/ptrace.c
··· 145 145 return instr == 0x050f; 146 146 } 147 147 148 - int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 148 + static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 149 149 { 150 150 int err, n, cpu = ((struct thread_info *) child->stack)->cpu; 151 151 long fpregs[HOST_FP_SIZE]; ··· 162 162 return n; 163 163 } 164 164 165 - int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 165 + static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child) 166 166 { 167 167 int n, cpu = ((struct thread_info *) child->stack)->cpu; 168 168 long fpregs[HOST_FP_SIZE]; ··· 182 182 void __user *datap = (void __user *) data; 183 183 184 184 switch (request) { 185 - case PTRACE_GETFPXREGS: /* Get the child FPU state. */ 185 + case PTRACE_GETFPREGS: /* Get the child FPU state. */ 186 186 ret = get_fpregs(datap, child); 187 187 break; 188 - case PTRACE_SETFPXREGS: /* Set the child FPU state. */ 188 + case PTRACE_SETFPREGS: /* Set the child FPU state. */ 189 189 ret = set_fpregs(datap, child); 190 + break; 191 + case PTRACE_ARCH_PRCTL: 192 + /* XXX Calls ptrace on the host - needs some SMP thinking */ 193 + ret = arch_prctl(child, data, (void __user *) addr); 190 194 break; 191 195 } 192 196
+1
arch/um/sys-x86_64/shared/sysdep/ptrace.h
··· 85 85 86 86 struct uml_pt_regs { 87 87 unsigned long gp[MAX_REG_NR]; 88 + unsigned long fp[HOST_FP_SIZE]; 88 89 struct faultinfo faultinfo; 89 90 long syscall; 90 91 int is_user;
-1
arch/x86/include/asm/alternative-asm.h
··· 16 16 #endif 17 17 18 18 .macro altinstruction_entry orig alt feature orig_len alt_len 19 - .align 8 20 19 .long \orig - . 21 20 .long \alt - . 22 21 .word \feature
-4
arch/x86/include/asm/alternative.h
··· 48 48 u16 cpuid; /* cpuid bit set for replacement */ 49 49 u8 instrlen; /* length of original instruction */ 50 50 u8 replacementlen; /* length of new instruction, <= instrlen */ 51 - #ifdef CONFIG_X86_64 52 - u32 pad2; 53 - #endif 54 51 }; 55 52 56 53 extern void alternative_instructions(void); ··· 80 83 \ 81 84 "661:\n\t" oldinstr "\n662:\n" \ 82 85 ".section .altinstructions,\"a\"\n" \ 83 - _ASM_ALIGN "\n" \ 84 86 " .long 661b - .\n" /* label */ \ 85 87 " .long 663f - .\n" /* new instruction */ \ 86 88 " .word " __stringify(feature) "\n" /* feature bit */ \
-2
arch/x86/include/asm/cpufeature.h
··· 332 332 asm goto("1: jmp %l[t_no]\n" 333 333 "2:\n" 334 334 ".section .altinstructions,\"a\"\n" 335 - _ASM_ALIGN "\n" 336 335 " .long 1b - .\n" 337 336 " .long 0\n" /* no replacement */ 338 337 " .word %P0\n" /* feature bit */ ··· 349 350 asm volatile("1: movb $0,%0\n" 350 351 "2:\n" 351 352 ".section .altinstructions,\"a\"\n" 352 - _ASM_ALIGN "\n" 353 353 " .long 1b - .\n" 354 354 " .long 3f - .\n" 355 355 " .word %P1\n" /* feature bit */
+12 -11
arch/x86/kernel/rtc.c
··· 42 42 { 43 43 int real_seconds, real_minutes, cmos_minutes; 44 44 unsigned char save_control, save_freq_select; 45 + unsigned long flags; 45 46 int retval = 0; 47 + 48 + spin_lock_irqsave(&rtc_lock, flags); 46 49 47 50 /* tell the clock it's being set */ 48 51 save_control = CMOS_READ(RTC_CONTROL); ··· 96 93 CMOS_WRITE(save_control, RTC_CONTROL); 97 94 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 98 95 96 + spin_unlock_irqrestore(&rtc_lock, flags); 97 + 99 98 return retval; 100 99 } 101 100 102 101 unsigned long mach_get_cmos_time(void) 103 102 { 104 103 unsigned int status, year, mon, day, hour, min, sec, century = 0; 104 + unsigned long flags; 105 + 106 + spin_lock_irqsave(&rtc_lock, flags); 105 107 106 108 /* 107 109 * If UIP is clear, then we have >= 244 microseconds before ··· 132 124 133 125 status = CMOS_READ(RTC_CONTROL); 134 126 WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY)); 127 + 128 + spin_unlock_irqrestore(&rtc_lock, flags); 135 129 136 130 if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) { 137 131 sec = bcd2bin(sec); ··· 179 169 180 170 int update_persistent_clock(struct timespec now) 181 171 { 182 - unsigned long flags; 183 - int retval; 184 - 185 - spin_lock_irqsave(&rtc_lock, flags); 186 - retval = x86_platform.set_wallclock(now.tv_sec); 187 - spin_unlock_irqrestore(&rtc_lock, flags); 188 - 189 - return retval; 172 + return x86_platform.set_wallclock(now.tv_sec); 190 173 } 191 174 192 175 /* not static: needed by APM */ 193 176 void read_persistent_clock(struct timespec *ts) 194 177 { 195 - unsigned long retval, flags; 178 + unsigned long retval; 196 179 197 - spin_lock_irqsave(&rtc_lock, flags); 198 180 retval = x86_platform.get_wallclock(); 199 - spin_unlock_irqrestore(&rtc_lock, flags); 200 181 201 182 ts->tv_sec = retval; 202 183 ts->tv_nsec = 0;
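The rtc.c rework above moves rtc_lock out of the generic read/update_persistent_clock wrappers and into mach_get_cmos_time()/mach_set_rtc_mmss() themselves, so each platform wallclock hook owns its locking (the vrtc.c hunk further down does the same for the Moorestown vRTC). The CMOS clock is a shared index/data port pair, so accesses must stay serialized and interrupt-safe, hence the spin_lock_irqsave() shape; a condensed sketch:

    static unsigned char cmos_read_seconds(void)
    {
            unsigned long flags;
            unsigned char sec;

            /* rtc_lock serializes the shared CMOS index/data ports and
             * _irqsave keeps an interrupt from re-entering the sequence */
            spin_lock_irqsave(&rtc_lock, flags);
            sec = CMOS_READ(RTC_SECONDS);
            spin_unlock_irqrestore(&rtc_lock, flags);
            return sec;
    }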
+1 -1
arch/x86/kvm/emulate.c
··· 3603 3603 break; 3604 3604 case Src2CL: 3605 3605 ctxt->src2.bytes = 1; 3606 - ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0x8; 3606 + ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0xff; 3607 3607 break; 3608 3608 case Src2ImmByte: 3609 3609 rc = decode_imm(ctxt, &ctxt->src2, 1, true);
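The one-character emulator fix above is easy to gloss over: `& 0x8` keeps only bit 3 of RCX, so a shift count of CL=1 decoded as 0 and CL=9 as 8, while `& 0xff` recovers the whole CL byte that Src2CL operands (SHLD/SHRD-style instructions) consume. A two-line standalone check of the difference:

    #include <stdio.h>

    int main(void)
    {
            unsigned long rcx = 0x109;      /* CL = 0x09 */

            printf("old mask: %#lx\n", rcx & 0x8);  /* 0x8 - wrong count */
            printf("new mask: %#lx\n", rcx & 0xff); /* 0x9 - the real CL */
            return 0;
    }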
+2 -1
arch/x86/kvm/mmu.c
··· 400 400 401 401 /* xchg acts as a barrier before the setting of the high bits */ 402 402 orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low); 403 - orig.spte_high = ssptep->spte_high = sspte.spte_high; 403 + orig.spte_high = ssptep->spte_high; 404 + ssptep->spte_high = sspte.spte_high; 404 405 count_spte_clear(sptep, spte); 405 406 406 407 return orig.spte;
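The split assignment above fixes a subtle bug in the PAE spte path: the chained form `orig.spte_high = ssptep->spte_high = sspte.spte_high;` installs the new high word and then records that same new value as the "original", so the caller never sees the pre-update high bits. Reading before writing restores the intended semantics; a standalone illustration:

    #include <stdio.h>

    struct spte { unsigned int low, high; };

    int main(void)
    {
            struct spte cur = { .low = 1, .high = 2 };
            struct spte upd = { .low = 3, .high = 4 };
            struct spte orig;

            /* buggy chained form: orig.high captures the NEW value (4) */
            orig.high = cur.high = upd.high;
            printf("chained: orig.high=%u (expected 2)\n", orig.high);

            cur.high = 2;           /* reset for the fixed form */
            orig.high = cur.high;   /* read the old value first */
            cur.high = upd.high;    /* then install the new one */
            printf("split:   orig.high=%u\n", orig.high);
            return 0;
    }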
+9
arch/x86/platform/mrst/vrtc.c
··· 58 58 unsigned long vrtc_get_time(void) 59 59 { 60 60 u8 sec, min, hour, mday, mon; 61 + unsigned long flags; 61 62 u32 year; 63 + 64 + spin_lock_irqsave(&rtc_lock, flags); 62 65 63 66 while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP)) 64 67 cpu_relax(); ··· 72 69 mday = vrtc_cmos_read(RTC_DAY_OF_MONTH); 73 70 mon = vrtc_cmos_read(RTC_MONTH); 74 71 year = vrtc_cmos_read(RTC_YEAR); 72 + 73 + spin_unlock_irqrestore(&rtc_lock, flags); 75 74 76 75 /* vRTC YEAR reg contains the offset to 1960 */ 77 76 year += 1960; ··· 88 83 int vrtc_set_mmss(unsigned long nowtime) 89 84 { 90 85 int real_sec, real_min; 86 + unsigned long flags; 91 87 int vrtc_min; 92 88 89 + spin_lock_irqsave(&rtc_lock, flags); 93 90 vrtc_min = vrtc_cmos_read(RTC_MINUTES); 94 91 95 92 real_sec = nowtime % 60; ··· 102 95 103 96 vrtc_cmos_write(real_sec, RTC_SECONDS); 104 97 vrtc_cmos_write(real_min, RTC_MINUTES); 98 + spin_unlock_irqrestore(&rtc_lock, flags); 99 + 105 100 return 0; 106 101 } 107 102
+2 -4
arch/x86/xen/mmu.c
··· 1721 1721 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES; 1722 1722 } 1723 1723 #ifdef CONFIG_X86_32 1724 - if ((machine_to_phys_mapping + machine_to_phys_nr) 1725 - < machine_to_phys_mapping) 1726 - machine_to_phys_nr = (unsigned long *)NULL 1727 - - machine_to_phys_mapping; 1724 + WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1)) 1725 + < machine_to_phys_mapping); 1728 1726 #endif 1729 1727 } 1730 1728
+6 -4
arch/x86/xen/setup.c
··· 306 306 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); 307 307 308 308 extra_limit = xen_get_max_pages(); 309 - if (extra_limit >= max_pfn) 310 - extra_pages = extra_limit - max_pfn; 311 - else 312 - extra_pages = 0; 309 + if (max_pfn + extra_pages > extra_limit) { 310 + if (extra_limit > max_pfn) 311 + extra_pages = extra_limit - max_pfn; 312 + else 313 + extra_pages = 0; 314 + } 313 315 314 316 extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820); 315 317
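The xen_get_max_pages() handling above previously replaced extra_pages with extra_limit - max_pfn unconditionally, which could inflate as well as discard the value already accumulated; the fix only clamps when the running total actually overshoots the reported limit. A standalone rendering of the arithmetic:

    #include <stdio.h>

    static unsigned long clamp_extra(unsigned long max_pfn,
                                     unsigned long extra_pages,
                                     unsigned long extra_limit)
    {
            /* mirrors the fixed branch in the hunk above */
            if (max_pfn + extra_pages > extra_limit) {
                    if (extra_limit > max_pfn)
                            extra_pages = extra_limit - max_pfn;
                    else
                            extra_pages = 0;
            }
            return extra_pages;
    }

    int main(void)
    {
            printf("%lu\n", clamp_extra(100, 50, 120));  /* clamped to 20 */
            printf("%lu\n", clamp_extra(100, 10, 120));  /* fits: stays 10 */
            printf("%lu\n", clamp_extra(100, 10,  80));  /* limit < max_pfn: 0 */
            return 0;
    }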
-1
arch/x86/xen/smp.c
··· 532 532 WARN_ON(xen_smp_intr_init(0)); 533 533 534 534 xen_init_lock_cpu(0); 535 - xen_init_spinlocks(); 536 535 } 537 536 538 537 static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
+3 -2
arch/x86/xen/time.c
··· 168 168 struct pvclock_vcpu_time_info *src; 169 169 cycle_t ret; 170 170 171 - src = &get_cpu_var(xen_vcpu)->time; 171 + preempt_disable_notrace(); 172 + src = &__get_cpu_var(xen_vcpu)->time; 172 173 ret = pvclock_clocksource_read(src); 173 - put_cpu_var(xen_vcpu); 174 + preempt_enable_notrace(); 174 175 return ret; 175 176 } 176 177
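The switch from get_cpu_var() to preempt_disable_notrace() around __get_cpu_var() above appears to be about tracing: a clocksource read can be reached from the function tracer itself, and the ordinary traced preemption helpers would recurse there. A sketch of the resulting shape, assuming the per-CPU xen_vcpu pointer from the hunk:

    static cycle_t xen_read_time(void)
    {
            struct pvclock_vcpu_time_info *src;
            cycle_t ret;

            preempt_disable_notrace();      /* not traced: safe under ftrace */
            src = &__get_cpu_var(xen_vcpu)->time;
            ret = pvclock_clocksource_read(src);
            preempt_enable_notrace();
            return ret;
    }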
+16 -21
block/blk-cgroup.c
··· 785 785 { 786 786 char *s[4], *p, *major_s = NULL, *minor_s = NULL; 787 787 int ret; 788 - unsigned long major, minor, temp; 788 + unsigned long major, minor; 789 789 int i = 0; 790 790 dev_t dev; 791 - u64 bps, iops; 791 + u64 temp; 792 792 793 793 memset(s, 0, sizeof(s)); 794 794 ··· 826 826 827 827 dev = MKDEV(major, minor); 828 828 829 - ret = blkio_check_dev_num(dev); 829 + ret = strict_strtoull(s[1], 10, &temp); 830 830 if (ret) 831 - return ret; 831 + return -EINVAL; 832 + 833 + /* For rule removal, do not check for device presence. */ 834 + if (temp) { 835 + ret = blkio_check_dev_num(dev); 836 + if (ret) 837 + return ret; 838 + } 832 839 833 840 newpn->dev = dev; 834 841 835 - if (s[1] == NULL) 836 - return -EINVAL; 837 - 838 842 switch (plid) { 839 843 case BLKIO_POLICY_PROP: 840 - ret = strict_strtoul(s[1], 10, &temp); 841 - if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) || 842 - temp > BLKIO_WEIGHT_MAX) 844 + if ((temp < BLKIO_WEIGHT_MIN && temp > 0) || 845 + temp > BLKIO_WEIGHT_MAX) 843 846 return -EINVAL; 844 847 845 848 newpn->plid = plid; ··· 853 850 switch(fileid) { 854 851 case BLKIO_THROTL_read_bps_device: 855 852 case BLKIO_THROTL_write_bps_device: 856 - ret = strict_strtoull(s[1], 10, &bps); 857 - if (ret) 858 - return -EINVAL; 859 - 860 853 newpn->plid = plid; 861 854 newpn->fileid = fileid; 862 - newpn->val.bps = bps; 855 + newpn->val.bps = temp; 863 856 break; 864 857 case BLKIO_THROTL_read_iops_device: 865 858 case BLKIO_THROTL_write_iops_device: 866 - ret = strict_strtoull(s[1], 10, &iops); 867 - if (ret) 868 - return -EINVAL; 869 - 870 - if (iops > THROTL_IOPS_MAX) 859 + if (temp > THROTL_IOPS_MAX) 871 860 return -EINVAL; 872 861 873 862 newpn->plid = plid; 874 863 newpn->fileid = fileid; 875 - newpn->val.iops = (unsigned int)iops; 864 + newpn->val.iops = (unsigned int)temp; 876 865 break; 877 866 } 878 867 break;
+15 -15
block/blk-core.c
··· 348 348 EXPORT_SYMBOL(blk_put_queue); 349 349 350 350 /* 351 - * Note: If a driver supplied the queue lock, it should not zap that lock 352 - * unexpectedly as some queue cleanup components like elevator_exit() and 353 - * blk_throtl_exit() need queue lock. 351 + * Note: If a driver supplied the queue lock, it is disconnected 352 + * by this function. The actual state of the lock doesn't matter 353 + * here as the request_queue isn't accessible after this point 354 + * (QUEUE_FLAG_DEAD is set) and no other requests will be queued. 354 355 */ 355 356 void blk_cleanup_queue(struct request_queue *q) 356 357 { ··· 368 367 queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); 369 368 mutex_unlock(&q->sysfs_lock); 370 369 371 - if (q->elevator) 372 - elevator_exit(q->elevator); 373 - 374 - blk_throtl_exit(q); 370 + if (q->queue_lock != &q->__queue_lock) 371 + q->queue_lock = &q->__queue_lock; 375 372 376 373 blk_put_queue(q); 377 374 } ··· 1166 1167 * true if merge was successful, otherwise false. 1167 1168 */ 1168 1169 static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q, 1169 - struct bio *bio) 1170 + struct bio *bio, unsigned int *request_count) 1170 1171 { 1171 1172 struct blk_plug *plug; 1172 1173 struct request *rq; ··· 1175 1176 plug = tsk->plug; 1176 1177 if (!plug) 1177 1178 goto out; 1179 + *request_count = 0; 1178 1180 1179 1181 list_for_each_entry_reverse(rq, &plug->list, queuelist) { 1180 1182 int el_ret; 1183 + 1184 + (*request_count)++; 1181 1185 1182 1186 if (rq->q != q) 1183 1187 continue; ··· 1221 1219 struct blk_plug *plug; 1222 1220 int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT; 1223 1221 struct request *req; 1222 + unsigned int request_count = 0; 1224 1223 1225 1224 /* 1226 1225 * low level driver can indicate that it wants pages above a ··· 1240 1237 * Check if we can merge with the plugged list before grabbing 1241 1238 * any locks. 1242 1239 */ 1243 - if (attempt_plug_merge(current, q, bio)) 1240 + if (attempt_plug_merge(current, q, bio, &request_count)) 1244 1241 goto out; 1245 1242 1246 1243 spin_lock_irq(q->queue_lock); ··· 1305 1302 if (__rq->q != q) 1306 1303 plug->should_sort = 1; 1307 1304 } 1308 - list_add_tail(&req->queuelist, &plug->list); 1309 - plug->count++; 1310 - drive_stat_acct(req, 1); 1311 - if (plug->count >= BLK_MAX_REQUEST_COUNT) 1305 + if (request_count >= BLK_MAX_REQUEST_COUNT) 1312 1306 blk_flush_plug_list(plug, false); 1307 + list_add_tail(&req->queuelist, &plug->list); 1308 + drive_stat_acct(req, 1); 1313 1309 } else { 1314 1310 spin_lock_irq(q->queue_lock); 1315 1311 add_acct_request(q, req, where); ··· 2636 2634 INIT_LIST_HEAD(&plug->list); 2637 2635 INIT_LIST_HEAD(&plug->cb_list); 2638 2636 plug->should_sort = 0; 2639 - plug->count = 0; 2640 2637 2641 2638 /* 2642 2639 * If this is a nested plug, don't actually assign it. It will be ··· 2719 2718 return; 2720 2719 2721 2720 list_splice_init(&plug->list, &list); 2722 - plug->count = 0; 2723 2721 2724 2722 if (plug->should_sort) { 2725 2723 list_sort(NULL, &list, plug_rq_cmp);
+1 -1
block/blk-softirq.c
··· 115 115 /* 116 116 * Select completion CPU 117 117 */ 118 - if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1) { 118 + if (req->cpu != -1) { 119 119 ccpu = req->cpu; 120 120 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) { 121 121 ccpu = blk_cpu_to_group(ccpu);
+11 -4
block/blk-sysfs.c
··· 258 258 259 259 ret = queue_var_store(&val, page, count); 260 260 spin_lock_irq(q->queue_lock); 261 - if (val) { 261 + if (val == 2) { 262 262 queue_flag_set(QUEUE_FLAG_SAME_COMP, q); 263 - if (val == 2) 264 - queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); 265 - } else { 263 + queue_flag_set(QUEUE_FLAG_SAME_FORCE, q); 264 + } else if (val == 1) { 265 + queue_flag_set(QUEUE_FLAG_SAME_COMP, q); 266 + queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); 267 + } else if (val == 0) { 266 268 queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); 267 269 queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q); 268 270 } ··· 478 476 struct request_list *rl = &q->rq; 479 477 480 478 blk_sync_queue(q); 479 + 480 + if (q->elevator) 481 + elevator_exit(q->elevator); 482 + 483 + blk_throtl_exit(q); 481 484 482 485 if (rl->rq_pool) 483 486 mempool_destroy(rl->rq_pool);
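Judging by the flags involved, the store hook above backs the queue's rq_affinity sysfs attribute, and the rewrite makes it an explicit three-state switch: previously, writing 1 after 2 set SAME_COMP but could never clear the stale SAME_FORCE bit. The resulting truth table, as a tiny helper:

    /* rq_affinity semantics after the fix (an interpretive sketch):
     *   0 - no forced completion affinity
     *   1 - complete on the submitting CPU's group (SAME_COMP)
     *   2 - complete on the exact submitting CPU (SAME_COMP + SAME_FORCE)
     */
    static void rq_affinity_flags(unsigned long val, bool *comp, bool *force)
    {
            *comp  = (val == 1 || val == 2);
            *force = (val == 2);
    }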
+10 -10
block/cfq-iosched.c
··· 130 130 unsigned long slice_end; 131 131 long slice_resid; 132 132 133 - /* pending metadata requests */ 134 - int meta_pending; 133 + /* pending priority requests */ 134 + int prio_pending; 135 135 /* number of requests that are on the dispatch list or inside driver */ 136 136 int dispatched; 137 137 ··· 684 684 if (rq_is_sync(rq1) != rq_is_sync(rq2)) 685 685 return rq_is_sync(rq1) ? rq1 : rq2; 686 686 687 - if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_META) 688 - return rq1->cmd_flags & REQ_META ? rq1 : rq2; 687 + if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO) 688 + return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2; 689 689 690 690 s1 = blk_rq_pos(rq1); 691 691 s2 = blk_rq_pos(rq2); ··· 1612 1612 cfqq->cfqd->rq_queued--; 1613 1613 cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, 1614 1614 rq_data_dir(rq), rq_is_sync(rq)); 1615 - if (rq->cmd_flags & REQ_META) { 1616 - WARN_ON(!cfqq->meta_pending); 1617 - cfqq->meta_pending--; 1615 + if (rq->cmd_flags & REQ_PRIO) { 1616 + WARN_ON(!cfqq->prio_pending); 1617 + cfqq->prio_pending--; 1618 1618 } 1619 1619 } 1620 1620 ··· 3372 3372 * So both queues are sync. Let the new request get disk time if 3373 3373 * it's a metadata request and the current queue is doing regular IO. 3374 3374 */ 3375 - if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending) 3375 + if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending) 3376 3376 return true; 3377 3377 3378 3378 /* ··· 3439 3439 struct cfq_io_context *cic = RQ_CIC(rq); 3440 3440 3441 3441 cfqd->rq_queued++; 3442 - if (rq->cmd_flags & REQ_META) 3443 - cfqq->meta_pending++; 3442 + if (rq->cmd_flags & REQ_PRIO) 3443 + cfqq->prio_pending++; 3444 3444 3445 3445 cfq_update_io_thinktime(cfqd, cfqq, cic); 3446 3446 cfq_update_io_seektime(cfqd, cfqq, rq);
+1 -1
drivers/acpi/acpica/acconfig.h
··· 121 121 122 122 /* Maximum sleep allowed via Sleep() operator */ 123 123 124 - #define ACPI_MAX_SLEEP 20000 /* Two seconds */ 124 + #define ACPI_MAX_SLEEP 2000 /* Two seconds */ 125 125 126 126 /****************************************************************************** 127 127 *
+1
drivers/acpi/apei/Kconfig
··· 13 13 bool "APEI Generic Hardware Error Source" 14 14 depends on ACPI_APEI && X86 15 15 select ACPI_HED 16 + select IRQ_WORK 16 17 select LLIST 17 18 select GENERIC_ALLOCATOR 18 19 help
+1 -1
drivers/acpi/apei/apei-base.c
··· 618 618 }; 619 619 620 620 capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; 621 - capbuf[OSC_SUPPORT_TYPE] = 0; 621 + capbuf[OSC_SUPPORT_TYPE] = 1; 622 622 capbuf[OSC_CONTROL_TYPE] = 0; 623 623 624 624 if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
+38 -37
drivers/base/power/clock_ops.c
··· 42 42 } 43 43 44 44 /** 45 + * pm_clk_acquire - Acquire a device clock. 46 + * @dev: Device whose clock is to be acquired. 47 + * @ce: PM clock entry corresponding to the clock. 48 + */ 49 + static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) 50 + { 51 + ce->clk = clk_get(dev, ce->con_id); 52 + if (IS_ERR(ce->clk)) { 53 + ce->status = PCE_STATUS_ERROR; 54 + } else { 55 + ce->status = PCE_STATUS_ACQUIRED; 56 + dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); 57 + } 58 + } 59 + 60 + /** 45 61 * pm_clk_add - Start using a device clock for power management. 46 62 * @dev: Device whose clock is going to be used for power management. 47 63 * @con_id: Connection ID of the clock. ··· 89 73 } 90 74 } 91 75 76 + pm_clk_acquire(dev, ce); 77 + 92 78 spin_lock_irq(&pcd->lock); 93 79 list_add_tail(&ce->node, &pcd->clock_list); 94 80 spin_unlock_irq(&pcd->lock); ··· 100 82 /** 101 83 * __pm_clk_remove - Destroy PM clock entry. 102 84 * @ce: PM clock entry to destroy. 103 - * 104 - * This routine must be called under the spinlock protecting the PM list of 105 - * clocks corresponding the the @ce's device. 106 85 */ 107 86 static void __pm_clk_remove(struct pm_clock_entry *ce) 108 87 { 109 88 if (!ce) 110 89 return; 111 - 112 - list_del(&ce->node); 113 90 114 91 if (ce->status < PCE_STATUS_ERROR) { 115 92 if (ce->status == PCE_STATUS_ENABLED) ··· 139 126 spin_lock_irq(&pcd->lock); 140 127 141 128 list_for_each_entry(ce, &pcd->clock_list, node) { 142 - if (!con_id && !ce->con_id) { 143 - __pm_clk_remove(ce); 144 - break; 145 - } else if (!con_id || !ce->con_id) { 129 - if (!con_id && !ce->con_id) 130 - goto remove; 131 - else if (!con_id || !ce->con_id) 146 132 continue; 147 - } else if (!strcmp(con_id, ce->con_id)) { 148 - __pm_clk_remove(ce); 149 - break; 150 - } 133 - else if (!strcmp(con_id, ce->con_id)) 134 - goto remove; 151 135 } 152 136 153 137 spin_unlock_irq(&pcd->lock); 138 + return; 139 + 140 + remove: 141 + list_del(&ce->node); 142 + spin_unlock_irq(&pcd->lock); 143 + 144 + __pm_clk_remove(ce); 154 145 } 155 146 156 147 /** ··· 192 175 { 193 176 struct pm_clk_data *pcd = __to_pcd(dev); 194 177 struct pm_clock_entry *ce, *c; 178 + struct list_head list; 195 179 196 180 if (!pcd) 197 181 return; 198 182 199 183 dev->power.subsys_data = NULL; 184 + INIT_LIST_HEAD(&list); 200 185 201 186 spin_lock_irq(&pcd->lock); 202 187 203 188 list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) 204 - __pm_clk_remove(ce); 189 + list_move(&ce->node, &list); 205 190 206 191 spin_unlock_irq(&pcd->lock); 207 192 208 193 kfree(pcd); 194 + 195 + list_for_each_entry_safe_reverse(ce, c, &list, node) { 196 + list_del(&ce->node); 197 + __pm_clk_remove(ce); 198 + } 209 199 } 210 200 211 201 #endif /* CONFIG_PM */ 212 202 213 203 #ifdef CONFIG_PM_RUNTIME 214 - 215 - /** 216 - * pm_clk_acquire - Acquire a device clock. 217 - * @dev: Device whose clock is to be acquired. 218 - * @con_id: Connection ID of the clock. 219 - */ 220 - static void pm_clk_acquire(struct device *dev, 221 - struct pm_clock_entry *ce) 222 - { 223 - ce->clk = clk_get(dev, ce->con_id); 224 - if (IS_ERR(ce->clk)) { 225 - ce->status = PCE_STATUS_ERROR; 226 - } else { 227 - ce->status = PCE_STATUS_ACQUIRED; 228 - dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); 229 - } 230 - } 231 204 232 205 /** 233 206 * pm_clk_suspend - Disable clocks in a device's PM clock list. ··· 237 230 spin_lock_irqsave(&pcd->lock, flags); 238 231 239 232 list_for_each_entry_reverse(ce, &pcd->clock_list, node) { 240 - if (ce->status == PCE_STATUS_NONE) 241 - pm_clk_acquire(dev, ce); 242 - 243 233 if (ce->status < PCE_STATUS_ERROR) { 244 234 clk_disable(ce->clk); 245 235 ce->status = PCE_STATUS_ACQUIRED; ··· 266 262 spin_lock_irqsave(&pcd->lock, flags); 267 263 268 264 list_for_each_entry(ce, &pcd->clock_list, node) { 269 - if (ce->status == PCE_STATUS_NONE) 270 - pm_clk_acquire(dev, ce); 271 - 272 265 if (ce->status < PCE_STATUS_ERROR) { 273 266 clk_enable(ce->clk); 274 267 ce->status = PCE_STATUS_ENABLED;
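The clock_ops.c rework above is driven by one constraint: clk_get() (and the clk_put() reached through __pm_clk_remove()) may sleep, so neither can run under pcd->lock, a spinlock. Acquisition therefore moves to pm_clk_add() before the lock is taken, and teardown detaches entries onto a private list under the lock and frees them afterwards. A condensed sketch of that detach-then-free shape:

    static void destroy_entries(struct pm_clk_data *pcd)
    {
            struct pm_clock_entry *ce, *c;
            struct list_head list;

            INIT_LIST_HEAD(&list);

            spin_lock_irq(&pcd->lock);
            list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
                    list_move(&ce->node, &list);    /* detach under the lock */
            spin_unlock_irq(&pcd->lock);

            list_for_each_entry_safe_reverse(ce, c, &list, node) {
                    list_del(&ce->node);
                    __pm_clk_remove(ce);            /* may sleep - lock dropped */
            }
    }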
+4 -4
drivers/block/floppy.c
··· 4250 4250 use_virtual_dma = can_use_virtual_dma & 1; 4251 4251 fdc_state[0].address = FDC1; 4252 4252 if (fdc_state[0].address == -1) { 4253 - del_timer(&fd_timeout); 4253 + del_timer_sync(&fd_timeout); 4254 4254 err = -ENODEV; 4255 4255 goto out_unreg_region; 4256 4256 } ··· 4261 4261 fdc = 0; /* reset fdc in case of unexpected interrupt */ 4262 4262 err = floppy_grab_irq_and_dma(); 4263 4263 if (err) { 4264 - del_timer(&fd_timeout); 4264 + del_timer_sync(&fd_timeout); 4265 4265 err = -EBUSY; 4266 4266 goto out_unreg_region; 4267 4267 } ··· 4318 4318 user_reset_fdc(-1, FD_RESET_ALWAYS, false); 4319 4319 } 4320 4320 fdc = 0; 4321 - del_timer(&fd_timeout); 4321 + del_timer_sync(&fd_timeout); 4322 4322 current_drive = 0; 4323 4323 initialized = true; 4324 4324 if (have_no_fdc) { ··· 4368 4368 unregister_blkdev(FLOPPY_MAJOR, "fd"); 4369 4369 out_put_disk: 4370 4370 while (dr--) { 4371 - del_timer(&motor_off_timer[dr]); 4371 + del_timer_sync(&motor_off_timer[dr]); 4372 4372 if (disks[dr]->queue) 4373 4373 blk_cleanup_queue(disks[dr]->queue); 4374 4374 put_disk(disks[dr]);
+1 -1
drivers/block/xen-blkback/common.h
··· 46 46 47 47 #define DRV_PFX "xen-blkback:" 48 48 #define DPRINTK(fmt, args...) \ 49 - pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \ 49 + pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \ 50 50 __func__, __LINE__, ##args) 51 51 52 52
+3 -3
drivers/block/xen-blkback/xenbus.c
··· 590 590 591 591 /* 592 592 * Enforce precondition before potential leak point. 593 - * blkif_disconnect() is idempotent. 593 + * xen_blkif_disconnect() is idempotent. 594 594 */ 595 595 xen_blkif_disconnect(be->blkif); 596 596 ··· 601 601 break; 602 602 603 603 case XenbusStateClosing: 604 - xen_blkif_disconnect(be->blkif); 605 604 xenbus_switch_state(dev, XenbusStateClosing); 606 605 break; 607 606 608 607 case XenbusStateClosed: 608 + xen_blkif_disconnect(be->blkif); 609 609 xenbus_switch_state(dev, XenbusStateClosed); 610 610 if (xenbus_dev_is_online(dev)) 611 611 break; 612 612 /* fall through if not online */ 613 613 case XenbusStateUnknown: 614 - /* implies blkif_disconnect() via blkback_remove() */ 614 + /* implies xen_blkif_disconnect() via xen_blkbk_remove() */ 615 615 device_unregister(&dev->dev); 616 616 break; 617 617
+6
drivers/bluetooth/btusb.c
··· 72 72 /* Apple MacBookAir3,1, MacBookAir3,2 */ 73 73 { USB_DEVICE(0x05ac, 0x821b) }, 74 74 75 + /* Apple MacBookAir4,1 */ 76 + { USB_DEVICE(0x05ac, 0x821f) }, 77 + 75 78 /* Apple MacBookPro8,2 */ 76 79 { USB_DEVICE(0x05ac, 0x821a) }, 80 + 81 + /* Apple MacMini5,1 */ 82 + { USB_DEVICE(0x05ac, 0x8281) }, 77 83 78 84 /* AVM BlueFRITZ! USB v2.0 */ 79 85 { USB_DEVICE(0x057c, 0x3800) },
+8 -8
drivers/bluetooth/btwilink.c
··· 125 125 /* protocol structure registered with shared transport */ 126 126 static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = { 127 127 { 128 + .chnl_id = HCI_EVENT_PKT, /* HCI Events */ 129 + .hdr_len = sizeof(struct hci_event_hdr), 130 + .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen), 131 + .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */ 132 + .reserve = 8, 133 + }, 134 + { 128 135 .chnl_id = HCI_ACLDATA_PKT, /* ACL */ 129 136 .hdr_len = sizeof(struct hci_acl_hdr), 130 137 .offset_len_in_hdr = offsetof(struct hci_acl_hdr, dlen), ··· 143 136 .hdr_len = sizeof(struct hci_sco_hdr), 144 137 .offset_len_in_hdr = offsetof(struct hci_sco_hdr, dlen), 145 138 .len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */ 146 - .reserve = 8, 147 - }, 148 - { 149 - .chnl_id = HCI_EVENT_PKT, /* HCI Events */ 150 - .hdr_len = sizeof(struct hci_event_hdr), 151 - .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen), 152 - .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */ 153 139 .reserve = 8, 154 140 }, 155 141 }; ··· 240 240 if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 241 241 return 0; 242 242 243 - for (i = 0; i < MAX_BT_CHNL_IDS; i++) { 243 + for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) { 244 244 err = st_unregister(&ti_st_proto[i]); 245 245 if (err) 246 246 BT_ERR("st_unregister(%d) failed with error %d",
+1
drivers/char/tpm/Kconfig
··· 43 43 44 44 config TCG_ATMEL 45 45 tristate "Atmel TPM Interface" 46 + depends on PPC64 || HAS_IOPORT 46 47 ---help--- 47 48 If you have a TPM security chip from Atmel say Yes and it 48 49 will be accessible from within Linux. To compile this driver
+8 -1
drivers/char/tpm/tpm.c
··· 383 383 u32 count, ordinal; 384 384 unsigned long stop; 385 385 386 + if (bufsiz > TPM_BUFSIZE) 387 + bufsiz = TPM_BUFSIZE; 388 + 386 389 count = be32_to_cpu(*((__be32 *) (buf + 2))); 387 390 ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); 388 391 if (count == 0) ··· 1105 1102 { 1106 1103 struct tpm_chip *chip = file->private_data; 1107 1104 ssize_t ret_size; 1105 + int rc; 1108 1106 1109 1107 del_singleshot_timer_sync(&chip->user_read_timer); 1110 1108 flush_work_sync(&chip->work); ··· 1116 1112 ret_size = size; 1117 1113 1118 1114 mutex_lock(&chip->buffer_mutex); 1119 - if (copy_to_user(buf, chip->data_buffer, ret_size)) 1115 + rc = copy_to_user(buf, chip->data_buffer, ret_size); 1116 + memset(chip->data_buffer, 0, ret_size); 1117 + if (rc) 1120 1118 ret_size = -EFAULT; 1119 + 1121 1120 mutex_unlock(&chip->buffer_mutex); 1122 1121 } 1123 1122
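Two hardening fixes sit in the tpm.c hunk above: the transmit path clamps the caller-supplied size to TPM_BUFSIZE so an oversized length cannot run past the device buffer, and the read path wipes data_buffer after copying it out so a later, shorter read cannot return a previous command's response. A condensed sketch of the read-side pattern:

    static ssize_t secure_read(char __user *buf, u8 *data,
                               size_t avail, size_t want)
    {
            size_t n = want < avail ? want : avail;
            ssize_t ret = n;

            if (copy_to_user(buf, data, n))
                    ret = -EFAULT;
            memset(data, 0, n);     /* wipe the kernel copy either way */
            return ret;
    }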
-2
drivers/char/tpm/tpm_nsc.c
··· 396 396 if (pdev) { 397 397 tpm_nsc_remove(&pdev->dev); 398 398 platform_device_unregister(pdev); 399 - kfree(pdev); 400 - pdev = NULL; 401 399 } 402 400 403 401 platform_driver_unregister(&nsc_drv);
+3
drivers/cpufreq/pcc-cpufreq.c
··· 261 261 pr = per_cpu(processors, cpu); 262 262 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); 263 263 264 + if (!pr) 265 + return -ENODEV; 266 + 264 267 status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer); 265 268 if (ACPI_FAILURE(status)) 266 269 return -ENODEV;
+3
drivers/firewire/ohci.c
··· 290 290 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, 291 291 QUIRK_CYCLE_TIMER}, 292 292 293 + {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID, 294 + QUIRK_NO_MSI}, 295 + 293 296 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, 294 297 QUIRK_CYCLE_TIMER}, 295 298
+5 -10
drivers/gpio/gpio-generic.c
··· 351 351 return 0; 352 352 } 353 353 354 - int __devexit bgpio_remove(struct bgpio_chip *bgc) 354 + int bgpio_remove(struct bgpio_chip *bgc) 355 355 { 356 356 int err = gpiochip_remove(&bgc->gc); 357 357 ··· 361 361 } 362 362 EXPORT_SYMBOL_GPL(bgpio_remove); 363 363 364 - int __devinit bgpio_init(struct bgpio_chip *bgc, 365 - struct device *dev, 366 - unsigned long sz, 367 - void __iomem *dat, 368 - void __iomem *set, 369 - void __iomem *clr, 370 - void __iomem *dirout, 371 - void __iomem *dirin, 372 - bool big_endian) 364 + int bgpio_init(struct bgpio_chip *bgc, struct device *dev, 365 + unsigned long sz, void __iomem *dat, void __iomem *set, 366 + void __iomem *clr, void __iomem *dirout, void __iomem *dirin, 367 + bool big_endian) 373 368 { 374 369 int ret; 375 370
+2 -2
drivers/gpu/drm/i915/i915_drv.c
··· 67 67 MODULE_PARM_DESC(i915_enable_rc6, 68 68 "Enable power-saving render C-state 6 (default: true)"); 69 69 70 - unsigned int i915_enable_fbc __read_mostly = 1; 70 + unsigned int i915_enable_fbc __read_mostly = -1; 71 71 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); 72 72 MODULE_PARM_DESC(i915_enable_fbc, 73 73 "Enable frame buffer compression for power savings " 74 - "(default: false)"); 74 + "(default: -1 (use per-chip default))"); 75 75 76 76 unsigned int i915_lvds_downclock __read_mostly = 0; 77 77 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+16 -6
drivers/gpu/drm/i915/intel_display.c
··· 1799 1799 struct drm_framebuffer *fb; 1800 1800 struct intel_framebuffer *intel_fb; 1801 1801 struct drm_i915_gem_object *obj; 1802 + int enable_fbc; 1802 1803 1803 1804 DRM_DEBUG_KMS("\n"); 1804 1805 ··· 1840 1839 intel_fb = to_intel_framebuffer(fb); 1841 1840 obj = intel_fb->obj; 1842 1841 1843 - if (!i915_enable_fbc) { 1844 - DRM_DEBUG_KMS("fbc disabled per module param (default off)\n"); 1842 + enable_fbc = i915_enable_fbc; 1843 + if (enable_fbc < 0) { 1844 + DRM_DEBUG_KMS("fbc set to per-chip default\n"); 1845 + enable_fbc = 1; 1846 + if (INTEL_INFO(dev)->gen <= 5) 1847 + enable_fbc = 0; 1848 + } 1849 + if (!enable_fbc) { 1850 + DRM_DEBUG_KMS("fbc disabled per module param\n"); 1845 1851 dev_priv->no_fbc_reason = FBC_MODULE_PARAM; 1846 1852 goto out_disable; 1847 1853 } ··· 4695 4687 bpc = 6; /* min is 18bpp */ 4696 4688 break; 4697 4689 case 24: 4698 - bpc = min((unsigned int)8, display_bpc); 4690 + bpc = 8; 4699 4691 break; 4700 4692 case 30: 4701 - bpc = min((unsigned int)10, display_bpc); 4693 + bpc = 10; 4702 4694 break; 4703 4695 case 48: 4704 - bpc = min((unsigned int)12, display_bpc); 4696 + bpc = 12; 4705 4697 break; 4706 4698 default: 4707 4699 DRM_DEBUG("unsupported depth, assuming 24 bits\n"); ··· 4709 4701 break; 4710 4702 } 4711 4703 4704 + display_bpc = min(display_bpc, bpc); 4705 + 4712 4706 DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", 4713 4707 bpc, display_bpc); 4714 4708 4715 - *pipe_bpp = bpc * 3; 4709 + *pipe_bpp = display_bpc * 3; 4716 4710 4717 4711 return display_bpc != bpc; 4718 4712 }
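i915_enable_fbc becoming -1 (see the i915_drv.c hunk just before this one) turns the module parameter into a tri-state: negative means "use the per-chip default", while 0 and 1 still force framebuffer compression off or on. Per the hunk above, the default resolves to on only for chips newer than gen 5. The resolution logic, isolated as a sketch:

    /* param < 0: per-chip default; otherwise the user's forced value.
     * The gen <= 5 cutoff mirrors the hunk above. */
    static int resolve_fbc(int param, int gen)
    {
            if (param >= 0)
                    return param;
            return gen > 5;
    }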
-3
drivers/gpu/drm/i915/intel_drv.h
··· 337 337 struct drm_connector *connector, 338 338 struct intel_load_detect_pipe *old); 339 339 340 - extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); 341 - extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); 342 - extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable); 343 340 extern void intelfb_restore(void); 344 341 extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 345 342 u16 blue, int regno);
+29 -59
drivers/gpu/drm/i915/intel_sdvo.c
··· 92 92 */ 93 93 uint16_t attached_output; 94 94 95 + /* 96 + * Hotplug activation bits for this device 97 + */ 98 + uint8_t hotplug_active[2]; 99 + 95 100 /** 96 101 * This is used to select the color range of RBG outputs in HDMI mode. 97 102 * It is only valid when using TMDS encoding and 8 bit per color mode. ··· 1213 1208 return true; 1214 1209 } 1215 1210 1216 - /* No use! */ 1217 - #if 0 1218 - struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) 1219 - { 1220 - struct drm_connector *connector = NULL; 1221 - struct intel_sdvo *iout = NULL; 1222 - struct intel_sdvo *sdvo; 1223 - 1224 - /* find the sdvo connector */ 1225 - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1226 - iout = to_intel_sdvo(connector); 1227 - 1228 - if (iout->type != INTEL_OUTPUT_SDVO) 1229 - continue; 1230 - 1231 - sdvo = iout->dev_priv; 1232 - 1233 - if (sdvo->sdvo_reg == SDVOB && sdvoB) 1234 - return connector; 1235 - 1236 - if (sdvo->sdvo_reg == SDVOC && !sdvoB) 1237 - return connector; 1238 - 1239 - } 1240 - 1241 - return NULL; 1242 - } 1243 - 1244 - int intel_sdvo_supports_hotplug(struct drm_connector *connector) 1211 + static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) 1245 1212 { 1246 1213 u8 response[2]; 1247 - u8 status; 1248 - struct intel_sdvo *intel_sdvo; 1249 - DRM_DEBUG_KMS("\n"); 1250 - 1251 - if (!connector) 1252 - return 0; 1253 - 1254 - intel_sdvo = to_intel_sdvo(connector); 1255 1214 1256 1215 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, 1257 1216 &response, 2) && response[0]; 1258 1217 } 1259 1218 1260 - void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) 1219 + static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) 1261 1220 { 1262 - u8 response[2]; 1263 - u8 status; 1264 - struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector); 1221 + struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); 1265 1222 1266 - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); 1267 - intel_sdvo_read_response(intel_sdvo, &response, 2); 1268 - 1269 - if (on) { 1270 - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); 1271 - status = intel_sdvo_read_response(intel_sdvo, &response, 2); 1272 - 1273 - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); 1274 - } else { 1275 - response[0] = 0; 1276 - response[1] = 0; 1277 - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); 1278 - } 1279 - 1280 - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); 1281 - intel_sdvo_read_response(intel_sdvo, &response, 2); 1223 + intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); 1282 1224 } 1283 - #endif 1284 1225 1285 1226 static bool 1286 1227 intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) ··· 1996 2045 { 1997 2046 struct drm_encoder *encoder = &intel_sdvo->base.base; 1998 2047 struct drm_connector *connector; 2048 + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 1999 2049 struct intel_connector *intel_connector; 2000 2050 struct intel_sdvo_connector *intel_sdvo_connector; 2001 2051 ··· 2014 2062 2015 2063 intel_connector = &intel_sdvo_connector->base; 2016 2064 connector = &intel_connector->base; 2017 - connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; 2065 + if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) { 2066 + connector->polled = DRM_CONNECTOR_POLL_HPD; 2067 + 
intel_sdvo->hotplug_active[0] |= 1 << device; 2068 + /* Some SDVO devices have one-shot hotplug interrupts. 2069 + * Ensure that they get re-enabled when an interrupt happens. 2070 + */ 2071 + intel_encoder->hot_plug = intel_sdvo_enable_hotplug; 2072 + intel_sdvo_enable_hotplug(intel_encoder); 2073 + } 2074 + else 2075 + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; 2018 2076 encoder->encoder_type = DRM_MODE_ENCODER_TMDS; 2019 2077 connector->connector_type = DRM_MODE_CONNECTOR_DVID; 2020 2078 ··· 2530 2568 /* In default case sdvo lvds is false */ 2531 2569 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) 2532 2570 goto err; 2571 + 2572 + /* Set up hotplug command - note paranoia about contents of reply. 2573 + * We assume that the hardware is in a sane state, and only touch 2574 + * the bits we think we understand. 2575 + */ 2576 + intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, 2577 + &intel_sdvo->hotplug_active, 2); 2578 + intel_sdvo->hotplug_active[0] &= ~0x3; 2533 2579 2534 2580 if (intel_sdvo_output_setup(intel_sdvo, 2535 2581 intel_sdvo->caps.output_flags) != true) {
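The hotplug rework above has three cooperating pieces: the reply from SDVO_CMD_GET_ACTIVE_HOT_PLUG is cached (and masked down to the two bits the driver understands) at init, DVI outputs that report hotplug support switch from polling to DRM_CONNECTOR_POLL_HPD, and the encoder's hot_plug hook rewrites the cached mask because, per the patch comment, some SDVO devices disarm their hotplug source after it fires once. A minimal sketch of that one-shot re-arm shape — my_encoder, write_cmd and CMD_SET_ACTIVE_HOT_PLUG are illustrative stand-ins, not i915 symbols:

/* Re-arm pattern: the enable mask is read and sanitized once at
 * init time; the interrupt path only ever writes the cached copy
 * back, so a self-clearing source is armed for the next event. */
static void my_enable_hotplug(struct my_encoder *enc)
{
        write_cmd(enc, CMD_SET_ACTIVE_HOT_PLUG,
                  enc->hotplug_mask, sizeof(enc->hotplug_mask));
}

/* at detect time: register the hook and arm once up front */
enc->hot_plug = my_enable_hotplug;
my_enable_hotplug(enc);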
+10 -6
drivers/gpu/drm/radeon/atombios_dp.c
··· 115 115 u8 msg[20]; 116 116 int msg_bytes = send_bytes + 4; 117 117 u8 ack; 118 + unsigned retry; 118 119 119 120 if (send_bytes > 16) 120 121 return -1; ··· 126 125 msg[3] = (msg_bytes << 4) | (send_bytes - 1); 127 126 memcpy(&msg[4], send, send_bytes); 128 127 129 - while (1) { 128 + for (retry = 0; retry < 4; retry++) { 130 129 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, 131 130 msg, msg_bytes, NULL, 0, delay, &ack); 132 131 if (ret < 0) 133 132 return ret; 134 133 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 135 - break; 134 + return send_bytes; 136 135 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 137 136 udelay(400); 138 137 else 139 138 return -EIO; 140 139 } 141 140 142 - return send_bytes; 141 + return -EIO; 143 142 } 144 143 145 144 static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, ··· 150 149 int msg_bytes = 4; 151 150 u8 ack; 152 151 int ret; 152 + unsigned retry; 153 153 154 154 msg[0] = address; 155 155 msg[1] = address >> 8; 156 156 msg[2] = AUX_NATIVE_READ << 4; 157 157 msg[3] = (msg_bytes << 4) | (recv_bytes - 1); 158 158 159 - while (1) { 159 + for (retry = 0; retry < 4; retry++) { 160 160 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, 161 161 msg, msg_bytes, recv, recv_bytes, delay, &ack); 162 - if (ret == 0) 163 - return -EPROTO; 164 162 if (ret < 0) 165 163 return ret; 166 164 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 167 165 return ret; 168 166 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) 169 167 udelay(400); 168 + else if (ret == 0) 169 + return -EPROTO; 170 170 else 171 171 return -EIO; 172 172 } 173 + 174 + return -EIO; 173 175 } 174 176 175 177 static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
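Both AUX helpers above trade an unbounded while (1) for a four-attempt loop: a sink that answers every native transaction with AUX_NATIVE_REPLY_DEFER used to wedge the caller forever. The read path additionally moves the ret == 0 check after the ACK/DEFER decode, so a zero-length DEFER is retried instead of being misreported as -EPROTO. The generic shape, as a sketch — aux_xfer(), REPLY_ACK and REPLY_DEFER are stand-ins, not DRM symbols:

static int aux_native_retry(struct aux_ch *ch, const u8 *msg, int len)
{
        unsigned retry;
        int ack;

        for (retry = 0; retry < 4; retry++) {
                ack = aux_xfer(ch, msg, len);
                if (ack < 0)
                        return ack;      /* transport error */
                if (ack == REPLY_ACK)
                        return len;      /* success */
                if (ack == REPLY_DEFER)
                        udelay(400);     /* sink busy: try again */
                else
                        return -EIO;     /* NACK or garbage reply */
        }
        return -EIO;                     /* four DEFERs: give up */
}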
+8 -50
drivers/gpu/drm/radeon/evergreen.c
··· 1404 1404 /* Initialize the ring buffer's read and write pointers */ 1405 1405 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 1406 1406 WREG32(CP_RB_RPTR_WR, 0); 1407 - WREG32(CP_RB_WPTR, 0); 1407 + rdev->cp.wptr = 0; 1408 + WREG32(CP_RB_WPTR, rdev->cp.wptr); 1408 1409 1409 1410 /* set the wb address wether it's enabled or not */ 1410 1411 WREG32(CP_RB_RPTR_ADDR, ··· 1427 1426 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 1428 1427 1429 1428 rdev->cp.rptr = RREG32(CP_RB_RPTR); 1430 - rdev->cp.wptr = RREG32(CP_RB_WPTR); 1431 1429 1432 1430 evergreen_cp_start(rdev); 1433 1431 rdev->cp.ready = true; ··· 1588 1588 } 1589 1589 1590 1590 return backend_map; 1591 - } 1592 - 1593 - static void evergreen_program_channel_remap(struct radeon_device *rdev) 1594 - { 1595 - u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; 1596 - 1597 - tmp = RREG32(MC_SHARED_CHMAP); 1598 - switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { 1599 - case 0: 1600 - case 1: 1601 - case 2: 1602 - case 3: 1603 - default: 1604 - /* default mapping */ 1605 - mc_shared_chremap = 0x00fac688; 1606 - break; 1607 - } 1608 - 1609 - switch (rdev->family) { 1610 - case CHIP_HEMLOCK: 1611 - case CHIP_CYPRESS: 1612 - case CHIP_BARTS: 1613 - tcp_chan_steer_lo = 0x54763210; 1614 - tcp_chan_steer_hi = 0x0000ba98; 1615 - break; 1616 - case CHIP_JUNIPER: 1617 - case CHIP_REDWOOD: 1618 - case CHIP_CEDAR: 1619 - case CHIP_PALM: 1620 - case CHIP_SUMO: 1621 - case CHIP_SUMO2: 1622 - case CHIP_TURKS: 1623 - case CHIP_CAICOS: 1624 - default: 1625 - tcp_chan_steer_lo = 0x76543210; 1626 - tcp_chan_steer_hi = 0x0000ba98; 1627 - break; 1628 - } 1629 - 1630 - WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); 1631 - WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); 1632 - WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); 1633 1591 } 1634 1592 1635 1593 static void evergreen_gpu_init(struct radeon_device *rdev) ··· 2035 2077 WREG32(GB_ADDR_CONFIG, gb_addr_config); 2036 2078 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 2037 2079 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 2038 - 2039 - evergreen_program_channel_remap(rdev); 2040 2080 2041 2081 num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; 2042 2082 grbm_gfx_index = INSTANCE_BROADCAST_WRITES; ··· 3127 3171 } 3128 3172 3129 3173 int evergreen_copy_blit(struct radeon_device *rdev, 3130 - uint64_t src_offset, uint64_t dst_offset, 3131 - unsigned num_pages, struct radeon_fence *fence) 3174 + uint64_t src_offset, 3175 + uint64_t dst_offset, 3176 + unsigned num_gpu_pages, 3177 + struct radeon_fence *fence) 3132 3178 { 3133 3179 int r; 3134 3180 3135 3181 mutex_lock(&rdev->r600_blit.mutex); 3136 3182 rdev->r600_blit.vb_ib = NULL; 3137 - r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); 3183 + r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE); 3138 3184 if (r) { 3139 3185 if (rdev->r600_blit.vb_ib) 3140 3186 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); 3141 3187 mutex_unlock(&rdev->r600_blit.mutex); 3142 3188 return r; 3143 3189 } 3144 - evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); 3190 + evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE); 3145 3191 evergreen_blit_done_copy(rdev, fence); 3146 3192 mutex_unlock(&rdev->r600_blit.mutex); 3147 3193 return 0;
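Three changes share this hunk: the channel-remap programming is removed outright (ni.c and rv770.c below get the same treatment), the num_pages parameter becomes num_gpu_pages to make the unit explicit (see radeon.h and radeon_ttm.c further down), and the ring bring-up now sets the driver's write-pointer copy first and pushes it to the register, instead of writing 0 and trusting a readback of CP_RB_WPTR. The same pointer-initialization pattern recurs in ni.c, r100.c and r600.c below; in brief:

rdev->cp.wptr = 0;                  /* CPU copy is authoritative  */
WREG32(CP_RB_WPTR, rdev->cp.wptr);  /* hardware follows the copy  */
/* ... remaining ring setup ... */
rdev->cp.rptr = RREG32(CP_RB_RPTR); /* rptr stays hardware-owned  */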
+6 -38
drivers/gpu/drm/radeon/ni.c
··· 569 569 return backend_map; 570 570 } 571 571 572 - static void cayman_program_channel_remap(struct radeon_device *rdev) 573 - { 574 - u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; 575 - 576 - tmp = RREG32(MC_SHARED_CHMAP); 577 - switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { 578 - case 0: 579 - case 1: 580 - case 2: 581 - case 3: 582 - default: 583 - /* default mapping */ 584 - mc_shared_chremap = 0x00fac688; 585 - break; 586 - } 587 - 588 - switch (rdev->family) { 589 - case CHIP_CAYMAN: 590 - default: 591 - //tcp_chan_steer_lo = 0x54763210 592 - tcp_chan_steer_lo = 0x76543210; 593 - tcp_chan_steer_hi = 0x0000ba98; 594 - break; 595 - } 596 - 597 - WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); 598 - WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); 599 - WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); 600 - } 601 - 602 572 static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, 603 573 u32 disable_mask_per_se, 604 574 u32 max_disable_mask_per_se, ··· 811 841 WREG32(GB_ADDR_CONFIG, gb_addr_config); 812 842 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 813 843 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 814 - 815 - cayman_program_channel_remap(rdev); 816 844 817 845 /* primary versions */ 818 846 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); ··· 1155 1187 1156 1188 /* Initialize the ring buffer's read and write pointers */ 1157 1189 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); 1158 - WREG32(CP_RB0_WPTR, 0); 1190 + rdev->cp.wptr = 0; 1191 + WREG32(CP_RB0_WPTR, rdev->cp.wptr); 1159 1192 1160 1193 /* set the wb address wether it's enabled or not */ 1161 1194 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); ··· 1176 1207 WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8); 1177 1208 1178 1209 rdev->cp.rptr = RREG32(CP_RB0_RPTR); 1179 - rdev->cp.wptr = RREG32(CP_RB0_WPTR); 1180 1210 1181 1211 /* ring1 - compute only */ 1182 1212 /* Set ring buffer size */ ··· 1188 1220 1189 1221 /* Initialize the ring buffer's read and write pointers */ 1190 1222 WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); 1191 - WREG32(CP_RB1_WPTR, 0); 1223 + rdev->cp1.wptr = 0; 1224 + WREG32(CP_RB1_WPTR, rdev->cp1.wptr); 1192 1225 1193 1226 /* set the wb address wether it's enabled or not */ 1194 1227 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); ··· 1201 1232 WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8); 1202 1233 1203 1234 rdev->cp1.rptr = RREG32(CP_RB1_RPTR); 1204 - rdev->cp1.wptr = RREG32(CP_RB1_WPTR); 1205 1235 1206 1236 /* ring2 - compute only */ 1207 1237 /* Set ring buffer size */ ··· 1213 1245 1214 1246 /* Initialize the ring buffer's read and write pointers */ 1215 1247 WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); 1216 - WREG32(CP_RB2_WPTR, 0); 1248 + rdev->cp2.wptr = 0; 1249 + WREG32(CP_RB2_WPTR, rdev->cp2.wptr); 1217 1250 1218 1251 /* set the wb address wether it's enabled or not */ 1219 1252 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); ··· 1226 1257 WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8); 1227 1258 1228 1259 rdev->cp2.rptr = RREG32(CP_RB2_RPTR); 1229 - rdev->cp2.wptr = RREG32(CP_RB2_WPTR); 1230 1260 1231 1261 /* start the rings */ 1232 1262 cayman_cp_start(rdev);
+10 -12
drivers/gpu/drm/radeon/r100.c
··· 721 721 int r100_copy_blit(struct radeon_device *rdev, 722 722 uint64_t src_offset, 723 723 uint64_t dst_offset, 724 - unsigned num_pages, 724 + unsigned num_gpu_pages, 725 725 struct radeon_fence *fence) 726 726 { 727 727 uint32_t cur_pages; 728 - uint32_t stride_bytes = PAGE_SIZE; 728 + uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE; 729 729 uint32_t pitch; 730 730 uint32_t stride_pixels; 731 731 unsigned ndw; ··· 737 737 /* radeon pitch is /64 */ 738 738 pitch = stride_bytes / 64; 739 739 stride_pixels = stride_bytes / 4; 740 - num_loops = DIV_ROUND_UP(num_pages, 8191); 740 + num_loops = DIV_ROUND_UP(num_gpu_pages, 8191); 741 741 742 742 /* Ask for enough room for blit + flush + fence */ 743 743 ndw = 64 + (10 * num_loops); ··· 746 746 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); 747 747 return -EINVAL; 748 748 } 749 - while (num_pages > 0) { 750 - cur_pages = num_pages; 749 + while (num_gpu_pages > 0) { 750 + cur_pages = num_gpu_pages; 751 751 if (cur_pages > 8191) { 752 752 cur_pages = 8191; 753 753 } 754 - num_pages -= cur_pages; 754 + num_gpu_pages -= cur_pages; 755 755 756 756 /* pages are in Y direction - height 757 757 page width in X direction - width */ ··· 773 773 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); 774 774 radeon_ring_write(rdev, 0); 775 775 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); 776 - radeon_ring_write(rdev, num_pages); 777 - radeon_ring_write(rdev, num_pages); 776 + radeon_ring_write(rdev, num_gpu_pages); 777 + radeon_ring_write(rdev, num_gpu_pages); 778 778 radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); 779 779 } 780 780 radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); ··· 990 990 /* Force read & write ptr to 0 */ 991 991 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); 992 992 WREG32(RADEON_CP_RB_RPTR_WR, 0); 993 - WREG32(RADEON_CP_RB_WPTR, 0); 993 + rdev->cp.wptr = 0; 994 + WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); 994 995 995 996 /* set the wb address whether it's enabled or not */ 996 997 WREG32(R_00070C_CP_RB_RPTR_ADDR, ··· 1008 1007 WREG32(RADEON_CP_RB_CNTL, tmp); 1009 1008 udelay(10); 1010 1009 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); 1011 - rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); 1012 - /* protect against crazy HW on resume */ 1013 - rdev->cp.wptr &= rdev->cp.ptr_mask; 1014 1010 /* Set cp mode to bus mastering & enable cp*/ 1015 1011 WREG32(RADEON_CP_CSQ_MODE, 1016 1012 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
+2 -2
drivers/gpu/drm/radeon/r200.c
··· 84 84 int r200_copy_dma(struct radeon_device *rdev, 85 85 uint64_t src_offset, 86 86 uint64_t dst_offset, 87 - unsigned num_pages, 87 + unsigned num_gpu_pages, 88 88 struct radeon_fence *fence) 89 89 { 90 90 uint32_t size; ··· 93 93 int r = 0; 94 94 95 95 /* radeon pitch is /64 */ 96 - size = num_pages << PAGE_SHIFT; 96 + size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT; 97 97 num_loops = DIV_ROUND_UP(size, 0x1FFFFF); 98 98 r = radeon_ring_lock(rdev, num_loops * 4 + 64); 99 99 if (r) {
+8 -6
drivers/gpu/drm/radeon/r600.c
··· 2209 2209 /* Initialize the ring buffer's read and write pointers */ 2210 2210 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 2211 2211 WREG32(CP_RB_RPTR_WR, 0); 2212 - WREG32(CP_RB_WPTR, 0); 2212 + rdev->cp.wptr = 0; 2213 + WREG32(CP_RB_WPTR, rdev->cp.wptr); 2213 2214 2214 2215 /* set the wb address whether it's enabled or not */ 2215 2216 WREG32(CP_RB_RPTR_ADDR, ··· 2232 2231 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 2233 2232 2234 2233 rdev->cp.rptr = RREG32(CP_RB_RPTR); 2235 - rdev->cp.wptr = RREG32(CP_RB_WPTR); 2236 2234 2237 2235 r600_cp_start(rdev); 2238 2236 rdev->cp.ready = true; ··· 2353 2353 } 2354 2354 2355 2355 int r600_copy_blit(struct radeon_device *rdev, 2356 - uint64_t src_offset, uint64_t dst_offset, 2357 - unsigned num_pages, struct radeon_fence *fence) 2356 + uint64_t src_offset, 2357 + uint64_t dst_offset, 2358 + unsigned num_gpu_pages, 2359 + struct radeon_fence *fence) 2358 2360 { 2359 2361 int r; 2360 2362 2361 2363 mutex_lock(&rdev->r600_blit.mutex); 2362 2364 rdev->r600_blit.vb_ib = NULL; 2363 - r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); 2365 + r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE); 2364 2366 if (r) { 2365 2367 if (rdev->r600_blit.vb_ib) 2366 2368 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); 2367 2369 mutex_unlock(&rdev->r600_blit.mutex); 2368 2370 return r; 2369 2371 } 2370 - r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); 2372 + r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE); 2371 2373 r600_blit_done_copy(rdev, fence); 2372 2374 mutex_unlock(&rdev->r600_blit.mutex); 2373 2375 return 0;
+4 -3
drivers/gpu/drm/radeon/radeon.h
··· 322 322 323 323 #define RADEON_GPU_PAGE_SIZE 4096 324 324 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) 325 + #define RADEON_GPU_PAGE_SHIFT 12 325 326 326 327 struct radeon_gart { 327 328 dma_addr_t table_addr; ··· 915 914 int (*copy_blit)(struct radeon_device *rdev, 916 915 uint64_t src_offset, 917 916 uint64_t dst_offset, 918 - unsigned num_pages, 917 + unsigned num_gpu_pages, 919 918 struct radeon_fence *fence); 920 919 int (*copy_dma)(struct radeon_device *rdev, 921 920 uint64_t src_offset, 922 921 uint64_t dst_offset, 923 - unsigned num_pages, 922 + unsigned num_gpu_pages, 924 923 struct radeon_fence *fence); 925 924 int (*copy)(struct radeon_device *rdev, 926 925 uint64_t src_offset, 927 926 uint64_t dst_offset, 928 - unsigned num_pages, 927 + unsigned num_gpu_pages, 929 928 struct radeon_fence *fence); 930 929 uint32_t (*get_engine_clock)(struct radeon_device *rdev); 931 930 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
+4 -4
drivers/gpu/drm/radeon/radeon_asic.h
··· 75 75 int r100_copy_blit(struct radeon_device *rdev, 76 76 uint64_t src_offset, 77 77 uint64_t dst_offset, 78 - unsigned num_pages, 78 + unsigned num_gpu_pages, 79 79 struct radeon_fence *fence); 80 80 int r100_set_surface_reg(struct radeon_device *rdev, int reg, 81 81 uint32_t tiling_flags, uint32_t pitch, ··· 143 143 extern int r200_copy_dma(struct radeon_device *rdev, 144 144 uint64_t src_offset, 145 145 uint64_t dst_offset, 146 - unsigned num_pages, 146 + unsigned num_gpu_pages, 147 147 struct radeon_fence *fence); 148 148 void r200_set_safe_registers(struct radeon_device *rdev); 149 149 ··· 311 311 int r600_ring_test(struct radeon_device *rdev); 312 312 int r600_copy_blit(struct radeon_device *rdev, 313 313 uint64_t src_offset, uint64_t dst_offset, 314 - unsigned num_pages, struct radeon_fence *fence); 314 + unsigned num_gpu_pages, struct radeon_fence *fence); 315 315 void r600_hpd_init(struct radeon_device *rdev); 316 316 void r600_hpd_fini(struct radeon_device *rdev); 317 317 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); ··· 403 403 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 404 404 int evergreen_copy_blit(struct radeon_device *rdev, 405 405 uint64_t src_offset, uint64_t dst_offset, 406 - unsigned num_pages, struct radeon_fence *fence); 406 + unsigned num_gpu_pages, struct radeon_fence *fence); 407 407 void evergreen_hpd_init(struct radeon_device *rdev); 408 408 void evergreen_hpd_fini(struct radeon_device *rdev); 409 409 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+4 -4
drivers/gpu/drm/radeon/radeon_connectors.c
··· 68 68 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 69 69 int saved_dpms = connector->dpms; 70 70 71 - if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && 72 - radeon_dp_needs_link_train(radeon_connector)) 73 - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 74 - else 71 + /* Only turn off the display it it's physically disconnected */ 72 + if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) 75 73 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 74 + else if (radeon_dp_needs_link_train(radeon_connector)) 75 + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 76 76 connector->dpms = saved_dpms; 77 77 } 78 78 }
+19 -21
drivers/gpu/drm/radeon/radeon_cursor.c
··· 208 208 int xorigin = 0, yorigin = 0; 209 209 int w = radeon_crtc->cursor_width; 210 210 211 - if (x < 0) 212 - xorigin = -x + 1; 213 - if (y < 0) 214 - yorigin = -y + 1; 215 - if (xorigin >= CURSOR_WIDTH) 216 - xorigin = CURSOR_WIDTH - 1; 217 - if (yorigin >= CURSOR_HEIGHT) 218 - yorigin = CURSOR_HEIGHT - 1; 211 + if (ASIC_IS_AVIVO(rdev)) { 212 + /* avivo cursor are offset into the total surface */ 213 + x += crtc->x; 214 + y += crtc->y; 215 + } 216 + DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); 217 + 218 + if (x < 0) { 219 + xorigin = min(-x, CURSOR_WIDTH - 1); 220 + x = 0; 221 + } 222 + if (y < 0) { 223 + yorigin = min(-y, CURSOR_HEIGHT - 1); 224 + y = 0; 225 + } 219 226 220 227 if (ASIC_IS_AVIVO(rdev)) { 221 228 int i = 0; 222 229 struct drm_crtc *crtc_p; 223 - 224 - /* avivo cursor are offset into the total surface */ 225 - x += crtc->x; 226 - y += crtc->y; 227 - DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); 228 230 229 231 /* avivo cursor image can't end on 128 pixel boundary or 230 232 * go past the end of the frame if both crtcs are enabled ··· 255 253 256 254 radeon_lock_cursor(crtc, true); 257 255 if (ASIC_IS_DCE4(rdev)) { 258 - WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, 259 - ((xorigin ? 0 : x) << 16) | 260 - (yorigin ? 0 : y)); 256 + WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); 261 257 WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); 262 258 WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, 263 259 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); 264 260 } else if (ASIC_IS_AVIVO(rdev)) { 265 - WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, 266 - ((xorigin ? 0 : x) << 16) | 267 - (yorigin ? 0 : y)); 261 + WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); 268 262 WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); 269 263 WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, 270 264 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); ··· 274 276 | yorigin)); 275 277 WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset, 276 278 (RADEON_CUR_LOCK 277 - | ((xorigin ? 0 : x) << 16) 278 - | (yorigin ? 0 : y))); 279 + | (x << 16) 280 + | y)); 279 281 /* offset is from DISP(2)_BASE_ADDRESS */ 280 282 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + 281 283 (yorigin * 256)));
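The cursor rewrite fixes the off-screen case twice over: the old code computed the hotspot as -x + 1 (an off-by-one) and then relied on the (xorigin ? 0 : x) conditionals, which zeroed the programmed coordinate even on the axis that was still on screen. The new form clamps the origin into the cursor image and zeroes only the negative coordinate, after folding in the AVIVO crtc offset so panned setups clamp against the right value. Worked example: for x = -5, xorigin becomes min(5, CURSOR_WIDTH - 1) = 5 and x becomes 0, so cursor column 5 lands on screen column 0. In sketch form, with CUR_POSITION/CUR_HOT_SPOT standing in for the per-ASIC registers:

if (x < 0) {
        xorigin = min(-x, CURSOR_WIDTH - 1);  /* x = -5 -> origin 5 */
        x = 0;
}
if (y < 0) {
        yorigin = min(-y, CURSOR_HEIGHT - 1);
        y = 0;
}
WREG32(CUR_POSITION, (x << 16) | y);          /* never negative now */
WREG32(CUR_HOT_SPOT, (xorigin << 16) | yorigin);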
+1 -1
drivers/gpu/drm/radeon/radeon_display.c
··· 473 473 spin_lock_irqsave(&dev->event_lock, flags); 474 474 radeon_crtc->unpin_work = NULL; 475 475 unlock_free: 476 - drm_gem_object_unreference_unlocked(old_radeon_fb->obj); 477 476 spin_unlock_irqrestore(&dev->event_lock, flags); 477 + drm_gem_object_unreference_unlocked(old_radeon_fb->obj); 478 478 radeon_fence_unref(&work->fence); 479 479 kfree(work); 480 480
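A one-line move with a locking rationale: drm_gem_object_unreference_unlocked() can end up taking mutexes and freeing the object, neither of which is safe under the IRQ-disabled event_lock spinlock. The rule the fix follows, distilled:

spin_lock_irqsave(&dev->event_lock, flags);
/* pointer bookkeeping only -- nothing that can sleep */
radeon_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
drm_gem_object_unreference_unlocked(old_radeon_fb->obj); /* may sleep */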
+8 -1
drivers/gpu/drm/radeon/radeon_encoders.c
··· 1507 1507 switch (mode) { 1508 1508 case DRM_MODE_DPMS_ON: 1509 1509 args.ucAction = ATOM_ENABLE; 1510 - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1510 + /* workaround for DVOOutputControl on some RS690 systems */ 1511 + if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) { 1512 + u32 reg = RREG32(RADEON_BIOS_3_SCRATCH); 1513 + WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE); 1514 + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1515 + WREG32(RADEON_BIOS_3_SCRATCH, reg); 1516 + } else 1517 + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1511 1518 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 1512 1519 args.ucAction = ATOM_LCD_BLON; 1513 1520 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+6 -1
drivers/gpu/drm/radeon/radeon_ttm.c
··· 277 277 DRM_ERROR("Trying to move memory with CP turned off.\n"); 278 278 return -EINVAL; 279 279 } 280 - r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); 280 + 281 + BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); 282 + 283 + r = radeon_copy(rdev, old_start, new_start, 284 + new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ 285 + fence); 281 286 /* FIXME: handle copy error */ 282 287 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, 283 288 evict, no_wait_reserve, no_wait_gpu, new_mem);
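This is the consumer side of the num_gpu_pages renames above: TTM counts CPU pages (PAGE_SIZE, 4K on x86 but 64K on some powerpc/ia64 configs), while the radeon copy hooks count fixed 4K GPU pages, so passing new_mem->num_pages straight through under-copied by a factor of 16 on 64K-page kernels. The conversion plus its compile-time guard:

BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
/* e.g. 64K CPU pages: each TTM page is 64K / 4K = 16 GPU pages */
num_gpu_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);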
-51
drivers/gpu/drm/radeon/rv770.c
··· 536 536 return backend_map; 537 537 } 538 538 539 - static void rv770_program_channel_remap(struct radeon_device *rdev) 540 - { 541 - u32 tcp_chan_steer, mc_shared_chremap, tmp; 542 - bool force_no_swizzle; 543 - 544 - switch (rdev->family) { 545 - case CHIP_RV770: 546 - case CHIP_RV730: 547 - force_no_swizzle = false; 548 - break; 549 - case CHIP_RV710: 550 - case CHIP_RV740: 551 - default: 552 - force_no_swizzle = true; 553 - break; 554 - } 555 - 556 - tmp = RREG32(MC_SHARED_CHMAP); 557 - switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { 558 - case 0: 559 - case 1: 560 - default: 561 - /* default mapping */ 562 - mc_shared_chremap = 0x00fac688; 563 - break; 564 - case 2: 565 - case 3: 566 - if (force_no_swizzle) 567 - mc_shared_chremap = 0x00fac688; 568 - else 569 - mc_shared_chremap = 0x00bbc298; 570 - break; 571 - } 572 - 573 - if (rdev->family == CHIP_RV740) 574 - tcp_chan_steer = 0x00ef2a60; 575 - else 576 - tcp_chan_steer = 0x00fac688; 577 - 578 - /* RV770 CE has special chremap setup */ 579 - if (rdev->pdev->device == 0x944e) { 580 - tcp_chan_steer = 0x00b08b08; 581 - mc_shared_chremap = 0x00b08b08; 582 - } 583 - 584 - WREG32(TCP_CHAN_STEER, tcp_chan_steer); 585 - WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); 586 - } 587 - 588 539 static void rv770_gpu_init(struct radeon_device *rdev) 589 540 { 590 541 int i, j, num_qd_pipes; ··· 735 784 WREG32(GB_TILING_CONFIG, gb_tiling_config); 736 785 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 737 786 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 738 - 739 - rv770_program_channel_remap(rdev); 740 787 741 788 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 742 789 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+2 -1
drivers/gpu/drm/ttm/ttm_bo.c
··· 394 394 395 395 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { 396 396 if (bo->ttm == NULL) { 397 - ret = ttm_bo_add_ttm(bo, false); 397 + bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); 398 + ret = ttm_bo_add_ttm(bo, zero); 398 399 if (ret) 399 400 goto out_err; 400 401 }
+2
drivers/hid/hid-wacom.c
··· 373 373 hidinput = list_entry(hdev->inputs.next, struct hid_input, list); 374 374 input = hidinput->input; 375 375 376 + __set_bit(INPUT_PROP_POINTER, input->propbit); 377 + 376 378 /* Basics */ 377 379 input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL); 378 380
+53 -163
drivers/hwmon/coretemp.c
··· 36 36 #include <linux/cpu.h> 37 37 #include <linux/pci.h> 38 38 #include <linux/smp.h> 39 + #include <linux/moduleparam.h> 39 40 #include <asm/msr.h> 40 41 #include <asm/processor.h> 41 42 42 43 #define DRVNAME "coretemp" 43 44 45 + /* 46 + * force_tjmax only matters when TjMax can't be read from the CPU itself. 47 + * When set, it replaces the driver's suboptimal heuristic. 48 + */ 49 + static int force_tjmax; 50 + module_param_named(tjmax, force_tjmax, int, 0444); 51 + MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); 52 + 44 53 #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ 45 54 #define NUM_REAL_CORES 16 /* Number of Real cores per cpu */ 46 55 #define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ 47 56 #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ 48 - #define MAX_THRESH_ATTRS 3 /* Maximum no of Threshold attrs */ 49 - #define TOTAL_ATTRS (MAX_CORE_ATTRS + MAX_THRESH_ATTRS) 57 + #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) 50 58 #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) 51 59 52 60 #ifdef CONFIG_SMP ··· 77 69 * This value is passed as "id" field to rdmsr/wrmsr functions. 78 70 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS, 79 71 * from where the temperature values should be read. 80 - * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT, 81 - * from where the thresholds are read. 82 72 * @attr_size: Total number of pre-core attrs displayed in the sysfs. 83 73 * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data. 84 74 * Otherwise, temp_data holds coretemp data. ··· 85 79 struct temp_data { 86 80 int temp; 87 81 int ttarget; 88 - int tmin; 89 82 int tjmax; 90 83 unsigned long last_updated; 91 84 unsigned int cpu; 92 85 u32 cpu_core_id; 93 86 u32 status_reg; 94 - u32 intrpt_reg; 95 87 int attr_size; 96 88 bool is_pkg_data; 97 89 bool valid; ··· 147 143 return sprintf(buf, "%d\n", (eax >> 5) & 1); 148 144 } 149 145 150 - static ssize_t show_max_alarm(struct device *dev, 151 - struct device_attribute *devattr, char *buf) 152 - { 153 - u32 eax, edx; 154 - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 155 - struct platform_data *pdata = dev_get_drvdata(dev); 156 - struct temp_data *tdata = pdata->core_data[attr->index]; 157 - 158 - rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx); 159 - 160 - return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1)); 161 - } 162 - 163 146 static ssize_t show_tjmax(struct device *dev, 164 147 struct device_attribute *devattr, char *buf) 165 148 { ··· 163 172 struct platform_data *pdata = dev_get_drvdata(dev); 164 173 165 174 return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget); 166 - } 167 - 168 - static ssize_t store_ttarget(struct device *dev, 169 - struct device_attribute *devattr, 170 - const char *buf, size_t count) 171 - { 172 - struct platform_data *pdata = dev_get_drvdata(dev); 173 - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 174 - struct temp_data *tdata = pdata->core_data[attr->index]; 175 - u32 eax, edx; 176 - unsigned long val; 177 - int diff; 178 - 179 - if (strict_strtoul(buf, 10, &val)) 180 - return -EINVAL; 181 - 182 - /* 183 - * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms 184 - * of milli degree celsius. 
Hence don't accept val > (127 * 1000) 185 - */ 186 - if (val > tdata->tjmax || val > 127000) 187 - return -EINVAL; 188 - 189 - diff = (tdata->tjmax - val) / 1000; 190 - 191 - mutex_lock(&tdata->update_lock); 192 - rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx); 193 - eax = (eax & ~THERM_MASK_THRESHOLD1) | 194 - (diff << THERM_SHIFT_THRESHOLD1); 195 - wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx); 196 - tdata->ttarget = val; 197 - mutex_unlock(&tdata->update_lock); 198 - 199 - return count; 200 - } 201 - 202 - static ssize_t show_tmin(struct device *dev, 203 - struct device_attribute *devattr, char *buf) 204 - { 205 - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 206 - struct platform_data *pdata = dev_get_drvdata(dev); 207 - 208 - return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin); 209 - } 210 - 211 - static ssize_t store_tmin(struct device *dev, 212 - struct device_attribute *devattr, 213 - const char *buf, size_t count) 214 - { 215 - struct platform_data *pdata = dev_get_drvdata(dev); 216 - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 217 - struct temp_data *tdata = pdata->core_data[attr->index]; 218 - u32 eax, edx; 219 - unsigned long val; 220 - int diff; 221 - 222 - if (strict_strtoul(buf, 10, &val)) 223 - return -EINVAL; 224 - 225 - /* 226 - * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms 227 - * of milli degree celsius. Hence don't accept val > (127 * 1000) 228 - */ 229 - if (val > tdata->tjmax || val > 127000) 230 - return -EINVAL; 231 - 232 - diff = (tdata->tjmax - val) / 1000; 233 - 234 - mutex_lock(&tdata->update_lock); 235 - rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx); 236 - eax = (eax & ~THERM_MASK_THRESHOLD0) | 237 - (diff << THERM_SHIFT_THRESHOLD0); 238 - wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx); 239 - tdata->tmin = val; 240 - mutex_unlock(&tdata->update_lock); 241 - 242 - return count; 243 175 } 244 176 245 177 static ssize_t show_temp(struct device *dev, ··· 288 374 289 375 static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) 290 376 { 291 - /* The 100C is default for both mobile and non mobile CPUs */ 292 377 int err; 293 378 u32 eax, edx; 294 379 u32 val; ··· 298 385 */ 299 386 err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); 300 387 if (err) { 301 - dev_warn(dev, "Unable to read TjMax from CPU.\n"); 388 + if (c->x86_model > 0xe && c->x86_model != 0x1c) 389 + dev_warn(dev, "Unable to read TjMax from CPU %u\n", id); 302 390 } else { 303 391 val = (eax >> 16) & 0xff; 304 392 /* ··· 307 393 * will be used 308 394 */ 309 395 if (val) { 310 - dev_info(dev, "TjMax is %d C.\n", val); 396 + dev_dbg(dev, "TjMax is %d degrees C\n", val); 311 397 return val * 1000; 312 398 } 399 + } 400 + 401 + if (force_tjmax) { 402 + dev_notice(dev, "TjMax forced to %d degrees C by user\n", 403 + force_tjmax); 404 + return force_tjmax * 1000; 313 405 } 314 406 315 407 /* ··· 334 414 rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx); 335 415 } 336 416 337 - static int get_pkg_tjmax(unsigned int cpu, struct device *dev) 338 - { 339 - int err; 340 - u32 eax, edx, val; 341 - 342 - err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); 343 - if (!err) { 344 - val = (eax >> 16) & 0xff; 345 - if (val) 346 - return val * 1000; 347 - } 348 - dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu); 349 - return 100000; /* Default TjMax: 100 degree celsius */ 350 - } 351 - 352 417 static int create_name_attr(struct platform_data 
*pdata, struct device *dev) 353 418 { 354 419 sysfs_attr_init(&pdata->name_attr.attr); ··· 347 442 int attr_no) 348 443 { 349 444 int err, i; 350 - static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev, 445 + static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev, 351 446 struct device_attribute *devattr, char *buf) = { 352 447 show_label, show_crit_alarm, show_temp, show_tjmax, 353 - show_max_alarm, show_ttarget, show_tmin }; 354 - static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev, 355 - struct device_attribute *devattr, const char *buf, 356 - size_t count) = { NULL, NULL, NULL, NULL, NULL, 357 - store_ttarget, store_tmin }; 358 - static const char *names[TOTAL_ATTRS] = { 448 + show_ttarget }; 449 + static const char *const names[TOTAL_ATTRS] = { 359 450 "temp%d_label", "temp%d_crit_alarm", 360 451 "temp%d_input", "temp%d_crit", 361 - "temp%d_max_alarm", "temp%d_max", 362 - "temp%d_max_hyst" }; 452 + "temp%d_max" }; 363 453 364 454 for (i = 0; i < tdata->attr_size; i++) { 365 455 snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], ··· 362 462 sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); 363 463 tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; 364 464 tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; 365 - if (rw_ptr[i]) { 366 - tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR; 367 - tdata->sd_attrs[i].dev_attr.store = rw_ptr[i]; 368 - } 369 465 tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; 370 466 tdata->sd_attrs[i].index = attr_no; 371 467 err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr); ··· 377 481 } 378 482 379 483 380 - static int __devinit chk_ucode_version(struct platform_device *pdev) 484 + static int __cpuinit chk_ucode_version(unsigned int cpu) 381 485 { 382 - struct cpuinfo_x86 *c = &cpu_data(pdev->id); 486 + struct cpuinfo_x86 *c = &cpu_data(cpu); 383 487 int err; 384 488 u32 edx; 385 489 ··· 390 494 */ 391 495 if (c->x86_model == 0xe && c->x86_mask < 0xc) { 392 496 /* check for microcode update */ 393 - err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu, 497 + err = smp_call_function_single(cpu, get_ucode_rev_on_cpu, 394 498 &edx, 1); 395 499 if (err) { 396 - dev_err(&pdev->dev, 397 - "Cannot determine microcode revision of " 398 - "CPU#%u (%d)!\n", pdev->id, err); 500 + pr_err("Cannot determine microcode revision of " 501 + "CPU#%u (%d)!\n", cpu, err); 399 502 return -ENODEV; 400 503 } else if (edx < 0x39) { 401 - dev_err(&pdev->dev, 402 - "Errata AE18 not fixed, update BIOS or " 403 - "microcode of the CPU!\n"); 504 + pr_err("Errata AE18 not fixed, update BIOS or " 505 + "microcode of the CPU!\n"); 404 506 return -ENODEV; 405 507 } 406 508 } ··· 432 538 433 539 tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS : 434 540 MSR_IA32_THERM_STATUS; 435 - tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT : 436 - MSR_IA32_THERM_INTERRUPT; 437 541 tdata->is_pkg_data = pkg_flag; 438 542 tdata->cpu = cpu; 439 543 tdata->cpu_core_id = TO_CORE_ID(cpu); ··· 440 548 return tdata; 441 549 } 442 550 443 - static int create_core_data(struct platform_data *pdata, 444 - struct platform_device *pdev, 551 + static int create_core_data(struct platform_device *pdev, 445 552 unsigned int cpu, int pkg_flag) 446 553 { 447 554 struct temp_data *tdata; 555 + struct platform_data *pdata = platform_get_drvdata(pdev); 448 556 struct cpuinfo_x86 *c = &cpu_data(cpu); 449 557 u32 eax, edx; 450 558 int err, attr_no; ··· 480 588 goto exit_free; 481 589 482 590 /* We can access status register. 
Get Critical Temperature */ 483 - if (pkg_flag) 484 - tdata->tjmax = get_pkg_tjmax(pdev->id, &pdev->dev); 485 - else 486 - tdata->tjmax = get_tjmax(c, cpu, &pdev->dev); 591 + tdata->tjmax = get_tjmax(c, cpu, &pdev->dev); 487 592 488 593 /* 489 - * Test if we can access the intrpt register. If so, increase the 490 - * 'size' enough to have ttarget/tmin/max_alarm interfaces. 491 - * Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT 594 + * Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET. 595 + * The target temperature is available on older CPUs but not in this 596 + * register. Atoms don't have the register at all. 492 597 */ 493 - err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx); 494 - if (!err) { 495 - tdata->attr_size += MAX_THRESH_ATTRS; 496 - tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000; 598 + if (c->x86_model > 0xe && c->x86_model != 0x1c) { 599 + err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, 600 + &eax, &edx); 601 + if (!err) { 602 + tdata->ttarget 603 + = tdata->tjmax - ((eax >> 8) & 0xff) * 1000; 604 + tdata->attr_size++; 605 + } 497 606 } 498 607 499 608 pdata->core_data[attr_no] = tdata; ··· 506 613 507 614 return 0; 508 615 exit_free: 616 + pdata->core_data[attr_no] = NULL; 509 617 kfree(tdata); 510 618 return err; 511 619 } 512 620 513 621 static void coretemp_add_core(unsigned int cpu, int pkg_flag) 514 622 { 515 - struct platform_data *pdata; 516 623 struct platform_device *pdev = coretemp_get_pdev(cpu); 517 624 int err; 518 625 519 626 if (!pdev) 520 627 return; 521 628 522 - pdata = platform_get_drvdata(pdev); 523 - 524 - err = create_core_data(pdata, pdev, cpu, pkg_flag); 629 + err = create_core_data(pdev, cpu, pkg_flag); 525 630 if (err) 526 631 dev_err(&pdev->dev, "Adding Core %u failed\n", cpu); 527 632 } ··· 543 652 struct platform_data *pdata; 544 653 int err; 545 654 546 - /* Check the microcode version of the CPU */ 547 - err = chk_ucode_version(pdev); 548 - if (err) 549 - return err; 550 - 551 655 /* Initialize the per-package data structures */ 552 656 pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL); 553 657 if (!pdata) ··· 552 666 if (err) 553 667 goto exit_free; 554 668 555 - pdata->phys_proc_id = TO_PHYS_ID(pdev->id); 669 + pdata->phys_proc_id = pdev->id; 556 670 platform_set_drvdata(pdev, pdata); 557 671 558 672 pdata->hwmon_dev = hwmon_device_register(&pdev->dev); ··· 604 718 605 719 mutex_lock(&pdev_list_mutex); 606 720 607 - pdev = platform_device_alloc(DRVNAME, cpu); 721 + pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu)); 608 722 if (!pdev) { 609 723 err = -ENOMEM; 610 724 pr_err("Device allocation failed\n"); ··· 624 738 } 625 739 626 740 pdev_entry->pdev = pdev; 627 - pdev_entry->phys_proc_id = TO_PHYS_ID(cpu); 741 + pdev_entry->phys_proc_id = pdev->id; 628 742 629 743 list_add_tail(&pdev_entry->list, &pdev_list); 630 744 mutex_unlock(&pdev_list_mutex); ··· 685 799 return; 686 800 687 801 if (!pdev) { 802 + /* Check the microcode version of the CPU */ 803 + if (chk_ucode_version(cpu)) 804 + return; 805 + 688 806 /* 689 807 * Alright, we have DTS support. 690 808 * We are bringing the _first_ core in this pkg
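Among the many removals (the writable threshold attributes and the separate package-TjMax reader go away), the TjMax logic settles into a clear precedence: the value the CPU itself reports, then the user's tjmax= override, and only then the model heuristic — so the module parameter genuinely "only matters when TjMax can't be read from the CPU itself". In outline, with adjust_tjmax() being coretemp's existing heuristic:

err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
if (!err) {
        val = (eax >> 16) & 0xff;
        if (val)
                return val * 1000;       /* 1. CPU-reported TjMax */
}
if (force_tjmax)
        return force_tjmax * 1000;       /* 2. user override      */
return adjust_tjmax(c, id, dev);         /* 3. heuristic fallback */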
+1 -1
drivers/hwmon/ds620.c
··· 72 72 char valid; /* !=0 if following fields are valid */ 73 73 unsigned long last_updated; /* In jiffies */ 74 74 75 - u16 temp[3]; /* Register values, word */ 75 + s16 temp[3]; /* Register values, word */ 76 76 }; 77 77 78 78 /*
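A one-character type fix with real consequences: the DS620's temperature registers are signed, and keeping the raw words in a u16 meant sub-zero readings were scaled as large positive values. A standalone illustration — the temp*256 fixed-point format here is for demonstration only, not the DS620's exact register layout:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t raw = 0xF600;  /* -10.0 degC in a temp*256 format */

        long wrong = ((long)raw * 1000) / 256;          /* u16 view */
        long right = ((long)(int16_t)raw * 1000) / 256; /* s16 view */

        printf("as u16: %6ld mC\n", wrong);  /* 246000 mC */
        printf("as s16: %6ld mC\n", right);  /* -10000 mC */
        return 0;
}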
+8 -1
drivers/hwmon/pmbus/pmbus_core.c
··· 978 978 struct pmbus_limit_attr { 979 979 u16 reg; /* Limit register */ 980 980 bool update; /* True if register needs updates */ 981 + bool low; /* True if low limit; for limits with compare 982 + functions only */ 981 983 const char *attr; /* Attribute name */ 982 984 const char *alarm; /* Alarm attribute name */ 983 985 u32 sbit; /* Alarm attribute status bit */ ··· 1031 1029 if (attr->compare) { 1032 1030 pmbus_add_boolean_cmp(data, name, 1033 1031 l->alarm, index, 1034 - cbase, cindex, 1032 + l->low ? cindex : cbase, 1033 + l->low ? cbase : cindex, 1035 1034 attr->sbase + page, l->sbit); 1036 1035 } else { 1037 1036 pmbus_add_boolean_reg(data, name, ··· 1369 1366 static const struct pmbus_limit_attr temp_limit_attrs[] = { 1370 1367 { 1371 1368 .reg = PMBUS_UT_WARN_LIMIT, 1369 + .low = true, 1372 1370 .attr = "min", 1373 1371 .alarm = "min_alarm", 1374 1372 .sbit = PB_TEMP_UT_WARNING, 1375 1373 }, { 1376 1374 .reg = PMBUS_UT_FAULT_LIMIT, 1375 + .low = true, 1377 1376 .attr = "lcrit", 1378 1377 .alarm = "lcrit_alarm", 1379 1378 .sbit = PB_TEMP_UT_FAULT, ··· 1404 1399 static const struct pmbus_limit_attr temp_limit_attrs23[] = { 1405 1400 { 1406 1401 .reg = PMBUS_UT_WARN_LIMIT, 1402 + .low = true, 1407 1403 .attr = "min", 1408 1404 .alarm = "min_alarm", 1409 1405 .sbit = PB_TEMP_UT_WARNING, 1410 1406 }, { 1411 1407 .reg = PMBUS_UT_FAULT_LIMIT, 1408 + .low = true, 1412 1409 .attr = "lcrit", 1413 1410 .alarm = "lcrit_alarm", 1414 1411 .sbit = PB_TEMP_UT_FAULT,
+2 -2
drivers/hwmon/w83791d.c
··· 329 329 struct i2c_board_info *info); 330 330 static int w83791d_remove(struct i2c_client *client); 331 331 332 - static int w83791d_read(struct i2c_client *client, u8 register); 333 - static int w83791d_write(struct i2c_client *client, u8 register, u8 value); 332 + static int w83791d_read(struct i2c_client *client, u8 reg); 333 + static int w83791d_write(struct i2c_client *client, u8 reg, u8 value); 334 334 static struct w83791d_data *w83791d_update_device(struct device *dev); 335 335 336 336 #ifdef DEBUG
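'register' is a C storage-class keyword, so it can never name a parameter. The old prototypes got away with it because parameter names are optional in a declaration: 'u8 register' parses as an unnamed parameter carrying a (pointless but legal) register storage class, which is misleading and breaks the moment a name is actually attached. A compact illustration:

typedef unsigned char u8;

int read_reg(void *client, u8 register); /* legal: unnamed param    */
int read_reg(void *client, u8 reg);      /* what was actually meant */
/* int bad(void *client, u8 register r);    syntax error: keyword   */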
+6 -1
drivers/ide/ide-disk.c
··· 435 435 if (!(rq->cmd_flags & REQ_FLUSH)) 436 436 return BLKPREP_OK; 437 437 438 - cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); 438 + if (rq->special) { 439 + cmd = rq->special; 440 + memset(cmd, 0, sizeof(*cmd)); 441 + } else { 442 + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); 443 + } 439 444 440 445 /* FIXME: map struct ide_taskfile on rq->cmd[] */ 441 446 BUG_ON(cmd == NULL);
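The shape of the fix: if the request already carries a command in rq->special (for instance because it was prepared once and then requeued), reuse and reinitialize that storage instead of allocating a second one; the GFP_ATOMIC path — which can fail under memory pressure, right in front of the BUG_ON — remains only for requests that arrive with nothing attached:

if (rq->special) {
        cmd = rq->special;              /* already attached: reuse  */
        memset(cmd, 0, sizeof(*cmd));
} else {
        cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); /* may return NULL */
}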
+5 -5
drivers/infiniband/hw/cxgb3/iwch_cm.c
··· 287 287 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { 288 288 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); 289 289 dst_release(ep->dst); 290 - l2t_release(L2DATA(ep->com.tdev), ep->l2t); 290 + l2t_release(ep->com.tdev, ep->l2t); 291 291 } 292 292 kfree(ep); 293 293 } ··· 1178 1178 release_tid(ep->com.tdev, GET_TID(rpl), NULL); 1179 1179 cxgb3_free_atid(ep->com.tdev, ep->atid); 1180 1180 dst_release(ep->dst); 1181 - l2t_release(L2DATA(ep->com.tdev), ep->l2t); 1181 + l2t_release(ep->com.tdev, ep->l2t); 1182 1182 put_ep(&ep->com); 1183 1183 return CPL_RET_BUF_DONE; 1184 1184 } ··· 1377 1377 if (!child_ep) { 1378 1378 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", 1379 1379 __func__); 1380 - l2t_release(L2DATA(tdev), l2t); 1380 + l2t_release(tdev, l2t); 1381 1381 dst_release(dst); 1382 1382 goto reject; 1383 1383 } ··· 1956 1956 if (!err) 1957 1957 goto out; 1958 1958 1959 - l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t); 1959 + l2t_release(h->rdev.t3cdev_p, ep->l2t); 1960 1960 fail4: 1961 1961 dst_release(ep->dst); 1962 1962 fail3: ··· 2127 2127 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, 2128 2128 l2t); 2129 2129 dst_hold(new); 2130 - l2t_release(L2DATA(ep->com.tdev), ep->l2t); 2130 + l2t_release(ep->com.tdev, ep->l2t); 2131 2131 ep->l2t = l2t; 2132 2132 dst_release(old); 2133 2133 ep->dst = new;
-1
drivers/input/keyboard/adp5588-keys.c
··· 668 668 MODULE_LICENSE("GPL"); 669 669 MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 670 670 MODULE_DESCRIPTION("ADP5588/87 Keypad driver"); 671 - MODULE_ALIAS("platform:adp5588-keys");
+1 -1
drivers/input/misc/cm109.c
··· 475 475 le16_to_cpu(dev->ctl_req->wIndex), 476 476 dev->ctl_data, 477 477 USB_PKT_LEN, USB_CTRL_SET_TIMEOUT); 478 - if (error && error != EINTR) 478 + if (error < 0 && error != -EINTR) 479 479 err("%s: usb_control_msg() failed %d", __func__, error); 480 480 } 481 481
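The condition fixes a double sign error: usb_control_msg() returns a negative errno or a positive byte count, so 'error && error != EINTR' both logged successful transfers as failures and never matched a real interruption (EINTR is positive; -EINTR is what actually comes back). A standalone demonstration of the corrected test:

#include <stdio.h>
#include <errno.h>

static void check(int error)
{
        if (error < 0 && error != -EINTR)
                printf("%4d: failure\n", error);
        else
                printf("%4d: ok or interrupted\n", error);
}

int main(void)
{
        check(8);       /* positive byte count: success      */
        check(-EINTR);  /* interrupted: tolerated silently   */
        check(-EPIPE);  /* genuine error: reported           */
        return 0;
}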
+20
drivers/input/mouse/bcm5974.c
··· 67 67 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 68 68 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 69 69 #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 70 + /* MacbookAir4,1 (unibody, July 2011) */ 71 + #define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249 72 + #define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a 73 + #define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b 70 74 /* MacbookAir4,2 (unibody, July 2011) */ 71 75 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c 72 76 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d ··· 116 112 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), 117 113 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), 118 114 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), 115 + /* MacbookAir4,1 */ 116 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI), 117 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO), 118 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS), 119 119 /* MacbookAir4,2 */ 120 120 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI), 121 121 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO), ··· 341 333 { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, 342 334 { DIM_X, DIM_X / SN_COORD, -4750, 5280 }, 343 335 { DIM_Y, DIM_Y / SN_COORD, -150, 6730 } 336 + }, 337 + { 338 + USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI, 339 + USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO, 340 + USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS, 341 + HAS_INTEGRATED_BUTTON, 342 + 0x84, sizeof(struct bt_data), 343 + 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 344 + { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, 345 + { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, 346 + { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, 347 + { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } 344 348 }, 345 349 {} 346 350 };
-14
drivers/input/tablet/wacom_sys.c
··· 229 229 get_unaligned_le16(&report[i + 3]); 230 230 i += 4; 231 231 } 232 - } else if (usage == WCM_DIGITIZER) { 233 - /* max pressure isn't reported 234 - features->pressure_max = (unsigned short) 235 - (report[i+4] << 8 | report[i + 3]); 236 - */ 237 - features->pressure_max = 255; 238 - i += 4; 239 232 } 240 233 break; 241 234 ··· 283 290 case HID_USAGE_STYLUS: 284 291 pen = 1; 285 292 i++; 286 - break; 287 - 288 - case HID_USAGE_UNDEFINED: 289 - if (usage == WCM_DESKTOP && finger) /* capacity */ 290 - features->pressure_max = 291 - get_unaligned_le16(&report[i + 3]); 292 - i += 4; 293 293 break; 294 294 } 295 295 break;
+35 -10
drivers/input/tablet/wacom_wac.c
··· 800 800 int i; 801 801 802 802 for (i = 0; i < 2; i++) { 803 - int p = data[9 * i + 2]; 804 - bool touch = p && !wacom->shared->stylus_in_proximity; 803 + int offset = (data[1] & 0x80) ? (8 * i) : (9 * i); 804 + bool touch = data[offset + 3] & 0x80; 805 805 806 - input_mt_slot(input, i); 807 - input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); 808 806 /* 809 807 * Touch events need to be disabled while stylus is 810 808 * in proximity because user's hand is resting on touchpad 811 809 * and sending unwanted events. User expects tablet buttons 812 810 * to continue working though. 813 811 */ 812 + touch = touch && !wacom->shared->stylus_in_proximity; 813 + 814 + input_mt_slot(input, i); 815 + input_mt_report_slot_state(input, MT_TOOL_FINGER, touch); 814 816 if (touch) { 815 - int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff; 816 - int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff; 817 + int x = get_unaligned_be16(&data[offset + 3]) & 0x7ff; 818 + int y = get_unaligned_be16(&data[offset + 5]) & 0x7ff; 817 819 if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) { 818 820 x <<= 5; 819 821 y <<= 5; 820 822 } 821 - input_report_abs(input, ABS_MT_PRESSURE, p); 822 823 input_report_abs(input, ABS_MT_POSITION_X, x); 823 824 input_report_abs(input, ABS_MT_POSITION_Y, y); 824 825 } ··· 1057 1056 features->x_fuzz, 0); 1058 1057 input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 1059 1058 features->y_fuzz, 0); 1060 - input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 1061 - features->pressure_fuzz, 0); 1062 1059 1063 1060 if (features->device_type == BTN_TOOL_PEN) { 1061 + input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 1062 + features->pressure_fuzz, 0); 1063 + 1064 1064 /* penabled devices have fixed resolution for each model */ 1065 1065 input_abs_set_res(input_dev, ABS_X, features->x_resolution); 1066 1066 input_abs_set_res(input_dev, ABS_Y, features->y_resolution); ··· 1100 1098 __set_bit(BTN_TOOL_MOUSE, input_dev->keybit); 1101 1099 __set_bit(BTN_STYLUS, input_dev->keybit); 1102 1100 __set_bit(BTN_STYLUS2, input_dev->keybit); 1101 + 1102 + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); 1103 1103 break; 1104 1104 1105 1105 case WACOM_21UX2: ··· 1130 1126 } 1131 1127 1132 1128 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1129 + 1130 + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); 1131 + 1133 1132 wacom_setup_cintiq(wacom_wac); 1134 1133 break; 1135 1134 ··· 1157 1150 /* fall through */ 1158 1151 1159 1152 case INTUOS: 1153 + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); 1154 + 1160 1155 wacom_setup_intuos(wacom_wac); 1161 1156 break; 1162 1157 ··· 1174 1165 1175 1166 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); 1176 1167 wacom_setup_intuos(wacom_wac); 1168 + 1169 + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); 1177 1170 break; 1178 1171 1179 1172 case TABLETPC2FG: ··· 1194 1183 case TABLETPC: 1195 1184 __clear_bit(ABS_MISC, input_dev->absbit); 1196 1185 1186 + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); 1187 + 1197 1188 if (features->device_type != BTN_TOOL_PEN) 1198 1189 break; /* no need to process stylus stuff */ 1199 1190 1200 1191 /* fall through */ 1201 1192 1202 1193 case PL: 1203 - case PTU: 1204 1194 case DTU: 1205 1195 __set_bit(BTN_TOOL_PEN, input_dev->keybit); 1196 + __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); 1206 1197 __set_bit(BTN_STYLUS, input_dev->keybit); 1198 + __set_bit(BTN_STYLUS2, input_dev->keybit); 1199 + 1200 + __set_bit(INPUT_PROP_DIRECT, 
input_dev->propbit); 1201 + break; 1202 + 1203 + case PTU: 1207 1204 __set_bit(BTN_STYLUS2, input_dev->keybit); 1208 1205 /* fall through */ 1209 1206 1210 1207 case PENPARTNER: 1208 + __set_bit(BTN_TOOL_PEN, input_dev->keybit); 1211 1209 __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); 1210 + __set_bit(BTN_STYLUS, input_dev->keybit); 1211 + 1212 + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); 1212 1213 break; 1213 1214 1214 1215 case BAMBOO_PT: 1215 1216 __clear_bit(ABS_MISC, input_dev->absbit); 1217 + 1218 + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); 1216 1219 1217 1220 if (features->device_type == BTN_TOOL_DOUBLETAP) { 1218 1221 __set_bit(BTN_LEFT, input_dev->keybit);
+2
drivers/input/touchscreen/wacom_w8001.c
··· 383 383 dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); 384 384 strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name)); 385 385 386 + __set_bit(INPUT_PROP_DIRECT, dev->propbit); 387 + 386 388 /* penabled? */ 387 389 error = w8001_command(w8001, W8001_CMD_QUERY, true); 388 390 if (!error) {
+1 -1
drivers/iommu/dmar.c
··· 1388 1388 return ret; 1389 1389 } 1390 1390 1391 - ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu); 1391 + ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu); 1392 1392 if (ret) 1393 1393 printk(KERN_ERR "IOMMU: can't request irq\n"); 1394 1394 return ret;
+2
drivers/leds/ledtrig-timer.c
··· 41 41 42 42 if (count == size) { 43 43 led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off); 44 + led_cdev->blink_delay_on = state; 44 45 ret = count; 45 46 } 46 47 ··· 70 69 71 70 if (count == size) { 72 71 led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state); 72 + led_cdev->blink_delay_off = state; 73 73 ret = count; 74 74 } 75 75
-13
drivers/media/video/omap/omap_vout.c
··· 2194 2194 "'%s' Display already enabled\n", 2195 2195 def_display->name); 2196 2196 } 2197 - /* set the update mode */ 2198 - if (def_display->caps & 2199 - OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { 2200 - if (dssdrv->enable_te) 2201 - dssdrv->enable_te(def_display, 0); 2202 - if (dssdrv->set_update_mode) 2203 - dssdrv->set_update_mode(def_display, 2204 - OMAP_DSS_UPDATE_MANUAL); 2205 - } else { 2206 - if (dssdrv->set_update_mode) 2207 - dssdrv->set_update_mode(def_display, 2208 - OMAP_DSS_UPDATE_AUTO); 2209 - } 2210 2197 } 2211 2198 } 2212 2199
+1
drivers/media/video/omap3isp/ispccdc.c
··· 31 31 #include <linux/dma-mapping.h> 32 32 #include <linux/mm.h> 33 33 #include <linux/sched.h> 34 + #include <linux/slab.h> 34 35 #include <media/v4l2-event.h> 35 36 36 37 #include "isp.h"
+1 -1
drivers/media/video/uvc/uvc_driver.c
··· 1961 1961 1962 1962 list_for_each_entry(stream, &dev->streams, list) { 1963 1963 if (stream->intf == intf) 1964 - return uvc_video_resume(stream); 1964 + return uvc_video_resume(stream, reset); 1965 1965 } 1966 1966 1967 1967 uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface "
+1 -1
drivers/media/video/uvc/uvc_entity.c
··· 49 49 if (remote == NULL) 50 50 return -EINVAL; 51 51 52 - source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING) 52 + source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING) 53 53 ? (remote->vdev ? &remote->vdev->entity : NULL) 54 54 : &remote->subdev.entity; 55 55 if (source == NULL)
+9 -1
drivers/media/video/uvc/uvc_video.c
··· 1104 1104 * buffers, making sure userspace applications are notified of the problem 1105 1105 * instead of waiting forever. 1106 1106 */ 1107 - int uvc_video_resume(struct uvc_streaming *stream) 1107 + int uvc_video_resume(struct uvc_streaming *stream, int reset) 1108 1108 { 1109 1109 int ret; 1110 + 1111 + /* If the bus has been reset on resume, set the alternate setting to 0. 1112 + * This should be the default value, but some devices crash or otherwise 1113 + * misbehave if they don't receive a SET_INTERFACE request before any 1114 + * other video control request. 1115 + */ 1116 + if (reset) 1117 + usb_set_interface(stream->dev->udev, stream->intfnum, 0); 1110 1118 1111 1119 stream->frozen = 0; 1112 1120
+1 -1
drivers/media/video/uvc/uvcvideo.h
··· 638 638 /* Video */ 639 639 extern int uvc_video_init(struct uvc_streaming *stream); 640 640 extern int uvc_video_suspend(struct uvc_streaming *stream); 641 - extern int uvc_video_resume(struct uvc_streaming *stream); 641 + extern int uvc_video_resume(struct uvc_streaming *stream, int reset); 642 642 extern int uvc_video_enable(struct uvc_streaming *stream, int enable); 643 643 extern int uvc_probe_video(struct uvc_streaming *stream, 644 644 struct uvc_streaming_control *probe);
+11
drivers/media/video/v4l2-dev.c
··· 173 173 media_device_unregister_entity(&vdev->entity); 174 174 #endif 175 175 176 + /* Do not call v4l2_device_put if there is no release callback set. 177 + * Drivers that have no v4l2_device release callback might free the 178 + * v4l2_dev instance in the video_device release callback below, so we 179 + * must perform this check here. 180 + * 181 + * TODO: In the long run all drivers that use v4l2_device should use the 182 + * v4l2_device release callback. This check will then be unnecessary. 183 + */ 184 + if (v4l2_dev->release == NULL) 185 + v4l2_dev = NULL; 186 + 176 187 /* Release video_device and perform other 177 188 cleanups as needed. */ 178 189 vdev->release(vdev);
+2
drivers/media/video/v4l2-device.c
··· 38 38 mutex_init(&v4l2_dev->ioctl_lock); 39 39 v4l2_prio_init(&v4l2_dev->prio); 40 40 kref_init(&v4l2_dev->ref); 41 + get_device(dev); 41 42 v4l2_dev->dev = dev; 42 43 if (dev == NULL) { 43 44 /* If dev == NULL, then name must be filled in by the caller */ ··· 94 93 95 94 if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) 96 95 dev_set_drvdata(v4l2_dev->dev, NULL); 96 + put_device(v4l2_dev->dev); 97 97 v4l2_dev->dev = NULL; 98 98 } 99 99 EXPORT_SYMBOL_GPL(v4l2_device_disconnect);
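The new get_device()/put_device() pair pins the parent struct device for as long as v4l2_dev->dev points at it, closing a window where the parent could be released while the v4l2 core still dereferenced it (the dev_get_drvdata() call in disconnect, for one). Both helpers tolerate NULL, which is why the register path can take the reference unconditionally even though dev == NULL is an accepted input. The pairing discipline in miniature:

get_device(dev);           /* register: reference taken when cached */
v4l2_dev->dev = dev;
/* ... device lifetime ... */
put_device(v4l2_dev->dev); /* disconnect: dropped when uncached     */
v4l2_dev->dev = NULL;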
+1 -1
drivers/mfd/jz4740-adc.c
··· 273 273 ct->regs.ack = JZ_REG_ADC_STATUS; 274 274 ct->chip.irq_mask = irq_gc_mask_set_bit; 275 275 ct->chip.irq_unmask = irq_gc_mask_clr_bit; 276 - ct->chip.irq_ack = irq_gc_ack; 276 + ct->chip.irq_ack = irq_gc_ack_set_bit; 277 277 278 278 irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); 279 279
+5
drivers/mfd/max8997.c
··· 135 135 max8997->dev = &i2c->dev; 136 136 max8997->i2c = i2c; 137 137 max8997->type = id->driver_data; 138 + max8997->irq = i2c->irq; 138 139 139 140 if (!pdata) 140 141 goto err; 141 142 143 + max8997->irq_base = pdata->irq_base; 144 + max8997->ono = pdata->ono; 142 145 max8997->wakeup = pdata->wakeup; 143 146 144 147 mutex_init(&max8997->iolock); ··· 154 151 i2c_set_clientdata(max8997->muic, max8997); 155 152 156 153 pm_runtime_set_active(max8997->dev); 154 + 155 + max8997_irq_init(max8997); 157 156 158 157 mfd_add_devices(max8997->dev, -1, max8997_devs, 159 158 ARRAY_SIZE(max8997_devs),
+1 -1
drivers/mfd/omap-usb-host.c
··· 17 17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 18 */ 19 19 #include <linux/kernel.h> 20 + #include <linux/module.h> 20 21 #include <linux/types.h> 21 22 #include <linux/slab.h> 22 23 #include <linux/delay.h> ··· 677 676 | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF 678 677 | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE); 679 678 680 - reg |= (1 << (i + 1)); 681 679 } else 682 680 continue; 683 681
+2
drivers/mfd/tps65910-irq.c
··· 178 178 switch (tps65910_chip_id(tps65910)) { 179 179 case TPS65910: 180 180 tps65910->irq_num = TPS65910_NUM_IRQ; 181 + break; 181 182 case TPS65911: 182 183 tps65910->irq_num = TPS65911_NUM_IRQ; 184 + break; 183 185 } 184 186 185 187 /* Register with genirq */
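A classic missing-break: without the first break, the TPS65910 case fell straight through and ended up with TPS65911's interrupt count. The failure mode in miniature:

switch (chip_id) {
case TPS65910:
        irq_num = TPS65910_NUM_IRQ;
        break;  /* previously absent: fell into the next case */
case TPS65911:
        irq_num = TPS65911_NUM_IRQ;
        break;
}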
+4 -1
drivers/mfd/twl4030-madc.c
··· 510 510 u8 ch_msb, ch_lsb; 511 511 int ret; 512 512 513 - if (!req) 513 + if (!req || !twl4030_madc) 514 514 return -EINVAL; 515 + 515 516 mutex_lock(&twl4030_madc->lock); 516 517 if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) { 517 518 ret = -EINVAL; ··· 706 705 madc = kzalloc(sizeof(*madc), GFP_KERNEL); 707 706 if (!madc) 708 707 return -ENOMEM; 708 + 709 + madc->dev = &pdev->dev; 709 710 710 711 /* 711 712 * Phoenix provides 2 interrupt lines. The first one is connected to
+2 -2
drivers/mfd/wm8350-gpio.c
··· 37 37 return ret; 38 38 } 39 39 40 - static int gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) 40 + static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) 41 41 { 42 42 if (db == WM8350_GPIO_DEBOUNCE_ON) 43 43 return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE, ··· 210 210 goto err; 211 211 if (gpio_set_polarity(wm8350, gpio, pol)) 212 212 goto err; 213 - if (gpio_set_debounce(wm8350, gpio, debounce)) 213 + if (wm8350_gpio_set_debounce(wm8350, gpio, debounce)) 214 214 goto err; 215 215 if (gpio_set_dir(wm8350, gpio, dir)) 216 216 goto err;
+8 -6
drivers/misc/lis3lv02d/lis3lv02d.c
··· 375 375 * both have been read. So the value read will always be correct. 376 376 * Set BOOT bit to refresh factory tuning values. 377 377 */ 378 - lis3->read(lis3, CTRL_REG2, &reg); 379 - if (lis3->whoami == WAI_12B) 380 - reg |= CTRL2_BDU | CTRL2_BOOT; 381 - else 382 - reg |= CTRL2_BOOT_8B; 383 - lis3->write(lis3, CTRL_REG2, reg); 378 + if (lis3->pdata) { 379 + lis3->read(lis3, CTRL_REG2, &reg); 380 + if (lis3->whoami == WAI_12B) 381 + reg |= CTRL2_BDU | CTRL2_BOOT; 382 + else 383 + reg |= CTRL2_BOOT_8B; 384 + lis3->write(lis3, CTRL_REG2, reg); 385 + } 384 386 385 387 /* LIS3 power on delay is quite long */ 386 388 msleep(lis3->pwron_delay / lis3lv02d_get_odr());
+5 -7
drivers/misc/pti.c
··· 165 165 static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc, 166 166 const char *thread_name) 167 167 { 168 + /* 169 + * Since we access the comm member in current's task_struct, we only 170 + * need to be as large as what 'comm' in that structure is. 171 + */ 172 + char comm[TASK_COMM_LEN]; 168 173 struct pti_masterchannel mccontrol = {.master = CONTROL_ID, 169 174 .channel = 0}; 170 175 const char *thread_name_p; ··· 177 172 u8 control_frame[CONTROL_FRAME_LEN]; 178 173 179 174 if (!thread_name) { 180 - /* 181 - * Since we access the comm member in current's task_struct, 182 - * we only need to be as large as what 'comm' in that 183 - * structure is. 184 - */ 185 - char comm[TASK_COMM_LEN]; 186 - 187 175 if (!in_interrupt()) 188 176 get_task_comm(comm, current); 189 177 else
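Hoisting comm[] to function scope is a lifetime fix, not a style one: thread_name_p can be left pointing into comm, and with the old block-scope declaration that storage ended at the inner closing brace, leaving a dangling pointer by the time the control frame is built. A compact sketch of the corrected pattern (the thread names are invented):

    #include <stdio.h>
    #include <string.h>

    #define TASK_COMM_LEN 16

    static void build_frame(const char *thread_name)
    {
        char comm[TASK_COMM_LEN];   /* function scope: alive until return */
        const char *thread_name_p;

        if (!thread_name) {
            strncpy(comm, "kworker/0:1", sizeof(comm) - 1);
            comm[sizeof(comm) - 1] = '\0';
            thread_name_p = comm;   /* safe: comm outlives this block */
        } else {
            thread_name_p = thread_name;
        }

        printf("control frame for %s\n", thread_name_p);
    }

    int main(void)
    {
        build_frame(NULL);
        build_frame("user-thread");
        return 0;
    }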
+3
drivers/mmc/card/block.c
··· 926 926 /* 927 927 * Reliable writes are used to implement Forced Unit Access and 928 928 * REQ_META accesses, and are supported only on MMCs. 929 + * 930 + * XXX: this really needs a good explanation of why REQ_META 931 + * is treated special. 929 932 */ 930 933 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || 931 934 (req->cmd_flags & REQ_META)) &&
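As the new XXX note says, the REQ_META half of this test is under-documented; the predicate itself is just a flag check gated on write direction and card support. A stripped-down restatement, with hypothetical bit values standing in for the block layer's flags:

    #include <assert.h>
    #include <stdbool.h>

    #define REQ_FUA  (1u << 0)  /* hypothetical bit positions */
    #define REQ_META (1u << 1)

    static bool do_rel_wr(unsigned int cmd_flags, bool is_write, bool card_ok)
    {
        return (cmd_flags & (REQ_FUA | REQ_META)) && is_write && card_ok;
    }

    int main(void)
    {
        assert(do_rel_wr(REQ_FUA, true, true));
        assert(!do_rel_wr(REQ_FUA, false, true));   /* reads never qualify */
        assert(!do_rel_wr(0, true, true));          /* plain writes don't */
        return 0;
    }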
+6 -5
drivers/net/Kconfig
··· 2535 2535 source "drivers/net/stmmac/Kconfig" 2536 2536 2537 2537 config PCH_GBE 2538 - tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE" 2538 + tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" 2539 2539 depends on PCI 2540 2540 select MII 2541 2541 ---help--- ··· 2548 2548 This driver enables Gigabit Ethernet function. 2549 2549 2550 2550 This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ 2551 - Output Hub), ML7223. 2552 - ML7223 IOH is for MP(Media Phone) use. 2553 - ML7223 is companion chip for Intel Atom E6xx series. 2554 - ML7223 is completely compatible for Intel EG20T PCH. 2551 + Output Hub), ML7223/ML7831. 2552 + ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general 2553 + purpose use. 2554 + ML7223/ML7831 is companion chip for Intel Atom E6xx series. 2555 + ML7223/ML7831 is completely compatible for Intel EG20T PCH. 2555 2556 2556 2557 config FTGMAC100 2557 2558 tristate "Faraday FTGMAC100 Gigabit Ethernet support"
+91 -31
drivers/net/bnx2x/bnx2x.h
··· 315 315 u32 raw; 316 316 }; 317 317 318 + /* dropless fc FW/HW related params */ 319 + #define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512) 320 + #define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \ 321 + ETH_MAX_AGGREGATION_QUEUES_E1 :\ 322 + ETH_MAX_AGGREGATION_QUEUES_E1H_E2) 323 + #define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp)) 324 + #define FW_PREFETCH_CNT 16 325 + #define DROPLESS_FC_HEADROOM 100 318 326 319 327 /* MC hsi */ 320 328 #define BCM_PAGE_SHIFT 12 ··· 339 331 /* SGE ring related macros */ 340 332 #define NUM_RX_SGE_PAGES 2 341 333 #define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) 342 - #define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) 334 + #define NEXT_PAGE_SGE_DESC_CNT 2 335 + #define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT) 343 336 /* RX_SGE_CNT is promised to be a power of 2 */ 344 337 #define RX_SGE_MASK (RX_SGE_CNT - 1) 345 338 #define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) 346 339 #define MAX_RX_SGE (NUM_RX_SGE - 1) 347 340 #define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ 348 - (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1) 341 + (MAX_RX_SGE_CNT - 1)) ? \ 342 + (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \ 343 + (x) + 1) 349 344 #define RX_SGE(x) ((x) & MAX_RX_SGE) 345 + 346 + /* 347 + * Number of required SGEs is the sum of two: 348 + * 1. Number of possible opened aggregations (next packet for 349 + * these aggregations will probably consume SGE immediately) 350 + * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only 351 + * after placement on BD for new TPA aggregation) 352 + * 353 + * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page 354 + */ 355 + #define NUM_SGE_REQ (MAX_AGG_QS(bp) + \ 356 + (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2) 357 + #define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \ 358 + MAX_RX_SGE_CNT) 359 + #define SGE_TH_LO(bp) (NUM_SGE_REQ + \ 360 + NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT) 361 + #define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM) 350 362 351 363 /* Manipulate a bit vector defined as an array of u64 */ 352 364 ··· 579 551 580 552 #define NUM_TX_RINGS 16 581 553 #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) 582 - #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) 554 + #define NEXT_PAGE_TX_DESC_CNT 1 555 + #define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT) 583 556 #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) 584 557 #define MAX_TX_BD (NUM_TX_BD - 1) 585 558 #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) 586 559 #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ 587 - (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) 560 + (MAX_TX_DESC_CNT - 1)) ? \ 561 + (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \ 562 + (x) + 1) 588 563 #define TX_BD(x) ((x) & MAX_TX_BD) 589 564 #define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) 590 565 591 566 /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ 592 567 #define NUM_RX_RINGS 8 593 568 #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) 594 - #define MAX_RX_DESC_CNT (RX_DESC_CNT - 2) 569 + #define NEXT_PAGE_RX_DESC_CNT 2 570 + #define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT) 595 571 #define RX_DESC_MASK (RX_DESC_CNT - 1) 596 572 #define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) 597 573 #define MAX_RX_BD (NUM_RX_BD - 1) 598 574 #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) 599 - #define MIN_RX_AVAIL 128 575 + 576 + /* dropless fc calculations for BDs 577 + * 578 + * Number of BDs should be the same as the number of buffers in BRB: 579 + * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT 580 + * "next" elements on each page 581 + */ 582 + #define NUM_BD_REQ BRB_SIZE(bp) 583 + #define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \ 584 + MAX_RX_DESC_CNT) 585 + #define BD_TH_LO(bp) (NUM_BD_REQ + \ 586 + NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \ 587 + FW_DROP_LEVEL(bp)) 588 + #define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM) 589 + 590 + #define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128) 600 591 601 592 #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ 602 593 ETH_MIN_RX_CQES_WITH_TPA_E1 : \ ··· 626 579 MIN_RX_AVAIL)) 627 580 628 581 #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ 629 - (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) 582 + (MAX_RX_DESC_CNT - 1)) ? \ 583 + (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \ 584 + (x) + 1) 630 585 #define RX_BD(x) ((x) & MAX_RX_BD) 631 586 632 587 /* ··· 638 589 #define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) 639 590 #define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) 640 591 #define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) 641 - #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1) 592 + #define NEXT_PAGE_RCQ_DESC_CNT 1 593 + #define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT) 642 594 #define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) 643 595 #define MAX_RCQ_BD (NUM_RCQ_BD - 1) 644 596 #define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) 645 597 #define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ 646 - (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) 598 + (MAX_RCQ_DESC_CNT - 1)) ? \ 599 + (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \ 600 + (x) + 1) 647 601 #define RCQ_BD(x) ((x) & MAX_RCQ_BD) 602 + 603 + /* dropless fc calculations for RCQs 604 + * 605 + * Number of RCQs should be the same as the number of buffers in BRB: 606 + * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT 607 + * "next" elements on each page 608 + */ 609 + #define NUM_RCQ_REQ BRB_SIZE(bp) 610 + #define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \ 611 + MAX_RCQ_DESC_CNT) 612 + #define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \ 613 + NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \ 614 + FW_DROP_LEVEL(bp)) 615 + #define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) 648 616 649 617 650 618 /* This is needed for determining of last_max */ ··· 751 685 #define FP_CSB_FUNC_OFF \ 752 686 offsetof(struct cstorm_status_block_c, func) 753 687 754 - #define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */ 755 - /* (HC_INDEX_U_TOE_RX_CQ_CONS) */ 756 - #define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */ 757 - /* (HC_INDEX_U_ETH_RX_CQ_CONS) */ 758 - #define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */ 759 - /* (HC_INDEX_U_ETH_RX_BD_CONS) */ 688 + #define HC_INDEX_ETH_RX_CQ_CONS 1 760 689 761 - #define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */ 762 - /* (HC_INDEX_C_TOE_TX_CQ_CONS) */ 763 - #define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */ 764 - /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ 765 - #define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */ 766 - /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ 767 - #define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */ 768 - /* (HC_INDEX_C_ETH_TX_CQ_CONS) */ 690 + #define HC_INDEX_OOO_TX_CQ_CONS 4 691 + 692 + #define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 693 + 694 + #define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 695 + 696 + #define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 769 697 770 698 #define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 771 - 772 699 773 700 #define BNX2X_RX_SB_INDEX \ 774 701 (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) ··· 1159 1100 #define BP_PORT(bp) (bp->pfid & 1) 1160 1101 #define BP_FUNC(bp) (bp->pfid) 1161 1102 #define BP_ABS_FUNC(bp) (bp->pf_num) 1162 - #define BP_E1HVN(bp) (bp->pfid >> 1) 1163 - #define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/ 1164 - #define BP_L_ID(bp) (BP_E1HVN(bp) << 2) 1165 - #define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\ 1166 - BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) 1103 + #define BP_VN(bp) ((bp)->pfid >> 1) 1104 + #define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4) 1105 + #define BP_L_ID(bp) (BP_VN(bp) << 2) 1106 + #define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\ 1107 + (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) 1108 + #define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp)) 1167 1109 1168 1110 struct net_device *dev; 1169 1111 struct pci_dev *pdev; ··· 1827 1767 1828 1768 #define MAX_DMAE_C_PER_PORT 8 1829 1769 #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 1830 - BP_E1HVN(bp)) 1770 + BP_VN(bp)) 1831 1771 #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 1832 1772 E1HVN_MAX) 1833 1773 ··· 1853 1793 1854 1794 /* must be used on a CID before placing it on a HW ring */ 1855 1795 #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ 1856 - (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \ 1796 + (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \ 1857 1797 (x)) 1858 1798 1859 1799 #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
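The threshold macros above collapse to small integers once the chip constants are known. A worked example, using assumed values for a non-E3, E2-style part (BRB of 512 blocks, 64 aggregation queues, 510 usable SGEs per page; all three numbers are illustrative here, not read from the headers):

    #include <stdio.h>

    #define BRB_SIZE               512  /* assumed: non-E3 */
    #define MAX_AGG_QS             64   /* assumed: E1H/E2 */
    #define NEXT_PAGE_SGE_DESC_CNT 2
    #define MAX_RX_SGE_CNT         510  /* assumed: SGEs per page minus "next" slots */
    #define DROPLESS_FC_HEADROOM   100

    int main(void)
    {
        int num_sge_req = MAX_AGG_QS + (BRB_SIZE - MAX_AGG_QS) / 2;   /* 288 */
        int num_sge_pg_req =
            (num_sge_req + MAX_RX_SGE_CNT - 1) / MAX_RX_SGE_CNT;      /* 1 */
        int sge_th_lo = num_sge_req + num_sge_pg_req * NEXT_PAGE_SGE_DESC_CNT;
        int sge_th_hi = sge_th_lo + DROPLESS_FC_HEADROOM;

        printf("lo=%d hi=%d\n", sge_th_lo, sge_th_hi);  /* lo=290 hi=390 */
        return 0;
    }

The BD and RCQ thresholds follow the same shape: enough ring entries to cover the BRB, plus the per-page "next" descriptors, plus the firmware drop level and headroom.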
+14 -13
drivers/net/bnx2x/bnx2x_cmn.c
··· 987 987 void bnx2x_init_rx_rings(struct bnx2x *bp) 988 988 { 989 989 int func = BP_FUNC(bp); 990 - int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : 991 - ETH_MAX_AGGREGATION_QUEUES_E1H_E2; 992 990 u16 ring_prod; 993 991 int i, j; 994 992 ··· 999 1001 1000 1002 if (!fp->disable_tpa) { 1001 1003 /* Fill the per-aggregtion pool */ 1002 - for (i = 0; i < max_agg_queues; i++) { 1004 + for (i = 0; i < MAX_AGG_QS(bp); i++) { 1003 1005 struct bnx2x_agg_info *tpa_info = 1004 1006 &fp->tpa_info[i]; 1005 1007 struct sw_rx_bd *first_buf = ··· 1039 1041 bnx2x_free_rx_sge_range(bp, fp, 1040 1042 ring_prod); 1041 1043 bnx2x_free_tpa_pool(bp, fp, 1042 - max_agg_queues); 1044 + MAX_AGG_QS(bp)); 1043 1045 fp->disable_tpa = 1; 1044 1046 ring_prod = 0; 1045 1047 break; ··· 1135 1137 bnx2x_free_rx_bds(fp); 1136 1138 1137 1139 if (!fp->disable_tpa) 1138 - bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? 1139 - ETH_MAX_AGGREGATION_QUEUES_E1 : 1140 - ETH_MAX_AGGREGATION_QUEUES_E1H_E2); 1140 + bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); 1141 1141 } 1142 1142 } 1143 1143 ··· 3091 3095 struct bnx2x_fastpath *fp = &bp->fp[index]; 3092 3096 int ring_size = 0; 3093 3097 u8 cos; 3098 + int rx_ring_size = 0; 3094 3099 3095 3100 /* if rx_ring_size specified - use it */ 3096 - int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : 3097 - MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); 3101 + if (!bp->rx_ring_size) { 3098 3102 3099 - /* allocate at least number of buffers required by FW */ 3100 - rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : 3101 - MIN_RX_SIZE_TPA, 3102 - rx_ring_size); 3103 + rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); 3104 + 3105 + /* allocate at least number of buffers required by FW */ 3106 + rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : 3107 + MIN_RX_SIZE_TPA, rx_ring_size); 3108 + 3109 + bp->rx_ring_size = rx_ring_size; 3110 + } else 3111 + rx_ring_size = bp->rx_ring_size; 3103 3112 3104 3113 /* Common */ 3105 3114 sb = &bnx2x_fp(bp, index, status_blk);
+1
drivers/net/bnx2x/bnx2x_dcb.c
··· 2120 2120 break; 2121 2121 case DCB_CAP_ATTR_DCBX: 2122 2122 *cap = BNX2X_DCBX_CAPS; 2123 + break; 2123 2124 default: 2124 2125 rval = -EINVAL; 2125 2126 break;
+41 -7
drivers/net/bnx2x/bnx2x_ethtool.c
··· 363 363 } 364 364 365 365 /* advertise the requested speed and duplex if supported */ 366 - cmd->advertising &= bp->port.supported[cfg_idx]; 366 + if (cmd->advertising & ~(bp->port.supported[cfg_idx])) { 367 + DP(NETIF_MSG_LINK, "Advertisement parameters " 368 + "are not supported\n"); 369 + return -EINVAL; 370 + } 367 371 368 372 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; 369 - bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL; 370 - bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg | 373 + bp->link_params.req_duplex[cfg_idx] = cmd->duplex; 374 + bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg | 371 375 cmd->advertising); 376 + if (cmd->advertising) { 372 377 378 + bp->link_params.speed_cap_mask[cfg_idx] = 0; 379 + if (cmd->advertising & ADVERTISED_10baseT_Half) { 380 + bp->link_params.speed_cap_mask[cfg_idx] |= 381 + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF; 382 + } 383 + if (cmd->advertising & ADVERTISED_10baseT_Full) 384 + bp->link_params.speed_cap_mask[cfg_idx] |= 385 + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL; 386 + 387 + if (cmd->advertising & ADVERTISED_100baseT_Full) 388 + bp->link_params.speed_cap_mask[cfg_idx] |= 389 + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL; 390 + 391 + if (cmd->advertising & ADVERTISED_100baseT_Half) { 392 + bp->link_params.speed_cap_mask[cfg_idx] |= 393 + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF; 394 + } 395 + if (cmd->advertising & ADVERTISED_1000baseT_Half) { 396 + bp->link_params.speed_cap_mask[cfg_idx] |= 397 + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; 398 + } 399 + if (cmd->advertising & (ADVERTISED_1000baseT_Full | 400 + ADVERTISED_1000baseKX_Full)) 401 + bp->link_params.speed_cap_mask[cfg_idx] |= 402 + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; 403 + 404 + if (cmd->advertising & (ADVERTISED_10000baseT_Full | 405 + ADVERTISED_10000baseKX4_Full | 406 + ADVERTISED_10000baseKR_Full)) 407 + bp->link_params.speed_cap_mask[cfg_idx] |= 408 + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; 409 + } 373 410 } else { /* forced speed */ 374 411 /* advertise the requested speed and duplex if supported */ 375 412 switch (speed) { ··· 1347 1310 if (bp->rx_ring_size) 1348 1311 ering->rx_pending = bp->rx_ring_size; 1349 1312 else 1350 - if (bp->state == BNX2X_STATE_OPEN && bp->num_queues) 1351 - ering->rx_pending = MAX_RX_AVAIL/bp->num_queues; 1352 - else 1353 - ering->rx_pending = MAX_RX_AVAIL; 1313 + ering->rx_pending = MAX_RX_AVAIL; 1354 1314 1355 1315 ering->rx_mini_pending = 0; 1356 1316 ering->rx_jumbo_pending = 0;
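The new if-chain first rejects advertisement bits the port does not support, then translates each accepted ethtool bit into a firmware speed-capability bit. The translation is a pure bitmask mapping, which can equivalently be read as a table; a sketch with hypothetical bit values standing in for the ethtool ADVERTISED_* and PORT_HW_CFG_SPEED_CAPABILITY_D0_* constants:

    #include <stdio.h>

    #define ADV_10_HALF     (1u << 0)   /* all bit values hypothetical */
    #define ADV_10_FULL     (1u << 1)
    #define ADV_100_HALF    (1u << 2)
    #define ADV_100_FULL    (1u << 3)
    #define ADV_1000_FULL   (1u << 5)
    #define ADV_10000_FULL  (1u << 12)

    #define CAP_10M_HALF    (1u << 0)
    #define CAP_10M_FULL    (1u << 1)
    #define CAP_100M_HALF   (1u << 2)
    #define CAP_100M_FULL   (1u << 3)
    #define CAP_1G          (1u << 4)
    #define CAP_10G         (1u << 5)

    static const struct { unsigned int adv, cap; } map[] = {
        { ADV_10_HALF,    CAP_10M_HALF  },
        { ADV_10_FULL,    CAP_10M_FULL  },
        { ADV_100_HALF,   CAP_100M_HALF },
        { ADV_100_FULL,   CAP_100M_FULL },
        { ADV_1000_FULL,  CAP_1G        },
        { ADV_10000_FULL, CAP_10G       },
    };

    static unsigned int speed_cap_mask(unsigned int advertising)
    {
        unsigned int mask = 0;
        unsigned int i;

        for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
            if (advertising & map[i].adv)
                mask |= map[i].cap;
        return mask;
    }

    int main(void)
    {
        printf("%#x\n", speed_cap_mask(ADV_100_FULL | ADV_1000_FULL)); /* 0x18 */
        return 0;
    }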
+23 -23
drivers/net/bnx2x/bnx2x_link.c
··· 778 778 { 779 779 u32 nig_reg_adress_crd_weight = 0; 780 780 u32 pbf_reg_adress_crd_weight = 0; 781 - /* Calculate and set BW for this COS*/ 782 - const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw; 783 - const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw; 781 + /* Calculate and set BW for this COS - use 1 instead of 0 for BW */ 782 + const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw; 783 + const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw; 784 784 785 785 switch (cos_entry) { 786 786 case 0: ··· 852 852 /* Calculate total BW requested */ 853 853 for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { 854 854 if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) { 855 - 856 - if (0 == ets_params->cos[cos_idx].params.bw_params.bw) { 857 - DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW" 858 - "was set to 0\n"); 859 - return -EINVAL; 855 + *total_bw += 856 + ets_params->cos[cos_idx].params.bw_params.bw; 860 857 } 861 - *total_bw += 862 - ets_params->cos[cos_idx].params.bw_params.bw; 863 - } 864 858 } 865 859 866 - /*Check taotl BW is valid */ 860 + /* Check total BW is valid */ 867 861 if ((100 != *total_bw) || (0 == *total_bw)) { 868 862 if (0 == *total_bw) { 869 863 DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW" ··· 1720 1726 1721 1727 /* Check loopback mode */ 1722 1728 if (lb) 1723 - val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK; 1729 + val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK; 1724 1730 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); 1725 1731 bnx2x_set_xumac_nig(params, 1726 1732 ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); ··· 3623 3629 /* Advertised speeds */ 3624 3630 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3625 3631 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); 3632 + 3633 + /* Advertised and set FEC (Forward Error Correction) */ 3634 + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3635 + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2, 3636 + (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY | 3637 + MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ)); 3626 3638 3627 3639 /* Enable CL37 BAM */ 3628 3640 if (REG_RD(bp, params->shmem_base + ··· 5924 5924 (tmp | EMAC_LED_OVERRIDE)); 5925 5925 /* 5926 5926 * return here without enabling traffic 5927 - * LED blink andsetting rate in ON mode. 5927 + * LED blink and setting rate in ON mode. 5928 5928 * In oper mode, enabling LED blink 5929 5929 * and setting rate is needed. 5930 5930 */ ··· 5936 5936 * This is a work-around for HW issue found when link 5937 5937 * is up in CL73 5938 5938 */ 5939 - REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); 5939 + if ((!CHIP_IS_E3(bp)) || 5940 + (CHIP_IS_E3(bp) && 5941 + mode == LED_MODE_ON)) 5942 + REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); 5943 + 5940 5944 if (CHIP_IS_E1x(bp) || 5941 5945 CHIP_IS_E2(bp) || 5942 5946 (mode == LED_MODE_ON)) ··· 10642 10638 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, 10643 10639 .addr = 0xff, 10644 10640 .def_md_devad = 0, 10645 - .flags = (FLAGS_HW_LOCK_REQUIRED | 10646 - FLAGS_TX_ERROR_CHECK), 10641 + .flags = FLAGS_HW_LOCK_REQUIRED, 10647 10642 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10648 10643 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10649 10644 .mdio_ctrl = 0, ··· 10768 10765 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, 10769 10766 .addr = 0xff, 10770 10767 .def_md_devad = 0, 10771 - .flags = (FLAGS_INIT_XGXS_FIRST | 10772 - FLAGS_TX_ERROR_CHECK), 10768 + .flags = FLAGS_INIT_XGXS_FIRST, 10773 10769 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10774 10770 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10775 10771 .mdio_ctrl = 0, ··· 10799 10797 .addr = 0xff, 10800 10798 .def_md_devad = 0, 10801 10799 .flags = (FLAGS_HW_LOCK_REQUIRED | 10802 - FLAGS_INIT_XGXS_FIRST | 10803 - FLAGS_TX_ERROR_CHECK), 10800 + FLAGS_INIT_XGXS_FIRST), 10804 10801 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10805 10802 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10806 10803 .mdio_ctrl = 0, ··· 10830 10829 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 10831 10830 .addr = 0xff, 10832 10831 .def_md_devad = 0, 10833 - .flags = (FLAGS_FAN_FAILURE_DET_REQ | 10834 - FLAGS_TX_ERROR_CHECK), 10832 + .flags = FLAGS_FAN_FAILURE_DET_REQ, 10835 10833 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10836 10834 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10837 10835 .mdio_ctrl = 0,
+128 -50
drivers/net/bnx2x/bnx2x_main.c
··· 407 407 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); 408 408 409 409 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); 410 - opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) | 411 - (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); 410 + opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) | 411 + (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); 412 412 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); 413 413 414 414 #ifdef __BIG_ENDIAN ··· 1419 1419 if (!CHIP_IS_E1(bp)) { 1420 1420 /* init leading/trailing edge */ 1421 1421 if (IS_MF(bp)) { 1422 - val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); 1422 + val = (0xee0f | (1 << (BP_VN(bp) + 4))); 1423 1423 if (bp->port.pmf) 1424 1424 /* enable nig and gpio3 attention */ 1425 1425 val |= 0x1100; ··· 1471 1471 1472 1472 /* init leading/trailing edge */ 1473 1473 if (IS_MF(bp)) { 1474 - val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); 1474 + val = (0xee0f | (1 << (BP_VN(bp) + 4))); 1475 1475 if (bp->port.pmf) 1476 1476 /* enable nig and gpio3 attention */ 1477 1477 val |= 0x1100; ··· 2287 2287 int vn; 2288 2288 2289 2289 bp->vn_weight_sum = 0; 2290 - for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2290 + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2291 2291 u32 vn_cfg = bp->mf_config[vn]; 2292 2292 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2293 2293 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; ··· 2320 2320 CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2321 2321 } 2322 2322 2323 + /* returns func by VN for current port */ 2324 + static inline int func_by_vn(struct bnx2x *bp, int vn) 2325 + { 2326 + return 2 * vn + BP_PORT(bp); 2327 + } 2328 + 2323 2329 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) 2324 2330 { 2325 2331 struct rate_shaping_vars_per_vn m_rs_vn; 2326 2332 struct fairness_vars_per_vn m_fair_vn; 2327 2333 u32 vn_cfg = bp->mf_config[vn]; 2328 - int func = 2*vn + BP_PORT(bp); 2334 + int func = func_by_vn(bp, vn); 2329 2335 u16 vn_min_rate, vn_max_rate; 2330 2336 int i; 2331 2337 ··· 2428 2422 * 2429 2423 * and there are 2 functions per port 2430 2424 */ 2431 - for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2425 + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2432 2426 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); 2433 2427 2434 2428 if (func >= E1H_FUNC_MAX) ··· 2460 2454 2461 2455 /* calculate and set min-max rate for each vn */ 2462 2456 if (bp->port.pmf) 2463 - for (vn = VN_0; vn < E1HVN_MAX; vn++) 2457 + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) 2464 2458 bnx2x_init_vn_minmax(bp, vn); 2465 2459 2466 2460 /* always enable rate shaping and fairness */ ··· 2479 2473 2480 2474 static inline void bnx2x_link_sync_notify(struct bnx2x *bp) 2481 2475 { 2482 - int port = BP_PORT(bp); 2483 2476 int func; 2484 2477 int vn; 2485 2478 2486 2479 /* Set the attention towards other drivers on the same port */ 2487 - for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2488 - if (vn == BP_E1HVN(bp)) 2480 + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2481 + if (vn == BP_VN(bp)) 2489 2482 continue; 2490 2483 2491 - func = ((vn << 1) | port); 2484 + func = func_by_vn(bp, vn); 2492 2485 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + 2493 2486 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); 2494 2487 } ··· 2582 2577 bnx2x_dcbx_pmf_update(bp); 2583 2578 2584 2579 /* enable nig attention */ 2585 - val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); 2580 + val = (0xff0f | (1 << (BP_VN(bp) + 4))); 2586 2581 if (bp->common.int_block == INT_BLOCK_HC) { 2587 2582 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); 2588 2583 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); ··· 2761 2756 u16 tpa_agg_size = 0; 2762 2757 2763 2758 if (!fp->disable_tpa) { 2764 - pause->sge_th_hi = 250; 2765 - pause->sge_th_lo = 150; 2759 + pause->sge_th_lo = SGE_TH_LO(bp); 2760 + pause->sge_th_hi = SGE_TH_HI(bp); 2761 + 2762 + /* validate SGE ring has enough to cross high threshold */ 2763 + WARN_ON(bp->dropless_fc && 2764 + pause->sge_th_hi + FW_PREFETCH_CNT > 2765 + MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); 2766 + 2766 2767 tpa_agg_size = min_t(u32, 2767 2768 (min_t(u32, 8, MAX_SKB_FRAGS) * 2768 2769 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); ··· 2782 2771 2783 2772 /* pause - not for e1 */ 2784 2773 if (!CHIP_IS_E1(bp)) { 2785 - pause->bd_th_hi = 350; 2786 - pause->bd_th_lo = 250; 2787 - pause->rcq_th_hi = 350; 2788 - pause->rcq_th_lo = 250; 2774 + pause->bd_th_lo = BD_TH_LO(bp); 2775 + pause->bd_th_hi = BD_TH_HI(bp); 2776 + 2777 + pause->rcq_th_lo = RCQ_TH_LO(bp); 2778 + pause->rcq_th_hi = RCQ_TH_HI(bp); 2779 + /* 2780 + * validate that rings have enough entries to cross 2781 + * high thresholds 2782 + */ 2783 + WARN_ON(bp->dropless_fc && 2784 + pause->bd_th_hi + FW_PREFETCH_CNT > 2785 + bp->rx_ring_size); 2786 + WARN_ON(bp->dropless_fc && 2787 + pause->rcq_th_hi + FW_PREFETCH_CNT > 2788 + NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT); 2789 2789 2790 2790 pause->pri_map = 1; 2791 2791 } ··· 2824 2802 * For PF Clients it should be the maximum avaliable number. 2825 2803 * VF driver(s) may want to define it to a smaller value. 2826 2804 */ 2827 - rxq_init->max_tpa_queues = 2828 - (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : 2829 - ETH_MAX_AGGREGATION_QUEUES_E1H_E2); 2805 + rxq_init->max_tpa_queues = MAX_AGG_QS(bp); 2830 2806 2831 2807 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; 2832 2808 rxq_init->fw_sb_id = fp->fw_sb_id; ··· 4828 4808 hc_sm->time_to_expire = 0xFFFFFFFF; 4829 4809 } 4830 4810 4811 + 4812 + /* allocates state machine ids. */ 4813 + static inline 4814 + void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) 4815 + { 4816 + /* zero out state machine indices */ 4817 + /* rx indices */ 4818 + index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 4819 + 4820 + /* tx indices */ 4821 + index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 4822 + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 4823 + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 4824 + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 4825 + 4826 + /* map indices */ 4827 + /* rx indices */ 4828 + index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 4829 + SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 4830 + 4831 + /* tx indices */ 4832 + index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 4833 + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 4834 + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 4835 + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 4836 + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 4837 + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 4838 + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 4839 + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; 4840 + } 4841 + 4831 4842 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, 4832 4843 u8 vf_valid, int fw_sb_id, int igu_sb_id) 4833 4844 { ··· 4890 4839 hc_sm_p = sb_data_e2.common.state_machine; 4891 4840 sb_data_p = (u32 *)&sb_data_e2; 4892 4841 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); 4842 + bnx2x_map_sb_state_machines(sb_data_e2.index_data); 4893 4843 } else { 4894 4844 memset(&sb_data_e1x, 0, 4895 4845 sizeof(struct hc_status_block_data_e1x)); ··· 4905 4853 hc_sm_p = sb_data_e1x.common.state_machine; 4906 4854 sb_data_p = (u32 *)&sb_data_e1x; 4907 4855 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); 4856 + bnx2x_map_sb_state_machines(sb_data_e1x.index_data); 4908 4857 } 4909 4858 4910 4859 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], ··· 4943 4890 int igu_seg_id; 4944 4891 int port = BP_PORT(bp); 4945 4892 int func = BP_FUNC(bp); 4946 - int reg_offset; 4893 + int reg_offset, reg_offset_en5; 4947 4894 u64 section; 4948 4895 int index; 4949 4896 struct hc_sp_status_block_data sp_sb_data; ··· 4966 4913 4967 4914 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4968 4915 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 4916 + reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 4917 + MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0); 4969 4918 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 4970 4919 int sindex; 4971 4920 /* take care of sig[0]..sig[4] */ ··· 4982 4927 * and not 16 between the different groups 4983 4928 */ 4984 4929 bp->attn_group[index].sig[4] = REG_RD(bp, 4985 - reg_offset + 0x10 + 0x4*index); 4930 + reg_offset_en5 + 0x4*index); 4986 4931 else 4987 4932 bp->attn_group[index].sig[4] = 0; 4988 4933 } ··· 5857 5802 * take the UNDI lock to protect undi_unload flow from accessing 5858 5803 * registers while we're resetting the chip 5859 5804 */ 5860 - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 5805 + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 5861 5806 5862 5807 bnx2x_reset_common(bp); 5863 5808 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); ··· 5869 5814 } 5870 5815 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); 5871 5816 5872 - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 5817 + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 5873 5818 5874 5819 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); 5875 5820 ··· 6726 6671 if (CHIP_MODE_IS_4_PORT(bp)) 6727 6672 dsb_idx = BP_FUNC(bp); 6728 6673 else 6729 - dsb_idx = BP_E1HVN(bp); 6674 + dsb_idx = BP_VN(bp); 6730 6675 6731 6676 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? 6732 6677 IGU_BC_BASE_DSB_PROD + dsb_idx : 6733 6678 IGU_NORM_BASE_DSB_PROD + dsb_idx); 6734 6679 6680 + /* 6681 + * igu prods come in chunks of E1HVN_MAX (4) - 6682 + * it does not matter what the current chip mode is 6683 + */ 6735 6684 for (i = 0; i < (num_segs * E1HVN_MAX); 6736 6685 i += E1HVN_MAX) { 6737 6686 addr = IGU_REG_PROD_CONS_MEMORY + ··· 7627 7568 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 7628 7569 u8 *mac_addr = bp->dev->dev_addr; 7629 7570 u32 val; 7571 + u16 pmc; 7572 + 7630 7573 /* The mac address is written to entries 1-4 to 7631 - preserve entry 0 which is used by the PMF */ 7632 - u8 entry = (BP_E1HVN(bp) + 1)*8; 7574 + * preserve entry 0 which is used by the PMF 7575 + */ 7576 + u8 entry = (BP_VN(bp) + 1)*8; 7633 7577 7634 7578 val = (mac_addr[0] << 8) | mac_addr[1]; 7635 7579 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); ··· 7640 7578 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 7641 7579 (mac_addr[4] << 8) | mac_addr[5]; 7642 7580 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 7581 + 7582 + /* Enable the PME and clear the status */ 7583 + pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc); 7584 + pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; 7585 + pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc); 7643 7586 7644 7587 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 7645 7588 ··· 8613 8546 /* Check if there is any driver already loaded */ 8614 8547 val = REG_RD(bp, MISC_REG_UNPREPARED); 8615 8548 if (val == 0x1) { 8616 - /* Check if it is the UNDI driver 8549 + 8550 + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 8551 + /* 8552 + * Check if it is the UNDI driver 8617 8553 * UNDI driver initializes CID offset for normal bell to 0x7 8618 8554 */ 8619 - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 8620 8555 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 8621 8556 if (val == 0x7) { 8622 8557 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; ··· 8655 8586 8656 8587 bnx2x_fw_command(bp, reset_code, 0); 8657 8588 } 8658 - 8659 - /* now it's safe to release the lock */ 8660 - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 8661 8589 bnx2x_undi_int_disable(bp); 8663 8591 port = BP_PORT(bp); ··· 8705 8639 bp->fw_seq = 8706 8640 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) & 8707 8641 DRV_MSG_SEQ_NUMBER_MASK); 8708 - } else 8709 - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 8642 + } 8643 + 8644 + /* now it's safe to release the lock */ 8645 + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 8710 8646 } 8711 8647 } 8712 8648 ··· 8845 8777 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) 8846 8778 { 8847 8779 int pfid = BP_FUNC(bp); 8848 - int vn = BP_E1HVN(bp); 8849 8780 int igu_sb_id; 8850 8781 u32 val; 8851 8782 u8 fid, igu_sb_cnt = 0; 8852 8783 8853 8784 bp->igu_base_sb = 0xff; 8854 8785 if (CHIP_INT_MODE_IS_BC(bp)) { 8786 + int vn = BP_VN(bp); 8855 8787 igu_sb_cnt = bp->igu_sb_cnt; 8856 8788 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * 8857 8789 FP_SB_MAX_E1x; ··· 9484 9416 bp->igu_base_sb = 0; 9485 9417 } else { 9486 9418 bp->common.int_block = INT_BLOCK_IGU; 9419 + 9420 + /* do not allow device reset during IGU info processing */ 9421 + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 9422 + 9487 9423 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 9488 9424 9489 9425 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { ··· 9519 9447 9520 9448 bnx2x_get_igu_cam_info(bp); 9521 9449 9450 + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 9522 9451 } 9523 9452 9524 9453 /* ··· 9546 9473 9547 9474 bp->mf_ov = 0; 9548 9475 bp->mf_mode = 0; 9549 - vn = BP_E1HVN(bp); 9476 + vn = BP_VN(bp); 9550 9477 9551 9478 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { 9552 9479 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", ··· 9665 9592 9666 9593 /* port info */ 9667 9594 bnx2x_get_port_hwinfo(bp); 9668 - 9669 - if (!BP_NOMCP(bp)) { 9670 - bp->fw_seq = 9671 - (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 9672 - DRV_MSG_SEQ_NUMBER_MASK); 9673 - BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 9674 - } 9675 9595 9676 9596 /* Get MAC addresses */ 9677 9597 bnx2x_get_mac_hwinfo(bp); ··· 9830 9764 /* need to reset chip if undi was active */ 9831 9765 if (!BP_NOMCP(bp)) 9832 9766 bnx2x_undi_unload(bp); 9767 + 9768 + /* init fw_seq after undi_unload! */ 9769 + if (!BP_NOMCP(bp)) { 9770 + bp->fw_seq = 9771 + (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 9772 + DRV_MSG_SEQ_NUMBER_MASK); 9773 + BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 9774 + } 9833 9775 9834 9776 if (CHIP_REV_IS_FPGA(bp)) 9835 9777 dev_err(&bp->pdev->dev, "FPGA detected\n"); ··· 10333 10259 /* clean indirect addresses */ 10334 10260 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 10335 10261 PCICFG_VENDOR_ID_OFFSET); 10336 - /* Clean the following indirect addresses for all functions since it 10262 + /* 10263 + * Clean the following indirect addresses for all functions since it 10337 10264 * is not used by the driver. 10338 10265 */ 10339 10266 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); 10340 10267 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); 10341 10268 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); 10342 10269 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); 10343 - REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); 10344 - REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); 10345 - REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); 10346 - REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); 10270 + 10271 + if (CHIP_IS_E1x(bp)) { 10272 + REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); 10273 + REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); 10274 + REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); 10275 + REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); 10276 + } 10347 10277 10348 10278 /* 10349 10279 * Enable internal target-read (in case we are probed after PF FLR).
+17 -2
drivers/net/bnx2x/bnx2x_reg.h
··· 1384 1384 Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ 1385 1385 #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 1386 1386 #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 1387 + /* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped 1388 + * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC 1389 + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] 1390 + * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 1391 + * parity; [31-10] Reserved; */ 1392 + #define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688 1393 + /* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped 1394 + * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC 1395 + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] 1396 + * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 1397 + * parity; [31-10] Reserved; */ 1398 + #define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0 1387 1399 /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu 1388 1400 128 bit vector */ 1389 1401 #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 ··· 5332 5320 #define XCM_REG_XX_OVFL_EVNT_ID 0x20058 5333 5321 #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) 5334 5322 #define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) 5335 - #define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3) 5323 + #define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2) 5336 5324 #define XMAC_CTRL_REG_RX_EN (0x1<<1) 5337 5325 #define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) 5338 5326 #define XMAC_CTRL_REG_TX_EN (0x1<<0) ··· 5778 5766 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 5779 5767 #define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 5780 5768 #define HW_LOCK_RESOURCE_SPIO 2 5781 - #define HW_LOCK_RESOURCE_UNDI 5 5769 + #define HW_LOCK_RESOURCE_RESET 5 5782 5770 #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) 5783 5771 #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) 5784 5772 #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) ··· 6865 6853 #define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 6866 6854 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 6867 6855 #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 6856 + #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12 6857 + #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000 6858 + #define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000 6868 6859 #define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96 6869 6860 #define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 6870 6861 #define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
+4 -3
drivers/net/bnx2x/bnx2x_stats.c
··· 710 710 break; 711 711 712 712 case MAC_TYPE_NONE: /* unreached */ 713 - BNX2X_ERR("stats updated by DMAE but no MAC active\n"); 713 + DP(BNX2X_MSG_STATS, 714 + "stats updated by DMAE but no MAC active\n"); 714 715 return -1; 715 716 716 717 default: /* unreached */ ··· 1392 1391 1393 1392 static void bnx2x_func_stats_base_init(struct bnx2x *bp) 1394 1393 { 1395 - int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX; 1394 + int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX; 1396 1395 u32 func_stx; 1397 1396 1398 1397 /* sanity */ ··· 1405 1404 func_stx = bp->func_stx; 1406 1405 1407 1406 for (vn = VN_0; vn < vn_max; vn++) { 1408 - int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn; 1407 + int mb_idx = BP_FW_MB_IDX_VN(bp, vn); 1409 1408 1410 1409 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); 1411 1410 bnx2x_func_stats_init(bp);
+2 -1
drivers/net/bonding/bond_3ad.c
··· 2168 2168 } 2169 2169 2170 2170 re_arm: 2171 - queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); 2171 + if (!bond->kill_timers) 2172 + queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); 2172 2173 out: 2173 2174 read_unlock(&bond->lock); 2174 2175 }
+2 -1
drivers/net/bonding/bond_alb.c
··· 1440 1440 } 1441 1441 1442 1442 re_arm: 1443 - queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); 1443 + if (!bond->kill_timers) 1444 + queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); 1444 1445 out: 1445 1446 read_unlock(&bond->lock); 1446 1447 }
+8 -5
drivers/net/bonding/bond_main.c
··· 777 777 778 778 read_lock(&bond->lock); 779 779 780 + if (bond->kill_timers) 781 + goto out; 782 + 780 783 /* rejoin all groups on bond device */ 781 784 __bond_resend_igmp_join_requests(bond->dev); 782 785 ··· 793 790 __bond_resend_igmp_join_requests(vlan_dev); 794 791 } 795 792 796 - if (--bond->igmp_retrans > 0) 793 + if ((--bond->igmp_retrans > 0) && !bond->kill_timers) 797 794 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); 798 - 795 + out: 799 796 read_unlock(&bond->lock); 800 797 } 801 798 ··· 2541 2538 } 2542 2539 2543 2540 re_arm: 2544 - if (bond->params.miimon) 2541 + if (bond->params.miimon && !bond->kill_timers) 2545 2542 queue_delayed_work(bond->wq, &bond->mii_work, 2546 2543 msecs_to_jiffies(bond->params.miimon)); 2547 2544 out: ··· 2889 2886 } 2890 2887 2891 2888 re_arm: 2892 - if (bond->params.arp_interval) 2889 + if (bond->params.arp_interval && !bond->kill_timers) 2893 2890 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); 2894 2891 out: 2895 2892 read_unlock(&bond->lock); ··· 3157 3154 bond_ab_arp_probe(bond); 3158 3155 3159 3156 re_arm: 3160 - if (bond->params.arp_interval) 3157 + if (bond->params.arp_interval && !bond->kill_timers) 3161 3158 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); 3162 3159 out: 3163 3160 read_unlock(&bond->lock);
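All three bonding hunks apply one pattern: a monitor work item must not re-queue itself once teardown has set kill_timers, or a freshly queued instance can fire after the timers were supposedly cancelled. A condensed kernel-style sketch of the idiom, assuming a driver-private structure like struct bonding's:

    #include <linux/workqueue.h>

    struct monitor {
        struct delayed_work work;
        int kill_timers;        /* set by teardown before cancelling */
        unsigned long interval; /* re-arm period, in jiffies */
    };

    static void monitor_fn(struct work_struct *work)
    {
        struct monitor *m = container_of(work, struct monitor, work.work);

        /* ... poll link state, fail over if needed ... */

        /* Re-arm only while teardown has not started; otherwise the work
         * queued here can run after the device state is being torn down. */
        if (!m->kill_timers)
            schedule_delayed_work(&m->work, m->interval);
    }

This is a sketch only; the real code re-queues on the bond's own workqueue and holds bond->lock around the check.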
+1
drivers/net/can/ti_hecc.c
··· 46 46 #include <linux/skbuff.h> 47 47 #include <linux/platform_device.h> 48 48 #include <linux/clk.h> 49 + #include <linux/io.h> 49 50 50 51 #include <linux/can/dev.h> 51 52 #include <linux/can/error.h>
+18 -5
drivers/net/cxgb3/cxgb3_offload.c
··· 1146 1146 if (te && te->ctx && te->client && te->client->redirect) { 1147 1147 update_tcb = te->client->redirect(te->ctx, old, new, e); 1148 1148 if (update_tcb) { 1149 + rcu_read_lock(); 1149 1150 l2t_hold(L2DATA(tdev), e); 1151 + rcu_read_unlock(); 1150 1152 set_l2t_ix(tdev, tid, e); 1151 1153 } 1152 1154 } 1153 1155 } 1154 - l2t_release(L2DATA(tdev), e); 1156 + l2t_release(tdev, e); 1155 1157 } 1156 1158 1157 1159 /* ··· 1266 1264 goto out_free; 1267 1265 1268 1266 err = -ENOMEM; 1269 - L2DATA(dev) = t3_init_l2t(l2t_capacity); 1267 + RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity)); 1270 1268 if (!L2DATA(dev)) 1271 1269 goto out_free; 1272 1270 ··· 1300 1298 1301 1299 out_free_l2t: 1302 1300 t3_free_l2t(L2DATA(dev)); 1303 - L2DATA(dev) = NULL; 1301 + rcu_assign_pointer(dev->l2opt, NULL); 1304 1302 out_free: 1305 1303 kfree(t); 1306 1304 return err; 1307 1305 } 1308 1306 1307 + static void clean_l2_data(struct rcu_head *head) 1308 + { 1309 + struct l2t_data *d = container_of(head, struct l2t_data, rcu_head); 1310 + t3_free_l2t(d); 1311 + } 1312 + 1313 + 1309 1314 void cxgb3_offload_deactivate(struct adapter *adapter) 1310 1315 { 1311 1316 struct t3cdev *tdev = &adapter->tdev; 1312 1317 struct t3c_data *t = T3C_DATA(tdev); 1318 + struct l2t_data *d; 1313 1319 1314 1320 remove_adapter(adapter); 1315 1321 if (list_empty(&adapter_list)) ··· 1325 1315 1326 1316 free_tid_maps(&t->tid_maps); 1327 1317 T3C_DATA(tdev) = NULL; 1328 - t3_free_l2t(L2DATA(tdev)); 1329 - L2DATA(tdev) = NULL; 1318 + rcu_read_lock(); 1319 + d = L2DATA(tdev); 1320 + rcu_read_unlock(); 1321 + rcu_assign_pointer(tdev->l2opt, NULL); 1322 + call_rcu(&d->rcu_head, clean_l2_data); 1330 1323 if (t->nofail_skb) 1331 1324 kfree_skb(t->nofail_skb); 1332 1325 kfree(t);
+12 -3
drivers/net/cxgb3/l2t.c
··· 300 300 struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, 301 301 struct net_device *dev) 302 302 { 303 - struct l2t_entry *e; 304 - struct l2t_data *d = L2DATA(cdev); 303 + struct l2t_entry *e = NULL; 304 + struct l2t_data *d; 305 + int hash; 305 306 u32 addr = *(u32 *) neigh->primary_key; 306 307 int ifidx = neigh->dev->ifindex; 307 - int hash = arp_hash(addr, ifidx, d); 308 308 struct port_info *p = netdev_priv(dev); 309 309 int smt_idx = p->port_id; 310 + 311 + rcu_read_lock(); 312 + d = L2DATA(cdev); 313 + if (!d) 314 + goto done_rcu; 315 + 316 + hash = arp_hash(addr, ifidx, d); 310 317 311 318 write_lock_bh(&d->lock); 312 319 for (e = d->l2tab[hash].first; e; e = e->next) ··· 345 338 } 346 339 done: 347 340 write_unlock_bh(&d->lock); 341 + done_rcu: 342 + rcu_read_unlock(); 348 343 return e; 349 344 } 350 345
+12 -4
drivers/net/cxgb3/l2t.h
··· 76 76 atomic_t nfree; /* number of free entries */ 77 77 rwlock_t lock; 78 78 struct l2t_entry l2tab[0]; 79 + struct rcu_head rcu_head; /* to handle rcu cleanup */ 79 80 }; 80 81 81 82 typedef void (*arp_failure_handler_func)(struct t3cdev * dev, ··· 100 99 /* 101 100 * Getting to the L2 data from an offload device. 102 101 */ 103 - #define L2DATA(dev) ((dev)->l2opt) 102 + #define L2DATA(cdev) (rcu_dereference((cdev)->l2opt)) 104 103 105 104 #define W_TCB_L2T_IX 0 106 105 #define S_TCB_L2T_IX 7 ··· 127 126 return t3_l2t_send_slow(dev, skb, e); 128 127 } 129 128 130 - static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e) 129 + static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e) 131 130 { 132 - if (atomic_dec_and_test(&e->refcnt)) 131 + struct l2t_data *d; 132 + 133 + rcu_read_lock(); 134 + d = L2DATA(t); 135 + 136 + if (atomic_dec_and_test(&e->refcnt) && d) 133 137 t3_l2e_free(d, e); 138 + 139 + rcu_read_unlock(); 134 140 } 135 141 136 142 static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) 137 143 { 138 - if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ 144 + if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ 139 145 atomic_dec(&d->nfree); 140 146 } 141 147
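Taken together, the three cxgb3 hunks convert dev->l2opt into an RCU-protected pointer: lookups wrap L2DATA() in rcu_read_lock()/rcu_read_unlock(), and teardown unpublishes the pointer first, deferring the free through call_rcu() so in-flight readers drain before the table is destroyed. A condensed sketch of that lifecycle using the stock RCU primitives (structure and function names invented):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct l2_data {
        struct rcu_head rcu_head;
        /* ... hash table of L2 entries ... */
    };

    static struct l2_data __rcu *l2opt;

    /* Reader: pin the table only across the lookup; never sleep inside. */
    static void lookup(void)
    {
        struct l2_data *d;

        rcu_read_lock();
        d = rcu_dereference(l2opt);
        if (d) {
            /* ... walk d's table ... */
        }
        rcu_read_unlock();
    }

    static void free_l2(struct rcu_head *head)
    {
        kfree(container_of(head, struct l2_data, rcu_head));
    }

    /* Writer: unpublish first, free only after a grace period. */
    static void teardown(void)
    {
        struct l2_data *d = rcu_dereference_protected(l2opt, 1);

        rcu_assign_pointer(l2opt, NULL);
        if (d)
            call_rcu(&d->rcu_head, free_l2);
    }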
+3
drivers/net/cxgb4/cxgb4_main.c
··· 3712 3712 setup_debugfs(adapter); 3713 3713 } 3714 3714 3715 + /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 3716 + pdev->needs_freset = 1; 3717 + 3715 3718 if (is_offload(adapter)) 3716 3719 attach_ulds(adapter); 3717 3720
+6
drivers/net/e1000/e1000_hw.c
··· 4026 4026 checksum += eeprom_data; 4027 4027 } 4028 4028 4029 + #ifdef CONFIG_PARISC 4030 + /* This is a signature and not a checksum on HP c8000 */ 4031 + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) 4032 + return E1000_SUCCESS; 4033 + 4034 + #endif 4029 4035 if (checksum == (u16) EEPROM_SUM) 4030 4036 return E1000_SUCCESS; 4031 4037 else {
+4 -4
drivers/net/gianfar_ethtool.c
··· 1669 1669 u32 i = 0; 1670 1670 1671 1671 list_for_each_entry(comp, &priv->rx_list.list, list) { 1672 - if (i <= cmd->rule_cnt) { 1673 - rule_locs[i] = comp->fs.location; 1674 - i++; 1675 - } 1672 + if (i == cmd->rule_cnt) 1673 + return -EMSGSIZE; 1674 + rule_locs[i] = comp->fs.location; 1675 + i++; 1676 1676 } 1677 1677 1678 1678 cmd->data = MAX_FILER_IDX;
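The rewritten loop turns a silent truncation (plus an off-by-one: i <= cmd->rule_cnt allowed one write past the caller's array) into an explicit -EMSGSIZE as soon as the output buffer fills. The shape of the fix, self-contained:

    #include <errno.h>
    #include <stdio.h>

    static int copy_rule_locs(const unsigned int *rules, int nrules,
                              unsigned int *locs, int cap)
    {
        int i;

        for (i = 0; i < nrules; i++) {
            if (i == cap)
                return -EMSGSIZE;  /* buffer full: report, never overflow */
            locs[i] = rules[i];
        }
        return i;                  /* entries actually written */
    }

    int main(void)
    {
        unsigned int rules[4] = { 10, 11, 12, 13 };
        unsigned int locs[2];

        printf("%d\n", copy_rule_locs(rules, 4, locs, 2)); /* -90 on Linux */
        return 0;
    }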
+10 -2
drivers/net/greth.c
··· 428 428 dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); 429 429 430 430 status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN); 431 + greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN; 431 432 432 433 /* Wrap around descriptor ring */ 433 434 if (greth->tx_next == GRETH_TXBD_NUM_MASK) { ··· 491 490 if (nr_frags != 0) 492 491 status = GRETH_TXBD_MORE; 493 492 494 - status |= GRETH_TXBD_CSALL; 493 + if (skb->ip_summed == CHECKSUM_PARTIAL) 494 + status |= GRETH_TXBD_CSALL; 495 495 status |= skb_headlen(skb) & GRETH_BD_LEN; 496 496 if (greth->tx_next == GRETH_TXBD_NUM_MASK) 497 497 status |= GRETH_BD_WR; ··· 515 513 greth->tx_skbuff[curr_tx] = NULL; 516 514 bdp = greth->tx_bd_base + curr_tx; 517 515 518 - status = GRETH_TXBD_CSALL | GRETH_BD_EN; 516 + status = GRETH_BD_EN; 517 + if (skb->ip_summed == CHECKSUM_PARTIAL) 518 + status |= GRETH_TXBD_CSALL; 519 519 status |= frag->size & GRETH_BD_LEN; 520 520 521 521 /* Wrap around descriptor ring */ ··· 645 641 dev->stats.tx_fifo_errors++; 646 642 } 647 643 dev->stats.tx_packets++; 644 + dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last]; 648 645 greth->tx_last = NEXT_TX(greth->tx_last); 649 646 greth->tx_free++; 650 647 } ··· 700 695 greth->tx_skbuff[greth->tx_last] = NULL; 701 696 702 697 greth_update_tx_stats(dev, stat); 698 + dev->stats.tx_bytes += skb->len; 703 699 704 700 bdp = greth->tx_bd_base + greth->tx_last; 705 701 ··· 802 796 memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len); 803 797 804 798 skb->protocol = eth_type_trans(skb, dev); 799 + dev->stats.rx_bytes += pkt_len; 805 800 dev->stats.rx_packets++; 806 801 netif_receive_skb(skb); 807 802 } ··· 917 910 918 911 skb->protocol = eth_type_trans(skb, dev); 919 912 dev->stats.rx_packets++; 913 + dev->stats.rx_bytes += pkt_len; 920 914 netif_receive_skb(skb); 921 915 922 916 greth->rx_skbuff[greth->rx_cur] = newskb;
+1
drivers/net/greth.h
··· 103 103 104 104 unsigned char *tx_bufs[GRETH_TXBD_NUM]; 105 105 unsigned char *rx_bufs[GRETH_RXBD_NUM]; 106 + u16 tx_bufs_length[GRETH_TXBD_NUM]; 106 107 107 108 u16 tx_next; 108 109 u16 tx_last;
+33 -19
drivers/net/ibmveth.c
··· 636 636 netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", 637 637 netdev->irq, rc); 638 638 do { 639 - rc = h_free_logical_lan(adapter->vdev->unit_address); 640 - } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); 639 + lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); 640 + } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); 641 641 642 642 goto err_out; 643 643 } ··· 757 757 struct ibmveth_adapter *adapter = netdev_priv(dev); 758 758 unsigned long set_attr, clr_attr, ret_attr; 759 759 unsigned long set_attr6, clr_attr6; 760 - long ret, ret6; 760 + long ret, ret4, ret6; 761 761 int rc1 = 0, rc2 = 0; 762 762 int restart = 0; 763 763 ··· 770 770 771 771 set_attr = 0; 772 772 clr_attr = 0; 773 + set_attr6 = 0; 774 + clr_attr6 = 0; 773 775 774 776 if (data) { 775 777 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; ··· 786 784 if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && 787 785 !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && 788 786 (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { 789 - ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr, 787 + ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr, 790 788 set_attr, &ret_attr); 791 789 792 - if (ret != H_SUCCESS) { 790 + if (ret4 != H_SUCCESS) { 793 791 netdev_err(dev, "unable to change IPv4 checksum " 794 792 "offload settings. %d rc=%ld\n", 795 - data, ret); 793 + data, ret4); 796 794 797 - ret = h_illan_attributes(adapter->vdev->unit_address, 798 - set_attr, clr_attr, &ret_attr); 795 + h_illan_attributes(adapter->vdev->unit_address, 796 + set_attr, clr_attr, &ret_attr); 797 + 798 + if (data == 1) 799 + dev->features &= ~NETIF_F_IP_CSUM; 800 + 801 } else { 802 803 adapter->fw_ipv4_csum_support = data; 804 } ··· 810 804 if (ret6 != H_SUCCESS) { 811 805 netdev_err(dev, "unable to change IPv6 checksum " 812 806 "offload settings. %d rc=%ld\n", 813 - data, ret); 807 + data, ret6); 814 808 815 - ret = h_illan_attributes(adapter->vdev->unit_address, 816 - set_attr6, clr_attr6, 817 - &ret_attr); 809 + h_illan_attributes(adapter->vdev->unit_address, 810 + set_attr6, clr_attr6, &ret_attr); 811 + 812 + if (data == 1) 813 + dev->features &= ~NETIF_F_IPV6_CSUM; 814 + 818 815 } else 819 816 adapter->fw_ipv6_csum_support = data; 820 817 821 - if (ret != H_SUCCESS || ret6 != H_SUCCESS) 818 + if (ret4 == H_SUCCESS || ret6 == H_SUCCESS) 822 819 adapter->rx_csum = data; 823 820 else 824 821 rc1 = -EIO; ··· 939 930 union ibmveth_buf_desc descs[6]; 940 931 int last, i; 941 932 int force_bounce = 0; 933 + dma_addr_t dma_addr; 942 934 943 935 /* 944 936 * veth handles a maximum of 6 segments including the header, so ··· 1004 994 } 1005 995 1006 996 /* Map the header */ 1007 - descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data, 1008 - skb_headlen(skb), 1009 - DMA_TO_DEVICE); 1010 - if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address)) 997 + dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, 998 + skb_headlen(skb), DMA_TO_DEVICE); 999 + if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) 1011 1000 goto map_failed; 1012 1001 1013 1002 descs[0].fields.flags_len = desc_flags | skb_headlen(skb); 1003 + descs[0].fields.address = dma_addr; 1014 1004 1015 1005 /* Map the frags */ 1016 1006 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1017 - unsigned long dma_addr; 1018 1007 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1019 1008 1020 1009 dma_addr = dma_map_page(&adapter->vdev->dev, frag->page, ··· 1035 1026 netdev->stats.tx_bytes += skb->len; 1036 1027 } 1037 1028 1038 - for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++) 1029 + dma_unmap_single(&adapter->vdev->dev, 1030 + descs[0].fields.address, 1031 + descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK, 1032 + DMA_TO_DEVICE); 1033 + 1034 + for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) 1039 1035 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, 1040 1036 descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, 1041 1037 DMA_TO_DEVICE);
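Among the ibmveth fixes above, the transmit-path one is classic DMA API discipline: map, check dma_mapping_error() before the address reaches a descriptor, and unmap with the call that matches how the mapping was made (dma_unmap_single() for the header, dma_unmap_page() for the fragments). A condensed kernel-style sketch of that discipline (function name invented):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int xmit_header(struct device *dev, void *hdr, size_t len,
                           dma_addr_t *desc_addr)
    {
        dma_addr_t dma = dma_map_single(dev, hdr, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, dma))
            return -ENOMEM;     /* nothing published, nothing to undo */

        *desc_addr = dma;
        /* ... post descriptor, wait for the device to finish ... */

        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
        return 0;
    }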
+2 -2
drivers/net/ixgbe/ixgbe_main.c
··· 1321 1321 if (ring_is_rsc_enabled(rx_ring)) 1322 1322 pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); 1323 1323 1324 - /* if this is a skb from previous receive DMA will be 0 */ 1325 - if (rx_buffer_info->dma) { 1324 + /* linear means we are building an skb from multiple pages */ 1325 + if (!skb_is_nonlinear(skb)) { 1326 1326 u16 hlen; 1327 1327 if (pkt_is_rsc && 1328 1328 !(staterr & IXGBE_RXD_STAT_EOP) &&
+7 -1
drivers/net/netconsole.c
··· 799 799 } 800 800 } 801 801 802 - module_init(init_netconsole); 802 + /* 803 + * Use late_initcall to ensure netconsole is 804 + * initialized after network device driver if built-in. 805 + * 806 + * late_initcall() and module_init() are identical if built as module. 807 + */ 808 + late_initcall(init_netconsole); 803 809 module_exit(cleanup_netconsole);
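Switching from module_init() to late_initcall() only changes behavior when netconsole is built in: initcall levels run in a fixed order (core, postcore, arch, subsys, fs, device, late), so a late_initcall is guaranteed to run after device-level initcalls have probed the NICs it wants to log over; built as a module, the two macros are equivalent. The registration shape, for reference (module body elided):

    #include <linux/init.h>
    #include <linux/module.h>

    static int __init example_init(void)
    {
        /* built-in: device-level network drivers have registered by now */
        return 0;
    }

    static void __exit example_exit(void)
    {
    }

    late_initcall(example_init);    /* == module_init() when built as a module */
    module_exit(example_exit);
    MODULE_LICENSE("GPL");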
+10 -2
drivers/net/pch_gbe/pch_gbe.h
··· 127 127 128 128 /* Reset */ 129 129 #define PCH_GBE_ALL_RST 0x80000000 /* All reset */ 130 - #define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */ 131 - #define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */ 130 + #define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */ 131 + #define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */ 132 132 133 133 /* TCP/IP Accelerator Control */ 134 134 #define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */ ··· 275 275 /* DMA Control */ 276 276 #define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */ 277 277 #define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */ 278 + 279 + /* RX DMA STATUS */ 280 + #define PCH_GBE_IDLE_CHECK 0xFFFFFFFE 278 281 279 282 /* Wake On LAN Status */ 280 283 #define PCH_GBE_WLS_BR 0x00000008 /* Broadcas Address */ ··· 474 471 struct pch_gbe_buffer { 475 472 struct sk_buff *skb; 476 473 dma_addr_t dma; 474 + unsigned char *rx_buffer; 477 475 unsigned long time_stamp; 478 476 u16 length; 479 477 bool mapped; ··· 515 511 struct pch_gbe_rx_ring { 516 512 struct pch_gbe_rx_desc *desc; 517 513 dma_addr_t dma; 514 + unsigned char *rx_buff_pool; 515 + dma_addr_t rx_buff_pool_logic; 516 + unsigned int rx_buff_pool_size; 518 517 unsigned int size; 519 518 unsigned int count; 520 519 unsigned int next_to_use; ··· 629 622 unsigned long rx_buffer_len; 630 623 unsigned long tx_queue_len; 631 624 bool have_msi; 625 + bool rx_stop_flag; 632 626 }; 633 627 634 628 extern const char pch_driver_version[];
+213 -131
drivers/net/pch_gbe/pch_gbe_main.c
··· 20 20 21 21 #include "pch_gbe.h" 22 22 #include "pch_gbe_api.h" 23 - #include <linux/prefetch.h> 24 23 25 24 #define DRV_VERSION "1.00" 26 25 const char pch_driver_version[] = DRV_VERSION; ··· 33 34 #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ 34 35 #define PCH_GBE_COPYBREAK_DEFAULT 256 35 36 #define PCH_GBE_PCI_BAR 1 37 + #define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */ 36 38 37 39 /* Macros for ML7223 */ 38 40 #define PCI_VENDOR_ID_ROHM 0x10db 39 41 #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 42 + 43 + /* Macros for ML7831 */ 44 + #define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802 40 45 41 46 #define PCH_GBE_TX_WEIGHT 64 42 47 #define PCH_GBE_RX_WEIGHT 64 ··· 55 52 ) 56 53 57 54 /* Ethertype field values */ 55 + #define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880 58 56 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318 59 57 #define PCH_GBE_FRAME_SIZE_2048 2048 60 58 #define PCH_GBE_FRAME_SIZE_4096 4096 ··· 87 83 #define PCH_GBE_INT_ENABLE_MASK ( \ 88 84 PCH_GBE_INT_RX_DMA_CMPLT | \ 89 85 PCH_GBE_INT_RX_DSC_EMP | \ 86 + PCH_GBE_INT_RX_FIFO_ERR | \ 90 87 PCH_GBE_INT_WOL_DET | \ 91 88 PCH_GBE_INT_TX_CMPLT \ 92 89 ) 93 90 91 + #define PCH_GBE_INT_DISABLE_ALL 0 94 92 95 93 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; 96 94 ··· 144 138 if (!tmp) 145 139 pr_err("Error: busy bit is not cleared\n"); 146 140 } 141 + 142 + /** 143 + * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context 144 + * @reg: Pointer of register 145 + * @busy: Busy bit 146 + */ 147 + static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit) 148 + { 149 + u32 tmp; 150 + int ret = -1; 151 + /* wait busy */ 152 + tmp = 20; 153 + while ((ioread32(reg) & bit) && --tmp) 154 + udelay(5); 155 + if (!tmp) 156 + pr_err("Error: busy bit is not cleared\n"); 157 + else 158 + ret = 0; 159 + return ret; 160 + } 161 + 147 162 /** 148 163 * pch_gbe_mac_mar_set - Set MAC address register 149 164 * @hw: Pointer to the HW structure ··· 212 185 #endif 213 186 pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST); 214 187 /* Setup the receive address */ 188 + pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); 189 + return; 190 + } 191 + 192 + static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw) 193 + { 194 + /* Read the MAC address and store it in the private data */ 195 + pch_gbe_mac_read_mac_addr(hw); 196 + iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET); 197 + pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST); 198 + /* Setup the MAC address */ 215 199 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); 216 200 return; 217 201 } ··· 709 671 710 672 tcpip = ioread32(&hw->reg->TCPIP_ACC); 711 673 712 - if (netdev->features & NETIF_F_RXCSUM) { 713 - tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF; 714 - tcpip |= PCH_GBE_RX_TCPIPACC_EN; 715 - } else { 716 - tcpip |= PCH_GBE_RX_TCPIPACC_OFF; 717 - tcpip &= ~PCH_GBE_RX_TCPIPACC_EN; 718 - } 674 + tcpip |= PCH_GBE_RX_TCPIPACC_OFF; 675 + tcpip &= ~PCH_GBE_RX_TCPIPACC_EN; 719 676 iowrite32(tcpip, &hw->reg->TCPIP_ACC); 720 677 return; 721 678 } ··· 750 717 iowrite32(rdba, &hw->reg->RX_DSC_BASE); 751 718 iowrite32(rdlen, &hw->reg->RX_DSC_SIZE); 752 719 iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P); 753 - 754 - /* Enables Receive DMA */ 755 - rxdma = ioread32(&hw->reg->DMA_CTRL); 756 - rxdma |= PCH_GBE_RX_DMA_EN; 757 - iowrite32(rxdma, &hw->reg->DMA_CTRL); 758 - /* Enables Receive */ 759 - iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); 760 720 } 761 721 762 722 /** ··· 1123 1097 spin_unlock_irqrestore(&adapter->stats_lock, flags); 1124 1098 } 1125 1099 1100 + static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter) 1101 + { 1102 + struct pch_gbe_hw *hw = &adapter->hw; 1103 + u32 rxdma; 1104 + u16 value; 1105 + int ret; 1106 + 1107 + /* Disable Receive DMA */ 1108 + rxdma = ioread32(&hw->reg->DMA_CTRL); 1109 + rxdma &= ~PCH_GBE_RX_DMA_EN; 1110 + iowrite32(rxdma, &hw->reg->DMA_CTRL); 1111 + /* Wait Rx DMA BUS is IDLE */ 1112 + ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK); 1113 + if (ret) { 1114 + /* Disable Bus master */ 1115 + pci_read_config_word(adapter->pdev, PCI_COMMAND, &value); 1116 + value &= ~PCI_COMMAND_MASTER; 1117 + pci_write_config_word(adapter->pdev, PCI_COMMAND, value); 1118 + /* Stop Receive */ 1119 + pch_gbe_mac_reset_rx(hw); 1120 + /* Enable Bus master */ 1121 + value |= PCI_COMMAND_MASTER; 1122 + pci_write_config_word(adapter->pdev, PCI_COMMAND, value); 1123 + } else { 1124 + /* Stop Receive */ 1125 + pch_gbe_mac_reset_rx(hw); 1126 + } 1127 + } 1128 + 1129 + static void pch_gbe_start_receive(struct pch_gbe_hw *hw) 1130 + { 1131 + u32 rxdma; 1132 + 1133 + /* Enables Receive DMA */ 1134 + rxdma = ioread32(&hw->reg->DMA_CTRL); 1135 + rxdma |= PCH_GBE_RX_DMA_EN; 1136 + iowrite32(rxdma, &hw->reg->DMA_CTRL); 1137 + /* Enables Receive */ 1138 + iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); 1139 + return; 1140 + } 1141 + 1126 1142 /** 1127 1143 * pch_gbe_intr - Interrupt Handler 1128 1144 * @irq: Interrupt number ··· 1191 1123 if (int_st & PCH_GBE_INT_RX_FRAME_ERR) 1192 1124 adapter->stats.intr_rx_frame_err_count++; 1193 1125 if (int_st & PCH_GBE_INT_RX_FIFO_ERR) 1194 - adapter->stats.intr_rx_fifo_err_count++; 1126 + if (!adapter->rx_stop_flag) { 1127 + adapter->stats.intr_rx_fifo_err_count++; 1128 + pr_debug("Rx fifo over run\n"); 1129 + adapter->rx_stop_flag = true; 1130 + int_en = ioread32(&hw->reg->INT_EN); 1131 + iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), 1132 + &hw->reg->INT_EN); 1133 + pch_gbe_stop_receive(adapter); 1134 + int_st |= ioread32(&hw->reg->INT_ST); 1135 + int_st = int_st & ioread32(&hw->reg->INT_EN); 1136 + } 1195 1137 if (int_st & PCH_GBE_INT_RX_DMA_ERR) 1196 1138 adapter->stats.intr_rx_dma_err_count++; 1197 1139 if (int_st & PCH_GBE_INT_TX_FIFO_ERR) ··· 1213 1135 /* When Rx descriptor is empty */ 1214 1136 if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) { 1215 1137 adapter->stats.intr_rx_dsc_empty_count++; 1216 - pr_err("Rx descriptor is empty\n"); 1138 + pr_debug("Rx descriptor is empty\n"); 1217 1139 int_en = ioread32(&hw->reg->INT_EN); 1218 1140 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN); 1219 1141 if (hw->mac.tx_fc_enable) { 1220 1142 /* Set Pause packet */ 1221 1143 pch_gbe_mac_set_pause_packet(hw); 1222 1144 } 1223 - if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) 1224 - == 0) { 1225 - return IRQ_HANDLED; 1226 - } 1227 1145 } 1228 1146 1229 1147 /* When request status is Receive interruption */ 1230 - if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) { 1148 + if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) || 1149 + (adapter->rx_stop_flag == true)) { 1231 1150 if (likely(napi_schedule_prep(&adapter->napi))) { 1232 1151 /* Enable only Rx Descriptor empty */ 1233 1152 atomic_inc(&adapter->irq_sem); ··· 1260 1185 unsigned int i; 1261 1186 unsigned int bufsz; 1262 1187 1263 - bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN; 1188 + bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; 1264 1189 i = rx_ring->next_to_use; 1265 1190 1266 1191 while ((cleaned_count--)) { 1267 1192 buffer_info = &rx_ring->buffer_info[i]; 1268 - skb = buffer_info->skb; 1269 - if (skb) { 1270 - skb_trim(skb, 0); 1271 - } else { 1272 - skb = netdev_alloc_skb(netdev, bufsz); 1273 - if (unlikely(!skb)) { 1274 - /* Better luck next round */ 1275 - adapter->stats.rx_alloc_buff_failed++; 1276 - break; 1277 - } 1278 - /* 64byte align */ 1279 - skb_reserve(skb, PCH_GBE_DMA_ALIGN); 1280 - 1281 - buffer_info->skb = skb; 1282 - buffer_info->length = adapter->rx_buffer_len; 1193 + skb = netdev_alloc_skb(netdev, bufsz); 1194 + if (unlikely(!skb)) { 1195 + /* Better luck next round */ 1196 + adapter->stats.rx_alloc_buff_failed++; 1197 + break; 1198 + } 1199 + /* align */ 1200 + skb_reserve(skb, NET_IP_ALIGN); 1201 + buffer_info->skb = skb; 1202 + 1284 1203 buffer_info->dma = dma_map_single(&pdev->dev, 1204 + buffer_info->rx_buffer, 1286 1205 buffer_info->length, 1287 1206 DMA_FROM_DEVICE); 1288 1207 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { ··· 1307 1238 &hw->reg->RX_DSC_SW_P); 1308 1239 } 1309 1240 return; 1241 + } 1242 + 1243 + static int 1244 + pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter, 1245 + struct pch_gbe_rx_ring *rx_ring, int cleaned_count) 1246 + { 1247 + struct pci_dev *pdev = adapter->pdev; 1248 + struct pch_gbe_buffer *buffer_info; 1249 + unsigned int i; 1250 + unsigned int bufsz; 1251 + unsigned int size; 1252 + 1253 + bufsz = adapter->rx_buffer_len; 1254 + 1255 + size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; 1256 + rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size, 1257 + &rx_ring->rx_buff_pool_logic, 1258 + GFP_KERNEL); 1259 + if (!rx_ring->rx_buff_pool) { 1260 + pr_err("Unable to allocate memory for the receive pool buffer\n"); 1261 + return -ENOMEM; 1262 + } 1263 + memset(rx_ring->rx_buff_pool, 0, size); 1264 + rx_ring->rx_buff_pool_size = size; 1265 + for (i = 0; i < rx_ring->count; i++) { 1266 + buffer_info = &rx_ring->buffer_info[i]; 1267 + buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i; 1268 + buffer_info->length = bufsz; 1269 + } 1270 + return 0; 1271 + } 1272 1273 /** ··· 1384 1285 struct sk_buff *skb; 1385 1286 unsigned int i; 1386 1287 unsigned int cleaned_count = 0; 1387 - bool cleaned = false; 1288 + bool cleaned = true; 1389 1290
pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); 1390 1291 ··· 1395 1296 1396 1297 while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { 1397 1298 pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); 1398 - cleaned = true; 1399 1299 buffer_info = &tx_ring->buffer_info[i]; 1400 1300 skb = buffer_info->skb; 1401 1301 ··· 1437 1339 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); 1438 1340 1439 1341 /* weight of a sort for tx, to avoid endless transmit cleanup */ 1440 - if (cleaned_count++ == PCH_GBE_TX_WEIGHT) 1342 + if (cleaned_count++ == PCH_GBE_TX_WEIGHT) { 1343 + cleaned = false; 1441 1344 break; 1345 + } 1442 1346 } 1443 1347 pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", 1444 1348 cleaned_count); ··· 1480 1380 unsigned int i; 1481 1381 unsigned int cleaned_count = 0; 1482 1382 bool cleaned = false; 1483 - struct sk_buff *skb, *new_skb; 1383 + struct sk_buff *skb; 1484 1384 u8 dma_status; 1485 1385 u16 gbec_status; 1486 1386 u32 tcp_ip_status; ··· 1501 1401 rx_desc->gbec_status = DSC_INIT16; 1502 1402 buffer_info = &rx_ring->buffer_info[i]; 1503 1403 skb = buffer_info->skb; 1404 + buffer_info->skb = NULL; 1504 1405 1505 1406 /* unmap dma */ 1506 1407 dma_unmap_single(&pdev->dev, buffer_info->dma, 1507 1408 buffer_info->length, DMA_FROM_DEVICE); 1508 1409 buffer_info->mapped = false; 1509 - /* Prefetch the packet */ 1510 - prefetch(skb->data); 1511 1410 1512 1411 pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x " 1513 1412 "TCP:0x%08x] BufInf = 0x%p\n", ··· 1526 1427 pr_err("Receive CRC Error\n"); 1527 1428 } else { 1528 1429 /* get receive length */ 1529 - /* length convert[-3] */ 1530 - length = (rx_desc->rx_words_eob) - 3; 1430 + /* length convert[-3], length includes FCS length */ 1431 + length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN; 1432 + if (rx_desc->rx_words_eob & 0x02) 1433 + length = length - 4; 1434 + /* 1435 + * buffer_info->rx_buffer: [Header:14][payload] 1436 + * skb->data: [Reserve:2][Header:14][payload] 1437 + */ 1438 + memcpy(skb->data, buffer_info->rx_buffer, length); 1531 1439 1532 - /* Decide the data conversion method */ 1533 - if (!(netdev->features & NETIF_F_RXCSUM)) { 1534 - /* [Header:14][payload] */ 1535 - if (NET_IP_ALIGN) { 1536 - /* Because alignment differs, 1537 - * the new_skb is newly allocated, 1538 - * and data is copied to new_skb.*/ 1539 - new_skb = netdev_alloc_skb(netdev, 1540 - length + NET_IP_ALIGN); 1541 - if (!new_skb) { 1542 - /* dorrop error */ 1543 - pr_err("New skb allocation " 1544 - "Error\n"); 1545 - goto dorrop; 1546 - } 1547 - skb_reserve(new_skb, NET_IP_ALIGN); 1548 - memcpy(new_skb->data, skb->data, 1549 - length); 1550 - skb = new_skb; 1551 - } else { 1552 - /* DMA buffer is used as SKB as it is.*/ 1553 - buffer_info->skb = NULL; 1554 - } 1555 - } else { 1556 - /* [Header:14][padding:2][payload] */ 1557 - /* The length includes padding length */ 1558 - length = length - PCH_GBE_DMA_PADDING; 1559 - if ((length < copybreak) || 1560 - (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) { 1561 - /* Because alignment differs, 1562 - * the new_skb is newly allocated, 1563 - * and data is copied to new_skb. 
1564 - * Padding data is deleted 1565 - * at the time of a copy.*/ 1566 - new_skb = netdev_alloc_skb(netdev, 1567 - length + NET_IP_ALIGN); 1568 - if (!new_skb) { 1569 - /* dorrop error */ 1570 - pr_err("New skb allocation " 1571 - "Error\n"); 1572 - goto dorrop; 1573 - } 1574 - skb_reserve(new_skb, NET_IP_ALIGN); 1575 - memcpy(new_skb->data, skb->data, 1576 - ETH_HLEN); 1577 - memcpy(&new_skb->data[ETH_HLEN], 1578 - &skb->data[ETH_HLEN + 1579 - PCH_GBE_DMA_PADDING], 1580 - length - ETH_HLEN); 1581 - skb = new_skb; 1582 - } else { 1583 - /* Padding data is deleted 1584 - * by moving header data.*/ 1585 - memmove(&skb->data[PCH_GBE_DMA_PADDING], 1586 - &skb->data[0], ETH_HLEN); 1587 - skb_reserve(skb, NET_IP_ALIGN); 1588 - buffer_info->skb = NULL; 1589 - } 1590 - } 1591 - /* The length includes FCS length */ 1592 - length = length - ETH_FCS_LEN; 1593 1440 /* update status of driver */ 1594 1441 adapter->stats.rx_bytes += length; 1595 1442 adapter->stats.rx_packets++; ··· 1554 1509 pr_debug("Receive skb->ip_summed: %d length: %d\n", 1555 1510 skb->ip_summed, length); 1556 1511 } 1557 - dorrop: 1558 1512 /* return some buffers to hardware, one at a time is too slow */ 1559 1513 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) { 1560 1514 pch_gbe_alloc_rx_buffers(adapter, rx_ring, ··· 1758 1714 pr_err("Error: can't bring device up\n"); 1759 1715 return err; 1760 1716 } 1717 + err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count); 1718 + if (err) { 1719 + pr_err("Error: can't bring device up\n"); 1720 + return err; 1721 + } 1761 1722 pch_gbe_alloc_tx_buffers(adapter, tx_ring); 1762 1723 pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); 1763 1724 adapter->tx_queue_len = netdev->tx_queue_len; 1725 + pch_gbe_start_receive(&adapter->hw); 1764 1726 1765 1727 mod_timer(&adapter->watchdog_timer, jiffies); 1766 1728 ··· 1784 1734 void pch_gbe_down(struct pch_gbe_adapter *adapter) 1785 1735 { 1786 1736 struct net_device *netdev = adapter->netdev; 1737 + struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; 1787 1738 1788 1739 /* signal that we're down so the interrupt handler does not 1789 1740 * reschedule our watchdog timer */ ··· 1803 1752 pch_gbe_reset(adapter); 1804 1753 pch_gbe_clean_tx_ring(adapter, adapter->tx_ring); 1805 1754 pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); 1755 + 1756 + pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size, 1757 + rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic); 1758 + rx_ring->rx_buff_pool_logic = 0; 1759 + rx_ring->rx_buff_pool_size = 0; 1760 + rx_ring->rx_buff_pool = NULL; 1806 1761 } 1807 1762 1808 1763 /** ··· 2061 2004 { 2062 2005 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 2063 2006 int max_frame; 2007 + unsigned long old_rx_buffer_len = adapter->rx_buffer_len; 2008 + int err; 2064 2009 2065 2010 max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 2066 2011 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || ··· 2077 2018 else if (max_frame <= PCH_GBE_FRAME_SIZE_8192) 2078 2019 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192; 2079 2020 else 2080 - adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE; 2081 - netdev->mtu = new_mtu; 2082 - adapter->hw.mac.max_frame_size = max_frame; 2021 + adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE; 2083 2022 2084 - if (netif_running(netdev)) 2085 - pch_gbe_reinit_locked(adapter); 2086 - else 2023 + if (netif_running(netdev)) { 2024 + pch_gbe_down(adapter); 2025 + err = pch_gbe_up(adapter); 2026 + if (err) { 2027 + adapter->rx_buffer_len = old_rx_buffer_len; 2028 + 
pch_gbe_up(adapter); 2029 + return -ENOMEM; 2030 + } else { 2031 + netdev->mtu = new_mtu; 2032 + adapter->hw.mac.max_frame_size = max_frame; 2033 + } 2034 + } else { 2087 2035 pch_gbe_reset(adapter); 2036 + netdev->mtu = new_mtu; 2037 + adapter->hw.mac.max_frame_size = max_frame; 2038 + } 2088 2039 2089 2040 pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", 2090 2041 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, ··· 2168 2099 { 2169 2100 struct pch_gbe_adapter *adapter = 2170 2101 container_of(napi, struct pch_gbe_adapter, napi); 2171 - struct net_device *netdev = adapter->netdev; 2172 2102 int work_done = 0; 2173 2103 bool poll_end_flag = false; 2174 2104 bool cleaned = false; 2105 + u32 int_en; 2175 2106 2176 2107 pr_debug("budget : %d\n", budget); 2177 2108 2178 - /* Keep link state information with original netdev */ 2179 - if (!netif_carrier_ok(netdev)) { 2180 - poll_end_flag = true; 2181 - } else { 2182 - cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); 2183 - pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); 2109 + pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); 2110 + cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); 2184 2111 2185 - if (cleaned) 2186 - work_done = budget; 2187 - /* If no Tx and not enough Rx work done, 2188 - * exit the polling mode 2189 - */ 2190 - if ((work_done < budget) || !netif_running(netdev)) 2191 - poll_end_flag = true; 2192 - } 2112 + if (!cleaned) 2113 + work_done = budget; 2114 + /* If no Tx and not enough Rx work done, 2115 + * exit the polling mode 2116 + */ 2117 + if (work_done < budget) 2118 + poll_end_flag = true; 2193 2119 2194 2120 if (poll_end_flag) { 2195 2121 napi_complete(napi); 2122 + if (adapter->rx_stop_flag) { 2123 + adapter->rx_stop_flag = false; 2124 + pch_gbe_start_receive(&adapter->hw); 2125 + } 2196 2126 pch_gbe_irq_enable(adapter); 2197 - } 2127 + } else 2128 + if (adapter->rx_stop_flag) { 2129 + adapter->rx_stop_flag = false; 2130 + pch_gbe_start_receive(&adapter->hw); 2131 + int_en = ioread32(&adapter->hw.reg->INT_EN); 2132 + iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), 2133 + &adapter->hw.reg->INT_EN); 2134 + } 2198 2135 2199 2136 pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", 2200 2137 poll_end_flag, work_done, budget); ··· 2522 2447 }, 2523 2448 {.vendor = PCI_VENDOR_ID_ROHM, 2524 2449 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE, 2450 + .subvendor = PCI_ANY_ID, 2451 + .subdevice = PCI_ANY_ID, 2452 + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), 2453 + .class_mask = (0xFFFF00) 2454 + }, 2455 + {.vendor = PCI_VENDOR_ID_ROHM, 2456 + .device = PCI_DEVICE_ID_ROHM_ML7831_GBE, 2525 2457 .subvendor = PCI_ANY_ID, 2526 2458 .subdevice = PCI_ANY_ID, 2527 2459 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
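Taken together, the pch_gbe_main.c changes make an Rx FIFO overrun survivable instead of merely counted: the interrupt handler masks PCH_GBE_INT_RX_FIFO_ERR, sets rx_stop_flag and stops the Rx DMA engine (toggling PCI bus mastering if the engine refuses to go idle), and the NAPI poll routine restarts reception once the ring has been drained. The restart half, condensed into a standalone sketch using the driver's names:

    static void rx_overrun_restart(struct pch_gbe_adapter *adapter)
    {
            u32 int_en;

            if (!adapter->rx_stop_flag)
                    return;
            adapter->rx_stop_flag = false;
            pch_gbe_start_receive(&adapter->hw);
            /* re-arm the FIFO error interrupt the ISR masked off */
            int_en = ioread32(&adapter->hw.reg->INT_EN);
            iowrite32(int_en | PCH_GBE_INT_RX_FIFO_ERR,
                      &adapter->hw.reg->INT_EN);
    }

The same series also replaces per-skb DMA mappings with one coherent buffer pool (rx_buff_pool), sized ring count times rx_buffer_len plus a 2 MB reserve, which is why pch_gbe_up() gains an allocation step and pch_gbe_down() a matching pci_free_consistent().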
+2 -2
drivers/net/phy/dp83640.c
··· 589 589 prune_rx_ts(dp83640); 590 590 591 591 if (list_empty(&dp83640->rxpool)) { 592 - pr_warning("dp83640: rx timestamp pool is empty\n"); 592 + pr_debug("dp83640: rx timestamp pool is empty\n"); 593 593 goto out; 594 594 } 595 595 rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); ··· 612 612 skb = skb_dequeue(&dp83640->tx_queue); 613 613 614 614 if (!skb) { 615 - pr_warning("dp83640: have timestamp but tx_queue empty\n"); 615 + pr_debug("dp83640: have timestamp but tx_queue empty\n"); 616 616 return; 617 617 } 618 618 ns = phy2txts(phy_txts);
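Both dp83640 messages fire under entirely normal load (the rx timestamp pool can drain faster than it is pruned), so they move from pr_warning to pr_debug. If some visibility were still wanted without flooding the log, a rate-limited warning would be the usual middle ground — not what this patch does, just an alternative sketch:

    if (list_empty(&dp83640->rxpool)) {
            if (net_ratelimit())
                    pr_warn("dp83640: rx timestamp pool is empty\n");
            goto out;
    }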
+6 -1
drivers/net/ppp_generic.c
··· 1465 1465 continue; 1466 1466 } 1467 1467 1468 - mtu = pch->chan->mtu - hdrlen; 1468 + /* 1469 + * hdrlen includes the 2-byte PPP protocol field, but the 1470 + * MTU counts only the payload excluding the protocol field. 1471 + * (RFC1661 Section 2) 1472 + */ 1473 + mtu = pch->chan->mtu - (hdrlen - 2); 1469 1474 if (mtu < 4) 1470 1475 mtu = 4; 1471 1476 if (flen > mtu)
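Worked example of the corrected arithmetic, assuming the 6-byte long-sequence-number multilink header (2-byte PPP protocol field plus 4-byte sequence field) and a channel MTU of 1500:

    before: 1500 - 6       = 1494 bytes of payload per fragment
    after:  1500 - (6 - 2) = 1496 bytes of payload per fragment

The protocol field is framing rather than payload as far as the channel MTU is concerned, so the old code under-filled every fragment by two bytes.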
+1
drivers/net/pxa168_eth.c
··· 40 40 #include <linux/clk.h> 41 41 #include <linux/phy.h> 42 42 #include <linux/io.h> 43 + #include <linux/interrupt.h> 43 44 #include <linux/types.h> 44 45 #include <asm/pgtable.h> 45 46 #include <asm/system.h>
+27 -5
drivers/net/r8169.c
··· 407 407 RxOK = 0x0001, 408 408 409 409 /* RxStatusDesc */ 410 + RxBOVF = (1 << 24), 410 411 RxFOVF = (1 << 23), 411 412 RxRWT = (1 << 22), 412 413 RxRES = (1 << 21), ··· 683 682 struct mii_if_info mii; 684 683 struct rtl8169_counters counters; 685 684 u32 saved_wolopts; 685 + u32 opts1_mask; 686 686 687 687 struct rtl_fw { 688 688 const struct firmware *fw; ··· 712 710 MODULE_FIRMWARE(FIRMWARE_8168D_2); 713 711 MODULE_FIRMWARE(FIRMWARE_8168E_1); 714 712 MODULE_FIRMWARE(FIRMWARE_8168E_2); 713 + MODULE_FIRMWARE(FIRMWARE_8168E_3); 715 714 MODULE_FIRMWARE(FIRMWARE_8105E_1); 716 715 717 716 static int rtl8169_open(struct net_device *dev); ··· 3080 3077 netif_err(tp, link, dev, "PHY reset failed\n"); 3081 3078 } 3082 3079 3080 + static bool rtl_tbi_enabled(struct rtl8169_private *tp) 3081 + { 3082 + void __iomem *ioaddr = tp->mmio_addr; 3083 + 3084 + return (tp->mac_version == RTL_GIGA_MAC_VER_01) && 3085 + (RTL_R8(PHYstatus) & TBI_Enable); 3086 + } 3087 + 3083 3088 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) 3084 3089 { 3085 3090 void __iomem *ioaddr = tp->mmio_addr; ··· 3120 3109 ADVERTISED_1000baseT_Half | 3121 3110 ADVERTISED_1000baseT_Full : 0)); 3122 3111 3123 - if (RTL_R8(PHYstatus) & TBI_Enable) 3112 + if (rtl_tbi_enabled(tp)) 3124 3113 netif_info(tp, link, dev, "TBI auto-negotiating\n"); 3125 3114 } 3126 3115 ··· 3330 3319 3331 3320 static void r810x_pll_power_down(struct rtl8169_private *tp) 3332 3321 { 3322 + void __iomem *ioaddr = tp->mmio_addr; 3323 + 3333 3324 if (__rtl8169_get_wol(tp) & WAKE_ANY) { 3334 3325 rtl_writephy(tp, 0x1f, 0x0000); 3335 3326 rtl_writephy(tp, MII_BMCR, 0x0000); 3327 + 3328 + if (tp->mac_version == RTL_GIGA_MAC_VER_29 || 3329 + tp->mac_version == RTL_GIGA_MAC_VER_30) 3330 + RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | 3331 + AcceptMulticast | AcceptMyPhys); 3336 3332 return; 3337 3333 } 3338 3334 ··· 3435 3417 rtl_writephy(tp, MII_BMCR, 0x0000); 3436 3418 3437 3419 if (tp->mac_version == RTL_GIGA_MAC_VER_32 || 3438 - tp->mac_version == RTL_GIGA_MAC_VER_33) 3420 + tp->mac_version == RTL_GIGA_MAC_VER_33 || 3421 + tp->mac_version == RTL_GIGA_MAC_VER_34) 3439 3422 RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | 3440 3423 AcceptMulticast | AcceptMyPhys); 3441 3424 return; ··· 3746 3727 tp->features |= rtl_try_msi(pdev, ioaddr, cfg); 3747 3728 RTL_W8(Cfg9346, Cfg9346_Lock); 3748 3729 3749 - if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) && 3750 - (RTL_R8(PHYstatus) & TBI_Enable)) { 3730 + if (rtl_tbi_enabled(tp)) { 3751 3731 tp->set_speed = rtl8169_set_speed_tbi; 3752 3732 tp->get_settings = rtl8169_gset_tbi; 3753 3733 tp->phy_reset_enable = rtl8169_tbi_reset_enable; ··· 3794 3776 tp->hw_start = cfg->hw_start; 3795 3777 tp->intr_event = cfg->intr_event; 3796 3778 tp->napi_event = cfg->napi_event; 3779 + 3780 + tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? 3781 + ~(RxBOVF | RxFOVF) : ~0; 3797 3782 3798 3783 init_timer(&tp->timer); 3799 3784 tp->timer.data = (unsigned long) dev; ··· 4009 3988 while (RTL_R8(TxPoll) & NPQ) 4010 3989 udelay(20); 4011 3990 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34) { 3991 + RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); 4012 3992 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) 4013 3993 udelay(100); 4014 3994 } else { ··· 5336 5314 u32 status; 5337 5315 5338 5316 rmb(); 5339 - status = le32_to_cpu(desc->opts1); 5317 + status = le32_to_cpu(desc->opts1) & tp->opts1_mask; 5340 5318 5341 5319 if (status & DescOwn) 5342 5320 break;
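The r8169 change that ties the rest together is opts1_mask: on chips newer than RTL_GIGA_MAC_VER_01 the overflow bits RxBOVF/RxFOVF can appear in a descriptor's opts1 word, and if they leak into the status checks they surface as spurious receive errors; VER_01 keeps the full word. Masking at the single point where opts1 is read leaves every downstream test unchanged — roughly:

    /* sketch: one masked read, reused by all status checks */
    static u32 rtl_rx_status(const struct rtl8169_private *tp,
                             const struct RxDesc *desc)
    {
            return le32_to_cpu(desc->opts1) & tp->opts1_mask;
    }

The new rtl_tbi_enabled() helper is the same idea applied to control flow: the TBI test only means anything on VER_01 per the helper, so the version check lives in one place instead of being repeated (and once forgotten) at each call site.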
+2 -16
drivers/net/sfc/efx.c
··· 1050 1050 { 1051 1051 struct pci_dev *pci_dev = efx->pci_dev; 1052 1052 dma_addr_t dma_mask = efx->type->max_dma_mask; 1053 - bool use_wc; 1054 1053 int rc; 1055 1054 1056 1055 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); ··· 1100 1101 rc = -EIO; 1101 1102 goto fail3; 1102 1103 } 1103 - 1104 - /* bug22643: If SR-IOV is enabled then tx push over a write combined 1105 - * mapping is unsafe. We need to disable write combining in this case. 1106 - * MSI is unsupported when SR-IOV is enabled, and the firmware will 1107 - * have removed the MSI capability. So write combining is safe if 1108 - * there is an MSI capability. 1109 - */ 1110 - use_wc = (!EFX_WORKAROUND_22643(efx) || 1111 - pci_find_capability(pci_dev, PCI_CAP_ID_MSI)); 1112 - if (use_wc) 1113 - efx->membase = ioremap_wc(efx->membase_phys, 1114 - efx->type->mem_map_size); 1115 - else 1116 - efx->membase = ioremap_nocache(efx->membase_phys, 1117 - efx->type->mem_map_size); 1104 + efx->membase = ioremap_nocache(efx->membase_phys, 1105 + efx->type->mem_map_size); 1118 1106 if (!efx->membase) { 1119 1107 netif_err(efx, probe, efx->net_dev, 1120 1108 "could not map memory BAR at %llx+%x\n",
-6
drivers/net/sfc/io.h
··· 103 103 _efx_writed(efx, value->u32[2], reg + 8); 104 104 _efx_writed(efx, value->u32[3], reg + 12); 105 105 #endif 106 - wmb(); 107 106 mmiowb(); 108 107 spin_unlock_irqrestore(&efx->biu_lock, flags); 109 108 } ··· 125 126 __raw_writel((__force u32)value->u32[0], membase + addr); 126 127 __raw_writel((__force u32)value->u32[1], membase + addr + 4); 127 128 #endif 128 - wmb(); 129 129 mmiowb(); 130 130 spin_unlock_irqrestore(&efx->biu_lock, flags); 131 131 } ··· 139 141 140 142 /* No lock required */ 141 143 _efx_writed(efx, value->u32[0], reg); 142 - wmb(); 143 144 } 144 145 145 146 /* Read a 128-bit CSR, locking as appropriate. */ ··· 149 152 150 153 spin_lock_irqsave(&efx->biu_lock, flags); 151 154 value->u32[0] = _efx_readd(efx, reg + 0); 152 - rmb(); 153 155 value->u32[1] = _efx_readd(efx, reg + 4); 154 156 value->u32[2] = _efx_readd(efx, reg + 8); 155 157 value->u32[3] = _efx_readd(efx, reg + 12); ··· 171 175 value->u64[0] = (__force __le64)__raw_readq(membase + addr); 172 176 #else 173 177 value->u32[0] = (__force __le32)__raw_readl(membase + addr); 174 - rmb(); 175 178 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); 176 179 #endif 177 180 spin_unlock_irqrestore(&efx->biu_lock, flags); ··· 244 249 _efx_writed(efx, value->u32[2], reg + 8); 245 250 _efx_writed(efx, value->u32[3], reg + 12); 246 251 #endif 247 - wmb(); 248 252 } 249 253 #define efx_writeo_page(efx, value, reg, page) \ 250 254 _efx_writeo_page(efx, value, \
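These barrier removals pair with the efx.c hunk above: the wmb()/rmb() calls were there to flush and order traffic through the write-combined BAR mapping, and with the driver back on ioremap_nocache() the raw accessors already reach the device in program order on the platforms sfc supports (the spinlock plus mmiowb() continue to order writes across CPUs) — an inference from the paired efx.c change rather than anything the hunk states itself. The invariant that still holds afterwards:

    /* uncached mapping: consecutive raw writes stay ordered,
     * so no wmb() is needed between the two halves */
    __raw_writel((__force u32)value->u32[0], membase + addr);
    __raw_writel((__force u32)value->u32[1], membase + addr + 4);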
+17 -29
drivers/net/sfc/mcdi.c
··· 50 50 return &nic_data->mcdi; 51 51 } 52 52 53 - static inline void 54 - efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) 55 - { 56 - struct siena_nic_data *nic_data = efx->nic_data; 57 - value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); 58 - } 59 - 60 - static inline void 61 - efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) 62 - { 63 - struct siena_nic_data *nic_data = efx->nic_data; 64 - __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); 65 - } 66 - 67 53 void efx_mcdi_init(struct efx_nic *efx) 68 54 { 69 55 struct efx_mcdi_iface *mcdi; ··· 70 84 const u8 *inbuf, size_t inlen) 71 85 { 72 86 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 73 - unsigned pdu = MCDI_PDU(efx); 74 - unsigned doorbell = MCDI_DOORBELL(efx); 87 + unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 88 + unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); 75 89 unsigned int i; 76 90 efx_dword_t hdr; 77 91 u32 xflags, seqno; ··· 92 106 MCDI_HEADER_SEQ, seqno, 93 107 MCDI_HEADER_XFLAGS, xflags); 94 108 95 - efx_mcdi_writed(efx, &hdr, pdu); 109 + efx_writed(efx, &hdr, pdu); 96 110 97 111 for (i = 0; i < inlen; i += 4) 98 - efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), 99 - pdu + 4 + i); 112 + _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); 113 + 114 + /* Ensure the payload is written out before the header */ 115 + wmb(); 100 116 101 117 /* ring the doorbell with a distinctive value */ 102 - EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); 103 - efx_mcdi_writed(efx, &hdr, doorbell); 118 + _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); 104 119 } 105 120 106 121 static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) 107 122 { 108 123 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 109 - unsigned int pdu = MCDI_PDU(efx); 124 + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 110 125 int i; 111 126 112 127 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 113 128 BUG_ON(outlen & 3 || outlen >= 0x100); 114 129 115 130 for (i = 0; i < outlen; i += 4) 116 - efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); 131 + *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); 117 132 } 118 133 119 134 static int efx_mcdi_poll(struct efx_nic *efx) ··· 122 135 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 123 136 unsigned int time, finish; 124 137 unsigned int respseq, respcmd, error; 125 - unsigned int pdu = MCDI_PDU(efx); 138 + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 126 139 unsigned int rc, spins; 127 140 efx_dword_t reg; 128 141 ··· 148 161 149 162 time = get_seconds(); 150 163 151 - efx_mcdi_readd(efx, &reg, pdu); 164 + rmb(); 165 + efx_readd(efx, &reg, pdu); 152 166 153 167 /* All 1's indicates that shared memory is in reset (and is 154 168 * not a valid header). 
Wait for it to come out reset before ··· 176 188 respseq, mcdi->seqno); 177 189 rc = EIO; 178 190 } else if (error) { 179 - efx_mcdi_readd(efx, &reg, pdu + 4); 191 + efx_readd(efx, &reg, pdu + 4); 180 192 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { 181 193 #define TRANSLATE_ERROR(name) \ 182 194 case MC_CMD_ERR_ ## name: \ ··· 210 222 /* Test and clear MC-rebooted flag for this port/function */ 211 223 int efx_mcdi_poll_reboot(struct efx_nic *efx) 212 224 { 213 - unsigned int addr = MCDI_REBOOT_FLAG(efx); 225 + unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); 214 226 efx_dword_t reg; 215 227 uint32_t value; 216 228 217 229 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 218 230 return false; 219 231 220 - efx_mcdi_readd(efx, &reg, addr); 232 + efx_readd(efx, &reg, addr); 221 233 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); 222 234 223 235 if (value == 0) 224 236 return 0; 225 237 226 238 EFX_ZERO_DWORD(reg); 227 - efx_mcdi_writed(efx, &reg, addr); 239 + efx_writed(efx, &reg, addr); 228 240 229 241 if (value == MC_STATUS_DWORD_ASSERT) 230 242 return -EINTR;
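With the dedicated mcdi_smem mapping gone, every PDU/doorbell offset gains an explicit FR_CZ_MC_TREG_SMEM base and the copies go through the generic efx_writed()/_efx_readd() helpers. The one subtlety is the newly explicit wmb(): the command payload must be globally visible before the doorbell write that tells the MC to parse it. The ordering requirement, reduced to its skeleton:

    /* doorbell-style mailbox: payload first, then the kick */
    for (i = 0; i < inlen; i += 4)
            _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
    wmb();                  /* payload lands before the doorbell */
    _efx_writed(efx, (__force __le32)0x45789abc, doorbell);

The matching rmb() added before reading the response header serves the mirror-image purpose on the poll side.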
-7
drivers/net/sfc/nic.c
··· 1936 1936 1937 1937 size = min_t(size_t, table->step, 16); 1938 1938 1939 - if (table->offset >= efx->type->mem_map_size) { 1940 - /* No longer mapped; return dummy data */ 1941 - memcpy(buf, "\xde\xc0\xad\xde", 4); 1942 - buf += table->rows * size; 1943 - continue; 1944 - } 1945 - 1946 1939 for (i = 0; i < table->rows; i++) { 1947 1940 switch (table->step) { 1948 1941 case 4: /* 32-bit register or SRAM */
-2
drivers/net/sfc/nic.h
··· 143 143 /** 144 144 * struct siena_nic_data - Siena NIC state 145 145 * @mcdi: Management-Controller-to-Driver Interface 146 - * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable. 147 146 * @wol_filter_id: Wake-on-LAN packet filter id 148 147 */ 149 148 struct siena_nic_data { 150 149 struct efx_mcdi_iface mcdi; 151 - void __iomem *mcdi_smem; 152 150 int wol_filter_id; 153 151 }; 154 152
+4 -21
drivers/net/sfc/siena.c
··· 250 250 efx_reado(efx, &reg, FR_AZ_CS_DEBUG); 251 251 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 252 252 253 - /* Initialise MCDI */ 254 - nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + 255 - FR_CZ_MC_TREG_SMEM, 256 - FR_CZ_MC_TREG_SMEM_STEP * 257 - FR_CZ_MC_TREG_SMEM_ROWS); 258 - if (!nic_data->mcdi_smem) { 259 - netif_err(efx, probe, efx->net_dev, 260 - "could not map MCDI at %llx+%x\n", 261 - (unsigned long long)efx->membase_phys + 262 - FR_CZ_MC_TREG_SMEM, 263 - FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); 264 - rc = -ENOMEM; 265 - goto fail1; 266 - } 267 253 efx_mcdi_init(efx); 268 254 269 255 /* Recover from a failed assertion before probing */ 270 256 rc = efx_mcdi_handle_assertion(efx); 271 257 if (rc) 272 - goto fail2; 258 + goto fail1; 273 259 274 260 /* Let the BMC know that the driver is now in charge of link and 275 261 * filter settings. We must do this before we reset the NIC */ ··· 310 324 fail3: 311 325 efx_mcdi_drv_attach(efx, false, NULL); 312 326 fail2: 313 - iounmap(nic_data->mcdi_smem); 314 327 fail1: 315 328 kfree(efx->nic_data); 316 329 return rc; ··· 389 404 390 405 static void siena_remove_nic(struct efx_nic *efx) 391 406 { 392 - struct siena_nic_data *nic_data = efx->nic_data; 393 - 394 407 efx_nic_free_buffer(efx, &efx->irq_status); 395 408 396 409 siena_reset_hw(efx, RESET_TYPE_ALL); ··· 398 415 efx_mcdi_drv_attach(efx, false, NULL); 399 416 400 417 /* Tear down the private nic state */ 401 - iounmap(nic_data->mcdi_smem); 402 - kfree(nic_data); 418 + kfree(efx->nic_data); 403 419 efx->nic_data = NULL; 404 420 } 405 421 ··· 638 656 .default_mac_ops = &efx_mcdi_mac_operations, 639 657 640 658 .revision = EFX_REV_SIENA_A0, 641 - .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ 659 + .mem_map_size = (FR_CZ_MC_TREG_SMEM + 660 + FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), 642 661 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, 643 662 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, 644 663 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
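The siena.c, nic.c and nic.h hunks are three views of one revert: instead of mapping the MC_TREG_SMEM window with a private ioremap (and faking its registers with dummy data in the register dump), the main BAR mapping is simply extended to cover it, so mem_map_size grows from FR_CZ_MC_TREG_SMEM to

    FR_CZ_MC_TREG_SMEM +
            FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS

One mapping means one failure path fewer in siena_probe_nic() (the fail1/fail2 relabelling above) and no iounmap() bookkeeping left in siena_remove_nic().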
-2
drivers/net/sfc/workarounds.h
··· 38 38 #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS 39 39 /* Legacy interrupt storm when interrupt fifo fills */ 40 40 #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA 41 - /* Write combining and sriov=enabled are incompatible */ 42 - #define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA 43 41 44 42 /* Spurious parity errors in TSORT buffers */ 45 43 #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
-2
drivers/net/tg3.c
··· 6234 6234 } 6235 6235 } 6236 6236 6237 - #ifdef BCM_KERNEL_SUPPORTS_8021Q 6238 6237 if (vlan_tx_tag_present(skb)) { 6239 6238 base_flags |= TXD_FLAG_VLAN; 6240 6239 vlan = vlan_tx_tag_get(skb); 6241 6240 } 6242 - #endif 6243 6241 6244 6242 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 6245 6243 !mss && skb->len > VLAN_ETH_FRAME_LEN)
+5
drivers/net/usb/ipheth.c
··· 59 59 #define USB_PRODUCT_IPHONE_3G 0x1292 60 60 #define USB_PRODUCT_IPHONE_3GS 0x1294 61 61 #define USB_PRODUCT_IPHONE_4 0x1297 62 + #define USB_PRODUCT_IPHONE_4_VZW 0x129c 62 63 63 64 #define IPHETH_USBINTF_CLASS 255 64 65 #define IPHETH_USBINTF_SUBCLASS 253 ··· 97 96 IPHETH_USBINTF_PROTO) }, 98 97 { USB_DEVICE_AND_INTERFACE_INFO( 99 98 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, 99 + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 100 + IPHETH_USBINTF_PROTO) }, 101 + { USB_DEVICE_AND_INTERFACE_INFO( 102 + USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW, 100 103 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, 101 104 IPHETH_USBINTF_PROTO) }, 102 105 { }
+2 -1
drivers/net/wireless/ath/ath9k/ar9002_calib.c
··· 41 41 case ADC_DC_CAL: 42 42 /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */ 43 43 if (!IS_CHAN_B(chan) && 44 - !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) 44 + !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) && 45 + IS_CHAN_HT20(chan))) 45 46 supported = true; 46 47 break; 47 48 }
+1 -1
drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
··· 1514 1514 {0x00008258, 0x00000000}, 1515 1515 {0x0000825c, 0x40000000}, 1516 1516 {0x00008260, 0x00080922}, 1517 - {0x00008264, 0x9bc00010}, 1517 + {0x00008264, 0x9d400010}, 1518 1518 {0x00008268, 0xffffffff}, 1519 1519 {0x0000826c, 0x0000ffff}, 1520 1520 {0x00008270, 0x00000000},
+1 -1
drivers/net/wireless/ath/ath9k/ar9003_phy.c
··· 671 671 REG_WRITE_ARRAY(&ah->iniModesAdditional, 672 672 modesIndex, regWrites); 673 673 674 - if (AR_SREV_9300(ah)) 674 + if (AR_SREV_9330(ah)) 675 675 REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites); 676 676 677 677 if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
+6
drivers/net/wireless/ath/ath9k/main.c
··· 2303 2303 mutex_lock(&sc->mutex); 2304 2304 cancel_delayed_work_sync(&sc->tx_complete_work); 2305 2305 2306 + if (ah->ah_flags & AH_UNPLUGGED) { 2307 + ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n"); 2308 + mutex_unlock(&sc->mutex); 2309 + return; 2310 + } 2311 + 2306 2312 if (sc->sc_flags & SC_OP_INVALID) { 2307 2313 ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); 2308 2314 mutex_unlock(&sc->mutex);
+9 -1
drivers/net/wireless/ath/ath9k/recv.c
··· 205 205 206 206 static void ath_rx_edma_cleanup(struct ath_softc *sc) 207 207 { 208 + struct ath_hw *ah = sc->sc_ah; 209 + struct ath_common *common = ath9k_hw_common(ah); 208 210 struct ath_buf *bf; 209 211 210 212 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); 211 213 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); 212 214 213 215 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 214 - if (bf->bf_mpdu) 216 + if (bf->bf_mpdu) { 217 + dma_unmap_single(sc->dev, bf->bf_buf_addr, 218 + common->rx_bufsize, 219 + DMA_BIDIRECTIONAL); 215 220 dev_kfree_skb_any(bf->bf_mpdu); 221 + bf->bf_buf_addr = 0; 222 + bf->bf_mpdu = NULL; 223 + } 216 224 } 217 225 218 226 INIT_LIST_HEAD(&sc->rx.rxbuf);
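The leak being fixed: ath_rx_edma_cleanup() freed each skb but never undid its streaming DMA mapping, and left bf_mpdu/bf_buf_addr pointing at freed memory. The general teardown pattern the fix follows — unmap before free, then poison the stale handles so a later path cannot double-free or double-unmap:

    if (bf->bf_mpdu) {
            dma_unmap_single(sc->dev, bf->bf_buf_addr,
                             common->rx_bufsize, DMA_BIDIRECTIONAL);
            dev_kfree_skb_any(bf->bf_mpdu);
            bf->bf_buf_addr = 0;
            bf->bf_mpdu = NULL;
    }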
+2 -1
drivers/net/wireless/b43/main.c
··· 1632 1632 u32 cmd, beacon0_valid, beacon1_valid; 1633 1633 1634 1634 if (!b43_is_mode(wl, NL80211_IFTYPE_AP) && 1635 - !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) 1635 + !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) && 1636 + !b43_is_mode(wl, NL80211_IFTYPE_ADHOC)) 1636 1637 return; 1637 1638 1638 1639 /* This is the bottom half of the asynchronous beacon update. */
+14 -7
drivers/net/wireless/ipw2x00/ipw2100.c
··· 1903 1903 static int ipw2100_net_init(struct net_device *dev) 1904 1904 { 1905 1905 struct ipw2100_priv *priv = libipw_priv(dev); 1906 + 1907 + return ipw2100_up(priv, 1); 1908 + } 1909 + 1910 + static int ipw2100_wdev_init(struct net_device *dev) 1911 + { 1912 + struct ipw2100_priv *priv = libipw_priv(dev); 1906 1913 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 1907 1914 struct wireless_dev *wdev = &priv->ieee->wdev; 1908 - int ret; 1909 1915 int i; 1910 - 1911 - ret = ipw2100_up(priv, 1); 1912 - if (ret) 1913 - return ret; 1914 1916 1915 1917 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); 1916 1918 ··· 6352 6350 "Error calling register_netdev.\n"); 6353 6351 goto fail; 6354 6352 } 6353 + registered = 1; 6354 + 6355 + err = ipw2100_wdev_init(dev); 6356 + if (err) 6357 + goto fail; 6355 6358 6356 6359 mutex_lock(&priv->action_mutex); 6357 - registered = 1; 6358 6360 6359 6361 IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev)); 6360 6362 ··· 6395 6389 6396 6390 fail_unlock: 6397 6391 mutex_unlock(&priv->action_mutex); 6398 - 6392 + wiphy_unregister(priv->ieee->wdev.wiphy); 6393 + kfree(priv->ieee->bg_band.channels); 6399 6394 fail: 6400 6395 if (dev) { 6401 6396 if (registered)
+26 -13
drivers/net/wireless/ipw2x00/ipw2200.c
··· 11425 11425 /* Called by register_netdev() */ 11426 11426 static int ipw_net_init(struct net_device *dev) 11427 11427 { 11428 + int rc = 0; 11429 + struct ipw_priv *priv = libipw_priv(dev); 11430 + 11431 + mutex_lock(&priv->mutex); 11432 + if (ipw_up(priv)) 11433 + rc = -EIO; 11434 + mutex_unlock(&priv->mutex); 11435 + 11436 + return rc; 11437 + } 11438 + 11439 + static int ipw_wdev_init(struct net_device *dev) 11440 + { 11428 11441 int i, rc = 0; 11429 11442 struct ipw_priv *priv = libipw_priv(dev); 11430 11443 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 11431 11444 struct wireless_dev *wdev = &priv->ieee->wdev; 11432 - mutex_lock(&priv->mutex); 11433 - 11434 - if (ipw_up(priv)) { 11435 - rc = -EIO; 11436 - goto out; 11437 - } 11438 11445 11439 11446 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); 11440 11447 ··· 11526 11519 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); 11527 11520 11528 11521 /* With that information in place, we can now register the wiphy... */ 11529 - if (wiphy_register(wdev->wiphy)) { 11522 + if (wiphy_register(wdev->wiphy)) 11530 11523 rc = -EIO; 11531 - goto out; 11532 - } 11533 - 11534 11524 out: 11535 - mutex_unlock(&priv->mutex); 11536 11525 return rc; 11537 11526 } 11538 11527 ··· 11835 11832 goto out_remove_sysfs; 11836 11833 } 11837 11834 11835 + err = ipw_wdev_init(net_dev); 11836 + if (err) { 11837 + IPW_ERROR("failed to register wireless device\n"); 11838 + goto out_unregister_netdev; 11839 + } 11840 + 11838 11841 #ifdef CONFIG_IPW2200_PROMISCUOUS 11839 11842 if (rtap_iface) { 11840 11843 err = ipw_prom_alloc(priv); 11841 11844 if (err) { 11842 11845 IPW_ERROR("Failed to register promiscuous network " 11843 11846 "device (error %d).\n", err); 11844 - unregister_netdev(priv->net_dev); 11845 - goto out_remove_sysfs; 11847 + wiphy_unregister(priv->ieee->wdev.wiphy); 11848 + kfree(priv->ieee->a_band.channels); 11849 + kfree(priv->ieee->bg_band.channels); 11850 + goto out_unregister_netdev; 11846 11851 } 11847 11852 } 11848 11853 #endif ··· 11862 11851 11863 11852 return 0; 11864 11853 11854 + out_unregister_netdev: 11855 + unregister_netdev(priv->net_dev); 11865 11856 out_remove_sysfs: 11866 11857 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); 11867 11858 out_release_irq:
+8 -5
drivers/net/wireless/iwlegacy/iwl-3945-rs.c
··· 822 822 823 823 out: 824 824 825 - rs_sta->last_txrate_idx = index; 826 - if (sband->band == IEEE80211_BAND_5GHZ) 827 - info->control.rates[0].idx = rs_sta->last_txrate_idx - 828 - IWL_FIRST_OFDM_RATE; 829 - else 825 + if (sband->band == IEEE80211_BAND_5GHZ) { 826 + if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE)) 827 + index = IWL_FIRST_OFDM_RATE; 828 + rs_sta->last_txrate_idx = index; 829 + info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE; 830 + } else { 831 + rs_sta->last_txrate_idx = index; 830 832 info->control.rates[0].idx = rs_sta->last_txrate_idx; 833 + } 831 834 832 835 IWL_DEBUG_RATE(priv, "leave: %d\n", index); 833 836 }
+2 -2
drivers/net/wireless/iwlegacy/iwl-core.c
··· 937 937 &priv->contexts[IWL_RXON_CTX_BSS]); 938 938 #endif 939 939 940 - wake_up_interruptible(&priv->wait_command_queue); 940 + wake_up(&priv->wait_command_queue); 941 941 942 942 /* Keep the restart process from trying to send host 943 943 * commands by clearing the INIT status bit */ ··· 1746 1746 1747 1747 /* Set the FW error flag -- cleared on iwl_down */ 1748 1748 set_bit(STATUS_FW_ERROR, &priv->status); 1749 - wake_up_interruptible(&priv->wait_command_queue); 1749 + wake_up(&priv->wait_command_queue); 1750 1750 /* 1751 1751 * Keep the restart process from trying to send host 1752 1752 * commands by clearing the INIT status bit
+1 -1
drivers/net/wireless/iwlegacy/iwl-hcmd.c
··· 167 167 goto out; 168 168 } 169 169 170 - ret = wait_event_interruptible_timeout(priv->wait_command_queue, 170 + ret = wait_event_timeout(priv->wait_command_queue, 171 171 !test_bit(STATUS_HCMD_ACTIVE, &priv->status), 172 172 HOST_COMPLETE_TIMEOUT); 173 173 if (!ret) {
+3 -1
drivers/net/wireless/iwlegacy/iwl-tx.c
··· 625 625 cmd = txq->cmd[cmd_index]; 626 626 meta = &txq->meta[cmd_index]; 627 627 628 + txq->time_stamp = jiffies; 629 + 628 630 pci_unmap_single(priv->pci_dev, 629 631 dma_unmap_addr(meta, mapping), 630 632 dma_unmap_len(meta, len), ··· 647 645 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 648 646 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", 649 647 iwl_legacy_get_cmd_string(cmd->hdr.cmd)); 650 - wake_up_interruptible(&priv->wait_command_queue); 648 + wake_up(&priv->wait_command_queue); 651 649 } 652 650 653 651 /* Mark as unmapped */
+4 -4
drivers/net/wireless/iwlegacy/iwl3945-base.c
··· 841 841 wiphy_rfkill_set_hw_state(priv->hw->wiphy, 842 842 test_bit(STATUS_RF_KILL_HW, &priv->status)); 843 843 else 844 - wake_up_interruptible(&priv->wait_command_queue); 844 + wake_up(&priv->wait_command_queue); 845 845 } 846 846 847 847 /** ··· 2269 2269 iwl3945_reg_txpower_periodic(priv); 2270 2270 2271 2271 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 2272 - wake_up_interruptible(&priv->wait_command_queue); 2272 + wake_up(&priv->wait_command_queue); 2273 2273 2274 2274 return; 2275 2275 ··· 2300 2300 iwl_legacy_clear_driver_stations(priv); 2301 2301 2302 2302 /* Unblock any waiting calls */ 2303 - wake_up_interruptible_all(&priv->wait_command_queue); 2303 + wake_up_all(&priv->wait_command_queue); 2304 2304 2305 2305 /* Wipe out the EXIT_PENDING status bit if we are not actually 2306 2306 * exiting the module */ ··· 2853 2853 2854 2854 /* Wait for START_ALIVE from ucode. Otherwise callbacks from 2855 2855 * mac80211 will not be run successfully. */ 2856 - ret = wait_event_interruptible_timeout(priv->wait_command_queue, 2856 + ret = wait_event_timeout(priv->wait_command_queue, 2857 2857 test_bit(STATUS_READY, &priv->status), 2858 2858 UCODE_READY_TIMEOUT); 2859 2859 if (!ret) {
+5 -5
drivers/net/wireless/iwlegacy/iwl4965-base.c
··· 576 576 wiphy_rfkill_set_hw_state(priv->hw->wiphy, 577 577 test_bit(STATUS_RF_KILL_HW, &priv->status)); 578 578 else 579 - wake_up_interruptible(&priv->wait_command_queue); 579 + wake_up(&priv->wait_command_queue); 580 580 } 581 581 582 582 /** ··· 926 926 handled |= CSR_INT_BIT_FH_TX; 927 927 /* Wake up uCode load routine, now that load is complete */ 928 928 priv->ucode_write_complete = 1; 929 - wake_up_interruptible(&priv->wait_command_queue); 929 + wake_up(&priv->wait_command_queue); 930 930 } 931 931 932 932 if (inta & ~handled) { ··· 1795 1795 iwl4965_rf_kill_ct_config(priv); 1796 1796 1797 1797 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 1798 - wake_up_interruptible(&priv->wait_command_queue); 1798 + wake_up(&priv->wait_command_queue); 1799 1799 1800 1800 iwl_legacy_power_update_mode(priv, true); 1801 1801 IWL_DEBUG_INFO(priv, "Updated power mode\n"); ··· 1828 1828 iwl_legacy_clear_driver_stations(priv); 1829 1829 1830 1830 /* Unblock any waiting calls */ 1831 - wake_up_interruptible_all(&priv->wait_command_queue); 1831 + wake_up_all(&priv->wait_command_queue); 1832 1832 1833 1833 /* Wipe out the EXIT_PENDING status bit if we are not actually 1834 1834 * exiting the module */ ··· 2266 2266 2267 2267 /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from 2268 2268 * mac80211 will not be run successfully. */ 2269 - ret = wait_event_interruptible_timeout(priv->wait_command_queue, 2269 + ret = wait_event_timeout(priv->wait_command_queue, 2270 2270 test_bit(STATUS_READY, &priv->status), 2271 2271 UCODE_READY_TIMEOUT); 2272 2272 if (!ret) {
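All of the iwlegacy wait/wake conversions above share a single rationale: the waiters sit on firmware handshakes (ucode alive, host command completion), and an interruptible wait aborts with -ERESTARTSYS as soon as the calling process has any signal pending, which made commands look like firmware timeouts. An uninterruptible but still bounded wait can only end in completion or a real timeout:

    ret = wait_event_timeout(priv->wait_command_queue,
                             !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
                             HOST_COMPLETE_TIMEOUT);
    if (!ret)
            IWL_ERR(priv, "command timed out\n");  /* genuinely late,
                                                      not a signal */

The txq->time_stamp update in iwl-tx.c feeds the same robustness theme: refreshing the stamp when a command completes keeps the stuck-queue watchdog from firing on a queue that is actually making progress.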
+1 -1
drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
··· 167 167 168 168 memset(&cmd, 0, sizeof(cmd)); 169 169 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 170 - memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib)); 170 + memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib)); 171 171 if (!(cmd.radio_sensor_offset)) 172 172 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; 173 173
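This is the classic sizeof-on-a-pointer bug: offset_calib is a pointer, so sizeof(offset_calib) is 4 or 8 bytes (the pointer itself) while sizeof(*offset_calib) is the 2-byte datum actually meant to be copied — the old code over-read the calibration data and over-wrote the command field. Reduced to its essentials (the getter name is hypothetical):

    struct iwl_calib_temperature_offset_cmd cmd;        /* __le16 field   */
    const __le16 *offset_calib = eeprom_temp_offset();  /* hypothetical   */

    memcpy(&cmd.radio_sensor_offset, offset_calib,
           sizeof(offset_calib));   /* BUG: copies 8 bytes on 64-bit */
    memcpy(&cmd.radio_sensor_offset, offset_calib,
           sizeof(*offset_calib));  /* OK: copies the 2-byte value   */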
+5
drivers/net/wireless/iwlwifi/iwl-agn.c
··· 2140 2140 IEEE80211_HW_SPECTRUM_MGMT | 2141 2141 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 2142 2142 2143 + /* 2144 + * Including the following line will crash some AP's. This 2145 + * workaround removes the stimulus which causes the crash until 2146 + * the AP software can be fixed. 2143 2147 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 2148 + */ 2144 2149 2145 2150 hw->flags |= IEEE80211_HW_SUPPORTS_PS | 2146 2151 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+16 -14
drivers/net/wireless/iwlwifi/iwl-scan.c
··· 405 405 406 406 mutex_lock(&priv->mutex); 407 407 408 - if (test_bit(STATUS_SCANNING, &priv->status) && 409 - priv->scan_type != IWL_SCAN_NORMAL) { 410 - IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); 411 - ret = -EAGAIN; 412 - goto out_unlock; 413 - } 414 - 415 - /* mac80211 will only ask for one band at a time */ 416 - priv->scan_request = req; 417 - priv->scan_vif = vif; 418 - 419 408 /* 420 409 * If an internal scan is in progress, just set 421 410 * up the scan_request as per above. 422 411 */ 423 412 if (priv->scan_type != IWL_SCAN_NORMAL) { 424 - IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n"); 413 + IWL_DEBUG_SCAN(priv, 414 + "SCAN request during internal scan - defer\n"); 415 + priv->scan_request = req; 416 + priv->scan_vif = vif; 425 417 ret = 0; 426 - } else 418 + } else { 419 + priv->scan_request = req; 420 + priv->scan_vif = vif; 421 + /* 422 + * mac80211 will only ask for one band at a time 423 + * so using channels[0] here is ok 424 + */ 427 425 ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, 428 426 req->channels[0]->band); 427 + if (ret) { 428 + priv->scan_request = NULL; 429 + priv->scan_vif = NULL; 430 + } 431 + } 429 432 430 433 IWL_DEBUG_MAC80211(priv, "leave\n"); 431 434 432 - out_unlock: 433 435 mutex_unlock(&priv->mutex); 434 436 435 437 return ret;
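The rewritten mac80211 scan entry point fixes two state bugs at once: a request arriving during an internal scan is now explicitly deferred (stored in scan_request/scan_vif for later) rather than half-started, and a failed iwl_scan_initiate() no longer leaves those pointers dangling. The underlying discipline — publish state only once the operation it describes has succeeded, or roll it back:

    priv->scan_request = req;
    priv->scan_vif = vif;
    ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
                            req->channels[0]->band);
    if (ret) {
            priv->scan_request = NULL;  /* undo on failure */
            priv->scan_vif = NULL;
    }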
+2
drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
··· 771 771 cmd = txq->cmd[cmd_index]; 772 772 meta = &txq->meta[cmd_index]; 773 773 774 + txq->time_stamp = jiffies; 775 + 774 776 iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); 775 777 776 778 /* Input error checking is done when commands are added to queue. */
+26 -21
drivers/net/wireless/rt2x00/rt2800lib.c
··· 3697 3697 rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg); 3698 3698 3699 3699 /* Apparently the data is read from end to start */ 3700 - rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, 3701 - (u32 *)&rt2x00dev->eeprom[i]); 3702 - rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, 3703 - (u32 *)&rt2x00dev->eeprom[i + 2]); 3704 - rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, 3705 - (u32 *)&rt2x00dev->eeprom[i + 4]); 3706 - rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, 3707 - (u32 *)&rt2x00dev->eeprom[i + 6]); 3700 + rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg); 3701 + /* The returned value is in CPU order, but eeprom is le */ 3702 + rt2x00dev->eeprom[i] = cpu_to_le32(reg); 3703 + rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg); 3704 + *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg); 3705 + rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg); 3706 + *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg); 3707 + rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, &reg); 3708 + *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg); 3708 3709 3709 3710 mutex_unlock(&rt2x00dev->csr_mutex); 3710 3711 } ··· 3871 3870 return -ENODEV; 3872 3871 } 3873 3872 3874 - if (!rt2x00_rf(rt2x00dev, RF2820) && 3875 - !rt2x00_rf(rt2x00dev, RF2850) && 3876 - !rt2x00_rf(rt2x00dev, RF2720) && 3877 - !rt2x00_rf(rt2x00dev, RF2750) && 3878 - !rt2x00_rf(rt2x00dev, RF3020) && 3879 - !rt2x00_rf(rt2x00dev, RF2020) && 3880 - !rt2x00_rf(rt2x00dev, RF3021) && 3881 - !rt2x00_rf(rt2x00dev, RF3022) && 3882 - !rt2x00_rf(rt2x00dev, RF3052) && 3883 - !rt2x00_rf(rt2x00dev, RF3320) && 3884 - !rt2x00_rf(rt2x00dev, RF5370) && 3885 - !rt2x00_rf(rt2x00dev, RF5390)) { 3886 - ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 3873 + switch (rt2x00dev->chip.rf) { 3874 + case RF2820: 3875 + case RF2850: 3876 + case RF2720: 3877 + case RF2750: 3878 + case RF3020: 3879 + case RF2020: 3880 + case RF3021: 3881 + case RF3022: 3882 + case RF3052: 3883 + case RF3320: 3884 + case RF5370: 3885 + case RF5390: 3886 + break; 3887 + default: 3888 + ERROR(rt2x00dev, "Invalid RF chipset 0x%x detected.\n", 3889 + rt2x00dev->chip.rf); 3887 3890 return -ENODEV; 3888 3891 } 3889 3892
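Two independent fixes here. The efuse one is an endianness bug: rt2800_register_read_lock() returns the value in CPU byte order, but rt2x00dev->eeprom[] is defined as little-endian storage, so each 32-bit chunk must pass through cpu_to_le32() before landing in the array — the old direct casts stored CPU-order words, which per the change's intent breaks big-endian hosts:

    u32 reg;

    rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
    *(__le32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);  /* le storage */

The RF-chipset check is a diagnostics change: the switch is equivalent to the old chain of rt2x00_rf() tests, but the default arm can now report which unknown RF value was read instead of a bare "Invalid RF chipset detected".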
+8
drivers/net/wireless/rtlwifi/core.c
··· 610 610 611 611 mac->link_state = MAC80211_NOLINK; 612 612 memset(mac->bssid, 0, 6); 613 + 614 + /* reset sec info */ 615 + rtl_cam_reset_sec_info(hw); 616 + 617 + rtl_cam_reset_all_entry(hw); 613 618 mac->vendor = PEER_UNKNOWN; 614 619 615 620 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, ··· 1068 1063 *or clear all entry here. 1069 1064 */ 1070 1065 rtl_cam_delete_one_entry(hw, mac_addr, key_idx); 1066 + 1067 + rtl_cam_reset_sec_info(hw); 1068 + 1071 1069 break; 1072 1070 default: 1073 1071 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+6 -5
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
··· 549 549 (tcb_desc->rts_use_shortpreamble ? 1 : 0) 550 550 : (tcb_desc->rts_use_shortgi ? 1 : 0))); 551 551 if (mac->bw_40) { 552 - if (tcb_desc->packet_bw) { 552 + if (rate_flag & IEEE80211_TX_RC_DUP_DATA) { 553 553 SET_TX_DESC_DATA_BW(txdesc, 1); 554 554 SET_TX_DESC_DATA_SC(txdesc, 3); 555 + } else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){ 556 + SET_TX_DESC_DATA_BW(txdesc, 1); 557 + SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc); 555 558 } else { 556 559 SET_TX_DESC_DATA_BW(txdesc, 0); 557 - if (rate_flag & IEEE80211_TX_RC_DUP_DATA) 558 - SET_TX_DESC_DATA_SC(txdesc, 559 - mac->cur_40_prime_sc); 560 - } 560 + SET_TX_DESC_DATA_SC(txdesc, 0); 561 + } 561 562 } else { 562 563 SET_TX_DESC_DATA_BW(txdesc, 0); 563 564 SET_TX_DESC_DATA_SC(txdesc, 0);
+1
drivers/net/wireless/rtlwifi/usb.c
··· 863 863 u8 tid = 0; 864 864 u16 seq_number = 0; 865 865 866 + memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); 866 867 if (ieee80211_is_auth(fc)) { 867 868 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n")); 868 869 rtl_ips_nic_on(hw);
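tcb_desc is a stack variable that the transmit path fills in field by field, and only some fields on any given path; without the memset the rest is whatever happened to be on the stack. The two idiomatic fixes — the patch uses the first:

    struct rtl_tcb_desc tcb_desc;
    memset(&tcb_desc, 0, sizeof(tcb_desc));   /* what the patch does */

    struct rtl_tcb_desc tcb_desc = {};        /* equivalent gcc-style
                                                 zero initializer */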
+2 -2
drivers/net/xen-netback/interface.c
··· 327 327 xenvif_get(vif); 328 328 329 329 rtnl_lock(); 330 - if (netif_running(vif->dev)) 331 - xenvif_up(vif); 332 330 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) 333 331 dev_set_mtu(vif->dev, ETH_DATA_LEN); 334 332 netdev_update_features(vif->dev); 335 333 netif_carrier_on(vif->dev); 334 + if (netif_running(vif->dev)) 335 + xenvif_up(vif); 336 336 rtnl_unlock(); 337 337 338 338 return 0;
+5 -1
drivers/pci/pci.c
··· 77 77 unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; 78 78 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; 79 79 80 - enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; 80 + enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF; 81 81 82 82 /* 83 83 * The default CLS is used if arch didn't set CLS explicitly and not ··· 3568 3568 pci_hotplug_io_size = memparse(str + 9, &str); 3569 3569 } else if (!strncmp(str, "hpmemsize=", 10)) { 3570 3570 pci_hotplug_mem_size = memparse(str + 10, &str); 3571 + } else if (!strncmp(str, "pcie_bus_tune_off", 17)) { 3572 + pcie_bus_config = PCIE_BUS_TUNE_OFF; 3571 3573 } else if (!strncmp(str, "pcie_bus_safe", 13)) { 3572 3574 pcie_bus_config = PCIE_BUS_SAFE; 3573 3575 } else if (!strncmp(str, "pcie_bus_perf", 13)) { 3574 3576 pcie_bus_config = PCIE_BUS_PERFORMANCE; 3577 + } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) { 3578 + pcie_bus_config = PCIE_BUS_PEER2PEER; 3575 3579 } else { 3576 3580 printk(KERN_ERR "PCI: Unknown option `%s'\n", 3577 3581 str);
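The PCIe Max Payload Size policy becomes opt-in: the default moves from PCIE_BUS_SAFE to PCIE_BUS_TUNE_OFF (leave whatever firmware configured alone), and the kernel command line grows matching selectors alongside the existing ones. Booting with, for example:

    pci=pcie_bus_safe        # largest MPS that is safe for the whole tree
    pci=pcie_bus_perf        # tune each device for performance
    pci=pcie_bus_peer2peer   # clamp MPS so peer-to-peer DMA stays valid

selects the corresponding pcie_bus_config value parsed in the pci_setup() hunk above; the behavioural side of each mode lives in the probe.c hunk that follows.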
+15 -2
drivers/pci/probe.c
··· 1351 1351 * will occur as normal. 1352 1352 */ 1353 1353 if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) || 1354 - dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)) 1354 + (dev->bus->self && 1355 + dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT))) 1355 1356 *smpss = 0; 1356 1357 1357 1358 if (*smpss > dev->pcie_mpss) ··· 1458 1457 */ 1459 1458 void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) 1460 1459 { 1461 - u8 smpss = mpss; 1460 + u8 smpss; 1462 1461 1463 1462 if (!pci_is_pcie(bus->self)) 1464 1463 return; 1465 1464 1465 + if (pcie_bus_config == PCIE_BUS_TUNE_OFF) 1466 + return; 1467 + 1468 + /* FIXME - Peer to peer DMA is possible, though the endpoint would need 1469 + * to be aware to the MPS of the destination. To work around this, 1470 + * simply force the MPS of the entire system to the smallest possible. 1471 + */ 1472 + if (pcie_bus_config == PCIE_BUS_PEER2PEER) 1473 + smpss = 0; 1474 + 1466 1475 if (pcie_bus_config == PCIE_BUS_SAFE) { 1476 + smpss = mpss; 1477 + 1467 1478 pcie_find_smpss(bus->self, &smpss); 1468 1479 pci_walk_bus(bus, pcie_find_smpss, &smpss); 1469 1480 }
+1
drivers/rtc/rtc-imxdi.c
··· 35 35 #include <linux/module.h> 36 36 #include <linux/platform_device.h> 37 37 #include <linux/rtc.h> 38 + #include <linux/sched.h> 38 39 #include <linux/workqueue.h> 39 40 40 41 /* DryIce Register Definitions */
+26
drivers/rtc/rtc-s3c.c
··· 51 51 52 52 static DEFINE_SPINLOCK(s3c_rtc_pie_lock); 53 53 54 + static void s3c_rtc_alarm_clk_enable(bool enable) 55 + { 56 + static DEFINE_SPINLOCK(s3c_rtc_alarm_clk_lock); 57 + static bool alarm_clk_enabled; 58 + unsigned long irq_flags; 59 + 60 + spin_lock_irqsave(&s3c_rtc_alarm_clk_lock, irq_flags); 61 + if (enable) { 62 + if (!alarm_clk_enabled) { 63 + clk_enable(rtc_clk); 64 + alarm_clk_enabled = true; 65 + } 66 + } else { 67 + if (alarm_clk_enabled) { 68 + clk_disable(rtc_clk); 69 + alarm_clk_enabled = false; 70 + } 71 + } 72 + spin_unlock_irqrestore(&s3c_rtc_alarm_clk_lock, irq_flags); 73 + } 74 + 54 75 /* IRQ Handlers */ 55 76 56 77 static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) ··· 85 64 writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP); 86 65 87 66 clk_disable(rtc_clk); 67 + 68 + s3c_rtc_alarm_clk_enable(false); 69 + 88 70 return IRQ_HANDLED; 89 71 } 90 72 ··· 120 96 121 97 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); 122 98 clk_disable(rtc_clk); 99 + 100 + s3c_rtc_alarm_clk_enable(enabled); 123 101 124 102 return 0; 125 103 }
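The s3c RTC keeps its clock gated except while servicing register accesses, but the alarm path needs the clock held for as long as an alarm is armed — and clk_enable()/clk_disable() calls must stay strictly balanced. The helper added above solves this with a tiny state machine: a static flag guarded by a spinlock, acting only on transitions. The same pattern, generalized:

    /* idempotent clock gate: safe to call with the same state twice */
    static void gated_clk_set(struct clk *clk, bool on)
    {
            static DEFINE_SPINLOCK(lock);
            static bool enabled;
            unsigned long flags;

            spin_lock_irqsave(&lock, flags);
            if (on && !enabled) {
                    clk_enable(clk);
                    enabled = true;
            } else if (!on && enabled) {
                    clk_disable(clk);
                    enabled = false;
            }
            spin_unlock_irqrestore(&lock, flags);
    }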
+6 -2
drivers/s390/cio/cio.c
··· 654 654 static int console_subchannel_in_use; 655 655 656 656 /* 657 - * Use tpi to get a pending interrupt, call the interrupt handler and 658 - * return a pointer to the subchannel structure. 657 + * Use cio_tpi to get a pending interrupt and call the interrupt handler. 658 + * Return non-zero if an interrupt was processed, zero otherwise. 659 659 */ 660 660 static int cio_tpi(void) 661 661 { ··· 667 667 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; 668 668 if (tpi(NULL) != 1) 669 669 return 0; 670 + if (tpi_info->adapter_IO) { 671 + do_adapter_IO(tpi_info->isc); 672 + return 1; 673 + } 670 674 irb = (struct irb *)&S390_lowcore.irb; 671 675 /* Store interrupt response block to lowcore. */ 672 676 if (tsch(tpi_info->schid, irb) != 0)
+2
drivers/scsi/3w-9xxx.c
··· 1800 1800 switch (retval) { 1801 1801 case SCSI_MLQUEUE_HOST_BUSY: 1802 1802 twa_free_request_id(tw_dev, request_id); 1803 + twa_unmap_scsi_data(tw_dev, request_id); 1803 1804 break; 1804 1805 case 1: 1805 1806 tw_dev->state[request_id] = TW_S_COMPLETED; 1806 1807 twa_free_request_id(tw_dev, request_id); 1808 + twa_unmap_scsi_data(tw_dev, request_id); 1807 1809 SCpnt->result = (DID_ERROR << 16); 1808 1810 done(SCpnt); 1809 1811 retval = 0;
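Before this fix, the two early-exit arms of the queuecommand switch released the request ID but leaked the scatter-gather DMA mapping taken earlier in the function. The shape of the bug generalizes to any multi-step setup; a sketch with illustrative helper names (not the driver's):

    static int queue_request(void *req)
    {
            int err;

            err = alloc_id(req);
            if (err)
                    return err;
            err = map_dma(req);
            if (err)
                    goto out_free_id;
            err = submit(req);
            if (err)
                    goto out_unmap;          /* the step the fix adds */
            return 0;

    out_unmap:
            unmap_dma(req);                  /* unwind in reverse order */
    out_free_id:
            free_id(req);
            return err;
    }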
+1
drivers/scsi/Kconfig
··· 837 837 # (temporary): known alpha quality driver 838 838 depends on EXPERIMENTAL 839 839 select SCSI_SAS_LIBSAS 840 + select SCSI_SAS_HOST_SMP 840 841 ---help--- 841 842 This driver supports the 6Gb/s SAS capabilities of the storage 842 843 control unit found in the Intel(R) C600 series chipset.
+1 -1
drivers/scsi/Makefile
··· 88 88 obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o 89 89 obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o 90 90 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ 91 - obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/ 91 + obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/ 92 92 obj-$(CONFIG_SCSI_LPFC) += lpfc/ 93 93 obj-$(CONFIG_SCSI_BFA_FC) += bfa/ 94 94 obj-$(CONFIG_SCSI_PAS16) += pas16.o
+2
drivers/scsi/aacraid/commsup.c
··· 1283 1283 kfree(aac->queues); 1284 1284 aac->queues = NULL; 1285 1285 free_irq(aac->pdev->irq, aac); 1286 + if (aac->msi) 1287 + pci_disable_msi(aac->pdev); 1286 1288 kfree(aac->fsa_dev); 1287 1289 aac->fsa_dev = NULL; 1288 1290 quirks = aac_get_driver_ident(index)->quirks;
+1 -1
drivers/scsi/bnx2i/bnx2i_hwi.c
··· 563 563 nopout_wqe->itt = ((u16)task->itt | 564 564 (ISCSI_TASK_TYPE_MPATH << 565 565 ISCSI_TMF_REQUEST_TYPE_SHIFT)); 566 - nopout_wqe->ttt = nopout_hdr->ttt; 566 + nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt); 567 567 nopout_wqe->flags = 0; 568 568 if (!unsol) 569 569 nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
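The bnx2i one-liner converts the target transfer tag with be32_to_cpu() before handing it to the hardware: the tag sits in the iSCSI header in network (big-endian) order, while the WQE field is consumed in host order. A small userspace illustration of why the raw copy only happens to work on big-endian machines:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t wire_ttt = htonl(0x12345678);  /* as carried in the PDU */

            printf("raw copy : 0x%08x\n", (unsigned)wire_ttt);        /* wrong on LE hosts */
            printf("converted: 0x%08x\n", (unsigned)ntohl(wire_ttt)); /* like be32_to_cpu() */
            return 0;
    }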
+1 -1
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
··· 913 913 struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; 914 914 915 915 if (csk->l2t) { 916 - l2t_release(L2DATA(t3dev), csk->l2t); 916 + l2t_release(t3dev, csk->l2t); 917 917 csk->l2t = NULL; 918 918 cxgbi_sock_put(csk); 919 919 }
+8 -5
drivers/scsi/fcoe/fcoe.c
··· 432 432 u8 flogi_maddr[ETH_ALEN]; 433 433 const struct net_device_ops *ops; 434 434 435 + rtnl_lock(); 436 + 435 437 /* 436 438 * Don't listen for Ethernet packets anymore. 437 439 * synchronize_net() ensures that the packet handlers are not running ··· 462 460 FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE" 463 461 " specific feature for LLD.\n"); 464 462 } 463 + 464 + rtnl_unlock(); 465 465 466 466 /* Release the self-reference taken during fcoe_interface_create() */ 467 467 fcoe_interface_put(fcoe); ··· 1955 1951 fcoe_if_destroy(port->lport); 1956 1952 1957 1953 /* Do not tear down the fcoe interface for NPIV port */ 1958 - if (!npiv) { 1959 - rtnl_lock(); 1954 + if (!npiv) 1960 1955 fcoe_interface_cleanup(fcoe); 1961 - rtnl_unlock(); 1962 - } 1963 1956 1964 1957 mutex_unlock(&fcoe_config_mutex); 1965 1958 } ··· 2010 2009 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", 2011 2010 netdev->name); 2012 2011 rc = -EIO; 2012 + rtnl_unlock(); 2013 2013 fcoe_interface_cleanup(fcoe); 2014 - goto out_nodev; 2014 + goto out_nortnl; 2015 2015 } 2016 2016 2017 2017 /* Make this the "master" N_Port */ ··· 2029 2027 2030 2028 out_nodev: 2031 2029 rtnl_unlock(); 2030 + out_nortnl: 2032 2031 mutex_unlock(&fcoe_config_mutex); 2033 2032 return rc; 2034 2033 }
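The fcoe patch moves rtnl_lock()/rtnl_unlock() into fcoe_interface_cleanup() itself, so the create error path must drop the RTNL before calling it; hence the new out_nortnl label that skips the unlock. The general shape of that unwind, sketched with a hypothetical setup step:

    static int create_example(void)
    {
            int rc;

            mutex_lock(&config_mutex);      /* assumed outer lock */
            rtnl_lock();

            rc = setup_step();              /* hypothetical, may fail */
            if (rc) {
                    rtnl_unlock();          /* drop RTNL first ...        */
                    cleanup_takes_rtnl();   /* ... the callee re-takes it */
                    goto out_nortnl;
            }

            rtnl_unlock();
    out_nortnl:
            mutex_unlock(&config_mutex);
            return rc;
    }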
+37 -20
drivers/scsi/hpsa.c
··· 676 676 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); 677 677 removed[*nremoved] = h->dev[entry]; 678 678 (*nremoved)++; 679 + 680 + /* 681 + * New physical devices won't have target/lun assigned yet 682 + * so we need to preserve the values in the slot we are replacing. 683 + */ 684 + if (new_entry->target == -1) { 685 + new_entry->target = h->dev[entry]->target; 686 + new_entry->lun = h->dev[entry]->lun; 687 + } 688 + 679 689 h->dev[entry] = new_entry; 680 690 added[*nadded] = new_entry; 681 691 (*nadded)++; ··· 1558 1548 } 1559 1549 1560 1550 static int hpsa_update_device_info(struct ctlr_info *h, 1561 - unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) 1551 + unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, 1552 + unsigned char *is_OBDR_device) 1562 1553 { 1563 - #define OBDR_TAPE_INQ_SIZE 49 1554 + 1555 + #define OBDR_SIG_OFFSET 43 1556 + #define OBDR_TAPE_SIG "$DR-10" 1557 + #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) 1558 + #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) 1559 + 1564 1560 unsigned char *inq_buff; 1561 + unsigned char *obdr_sig; 1565 1562 1566 1563 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 1567 1564 if (!inq_buff) ··· 1599 1582 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); 1600 1583 else 1601 1584 this_device->raid_level = RAID_UNKNOWN; 1585 + 1586 + if (is_OBDR_device) { 1587 + /* See if this is a One-Button-Disaster-Recovery device 1588 + * by looking for "$DR-10" at offset 43 in inquiry data. 1589 + */ 1590 + obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; 1591 + *is_OBDR_device = (this_device->devtype == TYPE_ROM && 1592 + strncmp(obdr_sig, OBDR_TAPE_SIG, 1593 + OBDR_SIG_LEN) == 0); 1594 + } 1602 1595 1603 1596 kfree(inq_buff); 1604 1597 return 0; ··· 1743 1716 return 0; 1744 1717 } 1745 1718 1746 - if (hpsa_update_device_info(h, scsi3addr, this_device)) 1719 + if (hpsa_update_device_info(h, scsi3addr, this_device, NULL)) 1747 1720 return 0; 1748 1721 (*nmsa2xxx_enclosures)++; 1749 1722 hpsa_set_bus_target_lun(this_device, bus, target, 0); ··· 1835 1808 */ 1836 1809 struct ReportLUNdata *physdev_list = NULL; 1837 1810 struct ReportLUNdata *logdev_list = NULL; 1838 - unsigned char *inq_buff = NULL; 1839 1811 u32 nphysicals = 0; 1840 1812 u32 nlogicals = 0; 1841 1813 u32 ndev_allocated = 0; ··· 1850 1824 GFP_KERNEL); 1851 1825 physdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1852 1826 logdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1853 - inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 1854 1827 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); 1855 1828 1856 - if (!currentsd || !physdev_list || !logdev_list || 1857 - !inq_buff || !tmpdevice) { 1829 + if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { 1858 1830 dev_err(&h->pdev->dev, "out of memory\n"); 1859 1831 goto out; 1860 1832 } ··· 1887 1863 /* adjust our table of devices */ 1888 1864 nmsa2xxx_enclosures = 0; 1889 1865 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 1890 - u8 *lunaddrbytes; 1866 + u8 *lunaddrbytes, is_OBDR = 0; 1891 1867 1892 1868 /* Figure out where the LUN ID info is coming from */ 1893 1869 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, ··· 1898 1874 continue; 1899 1875 1900 1876 /* Get device type, vendor, model, device id */ 1901 - if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice)) 1877 + if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, 1878 + &is_OBDR)) 1902 1879 continue; /* skip it if we can't talk to it. 
*/ 1903 1880 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, 1904 1881 tmpdevice); ··· 1923 1898 hpsa_set_bus_target_lun(this_device, bus, target, lun); 1924 1899 1925 1900 switch (this_device->devtype) { 1926 - case TYPE_ROM: { 1901 + case TYPE_ROM: 1927 1902 /* We don't *really* support actual CD-ROM devices, 1928 1903 * just "One Button Disaster Recovery" tape drive 1929 1904 * which temporarily pretends to be a CD-ROM drive. ··· 1931 1906 * device by checking for "$DR-10" in bytes 43-48 of 1932 1907 * the inquiry data. 1933 1908 */ 1934 - char obdr_sig[7]; 1935 - #define OBDR_TAPE_SIG "$DR-10" 1936 - strncpy(obdr_sig, &inq_buff[43], 6); 1937 - obdr_sig[6] = '\0'; 1938 - if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) 1939 - /* Not OBDR device, ignore it. */ 1940 - break; 1941 - } 1942 - ncurrent++; 1909 + if (is_OBDR) 1910 + ncurrent++; 1943 1911 break; 1944 1912 case TYPE_DISK: 1945 1913 if (i < nphysicals) ··· 1965 1947 for (i = 0; i < ndev_allocated; i++) 1966 1948 kfree(currentsd[i]); 1967 1949 kfree(currentsd); 1968 - kfree(inq_buff); 1969 1950 kfree(physdev_list); 1970 1951 kfree(logdev_list); 1971 1952 }
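hpsa now probes for One-Button-Disaster-Recovery devices once, inside hpsa_update_device_info(), by matching "$DR-10" at offset 43 of the INQUIRY response rather than re-reading a stale buffer later. A standalone sketch of the signature test:

    #include <stdio.h>
    #include <string.h>

    #define OBDR_SIG_OFFSET 43
    #define OBDR_TAPE_SIG   "$DR-10"
    #define OBDR_SIG_LEN    (sizeof(OBDR_TAPE_SIG) - 1)

    /* Returns 1 if the inquiry buffer carries the OBDR signature. */
    static int is_obdr(const unsigned char *inq, size_t len)
    {
            if (len < OBDR_SIG_OFFSET + OBDR_SIG_LEN)
                    return 0;
            return !memcmp(inq + OBDR_SIG_OFFSET, OBDR_TAPE_SIG, OBDR_SIG_LEN);
    }

    int main(void)
    {
            unsigned char inq[64] = { 0 };

            memcpy(inq + OBDR_SIG_OFFSET, OBDR_TAPE_SIG, OBDR_SIG_LEN);
            printf("OBDR: %d\n", is_obdr(inq, sizeof(inq)));
            return 0;
    }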
+12 -1
drivers/scsi/isci/host.c
··· 531 531 break; 532 532 533 533 case SCU_COMPLETION_TYPE_EVENT: 534 + sci_controller_event_completion(ihost, ent); 535 + break; 536 + 534 537 case SCU_COMPLETION_TYPE_NOTIFY: { 535 538 event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) << 536 539 (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); ··· 1094 1091 struct isci_request *request; 1095 1092 struct isci_request *next_request; 1096 1093 struct sas_task *task; 1094 + u16 active; 1097 1095 1098 1096 INIT_LIST_HEAD(&completed_request_list); 1099 1097 INIT_LIST_HEAD(&errored_request_list); ··· 1185 1181 } 1186 1182 } 1187 1183 1184 + /* the coalesence timeout doubles at each encoding step, so 1185 + * update it based on the ilog2 value of the outstanding requests 1186 + */ 1187 + active = isci_tci_active(ihost); 1188 + writel(SMU_ICC_GEN_VAL(NUMBER, active) | 1189 + SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)), 1190 + &ihost->smu_registers->interrupt_coalesce_control); 1188 1191 } 1189 1192 1190 1193 /** ··· 1482 1471 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1483 1472 1484 1473 /* set the default interrupt coalescence number and timeout value. */ 1485 - sci_controller_set_interrupt_coalescence(ihost, 0x10, 250); 1474 + sci_controller_set_interrupt_coalescence(ihost, 0, 0); 1486 1475 } 1487 1476 1488 1477 static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
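Because the hardware doubles the coalescing timeout at each encoding step, the new isci code programs the TIMER field as ISCI_COALESCE_BASE + ilog2(outstanding requests), scaling the interrupt delay with load. A userspace sketch of that encoding:

    #include <stdio.h>

    /* floor(log2(n)) for n >= 1, mirroring the kernel's ilog2() */
    static unsigned int ilog2_u32(unsigned int n)
    {
            unsigned int r = 0;

            while (n >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            const unsigned int base = 9;    /* ISCI_COALESCE_BASE */
            unsigned int active;

            for (active = 1; active <= 64; active *= 2)
                    printf("active=%2u -> timer field %u\n",
                           active, base + ilog2_u32(active));
            return 0;
    }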
+3
drivers/scsi/isci/host.h
··· 369 369 #define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1)) 370 370 #define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1)) 371 371 372 + /* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */ 373 + #define ISCI_COALESCE_BASE 9 374 + 372 375 /* expander attached sata devices require 3 rnc slots */ 373 376 static inline int sci_remote_device_node_count(struct isci_remote_device *idev) 374 377 {
+28 -19
drivers/scsi/isci/init.c
··· 59 59 #include <linux/firmware.h> 60 60 #include <linux/efi.h> 61 61 #include <asm/string.h> 62 + #include <scsi/scsi_host.h> 62 63 #include "isci.h" 63 64 #include "task.h" 64 65 #include "probe_roms.h" 66 + 67 + #define MAJ 1 68 + #define MIN 0 69 + #define BUILD 0 70 + #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 71 + __stringify(BUILD) 72 + 73 + MODULE_VERSION(DRV_VERSION); 65 74 66 75 static struct scsi_transport_template *isci_transport_template; 67 76 ··· 122 113 module_param(max_concurr_spinup, byte, 0); 123 114 MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); 124 115 116 + static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) 117 + { 118 + struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); 119 + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); 120 + struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); 121 + 122 + return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); 123 + } 124 + 125 + static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); 126 + 127 + struct device_attribute *isci_host_attrs[] = { 128 + &dev_attr_isci_id, 129 + NULL 130 + }; 131 + 125 132 static struct scsi_host_template isci_sht = { 126 133 127 134 .module = THIS_MODULE, ··· 163 138 .slave_alloc = sas_slave_alloc, 164 139 .target_destroy = sas_target_destroy, 165 140 .ioctl = sas_ioctl, 141 + .shost_attrs = isci_host_attrs, 166 142 }; 167 143 168 144 static struct sas_domain_function_template isci_transport_ops = { ··· 258 232 return 0; 259 233 } 260 234 261 - static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) 262 - { 263 - struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); 264 - struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); 265 - struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha); 266 - 267 - return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id); 268 - } 269 - 270 - static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); 271 - 272 235 static void isci_unregister(struct isci_host *isci_host) 273 236 { 274 237 struct Scsi_Host *shost; ··· 266 251 return; 267 252 268 253 shost = isci_host->shost; 269 - device_remove_file(&shost->shost_dev, &dev_attr_isci_id); 270 254 271 255 sas_unregister_ha(&isci_host->sas_ha); 272 256 ··· 429 415 if (err) 430 416 goto err_shost_remove; 431 417 432 - err = device_create_file(&shost->shost_dev, &dev_attr_isci_id); 433 - if (err) 434 - goto err_unregister_ha; 435 - 436 418 return isci_host; 437 419 438 - err_unregister_ha: 439 - sas_unregister_ha(&(isci_host->sas_ha)); 440 420 err_shost_remove: 441 421 scsi_remove_host(shost); 442 422 err_shost: ··· 548 540 { 549 541 int err; 550 542 551 - pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME); 543 + pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n", 544 + DRV_NAME, DRV_VERSION); 552 545 553 546 isci_transport_template = sas_domain_attach_transport(&isci_transport_ops); 554 547 if (!isci_transport_template)
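Exposing isci_id through .shost_attrs in the scsi_host_template, rather than calling device_create_file() after the host is added, lets the SCSI core create and remove the sysfs file automatically, which is why the err_unregister_ha unwind disappears. A sketch of the template-attribute idiom, with hypothetical names:

    static ssize_t my_id_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
    {
            return snprintf(buf, PAGE_SIZE, "%d\n", 0);     /* stub value */
    }

    static DEVICE_ATTR(my_id, S_IRUGO, my_id_show, NULL);

    static struct device_attribute *my_host_attrs[] = {
            &dev_attr_my_id,
            NULL,
    };

    static struct scsi_host_template my_sht = {
            .module      = THIS_MODULE,
            .name        = "my_host",
            .shost_attrs = my_host_attrs,   /* created/removed by the core */
    };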
+13
drivers/scsi/isci/phy.c
··· 104 104 u32 parity_count = 0; 105 105 u32 llctl, link_rate; 106 106 u32 clksm_value = 0; 107 + u32 sp_timeouts = 0; 107 108 108 109 iphy->link_layer_registers = reg; 109 110 ··· 211 210 } 212 211 llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); 213 212 writel(llctl, &iphy->link_layer_registers->link_layer_control); 213 + 214 + sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts); 215 + 216 + /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */ 217 + sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); 218 + 219 + /* Set RATE_CHANGE timeout value to 0x3B (59us). This ensures SCU can 220 + * lock with 3Gb drive when SCU max rate is set to 1.5Gb. 221 + */ 222 + sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); 223 + 224 + writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts); 214 225 215 226 if (is_a2(ihost->pdev)) { 216 227 /* Program the max ARB time for the PHY to 700us so we inter-operate with
+12
drivers/scsi/isci/registers.h
··· 1299 1299 #define SCU_AFE_XCVRCR_OFFSET 0x00DC 1300 1300 #define SCU_AFE_LUTCR_OFFSET 0x00E0 1301 1301 1302 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT (0UL) 1303 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK (0x000000FFUL) 1304 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT (8UL) 1305 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK (0x0000FF00UL) 1306 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT (16UL) 1307 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK (0x00FF0000UL) 1308 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT (24UL) 1309 + #define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK (0xFF000000UL) 1310 + 1311 + #define SCU_SAS_PHYTOV_GEN_VAL(name, value) \ 1312 + SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value) 1313 + 1302 1314 #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0) 1303 1315 #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003) 1304 1316 #define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0)
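The new SCU_SAS_PHYTOV_GEN_VAL() follows the file's shift/mask convention: each field defines a NAME_SHIFT and NAME_MASK pair, and a generator macro pastes them together to encode a value. The phy.c hunk above uses it to clear the default RATE_CHANGE timeout (0x36) and program 0x3B. A compact illustration of the same macro shape:

    #include <stdio.h>

    #define RATE_CHANGE_SHIFT 24UL
    #define RATE_CHANGE_MASK  0xFF000000UL

    /* Paste NAME_SHIFT/NAME_MASK together, in the style of SCU_GEN_VALUE(). */
    #define GEN_VAL(name, value) \
            (((unsigned long)(value) << name##_SHIFT) & name##_MASK)

    int main(void)
    {
            unsigned long reg = GEN_VAL(RATE_CHANGE, 0x36); /* default */

            reg &= ~RATE_CHANGE_MASK;               /* clear the field  */
            reg |= GEN_VAL(RATE_CHANGE, 0x3B);      /* set 0x3B (59us)  */
            printf("timeouts = 0x%08lx\n", reg);
            return 0;
    }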
+16 -14
drivers/scsi/isci/request.c
··· 732 732 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 733 733 return SCI_SUCCESS; 734 734 case SCI_REQ_TASK_WAIT_TC_RESP: 735 + /* The task frame was already confirmed to have been 736 + * sent by the SCU HW. Since the state machine is 737 + * now only waiting for the task response itself, 738 + * abort the request and complete it immediately 739 + * and don't wait for the task response. 740 + */ 735 741 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 736 742 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 737 743 return SCI_SUCCESS; 738 744 case SCI_REQ_ABORTING: 739 - sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 740 - return SCI_SUCCESS; 745 + /* If a request has a termination requested twice, return 746 + * a failure indication, since HW confirmation of the first 747 + * abort is still outstanding. 748 + */ 741 749 case SCI_REQ_COMPLETED: 742 750 default: 743 751 dev_warn(&ireq->owning_controller->pdev->dev, ··· 2407 2399 } 2408 2400 } 2409 2401 2410 - static void isci_request_process_stp_response(struct sas_task *task, 2411 - void *response_buffer) 2402 + static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) 2412 2403 { 2413 - struct dev_to_host_fis *d2h_reg_fis = response_buffer; 2414 2404 struct task_status_struct *ts = &task->task_status; 2415 2405 struct ata_task_resp *resp = (void *)&ts->buf[0]; 2416 2406 2417 - resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6)); 2418 - memcpy(&resp->ending_fis[0], response_buffer + 16, 24); 2407 + resp->frame_len = sizeof(*fis); 2408 + memcpy(resp->ending_fis, fis, sizeof(*fis)); 2419 2409 ts->buf_valid_size = sizeof(*resp); 2420 2410 2421 - /** 2422 - * If the device fault bit is set in the status register, then 2411 + /* If the device fault bit is set in the status register, then 2423 2412 * set the sense data and return. 2424 2413 */ 2425 - if (d2h_reg_fis->status & ATA_DF) 2414 + if (fis->status & ATA_DF) 2426 2415 ts->stat = SAS_PROTO_RESPONSE; 2427 2416 else 2428 2417 ts->stat = SAM_STAT_GOOD; ··· 2433 2428 { 2434 2429 struct sas_task *task = isci_request_access_task(request); 2435 2430 struct ssp_response_iu *resp_iu; 2436 - void *resp_buf; 2437 2431 unsigned long task_flags; 2438 2432 struct isci_remote_device *idev = isci_lookup_device(task->dev); 2439 2433 enum service_response response = SAS_TASK_UNDELIVERED; ··· 2569 2565 task); 2570 2566 2571 2567 if (sas_protocol_ata(task->task_proto)) { 2572 - resp_buf = &request->stp.rsp; 2573 - isci_request_process_stp_response(task, 2574 - resp_buf); 2568 + isci_process_stp_response(task, &request->stp.rsp); 2575 2569 } else if (SAS_PROTOCOL_SSP == task->task_proto) { 2576 2570 2577 2571 /* crack the iu response buffer. */
+1 -1
drivers/scsi/isci/unsolicited_frame_control.c
··· 72 72 */ 73 73 buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE; 74 74 header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header); 75 - size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t); 75 + size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]); 76 76 77 77 /* 78 78 * The Unsolicited Frame buffers are set at the start of the UF
+1 -1
drivers/scsi/isci/unsolicited_frame_control.h
··· 214 214 * starting address of the UF address table. 215 215 * 64-bit pointers are required by the hardware. 216 216 */ 217 - dma_addr_t *array; 217 + u64 *array; 218 218 219 219 /** 220 220 * This field specifies the physical address location for the UF
+41 -18
drivers/scsi/libfc/fc_exch.c
··· 494 494 */ 495 495 error = lport->tt.frame_send(lport, fp); 496 496 497 + if (fh->fh_type == FC_TYPE_BLS) 498 + return error; 499 + 497 500 /* 498 501 * Update the exchange and sequence flags, 499 502 * assuming all frames for the sequence have been sent. ··· 578 575 } 579 576 580 577 /** 581 - * fc_seq_exch_abort() - Abort an exchange and sequence 582 - * @req_sp: The sequence to be aborted 578 + * fc_exch_abort_locked() - Abort an exchange 579 + * @ep: The exchange to be aborted 583 580 * @timer_msec: The period of time to wait before aborting 584 581 * 585 - * Generally called because of a timeout or an abort from the upper layer. 582 + * Locking notes: Called with exch lock held 583 + * 584 + * Return value: 0 on success else error code 586 585 */ 587 - static int fc_seq_exch_abort(const struct fc_seq *req_sp, 588 - unsigned int timer_msec) 586 + static int fc_exch_abort_locked(struct fc_exch *ep, 587 + unsigned int timer_msec) 589 588 { 590 589 struct fc_seq *sp; 591 - struct fc_exch *ep; 592 590 struct fc_frame *fp; 593 591 int error; 594 592 595 - ep = fc_seq_exch(req_sp); 596 - 597 - spin_lock_bh(&ep->ex_lock); 598 593 if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) || 599 - ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) { 600 - spin_unlock_bh(&ep->ex_lock); 594 + ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) 601 595 return -ENXIO; 602 - } 603 596 604 597 /* 605 598 * Send the abort on a new sequence if possible. 606 599 */ 607 600 sp = fc_seq_start_next_locked(&ep->seq); 608 - if (!sp) { 609 - spin_unlock_bh(&ep->ex_lock); 601 + if (!sp) 610 602 return -ENOMEM; 611 - } 612 603 613 604 ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL; 614 605 if (timer_msec) 615 606 fc_exch_timer_set_locked(ep, timer_msec); 616 - spin_unlock_bh(&ep->ex_lock); 617 607 618 608 /* 619 609 * If not logged into the fabric, don't send ABTS but leave ··· 625 629 error = fc_seq_send(ep->lp, sp, fp); 626 630 } else 627 631 error = -ENOBUFS; 632 + return error; 633 + } 634 + 635 + /** 636 + * fc_seq_exch_abort() - Abort an exchange and sequence 637 + * @req_sp: The sequence to be aborted 638 + * @timer_msec: The period of time to wait before aborting 639 + * 640 + * Generally called because of a timeout or an abort from the upper layer. 
641 + * 642 + * Return value: 0 on success else error code 643 + */ 644 + static int fc_seq_exch_abort(const struct fc_seq *req_sp, 645 + unsigned int timer_msec) 646 + { 647 + struct fc_exch *ep; 648 + int error; 649 + 650 + ep = fc_seq_exch(req_sp); 651 + spin_lock_bh(&ep->ex_lock); 652 + error = fc_exch_abort_locked(ep, timer_msec); 653 + spin_unlock_bh(&ep->ex_lock); 628 654 return error; 629 655 } 630 656 ··· 1733 1715 int rc = 1; 1734 1716 1735 1717 spin_lock_bh(&ep->ex_lock); 1718 + fc_exch_abort_locked(ep, 0); 1736 1719 ep->state |= FC_EX_RST_CLEANUP; 1737 1720 if (cancel_delayed_work(&ep->timeout_work)) 1738 1721 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */ ··· 1981 1962 struct fc_exch *ep; 1982 1963 struct fc_seq *sp = NULL; 1983 1964 struct fc_frame_header *fh; 1965 + struct fc_fcp_pkt *fsp = NULL; 1984 1966 int rc = 1; 1985 1967 1986 1968 ep = fc_exch_alloc(lport, fp); ··· 2004 1984 fc_exch_setup_hdr(ep, fp, ep->f_ctl); 2005 1985 sp->cnt++; 2006 1986 2007 - if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) 1987 + if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) { 1988 + fsp = fr_fsp(fp); 2008 1989 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); 1990 + } 2009 1991 2010 1992 if (unlikely(lport->tt.frame_send(lport, fp))) 2011 1993 goto err; ··· 2021 1999 spin_unlock_bh(&ep->ex_lock); 2022 2000 return sp; 2023 2001 err: 2024 - fc_fcp_ddp_done(fr_fsp(fp)); 2002 + if (fsp) 2003 + fc_fcp_ddp_done(fsp); 2025 2004 rc = fc_exch_done_locked(ep); 2026 2005 spin_unlock_bh(&ep->ex_lock); 2027 2006 if (!rc)
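Factoring the abort body into fc_exch_abort_locked() follows the usual _locked naming convention: the helper assumes the caller already holds ex_lock, which is what lets fc_exch_reset() issue the ABTS while holding the lock itself. The convention in miniature, with hypothetical types and helpers:

    /* The "_locked" suffix means: the caller already holds ep->lock. */
    static int do_abort_locked(struct my_exch *ep)
    {
            /* no locking here; lockdep_assert_held(&ep->lock) fits well */
            return send_abts(ep);           /* hypothetical */
    }

    static int do_abort(struct my_exch *ep)
    {
            int err;

            spin_lock_bh(&ep->lock);
            err = do_abort_locked(ep);
            spin_unlock_bh(&ep->lock);
            return err;
    }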
+9 -2
drivers/scsi/libfc/fc_fcp.c
··· 2019 2019 struct fc_fcp_internal *si; 2020 2020 int rc = FAILED; 2021 2021 unsigned long flags; 2022 + int rval; 2023 + 2024 + rval = fc_block_scsi_eh(sc_cmd); 2025 + if (rval) 2026 + return rval; 2022 2027 2023 2028 lport = shost_priv(sc_cmd->device->host); 2024 2029 if (lport->state != LPORT_ST_READY) ··· 2073 2068 int rc = FAILED; 2074 2069 int rval; 2075 2070 2076 - rval = fc_remote_port_chkready(rport); 2071 + rval = fc_block_scsi_eh(sc_cmd); 2077 2072 if (rval) 2078 - goto out; 2073 + return rval; 2079 2074 2080 2075 lport = shost_priv(sc_cmd->device->host); 2081 2076 ··· 2120 2115 unsigned long wait_tmo; 2121 2116 2122 2117 FC_SCSI_DBG(lport, "Resetting host\n"); 2118 + 2119 + fc_block_scsi_eh(sc_cmd); 2123 2120 2124 2121 lport->tt.lport_reset(lport); 2125 2122 wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
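Each libfc error-handler entry point now starts with fc_block_scsi_eh(), which waits out a blocked rport and returns a non-zero verdict (e.g. FAST_IO_FAIL) that the handler must propagate instead of proceeding. The resulting gate pattern, sketched:

    /* Gate every EH callback on the rport state before touching hardware. */
    static int my_eh_device_reset(struct scsi_cmnd *sc)
    {
            int rval = fc_block_scsi_eh(sc);

            if (rval)
                    return rval;    /* rport gone or fast-failed: bail out */

            /* ... safe to issue the reset now ... */
            return SUCCESS;
    }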
+10 -1
drivers/scsi/libfc/fc_lport.c
··· 88 88 */ 89 89 90 90 #include <linux/timer.h> 91 + #include <linux/delay.h> 91 92 #include <linux/slab.h> 92 93 #include <asm/unaligned.h> 93 94 ··· 1030 1029 FCH_EVT_LIPRESET, 0); 1031 1030 fc_vports_linkchange(lport); 1032 1031 fc_lport_reset_locked(lport); 1033 - if (lport->link_up) 1032 + if (lport->link_up) { 1033 + /* 1034 + * Wait up to the resource allocation timeout before 1035 + * doing re-login, since incomplete FIP exchanges 1036 + * from the last session may collide with exchanges 1037 + * in the new session. 1038 + */ 1039 + msleep(lport->r_a_tov); 1034 1040 fc_lport_enter_flogi(lport); 1041 + } 1035 1042 } 1036 1043 1037 1044 /**
+1 -1
drivers/scsi/libsas/sas_expander.c
··· 1721 1721 list_for_each_entry(ch, &ex->children, siblings) { 1722 1722 if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { 1723 1723 res = sas_find_bcast_dev(ch, src_dev); 1724 - if (src_dev) 1724 + if (*src_dev) 1725 1725 return res; 1726 1726 } 1727 1727 }
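The one-character libsas fix is a classic out-parameter bug: src_dev is a pointer-to-pointer that the recursion fills in, so testing src_dev (always non-NULL) instead of *src_dev made every recursive call look successful. Sketched in isolation:

    #include <stdio.h>

    static void find_dev(int **out)
    {
            static int dev = 42;

            *out = &dev;    /* may also legitimately be left NULL */
    }

    int main(void)
    {
            int *found = NULL;

            find_dev(&found);
            if (found)      /* correct: tests the result */
                    printf("found %d\n", *found);
            /* "if (&found)" would always be true - the bug the patch fixes */
            return 0;
    }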
+5 -2
drivers/scsi/qla2xxx/qla_attr.c
··· 1786 1786 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); 1787 1787 } 1788 1788 1789 - if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 1789 + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 1790 1790 if (ha->fw_attributes & BIT_4) { 1791 + int prot = 0; 1791 1792 vha->flags.difdix_supported = 1; 1792 1793 ql_dbg(ql_dbg_user, vha, 0x7082, 1793 1794 "Registered for DIF/DIX type 1 and 3 protection.\n"); 1795 + if (ql2xenabledif == 1) 1796 + prot = SHOST_DIX_TYPE0_PROTECTION; 1794 1797 scsi_host_set_prot(vha->host, 1795 - SHOST_DIF_TYPE1_PROTECTION 1798 + prot | SHOST_DIF_TYPE1_PROTECTION 1796 1799 | SHOST_DIF_TYPE2_PROTECTION 1797 1800 | SHOST_DIF_TYPE3_PROTECTION 1798 1801 | SHOST_DIX_TYPE1_PROTECTION
+18 -18
drivers/scsi/qla2xxx/qla_dbg.c
··· 8 8 /* 9 9 * Table for showing the current message id in use for particular level 10 10 * Change this table for addition of log/debug messages. 11 - * ----------------------------------------------------- 12 - * | Level | Last Value Used | 13 - * ----------------------------------------------------- 14 - * | Module Init and Probe | 0x0116 | 15 - * | Mailbox commands | 0x111e | 16 - * | Device Discovery | 0x2083 | 17 - * | Queue Command and IO tracing | 0x302e | 18 - * | DPC Thread | 0x401c | 19 - * | Async Events | 0x5059 | 20 - * | Timer Routines | 0x600d | 21 - * | User Space Interactions | 0x709c | 22 - * | Task Management | 0x8043 | 23 - * | AER/EEH | 0x900f | 24 - * | Virtual Port | 0xa007 | 25 - * | ISP82XX Specific | 0xb027 | 26 - * | MultiQ | 0xc00b | 27 - * | Misc | 0xd00b | 28 - * ----------------------------------------------------- 11 + * ---------------------------------------------------------------------- 12 + * | Level | Last Value Used | Holes | 13 + * ---------------------------------------------------------------------- 14 + * | Module Init and Probe | 0x0116 | | 15 + * | Mailbox commands | 0x1126 | | 16 + * | Device Discovery | 0x2083 | | 17 + * | Queue Command and IO tracing | 0x302e | 0x3008 | 18 + * | DPC Thread | 0x401c | | 19 + * | Async Events | 0x5059 | | 20 + * | Timer Routines | 0x600d | | 21 + * | User Space Interactions | 0x709d | | 22 + * | Task Management | 0x8041 | | 23 + * | AER/EEH | 0x900f | | 24 + * | Virtual Port | 0xa007 | | 25 + * | ISP82XX Specific | 0xb04f | | 26 + * | MultiQ | 0xc00b | | 27 + * | Misc | 0xd00b | | 28 + * ---------------------------------------------------------------------- 29 29 */ 30 30 31 31 #include "qla_def.h"
+2
drivers/scsi/qla2xxx/qla_def.h
··· 2529 2529 #define DT_ISP8021 BIT_14 2530 2530 #define DT_ISP_LAST (DT_ISP8021 << 1) 2531 2531 2532 + #define DT_T10_PI BIT_25 2532 2533 #define DT_IIDMA BIT_26 2533 2534 #define DT_FWI2 BIT_27 2534 2535 #define DT_ZIO_SUPPORTED BIT_28 ··· 2573 2572 #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha)) 2574 2573 #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) 2575 2574 2575 + #define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI) 2576 2576 #define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA) 2577 2577 #define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2) 2578 2578 #define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
+5
drivers/scsi/qla2xxx/qla_fw.h
··· 537 537 /* 538 538 * If DIF Error is set in comp_status, these additional fields are 539 539 * defined: 540 + * 541 + * !!! NOTE: Firmware sends expected/actual DIF data in big endian 542 + * format; but all of the "data" field gets swab32-d in the beginning 543 + * of qla2x00_status_entry(). 544 + * 540 545 * &data[10] : uint8_t report_runt_bg[2]; - computed guard 541 546 * &data[12] : uint8_t actual_dif[8]; - DIF Data received 542 547 * &data[20] : uint8_t expected_dif[8]; - DIF Data computed
-3
drivers/scsi/qla2xxx/qla_init.c
··· 3838 3838 req = vha->req; 3839 3839 rsp = req->rsp; 3840 3840 3841 - atomic_set(&vha->loop_state, LOOP_UPDATE); 3842 3841 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3843 3842 if (vha->flags.online) { 3844 3843 if (!(rval = qla2x00_fw_ready(vha))) { 3845 3844 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3846 3845 wait_time = 256; 3847 3846 do { 3848 - atomic_set(&vha->loop_state, LOOP_UPDATE); 3849 - 3850 3847 /* Issue a marker after FW becomes ready. */ 3851 3848 qla2x00_marker(vha, req, rsp, 0, 0, 3852 3849 MK_SYNC_ALL);
+29
drivers/scsi/qla2xxx/qla_inline.h
··· 102 102 fcport->d_id.b.al_pa); 103 103 } 104 104 } 105 + 106 + static inline int 107 + qla2x00_hba_err_chk_enabled(srb_t *sp) 108 + { 109 + /* 110 + * Uncomment when corresponding SCSI changes are done. 111 + * 112 + if (!sp->cmd->prot_chk) 113 + return 0; 114 + * 115 + */ 116 + 117 + switch (scsi_get_prot_op(sp->cmd)) { 118 + case SCSI_PROT_READ_STRIP: 119 + case SCSI_PROT_WRITE_INSERT: 120 + if (ql2xenablehba_err_chk >= 1) 121 + return 1; 122 + break; 123 + case SCSI_PROT_READ_PASS: 124 + case SCSI_PROT_WRITE_PASS: 125 + if (ql2xenablehba_err_chk >= 2) 126 + return 1; 127 + break; 128 + case SCSI_PROT_READ_INSERT: 129 + case SCSI_PROT_WRITE_STRIP: 130 + return 1; 131 + } 132 + return 0; 133 + }
+235 -47
drivers/scsi/qla2xxx/qla_iocb.c
··· 709 709 * 710 710 */ 711 711 static inline void 712 - qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, 712 + qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, 713 713 unsigned int protcnt) 714 714 { 715 - struct sd_dif_tuple *spt; 715 + struct scsi_cmnd *cmd = sp->cmd; 716 716 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 717 - unsigned char op = scsi_get_prot_op(cmd); 718 717 719 718 switch (scsi_get_prot_type(cmd)) { 720 - /* For TYPE 0 protection: no checking */ 721 719 case SCSI_PROT_DIF_TYPE0: 722 - pkt->ref_tag_mask[0] = 0x00; 723 - pkt->ref_tag_mask[1] = 0x00; 724 - pkt->ref_tag_mask[2] = 0x00; 725 - pkt->ref_tag_mask[3] = 0x00; 720 + /* 721 + * No check for ql2xenablehba_err_chk, as it would be an 722 + * I/O error if hba tag generation is not done. 723 + */ 724 + pkt->ref_tag = cpu_to_le32((uint32_t) 725 + (0xffffffff & scsi_get_lba(cmd))); 726 + 727 + if (!qla2x00_hba_err_chk_enabled(sp)) 728 + break; 729 + 730 + pkt->ref_tag_mask[0] = 0xff; 731 + pkt->ref_tag_mask[1] = 0xff; 732 + pkt->ref_tag_mask[2] = 0xff; 733 + pkt->ref_tag_mask[3] = 0xff; 726 734 break; 727 735 728 736 /* ··· 738 730 * match LBA in CDB + N 739 731 */ 740 732 case SCSI_PROT_DIF_TYPE2: 741 - if (!ql2xenablehba_err_chk) 742 - break; 743 - 744 - if (scsi_prot_sg_count(cmd)) { 745 - spt = page_address(sg_page(scsi_prot_sglist(cmd))) + 746 - scsi_prot_sglist(cmd)[0].offset; 747 - pkt->app_tag = swab32(spt->app_tag); 748 - pkt->app_tag_mask[0] = 0xff; 749 - pkt->app_tag_mask[1] = 0xff; 750 - } 733 + pkt->app_tag = __constant_cpu_to_le16(0); 734 + pkt->app_tag_mask[0] = 0x0; 735 + pkt->app_tag_mask[1] = 0x0; 751 736 752 737 pkt->ref_tag = cpu_to_le32((uint32_t) 753 738 (0xffffffff & scsi_get_lba(cmd))); 739 + 740 + if (!qla2x00_hba_err_chk_enabled(sp)) 741 + break; 754 742 755 743 /* enable ALL bytes of the ref tag */ 756 744 pkt->ref_tag_mask[0] = 0xff; ··· 767 763 * 16 bit app tag. 
768 764 */ 769 765 case SCSI_PROT_DIF_TYPE1: 770 - if (!ql2xenablehba_err_chk) 766 + pkt->ref_tag = cpu_to_le32((uint32_t) 767 + (0xffffffff & scsi_get_lba(cmd))); 768 + pkt->app_tag = __constant_cpu_to_le16(0); 769 + pkt->app_tag_mask[0] = 0x0; 770 + pkt->app_tag_mask[1] = 0x0; 771 + 772 + if (!qla2x00_hba_err_chk_enabled(sp)) 771 773 break; 772 774 773 - if (protcnt && (op == SCSI_PROT_WRITE_STRIP || 774 - op == SCSI_PROT_WRITE_PASS)) { 775 - spt = page_address(sg_page(scsi_prot_sglist(cmd))) + 776 - scsi_prot_sglist(cmd)[0].offset; 777 - ql_dbg(ql_dbg_io, vha, 0x3008, 778 - "LBA from user %p, lba = 0x%x for cmd=%p.\n", 779 - spt, (int)spt->ref_tag, cmd); 780 - pkt->ref_tag = swab32(spt->ref_tag); 781 - pkt->app_tag_mask[0] = 0x0; 782 - pkt->app_tag_mask[1] = 0x0; 783 - } else { 784 - pkt->ref_tag = cpu_to_le32((uint32_t) 785 - (0xffffffff & scsi_get_lba(cmd))); 786 - pkt->app_tag = __constant_cpu_to_le16(0); 787 - pkt->app_tag_mask[0] = 0x0; 788 - pkt->app_tag_mask[1] = 0x0; 789 - } 790 775 /* enable ALL bytes of the ref tag */ 791 776 pkt->ref_tag_mask[0] = 0xff; 792 777 pkt->ref_tag_mask[1] = 0xff; ··· 791 798 scsi_get_prot_type(cmd), cmd); 792 799 } 793 800 801 + struct qla2_sgx { 802 + dma_addr_t dma_addr; /* OUT */ 803 + uint32_t dma_len; /* OUT */ 794 804 805 + uint32_t tot_bytes; /* IN */ 806 + struct scatterlist *cur_sg; /* IN */ 807 + 808 + /* for book keeping, bzero on initial invocation */ 809 + uint32_t bytes_consumed; 810 + uint32_t num_bytes; 811 + uint32_t tot_partial; 812 + 813 + /* for debugging */ 814 + uint32_t num_sg; 815 + srb_t *sp; 816 + }; 817 + 818 + static int 819 + qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, 820 + uint32_t *partial) 821 + { 822 + struct scatterlist *sg; 823 + uint32_t cumulative_partial, sg_len; 824 + dma_addr_t sg_dma_addr; 825 + 826 + if (sgx->num_bytes == sgx->tot_bytes) 827 + return 0; 828 + 829 + sg = sgx->cur_sg; 830 + cumulative_partial = sgx->tot_partial; 831 + 832 + sg_dma_addr = sg_dma_address(sg); 833 + sg_len = sg_dma_len(sg); 834 + 835 + sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed; 836 + 837 + if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) { 838 + sgx->dma_len = (blk_sz - cumulative_partial); 839 + sgx->tot_partial = 0; 840 + sgx->num_bytes += blk_sz; 841 + *partial = 0; 842 + } else { 843 + sgx->dma_len = sg_len - sgx->bytes_consumed; 844 + sgx->tot_partial += sgx->dma_len; 845 + *partial = 1; 846 + } 847 + 848 + sgx->bytes_consumed += sgx->dma_len; 849 + 850 + if (sg_len == sgx->bytes_consumed) { 851 + sg = sg_next(sg); 852 + sgx->num_sg++; 853 + sgx->cur_sg = sg; 854 + sgx->bytes_consumed = 0; 855 + } 856 + 857 + return 1; 858 + } 859 + 860 + static int 861 + qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, 862 + uint32_t *dsd, uint16_t tot_dsds) 863 + { 864 + void *next_dsd; 865 + uint8_t avail_dsds = 0; 866 + uint32_t dsd_list_len; 867 + struct dsd_dma *dsd_ptr; 868 + struct scatterlist *sg_prot; 869 + uint32_t *cur_dsd = dsd; 870 + uint16_t used_dsds = tot_dsds; 871 + 872 + uint32_t prot_int; 873 + uint32_t partial; 874 + struct qla2_sgx sgx; 875 + dma_addr_t sle_dma; 876 + uint32_t sle_dma_len, tot_prot_dma_len = 0; 877 + struct scsi_cmnd *cmd = sp->cmd; 878 + 879 + prot_int = cmd->device->sector_size; 880 + 881 + memset(&sgx, 0, sizeof(struct qla2_sgx)); 882 + sgx.tot_bytes = scsi_bufflen(sp->cmd); 883 + sgx.cur_sg = scsi_sglist(sp->cmd); 884 + sgx.sp = sp; 885 + 886 + sg_prot = scsi_prot_sglist(sp->cmd); 887 + 888 + while 
(qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { 889 + 890 + sle_dma = sgx.dma_addr; 891 + sle_dma_len = sgx.dma_len; 892 + alloc_and_fill: 893 + /* Allocate additional continuation packets? */ 894 + if (avail_dsds == 0) { 895 + avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 896 + QLA_DSDS_PER_IOCB : used_dsds; 897 + dsd_list_len = (avail_dsds + 1) * 12; 898 + used_dsds -= avail_dsds; 899 + 900 + /* allocate tracking DS */ 901 + dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 902 + if (!dsd_ptr) 903 + return 1; 904 + 905 + /* allocate new list */ 906 + dsd_ptr->dsd_addr = next_dsd = 907 + dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 908 + &dsd_ptr->dsd_list_dma); 909 + 910 + if (!next_dsd) { 911 + /* 912 + * Need to cleanup only this dsd_ptr, rest 913 + * will be done by sp_free_dma() 914 + */ 915 + kfree(dsd_ptr); 916 + return 1; 917 + } 918 + 919 + list_add_tail(&dsd_ptr->list, 920 + &((struct crc_context *)sp->ctx)->dsd_list); 921 + 922 + sp->flags |= SRB_CRC_CTX_DSD_VALID; 923 + 924 + /* add new list to cmd iocb or last list */ 925 + *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 926 + *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 927 + *cur_dsd++ = dsd_list_len; 928 + cur_dsd = (uint32_t *)next_dsd; 929 + } 930 + *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 931 + *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 932 + *cur_dsd++ = cpu_to_le32(sle_dma_len); 933 + avail_dsds--; 934 + 935 + if (partial == 0) { 936 + /* Got a full protection interval */ 937 + sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len; 938 + sle_dma_len = 8; 939 + 940 + tot_prot_dma_len += sle_dma_len; 941 + if (tot_prot_dma_len == sg_dma_len(sg_prot)) { 942 + tot_prot_dma_len = 0; 943 + sg_prot = sg_next(sg_prot); 944 + } 945 + 946 + partial = 1; /* So as to not re-enter this block */ 947 + goto alloc_and_fill; 948 + } 949 + } 950 + /* Null termination */ 951 + *cur_dsd++ = 0; 952 + *cur_dsd++ = 0; 953 + *cur_dsd++ = 0; 954 + return 0; 955 + } 795 956 static int 796 957 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, 797 958 uint16_t tot_dsds) ··· 1128 981 struct scsi_cmnd *cmd; 1129 982 struct scatterlist *cur_seg; 1130 983 int sgc; 1131 - uint32_t total_bytes; 984 + uint32_t total_bytes = 0; 1132 985 uint32_t data_bytes; 1133 986 uint32_t dif_bytes; 1134 987 uint8_t bundling = 1; ··· 1170 1023 __constant_cpu_to_le16(CF_READ_DATA); 1171 1024 } 1172 1025 1173 - tot_prot_dsds = scsi_prot_sg_count(cmd); 1174 - if (!tot_prot_dsds) 1026 + if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) || 1027 + (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) || 1028 + (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) || 1029 + (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT)) 1175 1030 bundling = 0; 1176 1031 1177 1032 /* Allocate CRC context from global pool */ ··· 1196 1047 1197 1048 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); 1198 1049 1199 - qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *) 1050 + qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *) 1200 1051 &crc_ctx_pkt->ref_tag, tot_prot_dsds); 1201 1052 1202 1053 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); ··· 1225 1076 fcp_cmnd->additional_cdb_len |= 2; 1226 1077 1227 1078 int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); 1228 - host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun)); 1229 1079 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 1230 1080 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); 1231 1081 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( ··· 
1255 1107 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ 1256 1108 1257 1109 /* Compute dif len and adjust data len to incude protection */ 1258 - total_bytes = data_bytes; 1259 1110 dif_bytes = 0; 1260 1111 blk_size = cmd->device->sector_size; 1261 - if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 1262 - dif_bytes = (data_bytes / blk_size) * 8; 1263 - total_bytes += dif_bytes; 1112 + dif_bytes = (data_bytes / blk_size) * 8; 1113 + 1114 + switch (scsi_get_prot_op(sp->cmd)) { 1115 + case SCSI_PROT_READ_INSERT: 1116 + case SCSI_PROT_WRITE_STRIP: 1117 + total_bytes = data_bytes; 1118 + data_bytes += dif_bytes; 1119 + break; 1120 + 1121 + case SCSI_PROT_READ_STRIP: 1122 + case SCSI_PROT_WRITE_INSERT: 1123 + case SCSI_PROT_READ_PASS: 1124 + case SCSI_PROT_WRITE_PASS: 1125 + total_bytes = data_bytes + dif_bytes; 1126 + break; 1127 + default: 1128 + BUG(); 1264 1129 } 1265 1130 1266 - if (!ql2xenablehba_err_chk) 1131 + if (!qla2x00_hba_err_chk_enabled(sp)) 1267 1132 fw_prot_opts |= 0x10; /* Disable Guard tag checking */ 1268 1133 1269 1134 if (!bundling) { ··· 1312 1151 1313 1152 cmd_pkt->control_flags |= 1314 1153 __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); 1315 - if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, 1154 + 1155 + if (!bundling && tot_prot_dsds) { 1156 + if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, 1157 + cur_dsd, tot_dsds)) 1158 + goto crc_queuing_error; 1159 + } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, 1316 1160 (tot_dsds - tot_prot_dsds))) 1317 1161 goto crc_queuing_error; 1318 1162 ··· 1580 1414 goto queuing_error; 1581 1415 else 1582 1416 sp->flags |= SRB_DMA_VALID; 1417 + 1418 + if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1419 + (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 1420 + struct qla2_sgx sgx; 1421 + uint32_t partial; 1422 + 1423 + memset(&sgx, 0, sizeof(struct qla2_sgx)); 1424 + sgx.tot_bytes = scsi_bufflen(cmd); 1425 + sgx.cur_sg = scsi_sglist(cmd); 1426 + sgx.sp = sp; 1427 + 1428 + nseg = 0; 1429 + while (qla24xx_get_one_block_sg( 1430 + cmd->device->sector_size, &sgx, &partial)) 1431 + nseg++; 1432 + } 1583 1433 } else 1584 1434 nseg = 0; 1585 1435 ··· 1610 1428 goto queuing_error; 1611 1429 else 1612 1430 sp->flags |= SRB_CRC_PROT_DMA_VALID; 1431 + 1432 + if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1433 + (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 1434 + nseg = scsi_bufflen(cmd) / cmd->device->sector_size; 1435 + } 1613 1436 } else { 1614 1437 nseg = 0; 1615 1438 } ··· 1641 1454 /* Build header part of command packet (excluding the OPCODE). */ 1642 1455 req->current_outstanding_cmd = handle; 1643 1456 req->outstanding_cmds[handle] = sp; 1457 + sp->handle = handle; 1644 1458 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1645 1459 req->cnt -= req_cnt; 1646 1460
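qla24xx_get_one_block_sg() walks the data scatterlist one protection interval (sector) at a time, carrying partially consumed bytes across scatter-gather entries so a DIF tuple can be interleaved after every full block. The accounting, reduced to a runnable userspace loop with plain arrays standing in for the scatterlist:

    #include <stdio.h>

    int main(void)
    {
            const unsigned int seg_len[] = { 700, 300, 536 };  /* SG entries */
            const unsigned int blk = 512;                      /* sector     */
            unsigned int seg = 0, used = 0, partial = 0, blocks = 0;

            while (seg < 3) {
                    unsigned int avail = seg_len[seg] - used;
                    unsigned int take = (partial + avail >= blk) ?
                                        blk - partial : avail;

                    partial = (partial + take == blk) ? 0 : partial + take;
                    if (!partial)
                            blocks++;       /* a full block was assembled */
                    used += take;
                    if (used == seg_len[seg]) {
                            seg++;
                            used = 0;
                    }
            }
            printf("emitted %u full %u-byte blocks\n", blocks, blk);
            return 0;
    }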
+87 -28
drivers/scsi/qla2xxx/qla_isr.c
··· 719 719 vha->flags.rscn_queue_overflow = 1; 720 720 } 721 721 722 - atomic_set(&vha->loop_state, LOOP_UPDATE); 723 722 atomic_set(&vha->loop_down_timer, 0); 724 723 vha->flags.management_server_logged_in = 0; 725 724 ··· 1434 1435 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 1435 1436 * to indicate to the kernel that the HBA detected error. 1436 1437 */ 1437 - static inline void 1438 + static inline int 1438 1439 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1439 1440 { 1440 1441 struct scsi_qla_host *vha = sp->fcport->vha; 1441 1442 struct scsi_cmnd *cmd = sp->cmd; 1442 - struct scsi_dif_tuple *ep = 1443 - (struct scsi_dif_tuple *)&sts24->data[20]; 1444 - struct scsi_dif_tuple *ap = 1445 - (struct scsi_dif_tuple *)&sts24->data[12]; 1443 + uint8_t *ap = &sts24->data[12]; 1444 + uint8_t *ep = &sts24->data[20]; 1446 1445 uint32_t e_ref_tag, a_ref_tag; 1447 1446 uint16_t e_app_tag, a_app_tag; 1448 1447 uint16_t e_guard, a_guard; 1449 1448 1450 - e_ref_tag = be32_to_cpu(ep->ref_tag); 1451 - a_ref_tag = be32_to_cpu(ap->ref_tag); 1452 - e_app_tag = be16_to_cpu(ep->app_tag); 1453 - a_app_tag = be16_to_cpu(ap->app_tag); 1454 - e_guard = be16_to_cpu(ep->guard); 1455 - a_guard = be16_to_cpu(ap->guard); 1449 + /* 1450 + * swab32 of the "data" field in the beginning of qla2x00_status_entry() 1451 + * would make guard field appear at offset 2 1452 + */ 1453 + a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); 1454 + a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); 1455 + a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); 1456 + e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); 1457 + e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); 1458 + e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); 1456 1459 1457 1460 ql_dbg(ql_dbg_io, vha, 0x3023, 1458 1461 "iocb(s) %p Returned STATUS.\n", sts24); ··· 1466 1465 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1467 1466 a_app_tag, e_app_tag, a_guard, e_guard); 1468 1467 1468 + /* 1469 + * Ignore sector if: 1470 + * For type 3: ref & app tag is all 'f's 1471 + * For type 0,1,2: app tag is all 'f's 1472 + */ 1473 + if ((a_app_tag == 0xffff) && 1474 + ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || 1475 + (a_ref_tag == 0xffffffff))) { 1476 + uint32_t blocks_done, resid; 1477 + sector_t lba_s = scsi_get_lba(cmd); 1478 + 1479 + /* 2TB boundary case covered automatically with this */ 1480 + blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 1481 + 1482 + resid = scsi_bufflen(cmd) - (blocks_done * 1483 + cmd->device->sector_size); 1484 + 1485 + scsi_set_resid(cmd, resid); 1486 + cmd->result = DID_OK << 16; 1487 + 1488 + /* Update protection tag */ 1489 + if (scsi_prot_sg_count(cmd)) { 1490 + uint32_t i, j = 0, k = 0, num_ent; 1491 + struct scatterlist *sg; 1492 + struct sd_dif_tuple *spt; 1493 + 1494 + /* Patch the corresponding protection tags */ 1495 + scsi_for_each_prot_sg(cmd, sg, 1496 + scsi_prot_sg_count(cmd), i) { 1497 + num_ent = sg_dma_len(sg) / 8; 1498 + if (k + num_ent < blocks_done) { 1499 + k += num_ent; 1500 + continue; 1501 + } 1502 + j = blocks_done - k - 1; 1503 + k = blocks_done; 1504 + break; 1505 + } 1506 + 1507 + if (k != blocks_done) { 1508 + qla_printk(KERN_WARNING, sp->fcport->vha->hw, 1509 + "unexpected tag values tag:lba=%x:%llx)\n", 1510 + e_ref_tag, (unsigned long long)lba_s); 1511 + return 1; 1512 + } 1513 + 1514 + spt = page_address(sg_page(sg)) + sg->offset; 1515 + spt += j; 1516 + 1517 + spt->app_tag = 0xffff; 1518 + if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 1519 + spt->ref_tag = 0xffffffff; 
1520 + } 1521 + 1522 + return 0; 1523 + } 1524 + 1469 1525 /* check guard */ 1470 1526 if (e_guard != a_guard) { 1471 1527 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, ··· 1530 1472 set_driver_byte(cmd, DRIVER_SENSE); 1531 1473 set_host_byte(cmd, DID_ABORT); 1532 1474 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1533 - return; 1534 - } 1535 - 1536 - /* check appl tag */ 1537 - if (e_app_tag != a_app_tag) { 1538 - scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1539 - 0x10, 0x2); 1540 - set_driver_byte(cmd, DRIVER_SENSE); 1541 - set_host_byte(cmd, DID_ABORT); 1542 - cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1543 - return; 1475 + return 1; 1544 1476 } 1545 1477 1546 1478 /* check ref tag */ ··· 1540 1492 set_driver_byte(cmd, DRIVER_SENSE); 1541 1493 set_host_byte(cmd, DID_ABORT); 1542 1494 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1543 - return; 1495 + return 1; 1544 1496 } 1497 + 1498 + /* check appl tag */ 1499 + if (e_app_tag != a_app_tag) { 1500 + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1501 + 0x10, 0x2); 1502 + set_driver_byte(cmd, DRIVER_SENSE); 1503 + set_host_byte(cmd, DID_ABORT); 1504 + cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1505 + return 1; 1506 + } 1507 + 1508 + return 1; 1545 1509 } 1546 1510 1547 1511 /** ··· 1827 1767 break; 1828 1768 1829 1769 case CS_DIF_ERROR: 1830 - qla2x00_handle_dif_error(sp, sts24); 1770 + logit = qla2x00_handle_dif_error(sp, sts24); 1831 1771 break; 1832 1772 default: 1833 1773 cp->result = DID_ERROR << 16; ··· 2528 2468 goto skip_msi; 2529 2469 } 2530 2470 2531 - if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || 2532 - !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { 2471 + if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 2533 2472 ql_log(ql_log_warn, vha, 0x0035, 2534 2473 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 2535 - ha->pdev->revision, ha->fw_attributes); 2474 + ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 2536 2475 goto skip_msix; 2537 2476 } 2538 2477
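When the HBA flags a DIF error on a sector the initiator asked it to ignore (app tag 0xffff, plus ref tag 0xffffffff for Type 3), the new qla_isr code computes how many blocks actually completed from the expected ref tag and sets a residual instead of failing the command. The 32-bit subtraction wraps correctly across the 2TB boundary, as this arithmetic sketch shows:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Start just below the 32-bit LBA boundary; 8 blocks done. */
            uint64_t lba = 0xfffffffcULL;
            uint32_t e_ref_tag = (uint32_t)(lba + 8 - 1);   /* last block */

            /* Unsigned wraparound makes this correct across the boundary. */
            uint32_t blocks_done = e_ref_tag - (uint32_t)lba + 1;

            printf("blocks done = %u\n", (unsigned)blocks_done); /* 8 */
            return 0;
    }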
+1 -1
drivers/scsi/qla2xxx/qla_mid.c
··· 472 472 host->can_queue = base_vha->req->length + 128; 473 473 host->this_id = 255; 474 474 host->cmd_per_lun = 3; 475 - if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) 475 + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 476 476 host->max_cmd_len = 32; 477 477 else 478 478 host->max_cmd_len = MAX_CMDSZ;
+13 -12
drivers/scsi/qla2xxx/qla_nx.c
··· 2208 2208 struct qla_hw_data *ha; 2209 2209 struct rsp_que *rsp; 2210 2210 struct device_reg_82xx __iomem *reg; 2211 + unsigned long flags; 2211 2212 2212 2213 rsp = (struct rsp_que *) dev_id; 2213 2214 if (!rsp) { ··· 2219 2218 2220 2219 ha = rsp->hw; 2221 2220 reg = &ha->iobase->isp82; 2222 - spin_lock_irq(&ha->hardware_lock); 2221 + spin_lock_irqsave(&ha->hardware_lock, flags); 2223 2222 vha = pci_get_drvdata(ha->pdev); 2224 2223 qla24xx_process_response_queue(vha, rsp); 2225 2224 WRT_REG_DWORD(&reg->host_int, 0); 2226 - spin_unlock_irq(&ha->hardware_lock); 2225 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 2227 2226 return IRQ_HANDLED; 2228 2227 } 2229 2228 ··· 2839 2838 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 2840 2839 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2841 2840 2841 + /* build FCP_CMND IU */ 2842 + memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 2843 + int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); 2844 + ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; 2845 + 2846 + if (cmd->sc_data_direction == DMA_TO_DEVICE) 2847 + ctx->fcp_cmnd->additional_cdb_len |= 1; 2848 + else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 2849 + ctx->fcp_cmnd->additional_cdb_len |= 2; 2850 + 2842 2851 /* 2843 2852 * Update tagged queuing modifier -- default is TSK_SIMPLE (0). 2844 2853 */ ··· 2864 2853 break; 2865 2854 } 2866 2855 } 2867 - 2868 - /* build FCP_CMND IU */ 2869 - memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 2870 - int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); 2871 - ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; 2872 - 2873 - if (cmd->sc_data_direction == DMA_TO_DEVICE) 2874 - ctx->fcp_cmnd->additional_cdb_len |= 1; 2875 - else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 2876 - ctx->fcp_cmnd->additional_cdb_len |= 2; 2877 2856 2878 2857 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 2879 2858
+22 -8
drivers/scsi/qla2xxx/qla_os.c
··· 106 106 "Maximum queue depth to report for target devices."); 107 107 108 108 /* Do not change the value of this after module load */ 109 - int ql2xenabledif = 1; 109 + int ql2xenabledif = 0; 110 110 module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR); 111 111 MODULE_PARM_DESC(ql2xenabledif, 112 112 " Enable T10-CRC-DIF " 113 - " Default is 0 - No DIF Support. 1 - Enable it"); 113 + " Default is 0 - No DIF Support. 1 - Enable it" 114 + ", 2 - Enable DIF for all types, except Type 0."); 114 115 115 - int ql2xenablehba_err_chk; 116 + int ql2xenablehba_err_chk = 2; 116 117 module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); 117 118 MODULE_PARM_DESC(ql2xenablehba_err_chk, 118 - " Enable T10-CRC-DIF Error isolation by HBA" 119 - " Default is 0 - Error isolation disabled, 1 - Enable it"); 119 + " Enable T10-CRC-DIF Error isolation by HBA:\n" 120 + " Default is 1.\n" 121 + " 0 -- Error isolation disabled\n" 122 + " 1 -- Error isolation enabled only for DIX Type 0\n" 123 + " 2 -- Error isolation enabled for all Types\n"); 120 124 121 125 int ql2xiidmaenable=1; 122 126 module_param(ql2xiidmaenable, int, S_IRUGO); ··· 913 909 "Abort command mbx success.\n"); 914 910 wait = 1; 915 911 } 912 + 913 + spin_lock_irqsave(&ha->hardware_lock, flags); 916 914 qla2x00_sp_compl(ha, sp); 915 + spin_unlock_irqrestore(&ha->hardware_lock, flags); 916 + 917 + /* Did the command return during mailbox execution? */ 918 + if (ret == FAILED && !CMD_SP(cmd)) 919 + ret = SUCCESS; 917 920 918 921 /* Wait for the command to be returned. */ 919 922 if (wait) { ··· 2262 2251 host->this_id = 255; 2263 2252 host->cmd_per_lun = 3; 2264 2253 host->unique_id = host->host_no; 2265 - if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) 2254 + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 2266 2255 host->max_cmd_len = 32; 2267 2256 else 2268 2257 host->max_cmd_len = MAX_CMDSZ; ··· 2389 2378 "Detected hba at address=%p.\n", 2390 2379 ha); 2391 2380 2392 - if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { 2381 + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 2393 2382 if (ha->fw_attributes & BIT_4) { 2383 + int prot = 0; 2394 2384 base_vha->flags.difdix_supported = 1; 2395 2385 ql_dbg(ql_dbg_init, base_vha, 0x00f1, 2396 2386 "Registering for DIF/DIX type 1 and 3 protection.\n"); 2387 + if (ql2xenabledif == 1) 2388 + prot = SHOST_DIX_TYPE0_PROTECTION; 2397 2389 scsi_host_set_prot(host, 2398 - SHOST_DIF_TYPE1_PROTECTION 2390 + prot | SHOST_DIF_TYPE1_PROTECTION 2399 2391 | SHOST_DIF_TYPE2_PROTECTION 2400 2392 | SHOST_DIF_TYPE3_PROTECTION 2401 2393 | SHOST_DIX_TYPE1_PROTECTION
+1 -1
drivers/scsi/qla2xxx/qla_version.h
··· 7 7 /* 8 8 * Driver version 9 9 */ 10 - #define QLA2XXX_VERSION "8.03.07.03-k" 10 + #define QLA2XXX_VERSION "8.03.07.07-k" 11 11 12 12 #define QLA_DRIVER_MAJOR_VER 8 13 13 #define QLA_DRIVER_MINOR_VER 3
+3
drivers/spi/spi-fsl-spi.c
··· 825 825 { 826 826 struct device *dev = mspi->dev; 827 827 828 + if (!(mspi->flags & SPI_CPM_MODE)) 829 + return; 830 + 828 831 dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE); 829 832 dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); 830 833 cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
+3 -1
drivers/spi/spi-imx.c
··· 786 786 int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); 787 787 if (cs_gpio < 0) 788 788 cs_gpio = mxc_platform_info->chipselect[i]; 789 + 790 + spi_imx->chipselect[i] = cs_gpio; 789 791 if (cs_gpio < 0) 790 792 continue; 791 - spi_imx->chipselect[i] = cs_gpio; 793 + 792 794 ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); 793 795 if (ret) { 794 796 while (i > 0) {
+66 -27
drivers/spi/spi-topcliff-pch.c
··· 50 50 #define PCH_RX_THOLD 7 51 51 #define PCH_RX_THOLD_MAX 15 52 52 53 + #define PCH_TX_THOLD 2 54 + 53 55 #define PCH_MAX_BAUDRATE 5000000 54 56 #define PCH_MAX_FIFO_DEPTH 16 55 57 ··· 60 58 #define PCH_SLEEP_TIME 10 61 59 62 60 #define SSN_LOW 0x02U 61 + #define SSN_HIGH 0x03U 63 62 #define SSN_NO_CONTROL 0x00U 64 63 #define PCH_MAX_CS 0xFF 65 64 #define PCI_DEVICE_ID_GE_SPI 0x8816 ··· 319 316 320 317 /* if transfer complete interrupt */ 321 318 if (reg_spsr_val & SPSR_FI_BIT) { 322 - if (tx_index < bpw_len) 319 + if ((tx_index == bpw_len) && (rx_index == tx_index)) { 320 + /* disable interrupts */ 321 + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); 322 + 323 + /* transfer is completed; 324 + inform pch_spi_process_messages */ 325 + data->transfer_complete = true; 326 + data->transfer_active = false; 327 + wake_up(&data->wait); 328 + } else { 323 329 dev_err(&data->master->dev, 324 330 "%s : Transfer is not completed", __func__); 325 - /* disable interrupts */ 326 - pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); 327 - 328 - /* transfer is completed;inform pch_spi_process_messages */ 329 - data->transfer_complete = true; 330 - data->transfer_active = false; 331 - wake_up(&data->wait); 331 + } 332 332 } 333 333 } 334 334 ··· 354 348 "%s returning due to suspend\n", __func__); 355 349 return IRQ_NONE; 356 350 } 357 - if (data->use_dma) 358 - return IRQ_NONE; 359 351 360 352 io_remap_addr = data->io_remap_addr; 361 353 spsr = io_remap_addr + PCH_SPSR; 362 354 363 355 reg_spsr_val = ioread32(spsr); 364 356 365 - if (reg_spsr_val & SPSR_ORF_BIT) 366 - dev_err(&board_dat->pdev->dev, "%s Over run error", __func__); 357 + if (reg_spsr_val & SPSR_ORF_BIT) { 358 + dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__); 359 + if (data->current_msg->complete != 0) { 360 + data->transfer_complete = true; 361 + data->current_msg->status = -EIO; 362 + data->current_msg->complete(data->current_msg->context); 363 + data->bcurrent_msg_processing = false; 364 + data->current_msg = NULL; 365 + data->cur_trans = NULL; 366 + } 367 + } 368 + 369 + if (data->use_dma) 370 + return IRQ_NONE; 367 371 368 372 /* Check if the interrupt is for SPI device */ 369 373 if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { ··· 772 756 773 757 wait_event_interruptible(data->wait, data->transfer_complete); 774 758 775 - pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); 776 - dev_dbg(&data->master->dev, 777 - "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); 778 - 779 759 /* clear all interrupts */ 780 760 pch_spi_writereg(data->master, PCH_SPSR, 781 761 pch_spi_readreg(data->master, PCH_SPSR)); ··· 827 815 } 828 816 } 829 817 830 - static void pch_spi_start_transfer(struct pch_spi_data *data) 818 + static int pch_spi_start_transfer(struct pch_spi_data *data) 831 819 { 832 820 struct pch_spi_dma_ctrl *dma; 833 821 unsigned long flags; 822 + int rtn; 834 823 835 824 dma = &data->dma; 836 825 ··· 846 833 initiating the transfer. 
*/ 847 834 dev_dbg(&data->master->dev, 848 835 "%s:waiting for transfer to get over\n", __func__); 849 - wait_event_interruptible(data->wait, data->transfer_complete); 836 + rtn = wait_event_interruptible_timeout(data->wait, 837 + data->transfer_complete, 838 + msecs_to_jiffies(2 * HZ)); 850 839 851 840 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, 852 841 DMA_FROM_DEVICE); 842 + 843 + dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent, 844 + DMA_FROM_DEVICE); 845 + memset(data->dma.tx_buf_virt, 0, PAGE_SIZE); 846 + 853 847 async_tx_ack(dma->desc_rx); 854 848 async_tx_ack(dma->desc_tx); 855 849 kfree(dma->sg_tx_p); 856 850 kfree(dma->sg_rx_p); 857 851 858 852 spin_lock_irqsave(&data->lock, flags); 859 - pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); 860 - dev_dbg(&data->master->dev, 861 - "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); 862 853 863 854 /* clear fifo threshold, disable interrupts, disable SPI transfer */ 864 855 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, ··· 875 858 pch_spi_clear_fifo(data->master); 876 859 877 860 spin_unlock_irqrestore(&data->lock, flags); 861 + 862 + return rtn; 878 863 } 879 864 880 865 static void pch_dma_rx_complete(void *arg) ··· 1042 1023 /* set receive fifo threshold and transmit fifo threshold */ 1043 1024 pch_spi_setclr_reg(data->master, PCH_SPCR, 1044 1025 ((size - 1) << SPCR_RFIC_FIELD) | 1045 - ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) << 1046 - SPCR_TFIC_FIELD), 1026 + (PCH_TX_THOLD << SPCR_TFIC_FIELD), 1047 1027 MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS); 1048 1028 1049 1029 spin_unlock_irqrestore(&data->lock, flags); ··· 1053 1035 /* offset, length setting */ 1054 1036 sg = dma->sg_rx_p; 1055 1037 for (i = 0; i < num; i++, sg++) { 1056 - if (i == 0) { 1057 - sg->offset = 0; 1038 + if (i == (num - 2)) { 1039 + sg->offset = size * i; 1040 + sg->offset = sg->offset * (*bpw / 8); 1058 1041 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem, 1059 1042 sg->offset); 1060 1043 sg_dma_len(sg) = rem; 1044 + } else if (i == (num - 1)) { 1045 + sg->offset = size * (i - 1) + rem; 1046 + sg->offset = sg->offset * (*bpw / 8); 1047 + sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, 1048 + sg->offset); 1049 + sg_dma_len(sg) = size; 1061 1050 } else { 1062 - sg->offset = rem + size * (i - 1); 1051 + sg->offset = size * i; 1063 1052 sg->offset = sg->offset * (*bpw / 8); 1064 1053 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, 1065 1054 sg->offset); ··· 1090 1065 dma->desc_rx = desc_rx; 1091 1066 1092 1067 /* TX */ 1068 + if (data->bpw_len > PCH_DMA_TRANS_SIZE) { 1069 + num = data->bpw_len / PCH_DMA_TRANS_SIZE; 1070 + size = PCH_DMA_TRANS_SIZE; 1071 + rem = 16; 1072 + } else { 1073 + num = 1; 1074 + size = data->bpw_len; 1075 + rem = data->bpw_len; 1076 + } 1077 + 1093 1078 dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); 1094 1079 sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ 1095 1080 /* offset, length setting */ ··· 1197 1162 if (data->use_dma) 1198 1163 pch_spi_request_dma(data, 1199 1164 data->current_msg->spi->bits_per_word); 1165 + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); 1200 1166 do { 1201 1167 /* If we are already processing a message get the next 1202 1168 transfer structure from the message otherwise retrieve ··· 1220 1184 1221 1185 if (data->use_dma) { 1222 1186 pch_spi_handle_dma(data, &bpw); 1223 - pch_spi_start_transfer(data); 1187 + if (!pch_spi_start_transfer(data)) 1188 + goto out; 1224 1189 
pch_spi_copy_rx_data_for_dma(data, bpw); 1225 1190 } else { 1226 1191 pch_spi_set_tx(data, &bpw); ··· 1259 1222 1260 1223 } while (data->cur_trans != NULL); 1261 1224 1225 + out: 1226 + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH); 1262 1227 if (data->use_dma) 1263 1228 pch_spi_release_dma(data); 1264 1229 }
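The Topcliff PCH SPI hunks above move completion detection into the interrupt handler (the transfer is done only when tx_index has reached bpw_len and rx_index has caught up), complete the current message with -EIO on a receive overrun, and bound the DMA wait so a wedged controller can no longer hang pch_spi_process_messages() forever. The load-bearing pattern is the bounded wait; a minimal sketch follows, with my_dev, done and wait as illustrative names rather than the driver's own. Worth noting: the hunk feeds 2 * HZ, a jiffies-scaled quantity, through msecs_to_jiffies(), so the effective timeout depends on HZ rather than being a flat two seconds.

    #include <linux/wait.h>
    #include <linux/jiffies.h>

    struct my_dev {
        wait_queue_head_t wait;
        bool done;
    };

    /* Returns >0 on completion, 0 on timeout, -ERESTARTSYS on a signal;
     * the caller above treats a 0 return as failure and skips the RX copy. */
    static long my_wait_for_dma(struct my_dev *d, unsigned int timeout_ms)
    {
        return wait_event_interruptible_timeout(d->wait, d->done,
                                                msecs_to_jiffies(timeout_ms));
    }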
+3 -1
drivers/staging/comedi/drivers/ni_labpc.c
··· 241 241 struct comedi_insn *insn, 242 242 unsigned int *data); 243 243 static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd); 244 - #ifdef CONFIG_COMEDI_PCI 244 + #ifdef CONFIG_ISA_DMA_API 245 245 static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd); 246 + #endif 247 + #ifdef CONFIG_COMEDI_PCI 246 248 static int labpc_find_device(struct comedi_device *dev, int bus, int slot); 247 249 #endif 248 250 static int labpc_dio_mem_callback(int dir, int port, int data,
+1 -1
drivers/staging/zcache/zcache-main.c
··· 1242 1242 int ret = 0; 1243 1243 1244 1244 BUG_ON(!is_ephemeral(pool)); 1245 - zbud_decompress(virt_to_page(data), pampd); 1245 + zbud_decompress((struct page *)(data), pampd); 1246 1246 zbud_free_and_delist((struct zbud_hdr *)pampd); 1247 1247 atomic_dec(&zcache_curr_eph_pampd_count); 1248 1248 return ret;
+1 -1
drivers/target/iscsi/iscsi_target_parameters.c
··· 1430 1430 u8 DataSequenceInOrder = 0; 1431 1431 u8 ErrorRecoveryLevel = 0, SessionType = 0; 1432 1432 u8 IFMarker = 0, OFMarker = 0; 1433 - u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0; 1433 + u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1; 1434 1434 u32 FirstBurstLength = 0, MaxBurstLength = 0; 1435 1435 struct iscsi_param *param = NULL; 1436 1436
+21 -249
drivers/target/iscsi/iscsi_target_util.c
··· 875 875 } 876 876 877 877 /* 878 - * Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker 879 - * array counts needed for sync and steering. 880 - */ 881 - static int iscsit_determine_sync_and_steering_counts( 882 - struct iscsi_conn *conn, 883 - struct iscsi_data_count *count) 884 - { 885 - u32 length = count->data_length; 886 - u32 marker, markint; 887 - 888 - count->sync_and_steering = 1; 889 - 890 - marker = (count->type == ISCSI_RX_DATA) ? 891 - conn->of_marker : conn->if_marker; 892 - markint = (count->type == ISCSI_RX_DATA) ? 893 - (conn->conn_ops->OFMarkInt * 4) : 894 - (conn->conn_ops->IFMarkInt * 4); 895 - count->ss_iov_count = count->iov_count; 896 - 897 - while (length > 0) { 898 - if (length >= marker) { 899 - count->ss_iov_count += 3; 900 - count->ss_marker_count += 2; 901 - 902 - length -= marker; 903 - marker = markint; 904 - } else 905 - length = 0; 906 - } 907 - 908 - return 0; 909 - } 910 - 911 - /* 912 878 * Setup conn->if_marker and conn->of_marker values based upon 913 879 * the initial marker-less interval. (see iSCSI v19 A.2) 914 880 */ ··· 1256 1290 struct kvec iov; 1257 1291 u32 tx_hdr_size, data_len; 1258 1292 u32 offset = cmd->first_data_sg_off; 1259 - int tx_sent; 1293 + int tx_sent, iov_off; 1260 1294 1261 1295 send_hdr: 1262 1296 tx_hdr_size = ISCSI_HDR_LEN; ··· 1276 1310 } 1277 1311 1278 1312 data_len = cmd->tx_size - tx_hdr_size - cmd->padding; 1279 - if (conn->conn_ops->DataDigest) 1313 + /* 1314 + * Set iov_off used by padding and data digest tx_data() calls below 1315 + * in order to determine proper offset into cmd->iov_data[] 1316 + */ 1317 + if (conn->conn_ops->DataDigest) { 1280 1318 data_len -= ISCSI_CRC_LEN; 1281 - 1319 + if (cmd->padding) 1320 + iov_off = (cmd->iov_data_count - 2); 1321 + else 1322 + iov_off = (cmd->iov_data_count - 1); 1323 + } else { 1324 + iov_off = (cmd->iov_data_count - 1); 1325 + } 1282 1326 /* 1283 1327 * Perform sendpage() for each page in the scatterlist 1284 1328 */ ··· 1317 1341 1318 1342 send_padding: 1319 1343 if (cmd->padding) { 1320 - struct kvec *iov_p = 1321 - &cmd->iov_data[cmd->iov_data_count-1]; 1344 + struct kvec *iov_p = &cmd->iov_data[iov_off++]; 1322 1345 1323 1346 tx_sent = tx_data(conn, iov_p, 1, cmd->padding); 1324 1347 if (cmd->padding != tx_sent) { ··· 1331 1356 1332 1357 send_datacrc: 1333 1358 if (conn->conn_ops->DataDigest) { 1334 - struct kvec *iov_d = 1335 - &cmd->iov_data[cmd->iov_data_count]; 1359 + struct kvec *iov_d = &cmd->iov_data[iov_off]; 1336 1360 1337 1361 tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN); 1338 1362 if (ISCSI_CRC_LEN != tx_sent) { ··· 1405 1431 struct iscsi_data_count *count) 1406 1432 { 1407 1433 int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len; 1408 - u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0; 1409 - struct kvec iov[count->ss_iov_count], *iov_p; 1434 + struct kvec *iov_p; 1410 1435 struct msghdr msg; 1411 1436 1412 1437 if (!conn || !conn->sock || !conn->conn_ops) ··· 1413 1440 1414 1441 memset(&msg, 0, sizeof(struct msghdr)); 1415 1442 1416 - if (count->sync_and_steering) { 1417 - int size = 0; 1418 - u32 i, orig_iov_count = 0; 1419 - u32 orig_iov_len = 0, orig_iov_loc = 0; 1420 - u32 iov_count = 0, per_iov_bytes = 0; 1421 - u32 *rx_marker, old_rx_marker = 0; 1422 - struct kvec *iov_record; 1423 - 1424 - memset(&rx_marker_val, 0, 1425 - count->ss_marker_count * sizeof(u32)); 1426 - memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); 1427 - 1428 - iov_record = count->iov; 1429 - orig_iov_count = 
count->iov_count; 1430 - rx_marker = &conn->of_marker; 1431 - 1432 - i = 0; 1433 - size = data; 1434 - orig_iov_len = iov_record[orig_iov_loc].iov_len; 1435 - while (size > 0) { 1436 - pr_debug("rx_data: #1 orig_iov_len %u," 1437 - " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc); 1438 - pr_debug("rx_data: #2 rx_marker %u, size" 1439 - " %u\n", *rx_marker, size); 1440 - 1441 - if (orig_iov_len >= *rx_marker) { 1442 - iov[iov_count].iov_len = *rx_marker; 1443 - iov[iov_count++].iov_base = 1444 - (iov_record[orig_iov_loc].iov_base + 1445 - per_iov_bytes); 1446 - 1447 - iov[iov_count].iov_len = (MARKER_SIZE / 2); 1448 - iov[iov_count++].iov_base = 1449 - &rx_marker_val[rx_marker_iov++]; 1450 - iov[iov_count].iov_len = (MARKER_SIZE / 2); 1451 - iov[iov_count++].iov_base = 1452 - &rx_marker_val[rx_marker_iov++]; 1453 - old_rx_marker = *rx_marker; 1454 - 1455 - /* 1456 - * OFMarkInt is in 32-bit words. 1457 - */ 1458 - *rx_marker = (conn->conn_ops->OFMarkInt * 4); 1459 - size -= old_rx_marker; 1460 - orig_iov_len -= old_rx_marker; 1461 - per_iov_bytes += old_rx_marker; 1462 - 1463 - pr_debug("rx_data: #3 new_rx_marker" 1464 - " %u, size %u\n", *rx_marker, size); 1465 - } else { 1466 - iov[iov_count].iov_len = orig_iov_len; 1467 - iov[iov_count++].iov_base = 1468 - (iov_record[orig_iov_loc].iov_base + 1469 - per_iov_bytes); 1470 - 1471 - per_iov_bytes = 0; 1472 - *rx_marker -= orig_iov_len; 1473 - size -= orig_iov_len; 1474 - 1475 - if (size) 1476 - orig_iov_len = 1477 - iov_record[++orig_iov_loc].iov_len; 1478 - 1479 - pr_debug("rx_data: #4 new_rx_marker" 1480 - " %u, size %u\n", *rx_marker, size); 1481 - } 1482 - } 1483 - data += (rx_marker_iov * (MARKER_SIZE / 2)); 1484 - 1485 - iov_p = &iov[0]; 1486 - iov_len = iov_count; 1487 - 1488 - if (iov_count > count->ss_iov_count) { 1489 - pr_err("iov_count: %d, count->ss_iov_count:" 1490 - " %d\n", iov_count, count->ss_iov_count); 1491 - return -1; 1492 - } 1493 - if (rx_marker_iov > count->ss_marker_count) { 1494 - pr_err("rx_marker_iov: %d, count->ss_marker" 1495 - "_count: %d\n", rx_marker_iov, 1496 - count->ss_marker_count); 1497 - return -1; 1498 - } 1499 - } else { 1500 - iov_p = count->iov; 1501 - iov_len = count->iov_count; 1502 - } 1443 + iov_p = count->iov; 1444 + iov_len = count->iov_count; 1503 1445 1504 1446 while (total_rx < data) { 1505 1447 rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len, ··· 1429 1541 rx_loop, total_rx, data); 1430 1542 } 1431 1543 1432 - if (count->sync_and_steering) { 1433 - int j; 1434 - for (j = 0; j < rx_marker_iov; j++) { 1435 - pr_debug("rx_data: #5 j: %d, offset: %d\n", 1436 - j, rx_marker_val[j]); 1437 - conn->of_marker_offset = rx_marker_val[j]; 1438 - } 1439 - total_rx -= (rx_marker_iov * (MARKER_SIZE / 2)); 1440 - } 1441 - 1442 1544 return total_rx; 1443 1545 } 1444 1546 ··· 1437 1559 struct iscsi_data_count *count) 1438 1560 { 1439 1561 int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; 1440 - u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0; 1441 - struct kvec iov[count->ss_iov_count], *iov_p; 1562 + struct kvec *iov_p; 1442 1563 struct msghdr msg; 1443 1564 1444 1565 if (!conn || !conn->sock || !conn->conn_ops) ··· 1450 1573 1451 1574 memset(&msg, 0, sizeof(struct msghdr)); 1452 1575 1453 - if (count->sync_and_steering) { 1454 - int size = 0; 1455 - u32 i, orig_iov_count = 0; 1456 - u32 orig_iov_len = 0, orig_iov_loc = 0; 1457 - u32 iov_count = 0, per_iov_bytes = 0; 1458 - u32 *tx_marker, old_tx_marker = 0; 1459 - struct kvec *iov_record; 1460 - 1461 - 
memset(&tx_marker_val, 0, 1462 - count->ss_marker_count * sizeof(u32)); 1463 - memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); 1464 - 1465 - iov_record = count->iov; 1466 - orig_iov_count = count->iov_count; 1467 - tx_marker = &conn->if_marker; 1468 - 1469 - i = 0; 1470 - size = data; 1471 - orig_iov_len = iov_record[orig_iov_loc].iov_len; 1472 - while (size > 0) { 1473 - pr_debug("tx_data: #1 orig_iov_len %u," 1474 - " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc); 1475 - pr_debug("tx_data: #2 tx_marker %u, size" 1476 - " %u\n", *tx_marker, size); 1477 - 1478 - if (orig_iov_len >= *tx_marker) { 1479 - iov[iov_count].iov_len = *tx_marker; 1480 - iov[iov_count++].iov_base = 1481 - (iov_record[orig_iov_loc].iov_base + 1482 - per_iov_bytes); 1483 - 1484 - tx_marker_val[tx_marker_iov] = 1485 - (size - *tx_marker); 1486 - iov[iov_count].iov_len = (MARKER_SIZE / 2); 1487 - iov[iov_count++].iov_base = 1488 - &tx_marker_val[tx_marker_iov++]; 1489 - iov[iov_count].iov_len = (MARKER_SIZE / 2); 1490 - iov[iov_count++].iov_base = 1491 - &tx_marker_val[tx_marker_iov++]; 1492 - old_tx_marker = *tx_marker; 1493 - 1494 - /* 1495 - * IFMarkInt is in 32-bit words. 1496 - */ 1497 - *tx_marker = (conn->conn_ops->IFMarkInt * 4); 1498 - size -= old_tx_marker; 1499 - orig_iov_len -= old_tx_marker; 1500 - per_iov_bytes += old_tx_marker; 1501 - 1502 - pr_debug("tx_data: #3 new_tx_marker" 1503 - " %u, size %u\n", *tx_marker, size); 1504 - pr_debug("tx_data: #4 offset %u\n", 1505 - tx_marker_val[tx_marker_iov-1]); 1506 - } else { 1507 - iov[iov_count].iov_len = orig_iov_len; 1508 - iov[iov_count++].iov_base 1509 - = (iov_record[orig_iov_loc].iov_base + 1510 - per_iov_bytes); 1511 - 1512 - per_iov_bytes = 0; 1513 - *tx_marker -= orig_iov_len; 1514 - size -= orig_iov_len; 1515 - 1516 - if (size) 1517 - orig_iov_len = 1518 - iov_record[++orig_iov_loc].iov_len; 1519 - 1520 - pr_debug("tx_data: #5 new_tx_marker" 1521 - " %u, size %u\n", *tx_marker, size); 1522 - } 1523 - } 1524 - 1525 - data += (tx_marker_iov * (MARKER_SIZE / 2)); 1526 - 1527 - iov_p = &iov[0]; 1528 - iov_len = iov_count; 1529 - 1530 - if (iov_count > count->ss_iov_count) { 1531 - pr_err("iov_count: %d, count->ss_iov_count:" 1532 - " %d\n", iov_count, count->ss_iov_count); 1533 - return -1; 1534 - } 1535 - if (tx_marker_iov > count->ss_marker_count) { 1536 - pr_err("tx_marker_iov: %d, count->ss_marker" 1537 - "_count: %d\n", tx_marker_iov, 1538 - count->ss_marker_count); 1539 - return -1; 1540 - } 1541 - } else { 1542 - iov_p = count->iov; 1543 - iov_len = count->iov_count; 1544 - } 1576 + iov_p = count->iov; 1577 + iov_len = count->iov_count; 1545 1578 1546 1579 while (total_tx < data) { 1547 1580 tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, ··· 1465 1678 pr_debug("tx_loop: %d, total_tx: %d, data: %d\n", 1466 1679 tx_loop, total_tx, data); 1467 1680 } 1468 - 1469 - if (count->sync_and_steering) 1470 - total_tx -= (tx_marker_iov * (MARKER_SIZE / 2)); 1471 1681 1472 1682 return total_tx; 1473 1683 } ··· 1486 1702 c.data_length = data; 1487 1703 c.type = ISCSI_RX_DATA; 1488 1704 1489 - if (conn->conn_ops->OFMarker && 1490 - (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) { 1491 - if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) 1492 - return -1; 1493 - } 1494 - 1495 1705 return iscsit_do_rx_data(conn, &c); 1496 1706 } 1497 1707 ··· 1505 1727 c.iov_count = iov_count; 1506 1728 c.data_length = data; 1507 1729 c.type = ISCSI_TX_DATA; 1508 - 1509 - if (conn->conn_ops->IFMarker && 1510 - (conn->conn_state >= 
TARG_CONN_STATE_LOGGED_IN)) { 1511 - if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) 1512 - return -1; 1513 - } 1514 1730 1515 1731 return iscsit_do_tx_data(conn, &c); 1516 1732 }
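The two iscsi_target hunks belong together: with IFMarkInt/OFMarkInt now rejected during login (the *_Reject defaults flipped to 1 above), the fixed-interval sync-and-steering machinery in the rx/tx paths is dead code, and removing it also drops the variable-length kvec and marker arrays those functions placed on the stack. What remains in iscsit_fe_sendpage_sg() is a small piece of index arithmetic for the optional trailers; a standalone sketch of that computation, illustrative names only:

    /* With DataDigest on, the digest occupies the final kvec and any
     * padding the one before it; otherwise the single trailer is last. */
    static int first_trailer_index(int iov_count, int digest, int padding)
    {
        if (digest && padding)
            return iov_count - 2;   /* padding first, then digest */
        return iov_count - 1;
    }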
+33 -2
drivers/target/target_core_cdb.c
··· 24 24 */ 25 25 26 26 #include <linux/kernel.h> 27 + #include <linux/ctype.h> 27 28 #include <asm/unaligned.h> 28 29 #include <scsi/scsi.h> 29 30 ··· 155 154 return 0; 156 155 } 157 156 157 + static void 158 + target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off) 159 + { 160 + unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0]; 161 + unsigned char *buf = buf_off; 162 + int cnt = 0, next = 1; 163 + /* 164 + * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on 165 + * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field 166 + * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION 167 + * to complete the payload. These are based on VPD=0x80 PRODUCT SERIAL 168 + * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure 169 + * per device uniqueness. 170 + */ 171 + while (*p != '\0') { 172 + if (cnt >= 13) 173 + break; 174 + if (!isxdigit(*p)) { 175 + p++; 176 + continue; 177 + } 178 + if (next != 0) { 179 + buf[cnt++] |= hex_to_bin(*p++); 180 + next = 0; 181 + } else { 182 + buf[cnt] = hex_to_bin(*p++) << 4; 183 + next = 1; 184 + } 185 + } 186 + } 187 + 158 188 /* 159 189 * Device identification VPD, for a complete list of 160 190 * DESIGNATOR TYPEs see spc4r17 Table 459. ··· 251 219 * VENDOR_SPECIFIC_IDENTIFIER and 252 220 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION 253 221 */ 254 - buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]); 255 - hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12); 222 + target_parse_naa_6h_vendor_specific(dev, &buf[off]); 256 223 257 224 len = 20; 258 225 off = (len + 4);
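The new helper exists because unit serials set via vpd_unit_serial may contain separators (a UUID-style serial has '-' in it), which the old hex2bin() call could not digest; the parser keeps only hex digits and packs them two to a byte, with the very first digit landing in the low nibble of a byte whose high nibble already carries the NAA 6h prefix. A self-contained userspace sketch of the same packing, illustrative names only:

    #include <ctype.h>

    static int hexval(unsigned char c)
    {
        return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
    }

    /* buf[0]'s high nibble is assumed pre-seeded (NAA = 6h in the hunk). */
    static void pack_serial(const char *p, unsigned char *buf, int max_bytes)
    {
        int cnt = 0, low = 1;   /* start on the low nibble of buf[0] */

        for (; *p && cnt < max_bytes; p++) {
            if (!isxdigit((unsigned char)*p))
                continue;       /* skip '-' and other separators */
            if (low)
                buf[cnt++] |= hexval(*p);
            else
                buf[cnt] = hexval(*p) << 4;
            low = !low;
        }
    }

Calling pack_serial("a0-12-3f", buf, 13) on a buf whose first byte is pre-seeded to 0x60 yields 0x6a 0x01 0x23 0xf0, mirroring the hunk's cnt/next walk.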
+4 -5
drivers/target/target_core_transport.c
··· 977 977 { 978 978 struct se_device *dev = container_of(work, struct se_device, 979 979 qf_work_queue); 980 + LIST_HEAD(qf_cmd_list); 980 981 struct se_cmd *cmd, *cmd_tmp; 981 982 982 983 spin_lock_irq(&dev->qf_cmd_lock); 983 - list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) { 984 + list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 985 + spin_unlock_irq(&dev->qf_cmd_lock); 984 986 987 + list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 985 988 list_del(&cmd->se_qf_node); 986 989 atomic_dec(&dev->dev_qf_count); 987 990 smp_mb__after_atomic_dec(); 988 - spin_unlock_irq(&dev->qf_cmd_lock); 989 991 990 992 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 991 993 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, ··· 999 997 * has been added to head of queue 1000 998 */ 1001 999 transport_add_cmd_to_queue(cmd, cmd->t_state); 1002 - 1003 - spin_lock_irq(&dev->qf_cmd_lock); 1004 1000 } 1005 - spin_unlock_irq(&dev->qf_cmd_lock); 1006 1001 } 1007 1002 1008 1003 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
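The queue-full handler above stops toggling qf_cmd_lock around every element and instead drains the whole list in one shot: splice it onto a stack-local head under the lock, then walk the private copy unlocked. That removes the drop-and-retake dance in the old loop, which left a window for concurrent insertions mid-walk. A minimal sketch of the pattern, names illustrative:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct item { struct list_head node; };

    static void drain(struct list_head *shared, spinlock_t *lock)
    {
        LIST_HEAD(local);
        struct item *it, *tmp;

        spin_lock_irq(lock);
        list_splice_init(shared, &local);   /* shared is now empty */
        spin_unlock_irq(lock);

        list_for_each_entry_safe(it, tmp, &local, node) {
            list_del(&it->node);
            /* process 'it' without holding the lock */
        }
    }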
+2 -10
drivers/target/tcm_fc/tcm_fc.h
··· 98 98 struct list_head list; /* linkage in ft_lport_acl tpg_list */ 99 99 struct list_head lun_list; /* head of LUNs */ 100 100 struct se_portal_group se_tpg; 101 - struct task_struct *thread; /* processing thread */ 102 - struct se_queue_obj qobj; /* queue for processing thread */ 101 + struct workqueue_struct *workqueue; 103 102 }; 104 103 105 104 struct ft_lport_acl { ··· 109 110 struct se_wwn fc_lport_wwn; 110 111 }; 111 112 112 - enum ft_cmd_state { 113 - FC_CMD_ST_NEW = 0, 114 - FC_CMD_ST_REJ 115 - }; 116 - 117 113 /* 118 114 * Commands 119 115 */ 120 116 struct ft_cmd { 121 - enum ft_cmd_state state; 122 117 u32 lun; /* LUN from request */ 123 118 struct ft_sess *sess; /* session held for cmd */ 124 119 struct fc_seq *seq; /* sequence in exchange mgr */ ··· 120 127 struct fc_frame *req_frame; 121 128 unsigned char *cdb; /* pointer to CDB inside frame */ 122 129 u32 write_data_len; /* data received on writes */ 123 - struct se_queue_req se_req; 130 + struct work_struct work; 124 131 /* Local sense buffer */ 125 132 unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER]; 126 133 u32 was_ddp_setup:1; /* Set only if ddp is setup */ ··· 170 177 /* 171 178 * other internal functions. 172 179 */ 173 - int ft_thread(void *); 174 180 void ft_recv_req(struct ft_sess *, struct fc_frame *); 175 181 struct ft_tpg *ft_lport_find_tpg(struct fc_lport *); 176 182 struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
+11 -79
drivers/target/tcm_fc/tfc_cmd.c
··· 62 62 int count; 63 63 64 64 se_cmd = &cmd->se_cmd; 65 - pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n", 66 - caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); 65 + pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n", 66 + caller, cmd, cmd->sess, cmd->seq, se_cmd); 67 67 pr_debug("%s: cmd %p cdb %p\n", 68 68 caller, cmd, cmd->cdb); 69 69 pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); ··· 88 88 } 89 89 print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE, 90 90 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); 91 - } 92 - 93 - static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) 94 - { 95 - struct ft_tpg *tpg = sess->tport->tpg; 96 - struct se_queue_obj *qobj = &tpg->qobj; 97 - unsigned long flags; 98 - 99 - qobj = &sess->tport->tpg->qobj; 100 - spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 101 - list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list); 102 - atomic_inc(&qobj->queue_cnt); 103 - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 104 - 105 - wake_up_process(tpg->thread); 106 - } 107 - 108 - static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj) 109 - { 110 - unsigned long flags; 111 - struct se_queue_req *qr; 112 - 113 - spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 114 - if (list_empty(&qobj->qobj_list)) { 115 - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 116 - return NULL; 117 - } 118 - qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list); 119 - list_del(&qr->qr_list); 120 - atomic_dec(&qobj->queue_cnt); 121 - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 122 - return container_of(qr, struct ft_cmd, se_req); 123 91 } 124 92 125 93 static void ft_free_cmd(struct ft_cmd *cmd) ··· 250 282 251 283 int ft_get_cmd_state(struct se_cmd *se_cmd) 252 284 { 253 - struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); 254 - 255 - return cmd->state; 285 + return 0; 256 286 } 257 287 258 288 int ft_is_state_remove(struct se_cmd *se_cmd) ··· 471 505 return 0; 472 506 } 473 507 508 + static void ft_send_work(struct work_struct *work); 509 + 474 510 /* 475 511 * Handle incoming FCP command. 476 512 */ ··· 491 523 goto busy; 492 524 } 493 525 cmd->req_frame = fp; /* hold frame during cmd */ 494 - ft_queue_cmd(sess, cmd); 526 + 527 + INIT_WORK(&cmd->work, ft_send_work); 528 + queue_work(sess->tport->tpg->workqueue, &cmd->work); 495 529 return; 496 530 497 531 busy: ··· 533 563 /* 534 564 * Send new command to target. 535 565 */ 536 - static void ft_send_cmd(struct ft_cmd *cmd) 566 + static void ft_send_work(struct work_struct *work) 537 567 { 568 + struct ft_cmd *cmd = container_of(work, struct ft_cmd, work); 538 569 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); 539 570 struct se_cmd *se_cmd; 540 571 struct fcp_cmnd *fcp; 541 - int data_dir; 572 + int data_dir = 0; 542 573 u32 data_len; 543 574 int task_attr; 544 575 int ret; ··· 645 674 646 675 err: 647 676 ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); 648 - } 649 - 650 - /* 651 - * Handle request in the command thread. 652 - */ 653 - static void ft_exec_req(struct ft_cmd *cmd) 654 - { 655 - pr_debug("cmd state %x\n", cmd->state); 656 - switch (cmd->state) { 657 - case FC_CMD_ST_NEW: 658 - ft_send_cmd(cmd); 659 - break; 660 - default: 661 - break; 662 - } 663 - } 664 - 665 - /* 666 - * Processing thread. 667 - * Currently one thread per tpg. 
668 - */ 669 - int ft_thread(void *arg) 670 - { 671 - struct ft_tpg *tpg = arg; 672 - struct se_queue_obj *qobj = &tpg->qobj; 673 - struct ft_cmd *cmd; 674 - 675 - while (!kthread_should_stop()) { 676 - schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); 677 - if (kthread_should_stop()) 678 - goto out; 679 - 680 - cmd = ft_dequeue_cmd(qobj); 681 - if (cmd) 682 - ft_exec_req(cmd); 683 - } 684 - 685 - out: 686 - return 0; 687 677 }
+3 -4
drivers/target/tcm_fc/tfc_conf.c
··· 327 327 tpg->index = index; 328 328 tpg->lport_acl = lacl; 329 329 INIT_LIST_HEAD(&tpg->lun_list); 330 - transport_init_queue_obj(&tpg->qobj); 331 330 332 331 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, 333 332 tpg, TRANSPORT_TPG_TYPE_NORMAL); ··· 335 336 return NULL; 336 337 } 337 338 338 - tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index); 339 - if (IS_ERR(tpg->thread)) { 339 + tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1); 340 + if (!tpg->workqueue) { 340 341 kfree(tpg); 341 342 return NULL; 342 343 } ··· 355 356 pr_debug("del tpg %s\n", 356 357 config_item_name(&tpg->se_tpg.tpg_group.cg_item)); 357 358 358 - kthread_stop(tpg->thread); 359 + destroy_workqueue(tpg->workqueue); 359 360 360 361 /* Wait for sessions to be freed thru RCU, for BUG_ON below */ 361 362 synchronize_rcu();
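Taken together, the tcm_fc.h, tfc_cmd.c and tfc_conf.c hunks retire the per-tpg kthread and its hand-rolled se_queue_obj in favor of a workqueue: each incoming command embeds a work_struct, ft_recv_req() queues it, and the handler recovers the command via container_of(). alloc_workqueue("tcm_fc", 0, 1) keeps max_active at 1, preserving the old single-thread ordering per tpg, and destroy_workqueue() flushes pending work where kthread_stop() used to park the thread. A condensed sketch of the shape, with my_req/my_handler as stand-in names:

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct my_req {
        struct work_struct work;
        /* request payload ... */
    };

    static void my_handler(struct work_struct *work)
    {
        struct my_req *req = container_of(work, struct my_req, work);
        /* process req, then release it */
        kfree(req);
    }

    static int my_recv(struct workqueue_struct *wq)
    {
        struct my_req *req = kzalloc(sizeof(*req), GFP_ATOMIC);

        if (!req)
            return -ENOMEM;
        INIT_WORK(&req->work, my_handler);
        queue_work(wq, &req->work);
        return 0;
    }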
+30 -32
drivers/target/tcm_fc/tfc_io.c
··· 219 219 if (cmd->was_ddp_setup) { 220 220 BUG_ON(!ep); 221 221 BUG_ON(!lport); 222 - } 223 - 224 - /* 225 - * Doesn't expect payload if DDP is setup. Payload 226 - * is expected to be copied directly to user buffers 227 - * due to DDP (Large Rx offload), 228 - */ 229 - buf = fc_frame_payload_get(fp, 1); 230 - if (buf) 231 - pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " 222 + /* 223 + * Since DDP (Large Rx offload) was setup for this request, 224 + * payload is expected to be copied directly to user buffers. 225 + */ 226 + buf = fc_frame_payload_get(fp, 1); 227 + if (buf) 228 + pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " 232 229 "cmd->sg_cnt 0x%x. DDP was setup" 233 230 " hence not expected to receive frame with " 234 - "payload, Frame will be dropped if " 235 - "'Sequence Initiative' bit in f_ctl is " 231 + "payload, Frame will be dropped if" 232 + "'Sequence Initiative' bit in f_ctl is" 236 233 "not set\n", __func__, ep->xid, f_ctl, 237 234 cmd->sg, cmd->sg_cnt); 238 - /* 239 - * Invalidate HW DDP context if it was setup for respective 240 - * command. Invalidation of HW DDP context is requited in both 241 - * situation (success and error). 242 - */ 243 - ft_invl_hw_context(cmd); 235 + /* 236 + * Invalidate HW DDP context if it was setup for respective 237 + * command. Invalidation of HW DDP context is requited in both 238 + * situation (success and error). 239 + */ 240 + ft_invl_hw_context(cmd); 244 241 245 - /* 246 - * If "Sequence Initiative (TSI)" bit set in f_ctl, means last 247 - * write data frame is received successfully where payload is 248 - * posted directly to user buffer and only the last frame's 249 - * header is posted in receive queue. 250 - * 251 - * If "Sequence Initiative (TSI)" bit is not set, means error 252 - * condition w.r.t. DDP, hence drop the packet and let explict 253 - * ABORTS from other end of exchange timer trigger the recovery. 254 - */ 255 - if (f_ctl & FC_FC_SEQ_INIT) 256 - goto last_frame; 257 - else 258 - goto drop; 242 + /* 243 + * If "Sequence Initiative (TSI)" bit set in f_ctl, means last 244 + * write data frame is received successfully where payload is 245 + * posted directly to user buffer and only the last frame's 246 + * header is posted in receive queue. 247 + * 248 + * If "Sequence Initiative (TSI)" bit is not set, means error 249 + * condition w.r.t. DDP, hence drop the packet and let explict 250 + * ABORTS from other end of exchange timer trigger the recovery. 251 + */ 252 + if (f_ctl & FC_FC_SEQ_INIT) 253 + goto last_frame; 254 + else 255 + goto drop; 256 + } 259 257 260 258 rel_off = ntohl(fh->fh_parm_offset); 261 259 frame_len = fr_len(fp);
+2 -2
drivers/tty/serial/crisv10.c
··· 4450 4450 4451 4451 #if defined(CONFIG_ETRAX_RS485) 4452 4452 #if defined(CONFIG_ETRAX_RS485_ON_PA) 4453 - if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit, 4453 + if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit, 4454 4454 rs485_pa_bit)) { 4455 4455 printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " 4456 4456 "RS485 pin\n"); ··· 4459 4459 } 4460 4460 #endif 4461 4461 #if defined(CONFIG_ETRAX_RS485_ON_PORT_G) 4462 - if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit, 4462 + if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit, 4463 4463 rs485_port_g_bit)) { 4464 4464 printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " 4465 4465 "RS485 pin\n");
+1 -1
drivers/usb/host/xhci-hub.c
··· 761 761 memset(buf, 0, retval); 762 762 status = 0; 763 763 764 - mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC; 764 + mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC; 765 765 766 766 spin_lock_irqsave(&xhci->lock, flags); 767 767 /* For each port, did anything change? If so, set that bit in buf. */
+19
drivers/usb/host/xhci-ring.c
··· 1934 1934 int status = -EINPROGRESS; 1935 1935 struct urb_priv *urb_priv; 1936 1936 struct xhci_ep_ctx *ep_ctx; 1937 + struct list_head *tmp; 1937 1938 u32 trb_comp_code; 1938 1939 int ret = 0; 1940 + int td_num = 0; 1939 1941 1940 1942 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1941 1943 xdev = xhci->devs[slot_id]; ··· 1957 1955 xhci_err(xhci, "ERROR Transfer event for disabled endpoint " 1958 1956 "or incorrect stream ring\n"); 1959 1957 return -ENODEV; 1958 + } 1959 + 1960 + /* Count current td numbers if ep->skip is set */ 1961 + if (ep->skip) { 1962 + list_for_each(tmp, &ep_ring->td_list) 1963 + td_num++; 1960 1964 } 1961 1965 1962 1966 event_dma = le64_to_cpu(event->buffer); ··· 2076 2068 goto cleanup; 2077 2069 } 2078 2070 2071 + /* We've skipped all the TDs on the ep ring when ep->skip set */ 2072 + if (ep->skip && td_num == 0) { 2073 + ep->skip = false; 2074 + xhci_dbg(xhci, "All tds on the ep_ring skipped. " 2075 + "Clear skip flag.\n"); 2076 + ret = 0; 2077 + goto cleanup; 2078 + } 2079 + 2079 2080 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); 2081 + if (ep->skip) 2082 + td_num--; 2080 2083 2081 2084 /* Is this a TRB in the currently executing TD? */ 2082 2085 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
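The xhci change guards the skip path: when ep->skip is set, the handler first counts the TDs queued on the ring, and if an event arrives after every one of them has already been skipped it clears the flag and bails out rather than dereferencing the head of an empty td_list. A sketch of that guard, with illustrative names:

    #include <linux/types.h>
    #include <linux/list.h>

    static int count_entries(struct list_head *head)
    {
        struct list_head *tmp;
        int n = 0;

        list_for_each(tmp, head)
            n++;
        return n;
    }

    /* Returns true when skip mode drained the ring and was cleared. */
    static bool clear_skip_if_drained(struct list_head *td_list, bool *skip)
    {
        if (*skip && count_entries(td_list) == 0) {
            *skip = false;
            return true;    /* nothing left to match this event to */
        }
        return false;
    }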
+7 -6
drivers/watchdog/hpwdt.c
··· 494 494 asminline_call(&cmn_regs, cru_rom_addr); 495 495 die_nmi_called = 1; 496 496 spin_unlock_irqrestore(&rom_lock, rom_pl); 497 - if (!is_icru) { 498 - if (cmn_regs.u1.ral == 0) { 499 - printk(KERN_WARNING "hpwdt: An NMI occurred, " 500 - "but unable to determine source.\n"); 501 - } 502 - } 503 497 504 498 if (allow_kdump) 505 499 hpwdt_stop(); 500 + 501 + if (!is_icru) { 502 + if (cmn_regs.u1.ral == 0) { 503 + panic("An NMI occurred, " 504 + "but unable to determine source.\n"); 505 + } 506 + } 506 507 panic("An NMI occurred, please see the Integrated " 507 508 "Management Log for details.\n"); 508 509
+4 -4
drivers/watchdog/lantiq_wdt.c
··· 51 51 static void 52 52 ltq_wdt_enable(void) 53 53 { 54 - ltq_wdt_timeout = ltq_wdt_timeout * 54 + unsigned long int timeout = ltq_wdt_timeout * 55 55 (ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000; 56 - if (ltq_wdt_timeout > LTQ_MAX_TIMEOUT) 57 - ltq_wdt_timeout = LTQ_MAX_TIMEOUT; 56 + if (timeout > LTQ_MAX_TIMEOUT) 57 + timeout = LTQ_MAX_TIMEOUT; 58 58 59 59 /* write the first password magic */ 60 60 ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR); 61 61 /* write the second magic plus the configuration and new timeout */ 62 62 ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV | 63 - LTQ_WDT_PW2 | ltq_wdt_timeout, ltq_wdt_membase + LTQ_WDT_CR); 63 + LTQ_WDT_PW2 | timeout, ltq_wdt_membase + LTQ_WDT_CR); 64 64 } 65 65 66 66 static void
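The lantiq_wdt fix is a small but classic one: ltq_wdt_enable() used to scale the module-global ltq_wdt_timeout in place, so every keepalive re-applied the clock conversion to an already-converted value. Deriving the hardware count into a local keeps the user-visible setting stable, and the clamp (LTQ_MAX_TIMEOUT in the hunk) applies only to the derived value. The shape of the fix as a standalone sketch:

    /* Deriving a hardware value from a user setting must never write the
     * derived value back into the setting, or repeated calls compound it. */
    static unsigned long hw_timeout(unsigned long user_timeout_s,
                                    unsigned long clk_rate,
                                    unsigned long divider,
                                    unsigned long max)
    {
        unsigned long t = user_timeout_s * (clk_rate / divider) + 0x1000;

        return t > max ? max : t;   /* clamp the copy, not the setting */
    }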
+1 -1
drivers/watchdog/sbc_epx_c3.c
··· 173 173 .notifier_call = epx_c3_notify_sys, 174 174 }; 175 175 176 - static const char banner[] __initdata = KERN_INFO PFX 176 + static const char banner[] __initconst = KERN_INFO PFX 177 177 "Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n"; 178 178 179 179 static int __init watchdog_init(void)
+7 -7
drivers/watchdog/watchdog_dev.c
··· 59 59 60 60 static int watchdog_ping(struct watchdog_device *wddev) 61 61 { 62 - if (test_bit(WDOG_ACTIVE, &wdd->status)) { 62 + if (test_bit(WDOG_ACTIVE, &wddev->status)) { 63 63 if (wddev->ops->ping) 64 64 return wddev->ops->ping(wddev); /* ping the watchdog */ 65 65 else ··· 81 81 { 82 82 int err; 83 83 84 - if (!test_bit(WDOG_ACTIVE, &wdd->status)) { 84 + if (!test_bit(WDOG_ACTIVE, &wddev->status)) { 85 85 err = wddev->ops->start(wddev); 86 86 if (err < 0) 87 87 return err; 88 88 89 - set_bit(WDOG_ACTIVE, &wdd->status); 89 + set_bit(WDOG_ACTIVE, &wddev->status); 90 90 } 91 91 return 0; 92 92 } ··· 105 105 { 106 106 int err = -EBUSY; 107 107 108 - if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) { 108 + if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) { 109 109 pr_info("%s: nowayout prevents watchdog to be stopped!\n", 110 - wdd->info->identity); 110 + wddev->info->identity); 111 111 return err; 112 112 } 113 113 114 - if (test_bit(WDOG_ACTIVE, &wdd->status)) { 114 + if (test_bit(WDOG_ACTIVE, &wddev->status)) { 115 115 err = wddev->ops->stop(wddev); 116 116 if (err < 0) 117 117 return err; 118 118 119 - clear_bit(WDOG_ACTIVE, &wdd->status); 119 + clear_bit(WDOG_ACTIVE, &wddev->status); 120 120 } 121 121 return 0; 122 122 }
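The watchdog_dev.c hunks are a straight reference fix: the helpers take a watchdog_device parameter named wddev but the bodies still said wdd, which cannot compile with no such name in scope. For context, a compact sketch of the status-bit protocol these helpers implement; the bit values here are local stand-ins, not the header's:

    #include <linux/bitops.h>

    #define MY_WDOG_ACTIVE     0    /* illustrative values */
    #define MY_WDOG_NO_WAY_OUT 1

    static int can_stop(unsigned long *status)
    {
        if (test_bit(MY_WDOG_NO_WAY_OUT, status))
            return 0;                           /* nowayout: refuse */
        return test_bit(MY_WDOG_ACTIVE, status); /* stop only if running */
    }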
+20 -20
drivers/xen/events.c
··· 54 54 * This lock protects updates to the following mapping and reference-count 55 55 * arrays. The lock does not need to be acquired to read the mapping tables. 56 56 */ 57 - static DEFINE_SPINLOCK(irq_mapping_update_lock); 57 + static DEFINE_MUTEX(irq_mapping_update_lock); 58 58 59 59 static LIST_HEAD(xen_irq_list_head); 60 60 ··· 631 631 int irq = -1; 632 632 struct physdev_irq irq_op; 633 633 634 - spin_lock(&irq_mapping_update_lock); 634 + mutex_lock(&irq_mapping_update_lock); 635 635 636 636 irq = find_irq_by_gsi(gsi); 637 637 if (irq != -1) { ··· 684 684 handle_edge_irq, name); 685 685 686 686 out: 687 - spin_unlock(&irq_mapping_update_lock); 687 + mutex_unlock(&irq_mapping_update_lock); 688 688 689 689 return irq; 690 690 } ··· 710 710 { 711 711 int irq, ret; 712 712 713 - spin_lock(&irq_mapping_update_lock); 713 + mutex_lock(&irq_mapping_update_lock); 714 714 715 715 irq = xen_allocate_irq_dynamic(); 716 716 if (irq == -1) ··· 724 724 if (ret < 0) 725 725 goto error_irq; 726 726 out: 727 - spin_unlock(&irq_mapping_update_lock); 727 + mutex_unlock(&irq_mapping_update_lock); 728 728 return irq; 729 729 error_irq: 730 - spin_unlock(&irq_mapping_update_lock); 730 + mutex_unlock(&irq_mapping_update_lock); 731 731 xen_free_irq(irq); 732 732 return -1; 733 733 } ··· 740 740 struct irq_info *info = info_for_irq(irq); 741 741 int rc = -ENOENT; 742 742 743 - spin_lock(&irq_mapping_update_lock); 743 + mutex_lock(&irq_mapping_update_lock); 744 744 745 745 desc = irq_to_desc(irq); 746 746 if (!desc) ··· 766 766 xen_free_irq(irq); 767 767 768 768 out: 769 - spin_unlock(&irq_mapping_update_lock); 769 + mutex_unlock(&irq_mapping_update_lock); 770 770 return rc; 771 771 } 772 772 ··· 776 776 777 777 struct irq_info *info; 778 778 779 - spin_lock(&irq_mapping_update_lock); 779 + mutex_lock(&irq_mapping_update_lock); 780 780 781 781 list_for_each_entry(info, &xen_irq_list_head, list) { 782 782 if (info == NULL || info->type != IRQT_PIRQ) ··· 787 787 } 788 788 irq = -1; 789 789 out: 790 - spin_unlock(&irq_mapping_update_lock); 790 + mutex_unlock(&irq_mapping_update_lock); 791 791 792 792 return irq; 793 793 } ··· 802 802 { 803 803 int irq; 804 804 805 - spin_lock(&irq_mapping_update_lock); 805 + mutex_lock(&irq_mapping_update_lock); 806 806 807 807 irq = evtchn_to_irq[evtchn]; 808 808 ··· 818 818 } 819 819 820 820 out: 821 - spin_unlock(&irq_mapping_update_lock); 821 + mutex_unlock(&irq_mapping_update_lock); 822 822 823 823 return irq; 824 824 } ··· 829 829 struct evtchn_bind_ipi bind_ipi; 830 830 int evtchn, irq; 831 831 832 - spin_lock(&irq_mapping_update_lock); 832 + mutex_lock(&irq_mapping_update_lock); 833 833 834 834 irq = per_cpu(ipi_to_irq, cpu)[ipi]; 835 835 ··· 853 853 } 854 854 855 855 out: 856 - spin_unlock(&irq_mapping_update_lock); 856 + mutex_unlock(&irq_mapping_update_lock); 857 857 return irq; 858 858 } 859 859 ··· 878 878 struct evtchn_bind_virq bind_virq; 879 879 int evtchn, irq; 880 880 881 - spin_lock(&irq_mapping_update_lock); 881 + mutex_lock(&irq_mapping_update_lock); 882 882 883 883 irq = per_cpu(virq_to_irq, cpu)[virq]; 884 884 ··· 903 903 } 904 904 905 905 out: 906 - spin_unlock(&irq_mapping_update_lock); 906 + mutex_unlock(&irq_mapping_update_lock); 907 907 908 908 return irq; 909 909 } ··· 913 913 struct evtchn_close close; 914 914 int evtchn = evtchn_from_irq(irq); 915 915 916 - spin_lock(&irq_mapping_update_lock); 916 + mutex_lock(&irq_mapping_update_lock); 917 917 918 918 if (VALID_EVTCHN(evtchn)) { 919 919 close.port = evtchn; ··· 943 943 944 944 
xen_free_irq(irq); 945 945 946 - spin_unlock(&irq_mapping_update_lock); 946 + mutex_unlock(&irq_mapping_update_lock); 947 947 } 948 948 949 949 int bind_evtchn_to_irqhandler(unsigned int evtchn, ··· 1279 1279 will also be masked. */ 1280 1280 disable_irq(irq); 1281 1281 1282 - spin_lock(&irq_mapping_update_lock); 1282 + mutex_lock(&irq_mapping_update_lock); 1283 1283 1284 1284 /* After resume the irq<->evtchn mappings are all cleared out */ 1285 1285 BUG_ON(evtchn_to_irq[evtchn] != -1); ··· 1289 1289 1290 1290 xen_irq_info_evtchn_init(irq, evtchn); 1291 1291 1292 - spin_unlock(&irq_mapping_update_lock); 1292 + mutex_unlock(&irq_mapping_update_lock); 1293 1293 1294 1294 /* new event channels are always bound to cpu 0 */ 1295 1295 irq_set_affinity(irq, cpumask_of(0));
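The xen/events.c sweep converts irq_mapping_update_lock from a spinlock to a mutex: the sections it protects can sleep (IRQ descriptor allocation among them), which is illegal under a spinlock, and readers of the mapping tables never took the lock anyway, as the comment above it notes. The conversion pattern in miniature, with a stubbed sleepy call:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(map_lock);

    static int do_setup_that_may_sleep(void)
    {
        return 0;   /* stand-in for allocation / descriptor setup */
    }

    static int update_mapping(void)
    {
        int ret;

        mutex_lock(&map_lock);      /* sleeping inside is now fine */
        ret = do_setup_that_may_sleep();
        mutex_unlock(&map_lock);
        return ret;
    }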
+6 -1
drivers/zorro/zorro.c
··· 148 148 } 149 149 platform_set_drvdata(pdev, bus); 150 150 151 - /* Register all devices */ 152 151 pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n", 153 152 zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s"); 154 153 154 + /* First identify all devices ... */ 155 155 for (i = 0; i < zorro_num_autocon; i++) { 156 156 z = &zorro_autocon[i]; 157 157 z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8); ··· 172 172 dev_set_name(&z->dev, "%02x", i); 173 173 z->dev.parent = &bus->dev; 174 174 z->dev.bus = &zorro_bus_type; 175 + } 176 + 177 + /* ... then register them */ 178 + for (i = 0; i < zorro_num_autocon; i++) { 179 + z = &zorro_autocon[i]; 175 180 error = device_register(&z->dev); 176 181 if (error) { 177 182 dev_err(&bus->dev, "Error registering device %s\n",
+23 -10
fs/btrfs/file.c
··· 1036 1036 * on error we return an unlocked page and the error value 1037 1037 * on success we return a locked page and 0 1038 1038 */ 1039 - static int prepare_uptodate_page(struct page *page, u64 pos) 1039 + static int prepare_uptodate_page(struct page *page, u64 pos, 1040 + bool force_uptodate) 1040 1041 { 1041 1042 int ret = 0; 1042 1043 1043 - if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) { 1044 + if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) && 1045 + !PageUptodate(page)) { 1044 1046 ret = btrfs_readpage(NULL, page); 1045 1047 if (ret) 1046 1048 return ret; ··· 1063 1061 static noinline int prepare_pages(struct btrfs_root *root, struct file *file, 1064 1062 struct page **pages, size_t num_pages, 1065 1063 loff_t pos, unsigned long first_index, 1066 - size_t write_bytes) 1064 + size_t write_bytes, bool force_uptodate) 1067 1065 { 1068 1066 struct extent_state *cached_state = NULL; 1069 1067 int i; ··· 1088 1086 } 1089 1087 1090 1088 if (i == 0) 1091 - err = prepare_uptodate_page(pages[i], pos); 1089 + err = prepare_uptodate_page(pages[i], pos, 1090 + force_uptodate); 1092 1091 if (i == num_pages - 1) 1093 1092 err = prepare_uptodate_page(pages[i], 1094 - pos + write_bytes); 1093 + pos + write_bytes, false); 1095 1094 if (err) { 1096 1095 page_cache_release(pages[i]); 1097 1096 faili = i - 1; ··· 1161 1158 size_t num_written = 0; 1162 1159 int nrptrs; 1163 1160 int ret = 0; 1161 + bool force_page_uptodate = false; 1164 1162 1165 1163 nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) / 1166 1164 PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / ··· 1204 1200 * contents of pages from loop to loop 1205 1201 */ 1206 1202 ret = prepare_pages(root, file, pages, num_pages, 1207 - pos, first_index, write_bytes); 1203 + pos, first_index, write_bytes, 1204 + force_page_uptodate); 1208 1205 if (ret) { 1209 1206 btrfs_delalloc_release_space(inode, 1210 1207 num_pages << PAGE_CACHE_SHIFT); ··· 1222 1217 if (copied < write_bytes) 1223 1218 nrptrs = 1; 1224 1219 1225 - if (copied == 0) 1220 + if (copied == 0) { 1221 + force_page_uptodate = true; 1226 1222 dirty_pages = 0; 1227 - else 1223 + } else { 1224 + force_page_uptodate = false; 1228 1225 dirty_pages = (copied + offset + 1229 1226 PAGE_CACHE_SIZE - 1) >> 1230 1227 PAGE_CACHE_SHIFT; 1228 + } 1231 1229 1232 1230 /* 1233 1231 * If we had a short copy we need to release the excess delaloc ··· 1825 1817 goto out; 1826 1818 case SEEK_DATA: 1827 1819 case SEEK_HOLE: 1820 + if (offset >= i_size_read(inode)) { 1821 + mutex_unlock(&inode->i_mutex); 1822 + return -ENXIO; 1823 + } 1824 + 1828 1825 ret = find_desired_extent(inode, &offset, origin); 1829 1826 if (ret) { 1830 1827 mutex_unlock(&inode->i_mutex); ··· 1838 1825 } 1839 1826 1840 1827 if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) { 1841 - ret = -EINVAL; 1828 + offset = -EINVAL; 1842 1829 goto out; 1843 1830 } 1844 1831 if (offset > inode->i_sb->s_maxbytes) { 1845 - ret = -EINVAL; 1832 + offset = -EINVAL; 1846 1833 goto out; 1847 1834 } 1848 1835
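Two independent btrfs fixes share this hunk. First, when copy_from_user() manages zero bytes, the retried page can be left partially written yet not uptodate, so the next prepare_pages() pass must read it back in full; that is what the force_uptodate plumbing carries down. Second, SEEK_DATA/SEEK_HOLE at or beyond EOF must fail with -ENXIO before any extent lookup runs, and the error paths now hand errors back through offset, which is what the function ultimately returns. The seek guard as a standalone sketch:

    #include <linux/fs.h>

    /* SEEK_DATA/SEEK_HOLE at or past EOF yield -ENXIO per POSIX. */
    static loff_t check_seek_bounds(struct inode *inode, loff_t offset,
                                    int whence)
    {
        if ((whence == SEEK_DATA || whence == SEEK_HOLE) &&
            offset >= i_size_read(inode))
            return -ENXIO;
        return offset;
    }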
+14 -4
fs/btrfs/inode.c
··· 4018 4018 memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key)); 4019 4019 kfree(dentry->d_fsdata); 4020 4020 dentry->d_fsdata = NULL; 4021 - d_clear_need_lookup(dentry); 4021 + /* This thing is hashed, drop it for now */ 4022 + d_drop(dentry); 4022 4023 } else { 4023 4024 ret = btrfs_inode_by_name(dir, dentry, &location); 4024 4025 } ··· 4086 4085 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 4087 4086 struct nameidata *nd) 4088 4087 { 4089 - return d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); 4088 + struct dentry *ret; 4089 + 4090 + ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); 4091 + if (unlikely(d_need_lookup(dentry))) { 4092 + spin_lock(&dentry->d_lock); 4093 + dentry->d_flags &= ~DCACHE_NEED_LOOKUP; 4094 + spin_unlock(&dentry->d_lock); 4095 + } 4096 + return ret; 4090 4097 } 4091 4098 4092 4099 unsigned char btrfs_filetype_table[] = { ··· 4134 4125 4135 4126 /* special case for "." */ 4136 4127 if (filp->f_pos == 0) { 4137 - over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR); 4128 + over = filldir(dirent, ".", 1, 4129 + filp->f_pos, btrfs_ino(inode), DT_DIR); 4138 4130 if (over) 4139 4131 return 0; 4140 4132 filp->f_pos = 1; ··· 4144 4134 if (filp->f_pos == 1) { 4145 4135 u64 pino = parent_ino(filp->f_path.dentry); 4146 4136 over = filldir(dirent, "..", 2, 4147 - 2, pino, DT_DIR); 4137 + filp->f_pos, pino, DT_DIR); 4148 4138 if (over) 4149 4139 return 0; 4150 4140 filp->f_pos = 2;
+15 -6
fs/btrfs/ioctl.c
··· 2177 2177 if (!(src_file->f_mode & FMODE_READ)) 2178 2178 goto out_fput; 2179 2179 2180 + /* don't make the dst file partly checksummed */ 2181 + if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) != 2182 + (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) 2183 + goto out_fput; 2184 + 2180 2185 ret = -EISDIR; 2181 2186 if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode)) 2182 2187 goto out_fput; ··· 2231 2226 goto out_unlock; 2232 2227 } 2233 2228 2229 + /* truncate page cache pages from target inode range */ 2230 + truncate_inode_pages_range(&inode->i_data, destoff, 2231 + PAGE_CACHE_ALIGN(destoff + len) - 1); 2232 + 2234 2233 /* do any pending delalloc/csum calc on src, one way or 2235 2234 another, and lock file content */ 2236 2235 while (1) { ··· 2250 2241 btrfs_put_ordered_extent(ordered); 2251 2242 btrfs_wait_ordered_range(src, off, len); 2252 2243 } 2253 - 2254 - /* truncate page cache pages from target inode range */ 2255 - truncate_inode_pages_range(&inode->i_data, off, 2256 - ALIGN(off + len, PAGE_CACHE_SIZE) - 1); 2257 2244 2258 2245 /* clone data */ 2259 2246 key.objectid = btrfs_ino(src); ··· 2328 2323 else 2329 2324 new_key.offset = destoff; 2330 2325 2331 - trans = btrfs_start_transaction(root, 1); 2326 + /* 2327 + * 1 - adjusting old extent (we may have to split it) 2328 + * 1 - add new extent 2329 + * 1 - inode update 2330 + */ 2331 + trans = btrfs_start_transaction(root, 3); 2332 2332 if (IS_ERR(trans)) { 2333 2333 ret = PTR_ERR(trans); 2334 2334 goto out; ··· 2452 2442 if (endoff > inode->i_size) 2453 2443 btrfs_i_size_write(inode, endoff); 2454 2444 2455 - BTRFS_I(inode)->flags = BTRFS_I(src)->flags; 2456 2445 ret = btrfs_update_inode(trans, root, inode); 2457 2446 BUG_ON(ret); 2458 2447 btrfs_end_transaction(trans, root);
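The clone-ioctl hardening does three things: refuses to mix a checksummed and a NODATASUM inode (cloning across them would leave the target partly checksummed, which is also why the blanket flags copy near the end is dropped), truncates the destination page-cache range using destoff rather than the source offset, and reserves three transaction units, one each for splitting the old extent, inserting the new one, and updating the inode. The compatibility test in isolation, with the flag bit parameterized rather than hard-coded:

    #include <linux/types.h>

    /* Both inodes must agree on the no-checksum bit before cloning. */
    static bool clone_flags_compatible(unsigned int src_flags,
                                       unsigned int dst_flags,
                                       unsigned int nodatasum_bit)
    {
        return (src_flags & nodatasum_bit) == (dst_flags & nodatasum_bit);
    }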
+11 -43
fs/cifs/cifsencrypt.c
··· 351 351 build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp) 352 352 { 353 353 unsigned int dlen; 354 - unsigned int wlen; 355 - unsigned int size = 6 * sizeof(struct ntlmssp2_name); 356 - __le64 curtime; 354 + unsigned int size = 2 * sizeof(struct ntlmssp2_name); 357 355 char *defdmname = "WORKGROUP"; 358 356 unsigned char *blobptr; 359 357 struct ntlmssp2_name *attrptr; ··· 363 365 } 364 366 365 367 dlen = strlen(ses->domainName); 366 - wlen = strlen(ses->server->hostname); 367 368 368 - /* The length of this blob is a size which is 369 - * six times the size of a structure which holds name/size + 370 - * two times the unicode length of a domain name + 371 - * two times the unicode length of a server name + 372 - * size of a timestamp (which is 8 bytes). 369 + /* 370 + * The length of this blob is two times the size of a 371 + * structure (av pair) which holds name/size 372 + * ( for NTLMSSP_AV_NB_DOMAIN_NAME followed by NTLMSSP_AV_EOL ) + 373 + * unicode length of a netbios domain name 373 374 */ 374 - ses->auth_key.len = size + 2 * (2 * dlen) + 2 * (2 * wlen) + 8; 375 + ses->auth_key.len = size + 2 * dlen; 375 376 ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL); 376 377 if (!ses->auth_key.response) { 377 378 ses->auth_key.len = 0; ··· 381 384 blobptr = ses->auth_key.response; 382 385 attrptr = (struct ntlmssp2_name *) blobptr; 383 386 387 + /* 388 + * As defined in MS-NTLM 3.3.2, just this av pair field 389 + * is sufficient as part of the temp 390 + */ 384 391 attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME); 385 392 attrptr->length = cpu_to_le16(2 * dlen); 386 393 blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name); 387 394 cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp); 388 - 389 - blobptr += 2 * dlen; 390 - attrptr = (struct ntlmssp2_name *) blobptr; 391 - 392 - attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_COMPUTER_NAME); 393 - attrptr->length = cpu_to_le16(2 * wlen); 394 - blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name); 395 - cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp); 396 - 397 - blobptr += 2 * wlen; 398 - attrptr = (struct ntlmssp2_name *) blobptr; 399 - 400 - attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_DOMAIN_NAME); 401 - attrptr->length = cpu_to_le16(2 * dlen); 402 - blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name); 403 - cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp); 404 - 405 - blobptr += 2 * dlen; 406 - attrptr = (struct ntlmssp2_name *) blobptr; 407 - 408 - attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_COMPUTER_NAME); 409 - attrptr->length = cpu_to_le16(2 * wlen); 410 - blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name); 411 - cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp); 412 - 413 - blobptr += 2 * wlen; 414 - attrptr = (struct ntlmssp2_name *) blobptr; 415 - 416 - attrptr->type = cpu_to_le16(NTLMSSP_AV_TIMESTAMP); 417 - attrptr->length = cpu_to_le16(sizeof(__le64)); 418 - blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name); 419 - curtime = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); 420 - memcpy(blobptr, &curtime, sizeof(__le64)); 421 395 422 396 return 0; 423 397 }
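The NTLMv2 blob is slimmed down to the single attribute/value pair the spec section cited in the hunk actually requires in the temp buffer, the NetBIOS domain name, followed by the implicit AV_EOL terminator; the server, DNS and timestamp pairs are gone, and the length math shrinks accordingly. A userspace sketch of that computation, with av_pair standing in for ntlmssp2_name:

    #include <stdint.h>

    struct av_pair {
        uint16_t type;
        uint16_t length;
    } __attribute__((packed));

    static unsigned int avpair_blob_len(unsigned int domain_chars)
    {
        return 2 * sizeof(struct av_pair)   /* NB_DOMAIN_NAME + EOL headers */
             + 2 * domain_chars;            /* UTF-16LE domain name payload */
    }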
+6 -4
fs/cifs/cifsfs.c
··· 548 548 struct inode *dir = dentry->d_inode; 549 549 struct dentry *child; 550 550 551 + if (!dir) { 552 + dput(dentry); 553 + dentry = ERR_PTR(-ENOENT); 554 + break; 555 + } 556 + 551 557 /* skip separators */ 552 558 while (*s == sep) 553 559 s++; ··· 569 563 mutex_unlock(&dir->i_mutex); 570 564 dput(dentry); 571 565 dentry = child; 572 - if (!dentry->d_inode) { 573 - dput(dentry); 574 - dentry = ERR_PTR(-ENOENT); 575 - } 576 566 } while (!IS_ERR(dentry)); 577 567 _FreeXid(xid); 578 568 kfree(full_path);
+2 -1
fs/cifs/cifssmb.c
··· 4079 4079 T2_FNEXT_RSP_PARMS *parms; 4080 4080 char *response_data; 4081 4081 int rc = 0; 4082 - int bytes_returned, name_len; 4082 + int bytes_returned; 4083 + unsigned int name_len; 4083 4084 __u16 params, byte_count; 4084 4085 4085 4086 cFYI(1, "In FindNext");
+2 -2
fs/cifs/connect.c
··· 1298 1298 /* ignore */ 1299 1299 } else if (strnicmp(data, "guest", 5) == 0) { 1300 1300 /* ignore */ 1301 - } else if (strnicmp(data, "rw", 2) == 0) { 1301 + } else if (strnicmp(data, "rw", 2) == 0 && strlen(data) == 2) { 1302 1302 /* ignore */ 1303 1303 } else if (strnicmp(data, "ro", 2) == 0) { 1304 1304 /* ignore */ ··· 1401 1401 vol->server_ino = 1; 1402 1402 } else if (strnicmp(data, "noserverino", 9) == 0) { 1403 1403 vol->server_ino = 0; 1404 - } else if (strnicmp(data, "rwpidforward", 4) == 0) { 1404 + } else if (strnicmp(data, "rwpidforward", 12) == 0) { 1405 1405 vol->rwpidforward = 1; 1406 1406 } else if (strnicmp(data, "cifsacl", 7) == 0) { 1407 1407 vol->cifs_acl = 1;
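Both cifs/connect.c fixes are the same pitfall: strnicmp(data, "rw", 2) is a prefix test, so the bare "rw" arm also swallowed "rwpidforward" before that option's own arm could see it (and the latter's length argument was short as well). Exact token matching needs a length check too; a standalone illustration using the userspace spelling strncasecmp():

    #include <string.h>
    #include <strings.h>

    /* Match a whole option token, not just its prefix. */
    static int token_is(const char *data, const char *opt)
    {
        size_t n = strlen(opt);

        return strncasecmp(data, opt, n) == 0 && strlen(data) == n;
    }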
+2 -2
fs/ext3/inode.c
··· 1134 1134 return bh; 1135 1135 if (buffer_uptodate(bh)) 1136 1136 return bh; 1137 - ll_rw_block(READ_META, 1, &bh); 1137 + ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh); 1138 1138 wait_on_buffer(bh); 1139 1139 if (buffer_uptodate(bh)) 1140 1140 return bh; ··· 2807 2807 trace_ext3_load_inode(inode); 2808 2808 get_bh(bh); 2809 2809 bh->b_end_io = end_buffer_read_sync; 2810 - submit_bh(READ_META, bh); 2810 + submit_bh(READ | REQ_META | REQ_PRIO, bh); 2811 2811 wait_on_buffer(bh); 2812 2812 if (!buffer_uptodate(bh)) { 2813 2813 ext3_error(inode->i_sb, "ext3_get_inode_loc",
+2 -1
fs/ext3/namei.c
··· 922 922 bh = ext3_getblk(NULL, dir, b++, 0, &err); 923 923 bh_use[ra_max] = bh; 924 924 if (bh) 925 - ll_rw_block(READ_META, 1, &bh); 925 + ll_rw_block(READ | REQ_META | REQ_PRIO, 926 + 1, &bh); 926 927 } 927 928 } 928 929 if ((bh = bh_use[ra_ptr++]) == NULL)
+2 -2
fs/ext4/inode.c
··· 647 647 return bh; 648 648 if (buffer_uptodate(bh)) 649 649 return bh; 650 - ll_rw_block(READ_META, 1, &bh); 650 + ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh); 651 651 wait_on_buffer(bh); 652 652 if (buffer_uptodate(bh)) 653 653 return bh; ··· 3298 3298 trace_ext4_load_inode(inode); 3299 3299 get_bh(bh); 3300 3300 bh->b_end_io = end_buffer_read_sync; 3301 - submit_bh(READ_META, bh); 3301 + submit_bh(READ | REQ_META | REQ_PRIO, bh); 3302 3302 wait_on_buffer(bh); 3303 3303 if (!buffer_uptodate(bh)) { 3304 3304 EXT4_ERROR_INODE_BLOCK(inode, block,
+2 -1
fs/ext4/namei.c
··· 922 922 bh = ext4_getblk(NULL, dir, b++, 0, &err); 923 923 bh_use[ra_max] = bh; 924 924 if (bh) 925 - ll_rw_block(READ_META, 1, &bh); 925 + ll_rw_block(READ | REQ_META | REQ_PRIO, 926 + 1, &bh); 926 927 } 927 928 } 928 929 if ((bh = bh_use[ra_ptr++]) == NULL)
+2 -2
fs/gfs2/log.c
··· 624 624 bh->b_end_io = end_buffer_write_sync; 625 625 get_bh(bh); 626 626 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) 627 - submit_bh(WRITE_SYNC | REQ_META, bh); 627 + submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh); 628 628 else 629 - submit_bh(WRITE_FLUSH_FUA | REQ_META, bh); 629 + submit_bh(WRITE_FLUSH_FUA | REQ_META | REQ_PRIO, bh); 630 630 wait_on_buffer(bh); 631 631 632 632 if (!buffer_uptodate(bh))
+3 -3
fs/gfs2/meta_io.c
··· 37 37 { 38 38 struct buffer_head *bh, *head; 39 39 int nr_underway = 0; 40 - int write_op = REQ_META | 40 + int write_op = REQ_META | REQ_PRIO | 41 41 (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); 42 42 43 43 BUG_ON(!PageLocked(page)); ··· 225 225 } 226 226 bh->b_end_io = end_buffer_read_sync; 227 227 get_bh(bh); 228 - submit_bh(READ_SYNC | REQ_META, bh); 228 + submit_bh(READ_SYNC | REQ_META | REQ_PRIO, bh); 229 229 if (!(flags & DIO_WAIT)) 230 230 return 0; 231 231 ··· 435 435 if (buffer_uptodate(first_bh)) 436 436 goto out; 437 437 if (!buffer_locked(first_bh)) 438 - ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh); 438 + ll_rw_block(READ_SYNC | REQ_META | REQ_PRIO, 1, &first_bh); 439 439 440 440 dblock++; 441 441 extlen--;
+1 -1
fs/gfs2/ops_fstype.c
··· 224 224 225 225 bio->bi_end_io = end_bio_io_page; 226 226 bio->bi_private = page; 227 - submit_bio(READ_SYNC | REQ_META, bio); 227 + submit_bio(READ_SYNC | REQ_META | REQ_PRIO, bio); 228 228 wait_on_page_locked(page); 229 229 bio_put(bio); 230 230 if (!PageUptodate(page)) {
+1 -1
fs/gfs2/quota.c
··· 709 709 set_buffer_uptodate(bh); 710 710 711 711 if (!buffer_uptodate(bh)) { 712 - ll_rw_block(READ_META, 1, &bh); 712 + ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh); 713 713 wait_on_buffer(bh); 714 714 if (!buffer_uptodate(bh)) 715 715 goto unlock_out;
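The ext3, ext4 and gfs2 hunks from here back are one mechanical sweep: the old READ_META shorthand is spelled out as READ | REQ_META | REQ_PRIO, keeping metadata I/O tagged for tracing and accounting while REQ_PRIO is what now asks the I/O scheduler for priority boosting. The resulting submission idiom, as it appears throughout:

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    static void read_meta_block(struct buffer_head *bh)
    {
        get_bh(bh);
        bh->b_end_io = end_buffer_read_sync;
        /* metadata read, prioritized by the scheduler */
        submit_bh(READ | REQ_META | REQ_PRIO, bh);
        wait_on_buffer(bh);
    }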
+10 -5
fs/hfsplus/super.c
··· 344 344 struct inode *root, *inode; 345 345 struct qstr str; 346 346 struct nls_table *nls = NULL; 347 + u64 last_fs_block, last_fs_page; 347 348 int err; 348 349 349 350 err = -EINVAL; ··· 400 399 if (!sbi->rsrc_clump_blocks) 401 400 sbi->rsrc_clump_blocks = 1; 402 401 403 - err = generic_check_addressable(sbi->alloc_blksz_shift, 404 - sbi->total_blocks); 405 - if (err) { 402 + err = -EFBIG; 403 + last_fs_block = sbi->total_blocks - 1; 404 + last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >> 405 + PAGE_CACHE_SHIFT; 406 + 407 + if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) || 408 + (last_fs_page > (pgoff_t)(~0ULL))) { 406 409 printk(KERN_ERR "hfs: filesystem size too large.\n"); 407 410 goto out_free_vhdr; 408 411 } ··· 530 525 out_close_ext_tree: 531 526 hfs_btree_close(sbi->ext_tree); 532 527 out_free_vhdr: 533 - kfree(sbi->s_vhdr); 534 - kfree(sbi->s_backup_vhdr); 528 + kfree(sbi->s_vhdr_buf); 529 + kfree(sbi->s_backup_vhdr_buf); 535 530 out_unload_nls: 536 531 unload_nls(sbi->nls); 537 532 unload_nls(nls);
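hfsplus grows an explicit size check in place of generic_check_addressable(): the last allocation block, rescaled to 512-byte sectors, must fit in sector_t, and the last page index must fit in pgoff_t, otherwise the mount is refused with -EFBIG. The error path also frees s_vhdr_buf/s_backup_vhdr_buf, the buffers that were actually allocated, rather than the volume-header pointers that merely point into them. The check in sketch form, assuming an allocation block size of at least 512 bytes:

    #include <linux/types.h>
    #include <linux/pagemap.h>

    static int fs_addressable(u64 last_block, unsigned int blksz_shift)
    {
        u64 last_page = (last_block << blksz_shift) >> PAGE_CACHE_SHIFT;

        /* the casts truncate on 32-bit configs, exactly the point */
        if (last_block > (sector_t)(~0ULL) >> (blksz_shift - 9))
            return 0;       /* sector index would overflow */
        if (last_page > (pgoff_t)(~0ULL))
            return 0;       /* page index would overflow */
        return 1;
    }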
+2 -2
fs/hfsplus/wrapper.c
··· 272 272 return 0; 273 273 274 274 out_free_backup_vhdr: 275 - kfree(sbi->s_backup_vhdr); 275 + kfree(sbi->s_backup_vhdr_buf); 276 276 out_free_vhdr: 277 - kfree(sbi->s_vhdr); 277 + kfree(sbi->s_vhdr_buf); 278 278 out: 279 279 return error; 280 280 }
+5 -7
fs/namei.c
··· 721 721 if (!path->dentry->d_op || !path->dentry->d_op->d_automount) 722 722 return -EREMOTE; 723 723 724 - /* We don't want to mount if someone supplied AT_NO_AUTOMOUNT 725 - * and this is the terminal part of the path. 726 - */ 727 - if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT)) 728 - return -EISDIR; /* we actually want to stop here */ 729 - 730 724 /* We don't want to mount if someone's just doing a stat - 731 725 * unless they're stat'ing a directory and appended a '/' to 732 726 * the name. ··· 733 739 * of the daemon to instantiate them before they can be used. 734 740 */ 735 741 if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | 736 - LOOKUP_OPEN | LOOKUP_CREATE)) && 742 + LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) && 737 743 path->dentry->d_inode) 738 744 return -EISDIR; 739 745 ··· 2610 2616 if (!dir->i_op->rmdir) 2611 2617 return -EPERM; 2612 2618 2619 + dget(dentry); 2613 2620 mutex_lock(&dentry->d_inode->i_mutex); 2614 2621 2615 2622 error = -EBUSY; ··· 2631 2636 2632 2637 out: 2633 2638 mutex_unlock(&dentry->d_inode->i_mutex); 2639 + dput(dentry); 2634 2640 if (!error) 2635 2641 d_delete(dentry); 2636 2642 return error; ··· 3021 3025 if (error) 3022 3026 return error; 3023 3027 3028 + dget(new_dentry); 3024 3029 if (target) 3025 3030 mutex_lock(&target->i_mutex); 3026 3031 ··· 3042 3045 out: 3043 3046 if (target) 3044 3047 mutex_unlock(&target->i_mutex); 3048 + dput(new_dentry); 3045 3049 if (!error) 3046 3050 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) 3047 3051 d_move(old_dentry,new_dentry);
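Two distinct namei fixes: follow_automount() stops special-casing LOOKUP_NO_AUTOMOUNT and instead lets callers opt in with LOOKUP_AUTOMOUNT (the fs/namespace.c and NFS hunks nearby add that flag at their call sites), and vfs_rmdir()/the rename path pin the victim dentry with dget() before taking its inode mutex so it cannot be freed while the operation sleeps. The pinning bracket in isolation:

    #include <linux/fs.h>
    #include <linux/dcache.h>

    static int locked_op(struct dentry *dentry)
    {
        int error;

        dget(dentry);                   /* pin across the lock */
        mutex_lock(&dentry->d_inode->i_mutex);
        error = 0; /* ... the actual directory operation ... */
        mutex_unlock(&dentry->d_inode->i_mutex);
        dput(dentry);
        return error;
    }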
+1 -1
fs/namespace.c
··· 1757 1757 return err; 1758 1758 if (!old_name || !*old_name) 1759 1759 return -EINVAL; 1760 - err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); 1760 + err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path); 1761 1761 if (err) 1762 1762 return err; 1763 1763
+5 -3
fs/nfs/nfs4_fs.h
··· 56 56 NFS4_SESSION_DRAINING, 57 57 }; 58 58 59 + #define NFS4_RENEW_TIMEOUT 0x01 60 + #define NFS4_RENEW_DELEGATION_CB 0x02 61 + 59 62 struct nfs4_minor_version_ops { 60 63 u32 minor_version; 61 64 ··· 228 225 }; 229 226 230 227 struct nfs4_state_maintenance_ops { 231 - int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *); 228 + int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *, unsigned); 232 229 struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *); 233 230 int (*renew_lease)(struct nfs_client *, struct rpc_cred *); 234 231 }; ··· 240 237 extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *); 241 238 extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *); 242 239 extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred); 243 - extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *); 244 - extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *); 245 240 extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *); 246 241 extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *); 247 242 extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc); ··· 350 349 extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); 351 350 extern void nfs4_schedule_lease_recovery(struct nfs_client *); 352 351 extern void nfs4_schedule_state_manager(struct nfs_client *); 352 + extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp); 353 353 extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *); 354 354 extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); 355 355 extern void nfs41_handle_recall_slot(struct nfs_client *clp);
+14 -6
fs/nfs/nfs4proc.c
··· 3374 3374 3375 3375 if (task->tk_status < 0) { 3376 3376 /* Unless we're shutting down, schedule state recovery! */ 3377 - if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0) 3377 + if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 3378 + return; 3379 + if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 3378 3380 nfs4_schedule_lease_recovery(clp); 3379 - return; 3381 + return; 3382 + } 3383 + nfs4_schedule_path_down_recovery(clp); 3380 3384 } 3381 3385 do_renew_lease(clp, timestamp); 3382 3386 } ··· 3390 3386 .rpc_release = nfs4_renew_release, 3391 3387 }; 3392 3388 3393 - int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred) 3389 + static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 3394 3390 { 3395 3391 struct rpc_message msg = { 3396 3392 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], ··· 3399 3395 }; 3400 3396 struct nfs4_renewdata *data; 3401 3397 3398 + if (renew_flags == 0) 3399 + return 0; 3402 3400 if (!atomic_inc_not_zero(&clp->cl_count)) 3403 3401 return -EIO; 3404 - data = kmalloc(sizeof(*data), GFP_KERNEL); 3402 + data = kmalloc(sizeof(*data), GFP_NOFS); 3405 3403 if (data == NULL) 3406 3404 return -ENOMEM; 3407 3405 data->client = clp; ··· 3412 3406 &nfs4_renew_ops, data); 3413 3407 } 3414 3408 3415 - int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 3409 + static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 3416 3410 { 3417 3411 struct rpc_message msg = { 3418 3412 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], ··· 5510 5504 return rpc_run_task(&task_setup_data); 5511 5505 } 5512 5506 5513 - static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred) 5507 + static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 5514 5508 { 5515 5509 struct rpc_task *task; 5516 5510 int ret = 0; 5517 5511 5512 + if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 5513 + return 0; 5518 5514 task = _nfs41_proc_sequence(clp, cred); 5519 5515 if (IS_ERR(task)) 5520 5516 ret = PTR_ERR(task);
+9 -3
fs/nfs/nfs4renewd.c
··· 60 60 struct rpc_cred *cred; 61 61 long lease; 62 62 unsigned long last, now; 63 + unsigned renew_flags = 0; 63 64 64 65 ops = clp->cl_mvops->state_renewal_ops; 65 66 dprintk("%s: start\n", __func__); ··· 73 72 last = clp->cl_last_renewal; 74 73 now = jiffies; 75 74 /* Are we close to a lease timeout? */ 76 - if (time_after(now, last + lease/3)) { 75 + if (time_after(now, last + lease/3)) 76 + renew_flags |= NFS4_RENEW_TIMEOUT; 77 + if (nfs_delegations_present(clp)) 78 + renew_flags |= NFS4_RENEW_DELEGATION_CB; 79 + 80 + if (renew_flags != 0) { 77 81 cred = ops->get_state_renewal_cred_locked(clp); 78 82 spin_unlock(&clp->cl_lock); 79 83 if (cred == NULL) { 80 - if (!nfs_delegations_present(clp)) { 84 + if (!(renew_flags & NFS4_RENEW_DELEGATION_CB)) { 81 85 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 82 86 goto out; 83 87 } 84 88 nfs_expire_all_delegations(clp); 85 89 } else { 86 90 /* Queue an asynchronous RENEW. */ 87 - ops->sched_state_renewal(clp, cred); 91 + ops->sched_state_renewal(clp, cred, renew_flags); 88 92 put_rpccred(cred); 89 93 goto out_exp; 90 94 }
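Note on the renewd change: instead of a single "are we near the lease timeout?" test, the renewal pass now collects independent reasons into renew_flags and only bails out when there is nothing to do. A minimal userspace sketch of that flag logic follows; the two flag values come from the nfs4_fs.h hunk above, while pick_renew_flags() and its parameters are hypothetical scaffolding (the kernel uses time_after() here to cope with jiffies wraparound, which the plain comparison below ignores).

    #include <stdio.h>

    #define NFS4_RENEW_TIMEOUT       0x01
    #define NFS4_RENEW_DELEGATION_CB 0x02

    static unsigned pick_renew_flags(unsigned long now, unsigned long last,
                                     long lease, int delegations_present)
    {
        unsigned renew_flags = 0;

        if (now > last + lease / 3)     /* close to lease expiry */
            renew_flags |= NFS4_RENEW_TIMEOUT;
        if (delegations_present)        /* callback path wants checking */
            renew_flags |= NFS4_RENEW_DELEGATION_CB;
        return renew_flags;
    }

    int main(void)
    {
        /* 90s lease, 40s since the last renewal, one delegation held */
        printf("renew_flags=%#x\n", pick_renew_flags(100, 60, 90, 1));
        return 0;
    }

This prints renew_flags=0x3; the nfs4proc.c hunk below then lets NFSv4.1's nfs41_proc_async_sequence() ignore delegation-only renewals by testing NFS4_RENEW_TIMEOUT explicitly.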
+6
fs/nfs/nfs4state.c
··· 1038 1038 nfs4_schedule_state_manager(clp); 1039 1039 } 1040 1040 1041 + void nfs4_schedule_path_down_recovery(struct nfs_client *clp) 1042 + { 1043 + nfs_handle_cb_pathdown(clp); 1044 + nfs4_schedule_state_manager(clp); 1045 + } 1046 + 1041 1047 static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) 1042 1048 { 1043 1049
+21 -4
fs/nfs/super.c
··· 2035 2035 sb->s_blocksize = nfs_block_bits(server->wsize, 2036 2036 &sb->s_blocksize_bits); 2037 2037 2038 - if (server->flags & NFS_MOUNT_NOAC) 2039 - sb->s_flags |= MS_SYNCHRONOUS; 2040 - 2041 2038 sb->s_bdi = &server->backing_dev_info; 2042 2039 2043 2040 nfs_super_set_maxbytes(sb, server->maxfilesize); ··· 2246 2249 if (server->flags & NFS_MOUNT_UNSHARED) 2247 2250 compare_super = NULL; 2248 2251 2252 + /* -o noac implies -o sync */ 2253 + if (server->flags & NFS_MOUNT_NOAC) 2254 + sb_mntdata.mntflags |= MS_SYNCHRONOUS; 2255 + 2249 2256 /* Get a superblock - note that we may end up sharing one that already exists */ 2250 2257 s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata); 2251 2258 if (IS_ERR(s)) { ··· 2361 2360 2362 2361 if (server->flags & NFS_MOUNT_UNSHARED) 2363 2362 compare_super = NULL; 2363 + 2364 + /* -o noac implies -o sync */ 2365 + if (server->flags & NFS_MOUNT_NOAC) 2366 + sb_mntdata.mntflags |= MS_SYNCHRONOUS; 2364 2367 2365 2368 /* Get a superblock - note that we may end up sharing one that already exists */ 2366 2369 s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata); ··· 2633 2628 if (server->flags & NFS4_MOUNT_UNSHARED) 2634 2629 compare_super = NULL; 2635 2630 2631 + /* -o noac implies -o sync */ 2632 + if (server->flags & NFS_MOUNT_NOAC) 2633 + sb_mntdata.mntflags |= MS_SYNCHRONOUS; 2634 + 2636 2635 /* Get a superblock - note that we may end up sharing one that already exists */ 2637 2636 s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata); 2638 2637 if (IS_ERR(s)) { ··· 2798 2789 goto out_put_mnt_ns; 2799 2790 2800 2791 ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt, 2801 - export_path, LOOKUP_FOLLOW, &path); 2792 + export_path, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); 2802 2793 2803 2794 nfs_referral_loop_unprotect(); 2804 2795 put_mnt_ns(ns_private); ··· 2925 2916 if (server->flags & NFS4_MOUNT_UNSHARED) 2926 2917 compare_super = NULL; 2927 2918 2919 + /* -o noac implies -o sync */ 2920 + if (server->flags & NFS_MOUNT_NOAC) 2921 + sb_mntdata.mntflags |= MS_SYNCHRONOUS; 2922 + 2928 2923 /* Get a superblock - note that we may end up sharing one that already exists */ 2929 2924 s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata); 2930 2925 if (IS_ERR(s)) { ··· 3015 3002 3016 3003 if (server->flags & NFS4_MOUNT_UNSHARED) 3017 3004 compare_super = NULL; 3005 + 3006 + /* -o noac implies -o sync */ 3007 + if (server->flags & NFS_MOUNT_NOAC) 3008 + sb_mntdata.mntflags |= MS_SYNCHRONOUS; 3018 3009 3019 3010 /* Get a superblock - note that we may end up sharing one that already exists */ 3020 3011 s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
+1 -1
fs/nfs/write.c
··· 958 958 if (!data) 959 959 goto out_bad; 960 960 data->pagevec[0] = page; 961 - nfs_write_rpcsetup(req, data, wsize, offset, desc->pg_ioflags); 961 + nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags); 962 962 list_add(&data->list, res); 963 963 requests++; 964 964 nbytes -= len;
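The one-liner above fixes a size bug in the multi-request write path: every sub-request of a large write was set up with the full wsize, including the trailing one that covers fewer bytes. A standalone sketch of the intended split (the sizes and loop shape are illustrative, not the kernel code):

    #include <stdio.h>

    int main(void)
    {
        size_t nbytes = 10000, wsize = 4096, offset = 0;

        while (nbytes) {
            size_t len = nbytes < wsize ? nbytes : wsize; /* len, not wsize */

            printf("rpc: offset=%zu len=%zu\n", offset, len);
            offset += len;
            nbytes -= len;
        }
        return 0;
    }

The three RPCs come out as 4096, 4096 and 1808 bytes; passing wsize for the last one would have claimed 4096.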
+55 -25
fs/proc/task_mmu.c
··· 877 877 struct numa_maps md; 878 878 }; 879 879 880 - static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty) 880 + static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, 881 + unsigned long nr_pages) 881 882 { 882 883 int count = page_mapcount(page); 883 884 884 - md->pages++; 885 + md->pages += nr_pages; 885 886 if (pte_dirty || PageDirty(page)) 886 - md->dirty++; 887 + md->dirty += nr_pages; 887 888 888 889 if (PageSwapCache(page)) 889 - md->swapcache++; 890 + md->swapcache += nr_pages; 890 891 891 892 if (PageActive(page) || PageUnevictable(page)) 892 - md->active++; 893 + md->active += nr_pages; 893 894 894 895 if (PageWriteback(page)) 895 - md->writeback++; 896 + md->writeback += nr_pages; 896 897 897 898 if (PageAnon(page)) 898 - md->anon++; 899 + md->anon += nr_pages; 899 900 900 901 if (count > md->mapcount_max) 901 902 md->mapcount_max = count; 902 903 903 - md->node[page_to_nid(page)]++; 904 + md->node[page_to_nid(page)] += nr_pages; 905 + } 906 + 907 + static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, 908 + unsigned long addr) 909 + { 910 + struct page *page; 911 + int nid; 912 + 913 + if (!pte_present(pte)) 914 + return NULL; 915 + 916 + page = vm_normal_page(vma, addr, pte); 917 + if (!page) 918 + return NULL; 919 + 920 + if (PageReserved(page)) 921 + return NULL; 922 + 923 + nid = page_to_nid(page); 924 + if (!node_isset(nid, node_states[N_HIGH_MEMORY])) 925 + return NULL; 926 + 927 + return page; 904 928 } 905 929 906 930 static int gather_pte_stats(pmd_t *pmd, unsigned long addr, ··· 936 912 pte_t *pte; 937 913 938 914 md = walk->private; 915 + spin_lock(&walk->mm->page_table_lock); 916 + if (pmd_trans_huge(*pmd)) { 917 + if (pmd_trans_splitting(*pmd)) { 918 + spin_unlock(&walk->mm->page_table_lock); 919 + wait_split_huge_page(md->vma->anon_vma, pmd); 920 + } else { 921 + pte_t huge_pte = *(pte_t *)pmd; 922 + struct page *page; 923 + 924 + page = can_gather_numa_stats(huge_pte, md->vma, addr); 925 + if (page) 926 + gather_stats(page, md, pte_dirty(huge_pte), 927 + HPAGE_PMD_SIZE/PAGE_SIZE); 928 + spin_unlock(&walk->mm->page_table_lock); 929 + return 0; 930 + } 931 + } else { 932 + spin_unlock(&walk->mm->page_table_lock); 933 + } 934 + 939 935 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 940 936 do { 941 - struct page *page; 942 - int nid; 943 - 944 - if (!pte_present(*pte)) 945 - continue; 946 - 947 - page = vm_normal_page(md->vma, addr, *pte); 937 + struct page *page = can_gather_numa_stats(*pte, md->vma, addr); 948 938 if (!page) 949 939 continue; 950 - 951 - if (PageReserved(page)) 952 - continue; 953 - 954 - nid = page_to_nid(page); 955 - if (!node_isset(nid, node_states[N_HIGH_MEMORY])) 956 - continue; 957 - 958 - gather_stats(page, md, pte_dirty(*pte)); 940 + gather_stats(page, md, pte_dirty(*pte), 1); 959 941 960 942 } while (pte++, addr += PAGE_SIZE, addr != end); 961 943 pte_unmap_unlock(orig_pte, ptl); ··· 982 952 return 0; 983 953 984 954 md = walk->private; 985 - gather_stats(page, md, pte_dirty(*pte)); 955 + gather_stats(page, md, pte_dirty(*pte), 1); 986 956 return 0; 987 957 } 988 958
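gather_stats() growing an nr_pages argument is what makes the new transparent-huge-page branch above count correctly: one mapped THP must weigh HPAGE_PMD_SIZE/PAGE_SIZE base pages in /proc/<pid>/numa_maps, not one. With the usual x86-64 sizes (assumed below) that factor is 512:

    #include <stdio.h>

    #define PAGE_SIZE      4096UL          /* 4 KiB base page */
    #define HPAGE_PMD_SIZE (2048UL * 1024) /* 2 MiB huge page */

    int main(void)
    {
        printf("pages per THP: %lu\n", HPAGE_PMD_SIZE / PAGE_SIZE); /* 512 */
        return 0;
    }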
+1 -1
fs/quota/quota.c
··· 355 355 * resolution (think about autofs) and thus deadlocks could arise. 356 356 */ 357 357 if (cmds == Q_QUOTAON) { 358 - ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW, &path); 358 + ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); 359 359 if (ret) 360 360 pathp = ERR_PTR(ret); 361 361 else
-2
fs/stat.c
··· 81 81 82 82 if (!(flag & AT_SYMLINK_NOFOLLOW)) 83 83 lookup_flags |= LOOKUP_FOLLOW; 84 - if (flag & AT_NO_AUTOMOUNT) 85 - lookup_flags |= LOOKUP_NO_AUTOMOUNT; 86 84 if (flag & AT_EMPTY_PATH) 87 85 lookup_flags |= LOOKUP_EMPTY; 88 86
+2 -1
fs/xfs/xfs_aops.c
··· 1300 1300 bool is_async) 1301 1301 { 1302 1302 struct xfs_ioend *ioend = iocb->private; 1303 + struct inode *inode = ioend->io_inode; 1303 1304 1304 1305 /* 1305 1306 * blockdev_direct_IO can return an error even after the I/O ··· 1332 1331 } 1333 1332 1334 1333 /* XXX: probably should move into the real I/O completion handler */ 1335 - inode_dio_done(ioend->io_inode); 1334 + inode_dio_done(inode); 1336 1335 } 1337 1336 1338 1337 STATIC ssize_t
+5 -10
include/linux/basic_mmio_gpio.h
··· 63 63 return container_of(gc, struct bgpio_chip, gc); 64 64 } 65 65 66 - int __devexit bgpio_remove(struct bgpio_chip *bgc); 67 - int __devinit bgpio_init(struct bgpio_chip *bgc, 68 - struct device *dev, 69 - unsigned long sz, 70 - void __iomem *dat, 71 - void __iomem *set, 72 - void __iomem *clr, 73 - void __iomem *dirout, 74 - void __iomem *dirin, 75 - bool big_endian); 66 + int bgpio_remove(struct bgpio_chip *bgc); 67 + int bgpio_init(struct bgpio_chip *bgc, struct device *dev, 68 + unsigned long sz, void __iomem *dat, void __iomem *set, 69 + void __iomem *clr, void __iomem *dirout, void __iomem *dirin, 70 + bool big_endian); 76 71 77 72 #endif /* __BASIC_MMIO_GPIO_H */
+4 -2
include/linux/blk_types.h
··· 124 124 125 125 __REQ_SYNC, /* request is sync (sync write or read) */ 126 126 __REQ_META, /* metadata io request */ 127 + __REQ_PRIO, /* boost priority in cfq */ 127 128 __REQ_DISCARD, /* request to discard sectors */ 128 129 __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */ 129 130 ··· 162 161 #define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) 163 162 #define REQ_SYNC (1 << __REQ_SYNC) 164 163 #define REQ_META (1 << __REQ_META) 164 + #define REQ_PRIO (1 << __REQ_PRIO) 165 165 #define REQ_DISCARD (1 << __REQ_DISCARD) 166 166 #define REQ_NOIDLE (1 << __REQ_NOIDLE) 167 167 168 168 #define REQ_FAILFAST_MASK \ 169 169 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) 170 170 #define REQ_COMMON_MASK \ 171 - (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \ 172 - REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE) 171 + (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \ 172 + REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE) 173 173 #define REQ_CLONE_MASK REQ_COMMON_MASK 174 174 175 175 #define REQ_RAHEAD (1 << __REQ_RAHEAD)
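REQ_PRIO is built the same way as its neighbours: a bit position in the request-flag enum turned into a mask, then added to REQ_COMMON_MASK so it survives request cloning. A compilable miniature of the derivation (the enum positions and REQBIT_* names here are made up for the example; only the pattern matters):

    #include <stdio.h>

    enum { REQBIT_SYNC = 4, REQBIT_META, REQBIT_PRIO };

    #define REQ_META (1 << REQBIT_META)
    #define REQ_PRIO (1 << REQBIT_PRIO)

    int main(void)
    {
        unsigned flags = REQ_META | REQ_PRIO;

        printf("flags=%#x, prio set: %d\n", flags, !!(flags & REQ_PRIO));
        return 0;
    }

Splitting REQ_META (classification) from REQ_PRIO (cfq boost) lets a filesystem mark metadata I/O without implicitly changing its scheduling priority.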
-1
include/linux/blkdev.h
··· 873 873 struct list_head list; 874 874 struct list_head cb_list; 875 875 unsigned int should_sort; 876 - unsigned int count; 877 876 }; 878 877 #define BLK_MAX_REQUEST_COUNT 16 879 878
-2
include/linux/fs.h
··· 162 162 #define READA RWA_MASK 163 163 164 164 #define READ_SYNC (READ | REQ_SYNC) 165 - #define READ_META (READ | REQ_META) 166 165 #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE) 167 166 #define WRITE_ODIRECT (WRITE | REQ_SYNC) 168 - #define WRITE_META (WRITE | REQ_META) 169 167 #define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH) 170 168 #define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA) 171 169 #define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
+1
include/linux/irqdomain.h
··· 80 80 #endif /* CONFIG_IRQ_DOMAIN */ 81 81 82 82 #if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ) 83 + extern struct irq_domain_ops irq_domain_simple_ops; 83 84 extern void irq_domain_add_simple(struct device_node *controller, int irq_base); 84 85 extern void irq_domain_generate_simple(const struct of_device_id *match, 85 86 u64 phys_base, unsigned int irq_start);
+1
include/linux/kvm.h
··· 553 553 #define KVM_CAP_SPAPR_TCE 63 554 554 #define KVM_CAP_PPC_SMT 64 555 555 #define KVM_CAP_PPC_RMA 65 556 + #define KVM_CAP_S390_GMAP 71 556 557 557 558 #ifdef KVM_CAP_IRQ_ROUTING 558 559
-19
include/linux/memcontrol.h
··· 39 39 struct mem_cgroup *mem_cont, 40 40 int active, int file); 41 41 42 - struct memcg_scanrecord { 43 - struct mem_cgroup *mem; /* scanend memory cgroup */ 44 - struct mem_cgroup *root; /* scan target hierarchy root */ 45 - int context; /* scanning context (see memcontrol.c) */ 46 - unsigned long nr_scanned[2]; /* the number of scanned pages */ 47 - unsigned long nr_rotated[2]; /* the number of rotated pages */ 48 - unsigned long nr_freed[2]; /* the number of freed pages */ 49 - unsigned long elapsed; /* nsec of time elapsed while scanning */ 50 - }; 51 - 52 42 #ifdef CONFIG_CGROUP_MEM_RES_CTLR 53 43 /* 54 44 * All "charge" functions with gfp_mask should use GFP_KERNEL or ··· 116 126 mem_cgroup_get_reclaim_stat_from_page(struct page *page); 117 127 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, 118 128 struct task_struct *p); 119 - 120 - extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, 121 - gfp_t gfp_mask, bool noswap, 122 - struct memcg_scanrecord *rec); 123 - extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 124 - gfp_t gfp_mask, bool noswap, 125 - struct zone *zone, 126 - struct memcg_scanrecord *rec, 127 - unsigned long *nr_scanned); 128 129 129 130 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 130 131 extern int do_swap_account;
+1 -1
include/linux/mfd/wm8994/pdata.h
··· 26 26 struct regulator_init_data *init_data; 27 27 }; 28 28 29 - #define WM8994_CONFIGURE_GPIO 0x8000 29 + #define WM8994_CONFIGURE_GPIO 0x10000 30 30 31 31 #define WM8994_DRC_REGS 5 32 32 #define WM8994_EQ_REGS 20
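The new value matters because WM8994_CONFIGURE_GPIO is combined with a GPIO register value: 0x8000 falls inside the 16-bit register range and so could alias a real hardware bit, while 0x10000 sits above it entirely (that reading of the rationale is an assumption based on the chip's 16-bit register map). A two-line check:

    #include <assert.h>
    #include <stdio.h>

    #define WM8994_CONFIGURE_GPIO 0x10000

    int main(void)
    {
        /* clear of all 16 register bits, so it can never alias one */
        assert((WM8994_CONFIGURE_GPIO & 0xffff) == 0);
        printf("marker is outside the register width\n");
        return 0;
    }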
+2 -1
include/linux/namei.h
··· 48 48 */ 49 49 #define LOOKUP_FOLLOW 0x0001 50 50 #define LOOKUP_DIRECTORY 0x0002 51 + #define LOOKUP_AUTOMOUNT 0x0004 51 52 52 53 #define LOOKUP_PARENT 0x0010 53 54 #define LOOKUP_REVAL 0x0020 54 55 #define LOOKUP_RCU 0x0040 55 - #define LOOKUP_NO_AUTOMOUNT 0x0080 56 + 56 57 /* 57 58 * Intent data 58 59 */
+2 -1
include/linux/pci.h
··· 621 621 extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss); 622 622 623 623 enum pcie_bus_config_types { 624 - PCIE_BUS_PERFORMANCE, 624 + PCIE_BUS_TUNE_OFF, 625 625 PCIE_BUS_SAFE, 626 + PCIE_BUS_PERFORMANCE, 626 627 PCIE_BUS_PEER2PEER, 627 628 }; 628 629
+10 -3
include/linux/ptp_classify.h
··· 51 51 #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) 52 52 53 53 #define PTP_EV_PORT 319 54 + #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */ 54 55 55 56 #define OFF_ETYPE 12 56 57 #define OFF_IHL 14 ··· 117 116 {OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \ 118 117 {OP_RETA, 0, 0, 0 }, /* */ \ 119 118 /*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \ 120 - /*L40*/ {OP_JEQ, 0, 6, ETH_P_8021Q }, /* f goto L50 */ \ 119 + /*L40*/ {OP_JEQ, 0, 9, ETH_P_8021Q }, /* f goto L50 */ \ 121 120 {OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \ 122 - {OP_JEQ, 0, 9, ETH_P_1588 }, /* f goto L60 */ \ 121 + {OP_JEQ, 0, 15, ETH_P_1588 }, /* f goto L60 */ \ 122 + {OP_LDB, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \ 123 + {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \ 124 + {OP_JEQ, 0, 12, 0 }, /* f goto L6x */ \ 123 125 {OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \ 124 126 {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ 125 127 {OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \ 126 128 {OP_RETA, 0, 0, 0 }, /* */ \ 127 - /*L50*/ {OP_JEQ, 0, 4, ETH_P_1588 }, /* f goto L61 */ \ 129 + /*L50*/ {OP_JEQ, 0, 7, ETH_P_1588 }, /* f goto L61 */ \ 130 + {OP_LDB, 0, 0, ETH_HLEN }, /* */ \ 131 + {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \ 132 + {OP_JEQ, 0, 4, 0 }, /* f goto L6x */ \ 128 133 {OP_LDH, 0, 0, ETH_HLEN }, /* */ \ 129 134 {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ 130 135 {OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \
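The extra OP_LDB/OP_AND/OP_JEQ steps above make the classifier load the PTP message-type byte and test PTP_GEN_BIT before accepting an L2/VLAN packet: event messages (the ones that get timestamps, e.g. Sync) have that bit clear, while general messages (e.g. Follow_Up) have it set and fall through. A userspace sketch of just that test:

    #include <stdio.h>

    #define PTP_GEN_BIT 0x08 /* set in the type of general messages */

    static const char *classify(unsigned char msgtype)
    {
        return (msgtype & PTP_GEN_BIT) ? "general (no timestamp)" : "event";
    }

    int main(void)
    {
        printf("0x00 -> %s\n", classify(0x00)); /* Sync */
        printf("0x08 -> %s\n", classify(0x08)); /* Follow_Up */
        return 0;
    }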
-1
include/linux/sched.h
··· 1956 1956 1957 1957 extern unsigned long long 1958 1958 task_sched_runtime(struct task_struct *task); 1959 - extern unsigned long long thread_group_sched_runtime(struct task_struct *task); 1960 1959 1961 1960 /* sched_exec is called by processes performing an exec */ 1962 1961 #ifdef CONFIG_SMP
+1
include/linux/skbuff.h
··· 524 524 extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); 525 525 526 526 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); 527 + extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); 527 528 extern struct sk_buff *skb_clone(struct sk_buff *skb, 528 529 gfp_t priority); 529 530 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
+2
include/linux/snmp.h
··· 231 231 LINUX_MIB_TCPDEFERACCEPTDROP, 232 232 LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */ 233 233 LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */ 234 + LINUX_MIB_TCPREQQFULLDOCOOKIES, /* TCPReqQFullDoCookies */ 235 + LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */ 234 236 __LINUX_MIB_MAX 235 237 }; 236 238
+6
include/linux/swap.h
··· 252 252 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 253 253 gfp_t gfp_mask, nodemask_t *mask); 254 254 extern int __isolate_lru_page(struct page *page, int mode, int file); 255 + extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, 256 + gfp_t gfp_mask, bool noswap); 257 + extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 258 + gfp_t gfp_mask, bool noswap, 259 + struct zone *zone, 260 + unsigned long *nr_scanned); 255 261 extern unsigned long shrink_all_memory(unsigned long nr_pages); 256 262 extern int vm_swappiness; 257 263 extern int remove_mapping(struct address_space *mapping, struct page *page);
+22 -3
include/net/flow.h
··· 7 7 #ifndef _NET_FLOW_H 8 8 #define _NET_FLOW_H 9 9 10 + #include <linux/socket.h> 10 11 #include <linux/in6.h> 11 12 #include <linux/atomic.h> 12 13 ··· 69 68 #define fl4_ipsec_spi uli.spi 70 69 #define fl4_mh_type uli.mht.type 71 70 #define fl4_gre_key uli.gre_key 72 - }; 71 + } __attribute__((__aligned__(BITS_PER_LONG/8))); 73 72 74 73 static inline void flowi4_init_output(struct flowi4 *fl4, int oif, 75 74 __u32 mark, __u8 tos, __u8 scope, ··· 113 112 #define fl6_ipsec_spi uli.spi 114 113 #define fl6_mh_type uli.mht.type 115 114 #define fl6_gre_key uli.gre_key 116 - }; 115 + } __attribute__((__aligned__(BITS_PER_LONG/8))); 117 116 118 117 struct flowidn { 119 118 struct flowi_common __fl_common; ··· 128 127 union flowi_uli uli; 129 128 #define fld_sport uli.ports.sport 130 129 #define fld_dport uli.ports.dport 131 - }; 130 + } __attribute__((__aligned__(BITS_PER_LONG/8))); 132 131 133 132 struct flowi { 134 133 union { ··· 160 159 static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn) 161 160 { 162 161 return container_of(fldn, struct flowi, u.dn); 162 + } 163 + 164 + typedef unsigned long flow_compare_t; 165 + 166 + static inline size_t flow_key_size(u16 family) 167 + { 168 + switch (family) { 169 + case AF_INET: 170 + BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t)); 171 + return sizeof(struct flowi4) / sizeof(flow_compare_t); 172 + case AF_INET6: 173 + BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t)); 174 + return sizeof(struct flowi6) / sizeof(flow_compare_t); 175 + case AF_DECnet: 176 + BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t)); 177 + return sizeof(struct flowidn) / sizeof(flow_compare_t); 178 + } 179 + return 0; 163 180 } 164 181 165 182 #define FLOW_DIR_IN 0
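The new aligned attribute and flow_key_size() exist so flow keys can be compared a machine word at a time: the BUILD_BUG_ONs guarantee each key struct is a whole number of flow_compare_t words, and the alignment makes the word loads legal. A standalone sketch of the word-wise compare this enables (the struct is a stand-in, not the real flowi4; the kernel additionally builds with -fno-strict-aliasing, which this kind of cast relies on):

    #include <stdio.h>
    #include <string.h>

    typedef unsigned long flow_compare_t;

    struct key {
        unsigned int saddr, daddr;
        unsigned short sport, dport;
    } __attribute__((aligned(sizeof(flow_compare_t))));

    static int keys_equal(const struct key *a, const struct key *b)
    {
        const flow_compare_t *x = (const void *)a;
        const flow_compare_t *y = (const void *)b;
        size_t i, n = sizeof(*a) / sizeof(flow_compare_t);

        for (i = 0; i < n; i++)
            if (x[i] != y[i])
                return 0;
        return 1;
    }

    int main(void)
    {
        struct key a, b;

        memset(&a, 0, sizeof(a)); /* zero the padding too */
        a.saddr = 1; a.daddr = 2; a.sport = 3; a.dport = 4;
        b = a;
        printf("equal: %d\n", keys_equal(&a, &b));
        return 0;
    }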
+2 -1
include/net/request_sock.h
··· 96 96 */ 97 97 struct listen_sock { 98 98 u8 max_qlen_log; 99 - /* 3 bytes hole, try to use */ 99 + u8 synflood_warned; 100 + /* 2 bytes hole, try to use */ 100 101 int qlen; 101 102 int qlen_young; 102 103 int clock_hand;
+1
include/net/sctp/command.h
··· 109 109 SCTP_CMD_SEND_MSG, /* Send the whole use message */ 110 110 SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */ 111 111 SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/ 112 + SCTP_CMD_SET_ASOC, /* Restore association context */ 112 113 SCTP_CMD_LAST 113 114 } sctp_verb_t; 114 115
+21 -1
include/net/tcp.h
··· 431 431 extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; 432 432 extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 433 433 struct ip_options *opt); 434 + #ifdef CONFIG_SYN_COOKIES 434 435 extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, 435 436 __u16 *mss); 437 + #else 438 + static inline __u32 cookie_v4_init_sequence(struct sock *sk, 439 + struct sk_buff *skb, 440 + __u16 *mss) 441 + { 442 + return 0; 443 + } 444 + #endif 436 445 437 446 extern __u32 cookie_init_timestamp(struct request_sock *req); 438 447 extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *); 439 448 440 449 /* From net/ipv6/syncookies.c */ 441 450 extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); 451 + #ifdef CONFIG_SYN_COOKIES 442 452 extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, 443 453 __u16 *mss); 444 - 454 + #else 455 + static inline __u32 cookie_v6_init_sequence(struct sock *sk, 456 + struct sk_buff *skb, 457 + __u16 *mss) 458 + { 459 + return 0; 460 + } 461 + #endif 445 462 /* tcp_output.c */ 446 463 447 464 extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, ··· 477 460 extern void tcp_send_fin(struct sock *sk); 478 461 extern void tcp_send_active_reset(struct sock *sk, gfp_t priority); 479 462 extern int tcp_send_synack(struct sock *); 463 + extern int tcp_syn_flood_action(struct sock *sk, 464 + const struct sk_buff *skb, 465 + const char *proto); 480 466 extern void tcp_push_one(struct sock *, unsigned int mss_now); 481 467 extern void tcp_send_ack(struct sock *sk); 482 468 extern void tcp_send_delayed_ack(struct sock *sk);
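The #ifdef stubs above follow a common kernel header idiom: when CONFIG_SYN_COOKIES is off, callers get a static inline that returns 0 instead of an extern declaration, so call sites need no conditional compilation of their own. Reduced to a standalone example (CONFIG_FEATURE is a stand-in config symbol; build without defining it and the stub is used):

    #include <stdio.h>

    #ifdef CONFIG_FEATURE
    unsigned cookie_init_sequence(unsigned short *mss); /* real one elsewhere */
    #else
    static inline unsigned cookie_init_sequence(unsigned short *mss)
    {
        (void)mss;
        return 0; /* feature compiled out */
    }
    #endif

    int main(void)
    {
        unsigned short mss = 0;

        printf("seq=%u\n", cookie_init_sequence(&mss));
        return 0;
    }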
+1
include/net/transp_v6.h
··· 39 39 struct sk_buff *skb); 40 40 41 41 extern int datagram_send_ctl(struct net *net, 42 + struct sock *sk, 42 43 struct msghdr *msg, 43 44 struct flowi6 *fl6, 44 45 struct ipv6_txoptions *opt,
+5 -5
include/trace/events/writeback.h
··· 298 298 __array(char, name, 32) 299 299 __field(unsigned long, ino) 300 300 __field(unsigned long, state) 301 - __field(unsigned long, age) 301 + __field(unsigned long, dirtied_when) 302 302 __field(unsigned long, writeback_index) 303 303 __field(long, nr_to_write) 304 304 __field(unsigned long, wrote) ··· 309 309 dev_name(inode->i_mapping->backing_dev_info->dev), 32); 310 310 __entry->ino = inode->i_ino; 311 311 __entry->state = inode->i_state; 312 - __entry->age = (jiffies - inode->dirtied_when) * 313 - 1000 / HZ; 312 + __entry->dirtied_when = inode->dirtied_when; 314 313 __entry->writeback_index = inode->i_mapping->writeback_index; 315 314 __entry->nr_to_write = nr_to_write; 316 315 __entry->wrote = nr_to_write - wbc->nr_to_write; 317 316 ), 318 317 319 - TP_printk("bdi %s: ino=%lu state=%s age=%lu " 318 + TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu " 320 319 "index=%lu to_write=%ld wrote=%lu", 321 320 __entry->name, 322 321 __entry->ino, 323 322 show_inode_state(__entry->state), 324 - __entry->age, 323 + __entry->dirtied_when, 324 + (jiffies - __entry->dirtied_when) / HZ, 325 325 __entry->writeback_index, 326 326 __entry->nr_to_write, 327 327 __entry->wrote
+14 -5
init/main.c
··· 209 209 210 210 static int __init loglevel(char *str) 211 211 { 212 - get_option(&str, &console_loglevel); 213 - return 0; 212 + int newlevel; 213 + 214 + /* 215 + * Only update loglevel value when a correct setting was passed, 216 + * to prevent blind crashes (when loglevel being set to 0) that 217 + * are quite hard to debug 218 + */ 219 + if (get_option(&str, &newlevel)) { 220 + console_loglevel = newlevel; 221 + return 0; 222 + } 223 + 224 + return -EINVAL; 214 225 } 215 226 216 227 early_param("loglevel", loglevel); ··· 380 369 init_idle_bootup_task(current); 381 370 preempt_enable_no_resched(); 382 371 schedule(); 383 - 384 - /* At this point, we can enable user mode helper functionality */ 385 - usermodehelper_enable(); 386 372 387 373 /* Call into cpu_idle with preempt disabled */ 388 374 preempt_disable(); ··· 730 722 driver_init(); 731 723 init_irq_proc(); 732 724 do_ctors(); 725 + usermodehelper_enable(); 733 726 do_initcalls(); 734 727 } 735 728
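The loglevel change is a parse-then-commit fix: the old code let get_option() write straight into console_loglevel, so a malformed loglevel= argument could end up setting level 0 and muting the console. A userspace analogue of the hardened pattern (strtol stands in for get_option):

    #include <stdio.h>
    #include <stdlib.h>

    static int console_loglevel = 7;

    static int set_loglevel(const char *str)
    {
        char *end;
        long newlevel = strtol(str, &end, 10);

        if (end == str || *end != '\0')
            return -1;              /* reject; keep the old level */
        console_loglevel = (int)newlevel;
        return 0;
    }

    int main(void)
    {
        set_loglevel("bogus");
        printf("after \"bogus\": %d\n", console_loglevel); /* still 7 */
        set_loglevel("4");
        printf("after \"4\": %d\n", console_loglevel);     /* now 4 */
        return 0;
    }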
+1 -1
kernel/irq/chip.c
··· 178 178 desc->depth = 1; 179 179 if (desc->irq_data.chip->irq_shutdown) 180 180 desc->irq_data.chip->irq_shutdown(&desc->irq_data); 181 - if (desc->irq_data.chip->irq_disable) 181 + else if (desc->irq_data.chip->irq_disable) 182 182 desc->irq_data.chip->irq_disable(&desc->irq_data); 183 183 else 184 184 desc->irq_data.chip->irq_mask(&desc->irq_data);
+5 -1
kernel/irq/irqdomain.c
··· 29 29 */ 30 30 for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) { 31 31 d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq)); 32 - if (d || d->domain) { 32 + if (!d) { 33 + WARN(1, "error: assigning domain to non existant irq_desc"); 34 + return; 35 + } 36 + if (d->domain) { 33 37 /* things are broken; just report, don't clean up */ 34 38 WARN(1, "error: irq_desc already assigned to a domain"); 35 39 return;
+3 -2
kernel/posix-cpu-timers.c
··· 250 250 do { 251 251 times->utime = cputime_add(times->utime, t->utime); 252 252 times->stime = cputime_add(times->stime, t->stime); 253 - times->sum_exec_runtime += t->se.sum_exec_runtime; 253 + times->sum_exec_runtime += task_sched_runtime(t); 254 254 } while_each_thread(tsk, t); 255 255 out: 256 256 rcu_read_unlock(); ··· 312 312 cpu->cpu = cputime.utime; 313 313 break; 314 314 case CPUCLOCK_SCHED: 315 - cpu->sched = thread_group_sched_runtime(p); 315 + thread_group_cputime(p, &cputime); 316 + cpu->sched = cputime.sum_exec_runtime; 316 317 break; 317 318 } 318 319 return 0;
+10 -13
kernel/ptrace.c
··· 744 744 break; 745 745 746 746 si = child->last_siginfo; 747 - if (unlikely(!si || si->si_code >> 8 != PTRACE_EVENT_STOP)) 748 - break; 749 - 750 - child->jobctl |= JOBCTL_LISTENING; 751 - 752 - /* 753 - * If NOTIFY is set, it means event happened between start 754 - * of this trap and now. Trigger re-trap immediately. 755 - */ 756 - if (child->jobctl & JOBCTL_TRAP_NOTIFY) 757 - signal_wake_up(child, true); 758 - 747 + if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) { 748 + child->jobctl |= JOBCTL_LISTENING; 749 + /* 750 + * If NOTIFY is set, it means event happened between 751 + * start of this trap and now. Trigger re-trap. 752 + */ 753 + if (child->jobctl & JOBCTL_TRAP_NOTIFY) 754 + signal_wake_up(child, true); 755 + ret = 0; 756 + } 759 757 unlock_task_sighand(child, &flags); 760 - ret = 0; 761 758 break; 762 759 763 760 case PTRACE_DETACH: /* detach a process that was attached. */
+6 -1
kernel/resource.c
··· 419 419 else 420 420 tmp.end = root->end; 421 421 422 + if (tmp.end < tmp.start) 423 + goto next; 424 + 422 425 resource_clip(&tmp, constraint->min, constraint->max); 423 426 arch_remove_reservations(&tmp); 424 427 ··· 439 436 return 0; 440 437 } 441 438 } 442 - if (!this) 439 + 440 + next: if (!this || this->end == root->end) 443 441 break; 442 + 444 443 if (this != old) 445 444 tmp.start = this->end + 1; 446 445 this = this->sibling;
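The new tmp.end < tmp.start test guards the candidate-window arithmetic: the gap between two resources is computed as this->end + 1 .. next->start - 1, which inverts when the resources are adjacent, and this->end + 1 wraps to 0 when a sibling already ends at the very top of the space (the companion change makes the loop stop once this->end == root->end for that reason). In miniature:

    #include <stdio.h>

    int main(void)
    {
        unsigned long this_end = 0x1fff, sibling_start = 0x2000;
        unsigned long start = this_end + 1;    /* 0x2000 */
        unsigned long end = sibling_start - 1; /* 0x1fff: inverted */

        if (end < start)
            printf("empty window [%#lx, %#lx], skip it\n", start, end);

        printf("top-of-space wrap: %#lx\n", ~0UL + 1); /* 0 */
        return 0;
    }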
+1 -25
kernel/sched.c
··· 3725 3725 } 3726 3726 3727 3727 /* 3728 - * Return sum_exec_runtime for the thread group. 3729 - * In case the task is currently running, return the sum plus current's 3730 - * pending runtime that have not been accounted yet. 3731 - * 3732 - * Note that the thread group might have other running tasks as well, 3733 - * so the return value not includes other pending runtime that other 3734 - * running tasks might have. 3735 - */ 3736 - unsigned long long thread_group_sched_runtime(struct task_struct *p) 3737 - { 3738 - struct task_cputime totals; 3739 - unsigned long flags; 3740 - struct rq *rq; 3741 - u64 ns; 3742 - 3743 - rq = task_rq_lock(p, &flags); 3744 - thread_group_cputime(p, &totals); 3745 - ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); 3746 - task_rq_unlock(rq, p, &flags); 3747 - 3748 - return ns; 3749 - } 3750 - 3751 - /* 3752 3728 * Account user cpu time to a process. 3753 3729 * @p: the process that the cpu time gets accounted to 3754 3730 * @cputime: the cpu time spent in user space since the last update ··· 4348 4372 blk_schedule_flush_plug(tsk); 4349 4373 } 4350 4374 4351 - asmlinkage void schedule(void) 4375 + asmlinkage void __sched schedule(void) 4352 4376 { 4353 4377 struct task_struct *tsk = current; 4354 4378
+2 -2
kernel/sched_rt.c
··· 1050 1050 */ 1051 1051 if (curr && unlikely(rt_task(curr)) && 1052 1052 (curr->rt.nr_cpus_allowed < 2 || 1053 - curr->prio < p->prio) && 1053 + curr->prio <= p->prio) && 1054 1054 (p->rt.nr_cpus_allowed > 1)) { 1055 1055 int target = find_lowest_rq(p); 1056 1056 ··· 1581 1581 p->rt.nr_cpus_allowed > 1 && 1582 1582 rt_task(rq->curr) && 1583 1583 (rq->curr->rt.nr_cpus_allowed < 2 || 1584 - rq->curr->prio < p->prio)) 1584 + rq->curr->prio <= p->prio)) 1585 1585 push_rt_tasks(rq); 1586 1586 } 1587 1587
+1
kernel/taskstats.c
··· 655 655 .cmd = TASKSTATS_CMD_GET, 656 656 .doit = taskstats_user_cmd, 657 657 .policy = taskstats_cmd_get_policy, 658 + .flags = GENL_ADMIN_PERM, 658 659 }; 659 660 660 661 static struct genl_ops cgroupstats_ops = {
+8 -7
kernel/tsacct.c
··· 78 78 79 79 #define KB 1024 80 80 #define MB (1024*KB) 81 + #define KB_MASK (~(KB-1)) 81 82 /* 82 83 * fill in extended accounting fields 83 84 */ ··· 96 95 stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB; 97 96 mmput(mm); 98 97 } 99 - stats->read_char = p->ioac.rchar; 100 - stats->write_char = p->ioac.wchar; 101 - stats->read_syscalls = p->ioac.syscr; 102 - stats->write_syscalls = p->ioac.syscw; 98 + stats->read_char = p->ioac.rchar & KB_MASK; 99 + stats->write_char = p->ioac.wchar & KB_MASK; 100 + stats->read_syscalls = p->ioac.syscr & KB_MASK; 101 + stats->write_syscalls = p->ioac.syscw & KB_MASK; 103 102 #ifdef CONFIG_TASK_IO_ACCOUNTING 104 - stats->read_bytes = p->ioac.read_bytes; 105 - stats->write_bytes = p->ioac.write_bytes; 106 - stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes; 103 + stats->read_bytes = p->ioac.read_bytes & KB_MASK; 104 + stats->write_bytes = p->ioac.write_bytes & KB_MASK; 105 + stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK; 107 106 #else 108 107 stats->read_bytes = 0; 109 108 stats->write_bytes = 0;
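KB_MASK is ~(KB-1), i.e. it clears the low 10 bits and so rounds a counter down to a whole number of KiB before it is reported. Worked example:

    #include <stdio.h>

    #define KB      1024UL
    #define KB_MASK (~(KB - 1))

    int main(void)
    {
        unsigned long rchar = 5000;

        printf("%lu & KB_MASK = %lu\n", rchar, rchar & KB_MASK); /* 4096 */
        return 0;
    }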
+6 -1
kernel/workqueue.c
··· 2412 2412 2413 2413 for_each_cwq_cpu(cpu, wq) { 2414 2414 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 2415 + bool drained; 2415 2416 2416 - if (!cwq->nr_active && list_empty(&cwq->delayed_works)) 2417 + spin_lock_irq(&cwq->gcwq->lock); 2418 + drained = !cwq->nr_active && list_empty(&cwq->delayed_works); 2419 + spin_unlock_irq(&cwq->gcwq->lock); 2420 + 2421 + if (drained) 2417 2422 continue; 2418 2423 2419 2424 if (++flush_cnt == 10 ||
+1
lib/sha1.c
··· 8 8 #include <linux/kernel.h> 9 9 #include <linux/module.h> 10 10 #include <linux/bitops.h> 11 + #include <linux/cryptohash.h> 11 12 #include <asm/unaligned.h> 12 13 13 14 /*
+20 -7
lib/xz/xz_dec_bcj.c
··· 441 441 * next filter in the chain. Apply the BCJ filter on the new data 442 442 * in the output buffer. If everything cannot be filtered, copy it 443 443 * to temp and rewind the output buffer position accordingly. 444 + * 445 + * This needs to be always run when temp.size == 0 to handle a special 446 + * case where the output buffer is full and the next filter has no 447 + * more output coming but hasn't returned XZ_STREAM_END yet. 444 448 */ 445 - if (s->temp.size < b->out_size - b->out_pos) { 449 + if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) { 446 450 out_start = b->out_pos; 447 451 memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); 448 452 b->out_pos += s->temp.size; ··· 469 465 s->temp.size = b->out_pos - out_start; 470 466 b->out_pos -= s->temp.size; 471 467 memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); 468 + 469 + /* 470 + * If there wasn't enough input to the next filter to fill 471 + * the output buffer with unfiltered data, there's no point 472 + * to try decoding more data to temp. 473 + */ 474 + if (b->out_pos + s->temp.size < b->out_size) 475 + return XZ_OK; 472 476 } 473 477 474 478 /* 475 - * If we have unfiltered data in temp, try to fill by decoding more 476 - * data from the next filter. Apply the BCJ filter on temp. Then we 477 - * hopefully can fill the actual output buffer by copying filtered 478 - * data from temp. A mix of filtered and unfiltered data may be left 479 - * in temp; it will be taken care on the next call to this function. 479 + * We have unfiltered data in temp. If the output buffer isn't full 480 + * yet, try to fill the temp buffer by decoding more data from the 481 + * next filter. Apply the BCJ filter on temp. Then we hopefully can 482 + * fill the actual output buffer by copying filtered data from temp. 483 + * A mix of filtered and unfiltered data may be left in temp; it will 484 + * be taken care on the next call to this function. 480 485 */ 481 - if (s->temp.size > 0) { 486 + if (b->out_pos < b->out_size) { 482 487 /* Make b->out{,_pos,_size} temporarily point to s->temp. */ 483 488 s->out = b->out; 484 489 s->out_pos = b->out_pos;
+21 -9
mm/backing-dev.c
··· 359 359 return max(5UL * 60 * HZ, interval); 360 360 } 361 361 362 + /* 363 + * Clear pending bit and wakeup anybody waiting for flusher thread creation or 364 + * shutdown 365 + */ 366 + static void bdi_clear_pending(struct backing_dev_info *bdi) 367 + { 368 + clear_bit(BDI_pending, &bdi->state); 369 + smp_mb__after_clear_bit(); 370 + wake_up_bit(&bdi->state, BDI_pending); 371 + } 372 + 362 373 static int bdi_forker_thread(void *ptr) 363 374 { 364 375 struct bdi_writeback *me = ptr; ··· 401 390 } 402 391 403 392 spin_lock_bh(&bdi_lock); 393 + /* 394 + * In the following loop we are going to check whether we have 395 + * some work to do without any synchronization with tasks 396 + * waking us up to do work for them. So we have to set task 397 + * state already here so that we don't miss wakeups coming 398 + * after we verify some condition. 399 + */ 404 400 set_current_state(TASK_INTERRUPTIBLE); 405 401 406 402 list_for_each_entry(bdi, &bdi_list, bdi_list) { ··· 487 469 spin_unlock_bh(&bdi->wb_lock); 488 470 wake_up_process(task); 489 471 } 472 + bdi_clear_pending(bdi); 490 473 break; 491 474 492 475 case KILL_THREAD: 493 476 __set_current_state(TASK_RUNNING); 494 477 kthread_stop(task); 478 + bdi_clear_pending(bdi); 495 479 break; 496 480 497 481 case NO_ACTION: ··· 509 489 else 510 490 schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10)); 511 491 try_to_freeze(); 512 - /* Back to the main loop */ 513 - continue; 492 + break; 514 493 } 515 - 516 - /* 517 - * Clear pending bit and wakeup anybody waiting to tear us down. 518 - */ 519 - clear_bit(BDI_pending, &bdi->state); 520 - smp_mb__after_clear_bit(); 521 - wake_up_bit(&bdi->state, BDI_pending); 522 494 } 523 495 524 496 return 0;
+4 -2
mm/filemap.c
··· 827 827 { 828 828 unsigned int i; 829 829 unsigned int ret; 830 - unsigned int nr_found; 830 + unsigned int nr_found, nr_skip; 831 831 832 832 rcu_read_lock(); 833 833 restart: 834 834 nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree, 835 835 (void ***)pages, NULL, start, nr_pages); 836 836 ret = 0; 837 + nr_skip = 0; 837 838 for (i = 0; i < nr_found; i++) { 838 839 struct page *page; 839 840 repeat: ··· 857 856 * here as an exceptional entry: so skip over it - 858 857 * we only reach this from invalidate_mapping_pages(). 859 858 */ 859 + nr_skip++; 860 860 continue; 861 861 } 862 862 ··· 878 876 * If all entries were removed before we could secure them, 879 877 * try again, because callers stop trying once 0 is returned. 880 878 */ 881 - if (unlikely(!ret && nr_found)) 879 + if (unlikely(!ret && nr_found > nr_skip)) 882 880 goto restart; 883 881 rcu_read_unlock(); 884 882 return ret;
+6 -166
mm/memcontrol.c
··· 204 204 static void mem_cgroup_threshold(struct mem_cgroup *mem); 205 205 static void mem_cgroup_oom_notify(struct mem_cgroup *mem); 206 206 207 - enum { 208 - SCAN_BY_LIMIT, 209 - SCAN_BY_SYSTEM, 210 - NR_SCAN_CONTEXT, 211 - SCAN_BY_SHRINK, /* not recorded now */ 212 - }; 213 - 214 - enum { 215 - SCAN, 216 - SCAN_ANON, 217 - SCAN_FILE, 218 - ROTATE, 219 - ROTATE_ANON, 220 - ROTATE_FILE, 221 - FREED, 222 - FREED_ANON, 223 - FREED_FILE, 224 - ELAPSED, 225 - NR_SCANSTATS, 226 - }; 227 - 228 - struct scanstat { 229 - spinlock_t lock; 230 - unsigned long stats[NR_SCAN_CONTEXT][NR_SCANSTATS]; 231 - unsigned long rootstats[NR_SCAN_CONTEXT][NR_SCANSTATS]; 232 - }; 233 - 234 - const char *scanstat_string[NR_SCANSTATS] = { 235 - "scanned_pages", 236 - "scanned_anon_pages", 237 - "scanned_file_pages", 238 - "rotated_pages", 239 - "rotated_anon_pages", 240 - "rotated_file_pages", 241 - "freed_pages", 242 - "freed_anon_pages", 243 - "freed_file_pages", 244 - "elapsed_ns", 245 - }; 246 - #define SCANSTAT_WORD_LIMIT "_by_limit" 247 - #define SCANSTAT_WORD_SYSTEM "_by_system" 248 - #define SCANSTAT_WORD_HIERARCHY "_under_hierarchy" 249 - 250 - 251 207 /* 252 208 * The memory controller data structure. The memory controller controls both 253 209 * page cache and RSS per cgroup. We would eventually like to provide ··· 269 313 270 314 /* For oom notifier event fd */ 271 315 struct list_head oom_notify; 272 - /* For recording LRU-scan statistics */ 273 - struct scanstat scanstat; 316 + 274 317 /* 275 318 * Should we move charges of a task when a task is moved into this 276 319 * mem_cgroup ? And what type of charges should we move ? ··· 1633 1678 } 1634 1679 #endif 1635 1680 1636 - static void __mem_cgroup_record_scanstat(unsigned long *stats, 1637 - struct memcg_scanrecord *rec) 1638 - { 1639 - 1640 - stats[SCAN] += rec->nr_scanned[0] + rec->nr_scanned[1]; 1641 - stats[SCAN_ANON] += rec->nr_scanned[0]; 1642 - stats[SCAN_FILE] += rec->nr_scanned[1]; 1643 - 1644 - stats[ROTATE] += rec->nr_rotated[0] + rec->nr_rotated[1]; 1645 - stats[ROTATE_ANON] += rec->nr_rotated[0]; 1646 - stats[ROTATE_FILE] += rec->nr_rotated[1]; 1647 - 1648 - stats[FREED] += rec->nr_freed[0] + rec->nr_freed[1]; 1649 - stats[FREED_ANON] += rec->nr_freed[0]; 1650 - stats[FREED_FILE] += rec->nr_freed[1]; 1651 - 1652 - stats[ELAPSED] += rec->elapsed; 1653 - } 1654 - 1655 - static void mem_cgroup_record_scanstat(struct memcg_scanrecord *rec) 1656 - { 1657 - struct mem_cgroup *mem; 1658 - int context = rec->context; 1659 - 1660 - if (context >= NR_SCAN_CONTEXT) 1661 - return; 1662 - 1663 - mem = rec->mem; 1664 - spin_lock(&mem->scanstat.lock); 1665 - __mem_cgroup_record_scanstat(mem->scanstat.stats[context], rec); 1666 - spin_unlock(&mem->scanstat.lock); 1667 - 1668 - mem = rec->root; 1669 - spin_lock(&mem->scanstat.lock); 1670 - __mem_cgroup_record_scanstat(mem->scanstat.rootstats[context], rec); 1671 - spin_unlock(&mem->scanstat.lock); 1672 - } 1673 - 1674 1681 /* 1675 1682 * Scan the hierarchy if needed to reclaim memory. We remember the last child 1676 1683 * we reclaimed from, so that we don't end up penalizing one child extensively ··· 1657 1740 bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP; 1658 1741 bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK; 1659 1742 bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; 1660 - struct memcg_scanrecord rec; 1661 1743 unsigned long excess; 1662 - unsigned long scanned; 1744 + unsigned long nr_scanned; 1663 1745 1664 1746 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; 1665 1747 1666 1748 /* If memsw_is_minimum==1, swap-out is of-no-use. */ 1667 1749 if (!check_soft && !shrink && root_mem->memsw_is_minimum) 1668 1750 noswap = true; 1669 - 1670 - if (shrink) 1671 - rec.context = SCAN_BY_SHRINK; 1672 - else if (check_soft) 1673 - rec.context = SCAN_BY_SYSTEM; 1674 - else 1675 - rec.context = SCAN_BY_LIMIT; 1676 - 1677 - rec.root = root_mem; 1678 1751 1679 1752 while (1) { 1680 1753 victim = mem_cgroup_select_victim(root_mem); ··· 1706 1799 css_put(&victim->css); 1707 1800 continue; 1708 1801 } 1709 - rec.mem = victim; 1710 - rec.nr_scanned[0] = 0; 1711 - rec.nr_scanned[1] = 0; 1712 - rec.nr_rotated[0] = 0; 1713 - rec.nr_rotated[1] = 0; 1714 - rec.nr_freed[0] = 0; 1715 - rec.nr_freed[1] = 0; 1716 - rec.elapsed = 0; 1717 1802 /* we use swappiness of local cgroup */ 1718 1803 if (check_soft) { 1719 1804 ret = mem_cgroup_shrink_node_zone(victim, gfp_mask, 1720 - noswap, zone, &rec, &scanned); 1721 - *total_scanned += scanned; 1805 + noswap, zone, &nr_scanned); 1806 + *total_scanned += nr_scanned; 1722 1807 } else 1723 1808 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, 1724 - noswap, &rec); 1725 - mem_cgroup_record_scanstat(&rec); 1809 + noswap); 1726 1810 css_put(&victim->css); 1727 1811 /* 1728 1812 * At shrinking usage, we can't check we should stop here or ··· 3752 3854 /* try to free all pages in this cgroup */ 3753 3855 shrink = 1; 3754 3856 while (nr_retries && mem->res.usage > 0) { 3755 - struct memcg_scanrecord rec; 3756 3857 int progress; 3757 3858 3758 3859 if (signal_pending(current)) { 3759 3860 ret = -EINTR; 3760 3861 goto out; 3761 3862 } 3762 - rec.context = SCAN_BY_SHRINK; 3763 - rec.mem = mem; 3764 - rec.root = mem; 3765 3863 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, 3766 - false, &rec); 3864 + false); 3767 3865 if (!progress) { 3768 3866 nr_retries--; 3769 3867 /* maybe some writeback is necessary */ ··· 4603 4709 } 4604 4710 #endif /* CONFIG_NUMA */ 4605 4711 4606 - static int mem_cgroup_vmscan_stat_read(struct cgroup *cgrp, 4607 - struct cftype *cft, 4608 - struct cgroup_map_cb *cb) 4609 - { 4610 - struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4611 - char string[64]; 4612 - int i; 4613 - 4614 - for (i = 0; i < NR_SCANSTATS; i++) { 4615 - strcpy(string, scanstat_string[i]); 4616 - strcat(string, SCANSTAT_WORD_LIMIT); 4617 - cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_LIMIT][i]); 4618 - } 4619 - 4620 - for (i = 0; i < NR_SCANSTATS; i++) { 4621 - strcpy(string, scanstat_string[i]); 4622 - strcat(string, SCANSTAT_WORD_SYSTEM); 4623 - cb->fill(cb, string, mem->scanstat.stats[SCAN_BY_SYSTEM][i]); 4624 - } 4625 - 4626 - for (i = 0; i < NR_SCANSTATS; i++) { 4627 - strcpy(string, scanstat_string[i]); 4628 - strcat(string, SCANSTAT_WORD_LIMIT); 4629 - strcat(string, SCANSTAT_WORD_HIERARCHY); 4630 - cb->fill(cb, string, mem->scanstat.rootstats[SCAN_BY_LIMIT][i]); 4631 - } 4632 - for (i = 0; i < NR_SCANSTATS; i++) { 4633 - strcpy(string, scanstat_string[i]); 4634 - strcat(string, SCANSTAT_WORD_SYSTEM); 4635 - strcat(string, SCANSTAT_WORD_HIERARCHY); 4636 - cb->fill(cb, string, mem->scanstat.rootstats[SCAN_BY_SYSTEM][i]); 4637 - } 4638 - return 0; 4639 - } 4640 - 4641 - static int mem_cgroup_reset_vmscan_stat(struct cgroup *cgrp, 4642 - unsigned int event) 4643 - { 4644 - struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); 4645 - 4646 - spin_lock(&mem->scanstat.lock); 4647 - memset(&mem->scanstat.stats, 0, sizeof(mem->scanstat.stats)); 4648 - memset(&mem->scanstat.rootstats, 0, sizeof(mem->scanstat.rootstats)); 4649 - spin_unlock(&mem->scanstat.lock); 4650 - return 0; 4651 - } 4652 - 4653 - 4654 4712 static struct cftype mem_cgroup_files[] = { 4655 4713 { 4656 4714 .name = "usage_in_bytes", ··· 4673 4827 .mode = S_IRUGO, 4674 4828 }, 4675 4829 #endif 4676 - { 4677 - .name = "vmscan_stat", 4678 - .read_map = mem_cgroup_vmscan_stat_read, 4679 - .trigger = mem_cgroup_reset_vmscan_stat, 4680 - }, 4681 4830 }; 4682 4831 4683 4832 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP ··· 4936 5095 atomic_set(&mem->refcnt, 1); 4937 5096 mem->move_charge_at_immigrate = 0; 4938 5097 mutex_init(&mem->thresholds_lock); 4939 - spin_lock_init(&mem->scanstat.lock); 4940 5098 return &mem->css; 4941 5099 free_out: 4942 5100 __mem_cgroup_free(mem);
+5 -4
mm/mempolicy.c
··· 636 636 struct vm_area_struct *prev; 637 637 struct vm_area_struct *vma; 638 638 int err = 0; 639 - pgoff_t pgoff; 640 639 unsigned long vmstart; 641 640 unsigned long vmend; 642 641 ··· 648 649 vmstart = max(start, vma->vm_start); 649 650 vmend = min(end, vma->vm_end); 650 651 651 - pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); 652 652 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, 653 - vma->anon_vma, vma->vm_file, pgoff, new_pol); 653 + vma->anon_vma, vma->vm_file, vma->vm_pgoff, 654 + new_pol); 654 655 if (prev) { 655 656 vma = prev; 656 657 next = vma->vm_next; ··· 1411 1412 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); 1412 1413 1413 1414 if (!err && nmask) { 1414 - err = copy_from_user(bm, nm, alloc_size); 1415 + unsigned long copy_size; 1416 + copy_size = min_t(unsigned long, sizeof(bm), alloc_size); 1417 + err = copy_from_user(bm, nm, copy_size); 1415 1418 /* ensure entire bitmap is zeroed */ 1416 1419 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); 1417 1420 err |= compat_put_bitmap(nmask, bm, nr_bits);
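The min_t clamp above is a stack-overflow fix: alloc_size is derived from a user-supplied maxnode, and the old copy_from_user() used it directly as the number of bytes to copy into the fixed-size bm[] buffer. The defensive pattern in isolation (min_t spelled out as a plain macro here):

    #include <stdio.h>
    #include <string.h>

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        unsigned long bm[2];           /* fixed destination */
        unsigned long alloc_size = 64; /* user-influenced request */
        unsigned long copy_size = min_t(unsigned long, sizeof(bm), alloc_size);

        memset(bm, 0, copy_size);      /* never writes past the buffer */
        printf("requested %lu, used %lu bytes\n", alloc_size, copy_size);
        return 0;
    }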
+1 -1
mm/slub.c
··· 2377 2377 */ 2378 2378 if (unlikely(!prior)) { 2379 2379 remove_full(s, page); 2380 - add_partial(n, page, 0); 2380 + add_partial(n, page, 1); 2381 2381 stat(s, FREE_ADD_PARTIAL); 2382 2382 } 2383 2383 }
+8
mm/vmalloc.c
··· 2140 2140 return NULL; 2141 2141 } 2142 2142 2143 + /* 2144 + * If the allocated address space is passed to a hypercall 2145 + * before being used then we cannot rely on a page fault to 2146 + * trigger an update of the page tables. So sync all the page 2147 + * tables here. 2148 + */ 2149 + vmalloc_sync_all(); 2150 + 2143 2151 return area; 2144 2152 } 2145 2153 EXPORT_SYMBOL_GPL(alloc_vm_area);
+17 -49
mm/vmscan.c
··· 105 105 106 106 /* Which cgroup do we reclaim from */ 107 107 struct mem_cgroup *mem_cgroup; 108 - struct memcg_scanrecord *memcg_record; 109 108 110 109 /* 111 110 * Nodemask of nodes allowed by the caller. If NULL, all nodes ··· 1348 1349 int file = is_file_lru(lru); 1349 1350 int numpages = hpage_nr_pages(page); 1350 1351 reclaim_stat->recent_rotated[file] += numpages; 1351 - if (!scanning_global_lru(sc)) 1352 - sc->memcg_record->nr_rotated[file] += numpages; 1353 1352 } 1354 1353 if (!pagevec_add(&pvec, page)) { 1355 1354 spin_unlock_irq(&zone->lru_lock); ··· 1391 1394 1392 1395 reclaim_stat->recent_scanned[0] += *nr_anon; 1393 1396 reclaim_stat->recent_scanned[1] += *nr_file; 1394 - if (!scanning_global_lru(sc)) { 1395 - sc->memcg_record->nr_scanned[0] += *nr_anon; 1396 - sc->memcg_record->nr_scanned[1] += *nr_file; 1397 - } 1398 1397 } 1399 1398 1400 1399 /* ··· 1504 1511 nr_reclaimed += shrink_page_list(&page_list, zone, sc); 1505 1512 } 1506 1513 1507 - if (!scanning_global_lru(sc)) 1508 - sc->memcg_record->nr_freed[file] += nr_reclaimed; 1509 - 1510 1514 local_irq_disable(); 1511 1515 if (current_is_kswapd()) 1512 1516 __count_vm_events(KSWAPD_STEAL, nr_reclaimed); ··· 1603 1613 } 1604 1614 1605 1615 reclaim_stat->recent_scanned[file] += nr_taken; 1606 - if (!scanning_global_lru(sc)) 1607 - sc->memcg_record->nr_scanned[file] += nr_taken; 1608 1616 1609 1617 __count_zone_vm_events(PGREFILL, zone, pgscanned); 1610 1618 if (file) ··· 1654 1666 * get_scan_ratio. 1655 1667 */ 1656 1668 reclaim_stat->recent_rotated[file] += nr_rotated; 1657 - if (!scanning_global_lru(sc)) 1658 - sc->memcg_record->nr_rotated[file] += nr_rotated; 1659 1669 1660 1670 move_active_pages_to_lru(zone, &l_active, 1661 1671 LRU_ACTIVE + file * LRU_FILE); ··· 1794 1808 u64 fraction[2], denominator; 1795 1809 enum lru_list l; 1796 1810 int noswap = 0; 1797 - int force_scan = 0; 1811 + bool force_scan = false; 1798 1812 unsigned long nr_force_scan[2]; 1799 1813 1800 - 1801 - anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + 1802 - zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); 1803 - file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + 1804 - zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); 1805 - 1806 - if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) { 1807 - /* kswapd does zone balancing and need to scan this zone */ 1808 - if (scanning_global_lru(sc) && current_is_kswapd()) 1809 - force_scan = 1; 1810 - /* memcg may have small limit and need to avoid priority drop */ 1811 - if (!scanning_global_lru(sc)) 1812 - force_scan = 1; 1813 - } 1814 + /* kswapd does zone balancing and needs to scan this zone */ 1815 + if (scanning_global_lru(sc) && current_is_kswapd()) 1816 + force_scan = true; 1817 + /* memcg may have small limit and need to avoid priority drop */ 1818 + if (!scanning_global_lru(sc)) 1819 + force_scan = true; 1814 1820 1815 1821 /* If we have no swap space, do not bother scanning anon pages. */ 1816 1822 if (!sc->may_swap || (nr_swap_pages <= 0)) { ··· 1814 1836 nr_force_scan[1] = SWAP_CLUSTER_MAX; 1815 1837 goto out; 1816 1838 } 1839 + 1840 + anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + 1841 + zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); 1842 + file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + 1843 + zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); 1817 1844 1818 1845 if (scanning_global_lru(sc)) { 1819 1846 free = zone_page_state(zone, NR_FREE_PAGES); ··· 2251 2268 #ifdef CONFIG_CGROUP_MEM_RES_CTLR 2252 2269 2253 2270 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 2254 - gfp_t gfp_mask, bool noswap, 2255 - struct zone *zone, 2256 - struct memcg_scanrecord *rec, 2257 - unsigned long *scanned) 2271 + gfp_t gfp_mask, bool noswap, 2272 + struct zone *zone, 2273 + unsigned long *nr_scanned) 2258 2274 { 2259 2275 struct scan_control sc = { 2260 2276 .nr_scanned = 0, ··· 2263 2281 .may_swap = !noswap, 2264 2282 .order = 0, 2265 2283 .mem_cgroup = mem, 2266 - .memcg_record = rec, 2267 2284 }; 2268 - ktime_t start, end; 2269 2285 2270 2286 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2271 2287 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); ··· 2272 2292 sc.may_writepage, 2273 2293 sc.gfp_mask); 2274 2294 2275 - start = ktime_get(); 2276 2295 /* 2277 2296 * NOTE: Although we can get the priority field, using it 2278 2297 * here is not a good idea, since it limits the pages we can scan. ··· 2280 2301 * the priority and make it zero. 2281 2302 */ 2282 2303 shrink_zone(0, zone, &sc); 2283 - end = ktime_get(); 2284 - 2285 - if (rec) 2286 - rec->elapsed += ktime_to_ns(ktime_sub(end, start)); 2287 - *scanned = sc.nr_scanned; 2288 2304 2289 2305 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 2290 2306 2307 + *nr_scanned = sc.nr_scanned; 2291 2308 return sc.nr_reclaimed; 2292 2309 } 2293 2310 2294 2311 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, 2295 2312 gfp_t gfp_mask, 2296 - bool noswap, 2297 - struct memcg_scanrecord *rec) 2313 + bool noswap) 2298 2314 { 2299 2315 struct zonelist *zonelist; 2300 2316 unsigned long nr_reclaimed; 2301 - ktime_t start, end; 2302 2317 int nid; 2303 2318 struct scan_control sc = { 2304 2319 .may_writepage = !laptop_mode, ··· 2301 2328 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2302 2329 .order = 0, 2303 2330 .mem_cgroup = mem_cont, 2304 - .memcg_record = rec, 2305 2331 .nodemask = NULL, /* we don't care the placement */ 2306 2332 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2307 2333 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), ··· 2309 2337 .gfp_mask = sc.gfp_mask, 2310 2338 }; 2311 2339 2312 - start = ktime_get(); 2313 2340 /* 2314 2341 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't 2315 2342 * take care of from where we get pages. So the node where we start the ··· 2323 2352 sc.gfp_mask); 2324 2353 2325 2354 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2326 - end = ktime_get(); 2327 - if (rec) 2328 - rec->elapsed += ktime_to_ns(ktime_sub(end, start)); 2329 2355 2330 2356 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 2331 2357
+2 -2
mm/vmstat.c
··· 659 659 } 660 660 #endif 661 661 662 - #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) 662 + #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA) 663 663 #ifdef CONFIG_ZONE_DMA 664 664 #define TEXT_FOR_DMA(xx) xx "_dma", 665 665 #else ··· 788 788 789 789 #endif /* CONFIG_VM_EVENTS_COUNTERS */ 790 790 }; 791 - #endif /* CONFIG_PROC_FS || CONFIG_SYSFS */ 791 + #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */ 792 792 793 793 794 794 #ifdef CONFIG_PROC_FS
+5 -5
net/batman-adv/soft-interface.c
··· 565 565 struct orig_node *orig_node = NULL; 566 566 int data_len = skb->len, ret; 567 567 short vid = -1; 568 - bool do_bcast = false; 568 + bool do_bcast; 569 569 570 570 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) 571 571 goto dropped; ··· 598 598 tt_local_add(soft_iface, ethhdr->h_source); 599 599 600 600 orig_node = transtable_search(bat_priv, ethhdr->h_dest); 601 - if (is_multicast_ether_addr(ethhdr->h_dest) || 602 - (orig_node && orig_node->gw_flags)) { 601 + do_bcast = is_multicast_ether_addr(ethhdr->h_dest); 602 + if (do_bcast || (orig_node && orig_node->gw_flags)) { 603 603 ret = gw_is_target(bat_priv, skb, orig_node); 604 604 605 605 if (ret < 0) 606 606 goto dropped; 607 607 608 - if (ret == 0) 609 - do_bcast = true; 608 + if (ret) 609 + do_bcast = false; 610 610 } 611 611 612 612 /* ethernet packet should be broadcasted */
+8 -9
net/bluetooth/hci_event.c
··· 58 58 if (status) 59 59 return; 60 60 61 - if (test_bit(HCI_MGMT, &hdev->flags) && 62 - test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 61 + if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && 62 + test_bit(HCI_MGMT, &hdev->flags)) 63 63 mgmt_discovering(hdev->id, 0); 64 64 65 65 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); ··· 76 76 if (status) 77 77 return; 78 78 79 - if (test_bit(HCI_MGMT, &hdev->flags) && 80 - test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 79 + if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && 80 + test_bit(HCI_MGMT, &hdev->flags)) 81 81 mgmt_discovering(hdev->id, 0); 82 82 83 83 hci_conn_check_pending(hdev); ··· 959 959 return; 960 960 } 961 961 962 - if (test_bit(HCI_MGMT, &hdev->flags) && 963 - !test_and_set_bit(HCI_INQUIRY, 964 - &hdev->flags)) 962 + if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) && 963 + test_bit(HCI_MGMT, &hdev->flags)) 965 964 mgmt_discovering(hdev->id, 1); 966 965 } 967 966 ··· 1339 1340 1340 1341 BT_DBG("%s status %d", hdev->name, status); 1341 1342 1342 - if (test_bit(HCI_MGMT, &hdev->flags) && 1343 - test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 1343 + if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && 1344 + test_bit(HCI_MGMT, &hdev->flags)) 1344 1345 mgmt_discovering(hdev->id, 0); 1345 1346 1346 1347 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
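Each hunk above is the same ordering fix: test_and_clear_bit() has a side effect, and with test_bit(HCI_MGMT) first the && short-circuited past it, so HCI_INQUIRY could stay set when management support was off. The trap, reduced to a standalone program (the two helpers are simplified stand-ins for the kernel bitops):

    #include <stdio.h>

    static unsigned long flags = 0x1; /* bit 0 = INQUIRY, bit 1 = MGMT (unset) */

    static int test_bit(int bit)
    {
        return !!(flags & (1UL << bit));
    }

    static int test_and_clear_bit(int bit)
    {
        int was = test_bit(bit);

        flags &= ~(1UL << bit);
        return was;
    }

    int main(void)
    {
        if (test_bit(1) && test_and_clear_bit(0)) /* old order */
            ;
        printf("old order: flags=%#lx (INQUIRY stuck)\n", flags);

        if (test_and_clear_bit(0) && test_bit(1)) /* new order */
            ;
        printf("new order: flags=%#lx\n", flags);
        return 0;
    }

Side-effecting predicates have to come first in a short-circuit chain when they must run unconditionally.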
+1 -1
net/bridge/netfilter/Kconfig
··· 4 4 5 5 menuconfig BRIDGE_NF_EBTABLES 6 6 tristate "Ethernet Bridge tables (ebtables) support" 7 - depends on BRIDGE && BRIDGE_NETFILTER 7 + depends on BRIDGE && NETFILTER 8 8 select NETFILTER_XTABLES 9 9 help 10 10 ebtables is a general, extensible frame/packet identification
+5 -1
net/caif/caif_dev.c
··· 93 93 caifdevs = caif_device_list(dev_net(dev)); 94 94 BUG_ON(!caifdevs); 95 95 96 - caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC); 96 + caifd = kzalloc(sizeof(*caifd), GFP_KERNEL); 97 97 if (!caifd) 98 98 return NULL; 99 99 caifd->pcpu_refcnt = alloc_percpu(int); 100 + if (!caifd->pcpu_refcnt) { 101 + kfree(caifd); 102 + return NULL; 103 + } 100 104 caifd->netdev = dev; 101 105 dev_hold(dev); 102 106 return caifd;
+1 -1
net/can/af_can.c
··· 857 857 struct net_device *dev; 858 858 859 859 if (stats_timer) 860 - del_timer(&can_stattimer); 860 + del_timer_sync(&can_stattimer); 861 861 862 862 can_remove_proc(); 863 863
+24 -29
net/can/bcm.c
··· 344 344 } 345 345 } 346 346 347 + static void bcm_tx_start_timer(struct bcm_op *op) 348 + { 349 + if (op->kt_ival1.tv64 && op->count) 350 + hrtimer_start(&op->timer, 351 + ktime_add(ktime_get(), op->kt_ival1), 352 + HRTIMER_MODE_ABS); 353 + else if (op->kt_ival2.tv64) 354 + hrtimer_start(&op->timer, 355 + ktime_add(ktime_get(), op->kt_ival2), 356 + HRTIMER_MODE_ABS); 357 + } 358 + 347 359 static void bcm_tx_timeout_tsklet(unsigned long data) 348 360 { 349 361 struct bcm_op *op = (struct bcm_op *)data; ··· 377 365 378 366 bcm_send_to_user(op, &msg_head, NULL, 0); 379 367 } 380 - } 381 - 382 - if (op->kt_ival1.tv64 && (op->count > 0)) { 383 - 384 - /* send (next) frame */ 385 368 bcm_can_tx(op); 386 - hrtimer_start(&op->timer, 387 - ktime_add(ktime_get(), op->kt_ival1), 388 - HRTIMER_MODE_ABS); 389 369 390 - } else { 391 - if (op->kt_ival2.tv64) { 370 + } else if (op->kt_ival2.tv64) 371 + bcm_can_tx(op); 392 372 393 - /* send (next) frame */ 394 - bcm_can_tx(op); 395 - hrtimer_start(&op->timer, 396 - ktime_add(ktime_get(), op->kt_ival2), 397 - HRTIMER_MODE_ABS); 398 - } 399 - } 373 + bcm_tx_start_timer(op); 400 374 } 401 375 402 376 /* ··· 962 964 hrtimer_cancel(&op->timer); 963 965 } 964 966 965 - if ((op->flags & STARTTIMER) && 966 - ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) { 967 - 967 + if (op->flags & STARTTIMER) { 968 + hrtimer_cancel(&op->timer); 968 969 /* spec: send can_frame when starting timer */ 969 970 op->flags |= TX_ANNOUNCE; 970 - 971 - if (op->kt_ival1.tv64 && (op->count > 0)) { 972 - /* op->count-- is done in bcm_tx_timeout_handler */ 973 - hrtimer_start(&op->timer, op->kt_ival1, 974 - HRTIMER_MODE_REL); 975 - } else 976 - hrtimer_start(&op->timer, op->kt_ival2, 977 - HRTIMER_MODE_REL); 978 971 } 979 972 980 - if (op->flags & TX_ANNOUNCE) 973 + if (op->flags & TX_ANNOUNCE) { 981 974 bcm_can_tx(op); 975 + if (op->count) 976 + op->count--; 977 + } 978 + 979 + if (op->flags & STARTTIMER) 980 + bcm_tx_start_timer(op); 982 981 983 982 return msg_head->nframes * CFSIZ + MHSIZ; 984 983 }
+1
net/ceph/ceph_common.c
··· 232 232 ceph_crypto_key_destroy(opt->key); 233 233 kfree(opt->key); 234 234 } 235 + kfree(opt->mon_addr); 235 236 kfree(opt); 236 237 } 237 238 EXPORT_SYMBOL(ceph_destroy_options);
+1
net/ceph/messenger.c
··· 2307 2307 m->front_max = front_len; 2308 2308 m->front_is_vmalloc = false; 2309 2309 m->more_to_follow = false; 2310 + m->ack_stamp = 0; 2310 2311 m->pool = NULL; 2311 2312 2312 2313 /* middle */
+1 -3
net/ceph/osd_client.c
··· 217 217 INIT_LIST_HEAD(&req->r_unsafe_item); 218 218 INIT_LIST_HEAD(&req->r_linger_item); 219 219 INIT_LIST_HEAD(&req->r_linger_osd); 220 + INIT_LIST_HEAD(&req->r_req_lru_item); 220 221 req->r_flags = flags; 221 222 222 223 WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0); ··· 817 816 { 818 817 req->r_tid = ++osdc->last_tid; 819 818 req->r_request->hdr.tid = cpu_to_le64(req->r_tid); 820 - INIT_LIST_HEAD(&req->r_req_lru_item); 821 - 822 819 dout("__register_request %p tid %lld\n", req, req->r_tid); 823 820 __insert_request(osdc, req); 824 821 ceph_osdc_get_request(req); 825 822 osdc->num_requests++; 826 - 827 823 if (osdc->num_requests == 1) { 828 824 dout(" first request, scheduling timeout\n"); 829 825 __schedule_osd_timeout(osdc);
+45 -39
net/ceph/osdmap.c
··· 339 339 struct ceph_pg_mapping *pg = NULL; 340 340 int c; 341 341 342 + dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new); 342 343 while (*p) { 343 344 parent = *p; 344 345 pg = rb_entry(parent, struct ceph_pg_mapping, node); ··· 367 366 while (n) { 368 367 pg = rb_entry(n, struct ceph_pg_mapping, node); 369 368 c = pgid_cmp(pgid, pg->pgid); 370 - if (c < 0) 369 + if (c < 0) { 371 370 n = n->rb_left; 372 - else if (c > 0) 371 + } else if (c > 0) { 373 372 n = n->rb_right; 374 - else 373 + } else { 374 + dout("__lookup_pg_mapping %llx got %p\n", 375 + *(u64 *)&pgid, pg); 375 377 return pg; 377 + } 376 378 } 377 379 return NULL; 380 + } 381 + 382 + static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid) 383 + { 384 + struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid); 385 + 386 + if (pg) { 387 + dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg); 388 + rb_erase(&pg->node, root); 389 + kfree(pg); 390 + return 0; 391 + } 392 + dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid); 393 + return -ENOENT; 378 394 } 379 395 380 396 /* ··· 729 711 void *start = *p; 730 712 int err = -EINVAL; 731 713 u16 version; 732 - struct rb_node *rbp; 733 714 734 715 ceph_decode_16_safe(p, end, version, bad); 735 716 if (version > CEPH_OSDMAP_INC_VERSION) { ··· 878 861 } 879 862 880 863 /* new_pg_temp */ 881 - rbp = rb_first(&map->pg_temp); 882 864 ceph_decode_32_safe(p, end, len, bad); 883 865 while (len--) { 884 866 struct ceph_pg_mapping *pg; ··· 887 871 ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad); 888 872 ceph_decode_copy(p, &pgid, sizeof(pgid)); 889 873 pglen = ceph_decode_32(p); 890 - 891 - /* remove any? */ 892 - while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping, 893 - node)->pgid, pgid) <= 0) { 894 - struct ceph_pg_mapping *cur = 895 - rb_entry(rbp, struct ceph_pg_mapping, node); 896 - 897 - rbp = rb_next(rbp); 898 - dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid); 899 - rb_erase(&cur->node, &map->pg_temp); 900 - kfree(cur); 901 - } 902 874 903 875 if (pglen) { /* insert */ ··· 907 903 } 908 904 dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, 909 905 pglen); 906 + } else { 907 + /* remove */ 908 + __remove_pg_mapping(&map->pg_temp, pgid); 910 909 } 911 - } 912 - while (rbp) { 913 - struct ceph_pg_mapping *cur = 914 - rb_entry(rbp, struct ceph_pg_mapping, node); 915 - 916 - rbp = rb_next(rbp); 917 - dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid); 918 - rb_erase(&cur->node, &map->pg_temp); 919 - kfree(cur); 920 910 } 921 911 922 912 /* ignore the rest */ ··· 1044 1046 struct ceph_pg_mapping *pg; 1045 1047 struct ceph_pg_pool_info *pool; 1046 1048 int ruleno; 1047 - unsigned poolid, ps, pps; 1049 + unsigned poolid, ps, pps, t; 1048 1050 int preferred; 1049 1051 1052 + poolid = le32_to_cpu(pgid.pool); 1053 + ps = le16_to_cpu(pgid.ps); 1054 + preferred = (s16)le16_to_cpu(pgid.preferred); 1055 + 1056 + pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); 1057 + if (!pool) 1058 + return NULL; 1059 + 1050 1060 /* pg_temp? */ 1061 + if (preferred >= 0) 1062 + t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num), 1063 + pool->lpgp_num_mask); 1064 + else 1065 + t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num), 1066 + pool->pgp_num_mask); 1067 + pgid.ps = cpu_to_le16(t); 1051 1068 pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); 1052 1069 if (pg) { 1053 1070 *num = pg->len; ··· 1070 1057 } 1071 1058 1072 1059 /* crush */ 1073 - poolid = le32_to_cpu(pgid.pool); 1074 - ps = le16_to_cpu(pgid.ps); 1075 - preferred = (s16)le16_to_cpu(pgid.preferred); 1076 - 1077 - /* don't forcefeed bad device ids to crush */ 1078 - if (preferred >= osdmap->max_osd || 1079 - preferred >= osdmap->crush->max_devices) 1080 - preferred = -1; 1081 - 1082 - pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); 1083 - if (!pool) 1084 - return NULL; 1085 1060 ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset, 1086 1061 pool->v.type, pool->v.size); 1087 1062 if (ruleno < 0) { ··· 1078 1077 pool->v.size); 1079 1078 return NULL; 1080 1079 } 1080 + 1081 + /* don't forcefeed bad device ids to crush */ 1082 + if (preferred >= osdmap->max_osd || 1083 + preferred >= osdmap->crush->max_devices) 1084 + preferred = -1; 1081 1085 1082 1086 if (preferred >= 0) 1083 1087 pps = ceph_stable_mod(ps,
+8
net/core/dev.c
··· 1515 1515 */ 1516 1516 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1517 1517 { 1518 + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 1519 + if (skb_copy_ubufs(skb, GFP_ATOMIC)) { 1520 + atomic_long_inc(&dev->rx_dropped); 1521 + kfree_skb(skb); 1522 + return NET_RX_DROP; 1523 + } 1524 + } 1525 + 1518 1526 skb_orphan(skb); 1519 1527 nf_reset(skb); 1520 1528
+2 -2
net/core/fib_rules.c
··· 384 384 */ 385 385 list_for_each_entry(r, &ops->rules_list, list) { 386 386 if (r->action == FR_ACT_GOTO && 387 - r->target == rule->pref) { 388 - BUG_ON(rtnl_dereference(r->ctarget) != NULL); 387 + r->target == rule->pref && 388 + rtnl_dereference(r->ctarget) == NULL) { 389 389 rcu_assign_pointer(r->ctarget, rule); 390 390 if (--ops->unresolved_rules == 0) 391 391 break;
+21 -15
net/core/flow.c
··· 30 30 struct hlist_node hlist; 31 31 struct list_head gc_list; 32 32 } u; 33 + struct net *net; 33 34 u16 family; 34 35 u8 dir; 35 36 u32 genid; ··· 173 172 174 173 static u32 flow_hash_code(struct flow_cache *fc, 175 174 struct flow_cache_percpu *fcp, 176 - const struct flowi *key) 175 + const struct flowi *key, 176 + size_t keysize) 177 177 { 178 178 const u32 *k = (const u32 *) key; 179 + const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32); 179 180 180 - return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd) 181 + return jhash2(k, length, fcp->hash_rnd) 181 182 & (flow_cache_hash_size(fc) - 1); 182 183 } 183 184 184 - typedef unsigned long flow_compare_t; 185 - 186 185 /* I hear what you're saying, use memcmp. But memcmp cannot make 187 - * important assumptions that we can here, such as alignment and 188 - * constant size. 186 + * important assumptions that we can here, such as alignment. 189 187 */ 190 - static int flow_key_compare(const struct flowi *key1, const struct flowi *key2) 188 + static int flow_key_compare(const struct flowi *key1, const struct flowi *key2, 189 + size_t keysize) 191 190 { 192 191 const flow_compare_t *k1, *k1_lim, *k2; 193 - const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t); 194 - 195 - BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t)); 196 192 197 193 k1 = (const flow_compare_t *) key1; 198 - k1_lim = k1 + n_elem; 194 + k1_lim = k1 + keysize; 199 195 200 196 k2 = (const flow_compare_t *) key2; 201 197 ··· 213 215 struct flow_cache_entry *fle, *tfle; 214 216 struct hlist_node *entry; 215 217 struct flow_cache_object *flo; 218 + size_t keysize; 216 219 unsigned int hash; 217 220 218 221 local_bh_disable(); ··· 221 222 222 223 fle = NULL; 223 224 flo = NULL; 225 + 226 + keysize = flow_key_size(family); 227 + if (!keysize) 228 + goto nocache; 229 + 224 230 /* Packet really early in init? Making flow_cache_init a 225 231 * pre-smp initcall would solve this. --RR */ 226 232 if (!fcp->hash_table) ··· 234 230 if (fcp->hash_rnd_recalc) 235 231 flow_new_hash_rnd(fc, fcp); 236 232 237 - hash = flow_hash_code(fc, fcp, key); 233 + hash = flow_hash_code(fc, fcp, key, keysize); 238 234 hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) { 239 - if (tfle->family == family && 235 + if (tfle->net == net && 236 + tfle->family == family && 240 237 tfle->dir == dir && 241 - flow_key_compare(key, &tfle->key) == 0) { 238 + flow_key_compare(key, &tfle->key, keysize) == 0) { 242 239 fle = tfle; 243 240 break; 244 241 } ··· 251 246 252 247 fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); 253 248 if (fle) { 249 + fle->net = net; 254 250 fle->family = family; 255 251 fle->dir = dir; 256 - memcpy(&fle->key, key, sizeof(*key)); 252 + memcpy(&fle->key, key, keysize * sizeof(flow_compare_t)); 257 253 fle->object = NULL; 258 254 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]); 259 255 fcp->hash_count++;
+17 -5
net/core/skbuff.c
··· 611 611 } 612 612 EXPORT_SYMBOL_GPL(skb_morph); 613 613 614 - /* skb frags copy userspace buffers to kernel */ 615 - static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 614 + /* skb_copy_ubufs - copy userspace skb frags buffers to kernel 615 + * @skb: the skb to modify 616 + * @gfp_mask: allocation priority 617 + * 618 + * This must be called on SKBTX_DEV_ZEROCOPY skb. 619 + * It will copy all frags into kernel and drop the reference 620 + * to userspace pages. 621 + * 622 + * If this function is called from an interrupt gfp_mask() must be 623 + * %GFP_ATOMIC. 624 + * 625 + * Returns 0 on success or a negative error code on failure 626 + * to allocate kernel memory to copy to. 627 + */ 628 + int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 616 629 { 617 630 int i; 618 631 int num_frags = skb_shinfo(skb)->nr_frags; ··· 665 652 skb_shinfo(skb)->frags[i - 1].page = head; 666 653 head = (struct page *)head->private; 667 654 } 655 + 656 + skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 668 657 return 0; 669 658 } 670 659 ··· 692 677 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 693 678 if (skb_copy_ubufs(skb, gfp_mask)) 694 679 return NULL; 695 - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 696 680 } 697 681 698 682 n = skb + 1; ··· 817 803 n = NULL; 818 804 goto out; 819 805 } 820 - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 821 806 } 822 807 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 823 808 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; ··· 909 896 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 910 897 if (skb_copy_ubufs(skb, gfp_mask)) 911 898 goto nofrags; 912 - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 913 899 } 914 900 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 915 901 get_page(skb_shinfo(skb)->frags[i].page);
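Note that skb_copy_ubufs() now clears SKBTX_DEV_ZEROCOPY itself on success, which is why the three call sites above lose their explicit flag-clearing line. A minimal caller sketch following the dev_forward_skb() shape from this series (the NET_RX_DROP return value is illustrative, not part of this hunk):

    /* before handing the skb outside the originating context */
    if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
            /* detach userspace pages into kernel copies */
            if (skb_copy_ubufs(skb, GFP_ATOMIC))
                    return NET_RX_DROP;     /* could not allocate copies */
            /* on success, SKBTX_DEV_ZEROCOPY has already been cleared */
    }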
+1 -1
net/ethernet/eth.c
··· 340 340 dev->addr_len = ETH_ALEN; 341 341 dev->tx_queue_len = 1000; /* Ethernet wants good queues */ 342 342 dev->flags = IFF_BROADCAST|IFF_MULTICAST; 343 - dev->priv_flags = IFF_TX_SKB_SHARING; 343 + dev->priv_flags |= IFF_TX_SKB_SHARING; 344 344 345 345 memset(dev->broadcast, 0xFF, ETH_ALEN); 346 346
+6 -1
net/ipv4/af_inet.c
··· 466 466 goto out; 467 467 468 468 if (addr->sin_family != AF_INET) { 469 + /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET) 470 + * only if s_addr is INADDR_ANY. 471 + */ 469 472 err = -EAFNOSUPPORT; 470 - goto out; 473 + if (addr->sin_family != AF_UNSPEC || 474 + addr->sin_addr.s_addr != htonl(INADDR_ANY)) 475 + goto out; 471 476 } 472 477 473 478 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
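For reference, the userspace call this compatibility branch now accepts looks roughly like the sketch below; only s_addr is constrained to INADDR_ANY, so any other address with AF_UNSPEC still returns -EAFNOSUPPORT (the socket type and port value here are arbitrary):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            struct sockaddr_in sin = {
                    .sin_family      = AF_UNSPEC,           /* mapped to AF_INET */
                    .sin_port        = htons(7777),         /* any port */
                    .sin_addr.s_addr = htonl(INADDR_ANY),   /* mandatory */
            };

            return bind(fd, (struct sockaddr *)&sin, sizeof(sin));
    }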
+9 -1
net/ipv4/fib_semantics.c
··· 142 142 }; 143 143 144 144 /* Release a nexthop info record */ 145 + static void free_fib_info_rcu(struct rcu_head *head) 146 + { 147 + struct fib_info *fi = container_of(head, struct fib_info, rcu); 148 + 149 + if (fi->fib_metrics != (u32 *) dst_default_metrics) 150 + kfree(fi->fib_metrics); 151 + kfree(fi); 152 + } 145 153 146 154 void free_fib_info(struct fib_info *fi) 147 155 { ··· 164 156 } endfor_nexthops(fi); 165 157 fib_info_cnt--; 166 158 release_net(fi->fib_net); 167 - kfree_rcu(fi, rcu); 159 + call_rcu(&fi->rcu, free_fib_info_rcu); 168 160 } 169 161 170 162 void fib_release_info(struct fib_info *fi)
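The move from kfree_rcu() back to call_rcu() follows the general rule that kfree_rcu() can only free the structure containing the rcu_head; once a secondary allocation (here the fib_metrics array) must also be released after the grace period, an explicit callback is required. A generic sketch of the pattern, with hypothetical my_obj/my_free_rcu names:

    struct my_obj {
            u32 *extra;             /* separately allocated data */
            struct rcu_head rcu;
    };

    static void my_free_rcu(struct rcu_head *head)
    {
            struct my_obj *obj = container_of(head, struct my_obj, rcu);

            kfree(obj->extra);      /* free the secondary allocation first */
            kfree(obj);             /* then the object itself */
    }

    /* instead of kfree_rcu(obj, rcu): */
    call_rcu(&obj->rcu, my_free_rcu);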
+5 -7
net/ipv4/netfilter/ip_queue.c
··· 218 218 return skb; 219 219 220 220 nlmsg_failure: 221 + kfree_skb(skb); 221 222 *errp = -EINVAL; 222 223 printk(KERN_ERR "ip_queue: error creating packet message\n"); 223 224 return NULL; ··· 314 313 { 315 314 struct nf_queue_entry *entry; 316 315 317 - if (vmsg->value > NF_MAX_VERDICT) 316 + if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) 318 317 return -EINVAL; 319 318 320 319 entry = ipq_find_dequeue_entry(vmsg->id); ··· 359 358 break; 360 359 361 360 case IPQM_VERDICT: 362 - if (pmsg->msg.verdict.value > NF_MAX_VERDICT) 363 - status = -EINVAL; 364 - else 365 - status = ipq_set_verdict(&pmsg->msg.verdict, 366 - len - sizeof(*pmsg)); 367 - break; 361 + status = ipq_set_verdict(&pmsg->msg.verdict, 362 + len - sizeof(*pmsg)); 363 + break; 368 364 default: 369 365 status = -EINVAL; 370 366 }
+2
net/ipv4/proc.c
··· 254 254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), 255 255 SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), 256 256 SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), 257 + SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES), 258 + SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP), 257 259 SNMP_MIB_SENTINEL 258 260 }; 259 261
+1 -1
net/ipv4/tcp_input.c
··· 1124 1124 return 0; 1125 1125 1126 1126 /* ...Then it's D-SACK, and must reside below snd_una completely */ 1127 - if (!after(end_seq, tp->snd_una)) 1127 + if (after(end_seq, tp->snd_una)) 1128 1128 return 0; 1129 1129 1130 1130 if (!before(start_seq, tp->undo_marker))
+28 -21
net/ipv4/tcp_ipv4.c
··· 808 808 kfree(inet_rsk(req)->opt); 809 809 } 810 810 811 - static void syn_flood_warning(const struct sk_buff *skb) 811 + /* 812 + * Return 1 if a syncookie should be sent 813 + */ 814 + int tcp_syn_flood_action(struct sock *sk, 815 + const struct sk_buff *skb, 816 + const char *proto) 812 817 { 813 - const char *msg; 818 + const char *msg = "Dropping request"; 819 + int want_cookie = 0; 820 + struct listen_sock *lopt; 821 + 822 + 814 823 815 824 #ifdef CONFIG_SYN_COOKIES 816 - if (sysctl_tcp_syncookies) 825 + if (sysctl_tcp_syncookies) { 817 826 msg = "Sending cookies"; 818 - else 827 + want_cookie = 1; 828 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); 829 + } else 819 830 #endif 820 - msg = "Dropping request"; 831 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); 821 832 822 - pr_info("TCP: Possible SYN flooding on port %d. %s.\n", 823 - ntohs(tcp_hdr(skb)->dest), msg); 833 + lopt = inet_csk(sk)->icsk_accept_queue.listen_opt; 834 + if (!lopt->synflood_warned) { 835 + lopt->synflood_warned = 1; 836 + pr_info("%s: Possible SYN flooding on port %d. %s. " 837 + " Check SNMP counters.\n", 838 + proto, ntohs(tcp_hdr(skb)->dest), msg); 839 + } 840 + return want_cookie; 824 841 } 842 + EXPORT_SYMBOL(tcp_syn_flood_action); 825 843 826 844 /* 827 845 * Save and compile IPv4 options into the request_sock if needed. ··· 1253 1235 __be32 saddr = ip_hdr(skb)->saddr; 1254 1236 __be32 daddr = ip_hdr(skb)->daddr; 1255 1237 __u32 isn = TCP_SKB_CB(skb)->when; 1256 - #ifdef CONFIG_SYN_COOKIES 1257 1238 int want_cookie = 0; 1258 - #else 1259 - #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */ 1260 - #endif 1261 1239 1262 1240 /* Never answer to SYNs send to broadcast or multicast */ 1263 1241 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) ··· 1264 1250 * evidently real one. 1265 1251 */ 1266 1252 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1267 - if (net_ratelimit()) 1268 - syn_flood_warning(skb); 1269 - #ifdef CONFIG_SYN_COOKIES 1270 - if (sysctl_tcp_syncookies) { 1271 - want_cookie = 1; 1272 - } else 1273 - #endif 1274 - goto drop; 1253 + want_cookie = tcp_syn_flood_action(sk, skb, "TCP"); 1254 + if (!want_cookie) 1255 + goto drop; 1275 1256 } 1276 1257 1277 1258 /* Accept backlog is full. If we have already queued enough ··· 1312 1303 while (l-- > 0) 1313 1304 *c++ ^= *hash_location++; 1314 1305 1315 - #ifdef CONFIG_SYN_COOKIES 1316 1306 want_cookie = 0; /* not our kind of cookie */ 1317 - #endif 1318 1307 tmp_ext.cookie_out_never = 0; /* false */ 1319 1308 tmp_ext.cookie_plus = tmp_opt.cookie_plus; 1320 1309 } else if (!tp->rx_opt.cookie_in_always) {
+2 -2
net/ipv6/addrconf.c
··· 374 374 "%s(): cannot allocate memory for statistics; dev=%s.\n", 375 375 __func__, dev->name)); 376 376 neigh_parms_release(&nd_tbl, ndev->nd_parms); 377 - ndev->dead = 1; 378 - in6_dev_finish_destroy(ndev); 377 + dev_put(dev); 378 + kfree(ndev); 379 379 return NULL; 380 380 } 381 381
+3 -2
net/ipv6/datagram.c
··· 599 599 return 0; 600 600 } 601 601 602 - int datagram_send_ctl(struct net *net, 602 + int datagram_send_ctl(struct net *net, struct sock *sk, 603 603 struct msghdr *msg, struct flowi6 *fl6, 604 604 struct ipv6_txoptions *opt, 605 605 int *hlimit, int *tclass, int *dontfrag) ··· 658 658 659 659 if (addr_type != IPV6_ADDR_ANY) { 660 660 int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; 661 - if (!ipv6_chk_addr(net, &src_info->ipi6_addr, 661 + if (!inet_sk(sk)->transparent && 662 + !ipv6_chk_addr(net, &src_info->ipi6_addr, 662 663 strict ? dev : NULL, 0)) 663 664 err = -EINVAL; 664 665 else
+4 -4
net/ipv6/ip6_flowlabel.c
··· 322 322 } 323 323 324 324 static struct ip6_flowlabel * 325 - fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval, 326 - int optlen, int *err_p) 325 + fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, 326 + char __user *optval, int optlen, int *err_p) 327 327 { 328 328 struct ip6_flowlabel *fl = NULL; 329 329 int olen; ··· 360 360 msg.msg_control = (void*)(fl->opt+1); 361 361 memset(&flowi6, 0, sizeof(flowi6)); 362 362 363 - err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk, 363 + err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, 364 364 &junk, &junk); 365 365 if (err) 366 366 goto done; ··· 528 528 if (freq.flr_label & ~IPV6_FLOWLABEL_MASK) 529 529 return -EINVAL; 530 530 531 - fl = fl_create(net, &freq, optval, optlen, &err); 531 + fl = fl_create(net, sk, &freq, optval, optlen, &err); 532 532 if (fl == NULL) 533 533 return err; 534 534 sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
+6 -2
net/ipv6/ip6mr.c
··· 696 696 int err; 697 697 698 698 err = ip6mr_fib_lookup(net, &fl6, &mrt); 699 - if (err < 0) 699 + if (err < 0) { 700 + kfree_skb(skb); 700 701 return err; 702 + } 701 703 702 704 read_lock(&mrt_lock); 703 705 dev->stats.tx_bytes += skb->len; ··· 2054 2052 int err; 2055 2053 2056 2054 err = ip6mr_fib_lookup(net, &fl6, &mrt); 2057 - if (err < 0) 2055 + if (err < 0) { 2056 + kfree_skb(skb); 2058 2057 return err; 2058 + } 2059 2059 2060 2060 read_lock(&mrt_lock); 2061 2061 cache = ip6mr_cache_find(mrt,
+1 -1
net/ipv6/ipv6_sockglue.c
··· 475 475 msg.msg_controllen = optlen; 476 476 msg.msg_control = (void*)(opt+1); 477 477 478 - retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk, 478 + retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, 479 479 &junk); 480 480 if (retv) 481 481 goto done;
+5 -7
net/ipv6/netfilter/ip6_queue.c
··· 218 218 return skb; 219 219 220 220 nlmsg_failure: 221 + kfree_skb(skb); 221 222 *errp = -EINVAL; 222 223 printk(KERN_ERR "ip6_queue: error creating packet message\n"); 223 224 return NULL; ··· 314 313 { 315 314 struct nf_queue_entry *entry; 316 315 317 - if (vmsg->value > NF_MAX_VERDICT) 316 + if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) 318 317 return -EINVAL; 319 318 320 319 entry = ipq_find_dequeue_entry(vmsg->id); ··· 359 358 break; 360 359 361 360 case IPQM_VERDICT: 362 - if (pmsg->msg.verdict.value > NF_MAX_VERDICT) 363 - status = -EINVAL; 364 - else 365 - status = ipq_set_verdict(&pmsg->msg.verdict, 366 - len - sizeof(*pmsg)); 367 - break; 361 + status = ipq_set_verdict(&pmsg->msg.verdict, 362 + len - sizeof(*pmsg)); 363 + break; 368 364 default: 369 365 status = -EINVAL; 370 366 }
+2 -2
net/ipv6/raw.c
··· 817 817 memset(opt, 0, sizeof(struct ipv6_txoptions)); 818 818 opt->tot_len = sizeof(struct ipv6_txoptions); 819 819 820 - err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, 821 - &tclass, &dontfrag); 820 + err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 821 + &hlimit, &tclass, &dontfrag); 822 822 if (err < 0) { 823 823 fl6_sock_release(flowlabel); 824 824 return err;
+25 -12
net/ipv6/route.c
··· 104 104 struct inet_peer *peer; 105 105 u32 *p = NULL; 106 106 107 + if (!(rt->dst.flags & DST_HOST)) 108 + return NULL; 109 + 107 110 if (!rt->rt6i_peer) 108 111 rt6_bind_peer(rt, 1); 109 112 ··· 244 241 { 245 242 struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags); 246 243 247 - memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry)); 244 + if (rt != NULL) 245 + memset(&rt->rt6i_table, 0, 246 + sizeof(*rt) - sizeof(struct dst_entry)); 248 247 249 248 return rt; 250 249 } ··· 256 251 struct rt6_info *rt = (struct rt6_info *)dst; 257 252 struct inet6_dev *idev = rt->rt6i_idev; 258 253 struct inet_peer *peer = rt->rt6i_peer; 254 + 255 + if (!(rt->dst.flags & DST_HOST)) 256 + dst_destroy_metrics_generic(dst); 259 257 260 258 if (idev != NULL) { 261 259 rt->rt6i_idev = NULL; ··· 731 723 ipv6_addr_copy(&rt->rt6i_gateway, daddr); 732 724 } 733 725 734 - rt->rt6i_dst.plen = 128; 735 726 rt->rt6i_flags |= RTF_CACHE; 736 - rt->dst.flags |= DST_HOST; 737 727 738 728 #ifdef CONFIG_IPV6_SUBTREES 739 729 if (rt->rt6i_src.plen && saddr) { ··· 781 775 struct rt6_info *rt = ip6_rt_copy(ort, daddr); 782 776 783 777 if (rt) { 784 - rt->rt6i_dst.plen = 128; 785 778 rt->rt6i_flags |= RTF_CACHE; 786 - rt->dst.flags |= DST_HOST; 787 779 dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst))); 788 780 } 789 781 return rt; ··· 1082 1078 neigh = NULL; 1083 1079 } 1084 1080 1085 - rt->rt6i_idev = idev; 1081 + rt->dst.flags |= DST_HOST; 1082 + rt->dst.output = ip6_output; 1086 1083 dst_set_neighbour(&rt->dst, neigh); 1087 1084 atomic_set(&rt->dst.__refcnt, 1); 1088 - ipv6_addr_copy(&rt->rt6i_dst.addr, addr); 1089 1085 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255); 1090 - rt->dst.output = ip6_output; 1086 + 1087 + ipv6_addr_copy(&rt->rt6i_dst.addr, addr); 1088 + rt->rt6i_dst.plen = 128; 1089 + rt->rt6i_idev = idev; 1091 1090 1092 1091 spin_lock_bh(&icmp6_dst_lock); 1093 1092 rt->dst.next = icmp6_dst_gc_list; ··· 1268 1261 if (rt->rt6i_dst.plen == 128) 1269 1262 rt->dst.flags |= DST_HOST; 1270 1263 1264 + if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) { 1265 + u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); 1266 + if (!metrics) { 1267 + err = -ENOMEM; 1268 + goto out; 1269 + } 1270 + dst_init_metrics(&rt->dst, metrics, 0); 1271 + } 1271 1272 #ifdef CONFIG_IPV6_SUBTREES 1272 1273 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len); 1273 1274 rt->rt6i_src.plen = cfg->fc_src_len; ··· 1622 1607 if (on_link) 1623 1608 nrt->rt6i_flags &= ~RTF_GATEWAY; 1624 1609 1625 - nrt->rt6i_dst.plen = 128; 1626 - nrt->dst.flags |= DST_HOST; 1627 - 1628 1610 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key); 1629 1611 dst_set_neighbour(&nrt->dst, neigh_clone(neigh)); 1630 1612 ··· 1766 1754 if (rt) { 1767 1755 rt->dst.input = ort->dst.input; 1768 1756 rt->dst.output = ort->dst.output; 1757 + rt->dst.flags |= DST_HOST; 1769 1758 1770 1759 ipv6_addr_copy(&rt->rt6i_dst.addr, dest); 1771 - rt->rt6i_dst.plen = ort->rt6i_dst.plen; 1760 + rt->rt6i_dst.plen = 128; 1772 1761 dst_copy_metrics(&rt->dst, &ort->dst); 1773 1762 rt->dst.error = ort->dst.error; 1774 1763 rt->rt6i_idev = ort->rt6i_idev;
+6 -28
net/ipv6/tcp_ipv6.c
··· 531 531 return tcp_v6_send_synack(sk, req, rvp); 532 532 } 533 533 534 - static inline void syn_flood_warning(struct sk_buff *skb) 535 - { 536 - #ifdef CONFIG_SYN_COOKIES 537 - if (sysctl_tcp_syncookies) 538 - printk(KERN_INFO 539 - "TCPv6: Possible SYN flooding on port %d. " 540 - "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest)); 541 - else 542 - #endif 543 - printk(KERN_INFO 544 - "TCPv6: Possible SYN flooding on port %d. " 545 - "Dropping request.\n", ntohs(tcp_hdr(skb)->dest)); 546 - } 547 - 548 534 static void tcp_v6_reqsk_destructor(struct request_sock *req) 549 535 { 550 536 kfree_skb(inet6_rsk(req)->pktopts); ··· 1165 1179 struct tcp_sock *tp = tcp_sk(sk); 1166 1180 __u32 isn = TCP_SKB_CB(skb)->when; 1167 1181 struct dst_entry *dst = NULL; 1168 - #ifdef CONFIG_SYN_COOKIES 1169 1182 int want_cookie = 0; 1170 - #else 1171 - #define want_cookie 0 1172 - #endif 1173 1183 1174 1184 if (skb->protocol == htons(ETH_P_IP)) 1175 1185 return tcp_v4_conn_request(sk, skb); ··· 1174 1192 goto drop; 1175 1193 1176 1194 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1177 - if (net_ratelimit()) 1178 - syn_flood_warning(skb); 1179 - #ifdef CONFIG_SYN_COOKIES 1180 - if (sysctl_tcp_syncookies) 1181 - want_cookie = 1; 1182 - else 1183 - #endif 1184 - goto drop; 1195 + want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6"); 1196 + if (!want_cookie) 1197 + goto drop; 1185 1198 } 1186 1199 1187 1200 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) ··· 1226 1249 while (l-- > 0) 1227 1250 *c++ ^= *hash_location++; 1228 1251 1229 - #ifdef CONFIG_SYN_COOKIES 1230 1252 want_cookie = 0; /* not our kind of cookie */ 1231 - #endif 1232 1253 tmp_ext.cookie_out_never = 0; /* false */ 1233 1254 tmp_ext.cookie_plus = tmp_opt.cookie_plus; 1234 1255 } else if (!tp->rx_opt.cookie_in_always) { ··· 1383 1408 newtp->af_specific = &tcp_sock_ipv6_mapped_specific; 1384 1409 #endif 1385 1410 1411 + newnp->ipv6_ac_list = NULL; 1412 + newnp->ipv6_fl_list = NULL; 1386 1413 newnp->pktoptions = NULL; 1387 1414 newnp->opt = NULL; 1388 1415 newnp->mcast_oif = inet6_iif(skb); ··· 1449 1472 First: no IPv4 options. 1450 1473 */ 1451 1474 newinet->inet_opt = NULL; 1475 + newnp->ipv6_ac_list = NULL; 1452 1476 newnp->ipv6_fl_list = NULL; 1453 1477 1454 1478 /* Clone RX bits */
+2 -2
net/ipv6/udp.c
··· 1090 1090 memset(opt, 0, sizeof(struct ipv6_txoptions)); 1091 1091 opt->tot_len = sizeof(*opt); 1092 1092 1093 - err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, 1094 - &tclass, &dontfrag); 1093 + err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, 1094 + &hlimit, &tclass, &dontfrag); 1095 1095 if (err < 0) { 1096 1096 fl6_sock_release(flowlabel); 1097 1097 return err;
+3 -3
net/irda/irsysctl.c
··· 40 40 extern int sysctl_fast_poll_increase; 41 41 extern char sysctl_devname[]; 42 42 extern int sysctl_max_baud_rate; 43 - extern int sysctl_min_tx_turn_time; 44 - extern int sysctl_max_tx_data_size; 45 - extern int sysctl_max_tx_window; 43 + extern unsigned int sysctl_min_tx_turn_time; 44 + extern unsigned int sysctl_max_tx_data_size; 45 + extern unsigned int sysctl_max_tx_window; 46 46 extern int sysctl_max_noreply_time; 47 47 extern int sysctl_warn_noreply_time; 48 48 extern int sysctl_lap_keepalive_time;
+3 -3
net/irda/qos.c
··· 60 60 * Default is 10us which means using the unmodified value given by the 61 61 * peer except if it's 0 (0 is likely a bug in the other stack). 62 62 */ 63 - unsigned sysctl_min_tx_turn_time = 10; 63 + unsigned int sysctl_min_tx_turn_time = 10; 64 64 /* 65 65 * Maximum data size to be used in transmission in payload of LAP frame. 66 66 * There is a bit of confusion in the IrDA spec : ··· 75 75 * bytes frames or all negotiated frame sizes, but you can use the sysctl 76 76 * to play with this value anyway. 77 77 * Jean II */ 78 - unsigned sysctl_max_tx_data_size = 2042; 78 + unsigned int sysctl_max_tx_data_size = 2042; 79 79 /* 80 80 * Maximum transmit window, i.e. number of LAP frames between turn-around. 81 81 * This allow to override what the peer told us. Some peers are buggy and 82 82 * don't always support what they tell us. 83 83 * Jean II */ 84 - unsigned sysctl_max_tx_window = 7; 84 + unsigned int sysctl_max_tx_window = 7; 85 85 86 86 static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get); 87 87 static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
+1 -1
net/mac80211/sta_info.c
··· 665 665 BUG_ON(!sdata->bss); 666 666 667 667 atomic_dec(&sdata->bss->num_sta_ps); 668 - __sta_info_clear_tim_bit(sdata->bss, sta); 668 + sta_info_clear_tim_bit(sta); 669 669 } 670 670 671 671 local->num_sta--;
+1
net/netfilter/nf_conntrack_pptp.c
··· 364 364 break; 365 365 366 366 case PPTP_WAN_ERROR_NOTIFY: 367 + case PPTP_SET_LINK_INFO: 367 368 case PPTP_ECHO_REQUEST: 368 369 case PPTP_ECHO_REPLY: 369 370 /* I don't have to explain these ;) */
+3 -3
net/netfilter/nf_conntrack_proto_tcp.c
··· 409 409 if (opsize < 2) /* "silly options" */ 410 410 return; 411 411 if (opsize > length) 412 - break; /* don't parse partial options */ 412 + return; /* don't parse partial options */ 413 413 414 414 if (opcode == TCPOPT_SACK_PERM 415 415 && opsize == TCPOLEN_SACK_PERM) ··· 447 447 BUG_ON(ptr == NULL); 448 448 449 449 /* Fast path for timestamp-only option */ 450 - if (length == TCPOLEN_TSTAMP_ALIGNED*4 450 + if (length == TCPOLEN_TSTAMP_ALIGNED 451 451 && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24) 452 452 | (TCPOPT_NOP << 16) 453 453 | (TCPOPT_TIMESTAMP << 8) ··· 469 469 if (opsize < 2) /* "silly options" */ 470 470 return; 471 471 if (opsize > length) 472 - break; /* don't parse partial options */ 472 + return; /* don't parse partial options */ 473 473 474 474 if (opcode == TCPOPT_SACK 475 475 && opsize >= (TCPOLEN_SACK_BASE
+4 -5
net/netfilter/xt_rateest.c
··· 78 78 { 79 79 struct xt_rateest_match_info *info = par->matchinfo; 80 80 struct xt_rateest *est1, *est2; 81 - int ret = false; 81 + int ret = -EINVAL; 82 82 83 83 if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS | 84 84 XT_RATEEST_MATCH_REL)) != 1) ··· 101 101 if (!est1) 102 102 goto err1; 103 103 104 + est2 = NULL; 104 105 if (info->flags & XT_RATEEST_MATCH_REL) { 105 106 est2 = xt_rateest_lookup(info->name2); 106 107 if (!est2) 107 108 goto err2; 108 - } else 109 - est2 = NULL; 110 - 109 + } 111 110 112 111 info->est1 = est1; 113 112 info->est2 = est2; ··· 115 116 err2: 116 117 xt_rateest_put(est1); 117 118 err1: 118 - return -EINVAL; 119 + return ret; 119 120 } 120 121 121 122 static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
+4 -1
net/packet/af_packet.c
··· 961 961 return 0; 962 962 963 963 drop_n_acct: 964 - po->stats.tp_drops = atomic_inc_return(&sk->sk_drops); 964 + spin_lock(&sk->sk_receive_queue.lock); 965 + po->stats.tp_drops++; 966 + atomic_inc(&sk->sk_drops); 967 + spin_unlock(&sk->sk_receive_queue.lock); 965 968 966 969 drop_n_restore: 967 970 if (skb_head != skb->data && skb_shared(skb)) {
+9 -4
net/rds/iw_rdma.c
··· 84 84 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); 85 85 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, 86 86 struct list_head *unmap_list, 87 - struct list_head *kill_list); 87 + struct list_head *kill_list, 88 + int *unpinned); 88 89 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); 89 90 90 91 static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id) ··· 500 499 LIST_HEAD(unmap_list); 501 500 LIST_HEAD(kill_list); 502 501 unsigned long flags; 503 - unsigned int nfreed = 0, ncleaned = 0, free_goal; 502 + unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal; 504 503 int ret = 0; 505 504 506 505 rds_iw_stats_inc(s_iw_rdma_mr_pool_flush); ··· 525 524 * will be destroyed by the unmap function. 526 525 */ 527 526 if (!list_empty(&unmap_list)) { 528 - ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list); 527 + ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, 528 + &kill_list, &unpinned); 529 529 /* If we've been asked to destroy all MRs, move those 530 530 * that were simply cleaned to the kill list */ 531 531 if (free_all) ··· 550 548 spin_unlock_irqrestore(&pool->list_lock, flags); 551 549 } 552 550 551 + atomic_sub(unpinned, &pool->free_pinned); 553 552 atomic_sub(ncleaned, &pool->dirty_count); 554 553 atomic_sub(nfreed, &pool->item_count); 555 554 ··· 831 828 832 829 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, 833 830 struct list_head *unmap_list, 834 - struct list_head *kill_list) 831 + struct list_head *kill_list, 832 + int *unpinned) 835 833 { 836 834 struct rds_iw_mapping *mapping, *next; 837 835 unsigned int ncleaned = 0; ··· 859 855 860 856 spin_lock_irqsave(&pool->list_lock, flags); 861 857 list_for_each_entry_safe(mapping, next, unmap_list, m_list) { 858 + *unpinned += mapping->m_sg.len; 862 859 list_move(&mapping->m_list, &laundered); 863 860 ncleaned++; 864 861 }
+13 -14
net/sched/cls_rsvp.h
··· 425 425 struct rsvp_filter *f, **fp; 426 426 struct rsvp_session *s, **sp; 427 427 struct tc_rsvp_pinfo *pinfo = NULL; 428 - struct nlattr *opt = tca[TCA_OPTIONS-1]; 428 + struct nlattr *opt = tca[TCA_OPTIONS]; 429 429 struct nlattr *tb[TCA_RSVP_MAX + 1]; 430 430 struct tcf_exts e; 431 431 unsigned int h1, h2; ··· 439 439 if (err < 0) 440 440 return err; 441 441 442 - err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &rsvp_ext_map); 442 + err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map); 443 443 if (err < 0) 444 444 return err; 445 445 ··· 449 449 450 450 if (f->handle != handle && handle) 451 451 goto errout2; 452 - if (tb[TCA_RSVP_CLASSID-1]) { 453 - f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); 452 + if (tb[TCA_RSVP_CLASSID]) { 453 + f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); 454 454 tcf_bind_filter(tp, &f->res, base); 455 455 } 456 456 ··· 462 462 err = -EINVAL; 463 463 if (handle) 464 464 goto errout2; 465 - if (tb[TCA_RSVP_DST-1] == NULL) 465 + if (tb[TCA_RSVP_DST] == NULL) 466 466 goto errout2; 467 467 468 468 err = -ENOBUFS; ··· 471 471 goto errout2; 472 472 473 473 h2 = 16; 474 - if (tb[TCA_RSVP_SRC-1]) { 475 - memcpy(f->src, nla_data(tb[TCA_RSVP_SRC-1]), sizeof(f->src)); 474 + if (tb[TCA_RSVP_SRC]) { 475 + memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src)); 476 476 h2 = hash_src(f->src); 477 477 } 478 - if (tb[TCA_RSVP_PINFO-1]) { 479 - pinfo = nla_data(tb[TCA_RSVP_PINFO-1]); 478 + if (tb[TCA_RSVP_PINFO]) { 479 + pinfo = nla_data(tb[TCA_RSVP_PINFO]); 480 480 f->spi = pinfo->spi; 481 481 f->tunnelhdr = pinfo->tunnelhdr; 482 482 } 483 - if (tb[TCA_RSVP_CLASSID-1]) 484 - f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); 483 + if (tb[TCA_RSVP_CLASSID]) 484 + f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); 485 485 486 - dst = nla_data(tb[TCA_RSVP_DST-1]); 486 + dst = nla_data(tb[TCA_RSVP_DST]); 487 487 h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0); 488 488 489 489 err = -ENOMEM; ··· 642 642 return -1; 643 643 } 644 644 645 - static struct tcf_proto_ops RSVP_OPS = { 646 - .next = NULL, 645 + static struct tcf_proto_ops RSVP_OPS __read_mostly = { 647 646 .kind = RSVP_ID, 648 647 .classify = rsvp_classify, 649 648 .init = rsvp_init,
+5
net/sctp/sm_sideeffect.c
··· 1689 1689 case SCTP_CMD_PURGE_ASCONF_QUEUE: 1690 1690 sctp_asconf_queue_teardown(asoc); 1691 1691 break; 1692 + 1693 + case SCTP_CMD_SET_ASOC: 1694 + asoc = cmd->obj.asoc; 1695 + break; 1696 + 1692 1697 default: 1693 1698 pr_warn("Impossible command: %u, %p\n", 1694 1699 cmd->verb, cmd->obj.ptr);
+6
net/sctp/sm_statefuns.c
··· 2047 2047 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); 2048 2048 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); 2049 2049 2050 + /* Restore association pointer to provide SCTP command interpreter 2051 + * with a valid context in case it needs to manipulate 2052 + * the queues */ 2053 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, 2054 + SCTP_ASOC((struct sctp_association *)asoc)); 2055 + 2050 2056 return retval; 2051 2057 2052 2058 nomem:
+4 -1
net/wireless/nl80211.c
··· 4113 4113 if (len % sizeof(u32)) 4114 4114 return -EINVAL; 4115 4115 4116 + if (settings->n_akm_suites > NL80211_MAX_NR_AKM_SUITES) 4117 + return -EINVAL; 4118 + 4116 4119 memcpy(settings->akm_suites, data, len); 4117 4120 4118 - for (i = 0; i < settings->n_ciphers_pairwise; i++) 4121 + for (i = 0; i < settings->n_akm_suites; i++) 4119 4122 if (!nl80211_valid_akm_suite(settings->akm_suites[i])) 4120 4123 return -EINVAL; 4121 4124 }
+1
net/wireless/reg.c
··· 852 852 return; 853 853 } 854 854 855 + chan->beacon_found = false; 855 856 chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); 856 857 chan->max_antenna_gain = min(chan->orig_mag, 857 858 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
+2
net/wireless/sme.c
··· 118 118 i++, j++) 119 119 request->channels[i] = 120 120 &wdev->wiphy->bands[band]->channels[j]; 121 + request->rates[band] = 122 + (1 << wdev->wiphy->bands[band]->n_bitrates) - 1; 121 123 } 122 124 } 123 125 request->n_channels = n_channels;
+5
net/xfrm/xfrm_input.c
··· 212 212 /* only the first xfrm gets the encap type */ 213 213 encap_type = 0; 214 214 215 + if (async && x->repl->check(x, skb, seq)) { 216 + XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); 217 + goto drop_unlock; 218 + } 219 + 215 220 x->repl->advance(x, seq); 216 221 217 222 x->curlft.bytes += skb->len;
+6 -4
net/xfrm/xfrm_policy.c
··· 1349 1349 BUG(); 1350 1350 } 1351 1351 xdst = dst_alloc(dst_ops, NULL, 0, 0, 0); 1352 - memset(&xdst->u.rt6.rt6i_table, 0, sizeof(*xdst) - sizeof(struct dst_entry)); 1353 - xfrm_policy_put_afinfo(afinfo); 1354 1352 1355 - if (likely(xdst)) 1353 + if (likely(xdst)) { 1354 + memset(&xdst->u.rt6.rt6i_table, 0, 1355 + sizeof(*xdst) - sizeof(struct dst_entry)); 1356 1356 xdst->flo.ops = &xfrm_bundle_fc_ops; 1357 - else 1357 + } else 1358 1358 xdst = ERR_PTR(-ENOBUFS); 1359 + 1360 + xfrm_policy_put_afinfo(afinfo); 1359 1361 1360 1362 return xdst; 1361 1363 }
+24 -9
sound/core/pcm_lib.c
··· 1761 1761 snd_pcm_uframes_t avail = 0; 1762 1762 long wait_time, tout; 1763 1763 1764 + init_waitqueue_entry(&wait, current); 1765 + set_current_state(TASK_INTERRUPTIBLE); 1766 + add_wait_queue(&runtime->tsleep, &wait); 1767 + 1764 1768 if (runtime->no_period_wakeup) 1765 1769 wait_time = MAX_SCHEDULE_TIMEOUT; 1766 1770 else { ··· 1775 1771 } 1776 1772 wait_time = msecs_to_jiffies(wait_time * 1000); 1777 1773 } 1778 - init_waitqueue_entry(&wait, current); 1779 - add_wait_queue(&runtime->tsleep, &wait); 1774 + 1780 1775 for (;;) { 1781 1776 if (signal_pending(current)) { 1782 1777 err = -ERESTARTSYS; 1783 1778 break; 1784 1779 } 1780 + 1781 + /* 1782 + * We need to check if space became available already 1783 + * (and thus the wakeup happened already) first to close 1784 + * the race of space already having become available. 1785 + * This check must happen after being added to the waitqueue 1786 + * and having current state be INTERRUPTIBLE. 1787 + */ 1788 + if (is_playback) 1789 + avail = snd_pcm_playback_avail(runtime); 1790 + else 1791 + avail = snd_pcm_capture_avail(runtime); 1792 + if (avail >= runtime->twake) 1793 + break; 1785 1794 snd_pcm_stream_unlock_irq(substream); 1786 - tout = schedule_timeout_interruptible(wait_time); 1795 + 1796 + tout = schedule_timeout(wait_time); 1797 + 1787 1798 snd_pcm_stream_lock_irq(substream); 1799 + set_current_state(TASK_INTERRUPTIBLE); 1788 1800 switch (runtime->status->state) { 1789 1801 case SNDRV_PCM_STATE_SUSPENDED: 1790 1802 err = -ESTRPIPE; ··· 1826 1806 err = -EIO; 1827 1807 break; 1828 1808 } 1829 - if (is_playback) 1830 - avail = snd_pcm_playback_avail(runtime); 1831 - else 1832 - avail = snd_pcm_capture_avail(runtime); 1833 - if (avail >= runtime->twake) 1834 - break; 1835 1809 } 1836 1810 _endloop: 1811 + set_current_state(TASK_RUNNING); 1837 1812 remove_wait_queue(&runtime->tsleep, &wait); 1838 1813 *availp = avail; 1839 1814 return err;
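The reordering above is the canonical prepare-to-wait pattern: the task must already be on the waitqueue with its state set to TASK_INTERRUPTIBLE before the condition is tested, otherwise a wakeup arriving between the test and the sleep is lost. Stripped of the ALSA specifics, the shape is (a sketch; wq, condition and timeout are placeholders):

    DECLARE_WAITQUEUE(wait, current);

    add_wait_queue(&wq, &wait);
    for (;;) {
            set_current_state(TASK_INTERRUPTIBLE);
            if (condition)                  /* test only after queuing */
                    break;
            schedule_timeout(timeout);      /* returns early if a wakeup already ran */
    }
    set_current_state(TASK_RUNNING);
    remove_wait_queue(&wq, &wait);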
+10 -5
sound/pci/fm801.c
··· 68 68 module_param_array(tea575x_tuner, int, NULL, 0444); 69 69 MODULE_PARM_DESC(tea575x_tuner, "TEA575x tuner access method (0 = auto, 1 = SF256-PCS, 2=SF256-PCP, 3=SF64-PCR, 8=disable, +16=tuner-only)."); 70 70 71 + #define TUNER_DISABLED (1<<3) 71 72 #define TUNER_ONLY (1<<4) 72 73 #define TUNER_TYPE_MASK (~TUNER_ONLY & 0xFFFF) 73 74 ··· 1151 1150 1152 1151 __end_hw: 1153 1152 #ifdef CONFIG_SND_FM801_TEA575X_BOOL 1154 - snd_tea575x_exit(&chip->tea); 1153 + if (!(chip->tea575x_tuner & TUNER_DISABLED)) 1154 + snd_tea575x_exit(&chip->tea); 1155 1155 #endif 1156 1156 if (chip->irq >= 0) 1157 1157 free_irq(chip->irq, chip); ··· 1238 1236 (tea575x_tuner & TUNER_TYPE_MASK) < 4) { 1239 1237 if (snd_tea575x_init(&chip->tea)) { 1240 1238 snd_printk(KERN_ERR "TEA575x radio not found\n"); 1241 - snd_fm801_free(chip); 1242 1239 return -ENODEV; 1243 1240 } 1244 1241 } else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) { ··· 1252 1251 } 1253 1252 if (tea575x_tuner == 4) { 1254 1253 snd_printk(KERN_ERR "TEA575x radio not found\n"); 1255 - snd_fm801_free(chip); 1256 - return -ENODEV; 1254 + chip->tea575x_tuner = TUNER_DISABLED; 1257 1255 } 1258 1256 } 1259 - strlcpy(chip->tea.card, snd_fm801_tea575x_gpios[(tea575x_tuner & TUNER_TYPE_MASK) - 1].name, sizeof(chip->tea.card)); 1257 + if (!(chip->tea575x_tuner & TUNER_DISABLED)) { 1258 + strlcpy(chip->tea.card, 1259 + snd_fm801_tea575x_gpios[(tea575x_tuner & 1260 + TUNER_TYPE_MASK) - 1].name, 1261 + sizeof(chip->tea.card)); 1262 + } 1260 1263 #endif 1261 1264 1262 1265 *rchip = chip;
+5 -1
sound/pci/hda/hda_codec.c
··· 579 579 return -1; 580 580 } 581 581 recursive++; 582 - for (i = 0; i < nums; i++) 582 + for (i = 0; i < nums; i++) { 583 + unsigned int type = get_wcaps_type(get_wcaps(codec, conn[i])); 584 + if (type == AC_WID_PIN || type == AC_WID_AUD_OUT) 585 + continue; 583 586 if (snd_hda_get_conn_index(codec, conn[i], nid, recursive) >= 0) 584 587 return i; 588 + } 585 589 return -1; 586 590 } 587 591 EXPORT_SYMBOL_HDA(snd_hda_get_conn_index);
+5 -4
sound/pci/hda/hda_intel.c
··· 1924 1924 } 1925 1925 1926 1926 static unsigned int azx_get_position(struct azx *chip, 1927 - struct azx_dev *azx_dev) 1927 + struct azx_dev *azx_dev, 1928 + bool with_check) 1928 1929 { 1929 1930 unsigned int pos; 1930 1931 int stream = azx_dev->substream->stream; ··· 1941 1940 default: 1942 1941 /* use the position buffer */ 1943 1942 pos = le32_to_cpu(*azx_dev->posbuf); 1944 - if (chip->position_fix[stream] == POS_FIX_AUTO) { 1943 + if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) { 1945 1944 if (!pos || pos == (u32)-1) { 1946 1945 printk(KERN_WARNING 1947 1946 "hda-intel: Invalid position buffer, " ··· 1965 1964 struct azx *chip = apcm->chip; 1966 1965 struct azx_dev *azx_dev = get_azx_dev(substream); 1967 1966 return bytes_to_frames(substream->runtime, 1968 - azx_get_position(chip, azx_dev)); 1967 + azx_get_position(chip, azx_dev, false)); 1969 1968 } 1970 1969 1971 1970 /* ··· 1988 1987 return -1; /* bogus (too early) interrupt */ 1989 1988 1990 1989 stream = azx_dev->substream->stream; 1991 - pos = azx_get_position(chip, azx_dev); 1990 + pos = azx_get_position(chip, azx_dev, true); 1992 1991 1993 1992 if (WARN_ONCE(!azx_dev->period_bytes, 1994 1993 "hda-intel: zero azx_dev->period_bytes"))
+1 -1
sound/pci/hda/patch_cirrus.c
··· 535 535 int index, unsigned int pval, int dir, 536 536 struct snd_kcontrol **kctlp) 537 537 { 538 - char tmp[32]; 538 + char tmp[44]; 539 539 struct snd_kcontrol_new knew = 540 540 HDA_CODEC_VOLUME_IDX(tmp, index, 0, 0, HDA_OUTPUT); 541 541 knew.private_value = pval;
+12 -5
sound/pci/hda/patch_realtek.c
··· 168 168 unsigned int auto_mic_valid_imux:1; /* valid imux for auto-mic */ 169 169 unsigned int automute:1; /* HP automute enabled */ 170 170 unsigned int detect_line:1; /* Line-out detection enabled */ 171 - unsigned int automute_lines:1; /* automute line-out as well */ 171 + unsigned int automute_lines:1; /* automute line-out as well; NOP when automute_hp_lo isn't set */ 172 172 unsigned int automute_hp_lo:1; /* both HP and LO available */ 173 173 174 174 /* other flags */ ··· 551 551 if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0] || 552 552 spec->autocfg.line_out_pins[0] == spec->autocfg.speaker_pins[0]) 553 553 return; 554 - if (!spec->automute_lines || !spec->automute) 554 + if (!spec->automute || (spec->automute_hp_lo && !spec->automute_lines)) 555 555 on = 0; 556 556 else 557 557 on = spec->jack_present; ··· 577 577 static void alc_line_automute(struct hda_codec *codec) 578 578 { 579 579 struct alc_spec *spec = codec->spec; 580 + 581 + /* check LO jack only when it's different from HP */ 582 + if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0]) 583 + return; 580 584 581 585 spec->line_jack_present = 582 586 detect_jacks(codec, ARRAY_SIZE(spec->autocfg.line_out_pins), ··· 807 803 unsigned int val; 808 804 if (!spec->automute) 809 805 val = 0; 810 - else if (!spec->automute_lines) 806 + else if (!spec->automute_hp_lo || !spec->automute_lines) 811 807 val = 1; 812 808 else 813 809 val = 2; ··· 828 824 spec->automute = 0; 829 825 break; 830 826 case 1: 831 - if (spec->automute && !spec->automute_lines) 827 + if (spec->automute && 828 + (!spec->automute_hp_lo || !spec->automute_lines)) 832 829 return 0; 833 830 spec->automute = 1; 834 831 spec->automute_lines = 0; ··· 1325 1320 * 15 : 1 --> enable the function "Mute internal speaker 1326 1321 * when the external headphone out jack is plugged" 1327 1322 */ 1328 - if (!spec->autocfg.hp_pins[0]) { 1323 + if (!spec->autocfg.hp_pins[0] && 1324 + !(spec->autocfg.line_out_pins[0] && 1325 + spec->autocfg.line_out_type == AUTO_PIN_HP_OUT)) { 1329 1326 hda_nid_t nid; 1330 1327 tmp = (ass >> 11) & 0x3; /* HP to chassis */ 1331 1328 if (tmp == 0)
+2
sound/pci/hda/patch_sigmatel.c
··· 5630 5630 switch (codec->vendor_id) { 5631 5631 case 0x111d76d1: 5632 5632 case 0x111d76d9: 5633 + case 0x111d76df: 5633 5634 case 0x111d76e5: 5634 5635 case 0x111d7666: 5635 5636 case 0x111d7667: ··· 6574 6573 { .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx }, 6575 6574 { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx }, 6576 6575 { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx }, 6576 + { .id = 0x111d76df, .name = "92HD93BXX", .patch = patch_stac92hd83xxx}, 6577 6577 { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx}, 6578 6578 { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx}, 6579 6579 { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
+2 -2
sound/soc/blackfin/bf5xx-ad193x.c
··· 103 103 .cpu_dai_name = "bfin-tdm.0", 104 104 .codec_dai_name ="ad193x-hifi", 105 105 .platform_name = "bfin-tdm-pcm-audio", 106 - .codec_name = "ad193x.5", 106 + .codec_name = "spi0.5", 107 107 .ops = &bf5xx_ad193x_ops, 108 108 }, 109 109 { ··· 112 112 .cpu_dai_name = "bfin-tdm.1", 113 113 .codec_dai_name ="ad193x-hifi", 114 114 .platform_name = "bfin-tdm-pcm-audio", 115 - .codec_name = "ad193x.5", 115 + .codec_name = "spi0.5", 116 116 .ops = &bf5xx_ad193x_ops, 117 117 }, 118 118 };
+1 -1
sound/soc/blackfin/bf5xx-ad73311.c
··· 128 128 return 0; 129 129 } 130 130 131 - static int bf5xx_probe(struct platform_device *pdev) 131 + static int bf5xx_probe(struct snd_soc_card *card) 132 132 { 133 133 int err; 134 134 if (gpio_request(GPIO_SE, "AD73311_SE")) {
+2 -1
sound/soc/codecs/ssm2602.c
··· 431 431 static int ssm2602_set_bias_level(struct snd_soc_codec *codec, 432 432 enum snd_soc_bias_level level) 433 433 { 434 - u16 reg = snd_soc_read(codec, SSM2602_PWR) & 0xff7f; 434 + u16 reg = snd_soc_read(codec, SSM2602_PWR); 435 + reg &= ~(PWR_POWER_OFF | PWR_OSC_PDN); 435 436 436 437 switch (level) { 437 438 case SND_SOC_BIAS_ON:
+2 -2
sound/soc/codecs/wm8753.c
··· 1454 1454 /* set the update bits */ 1455 1455 snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); 1456 1456 snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); 1457 - snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); 1458 - snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); 1457 + snd_soc_update_bits(codec, WM8753_LADC, 0x0100, 0x0100); 1458 + snd_soc_update_bits(codec, WM8753_RADC, 0x0100, 0x0100); 1459 1459 snd_soc_update_bits(codec, WM8753_LOUT1V, 0x0100, 0x0100); 1460 1460 snd_soc_update_bits(codec, WM8753_ROUT1V, 0x0100, 0x0100); 1461 1461 snd_soc_update_bits(codec, WM8753_LOUT2V, 0x0100, 0x0100);
-26
sound/soc/codecs/wm8962.c
··· 3479 3479 } 3480 3480 EXPORT_SYMBOL_GPL(wm8962_mic_detect); 3481 3481 3482 - #ifdef CONFIG_PM 3483 - static int wm8962_resume(struct snd_soc_codec *codec) 3484 - { 3485 - u16 *reg_cache = codec->reg_cache; 3486 - int i; 3487 - 3488 - /* Restore the registers */ 3489 - for (i = 1; i < codec->driver->reg_cache_size; i++) { 3490 - switch (i) { 3491 - case WM8962_SOFTWARE_RESET: 3492 - continue; 3493 - default: 3494 - break; 3495 - } 3496 - 3497 - if (reg_cache[i] != wm8962_reg[i]) 3498 - snd_soc_write(codec, i, reg_cache[i]); 3499 - } 3500 - 3501 - return 0; 3502 - } 3503 - #else 3504 - #define wm8962_resume NULL 3505 - #endif 3506 - 3507 3482 #if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE) 3508 3483 static int beep_rates[] = { 3509 3484 500, 1000, 2000, 4000, ··· 3990 4015 static struct snd_soc_codec_driver soc_codec_dev_wm8962 = { 3991 4016 .probe = wm8962_probe, 3992 4017 .remove = wm8962_remove, 3993 - .resume = wm8962_resume, 3994 4018 .set_bias_level = wm8962_set_bias_level, 3995 4019 .reg_cache_size = WM8962_MAX_REGISTER + 1, 3996 4020 .reg_word_size = sizeof(u16),
+3 -3
sound/soc/fsl/mpc5200_dma.c
··· 369 369 .pcm_free = &psc_dma_free, 370 370 }; 371 371 372 - static int mpc5200_hpcd_probe(struct of_device *op) 372 + static int mpc5200_hpcd_probe(struct platform_device *op) 373 373 { 374 374 phys_addr_t fifo; 375 375 struct psc_dma *psc_dma; ··· 487 487 return ret; 488 488 } 489 489 490 - static int mpc5200_hpcd_remove(struct of_device *op) 490 + static int mpc5200_hpcd_remove(struct platform_device *op) 491 491 { 492 492 struct psc_dma *psc_dma = dev_get_drvdata(&op->dev); 493 493 ··· 519 519 static struct platform_driver mpc5200_hpcd_of_driver = { 520 520 .probe = mpc5200_hpcd_probe, 521 521 .remove = mpc5200_hpcd_remove, 522 - .dev = { 522 + .driver = { 523 523 .owner = THIS_MODULE, 524 524 .name = "mpc5200-pcm-audio", 525 525 .of_match_table = mpc5200_hpcd_match,
-1
sound/soc/imx/imx-pcm-fiq.c
··· 240 240 241 241 static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd) 242 242 { 243 - struct snd_card *card = rtd->card->snd_card; 244 243 struct snd_soc_dai *dai = rtd->cpu_dai; 245 244 struct snd_pcm *pcm = rtd->pcm; 246 245 int ret;
+1 -1
sound/soc/kirkwood/kirkwood-i2s.c
··· 424 424 if (!priv->mem) { 425 425 dev_err(&pdev->dev, "request_mem_region failed\n"); 426 426 err = -EBUSY; 427 - goto error_alloc; 427 + goto err_alloc; 428 428 } 429 429 430 430 priv->io = ioremap(priv->mem->start, SZ_16K);
+1 -1
sound/soc/omap/mcpdm.c
··· 449 449 return ret; 450 450 } 451 451 452 - int __devexit omap_mcpdm_remove(struct platform_device *pdev) 452 + int omap_mcpdm_remove(struct platform_device *pdev) 453 453 { 454 454 struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev); 455 455
+1 -1
sound/soc/omap/mcpdm.h
··· 150 150 extern void omap_mcpdm_free(void); 151 151 extern int omap_mcpdm_set_offset(int offset1, int offset2); 152 152 int __devinit omap_mcpdm_probe(struct platform_device *pdev); 153 - int __devexit omap_mcpdm_remove(struct platform_device *pdev); 153 + int omap_mcpdm_remove(struct platform_device *pdev);
+6
sound/soc/omap/omap-mcbsp.c
··· 516 516 struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs; 517 517 int err = 0; 518 518 519 + if (mcbsp_data->active) 520 + if (freq == mcbsp_data->in_freq) 521 + return 0; 522 + else 523 + return -EBUSY; 524 + 519 525 /* The McBSP signal muxing functions are only available on McBSP1 */ 520 526 if (clk_id == OMAP_MCBSP_CLKR_SRC_CLKR || 521 527 clk_id == OMAP_MCBSP_CLKR_SRC_CLKX ||
+4 -4
sound/soc/pxa/zylonite.c
··· 196 196 if (clk_pout) { 197 197 pout = clk_get(NULL, "CLK_POUT"); 198 198 if (IS_ERR(pout)) { 199 - dev_err(&pdev->dev, "Unable to obtain CLK_POUT: %ld\n", 199 + dev_err(card->dev, "Unable to obtain CLK_POUT: %ld\n", 200 200 PTR_ERR(pout)); 201 201 return PTR_ERR(pout); 202 202 } 203 203 204 204 ret = clk_enable(pout); 205 205 if (ret != 0) { 206 - dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n", 206 + dev_err(card->dev, "Unable to enable CLK_POUT: %d\n", 207 207 ret); 208 208 clk_put(pout); 209 209 return ret; 210 210 } 211 211 212 - dev_dbg(&pdev->dev, "MCLK enabled at %luHz\n", 212 + dev_dbg(card->dev, "MCLK enabled at %luHz\n", 213 213 clk_get_rate(pout)); 214 214 } 215 215 ··· 241 241 if (clk_pout) { 242 242 ret = clk_enable(pout); 243 243 if (ret != 0) 244 - dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n", 244 + dev_err(card->dev, "Unable to enable CLK_POUT: %d\n", 245 245 ret); 246 246 } 247 247
+6 -6
sound/soc/soc-cache.c
··· 203 203 rbnode = rb_entry(node, struct snd_soc_rbtree_node, node); 204 204 for (i = 0; i < rbnode->blklen; ++i) { 205 205 regtmp = rbnode->base_reg + i; 206 - WARN_ON(codec->writable_register && 207 - codec->writable_register(codec, regtmp)); 208 206 val = snd_soc_rbtree_get_register(rbnode, i); 209 207 def = snd_soc_get_cache_val(codec->reg_def_copy, i, 210 208 rbnode->word_size); 211 209 if (val == def) 212 210 continue; 211 + 212 + WARN_ON(!snd_soc_codec_writable_register(codec, regtmp)); 213 213 214 214 codec->cache_bypass = 1; 215 215 ret = snd_soc_write(codec, regtmp, val); ··· 563 563 564 564 lzo_blocks = codec->reg_cache; 565 565 for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) { 566 - WARN_ON(codec->writable_register && 567 - codec->writable_register(codec, i)); 566 + WARN_ON(!snd_soc_codec_writable_register(codec, i)); 568 567 ret = snd_soc_cache_read(codec, i, &val); 569 568 if (ret) 570 569 return ret; ··· 822 823 823 824 codec_drv = codec->driver; 824 825 for (i = 0; i < codec_drv->reg_cache_size; ++i) { 825 - WARN_ON(codec->writable_register && 826 - codec->writable_register(codec, i)); 827 826 ret = snd_soc_cache_read(codec, i, &val); 828 827 if (ret) 829 828 return ret; ··· 829 832 if (snd_soc_get_cache_val(codec->reg_def_copy, 830 833 i, codec_drv->reg_word_size) == val) 831 834 continue; 835 + 836 + WARN_ON(!snd_soc_codec_writable_register(codec, i)); 837 + 832 838 ret = snd_soc_write(codec, i, val); 833 839 if (ret) 834 840 return ret;
+17 -5
sound/soc/soc-core.c
··· 30 30 #include <linux/bitops.h> 31 31 #include <linux/debugfs.h> 32 32 #include <linux/platform_device.h> 33 + #include <linux/ctype.h> 33 34 #include <linux/slab.h> 34 35 #include <sound/ac97_codec.h> 35 36 #include <sound/core.h> ··· 1435 1434 "%s", card->name); 1436 1435 snprintf(card->snd_card->longname, sizeof(card->snd_card->longname), 1437 1436 "%s", card->long_name ? card->long_name : card->name); 1438 - if (card->driver_name) 1439 - strlcpy(card->snd_card->driver, card->driver_name, 1440 - sizeof(card->snd_card->driver)); 1437 + snprintf(card->snd_card->driver, sizeof(card->snd_card->driver), 1438 + "%s", card->driver_name ? card->driver_name : card->name); 1439 + for (i = 0; i < ARRAY_SIZE(card->snd_card->driver); i++) { 1440 + switch (card->snd_card->driver[i]) { 1441 + case '_': 1442 + case '-': 1443 + case '\0': 1444 + break; 1445 + default: 1446 + if (!isalnum(card->snd_card->driver[i])) 1447 + card->snd_card->driver[i] = '_'; 1448 + break; 1449 + } 1450 + } 1441 1451 1442 1452 if (card->late_probe) { 1443 1453 ret = card->late_probe(card); ··· 1645 1633 if (codec->readable_register) 1646 1634 return codec->readable_register(codec, reg); 1647 1635 else 1648 - return 0; 1636 + return 1; 1649 1637 } 1650 1638 EXPORT_SYMBOL_GPL(snd_soc_codec_readable_register); 1651 1639 ··· 1663 1651 if (codec->writable_register) 1664 1652 return codec->writable_register(codec, reg); 1665 1653 else 1666 - return 0; 1654 + return 1; 1667 1655 } 1668 1656 EXPORT_SYMBOL_GPL(snd_soc_codec_writable_register); 1669 1657
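The sanitizing loop keeps alphanumerics, '_' and '-', and rewrites every other character of the driver string to '_'; a hypothetical driver_name of "My Codec 1.0", for example, would come out as "My_Codec_1_0", which keeps the value safe to use as a plain lookup key in user space.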
+1 -1
sound/soc/soc-dapm.c
··· 2763 2763 2764 2764 /** 2765 2765 * snd_soc_dapm_free - free dapm resources 2766 - * @card: SoC device 2766 + * @dapm: DAPM context 2767 2767 * 2768 2768 * Free all dapm widgets and resources. 2769 2769 */
+1 -1
sound/soc/soc-jack.c
··· 105 105 106 106 snd_soc_dapm_sync(dapm); 107 107 108 - snd_jack_report(jack->jack, status); 108 + snd_jack_report(jack->jack, jack->status); 109 109 110 110 out: 111 111 mutex_unlock(&codec->mutex);
+5 -2
sound/usb/card.c
··· 530 530 return chip; 531 531 532 532 __error: 533 - if (chip && !chip->num_interfaces) 534 - snd_card_free(chip->card); 533 + if (chip) { 534 + if (!chip->num_interfaces) 535 + snd_card_free(chip->card); 536 + chip->probing = 0; 537 + } 535 538 mutex_unlock(&register_mutex); 536 539 __err_val: 537 540 return NULL;
+8 -1
tools/perf/Makefile
··· 30 30 # Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds. 31 31 # 32 32 # Define NO_DWARF if you do not want debug-info analysis feature at all. 33 + # 34 + # Define WERROR=0 to disable treating any warnings as errors. 33 35 34 36 $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE 35 37 @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) ··· 63 61 ARCH_CFLAGS := -DARCH_X86_64 64 62 ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S 65 63 endif 64 + endif 65 + 66 + # Treat warnings as errors unless directed not to 67 + ifneq ($(WERROR),0) 68 + CFLAGS_WERROR := -Werror 66 69 endif 67 70 68 71 # ··· 102 95 CFLAGS_OPTIMIZE = -O6 103 96 endif 104 97 105 - CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) 98 + CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) 106 99 EXTLIBS = -lpthread -lrt -lelf -lm 107 100 ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 108 101 ALL_LDFLAGS = $(LDFLAGS)
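The effect of the WERROR knob: -Werror stays on by default, but a compiler that emits warnings the code base has not caught up with no longer aborts the build; for example (invocation assumed, adjust the path to your checkout): make -C tools/perf WERROR=0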
+3
tools/perf/builtin-record.c
··· 161 161 struct perf_event_attr *attr = &evsel->attr; 162 162 int track = !evsel->idx; /* only the first counter needs these */ 163 163 164 + attr->disabled = 1; 164 165 attr->inherit = !no_inherit; 165 166 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 166 167 PERF_FORMAT_TOTAL_TIME_RUNNING | ··· 671 670 exit(-1); 672 671 } 673 672 } 673 + 674 + perf_evlist__enable(evsel_list); 674 675 675 676 /* 676 677 * Let the child rip
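This pairs with the perf_evlist__enable() helper added to tools/perf/util/evlist.c below: every counter is now created with attr->disabled = 1 and switched on only once the forked workload is ready, so record's own startup is excluded from the count. A minimal standalone sketch of the same create-disabled/enable-later pattern against the raw perf_event_open(2) interface (not the perf tool's code; glibc provides no wrapper, so the syscall is issued directly, and error handling is trimmed):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                   int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.disabled = 1;                        /* created switched off */

            fd = sys_perf_event_open(&attr, 0 /* self */, -1 /* any cpu */, -1, 0);
            if (fd < 0)
                    return 1;

            /* ... setup that must not be counted ... */

            ioctl(fd, PERF_EVENT_IOC_ENABLE);         /* start counting now */
            /* ... workload ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE);
            close(fd);
            return 0;
    }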
+1 -1
tools/perf/builtin-test.c
··· 561 561 } 562 562 563 563 err = perf_event__parse_sample(event, attr.sample_type, sample_size, 564 - false, &sample); 564 + false, &sample, false); 565 565 if (err) { 566 566 pr_err("Can't parse sample, err = %d\n", err); 567 567 goto out_munmap;
+5 -4
tools/perf/builtin-top.c
··· 191 191 symbol__annotate_zero_histograms(sym); 192 192 } 193 193 194 - static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip) 194 + static void record_precise_ip(struct sym_entry *syme, struct map *map, 195 + int counter, u64 ip) 195 196 { 196 197 struct annotation *notes; 197 198 struct symbol *sym; ··· 206 205 if (pthread_mutex_trylock(&notes->lock)) 207 206 return; 208 207 209 - ip = syme->map->map_ip(syme->map, ip); 210 - symbol__inc_addr_samples(sym, syme->map, counter, ip); 208 + ip = map->map_ip(map, ip); 209 + symbol__inc_addr_samples(sym, map, counter, ip); 211 210 212 211 pthread_mutex_unlock(&notes->lock); 213 212 } ··· 811 810 evsel = perf_evlist__id2evsel(top.evlist, sample->id); 812 811 assert(evsel != NULL); 813 812 syme->count[evsel->idx]++; 814 - record_precise_ip(syme, evsel->idx, ip); 813 + record_precise_ip(syme, al.map, evsel->idx, ip); 815 814 pthread_mutex_lock(&top.active_symbols_lock); 816 815 if (list_empty(&syme->node) || !syme->node.next) { 817 816 static bool first = true;
+5
tools/perf/util/event.c
··· 169 169 continue; 170 170 pbf += n + 3; 171 171 if (*pbf == 'x') { /* vm_exec */ 172 + char anonstr[] = "//anon\n"; 172 173 char *execname = strchr(bf, '/'); 173 174 174 175 /* Catch VDSO */ 175 176 if (execname == NULL) 176 177 execname = strstr(bf, "[vdso]"); 178 + 179 + /* Catch anonymous mmaps */ 180 + if ((execname == NULL) && !strstr(bf, "[")) 181 + execname = anonstr; 177 182 178 183 if (execname == NULL) 179 184 continue;
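The new branch covers executable lines in /proc/<pid>/maps that carry no pathname at all, typically JIT or other anonymous executable mappings. Made-up illustration: the first line below has the x bit but neither a path nor a bracketed pseudo-name like [vdso], so it is now recorded under the synthetic name "//anon" rather than skipped; the second still resolves through its real path.

    7fd8a4000000-7fd8a4021000 rwxp 00000000 00:00 0
    7fd8a5000000-7fd8a51f0000 r-xp 00000000 08:01 131    /usr/lib/libfoo.so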
+1 -1
tools/perf/util/event.h
··· 186 186 187 187 int perf_event__parse_sample(const union perf_event *event, u64 type, 188 188 int sample_size, bool sample_id_all, 189 - struct perf_sample *sample); 189 + struct perf_sample *sample, bool swapped); 190 190 191 191 #endif /* __PERF_RECORD_H */
+13
tools/perf/util/evlist.c
··· 113 113 } 114 114 } 115 115 116 + void perf_evlist__enable(struct perf_evlist *evlist) 117 + { 118 + int cpu, thread; 119 + struct perf_evsel *pos; 120 + 121 + for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 122 + list_for_each_entry(pos, &evlist->entries, node) { 123 + for (thread = 0; thread < evlist->threads->nr; thread++) 124 + ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE); 125 + } 126 + } 127 + } 128 + 116 129 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) 117 130 { 118 131 int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
+1
tools/perf/util/evlist.h
··· 54 54 void perf_evlist__munmap(struct perf_evlist *evlist); 55 55 56 56 void perf_evlist__disable(struct perf_evlist *evlist); 57 + void perf_evlist__enable(struct perf_evlist *evlist); 57 58 58 59 static inline void perf_evlist__set_maps(struct perf_evlist *evlist, 59 60 struct cpu_map *cpus,
+46 -11
tools/perf/util/evsel.c
··· 7 7 * Released under the GPL v2. (and only v2, not any later version) 8 8 */ 9 9 10 + #include <byteswap.h> 11 + #include "asm/bug.h" 10 12 #include "evsel.h" 11 13 #include "evlist.h" 12 14 #include "util.h" ··· 344 342 345 343 int perf_event__parse_sample(const union perf_event *event, u64 type, 346 344 int sample_size, bool sample_id_all, 347 - struct perf_sample *data) 345 + struct perf_sample *data, bool swapped) 348 346 { 349 347 const u64 *array; 348 + 349 + /* 350 + * used for cross-endian analysis. See git commit 65014ab3 351 + * for why this goofiness is needed. 352 + */ 353 + union { 354 + u64 val64; 355 + u32 val32[2]; 356 + } u; 357 + 350 358 351 359 data->cpu = data->pid = data->tid = -1; 352 360 data->stream_id = data->id = data->time = -1ULL; ··· 378 366 } 379 367 380 368 if (type & PERF_SAMPLE_TID) { 381 - u32 *p = (u32 *)array; 382 - data->pid = p[0]; 383 - data->tid = p[1]; 369 + u.val64 = *array; 370 + if (swapped) { 371 + /* undo swap of u64, then swap on individual u32s */ 372 + u.val64 = bswap_64(u.val64); 373 + u.val32[0] = bswap_32(u.val32[0]); 374 + u.val32[1] = bswap_32(u.val32[1]); 375 + } 376 + 377 + data->pid = u.val32[0]; 378 + data->tid = u.val32[1]; 384 379 array++; 385 380 } 386 381 ··· 414 395 } 415 396 416 397 if (type & PERF_SAMPLE_CPU) { 417 - u32 *p = (u32 *)array; 418 - data->cpu = *p; 398 + 399 + u.val64 = *array; 400 + if (swapped) { 401 + /* undo swap of u64, then swap on individual u32s */ 402 + u.val64 = bswap_64(u.val64); 403 + u.val32[0] = bswap_32(u.val32[0]); 404 + } 405 + 406 + data->cpu = u.val32[0]; 419 407 array++; 420 408 } 421 409 ··· 449 423 } 450 424 451 425 if (type & PERF_SAMPLE_RAW) { 452 - u32 *p = (u32 *)array; 426 + const u64 *pdata; 427 + 428 + u.val64 = *array; 429 + if (WARN_ONCE(swapped, 430 + "Endianness of raw data not corrected!\n")) { 431 + /* undo swap of u64, then swap on individual u32s */ 432 + u.val64 = bswap_64(u.val64); 433 + u.val32[0] = bswap_32(u.val32[0]); 434 + u.val32[1] = bswap_32(u.val32[1]); 435 + } 453 436 454 437 if (sample_overlap(event, array, sizeof(u32))) 455 438 return -EFAULT; 456 439 457 - data->raw_size = *p; 458 - p++; 440 + data->raw_size = u.val32[0]; 441 + pdata = (void *) array + sizeof(u32); 459 442 460 - if (sample_overlap(event, p, data->raw_size)) 443 + if (sample_overlap(event, pdata, data->raw_size)) 461 444 return -EFAULT; 462 445 463 - data->raw_data = p; 446 + data->raw_data = (void *) pdata; 464 447 } 465 448 466 449 return 0;
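Background for the union: when perf.data was produced on a machine of the opposite endianness, the session layer byte-swaps every u64 it reads, which is wrong for sample slots that actually hold two packed u32s, because a 64-bit swap both exchanges the two halves and reverses the bytes inside each. Undoing the u64 swap and then swapping each u32 individually restores the original values. A self-contained sketch of that recovery (assumes a little-endian host reading bytes written by a big-endian producer; values are made up):

    #include <assert.h>
    #include <byteswap.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
            /* pid=1, tid=2 stored as two big-endian u32s: the raw file bytes */
            unsigned char file_bytes[8] = { 0, 0, 0, 1, 0, 0, 0, 2 };
            union { uint64_t val64; uint32_t val32[2]; } u;

            memcpy(&u.val64, file_bytes, sizeof(u.val64));
            u.val64 = bswap_64(u.val64);    /* the reader's blanket u64 swap:
                                               wrong for a pair of u32s */

            /* the fix: undo the u64 swap, then swap each u32 individually */
            u.val64 = bswap_64(u.val64);
            u.val32[0] = bswap_32(u.val32[0]);
            u.val32[1] = bswap_32(u.val32[1]);

            assert(u.val32[0] == 1 && u.val32[1] == 2);
            return 0;
    }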
+1 -1
tools/perf/util/probe-finder.c
··· 659 659 if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die)) 660 660 ret = -ENOENT; 661 661 } 662 - if (ret == 0) 662 + if (ret >= 0) 663 663 ret = convert_variable(&vr_die, pf); 664 664 665 665 if (ret < 0)
+1 -1
tools/perf/util/python.c
··· 803 803 first = list_entry(evlist->entries.next, struct perf_evsel, node); 804 804 err = perf_event__parse_sample(event, first->attr.sample_type, 805 805 perf_evsel__sample_size(first), 806 - sample_id_all, &pevent->sample); 806 + sample_id_all, &pevent->sample, false); 807 807 if (err) 808 808 return PyErr_Format(PyExc_OSError, 809 809 "perf: can't parse sample, err=%d", err);
+2 -1
tools/perf/util/session.h
··· 162 162 { 163 163 return perf_event__parse_sample(event, session->sample_type, 164 164 session->sample_size, 165 - session->sample_id_all, sample); 165 + session->sample_id_all, sample, 166 + session->header.needs_swap); 166 167 } 167 168 168 169 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
+8 -2
tools/perf/util/sort.c
··· 151 151 { 152 152 u64 ip_l, ip_r; 153 153 154 + if (!left->ms.sym && !right->ms.sym) 155 + return right->level - left->level; 156 + 157 + if (!left->ms.sym || !right->ms.sym) 158 + return cmp_null(left->ms.sym, right->ms.sym); 159 + 154 160 if (left->ms.sym == right->ms.sym) 155 161 return 0; 156 162 157 - ip_l = left->ms.sym ? left->ms.sym->start : left->ip; 158 - ip_r = right->ms.sym ? right->ms.sym->start : right->ip; 163 + ip_l = left->ms.sym->start; 164 + ip_r = right->ms.sym->start; 159 165 160 166 return (int64_t)(ip_r - ip_l); 161 167 }
+118 -35
tools/perf/util/symbol.c
··· 74 74 75 75 bool symbol_type__is_a(char symbol_type, enum map_type map_type) 76 76 { 77 + symbol_type = toupper(symbol_type); 78 + 77 79 switch (map_type) { 78 80 case MAP__FUNCTION: 79 81 return symbol_type == 'T' || symbol_type == 'W'; 80 82 case MAP__VARIABLE: 81 - return symbol_type == 'D' || symbol_type == 'd'; 83 + return symbol_type == 'D'; 82 84 default: 83 85 return false; 86 + } 87 + } 88 + 89 + static int prefix_underscores_count(const char *str) 90 + { 91 + const char *tail = str; 92 + 93 + while (*tail == '_') 94 + tail++; 95 + 96 + return tail - str; 97 + } 98 + 99 + #define SYMBOL_A 0 100 + #define SYMBOL_B 1 101 + 102 + static int choose_best_symbol(struct symbol *syma, struct symbol *symb) 103 + { 104 + s64 a; 105 + s64 b; 106 + 107 + /* Prefer a symbol with non zero length */ 108 + a = syma->end - syma->start; 109 + b = symb->end - symb->start; 110 + if ((b == 0) && (a > 0)) 111 + return SYMBOL_A; 112 + else if ((a == 0) && (b > 0)) 113 + return SYMBOL_B; 114 + 115 + /* Prefer a non weak symbol over a weak one */ 116 + a = syma->binding == STB_WEAK; 117 + b = symb->binding == STB_WEAK; 118 + if (b && !a) 119 + return SYMBOL_A; 120 + if (a && !b) 121 + return SYMBOL_B; 122 + 123 + /* Prefer a global symbol over a non global one */ 124 + a = syma->binding == STB_GLOBAL; 125 + b = symb->binding == STB_GLOBAL; 126 + if (a && !b) 127 + return SYMBOL_A; 128 + if (b && !a) 129 + return SYMBOL_B; 130 + 131 + /* Prefer a symbol with less underscores */ 132 + a = prefix_underscores_count(syma->name); 133 + b = prefix_underscores_count(symb->name); 134 + if (b > a) 135 + return SYMBOL_A; 136 + else if (a > b) 137 + return SYMBOL_B; 138 + 139 + /* If all else fails, choose the symbol with the longest name */ 140 + if (strlen(syma->name) >= strlen(symb->name)) 141 + return SYMBOL_A; 142 + else 143 + return SYMBOL_B; 144 + } 145 + 146 + static void symbols__fixup_duplicate(struct rb_root *symbols) 147 + { 148 + struct rb_node *nd; 149 + struct symbol *curr, *next; 150 + 151 + nd = rb_first(symbols); 152 + 153 + while (nd) { 154 + curr = rb_entry(nd, struct symbol, rb_node); 155 + again: 156 + nd = rb_next(&curr->rb_node); 157 + next = rb_entry(nd, struct symbol, rb_node); 158 + 159 + if (!nd) 160 + break; 161 + 162 + if (curr->start != next->start) 163 + continue; 164 + 165 + if (choose_best_symbol(curr, next) == SYMBOL_A) { 166 + rb_erase(&next->rb_node, symbols); 167 + goto again; 168 + } else { 169 + nd = rb_next(&curr->rb_node); 170 + rb_erase(&curr->rb_node, symbols); 171 + } 84 172 } 85 173 } 86 174 ··· 526 438 char *line = NULL; 527 439 size_t n; 528 440 int err = -1; 529 - u64 prev_start = 0; 530 - char prev_symbol_type = 0; 531 - char *prev_symbol_name; 532 441 FILE *file = fopen(filename, "r"); 533 442 534 443 if (file == NULL) 535 444 goto out_failure; 536 - 537 - prev_symbol_name = malloc(KSYM_NAME_LEN); 538 - if (prev_symbol_name == NULL) 539 - goto out_close; 540 445 541 446 err = 0; 542 447 ··· 551 470 if (len + 2 >= line_len) 552 471 continue; 553 472 554 - symbol_type = toupper(line[len]); 473 + symbol_type = line[len]; 555 474 len += 2; 556 475 symbol_name = line + len; 557 476 len = line_len - len; ··· 561 480 break; 562 481 } 563 482 564 - if (prev_symbol_type) { 565 - u64 end = start; 566 - if (end != prev_start) 567 - --end; 568 - err = process_symbol(arg, prev_symbol_name, 569 - prev_symbol_type, prev_start, end); 570 - if (err) 571 - break; 572 - } 573 - 574 - memcpy(prev_symbol_name, symbol_name, len + 1); 575 - prev_symbol_type = symbol_type; 576 - prev_start = start; 483 + /* 484 + * module symbols are not sorted so we add all 485 + * symbols with zero length and rely on 486 + * symbols__fixup_end() to fix it up. 487 + */ 488 + err = process_symbol(arg, symbol_name, 489 + symbol_type, start, start); 490 + if (err) 491 + break; 577 492 } 578 493 579 - free(prev_symbol_name); 580 494 free(line); 581 - out_close: 582 495 fclose(file); 583 496 return err; 584 497 ··· 777 702 778 703 if (dso__load_all_kallsyms(dso, filename, map) < 0) 779 704 return -1; 705 + 706 + symbols__fixup_duplicate(&dso->symbols[map->type]); 707 + symbols__fixup_end(&dso->symbols[map->type]); 780 708 781 709 if (dso->kernel == DSO_TYPE_GUEST_KERNEL) 782 710 dso->symtab_type = SYMTAB__GUEST_KALLSYMS; ··· 1170 1092 if (dso->has_build_id) { 1171 1093 u8 build_id[BUILD_ID_SIZE]; 1172 1094 1173 - if (elf_read_build_id(elf, build_id, 1174 - BUILD_ID_SIZE) != BUILD_ID_SIZE) 1095 + if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) 1175 1096 goto out_elf_end; 1176 1097 1177 1098 if (!dso__build_id_equal(dso, build_id)) ··· 1188 1111 } 1189 1112 1190 1113 opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx); 1114 + if (opdshdr.sh_type != SHT_PROGBITS) 1115 + opdsec = NULL; 1191 1116 if (opdsec) 1192 1117 opddata = elf_rawdata(opdsec, NULL); 1193 1118 ··· 1355 1276 * For misannotated, zeroed, ASM function sizes. 1356 1277 */ 1357 1278 if (nr > 0) { 1279 + symbols__fixup_duplicate(&dso->symbols[map->type]); 1358 1280 symbols__fixup_end(&dso->symbols[map->type]); 1359 1281 if (kmap) { 1360 1282 /* ··· 1442 1362 ptr = data->d_buf; 1443 1363 while (ptr < (data->d_buf + data->d_size)) { 1444 1364 GElf_Nhdr *nhdr = ptr; 1445 - int namesz = NOTE_ALIGN(nhdr->n_namesz), 1446 - descsz = NOTE_ALIGN(nhdr->n_descsz); 1365 + size_t namesz = NOTE_ALIGN(nhdr->n_namesz), 1366 + descsz = NOTE_ALIGN(nhdr->n_descsz); 1447 1367 const char *name; 1448 1368 1449 1369 ptr += sizeof(*nhdr); ··· 1452 1372 if (nhdr->n_type == NT_GNU_BUILD_ID && 1453 1373 nhdr->n_namesz == sizeof("GNU")) { 1454 1374 if (memcmp(name, "GNU", sizeof("GNU")) == 0) { 1455 - memcpy(bf, ptr, BUILD_ID_SIZE); 1456 - err = BUILD_ID_SIZE; 1375 + size_t sz = min(size, descsz); 1376 + memcpy(bf, ptr, sz); 1377 + memset(bf + sz, 0, size - sz); 1378 + err = descsz; 1457 1379 break; 1458 1380 } 1459 1381 } ··· 1507 1425 while (1) { 1508 1426 char bf[BUFSIZ]; 1509 1427 GElf_Nhdr nhdr; 1510 - int namesz, descsz; 1428 + size_t namesz, descsz; 1511 1429 1512 1430 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) 1513 1431 break; ··· 1516 1434 descsz = NOTE_ALIGN(nhdr.n_descsz); 1517 1435 if (nhdr.n_type == NT_GNU_BUILD_ID && 1518 1436 nhdr.n_namesz == sizeof("GNU")) { 1519 - if (read(fd, bf, namesz) != namesz) 1437 + if (read(fd, bf, namesz) != (ssize_t)namesz) 1520 1438 break; 1521 1439 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) { 1522 - if (read(fd, build_id, 1523 - BUILD_ID_SIZE) == BUILD_ID_SIZE) { 1440 + size_t sz = min(descsz, size); 1441 + if (read(fd, build_id, sz) == (ssize_t)sz) { 1442 + memset(build_id + sz, 0, size - sz); 1524 1443 err = 0; 1525 1444 break; 1526 1445 } 1527 - } else if (read(fd, bf, descsz) != descsz) 1446 + } else if (read(fd, bf, descsz) != (ssize_t)descsz) 1528 1447 break; 1529 1448 } else { 1530 1449 int n = namesz + descsz;
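Net effect of the symbol.c changes: the kallsyms parser no longer tries to compute a symbol's end while streaming (module symbols are not address-sorted, so the old look-behind logic could produce bogus ranges) and instead inserts every symbol with zero length; symbols__fixup_duplicate() then resolves same-address aliases using choose_best_symbol()'s preference order (non-zero size, then strong over weak, global over local, fewer leading underscores, longer name), and the existing symbols__fixup_end() pass fills in the remaining end addresses from each successor's start. The build-id readers are also hardened to honour the caller's buffer size and to zero-pad short descriptors instead of unconditionally copying BUILD_ID_SIZE bytes.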