Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull another networking update from David Miller:
"Small follow-up to the main merge pull from the other day:

1) Alexander Duyck's DMA memory barrier patch set.

2) cxgb4 driver fixes from Karen Xie.

3) Add missing export of fixed_phy_register() to modules, from Mark
Salter.

4) DSA bug fixes from Florian Fainelli"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (24 commits)
net/macb: add TX multiqueue support for gem
linux/interrupt.h: remove the definition of unused tasklet_hi_enable
jme: replace calls to redundant function
net: ethernet: davicom: Allow to select DM9000 for nios2
net: ethernet: smsc: Allow to select SMC91X for nios2
cxgb4: Add support for QSA modules
libcxgbi: fix freeing skb prematurely
cxgb4i: use set_wr_txq() to set tx queues
cxgb4i: handle non-pdu-aligned rx data
cxgb4i: additional types of negative advice
cxgb4/cxgb4i: set the max. pdu length in firmware
cxgb4i: fix credit check for tx_data_wr
cxgb4i: fix tx immediate data credit check
net: phy: export fixed_phy_register()
fib_trie: Fix trie balancing issue if new node pushes down existing node
vlan: Add ability to always enable TSO/UFO
r8169:update rtl8168g pcie ephy parameter
net: dsa: bcm_sf2: force link for all fixed PHY devices
fm10k/igb/ixgbe: Use dma_rmb on Rx descriptor reads
r8169: Use dma_rmb() and dma_wmb() for DescOwn checks
...

+786 -440
+42
Documentation/memory-barriers.txt
··· 1633 1633 operations" subsection for information on where to use these. 1634 1634 1635 1635 1636 + (*) dma_wmb(); 1637 + (*) dma_rmb(); 1638 + 1639 + These are for use with consistent memory to guarantee the ordering 1640 + of writes or reads of shared memory accessible to both the CPU and a 1641 + DMA capable device. 1642 + 1643 + For example, consider a device driver that shares memory with a device 1644 + and uses a descriptor status value to indicate if the descriptor belongs 1645 + to the device or the CPU, and a doorbell to notify it when new 1646 + descriptors are available: 1647 + 1648 + if (desc->status != DEVICE_OWN) { 1649 + /* do not read data until we own descriptor */ 1650 + dma_rmb(); 1651 + 1652 + /* read/modify data */ 1653 + read_data = desc->data; 1654 + desc->data = write_data; 1655 + 1656 + /* flush modifications before status update */ 1657 + dma_wmb(); 1658 + 1659 + /* assign ownership */ 1660 + desc->status = DEVICE_OWN; 1661 + 1662 + /* force memory to sync before notifying device via MMIO */ 1663 + wmb(); 1664 + 1665 + /* notify device of new descriptors */ 1666 + writel(DESC_NOTIFY, doorbell); 1667 + } 1668 + 1669 + The dma_rmb() allows us to guarantee the device has released ownership 1670 + before we read the data from the descriptor, and the dma_wmb() allows 1671 + us to guarantee the data is written to the descriptor before the device 1672 + can see it now has ownership. The wmb() is needed to guarantee that the 1673 + cache coherent memory writes have completed before attempting a write to 1674 + the cache incoherent MMIO region. 1675 + 1676 + See Documentation/DMA-API.txt for more information on consistent memory. 1677 + 1636 1678 MMIO WRITE BARRIER 1637 1679 ------------------ 1638 1680
+51
arch/alpha/include/asm/barrier.h
··· 7 7 #define rmb() __asm__ __volatile__("mb": : :"memory") 8 8 #define wmb() __asm__ __volatile__("wmb": : :"memory") 9 9 10 + /** 11 + * read_barrier_depends - Flush all pending reads that subsequent reads 12 + * depend on. 13 + * 14 + * No data-dependent reads from memory-like regions are ever reordered 15 + * over this barrier. All reads preceding this primitive are guaranteed 16 + * to access memory (but not necessarily other CPUs' caches) before any 17 + * reads following this primitive that depend on the data returned by 18 + * any of the preceding reads. This primitive is much lighter weight than 19 + * rmb() on most CPUs, and is never heavier weight than is 20 + * rmb(). 21 + * 22 + * These ordering constraints are respected by both the local CPU 23 + * and the compiler. 24 + * 25 + * Ordering is not guaranteed by anything other than these primitives, 26 + * not even by data dependencies. See the documentation for 27 + * memory_barrier() for examples and URLs to more information. 28 + * 29 + * For example, the following code would force ordering (the initial 30 + * value of "a" is zero, "b" is one, and "p" is "&a"): 31 + * 32 + * <programlisting> 33 + * CPU 0 CPU 1 34 + * 35 + * b = 2; 36 + * memory_barrier(); 37 + * p = &b; q = p; 38 + * read_barrier_depends(); 39 + * d = *q; 40 + * </programlisting> 41 + * 42 + * because the read of "*q" depends on the read of "p" and these 43 + * two reads are separated by a read_barrier_depends(). However, 44 + * the following code, with the same initial values for "a" and "b": 45 + * 46 + * <programlisting> 47 + * CPU 0 CPU 1 48 + * 49 + * a = 2; 50 + * memory_barrier(); 51 + * b = 3; y = b; 52 + * read_barrier_depends(); 53 + * x = a; 54 + * </programlisting> 55 + * 56 + * does not enforce ordering, since there is no data dependency between 57 + * the read of "a" and the read of "b". Therefore, on some CPUs, such 58 + * as Alpha, "y" could be set to 3 and "x" to 0. 
Use rmb() 59 + * in cases like this where there are no data dependencies. 60 + */ 10 61 #define read_barrier_depends() __asm__ __volatile__("mb": : :"memory") 11 62 12 63 #ifdef CONFIG_SMP
+4
arch/arm/include/asm/barrier.h
··· 43 43 #define mb() do { dsb(); outer_sync(); } while (0) 44 44 #define rmb() dsb() 45 45 #define wmb() do { dsb(st); outer_sync(); } while (0) 46 + #define dma_rmb() dmb(osh) 47 + #define dma_wmb() dmb(oshst) 46 48 #else 47 49 #define mb() barrier() 48 50 #define rmb() barrier() 49 51 #define wmb() barrier() 52 + #define dma_rmb() barrier() 53 + #define dma_wmb() barrier() 50 54 #endif 51 55 52 56 #ifndef CONFIG_SMP
+3
arch/arm64/include/asm/barrier.h
··· 32 32 #define rmb() dsb(ld) 33 33 #define wmb() dsb(st) 34 34 35 + #define dma_rmb() dmb(oshld) 36 + #define dma_wmb() dmb(oshst) 37 + 35 38 #ifndef CONFIG_SMP 36 39 #define smp_mb() barrier() 37 40 #define smp_rmb() barrier()
+51
arch/blackfin/include/asm/barrier.h
··· 22 22 # define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0) 23 23 # define rmb() do { barrier(); smp_check_barrier(); } while (0) 24 24 # define wmb() do { barrier(); smp_mark_barrier(); } while (0) 25 + /* 26 + * read_barrier_depends - Flush all pending reads that subsequent reads 27 + * depend on. 28 + * 29 + * No data-dependent reads from memory-like regions are ever reordered 30 + * over this barrier. All reads preceding this primitive are guaranteed 31 + * to access memory (but not necessarily other CPUs' caches) before any 32 + * reads following this primitive that depend on the data returned by 33 + * any of the preceding reads. This primitive is much lighter weight than 34 + * rmb() on most CPUs, and is never heavier weight than is 35 + * rmb(). 36 + * 37 + * These ordering constraints are respected by both the local CPU 38 + * and the compiler. 39 + * 40 + * Ordering is not guaranteed by anything other than these primitives, 41 + * not even by data dependencies. See the documentation for 42 + * memory_barrier() for examples and URLs to more information. 43 + * 44 + * For example, the following code would force ordering (the initial 45 + * value of "a" is zero, "b" is one, and "p" is "&a"): 46 + * 47 + * <programlisting> 48 + * CPU 0 CPU 1 49 + * 50 + * b = 2; 51 + * memory_barrier(); 52 + * p = &b; q = p; 53 + * read_barrier_depends(); 54 + * d = *q; 55 + * </programlisting> 56 + * 57 + * because the read of "*q" depends on the read of "p" and these 58 + * two reads are separated by a read_barrier_depends(). However, 59 + * the following code, with the same initial values for "a" and "b": 60 + * 61 + * <programlisting> 62 + * CPU 0 CPU 1 63 + * 64 + * a = 2; 65 + * memory_barrier(); 66 + * b = 3; y = b; 67 + * read_barrier_depends(); 68 + * x = a; 69 + * </programlisting> 70 + * 71 + * does not enforce ordering, since there is no data dependency between 72 + * the read of "a" and the read of "b". 
Therefore, on some CPUs, such 73 + * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() 74 + * in cases like this where there are no data dependencies. 75 + */ 25 76 # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) 26 77 #endif 27 78
+12 -13
arch/ia64/include/asm/barrier.h
··· 35 35 * it's (presumably) much slower than mf and (b) mf.a is supported for 36 36 * sequential memory pages only. 37 37 */ 38 - #define mb() ia64_mf() 39 - #define rmb() mb() 40 - #define wmb() mb() 41 - #define read_barrier_depends() do { } while(0) 38 + #define mb() ia64_mf() 39 + #define rmb() mb() 40 + #define wmb() mb() 41 + 42 + #define dma_rmb() mb() 43 + #define dma_wmb() mb() 42 44 43 45 #ifdef CONFIG_SMP 44 46 # define smp_mb() mb() 45 - # define smp_rmb() rmb() 46 - # define smp_wmb() wmb() 47 - # define smp_read_barrier_depends() read_barrier_depends() 48 - 49 47 #else 50 - 51 48 # define smp_mb() barrier() 52 - # define smp_rmb() barrier() 53 - # define smp_wmb() barrier() 54 - # define smp_read_barrier_depends() do { } while(0) 55 - 56 49 #endif 50 + 51 + #define smp_rmb() smp_mb() 52 + #define smp_wmb() smp_mb() 53 + 54 + #define read_barrier_depends() do { } while (0) 55 + #define smp_read_barrier_depends() do { } while (0) 57 56 58 57 #define smp_mb__before_atomic() barrier() 59 58 #define smp_mb__after_atomic() barrier()
+10 -9
arch/metag/include/asm/barrier.h
··· 4 4 #include <asm/metag_mem.h> 5 5 6 6 #define nop() asm volatile ("NOP") 7 - #define mb() wmb() 8 - #define rmb() barrier() 9 7 10 8 #ifdef CONFIG_METAG_META21 11 9 ··· 39 41 40 42 #endif /* !CONFIG_METAG_META21 */ 41 43 42 - static inline void wmb(void) 43 - { 44 - /* flush writes through the write combiner */ 45 - wr_fence(); 46 - } 44 + /* flush writes through the write combiner */ 45 + #define mb() wr_fence() 46 + #define rmb() barrier() 47 + #define wmb() mb() 47 48 48 - #define read_barrier_depends() do { } while (0) 49 + #define dma_rmb() rmb() 50 + #define dma_wmb() wmb() 49 51 50 52 #ifndef CONFIG_SMP 51 53 #define fence() do { } while (0) ··· 80 82 #define smp_wmb() barrier() 81 83 #endif 82 84 #endif 83 - #define smp_read_barrier_depends() do { } while (0) 85 + 86 + #define read_barrier_depends() do { } while (0) 87 + #define smp_read_barrier_depends() do { } while (0) 88 + 84 89 #define set_mb(var, value) do { var = value; smp_mb(); } while (0) 85 90 86 91 #define smp_store_release(p, v) \
+5 -56
arch/mips/include/asm/barrier.h
··· 10 10 11 11 #include <asm/addrspace.h> 12 12 13 - /* 14 - * read_barrier_depends - Flush all pending reads that subsequents reads 15 - * depend on. 16 - * 17 - * No data-dependent reads from memory-like regions are ever reordered 18 - * over this barrier. All reads preceding this primitive are guaranteed 19 - * to access memory (but not necessarily other CPUs' caches) before any 20 - * reads following this primitive that depend on the data return by 21 - * any of the preceding reads. This primitive is much lighter weight than 22 - * rmb() on most CPUs, and is never heavier weight than is 23 - * rmb(). 24 - * 25 - * These ordering constraints are respected by both the local CPU 26 - * and the compiler. 27 - * 28 - * Ordering is not guaranteed by anything other than these primitives, 29 - * not even by data dependencies. See the documentation for 30 - * memory_barrier() for examples and URLs to more information. 31 - * 32 - * For example, the following code would force ordering (the initial 33 - * value of "a" is zero, "b" is one, and "p" is "&a"): 34 - * 35 - * <programlisting> 36 - * CPU 0 CPU 1 37 - * 38 - * b = 2; 39 - * memory_barrier(); 40 - * p = &b; q = p; 41 - * read_barrier_depends(); 42 - * d = *q; 43 - * </programlisting> 44 - * 45 - * because the read of "*q" depends on the read of "p" and these 46 - * two reads are separated by a read_barrier_depends(). However, 47 - * the following code, with the same initial values for "a" and "b": 48 - * 49 - * <programlisting> 50 - * CPU 0 CPU 1 51 - * 52 - * a = 2; 53 - * memory_barrier(); 54 - * b = 3; y = b; 55 - * read_barrier_depends(); 56 - * x = a; 57 - * </programlisting> 58 - * 59 - * does not enforce ordering, since there is no data dependency between 60 - * the read of "a" and the read of "b". Therefore, on some CPUs, such 61 - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() 62 - * in cases like this where there are no data dependencies. 
63 - */ 64 - 65 13 #define read_barrier_depends() do { } while(0) 66 14 #define smp_read_barrier_depends() do { } while(0) 67 15 ··· 75 127 76 128 #include <asm/wbflush.h> 77 129 78 - #define wmb() fast_wmb() 79 - #define rmb() fast_rmb() 80 130 #define mb() wbflush() 81 131 #define iob() wbflush() 82 132 83 133 #else /* !CONFIG_CPU_HAS_WB */ 84 134 85 - #define wmb() fast_wmb() 86 - #define rmb() fast_rmb() 87 135 #define mb() fast_mb() 88 136 #define iob() fast_iob() 89 137 90 138 #endif /* !CONFIG_CPU_HAS_WB */ 139 + 140 + #define wmb() fast_wmb() 141 + #define rmb() fast_rmb() 142 + #define dma_wmb() fast_wmb() 143 + #define dma_rmb() fast_rmb() 91 144 92 145 #if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP) 93 146 # ifdef CONFIG_CPU_CAVIUM_OCTEON
+11 -8
arch/powerpc/include/asm/barrier.h
··· 33 33 #define mb() __asm__ __volatile__ ("sync" : : : "memory") 34 34 #define rmb() __asm__ __volatile__ ("sync" : : : "memory") 35 35 #define wmb() __asm__ __volatile__ ("sync" : : : "memory") 36 - #define read_barrier_depends() do { } while(0) 37 36 38 37 #define set_mb(var, value) do { var = value; mb(); } while (0) 39 - 40 - #ifdef CONFIG_SMP 41 38 42 39 #ifdef __SUBARCH_HAS_LWSYNC 43 40 # define SMPWMB LWSYNC ··· 43 46 #endif 44 47 45 48 #define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") 49 + #define dma_rmb() __lwsync() 50 + #define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") 51 + 52 + #ifdef CONFIG_SMP 53 + #define smp_lwsync() __lwsync() 46 54 47 55 #define smp_mb() mb() 48 56 #define smp_rmb() __lwsync() 49 57 #define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") 50 - #define smp_read_barrier_depends() read_barrier_depends() 51 58 #else 52 - #define __lwsync() barrier() 59 + #define smp_lwsync() barrier() 53 60 54 61 #define smp_mb() barrier() 55 62 #define smp_rmb() barrier() 56 63 #define smp_wmb() barrier() 57 - #define smp_read_barrier_depends() do { } while(0) 58 64 #endif /* CONFIG_SMP */ 65 + 66 + #define read_barrier_depends() do { } while (0) 67 + #define smp_read_barrier_depends() do { } while (0) 59 68 60 69 /* 61 70 * This is a barrier which prevents following instructions from being ··· 75 72 #define smp_store_release(p, v) \ 76 73 do { \ 77 74 compiletime_assert_atomic_type(*p); \ 78 - __lwsync(); \ 75 + smp_lwsync(); \ 79 76 ACCESS_ONCE(*p) = (v); \ 80 77 } while (0) 81 78 ··· 83 80 ({ \ 84 81 typeof(*p) ___p1 = ACCESS_ONCE(*p); \ 85 82 compiletime_assert_atomic_type(*p); \ 86 - __lwsync(); \ 83 + smp_lwsync(); \ 87 84 ___p1; \ 88 85 }) 89 86
+5 -2
arch/s390/include/asm/barrier.h
··· 24 24 25 25 #define rmb() mb() 26 26 #define wmb() mb() 27 - #define read_barrier_depends() do { } while(0) 27 + #define dma_rmb() rmb() 28 + #define dma_wmb() wmb() 28 29 #define smp_mb() mb() 29 30 #define smp_rmb() rmb() 30 31 #define smp_wmb() wmb() 31 - #define smp_read_barrier_depends() read_barrier_depends() 32 + 33 + #define read_barrier_depends() do { } while (0) 34 + #define smp_read_barrier_depends() do { } while (0) 32 35 33 36 #define smp_mb__before_atomic() smp_mb() 34 37 #define smp_mb__after_atomic() smp_mb()
+5 -2
arch/sparc/include/asm/barrier_64.h
··· 37 37 #define rmb() __asm__ __volatile__("":::"memory") 38 38 #define wmb() __asm__ __volatile__("":::"memory") 39 39 40 - #define read_barrier_depends() do { } while(0) 40 + #define dma_rmb() rmb() 41 + #define dma_wmb() wmb() 42 + 41 43 #define set_mb(__var, __value) \ 42 44 do { __var = __value; membar_safe("#StoreLoad"); } while(0) 43 45 ··· 53 51 #define smp_wmb() __asm__ __volatile__("":::"memory") 54 52 #endif 55 53 56 - #define smp_read_barrier_depends() do { } while(0) 54 + #define read_barrier_depends() do { } while (0) 55 + #define smp_read_barrier_depends() do { } while (0) 57 56 58 57 #define smp_store_release(p, v) \ 59 58 do { \
+10 -60
arch/x86/include/asm/barrier.h
··· 24 24 #define wmb() asm volatile("sfence" ::: "memory") 25 25 #endif 26 26 27 - /** 28 - * read_barrier_depends - Flush all pending reads that subsequents reads 29 - * depend on. 30 - * 31 - * No data-dependent reads from memory-like regions are ever reordered 32 - * over this barrier. All reads preceding this primitive are guaranteed 33 - * to access memory (but not necessarily other CPUs' caches) before any 34 - * reads following this primitive that depend on the data return by 35 - * any of the preceding reads. This primitive is much lighter weight than 36 - * rmb() on most CPUs, and is never heavier weight than is 37 - * rmb(). 38 - * 39 - * These ordering constraints are respected by both the local CPU 40 - * and the compiler. 41 - * 42 - * Ordering is not guaranteed by anything other than these primitives, 43 - * not even by data dependencies. See the documentation for 44 - * memory_barrier() for examples and URLs to more information. 45 - * 46 - * For example, the following code would force ordering (the initial 47 - * value of "a" is zero, "b" is one, and "p" is "&a"): 48 - * 49 - * <programlisting> 50 - * CPU 0 CPU 1 51 - * 52 - * b = 2; 53 - * memory_barrier(); 54 - * p = &b; q = p; 55 - * read_barrier_depends(); 56 - * d = *q; 57 - * </programlisting> 58 - * 59 - * because the read of "*q" depends on the read of "p" and these 60 - * two reads are separated by a read_barrier_depends(). However, 61 - * the following code, with the same initial values for "a" and "b": 62 - * 63 - * <programlisting> 64 - * CPU 0 CPU 1 65 - * 66 - * a = 2; 67 - * memory_barrier(); 68 - * b = 3; y = b; 69 - * read_barrier_depends(); 70 - * x = a; 71 - * </programlisting> 72 - * 73 - * does not enforce ordering, since there is no data dependency between 74 - * the read of "a" and the read of "b". Therefore, on some CPUs, such 75 - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() 76 - * in cases like this where there are no data dependencies. 
77 - **/ 78 - 79 - #define read_barrier_depends() do { } while (0) 27 + #ifdef CONFIG_X86_PPRO_FENCE 28 + #define dma_rmb() rmb() 29 + #else 30 + #define dma_rmb() barrier() 31 + #endif 32 + #define dma_wmb() barrier() 80 33 81 34 #ifdef CONFIG_SMP 82 35 #define smp_mb() mb() 83 - #ifdef CONFIG_X86_PPRO_FENCE 84 - # define smp_rmb() rmb() 85 - #else 86 - # define smp_rmb() barrier() 87 - #endif 36 + #define smp_rmb() dma_rmb() 88 37 #define smp_wmb() barrier() 89 - #define smp_read_barrier_depends() read_barrier_depends() 90 38 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) 91 39 #else /* !SMP */ 92 40 #define smp_mb() barrier() 93 41 #define smp_rmb() barrier() 94 42 #define smp_wmb() barrier() 95 - #define smp_read_barrier_depends() do { } while (0) 96 43 #define set_mb(var, value) do { var = value; barrier(); } while (0) 97 44 #endif /* SMP */ 45 + 46 + #define read_barrier_depends() do { } while (0) 47 + #define smp_read_barrier_depends() do { } while (0) 98 48 99 49 #if defined(CONFIG_X86_PPRO_FENCE) 100 50
+10 -10
arch/x86/um/asm/barrier.h
··· 29 29 30 30 #endif /* CONFIG_X86_32 */ 31 31 32 - #define read_barrier_depends() do { } while (0) 32 + #ifdef CONFIG_X86_PPRO_FENCE 33 + #define dma_rmb() rmb() 34 + #else /* CONFIG_X86_PPRO_FENCE */ 35 + #define dma_rmb() barrier() 36 + #endif /* CONFIG_X86_PPRO_FENCE */ 37 + #define dma_wmb() barrier() 33 38 34 39 #ifdef CONFIG_SMP 35 40 36 41 #define smp_mb() mb() 37 - #ifdef CONFIG_X86_PPRO_FENCE 38 - #define smp_rmb() rmb() 39 - #else /* CONFIG_X86_PPRO_FENCE */ 40 - #define smp_rmb() barrier() 41 - #endif /* CONFIG_X86_PPRO_FENCE */ 42 - 42 + #define smp_rmb() dma_rmb() 43 43 #define smp_wmb() barrier() 44 - 45 - #define smp_read_barrier_depends() read_barrier_depends() 46 44 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) 47 45 48 46 #else /* CONFIG_SMP */ ··· 48 50 #define smp_mb() barrier() 49 51 #define smp_rmb() barrier() 50 52 #define smp_wmb() barrier() 51 - #define smp_read_barrier_depends() do { } while (0) 52 53 #define set_mb(var, value) do { var = value; barrier(); } while (0) 53 54 54 55 #endif /* CONFIG_SMP */ 56 + 57 + #define read_barrier_depends() do { } while (0) 58 + #define smp_read_barrier_depends() do { } while (0) 55 59 56 60 /* 57 61 * Stop RDTSC speculation. This is needed when you need to use RDTSC
+13 -10
drivers/net/dsa/bcm_sf2.c
··· 684 684 struct fixed_phy_status *status) 685 685 { 686 686 struct bcm_sf2_priv *priv = ds_to_priv(ds); 687 - u32 link, duplex, pause, speed; 687 + u32 duplex, pause, speed; 688 688 u32 reg; 689 689 690 - link = core_readl(priv, CORE_LNKSTS); 691 690 duplex = core_readl(priv, CORE_DUPSTS); 692 691 pause = core_readl(priv, CORE_PAUSESTS); 693 692 speed = core_readl(priv, CORE_SPDSTS); ··· 700 701 * which means that we need to force the link at the port override 701 702 * level to get the data to flow. We do use what the interrupt handler 702 703 * did determine before. 704 + * 705 + * For the other ports, we just force the link status, since this is 706 + * a fixed PHY device. 703 707 */ 704 708 if (port == 7) { 705 709 status->link = priv->port_sts[port].link; 706 - reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(7)); 707 - reg |= SW_OVERRIDE; 708 - if (status->link) 709 - reg |= LINK_STS; 710 - else 711 - reg &= ~LINK_STS; 712 - core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(7)); 713 710 status->duplex = 1; 714 711 } else { 715 - status->link = !!(link & (1 << port)); 712 + status->link = 1; 716 713 status->duplex = !!(duplex & (1 << port)); 717 714 } 715 + 716 + reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port)); 717 + reg |= SW_OVERRIDE; 718 + if (status->link) 719 + reg |= LINK_STS; 720 + else 721 + reg &= ~LINK_STS; 722 + core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); 718 723 719 724 switch (speed) { 720 725 case SPDSTS_10:
+312 -174
drivers/net/ethernet/cadence/macb.c
··· 66 66 return index & (TX_RING_SIZE - 1); 67 67 } 68 68 69 - static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index) 69 + static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, 70 + unsigned int index) 70 71 { 71 - return &bp->tx_ring[macb_tx_ring_wrap(index)]; 72 + return &queue->tx_ring[macb_tx_ring_wrap(index)]; 72 73 } 73 74 74 - static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index) 75 + static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, 76 + unsigned int index) 75 77 { 76 - return &bp->tx_skb[macb_tx_ring_wrap(index)]; 78 + return &queue->tx_skb[macb_tx_ring_wrap(index)]; 77 79 } 78 80 79 - static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index) 81 + static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) 80 82 { 81 83 dma_addr_t offset; 82 84 83 85 offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc); 84 86 85 - return bp->tx_ring_dma + offset; 87 + return queue->tx_ring_dma + offset; 86 88 } 87 89 88 90 static unsigned int macb_rx_ring_wrap(unsigned int index) ··· 492 490 493 491 static void macb_tx_error_task(struct work_struct *work) 494 492 { 495 - struct macb *bp = container_of(work, struct macb, tx_error_task); 493 + struct macb_queue *queue = container_of(work, struct macb_queue, 494 + tx_error_task); 495 + struct macb *bp = queue->bp; 496 496 struct macb_tx_skb *tx_skb; 497 + struct macb_dma_desc *desc; 497 498 struct sk_buff *skb; 498 499 unsigned int tail; 500 + unsigned long flags; 499 501 500 - netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n", 501 - bp->tx_tail, bp->tx_head); 502 + netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", 503 + (unsigned int)(queue - bp->queues), 504 + queue->tx_tail, queue->tx_head); 505 + 506 + /* Prevent the queue IRQ handlers from running: each of them may call 507 + * macb_tx_interrupt(), which in turn may call netif_wake_subqueue(). 
508 + * As explained below, we have to halt the transmission before updating 509 + * TBQP registers so we call netif_tx_stop_all_queues() to notify the 510 + * network engine about the macb/gem being halted. 511 + */ 512 + spin_lock_irqsave(&bp->lock, flags); 502 513 503 514 /* Make sure nobody is trying to queue up new packets */ 504 - netif_stop_queue(bp->dev); 515 + netif_tx_stop_all_queues(bp->dev); 505 516 506 517 /* 507 518 * Stop transmission now 508 519 * (in case we have just queued new packets) 520 + * macb/gem must be halted to write TBQP register 509 521 */ 510 522 if (macb_halt_tx(bp)) 511 523 /* Just complain for now, reinitializing TX path can be good */ 512 524 netdev_err(bp->dev, "BUG: halt tx timed out\n"); 513 525 514 - /* No need for the lock here as nobody will interrupt us anymore */ 515 - 516 526 /* 517 527 * Treat frames in TX queue including the ones that caused the error. 518 528 * Free transmit buffers in upper layer. 519 529 */ 520 - for (tail = bp->tx_tail; tail != bp->tx_head; tail++) { 521 - struct macb_dma_desc *desc; 522 - u32 ctrl; 530 + for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { 531 + u32 ctrl; 523 532 524 - desc = macb_tx_desc(bp, tail); 533 + desc = macb_tx_desc(queue, tail); 525 534 ctrl = desc->ctrl; 526 - tx_skb = macb_tx_skb(bp, tail); 535 + tx_skb = macb_tx_skb(queue, tail); 527 536 skb = tx_skb->skb; 528 537 529 538 if (ctrl & MACB_BIT(TX_USED)) { ··· 542 529 while (!skb) { 543 530 macb_tx_unmap(bp, tx_skb); 544 531 tail++; 545 - tx_skb = macb_tx_skb(bp, tail); 532 + tx_skb = macb_tx_skb(queue, tail); 546 533 skb = tx_skb->skb; 547 534 } 548 535 ··· 571 558 macb_tx_unmap(bp, tx_skb); 572 559 } 573 560 561 + /* Set end of TX queue */ 562 + desc = macb_tx_desc(queue, 0); 563 + desc->addr = 0; 564 + desc->ctrl = MACB_BIT(TX_USED); 565 + 574 566 /* Make descriptor updates visible to hardware */ 575 567 wmb(); 576 568 577 569 /* Reinitialize the TX desc queue */ 578 - macb_writel(bp, TBQP, bp->tx_ring_dma); 
570 + queue_writel(queue, TBQP, queue->tx_ring_dma); 579 571 /* Make TX ring reflect state of hardware */ 580 - bp->tx_head = bp->tx_tail = 0; 581 - 582 - /* Now we are ready to start transmission again */ 583 - netif_wake_queue(bp->dev); 572 + queue->tx_head = 0; 573 + queue->tx_tail = 0; 584 574 585 575 /* Housework before enabling TX IRQ */ 586 576 macb_writel(bp, TSR, macb_readl(bp, TSR)); 587 - macb_writel(bp, IER, MACB_TX_INT_FLAGS); 577 + queue_writel(queue, IER, MACB_TX_INT_FLAGS); 578 + 579 + /* Now we are ready to start transmission again */ 580 + netif_tx_start_all_queues(bp->dev); 581 + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 582 + 583 + spin_unlock_irqrestore(&bp->lock, flags); 588 584 } 589 585 590 - static void macb_tx_interrupt(struct macb *bp) 586 + static void macb_tx_interrupt(struct macb_queue *queue) 591 587 { 592 588 unsigned int tail; 593 589 unsigned int head; 594 590 u32 status; 591 + struct macb *bp = queue->bp; 592 + u16 queue_index = queue - bp->queues; 595 593 596 594 status = macb_readl(bp, TSR); 597 595 macb_writel(bp, TSR, status); 598 596 599 597 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 600 - macb_writel(bp, ISR, MACB_BIT(TCOMP)); 598 + queue_writel(queue, ISR, MACB_BIT(TCOMP)); 601 599 602 600 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", 603 601 (unsigned long)status); 604 602 605 - head = bp->tx_head; 606 - for (tail = bp->tx_tail; tail != head; tail++) { 603 + head = queue->tx_head; 604 + for (tail = queue->tx_tail; tail != head; tail++) { 607 605 struct macb_tx_skb *tx_skb; 608 606 struct sk_buff *skb; 609 607 struct macb_dma_desc *desc; 610 608 u32 ctrl; 611 609 612 - desc = macb_tx_desc(bp, tail); 610 + desc = macb_tx_desc(queue, tail); 613 611 614 612 /* Make hw descriptor updates visible to CPU */ 615 613 rmb(); ··· 635 611 636 612 /* Process all buffers of the current transmitted frame */ 637 613 for (;; tail++) { 638 - tx_skb = macb_tx_skb(bp, tail); 614 + tx_skb = 
macb_tx_skb(queue, tail); 639 615 skb = tx_skb->skb; 640 616 641 617 /* First, update TX stats if needed */ ··· 658 634 } 659 635 } 660 636 661 - bp->tx_tail = tail; 662 - if (netif_queue_stopped(bp->dev) 663 - && CIRC_CNT(bp->tx_head, bp->tx_tail, 664 - TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH) 665 - netif_wake_queue(bp->dev); 637 + queue->tx_tail = tail; 638 + if (__netif_subqueue_stopped(bp->dev, queue_index) && 639 + CIRC_CNT(queue->tx_head, queue->tx_tail, 640 + TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH) 641 + netif_wake_subqueue(bp->dev, queue_index); 666 642 } 667 643 668 644 static void gem_rx_refill(struct macb *bp) ··· 973 949 974 950 static irqreturn_t macb_interrupt(int irq, void *dev_id) 975 951 { 976 - struct net_device *dev = dev_id; 977 - struct macb *bp = netdev_priv(dev); 952 + struct macb_queue *queue = dev_id; 953 + struct macb *bp = queue->bp; 954 + struct net_device *dev = bp->dev; 978 955 u32 status; 979 956 980 - status = macb_readl(bp, ISR); 957 + status = queue_readl(queue, ISR); 981 958 982 959 if (unlikely(!status)) 983 960 return IRQ_NONE; ··· 988 963 while (status) { 989 964 /* close possible race with dev_close */ 990 965 if (unlikely(!netif_running(dev))) { 991 - macb_writel(bp, IDR, -1); 966 + queue_writel(queue, IDR, -1); 992 967 break; 993 968 } 994 969 995 - netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status); 970 + netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", 971 + (unsigned int)(queue - bp->queues), 972 + (unsigned long)status); 996 973 997 974 if (status & MACB_RX_INT_FLAGS) { 998 975 /* ··· 1004 977 * is already scheduled, so disable interrupts 1005 978 * now. 
1006 979 */ 1007 - macb_writel(bp, IDR, MACB_RX_INT_FLAGS); 980 + queue_writel(queue, IDR, MACB_RX_INT_FLAGS); 1008 981 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1009 - macb_writel(bp, ISR, MACB_BIT(RCOMP)); 982 + queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1010 983 1011 984 if (napi_schedule_prep(&bp->napi)) { 1012 985 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); ··· 1015 988 } 1016 989 1017 990 if (unlikely(status & (MACB_TX_ERR_FLAGS))) { 1018 - macb_writel(bp, IDR, MACB_TX_INT_FLAGS); 1019 - schedule_work(&bp->tx_error_task); 991 + queue_writel(queue, IDR, MACB_TX_INT_FLAGS); 992 + schedule_work(&queue->tx_error_task); 1020 993 1021 994 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1022 - macb_writel(bp, ISR, MACB_TX_ERR_FLAGS); 995 + queue_writel(queue, ISR, MACB_TX_ERR_FLAGS); 1023 996 1024 997 break; 1025 998 } 1026 999 1027 1000 if (status & MACB_BIT(TCOMP)) 1028 - macb_tx_interrupt(bp); 1001 + macb_tx_interrupt(queue); 1029 1002 1030 1003 /* 1031 1004 * Link change detection isn't possible with RMII, so we'll ··· 1040 1013 bp->hw_stats.macb.rx_overruns++; 1041 1014 1042 1015 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1043 - macb_writel(bp, ISR, MACB_BIT(ISR_ROVR)); 1016 + queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); 1044 1017 } 1045 1018 1046 1019 if (status & MACB_BIT(HRESP)) { ··· 1052 1025 netdev_err(dev, "DMA bus error: HRESP not OK\n"); 1053 1026 1054 1027 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1055 - macb_writel(bp, ISR, MACB_BIT(HRESP)); 1028 + queue_writel(queue, ISR, MACB_BIT(HRESP)); 1056 1029 } 1057 1030 1058 - status = macb_readl(bp, ISR); 1031 + status = queue_readl(queue, ISR); 1059 1032 } 1060 1033 1061 1034 spin_unlock(&bp->lock); ··· 1070 1043 */ 1071 1044 static void macb_poll_controller(struct net_device *dev) 1072 1045 { 1046 + struct macb *bp = netdev_priv(dev); 1047 + struct macb_queue *queue; 1073 1048 unsigned long flags; 1049 + unsigned int q; 1074 1050 1075 1051 local_irq_save(flags); 1076 - macb_interrupt(dev->irq, 
dev); 1052 + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) 1053 + macb_interrupt(dev->irq, queue); 1077 1054 local_irq_restore(flags); 1078 1055 } 1079 1056 #endif ··· 1089 1058 } 1090 1059 1091 1060 static unsigned int macb_tx_map(struct macb *bp, 1061 + struct macb_queue *queue, 1092 1062 struct sk_buff *skb) 1093 1063 { 1094 1064 dma_addr_t mapping; 1095 - unsigned int len, entry, i, tx_head = bp->tx_head; 1065 + unsigned int len, entry, i, tx_head = queue->tx_head; 1096 1066 struct macb_tx_skb *tx_skb = NULL; 1097 1067 struct macb_dma_desc *desc; 1098 1068 unsigned int offset, size, count = 0; ··· 1107 1075 while (len) { 1108 1076 size = min(len, bp->max_tx_length); 1109 1077 entry = macb_tx_ring_wrap(tx_head); 1110 - tx_skb = &bp->tx_skb[entry]; 1078 + tx_skb = &queue->tx_skb[entry]; 1111 1079 1112 1080 mapping = dma_map_single(&bp->pdev->dev, 1113 1081 skb->data + offset, ··· 1136 1104 while (len) { 1137 1105 size = min(len, bp->max_tx_length); 1138 1106 entry = macb_tx_ring_wrap(tx_head); 1139 - tx_skb = &bp->tx_skb[entry]; 1107 + tx_skb = &queue->tx_skb[entry]; 1140 1108 1141 1109 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 1142 1110 offset, size, DMA_TO_DEVICE); ··· 1175 1143 i = tx_head; 1176 1144 entry = macb_tx_ring_wrap(i); 1177 1145 ctrl = MACB_BIT(TX_USED); 1178 - desc = &bp->tx_ring[entry]; 1146 + desc = &queue->tx_ring[entry]; 1179 1147 desc->ctrl = ctrl; 1180 1148 1181 1149 do { 1182 1150 i--; 1183 1151 entry = macb_tx_ring_wrap(i); 1184 - tx_skb = &bp->tx_skb[entry]; 1185 - desc = &bp->tx_ring[entry]; 1152 + tx_skb = &queue->tx_skb[entry]; 1153 + desc = &queue->tx_ring[entry]; 1186 1154 1187 1155 ctrl = (u32)tx_skb->size; 1188 1156 if (eof) { ··· 1199 1167 */ 1200 1168 wmb(); 1201 1169 desc->ctrl = ctrl; 1202 - } while (i != bp->tx_head); 1170 + } while (i != queue->tx_head); 1203 1171 1204 - bp->tx_head = tx_head; 1172 + queue->tx_head = tx_head; 1205 1173 1206 1174 return count; 1207 1175 1208 1176 dma_error: 1209 1177 
netdev_err(bp->dev, "TX DMA map failed\n"); 1210 1178 1211 - for (i = bp->tx_head; i != tx_head; i++) { 1212 - tx_skb = macb_tx_skb(bp, i); 1179 + for (i = queue->tx_head; i != tx_head; i++) { 1180 + tx_skb = macb_tx_skb(queue, i); 1213 1181 1214 1182 macb_tx_unmap(bp, tx_skb); 1215 1183 } ··· 1219 1187 1220 1188 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) 1221 1189 { 1190 + u16 queue_index = skb_get_queue_mapping(skb); 1222 1191 struct macb *bp = netdev_priv(dev); 1192 + struct macb_queue *queue = &bp->queues[queue_index]; 1223 1193 unsigned long flags; 1224 1194 unsigned int count, nr_frags, frag_size, f; 1225 1195 1226 1196 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 1227 1197 netdev_vdbg(bp->dev, 1228 - "start_xmit: len %u head %p data %p tail %p end %p\n", 1229 - skb->len, skb->head, skb->data, 1198 + "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", 1199 + queue_index, skb->len, skb->head, skb->data, 1230 1200 skb_tail_pointer(skb), skb_end_pointer(skb)); 1231 1201 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, 1232 1202 skb->data, 16, true); ··· 1248 1214 spin_lock_irqsave(&bp->lock, flags); 1249 1215 1250 1216 /* This is a hard error, log it. 
*/ 1251 - if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < count) { 1252 - netif_stop_queue(dev); 1217 + if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) { 1218 + netif_stop_subqueue(dev, queue_index); 1253 1219 spin_unlock_irqrestore(&bp->lock, flags); 1254 1220 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", 1255 - bp->tx_head, bp->tx_tail); 1221 + queue->tx_head, queue->tx_tail); 1256 1222 return NETDEV_TX_BUSY; 1257 1223 } 1258 1224 1259 1225 /* Map socket buffer for DMA transfer */ 1260 - if (!macb_tx_map(bp, skb)) { 1226 + if (!macb_tx_map(bp, queue, skb)) { 1261 1227 dev_kfree_skb_any(skb); 1262 1228 goto unlock; 1263 1229 } ··· 1269 1235 1270 1236 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 1271 1237 1272 - if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) 1273 - netif_stop_queue(dev); 1238 + if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1) 1239 + netif_stop_subqueue(dev, queue_index); 1274 1240 1275 1241 unlock: 1276 1242 spin_unlock_irqrestore(&bp->lock, flags); ··· 1338 1304 1339 1305 static void macb_free_consistent(struct macb *bp) 1340 1306 { 1341 - if (bp->tx_skb) { 1342 - kfree(bp->tx_skb); 1343 - bp->tx_skb = NULL; 1344 - } 1307 + struct macb_queue *queue; 1308 + unsigned int q; 1309 + 1345 1310 bp->macbgem_ops.mog_free_rx_buffers(bp); 1346 1311 if (bp->rx_ring) { 1347 1312 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, 1348 1313 bp->rx_ring, bp->rx_ring_dma); 1349 1314 bp->rx_ring = NULL; 1350 1315 } 1351 - if (bp->tx_ring) { 1352 - dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, 1353 - bp->tx_ring, bp->tx_ring_dma); 1354 - bp->tx_ring = NULL; 1316 + 1317 + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1318 + kfree(queue->tx_skb); 1319 + queue->tx_skb = NULL; 1320 + if (queue->tx_ring) { 1321 + dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, 1322 + queue->tx_ring, queue->tx_ring_dma); 1323 + queue->tx_ring = NULL; 1324 + } 1355 1325 } 1356 1326 
} 1357 1327 ··· 1392 1354 1393 1355 static int macb_alloc_consistent(struct macb *bp) 1394 1356 { 1357 + struct macb_queue *queue; 1358 + unsigned int q; 1395 1359 int size; 1396 1360 1397 - size = TX_RING_SIZE * sizeof(struct macb_tx_skb); 1398 - bp->tx_skb = kmalloc(size, GFP_KERNEL); 1399 - if (!bp->tx_skb) 1400 - goto out_err; 1361 + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1362 + size = TX_RING_BYTES; 1363 + queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 1364 + &queue->tx_ring_dma, 1365 + GFP_KERNEL); 1366 + if (!queue->tx_ring) 1367 + goto out_err; 1368 + netdev_dbg(bp->dev, 1369 + "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n", 1370 + q, size, (unsigned long)queue->tx_ring_dma, 1371 + queue->tx_ring); 1372 + 1373 + size = TX_RING_SIZE * sizeof(struct macb_tx_skb); 1374 + queue->tx_skb = kmalloc(size, GFP_KERNEL); 1375 + if (!queue->tx_skb) 1376 + goto out_err; 1377 + } 1401 1378 1402 1379 size = RX_RING_BYTES; 1403 1380 bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, ··· 1422 1369 netdev_dbg(bp->dev, 1423 1370 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", 1424 1371 size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); 1425 - 1426 - size = TX_RING_BYTES; 1427 - bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 1428 - &bp->tx_ring_dma, GFP_KERNEL); 1429 - if (!bp->tx_ring) 1430 - goto out_err; 1431 - netdev_dbg(bp->dev, 1432 - "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", 1433 - size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); 1434 1372 1435 1373 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) 1436 1374 goto out_err; ··· 1435 1391 1436 1392 static void gem_init_rings(struct macb *bp) 1437 1393 { 1394 + struct macb_queue *queue; 1395 + unsigned int q; 1438 1396 int i; 1439 1397 1440 - for (i = 0; i < TX_RING_SIZE; i++) { 1441 - bp->tx_ring[i].addr = 0; 1442 - bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); 1398 + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 
1399 + for (i = 0; i < TX_RING_SIZE; i++) { 1400 + queue->tx_ring[i].addr = 0; 1401 + queue->tx_ring[i].ctrl = MACB_BIT(TX_USED); 1402 + } 1403 + queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); 1404 + queue->tx_head = 0; 1405 + queue->tx_tail = 0; 1443 1406 } 1444 - bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); 1445 1407 1446 - bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0; 1408 + bp->rx_tail = 0; 1409 + bp->rx_prepared_head = 0; 1447 1410 1448 1411 gem_rx_refill(bp); 1449 1412 } ··· 1469 1418 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); 1470 1419 1471 1420 for (i = 0; i < TX_RING_SIZE; i++) { 1472 - bp->tx_ring[i].addr = 0; 1473 - bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); 1421 + bp->queues[0].tx_ring[i].addr = 0; 1422 + bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); 1423 + bp->queues[0].tx_head = 0; 1424 + bp->queues[0].tx_tail = 0; 1474 1425 } 1475 - bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); 1426 + bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); 1476 1427 1477 - bp->rx_tail = bp->tx_head = bp->tx_tail = 0; 1428 + bp->rx_tail = 0; 1478 1429 } 1479 1430 1480 1431 static void macb_reset_hw(struct macb *bp) 1481 1432 { 1433 + struct macb_queue *queue; 1434 + unsigned int q; 1435 + 1482 1436 /* 1483 1437 * Disable RX and TX (XXX: Should we halt the transmission 1484 1438 * more gracefully?) 
··· 1498 1442 macb_writel(bp, RSR, -1); 1499 1443 1500 1444 /* Disable all interrupts */ 1501 - macb_writel(bp, IDR, -1); 1502 - macb_readl(bp, ISR); 1445 + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1446 + queue_writel(queue, IDR, -1); 1447 + queue_readl(queue, ISR); 1448 + } 1503 1449 } 1504 1450 1505 1451 static u32 gem_mdc_clk_div(struct macb *bp) ··· 1598 1540 1599 1541 static void macb_init_hw(struct macb *bp) 1600 1542 { 1543 + struct macb_queue *queue; 1544 + unsigned int q; 1545 + 1601 1546 u32 config; 1602 1547 1603 1548 macb_reset_hw(bp); ··· 1626 1565 1627 1566 /* Initialize TX and RX buffers */ 1628 1567 macb_writel(bp, RBQP, bp->rx_ring_dma); 1629 - macb_writel(bp, TBQP, bp->tx_ring_dma); 1568 + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1569 + queue_writel(queue, TBQP, queue->tx_ring_dma); 1570 + 1571 + /* Enable interrupts */ 1572 + queue_writel(queue, IER, 1573 + MACB_RX_INT_FLAGS | 1574 + MACB_TX_INT_FLAGS | 1575 + MACB_BIT(HRESP)); 1576 + } 1630 1577 1631 1578 /* Enable TX and RX */ 1632 1579 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); 1633 - 1634 - /* Enable interrupts */ 1635 - macb_writel(bp, IER, (MACB_RX_INT_FLAGS 1636 - | MACB_TX_INT_FLAGS 1637 - | MACB_BIT(HRESP))); 1638 - 1639 1580 } 1640 1581 1641 1582 /* ··· 1799 1736 /* schedule a link state check */ 1800 1737 phy_start(bp->phy_dev); 1801 1738 1802 - netif_start_queue(dev); 1739 + netif_tx_start_all_queues(dev); 1803 1740 1804 1741 return 0; 1805 1742 } ··· 1809 1746 struct macb *bp = netdev_priv(dev); 1810 1747 unsigned long flags; 1811 1748 1812 - netif_stop_queue(dev); 1749 + netif_tx_stop_all_queues(dev); 1813 1750 napi_disable(&bp->napi); 1814 1751 1815 1752 if (bp->phy_dev) ··· 1958 1895 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) 1959 1896 | MACB_GREGS_VERSION; 1960 1897 1961 - tail = macb_tx_ring_wrap(bp->tx_tail); 1962 - head = macb_tx_ring_wrap(bp->tx_head); 1898 + tail = 
macb_tx_ring_wrap(bp->queues[0].tx_tail); 1899 + head = macb_tx_ring_wrap(bp->queues[0].tx_head); 1963 1900 1964 1901 regs_buff[0] = macb_readl(bp, NCR); 1965 1902 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); ··· 1972 1909 1973 1910 regs_buff[8] = tail; 1974 1911 regs_buff[9] = head; 1975 - regs_buff[10] = macb_tx_dma(bp, tail); 1976 - regs_buff[11] = macb_tx_dma(bp, head); 1912 + regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); 1913 + regs_buff[11] = macb_tx_dma(&bp->queues[0], head); 1977 1914 1978 1915 if (macb_is_gem(bp)) { 1979 1916 regs_buff[12] = gem_readl(bp, USRIO); ··· 2124 2061 netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps); 2125 2062 } 2126 2063 2064 + static void macb_probe_queues(void __iomem *mem, 2065 + unsigned int *queue_mask, 2066 + unsigned int *num_queues) 2067 + { 2068 + unsigned int hw_q; 2069 + u32 mid; 2070 + 2071 + *queue_mask = 0x1; 2072 + *num_queues = 1; 2073 + 2074 + /* is it macb or gem ? */ 2075 + mid = __raw_readl(mem + MACB_MID); 2076 + if (MACB_BFEXT(IDNUM, mid) != 0x2) 2077 + return; 2078 + 2079 + /* bit 0 is never set but queue 0 always exists */ 2080 + *queue_mask = __raw_readl(mem + GEM_DCFG6) & 0xff; 2081 + *queue_mask |= 0x1; 2082 + 2083 + for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) 2084 + if (*queue_mask & (1 << hw_q)) 2085 + (*num_queues)++; 2086 + } 2087 + 2127 2088 static int __init macb_probe(struct platform_device *pdev) 2128 2089 { 2129 2090 struct macb_platform_data *pdata; 2130 2091 struct resource *regs; 2131 2092 struct net_device *dev; 2132 2093 struct macb *bp; 2094 + struct macb_queue *queue; 2133 2095 struct phy_device *phydev; 2134 2096 u32 config; 2135 2097 int err = -ENXIO; 2136 2098 const char *mac; 2099 + void __iomem *mem; 2100 + unsigned int hw_q, queue_mask, q, num_queues, q_irq = 0; 2101 + struct clk *pclk, *hclk, *tx_clk; 2137 2102 2138 2103 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2139 2104 if (!regs) { ··· 2169 2078 goto err_out; 2170 2079 } 2171 2080 2172 - err = 
-ENOMEM; 2173 - dev = alloc_etherdev(sizeof(*bp)); 2174 - if (!dev) 2081 + pclk = devm_clk_get(&pdev->dev, "pclk"); 2082 + if (IS_ERR(pclk)) { 2083 + err = PTR_ERR(pclk); 2084 + dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); 2175 2085 goto err_out; 2086 + } 2087 + 2088 + hclk = devm_clk_get(&pdev->dev, "hclk"); 2089 + if (IS_ERR(hclk)) { 2090 + err = PTR_ERR(hclk); 2091 + dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); 2092 + goto err_out; 2093 + } 2094 + 2095 + tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); 2096 + 2097 + err = clk_prepare_enable(pclk); 2098 + if (err) { 2099 + dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); 2100 + goto err_out; 2101 + } 2102 + 2103 + err = clk_prepare_enable(hclk); 2104 + if (err) { 2105 + dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err); 2106 + goto err_out_disable_pclk; 2107 + } 2108 + 2109 + if (!IS_ERR(tx_clk)) { 2110 + err = clk_prepare_enable(tx_clk); 2111 + if (err) { 2112 + dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", 2113 + err); 2114 + goto err_out_disable_hclk; 2115 + } 2116 + } 2117 + 2118 + err = -ENOMEM; 2119 + mem = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); 2120 + if (!mem) { 2121 + dev_err(&pdev->dev, "failed to map registers, aborting.\n"); 2122 + goto err_out_disable_clocks; 2123 + } 2124 + 2125 + macb_probe_queues(mem, &queue_mask, &num_queues); 2126 + dev = alloc_etherdev_mq(sizeof(*bp), num_queues); 2127 + if (!dev) 2128 + goto err_out_disable_clocks; 2176 2129 2177 2130 SET_NETDEV_DEV(dev, &pdev->dev); 2178 2131 2179 2132 bp = netdev_priv(dev); 2180 2133 bp->pdev = pdev; 2181 2134 bp->dev = dev; 2135 + bp->regs = mem; 2136 + bp->num_queues = num_queues; 2137 + bp->pclk = pclk; 2138 + bp->hclk = hclk; 2139 + bp->tx_clk = tx_clk; 2182 2140 2183 2141 spin_lock_init(&bp->lock); 2184 - INIT_WORK(&bp->tx_error_task, macb_tx_error_task); 2185 2142 2186 - bp->pclk = devm_clk_get(&pdev->dev, "pclk"); 2187 - if (IS_ERR(bp->pclk)) { 2188 - err = 
PTR_ERR(bp->pclk); 2189 - dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); 2190 - goto err_out_free_dev; 2191 - } 2143 + /* set the queue register mapping once for all: queue0 has a special 2144 + * register mapping but we don't want to test the queue index then 2145 + * compute the corresponding register offset at run time. 2146 + */ 2147 + for (hw_q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { 2148 + if (!(queue_mask & (1 << hw_q))) 2149 + continue; 2192 2150 2193 - bp->hclk = devm_clk_get(&pdev->dev, "hclk"); 2194 - if (IS_ERR(bp->hclk)) { 2195 - err = PTR_ERR(bp->hclk); 2196 - dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); 2197 - goto err_out_free_dev; 2198 - } 2199 - 2200 - bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); 2201 - 2202 - err = clk_prepare_enable(bp->pclk); 2203 - if (err) { 2204 - dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); 2205 - goto err_out_free_dev; 2206 - } 2207 - 2208 - err = clk_prepare_enable(bp->hclk); 2209 - if (err) { 2210 - dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err); 2211 - goto err_out_disable_pclk; 2212 - } 2213 - 2214 - if (!IS_ERR(bp->tx_clk)) { 2215 - err = clk_prepare_enable(bp->tx_clk); 2216 - if (err) { 2217 - dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", 2218 - err); 2219 - goto err_out_disable_hclk; 2151 + queue = &bp->queues[q_irq]; 2152 + queue->bp = bp; 2153 + if (hw_q) { 2154 + queue->ISR = GEM_ISR(hw_q - 1); 2155 + queue->IER = GEM_IER(hw_q - 1); 2156 + queue->IDR = GEM_IDR(hw_q - 1); 2157 + queue->IMR = GEM_IMR(hw_q - 1); 2158 + queue->TBQP = GEM_TBQP(hw_q - 1); 2159 + } else { 2160 + /* queue0 uses legacy registers */ 2161 + queue->ISR = MACB_ISR; 2162 + queue->IER = MACB_IER; 2163 + queue->IDR = MACB_IDR; 2164 + queue->IMR = MACB_IMR; 2165 + queue->TBQP = MACB_TBQP; 2220 2166 } 2221 - } 2222 2167 2223 - bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); 2224 - if (!bp->regs) { 2225 - dev_err(&pdev->dev, "failed to map registers, aborting.\n"); 2226 
- err = -ENOMEM; 2227 - goto err_out_disable_clocks; 2228 - } 2168 + /* get irq: here we use the linux queue index, not the hardware 2169 + * queue index. the queue irq definitions in the device tree 2170 + * must remove the optional gaps that could exist in the 2171 + * hardware queue mask. 2172 + */ 2173 + queue->irq = platform_get_irq(pdev, q_irq); 2174 + err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, 2175 + 0, dev->name, queue); 2176 + if (err) { 2177 + dev_err(&pdev->dev, 2178 + "Unable to request IRQ %d (error %d)\n", 2179 + queue->irq, err); 2180 + goto err_out_free_irq; 2181 + } 2229 2182 2230 - dev->irq = platform_get_irq(pdev, 0); 2231 - err = devm_request_irq(&pdev->dev, dev->irq, macb_interrupt, 0, 2232 - dev->name, dev); 2233 - if (err) { 2234 - dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n", 2235 - dev->irq, err); 2236 - goto err_out_disable_clocks; 2183 + INIT_WORK(&queue->tx_error_task, macb_tx_error_task); 2184 + q_irq++; 2237 2185 } 2186 + dev->irq = bp->queues[0].irq; 2238 2187 2239 2188 dev->netdev_ops = &macb_netdev_ops; 2240 2189 netif_napi_add(dev, &bp->napi, macb_poll, 64); ··· 2350 2219 err = register_netdev(dev); 2351 2220 if (err) { 2352 2221 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 2353 - goto err_out_disable_clocks; 2222 + goto err_out_free_irq; 2354 2223 } 2355 2224 2356 2225 err = macb_mii_init(bp); ··· 2373 2242 2374 2243 err_out_unregister_netdev: 2375 2244 unregister_netdev(dev); 2376 - err_out_disable_clocks: 2377 - if (!IS_ERR(bp->tx_clk)) 2378 - clk_disable_unprepare(bp->tx_clk); 2379 - err_out_disable_hclk: 2380 - clk_disable_unprepare(bp->hclk); 2381 - err_out_disable_pclk: 2382 - clk_disable_unprepare(bp->pclk); 2383 - err_out_free_dev: 2245 + err_out_free_irq: 2246 + for (q = 0, queue = bp->queues; q < q_irq; ++q, ++queue) 2247 + devm_free_irq(&pdev->dev, queue->irq, queue); 2384 2248 free_netdev(dev); 2249 + err_out_disable_clocks: 2250 + if (!IS_ERR(tx_clk)) 2251 + 
clk_disable_unprepare(tx_clk); 2252 + err_out_disable_hclk: 2253 + clk_disable_unprepare(hclk); 2254 + err_out_disable_pclk: 2255 + clk_disable_unprepare(pclk); 2385 2256 err_out: 2386 2257 return err; 2387 2258 } ··· 2392 2259 { 2393 2260 struct net_device *dev; 2394 2261 struct macb *bp; 2262 + struct macb_queue *queue; 2263 + unsigned int q; 2395 2264 2396 2265 dev = platform_get_drvdata(pdev); 2397 2266 ··· 2405 2270 kfree(bp->mii_bus->irq); 2406 2271 mdiobus_free(bp->mii_bus); 2407 2272 unregister_netdev(dev); 2273 + queue = bp->queues; 2274 + for (q = 0; q < bp->num_queues; ++q, ++queue) 2275 + devm_free_irq(&pdev->dev, queue->irq, queue); 2276 + free_netdev(dev); 2408 2277 if (!IS_ERR(bp->tx_clk)) 2409 2278 clk_disable_unprepare(bp->tx_clk); 2410 2279 clk_disable_unprepare(bp->hclk); 2411 2280 clk_disable_unprepare(bp->pclk); 2412 - free_netdev(dev); 2413 2281 } 2414 2282 2415 2283 return 0;
+31 -5
drivers/net/ethernet/cadence/macb.h
··· 12 12 13 13 #define MACB_GREGS_NBR 16 14 14 #define MACB_GREGS_VERSION 1 15 + #define MACB_MAX_QUEUES 8 15 16 16 17 /* MACB register offsets */ 17 18 #define MACB_NCR 0x0000 ··· 89 88 #define GEM_DCFG5 0x0290 90 89 #define GEM_DCFG6 0x0294 91 90 #define GEM_DCFG7 0x0298 91 + 92 + #define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2)) 93 + #define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2)) 94 + #define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2)) 95 + #define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2)) 96 + #define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2)) 97 + #define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2)) 92 98 93 99 /* Bitfields in NCR */ 94 100 #define MACB_LB_OFFSET 0 ··· 384 376 __raw_readl((port)->regs + GEM_##reg) 385 377 #define gem_writel(port, reg, value) \ 386 378 __raw_writel((value), (port)->regs + GEM_##reg) 379 + #define queue_readl(queue, reg) \ 380 + __raw_readl((queue)->bp->regs + (queue)->reg) 381 + #define queue_writel(queue, reg, value) \ 382 + __raw_writel((value), (queue)->bp->regs + (queue)->reg) 387 383 388 384 /* 389 385 * Conditional GEM/MACB macros. 
These perform the operation to the correct ··· 609 597 unsigned int dma_burst_length; 610 598 }; 611 599 600 + struct macb_queue { 601 + struct macb *bp; 602 + int irq; 603 + 604 + unsigned int ISR; 605 + unsigned int IER; 606 + unsigned int IDR; 607 + unsigned int IMR; 608 + unsigned int TBQP; 609 + 610 + unsigned int tx_head, tx_tail; 611 + struct macb_dma_desc *tx_ring; 612 + struct macb_tx_skb *tx_skb; 613 + dma_addr_t tx_ring_dma; 614 + struct work_struct tx_error_task; 615 + }; 616 + 612 617 struct macb { 613 618 void __iomem *regs; 614 619 ··· 636 607 void *rx_buffers; 637 608 size_t rx_buffer_size; 638 609 639 - unsigned int tx_head, tx_tail; 640 - struct macb_dma_desc *tx_ring; 641 - struct macb_tx_skb *tx_skb; 610 + unsigned int num_queues; 611 + struct macb_queue queues[MACB_MAX_QUEUES]; 642 612 643 613 spinlock_t lock; 644 614 struct platform_device *pdev; ··· 646 618 struct clk *tx_clk; 647 619 struct net_device *dev; 648 620 struct napi_struct napi; 649 - struct work_struct tx_error_task; 650 621 struct net_device_stats stats; 651 622 union { 652 623 struct macb_stats macb; ··· 653 626 } hw_stats; 654 627 655 628 dma_addr_t rx_ring_dma; 656 - dma_addr_t tx_ring_dma; 657 629 dma_addr_t rx_buffers_dma; 658 630 659 631 struct macb_or_gem_ops macbgem_ops;
+1 -1
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
··· 392 392 s16 xact_addr_filt; /* index of exact MAC address filter */ 393 393 u16 rss_size; /* size of VI's RSS table slice */ 394 394 s8 mdio_addr; 395 - u8 port_type; 395 + enum fw_port_type port_type; 396 396 u8 mod_type; 397 397 u8 port_id; 398 398 u8 tx_chan;
+10 -3
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 2325 2325 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val); 2326 2326 } 2327 2327 2328 - static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps) 2328 + static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps) 2329 2329 { 2330 2330 unsigned int v = 0; 2331 2331 ··· 2354 2354 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | 2355 2355 SUPPORTED_10000baseKX4_Full; 2356 2356 else if (type == FW_PORT_TYPE_FIBER_XFI || 2357 - type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) { 2357 + type == FW_PORT_TYPE_FIBER_XAUI || 2358 + type == FW_PORT_TYPE_SFP || 2359 + type == FW_PORT_TYPE_QSFP_10G || 2360 + type == FW_PORT_TYPE_QSA) { 2358 2361 v |= SUPPORTED_FIBRE; 2359 2362 if (caps & FW_PORT_CAP_SPEED_1G) 2360 2363 v |= SUPPORTED_1000baseT_Full; 2361 2364 if (caps & FW_PORT_CAP_SPEED_10G) 2362 2365 v |= SUPPORTED_10000baseT_Full; 2363 - } else if (type == FW_PORT_TYPE_BP40_BA) 2366 + } else if (type == FW_PORT_TYPE_BP40_BA || 2367 + type == FW_PORT_TYPE_QSFP) { 2364 2368 v |= SUPPORTED_40000baseSR4_Full; 2369 + v |= SUPPORTED_FIBRE; 2370 + } 2365 2371 2366 2372 if (caps & FW_PORT_CAP_ANEG) 2367 2373 v |= SUPPORTED_Autoneg; ··· 2402 2396 cmd->port = PORT_FIBRE; 2403 2397 else if (p->port_type == FW_PORT_TYPE_SFP || 2404 2398 p->port_type == FW_PORT_TYPE_QSFP_10G || 2399 + p->port_type == FW_PORT_TYPE_QSA || 2405 2400 p->port_type == FW_PORT_TYPE_QSFP) { 2406 2401 if (p->mod_type == FW_PORT_MOD_TYPE_LR || 2407 2402 p->mod_type == FW_PORT_MOD_TYPE_SR ||
+2
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
··· 560 560 FW_FLOWC_MNEM_RCVNXT, 561 561 FW_FLOWC_MNEM_SNDBUF, 562 562 FW_FLOWC_MNEM_MSS, 563 + FW_FLOWC_MNEM_TXDATAPLEN_MAX, 563 564 }; 564 565 565 566 struct fw_flowc_mnemval { ··· 2471 2470 FW_PORT_TYPE_BP4_AP, 2472 2471 FW_PORT_TYPE_QSFP_10G, 2473 2472 FW_PORT_TYPE_QSFP, 2473 + FW_PORT_TYPE_QSA, 2474 2474 FW_PORT_TYPE_BP40_BA, 2475 2475 2476 2476 FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
+1 -1
drivers/net/ethernet/davicom/Kconfig
··· 4 4 5 5 config DM9000 6 6 tristate "DM9000 support" 7 - depends on ARM || BLACKFIN || MIPS || COLDFIRE 7 + depends on ARM || BLACKFIN || MIPS || COLDFIRE || NIOS2 8 8 select CRC32 9 9 select MII 10 10 ---help---
+3 -3
drivers/net/ethernet/intel/fm10k/fm10k_main.c
··· 615 615 616 616 rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean); 617 617 618 - if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_DD)) 618 + if (!rx_desc->d.staterr) 619 619 break; 620 620 621 621 /* This memory barrier is needed to keep us from reading 622 622 * any other fields out of the rx_desc until we know the 623 - * RXD_STATUS_DD bit is set 623 + * descriptor has been written back 624 624 */ 625 - rmb(); 625 + dma_rmb(); 626 626 627 627 /* retrieve a buffer from the ring */ 628 628 skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);
+3 -3
drivers/net/ethernet/intel/igb/igb_main.c
··· 6910 6910 6911 6911 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); 6912 6912 6913 - if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) 6913 + if (!rx_desc->wb.upper.status_error) 6914 6914 break; 6915 6915 6916 6916 /* This memory barrier is needed to keep us from reading 6917 6917 * any other fields out of the rx_desc until we know the 6918 - * RXD_STAT_DD bit is set 6918 + * descriptor has been written back 6919 6919 */ 6920 - rmb(); 6920 + dma_rmb(); 6921 6921 6922 6922 /* retrieve a buffer from the ring */ 6923 6923 skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+4 -5
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 2009 2009 2010 2010 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); 2011 2011 2012 - if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) 2012 + if (!rx_desc->wb.upper.status_error) 2013 2013 break; 2014 2014 2015 - /* 2016 - * This memory barrier is needed to keep us from reading 2015 + /* This memory barrier is needed to keep us from reading 2017 2016 * any other fields out of the rx_desc until we know the 2018 - * RXD_STAT_DD bit is set 2017 + * descriptor has been written back 2019 2018 */ 2020 - rmb(); 2019 + dma_rmb(); 2021 2020 2022 2021 /* retrieve a buffer from the ring */ 2023 2022 skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
+6 -6
drivers/net/ethernet/jme.c
··· 1364 1364 jme_free_rx_resources(jme); 1365 1365 out_enable_tasklet: 1366 1366 tasklet_enable(&jme->txclean_task); 1367 - tasklet_hi_enable(&jme->rxclean_task); 1368 - tasklet_hi_enable(&jme->rxempty_task); 1367 + tasklet_enable(&jme->rxclean_task); 1368 + tasklet_enable(&jme->rxempty_task); 1369 1369 out: 1370 1370 atomic_inc(&jme->link_changing); 1371 1371 } ··· 2408 2408 if (test_bit(JME_FLAG_POLL, &jme->flags)) { 2409 2409 JME_NAPI_ENABLE(jme); 2410 2410 } else { 2411 - tasklet_hi_enable(&jme->rxclean_task); 2412 - tasklet_hi_enable(&jme->rxempty_task); 2411 + tasklet_enable(&jme->rxclean_task); 2412 + tasklet_enable(&jme->rxempty_task); 2413 2413 } 2414 2414 dpi->cur = PCC_P1; 2415 2415 dpi->attempt = PCC_P1; ··· 3290 3290 } 3291 3291 3292 3292 tasklet_enable(&jme->txclean_task); 3293 - tasklet_hi_enable(&jme->rxclean_task); 3294 - tasklet_hi_enable(&jme->rxempty_task); 3293 + tasklet_enable(&jme->rxclean_task); 3294 + tasklet_enable(&jme->rxempty_task); 3295 3295 3296 3296 jme_powersave_phy(jme); 3297 3297
+42 -11
drivers/net/ethernet/realtek/r8169.c
··· 5919 5919 rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC); 5920 5920 } 5921 5921 5922 - static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) 5922 + static void rtl_hw_start_8168g(struct rtl8169_private *tp) 5923 5923 { 5924 5924 void __iomem *ioaddr = tp->mmio_addr; 5925 5925 struct pci_dev *pdev = tp->pci_dev; ··· 5954 5954 rtl_pcie_state_l2l3_enable(tp, false); 5955 5955 } 5956 5956 5957 + static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) 5958 + { 5959 + void __iomem *ioaddr = tp->mmio_addr; 5960 + static const struct ephy_info e_info_8168g_1[] = { 5961 + { 0x00, 0x0000, 0x0008 }, 5962 + { 0x0c, 0x37d0, 0x0820 }, 5963 + { 0x1e, 0x0000, 0x0001 }, 5964 + { 0x19, 0x8000, 0x0000 } 5965 + }; 5966 + 5967 + rtl_hw_start_8168g(tp); 5968 + 5969 + /* disable aspm and clock request before access ephy */ 5970 + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); 5971 + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); 5972 + rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1)); 5973 + } 5974 + 5957 5975 static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) 5958 5976 { 5959 5977 void __iomem *ioaddr = tp->mmio_addr; ··· 5982 5964 { 0x1e, 0xffff, 0x20eb } 5983 5965 }; 5984 5966 5985 - rtl_hw_start_8168g_1(tp); 5967 + rtl_hw_start_8168g(tp); 5986 5968 5987 5969 /* disable aspm and clock request before access ephy */ 5988 5970 RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); ··· 6001 5983 { 0x1e, 0x0000, 0x2000 } 6002 5984 }; 6003 5985 6004 - rtl_hw_start_8168g_1(tp); 5986 + rtl_hw_start_8168g(tp); 6005 5987 6006 5988 /* disable aspm and clock request before access ephy */ 6007 5989 RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); ··· 6623 6605 { 6624 6606 u32 eor = le32_to_cpu(desc->opts1) & RingEnd; 6625 6607 6608 + /* Force memory writes to complete before releasing descriptor */ 6609 + dma_wmb(); 6610 + 6626 6611 desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz); 6627 6612 } 6628 6613 ··· 6633 6612 u32 rx_buf_sz) 6634 6613 { 
6635 6614 desc->addr = cpu_to_le64(mapping); 6636 - wmb(); 6637 6615 rtl8169_mark_to_asic(desc, rx_buf_sz); 6638 6616 } 6639 6617 ··· 7093 7073 7094 7074 skb_tx_timestamp(skb); 7095 7075 7096 - wmb(); 7076 + /* Force memory writes to complete before releasing descriptor */ 7077 + dma_wmb(); 7097 7078 7098 7079 /* Anti gcc 2.95.3 bugware (sic) */ 7099 7080 status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); 7100 7081 txd->opts1 = cpu_to_le32(status); 7101 7082 7102 - tp->cur_tx += frags + 1; 7103 - 7083 + /* Force all memory writes to complete before notifying device */ 7104 7084 wmb(); 7085 + 7086 + tp->cur_tx += frags + 1; 7105 7087 7106 7088 RTL_W8(TxPoll, NPQ); 7107 7089 ··· 7203 7181 struct ring_info *tx_skb = tp->tx_skb + entry; 7204 7182 u32 status; 7205 7183 7206 - rmb(); 7207 7184 status = le32_to_cpu(tp->TxDescArray[entry].opts1); 7208 7185 if (status & DescOwn) 7209 7186 break; 7187 + 7188 + /* This barrier is needed to keep us from reading 7189 + * any other fields out of the Tx descriptor until 7190 + * we know the status of DescOwn 7191 + */ 7192 + dma_rmb(); 7210 7193 7211 7194 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, 7212 7195 tp->TxDescArray + entry); ··· 7307 7280 struct RxDesc *desc = tp->RxDescArray + entry; 7308 7281 u32 status; 7309 7282 7310 - rmb(); 7311 7283 status = le32_to_cpu(desc->opts1) & tp->opts1_mask; 7312 - 7313 7284 if (status & DescOwn) 7314 7285 break; 7286 + 7287 + /* This barrier is needed to keep us from reading 7288 + * any other fields out of the Rx descriptor until 7289 + * we know the status of DescOwn 7290 + */ 7291 + dma_rmb(); 7292 + 7315 7293 if (unlikely(status & RxRES)) { 7316 7294 netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n", 7317 7295 status); ··· 7378 7346 } 7379 7347 release_descriptor: 7380 7348 desc->opts2 = 0; 7381 - wmb(); 7382 7349 rtl8169_mark_to_asic(desc, rx_buf_sz); 7383 7350 } 7384 7351
+2 -2
drivers/net/ethernet/smsc/Kconfig
··· 6 6 bool "SMC (SMSC)/Western Digital devices" 7 7 default y 8 8 depends on ARM || ISA || MAC || ARM64 || MIPS || M32R || SUPERH || \ 9 - BLACKFIN || MN10300 || COLDFIRE || XTENSA || PCI || PCMCIA 9 + BLACKFIN || MN10300 || COLDFIRE || XTENSA || NIOS2 || PCI || PCMCIA 10 10 ---help--- 11 11 If you have a network (Ethernet) card belonging to this class, say Y 12 12 and read the Ethernet-HOWTO, available from ··· 39 39 select CRC32 40 40 select MII 41 41 depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \ 42 - MN10300 || COLDFIRE || ARM64 || XTENSA) 42 + MN10300 || COLDFIRE || ARM64 || XTENSA || NIOS2) 43 43 ---help--- 44 44 This is a driver for SMC's 91x series of Ethernet chipsets, 45 45 including the SMC91C94 and the SMC91C111. Say Y if you want it
+1
drivers/net/phy/fixed.c
··· 274 274 275 275 return phy; 276 276 } 277 + EXPORT_SYMBOL_GPL(fixed_phy_register); 277 278 278 279 static int __init fixed_mdio_bus_init(void) 279 280 {
+104 -40
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
··· 75 75 static void *t4_uld_add(const struct cxgb4_lld_info *); 76 76 static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *); 77 77 static int t4_uld_state_change(void *, enum cxgb4_state state); 78 + static inline int send_tx_flowc_wr(struct cxgbi_sock *); 78 79 79 80 static const struct cxgb4_uld_info cxgb4i_uld_info = { 80 81 .name = DRV_MODULE_NAME, ··· 158 157 #define RCV_BUFSIZ_MASK 0x3FFU 159 158 #define MAX_IMM_TX_PKT_LEN 128 160 159 161 - static inline void set_queue(struct sk_buff *skb, unsigned int queue, 162 - const struct cxgbi_sock *csk) 163 - { 164 - skb->queue_mapping = queue; 165 - } 166 - 167 160 static int push_tx_frames(struct cxgbi_sock *, int); 168 161 169 162 /* ··· 167 172 * Returns true if a packet can be sent as an offload WR with immediate 168 173 * data. We currently use the same limit as for Ethernet packets. 169 174 */ 170 - static inline int is_ofld_imm(const struct sk_buff *skb) 175 + static inline bool is_ofld_imm(const struct sk_buff *skb) 171 176 { 172 - return skb->len <= (MAX_IMM_TX_PKT_LEN - 173 - sizeof(struct fw_ofld_tx_data_wr)); 177 + int len = skb->len; 178 + 179 + if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) 180 + len += sizeof(struct fw_ofld_tx_data_wr); 181 + 182 + return len <= MAX_IMM_TX_PKT_LEN; 174 183 } 175 184 176 185 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, ··· 387 388 388 389 if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev) 389 390 return; 391 + 392 + if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { 393 + send_tx_flowc_wr(csk); 394 + cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); 395 + } 396 + 390 397 cxgbi_sock_set_state(csk, CTP_ABORTING); 391 398 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); 392 399 cxgbi_sock_purge_write_queue(csk); 393 400 394 401 csk->cpl_abort_req = NULL; 395 402 req = (struct cpl_abort_req *)skb->head; 396 - set_queue(skb, CPL_PRIORITY_DATA, csk); 403 + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); 
397 404 req->cmd = CPL_ABORT_SEND_RST; 398 405 t4_set_arp_err_handler(skb, csk, abort_arp_failure); 399 406 INIT_TP_WR(req, csk->tid); ··· 425 420 csk, csk->state, csk->flags, csk->tid, rst_status); 426 421 427 422 csk->cpl_abort_rpl = NULL; 428 - set_queue(skb, CPL_PRIORITY_DATA, csk); 423 + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); 429 424 INIT_TP_WR(rpl, csk->tid); 430 425 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); 431 426 rpl->cmd = rst_status; ··· 496 491 return flits + sgl_len(cnt); 497 492 } 498 493 499 - static inline void send_tx_flowc_wr(struct cxgbi_sock *csk) 494 + #define FLOWC_WR_NPARAMS_MIN 9 495 + static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp) 496 + { 497 + int nparams, flowclen16, flowclen; 498 + 499 + nparams = FLOWC_WR_NPARAMS_MIN; 500 + flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]); 501 + flowclen16 = DIV_ROUND_UP(flowclen, 16); 502 + flowclen = flowclen16 * 16; 503 + /* 504 + * Return the number of 16-byte credits used by the FlowC request. 505 + * Pass back the nparams and actual FlowC length if requested. 
506 + */ 507 + if (nparamsp) 508 + *nparamsp = nparams; 509 + if (flowclenp) 510 + *flowclenp = flowclen; 511 + 512 + return flowclen16; 513 + } 514 + 515 + static inline int send_tx_flowc_wr(struct cxgbi_sock *csk) 500 516 { 501 517 struct sk_buff *skb; 502 518 struct fw_flowc_wr *flowc; 503 - int flowclen, i; 519 + int nparams, flowclen16, flowclen; 504 520 505 - flowclen = 80; 521 + flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen); 506 522 skb = alloc_wr(flowclen, 0, GFP_ATOMIC); 507 523 flowc = (struct fw_flowc_wr *)skb->head; 508 524 flowc->op_to_nparams = 509 - htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(8)); 525 + htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams)); 510 526 flowc->flowid_len16 = 511 - htonl(FW_WR_LEN16_V(DIV_ROUND_UP(72, 16)) | 512 - FW_WR_FLOWID_V(csk->tid)); 527 + htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid)); 513 528 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 514 529 flowc->mnemval[0].val = htonl(csk->cdev->pfvf); 515 530 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; ··· 548 523 flowc->mnemval[7].val = htonl(csk->advmss); 549 524 flowc->mnemval[8].mnemonic = 0; 550 525 flowc->mnemval[8].val = 0; 551 - for (i = 0; i < 9; i++) { 552 - flowc->mnemval[i].r4[0] = 0; 553 - flowc->mnemval[i].r4[1] = 0; 554 - flowc->mnemval[i].r4[2] = 0; 555 - } 556 - set_queue(skb, CPL_PRIORITY_DATA, csk); 526 + flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX; 527 + flowc->mnemval[8].val = 16384; 528 + 529 + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); 557 530 558 531 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 559 532 "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n", ··· 560 537 csk->advmss); 561 538 562 539 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 540 + 541 + return flowclen16; 563 542 } 564 543 565 544 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, ··· 570 545 struct fw_ofld_tx_data_wr *req; 571 546 unsigned int submode = 
cxgbi_skcb_ulp_mode(skb) & 3; 572 547 unsigned int wr_ulp_mode = 0, val; 548 + bool imm = is_ofld_imm(skb); 573 549 574 550 req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req)); 575 551 576 - if (is_ofld_imm(skb)) { 552 + if (imm) { 577 553 req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | 578 554 FW_WR_COMPL_F | 579 555 FW_WR_IMMDLEN_V(dlen)); ··· 623 597 int dlen = skb->len; 624 598 int len = skb->len; 625 599 unsigned int credits_needed; 600 + int flowclen16 = 0; 626 601 627 602 skb_reset_transport_header(skb); 628 603 if (is_ofld_imm(skb)) 629 - credits_needed = DIV_ROUND_UP(dlen + 630 - sizeof(struct fw_ofld_tx_data_wr), 16); 604 + credits_needed = DIV_ROUND_UP(dlen, 16); 631 605 else 632 - credits_needed = DIV_ROUND_UP(8*calc_tx_flits_ofld(skb) 633 - + sizeof(struct fw_ofld_tx_data_wr), 606 + credits_needed = DIV_ROUND_UP( 607 + 8 * calc_tx_flits_ofld(skb), 608 + 16); 609 + 610 + if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) 611 + credits_needed += DIV_ROUND_UP( 612 + sizeof(struct fw_ofld_tx_data_wr), 634 613 16); 614 + 615 + /* 616 + * Assumes the initial credits is large enough to support 617 + * fw_flowc_wr plus largest possible first payload 618 + */ 619 + if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { 620 + flowclen16 = send_tx_flowc_wr(csk); 621 + csk->wr_cred -= flowclen16; 622 + csk->wr_una_cred += flowclen16; 623 + cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); 624 + } 635 625 636 626 if (csk->wr_cred < credits_needed) { 637 627 log_debug(1 << CXGBI_DBG_PDU_TX, ··· 657 615 break; 658 616 } 659 617 __skb_unlink(skb, &csk->write_queue); 660 - set_queue(skb, CPL_PRIORITY_DATA, csk); 661 - skb->csum = credits_needed; 618 + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); 619 + skb->csum = credits_needed + flowclen16; 662 620 csk->wr_cred -= credits_needed; 663 621 csk->wr_una_cred += credits_needed; 664 622 cxgbi_sock_enqueue_wr(csk, skb); ··· 669 627 csk->wr_cred, csk->wr_una_cred); 670 628 671 629 if 
(likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) { 672 - if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { 673 - send_tx_flowc_wr(csk); 674 - skb->csum += 5; 675 - csk->wr_cred -= 5; 676 - csk->wr_una_cred += 5; 677 - } 678 630 len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); 679 631 make_tx_data_wr(csk, skb, dlen, len, credits_needed, 680 632 req_completion); ··· 843 807 844 808 } 845 809 810 + static inline bool is_neg_adv(unsigned int status) 811 + { 812 + return status == CPL_ERR_RTX_NEG_ADVICE || 813 + status == CPL_ERR_KEEPALV_NEG_ADVICE || 814 + status == CPL_ERR_PERSIST_NEG_ADVICE; 815 + } 816 + 846 817 static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) 847 818 { 848 819 struct cxgbi_sock *csk; ··· 871 828 "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr), 872 829 atid, tid, status, csk, csk->state, csk->flags); 873 830 874 - if (status == CPL_ERR_RTX_NEG_ADVICE) 831 + if (is_neg_adv(status)) 875 832 goto rel_skb; 876 833 877 834 module_put(THIS_MODULE); ··· 977 934 (&csk->saddr), (&csk->daddr), 978 935 csk, csk->state, csk->flags, csk->tid, req->status); 979 936 980 - if (req->status == CPL_ERR_RTX_NEG_ADVICE || 981 - req->status == CPL_ERR_PERSIST_NEG_ADVICE) 937 + if (is_neg_adv(req->status)) 982 938 goto rel_skb; 983 939 984 940 cxgbi_sock_get(csk); ··· 1028 986 1029 987 cxgbi_sock_rcv_abort_rpl(csk); 1030 988 rel_skb: 989 + __kfree_skb(skb); 990 + } 991 + 992 + static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb) 993 + { 994 + struct cxgbi_sock *csk; 995 + struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data; 996 + unsigned int tid = GET_TID(cpl); 997 + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); 998 + struct tid_info *t = lldi->tids; 999 + 1000 + csk = lookup_tid(t, tid); 1001 + if (!csk) { 1002 + pr_err("can't find connection for tid %u.\n", tid); 1003 + } else { 1004 + /* not expecting this, reset the connection. 
*/ 1005 + pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid); 1006 + spin_lock_bh(&csk->lock); 1007 + send_abort_req(csk); 1008 + spin_unlock_bh(&csk->lock); 1009 + } 1031 1010 __kfree_skb(skb); 1032 1011 } 1033 1012 ··· 1471 1408 [CPL_SET_TCB_RPL] = do_set_tcb_rpl, 1472 1409 [CPL_RX_DATA_DDP] = do_rx_data_ddp, 1473 1410 [CPL_RX_ISCSI_DDP] = do_rx_data_ddp, 1411 + [CPL_RX_DATA] = do_rx_data, 1474 1412 }; 1475 1413 1476 1414 int cxgb4i_ofld_init(struct cxgbi_device *cdev) ··· 1549 1485 return -ENOMEM; 1550 1486 } 1551 1487 req = (struct ulp_mem_io *)skb->head; 1552 - set_queue(skb, CPL_PRIORITY_CONTROL, NULL); 1488 + set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); 1553 1489 1554 1490 ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr); 1555 1491 idata = (struct ulptx_idata *)(req + 1);
+3 -1
drivers/scsi/cxgbi/libcxgbi.c
··· 2294 2294 return err; 2295 2295 } 2296 2296 2297 - kfree_skb(skb); 2298 2297 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2299 2298 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", 2300 2299 task->itt, skb, skb->len, skb->data_len, err); 2300 + 2301 + kfree_skb(skb); 2302 + 2301 2303 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); 2302 2304 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); 2303 2305 return err;
+2 -2
drivers/scsi/cxgbi/libcxgbi.h
··· 317 317 __clear_bit(flag, &(cxgbi_skcb_flags(skb))); 318 318 } 319 319 320 - static inline int cxgbi_skcb_test_flag(struct sk_buff *skb, 321 - enum cxgbi_skcb_flags flag) 320 + static inline int cxgbi_skcb_test_flag(const struct sk_buff *skb, 321 + enum cxgbi_skcb_flags flag) 322 322 { 323 323 return test_bit(flag, &(cxgbi_skcb_flags(skb))); 324 324 }
+8
include/asm-generic/barrier.h
··· 42 42 #define wmb() mb() 43 43 #endif 44 44 45 + #ifndef dma_rmb 46 + #define dma_rmb() rmb() 47 + #endif 48 + 49 + #ifndef dma_wmb 50 + #define dma_wmb() wmb() 51 + #endif 52 + 45 53 #ifndef read_barrier_depends 46 54 #define read_barrier_depends() do { } while (0) 47 55 #endif
-6
include/linux/interrupt.h
··· 556 556 atomic_dec(&t->count); 557 557 } 558 558 559 - static inline void tasklet_hi_enable(struct tasklet_struct *t) 560 - { 561 - smp_mb__before_atomic(); 562 - atomic_dec(&t->count); 563 - } 564 - 565 559 extern void tasklet_kill(struct tasklet_struct *t); 566 560 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); 567 561 extern void tasklet_init(struct tasklet_struct *t,
+4 -3
net/8021q/vlan_dev.c
··· 579 579 (1<<__LINK_STATE_PRESENT); 580 580 581 581 dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG | 582 - NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | 582 + NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | 583 583 NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM | 584 584 NETIF_F_ALL_FCOE; 585 585 586 - dev->features |= real_dev->vlan_features | NETIF_F_LLTX; 586 + dev->features |= real_dev->vlan_features | NETIF_F_LLTX | 587 + NETIF_F_GSO_SOFTWARE; 587 588 dev->gso_max_size = real_dev->gso_max_size; 588 589 if (dev->features & NETIF_F_VLAN_FEATURES) 589 590 netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n"); ··· 649 648 features |= NETIF_F_RXCSUM; 650 649 features = netdev_intersect_features(features, real_dev->features); 651 650 652 - features |= old_features & NETIF_F_SOFT_FEATURES; 651 + features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE); 653 652 features |= NETIF_F_LLTX; 654 653 655 654 return features;
+13 -3
net/dsa/slave.c
··· 512 512 } 513 513 514 514 /* slave device setup *******************************************************/ 515 - static void dsa_slave_phy_setup(struct dsa_slave_priv *p, 515 + static int dsa_slave_phy_setup(struct dsa_slave_priv *p, 516 516 struct net_device *slave_dev) 517 517 { 518 518 struct dsa_switch *ds = p->parent; ··· 533 533 ret = of_phy_register_fixed_link(port_dn); 534 534 if (ret) { 535 535 netdev_err(slave_dev, "failed to register fixed PHY\n"); 536 - return; 536 + return ret; 537 537 } 538 538 phy_is_fixed = true; 539 539 phy_dn = port_dn; ··· 555 555 */ 556 556 if (!p->phy) { 557 557 p->phy = ds->slave_mii_bus->phy_map[p->port]; 558 + if (!p->phy) 559 + return -ENODEV; 560 + 558 561 phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, 559 562 p->phy_interface); 560 563 } else { 561 564 netdev_info(slave_dev, "attached PHY at address %d [%s]\n", 562 565 p->phy->addr, p->phy->drv->name); 563 566 } 567 + 568 + return 0; 564 569 } 565 570 566 571 int dsa_slave_suspend(struct net_device *slave_dev) ··· 658 653 p->old_link = -1; 659 654 p->old_duplex = -1; 660 655 661 - dsa_slave_phy_setup(p, slave_dev); 656 + ret = dsa_slave_phy_setup(p, slave_dev); 657 + if (ret) { 658 + free_netdev(slave_dev); 659 + return NULL; 660 + } 662 661 663 662 ret = register_netdev(slave_dev); 664 663 if (ret) { 665 664 netdev_err(master, "error %d registering interface %s\n", 666 665 ret, slave_dev->name); 666 + phy_disconnect(p->phy); 667 667 free_netdev(slave_dev); 668 668 return NULL; 669 669 }
+2 -1
net/ipv4/fib_trie.c
··· 1143 1143 put_child(tp, cindex, (struct rt_trie_node *)tn); 1144 1144 } else { 1145 1145 rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn); 1146 - tp = tn; 1147 1146 } 1147 + 1148 + tp = tn; 1148 1149 } 1149 1150 1150 1151 if (tp && tp->pos + tp->bits > 32)