Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Davem says:

1) Fix JIT code generation on x86-64 for divide by zero, from Eric Dumazet.

2) tg3 header length computation correction from Eric Dumazet.

3) More build and reference counting fixes for socket memory cgroup
code from Glauber Costa.

4) module.h snuck back into a core header after all the hard work we
did to remove that, from Paul Gortmaker and Jesper Dangaard Brouer.

5) Fix PHY naming regression and add some new PCI IDs in stmmac, from
Alessandro Rubini.

6) Netlink message generation fix in new team driver, should only advertise
the entries that changed during events, from Jiri Pirko.

7) SRIOV VF registration and unregistration fixes, and also add a
missing PCI ID, from Roopa Prabhu.

8) Fix infinite loop in tx queue flush code of brcmsmac, from Stanislaw Gruszka.

9) ftgmac100/ftmac100 build fix, missing interrupt.h include.

10) Memory leak fix in net/hyperv do_set_multicast() handling, from Wei Yongjun.

11) Off by one fix in netem packet scheduler, from Vijay Subramanian.

12) TCP loss detection fix from Yuchung Cheng.

13) TCP reset packet MD5 calculation uses wrong address, fix from Shawn Lu.

14) skge carrier assertion and DMA mapping fixes from Stephen Hemminger.

15) Congestion recovery undo performed at the wrong spot in BIC and CUBIC
congestion control modules, fix from Neal Cardwell.

16) Ethtool ETHTOOL_GSSET_INFO is unnecessarily restrictive, from Michał Mirosław.

17) Fix triggerable race in ipv6 sysctl handling, from Francesco Ruggeri.

18) Statistics bug fixes in mlx4 from Eugenia Emantayev.

19) rds locking bug fix during info dumps, from yours truly.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (67 commits)
rds: Make rds_sock_lock BH rather than IRQ safe.
netprio_cgroup.h: dont include module.h from other includes
net: flow_dissector.c missing include linux/export.h
team: send only changed options/ports via netlink
net/hyperv: fix possible memory leak in do_set_multicast()
drivers/net: dsa/mv88e6xxx.c files need linux/module.h
stmmac: added PCI identifiers
llc: Fix race condition in llc_ui_recvmsg
stmmac: fix phy naming inconsistency
dsa: Add reporting of silicon revision for Marvell 88E6123/88E6161/88E6165 switches.
tg3: fix ipv6 header length computation
skge: add byte queue limit support
mv643xx_eth: Add Rx Discard and Rx Overrun statistics
bnx2x: fix compilation error with SOE in fw_dump
bnx2x: handle CHIP_REVISION during init_one
bnx2x: allow user to change ring size in ISCSI SD mode
bnx2x: fix Big-Endianess in ethtool -t
bnx2x: fixed ethtool statistics for MF modes
bnx2x: credit-leakage fixup on vlan_mac_del_all
macvlan: fix a possible use after free
...

+909 -517
+22 -14
arch/x86/net/bpf_jit_comp.c
··· 151 151 cleanup_addr = proglen; /* epilogue address */ 152 152 153 153 for (pass = 0; pass < 10; pass++) { 154 + u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen; 154 155 /* no prologue/epilogue for trivial filters (RET something) */ 155 156 proglen = 0; 156 157 prog = temp; 157 158 158 - if (seen) { 159 + if (seen_or_pass0) { 159 160 EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */ 160 161 EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */ 161 162 /* note : must save %rbx in case bpf_error is hit */ 162 - if (seen & (SEEN_XREG | SEEN_DATAREF)) 163 + if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF)) 163 164 EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */ 164 - if (seen & SEEN_XREG) 165 + if (seen_or_pass0 & SEEN_XREG) 165 166 CLEAR_X(); /* make sure we dont leek kernel memory */ 166 167 167 168 /* ··· 171 170 * r9 = skb->len - skb->data_len 172 171 * r8 = skb->data 173 172 */ 174 - if (seen & SEEN_DATAREF) { 173 + if (seen_or_pass0 & SEEN_DATAREF) { 175 174 if (offsetof(struct sk_buff, len) <= 127) 176 175 /* mov off8(%rdi),%r9d */ 177 176 EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len)); ··· 261 260 case BPF_S_ALU_DIV_X: /* A /= X; */ 262 261 seen |= SEEN_XREG; 263 262 EMIT2(0x85, 0xdb); /* test %ebx,%ebx */ 264 - if (pc_ret0 != -1) 265 - EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4)); 266 - else { 263 + if (pc_ret0 > 0) { 264 + /* addrs[pc_ret0 - 1] is start address of target 265 + * (addrs[i] - 4) is the address following this jmp 266 + * ("xor %edx,%edx; div %ebx" being 4 bytes long) 267 + */ 268 + EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] - 269 + (addrs[i] - 4)); 270 + } else { 267 271 EMIT_COND_JMP(X86_JNE, 2 + 5); 268 272 CLEAR_A(); 269 273 EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */ ··· 341 335 } 342 336 /* fallinto */ 343 337 case BPF_S_RET_A: 344 - if (seen) { 338 + if (seen_or_pass0) { 345 339 if (i != flen - 1) { 346 340 EMIT_JMP(cleanup_addr - addrs[i]); 347 341 
break; 348 342 } 349 - if (seen & SEEN_XREG) 343 + if (seen_or_pass0 & SEEN_XREG) 350 344 EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */ 351 345 EMIT1(0xc9); /* leaveq */ 352 346 } ··· 489 483 goto common_load; 490 484 case BPF_S_LDX_B_MSH: 491 485 if ((int)K < 0) { 492 - if (pc_ret0 != -1) { 493 - EMIT_JMP(addrs[pc_ret0] - addrs[i]); 486 + if (pc_ret0 > 0) { 487 + /* addrs[pc_ret0 - 1] is the start address */ 488 + EMIT_JMP(addrs[pc_ret0 - 1] - addrs[i]); 494 489 break; 495 490 } 496 491 CLEAR_A(); ··· 606 599 * use it to give the cleanup instruction(s) addr 607 600 */ 608 601 cleanup_addr = proglen - 1; /* ret */ 609 - if (seen) 602 + if (seen_or_pass0) 610 603 cleanup_addr -= 1; /* leaveq */ 611 - if (seen & SEEN_XREG) 604 + if (seen_or_pass0 & SEEN_XREG) 612 605 cleanup_addr -= 4; /* mov -8(%rbp),%rbx */ 613 606 614 607 if (image) { 615 - WARN_ON(proglen != oldproglen); 608 + if (proglen != oldproglen) 609 + pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen); 616 610 break; 617 611 } 618 612 if (proglen == oldproglen) {
+9 -18
drivers/net/bonding/bond_alb.c
··· 909 909 } 910 910 } 911 911 912 - /* hw is a boolean parameter that determines whether we should try and 913 - * set the hw address of the device as well as the hw address of the 914 - * net_device 915 - */ 916 - static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw) 912 + static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[]) 917 913 { 918 914 struct net_device *dev = slave->dev; 919 915 struct sockaddr s_addr; 920 916 921 - if (!hw) { 917 + if (slave->bond->params.mode == BOND_MODE_TLB) { 922 918 memcpy(dev->dev_addr, addr, dev->addr_len); 923 919 return 0; 924 920 } ··· 944 948 u8 tmp_mac_addr[ETH_ALEN]; 945 949 946 950 memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN); 947 - alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled); 948 - alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled); 951 + alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr); 952 + alb_set_slave_mac_addr(slave2, tmp_mac_addr); 949 953 950 954 } 951 955 ··· 1092 1096 1093 1097 /* Try setting slave mac to bond address and fall-through 1094 1098 to code handling that situation below... */ 1095 - alb_set_slave_mac_addr(slave, bond->dev->dev_addr, 1096 - bond->alb_info.rlb_enabled); 1099 + alb_set_slave_mac_addr(slave, bond->dev->dev_addr); 1097 1100 } 1098 1101 1099 1102 /* The slave's address is equal to the address of the bond. 
··· 1128 1133 } 1129 1134 1130 1135 if (free_mac_slave) { 1131 - alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr, 1132 - bond->alb_info.rlb_enabled); 1136 + alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr); 1133 1137 1134 1138 pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n", 1135 1139 bond->dev->name, slave->dev->name, ··· 1485 1491 { 1486 1492 int res; 1487 1493 1488 - res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr, 1489 - bond->alb_info.rlb_enabled); 1494 + res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr); 1490 1495 if (res) { 1491 1496 return res; 1492 1497 } ··· 1636 1643 alb_swap_mac_addr(bond, swap_slave, new_slave); 1637 1644 } else { 1638 1645 /* set the new_slave to the bond mac address */ 1639 - alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr, 1640 - bond->alb_info.rlb_enabled); 1646 + alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr); 1641 1647 } 1642 1648 1643 1649 if (swap_slave) { ··· 1696 1704 alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave); 1697 1705 alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave); 1698 1706 } else { 1699 - alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr, 1700 - bond->alb_info.rlb_enabled); 1707 + alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr); 1701 1708 1702 1709 read_lock(&bond->lock); 1703 1710 alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+1
drivers/net/dsa/mv88e6060.c
··· 9 9 */ 10 10 11 11 #include <linux/list.h> 12 + #include <linux/module.h> 12 13 #include <linux/netdevice.h> 13 14 #include <linux/phy.h> 14 15 #include <net/dsa.h>
+18 -4
drivers/net/dsa/mv88e6123_61_65.c
··· 9 9 */ 10 10 11 11 #include <linux/list.h> 12 + #include <linux/module.h> 12 13 #include <linux/netdevice.h> 13 14 #include <linux/phy.h> 14 15 #include <net/dsa.h> ··· 21 20 22 21 ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); 23 22 if (ret >= 0) { 24 - ret &= 0xfff0; 25 - if (ret == 0x1210) 23 + if (ret == 0x1212) 24 + return "Marvell 88E6123 (A1)"; 25 + if (ret == 0x1213) 26 + return "Marvell 88E6123 (A2)"; 27 + if ((ret & 0xfff0) == 0x1210) 26 28 return "Marvell 88E6123"; 27 - if (ret == 0x1610) 29 + 30 + if (ret == 0x1612) 31 + return "Marvell 88E6161 (A1)"; 32 + if (ret == 0x1613) 33 + return "Marvell 88E6161 (A2)"; 34 + if ((ret & 0xfff0) == 0x1610) 28 35 return "Marvell 88E6161"; 29 - if (ret == 0x1650) 36 + 37 + if (ret == 0x1652) 38 + return "Marvell 88E6165 (A1)"; 39 + if (ret == 0x1653) 40 + return "Marvell 88e6165 (A2)"; 41 + if ((ret & 0xfff0) == 0x1650) 30 42 return "Marvell 88E6165"; 31 43 } 32 44
+1
drivers/net/dsa/mv88e6131.c
··· 9 9 */ 10 10 11 11 #include <linux/list.h> 12 + #include <linux/module.h> 12 13 #include <linux/netdevice.h> 13 14 #include <linux/phy.h> 14 15 #include <net/dsa.h>
+1
drivers/net/dsa/mv88e6xxx.c
··· 9 9 */ 10 10 11 11 #include <linux/list.h> 12 + #include <linux/module.h> 12 13 #include <linux/netdevice.h> 13 14 #include <linux/phy.h> 14 15 #include <net/dsa.h>
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 3117 3117 int rx_ring_size = 0; 3118 3118 3119 3119 #ifdef BCM_CNIC 3120 - if (IS_MF_ISCSI_SD(bp)) { 3120 + if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) { 3121 3121 rx_ring_size = MIN_RX_SIZE_NONTPA; 3122 3122 bp->rx_ring_size = rx_ring_size; 3123 3123 } else
+44 -69
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
··· 1738 1738 struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0]; 1739 1739 u16 tx_start_idx, tx_idx; 1740 1740 u16 rx_start_idx, rx_idx; 1741 - u16 pkt_prod, bd_prod, rx_comp_cons; 1741 + u16 pkt_prod, bd_prod; 1742 1742 struct sw_tx_bd *tx_buf; 1743 1743 struct eth_tx_start_bd *tx_start_bd; 1744 1744 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; ··· 1873 1873 if (rx_idx != rx_start_idx + num_pkts) 1874 1874 goto test_loopback_exit; 1875 1875 1876 - rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons); 1877 - cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)]; 1876 + cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)]; 1878 1877 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; 1879 1878 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 1880 1879 if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) ··· 2120 2121 case ETH_SS_STATS: 2121 2122 if (is_multi(bp)) { 2122 2123 num_stats = bnx2x_num_stat_queues(bp) * 2123 - BNX2X_NUM_Q_STATS; 2124 - if (!IS_MF_MODE_STAT(bp)) 2125 - num_stats += BNX2X_NUM_STATS; 2126 - } else { 2127 - if (IS_MF_MODE_STAT(bp)) { 2128 - num_stats = 0; 2129 - for (i = 0; i < BNX2X_NUM_STATS; i++) 2130 - if (IS_FUNC_STAT(i)) 2131 - num_stats++; 2132 - } else 2133 - num_stats = BNX2X_NUM_STATS; 2134 - } 2124 + BNX2X_NUM_Q_STATS; 2125 + } else 2126 + num_stats = 0; 2127 + if (IS_MF_MODE_STAT(bp)) { 2128 + for (i = 0; i < BNX2X_NUM_STATS; i++) 2129 + if (IS_FUNC_STAT(i)) 2130 + num_stats++; 2131 + } else 2132 + num_stats += BNX2X_NUM_STATS; 2133 + 2135 2134 return num_stats; 2136 2135 2137 2136 case ETH_SS_TEST: ··· 2148 2151 2149 2152 switch (stringset) { 2150 2153 case ETH_SS_STATS: 2154 + k = 0; 2151 2155 if (is_multi(bp)) { 2152 - k = 0; 2153 2156 for_each_eth_queue(bp, i) { 2154 2157 memset(queue_name, 0, sizeof(queue_name)); 2155 2158 sprintf(queue_name, "%d", i); ··· 2160 2163 queue_name); 2161 2164 k += BNX2X_NUM_Q_STATS; 2162 2165 } 2163 - if (IS_MF_MODE_STAT(bp)) 2164 - break; 2165 - for (j = 0; j < 
BNX2X_NUM_STATS; j++) 2166 - strcpy(buf + (k + j)*ETH_GSTRING_LEN, 2167 - bnx2x_stats_arr[j].string); 2168 - } else { 2169 - for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 2170 - if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) 2171 - continue; 2172 - strcpy(buf + j*ETH_GSTRING_LEN, 2173 - bnx2x_stats_arr[i].string); 2174 - j++; 2175 - } 2176 2166 } 2167 + 2168 + 2169 + for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 2170 + if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) 2171 + continue; 2172 + strcpy(buf + (k + j)*ETH_GSTRING_LEN, 2173 + bnx2x_stats_arr[i].string); 2174 + j++; 2175 + } 2176 + 2177 2177 break; 2178 2178 2179 2179 case ETH_SS_TEST: ··· 2184 2190 { 2185 2191 struct bnx2x *bp = netdev_priv(dev); 2186 2192 u32 *hw_stats, *offset; 2187 - int i, j, k; 2193 + int i, j, k = 0; 2188 2194 2189 2195 if (is_multi(bp)) { 2190 - k = 0; 2191 2196 for_each_eth_queue(bp, i) { 2192 2197 hw_stats = (u32 *)&bp->fp[i].eth_q_stats; 2193 2198 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { ··· 2207 2214 } 2208 2215 k += BNX2X_NUM_Q_STATS; 2209 2216 } 2210 - if (IS_MF_MODE_STAT(bp)) 2211 - return; 2212 - hw_stats = (u32 *)&bp->eth_stats; 2213 - for (j = 0; j < BNX2X_NUM_STATS; j++) { 2214 - if (bnx2x_stats_arr[j].size == 0) { 2215 - /* skip this counter */ 2216 - buf[k + j] = 0; 2217 - continue; 2218 - } 2219 - offset = (hw_stats + bnx2x_stats_arr[j].offset); 2220 - if (bnx2x_stats_arr[j].size == 4) { 2221 - /* 4-byte counter */ 2222 - buf[k + j] = (u64) *offset; 2223 - continue; 2224 - } 2225 - /* 8-byte counter */ 2226 - buf[k + j] = HILO_U64(*offset, *(offset + 1)); 2227 - } 2228 - } else { 2229 - hw_stats = (u32 *)&bp->eth_stats; 2230 - for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 2231 - if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) 2232 - continue; 2233 - if (bnx2x_stats_arr[i].size == 0) { 2234 - /* skip this counter */ 2235 - buf[j] = 0; 2236 - j++; 2237 - continue; 2238 - } 2239 - offset = (hw_stats + bnx2x_stats_arr[i].offset); 2240 - if (bnx2x_stats_arr[i].size == 4) { 2241 - 
/* 4-byte counter */ 2242 - buf[j] = (u64) *offset; 2243 - j++; 2244 - continue; 2245 - } 2246 - /* 8-byte counter */ 2247 - buf[j] = HILO_U64(*offset, *(offset + 1)); 2217 + } 2218 + 2219 + hw_stats = (u32 *)&bp->eth_stats; 2220 + for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 2221 + if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) 2222 + continue; 2223 + if (bnx2x_stats_arr[i].size == 0) { 2224 + /* skip this counter */ 2225 + buf[k + j] = 0; 2248 2226 j++; 2227 + continue; 2249 2228 } 2229 + offset = (hw_stats + bnx2x_stats_arr[i].offset); 2230 + if (bnx2x_stats_arr[i].size == 4) { 2231 + /* 4-byte counter */ 2232 + buf[k + j] = (u64) *offset; 2233 + j++; 2234 + continue; 2235 + } 2236 + /* 8-byte counter */ 2237 + buf[k + j] = HILO_U64(*offset, *(offset + 1)); 2238 + j++; 2250 2239 } 2251 2240 } 2252 2241
+6 -5
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 941 941 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; 942 942 943 943 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n", 944 - i, j, rx_bd[1], rx_bd[0], sw_bd->skb); 944 + i, j, rx_bd[1], rx_bd[0], sw_bd->data); 945 945 } 946 946 947 947 start = RX_SGE(fp->rx_sge_prod); ··· 10536 10536 { 10537 10537 struct bnx2x *bp; 10538 10538 int rc; 10539 + bool chip_is_e1x = (board_type == BCM57710 || 10540 + board_type == BCM57711 || 10541 + board_type == BCM57711E); 10539 10542 10540 10543 SET_NETDEV_DEV(dev, &pdev->dev); 10541 10544 bp = netdev_priv(dev); ··· 10627 10624 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); 10628 10625 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); 10629 10626 10630 - if (CHIP_IS_E1x(bp)) { 10627 + if (chip_is_e1x) { 10631 10628 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); 10632 10629 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); 10633 10630 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); ··· 10638 10635 * Enable internal target-read (in case we are probed after PF FLR). 10639 10636 * Must be done prior to any BAR read access. Only for 57712 and up 10640 10637 */ 10641 - if (board_type != BCM57710 && 10642 - board_type != BCM57711 && 10643 - board_type != BCM57711E) 10638 + if (!chip_is_e1x) 10644 10639 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 10645 10640 10646 10641 /* Reset the load counter */
+43 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
··· 50 50 int exe_len, 51 51 union bnx2x_qable_obj *owner, 52 52 exe_q_validate validate, 53 + exe_q_remove remove, 53 54 exe_q_optimize optimize, 54 55 exe_q_execute exec, 55 56 exe_q_get get) ··· 67 66 68 67 /* Owner specific callbacks */ 69 68 o->validate = validate; 69 + o->remove = remove; 70 70 o->optimize = optimize; 71 71 o->execute = exec; 72 72 o->get = get; ··· 1342 1340 } 1343 1341 } 1344 1342 1343 + static int bnx2x_remove_vlan_mac(struct bnx2x *bp, 1344 + union bnx2x_qable_obj *qo, 1345 + struct bnx2x_exeq_elem *elem) 1346 + { 1347 + int rc = 0; 1348 + 1349 + /* If consumption wasn't required, nothing to do */ 1350 + if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 1351 + &elem->cmd_data.vlan_mac.vlan_mac_flags)) 1352 + return 0; 1353 + 1354 + switch (elem->cmd_data.vlan_mac.cmd) { 1355 + case BNX2X_VLAN_MAC_ADD: 1356 + case BNX2X_VLAN_MAC_MOVE: 1357 + rc = qo->vlan_mac.put_credit(&qo->vlan_mac); 1358 + break; 1359 + case BNX2X_VLAN_MAC_DEL: 1360 + rc = qo->vlan_mac.get_credit(&qo->vlan_mac); 1361 + break; 1362 + default: 1363 + return -EINVAL; 1364 + } 1365 + 1366 + if (rc != true) 1367 + return -EINVAL; 1368 + 1369 + return 0; 1370 + } 1371 + 1345 1372 /** 1346 1373 * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes. 
1347 1374 * ··· 1832 1801 1833 1802 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { 1834 1803 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == 1835 - *vlan_mac_flags) 1804 + *vlan_mac_flags) { 1805 + rc = exeq->remove(bp, exeq->owner, exeq_pos); 1806 + if (rc) { 1807 + BNX2X_ERR("Failed to remove command\n"); 1808 + return rc; 1809 + } 1836 1810 list_del(&exeq_pos->link); 1811 + } 1837 1812 } 1838 1813 1839 1814 spin_unlock_bh(&exeq->lock); ··· 1945 1908 bnx2x_exe_queue_init(bp, 1946 1909 &mac_obj->exe_queue, 1, qable_obj, 1947 1910 bnx2x_validate_vlan_mac, 1911 + bnx2x_remove_vlan_mac, 1948 1912 bnx2x_optimize_vlan_mac, 1949 1913 bnx2x_execute_vlan_mac, 1950 1914 bnx2x_exeq_get_mac); ··· 1962 1924 bnx2x_exe_queue_init(bp, 1963 1925 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, 1964 1926 qable_obj, bnx2x_validate_vlan_mac, 1927 + bnx2x_remove_vlan_mac, 1965 1928 bnx2x_optimize_vlan_mac, 1966 1929 bnx2x_execute_vlan_mac, 1967 1930 bnx2x_exeq_get_mac); ··· 2002 1963 bnx2x_exe_queue_init(bp, 2003 1964 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT, 2004 1965 qable_obj, bnx2x_validate_vlan_mac, 1966 + bnx2x_remove_vlan_mac, 2005 1967 bnx2x_optimize_vlan_mac, 2006 1968 bnx2x_execute_vlan_mac, 2007 1969 bnx2x_exeq_get_vlan); ··· 2049 2009 bnx2x_exe_queue_init(bp, 2050 2010 &vlan_mac_obj->exe_queue, 1, qable_obj, 2051 2011 bnx2x_validate_vlan_mac, 2012 + bnx2x_remove_vlan_mac, 2052 2013 bnx2x_optimize_vlan_mac, 2053 2014 bnx2x_execute_vlan_mac, 2054 2015 bnx2x_exeq_get_vlan_mac); ··· 2066 2025 &vlan_mac_obj->exe_queue, 2067 2026 CLASSIFY_RULES_COUNT, 2068 2027 qable_obj, bnx2x_validate_vlan_mac, 2028 + bnx2x_remove_vlan_mac, 2069 2029 bnx2x_optimize_vlan_mac, 2070 2030 bnx2x_execute_vlan_mac, 2071 2031 bnx2x_exeq_get_vlan_mac);
+11
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
··· 161 161 union bnx2x_qable_obj *o, 162 162 struct bnx2x_exeq_elem *elem); 163 163 164 + typedef int (*exe_q_remove)(struct bnx2x *bp, 165 + union bnx2x_qable_obj *o, 166 + struct bnx2x_exeq_elem *elem); 167 + 164 168 /** 165 169 * @return positive is entry was optimized, 0 - if not, negative 166 170 * in case of an error. ··· 207 203 */ 208 204 exe_q_validate validate; 209 205 206 + /** 207 + * Called before removing pending commands, cleaning allocated 208 + * resources (e.g., credits from validate) 209 + */ 210 + exe_q_remove remove; 210 211 211 212 /** 212 213 * This will try to cancel the current pending commands list 213 214 * considering the new command. 215 + * 216 + * Returns the number of optimized commands or a negative error code 214 217 * 215 218 * Must run under exe_queue->lock 216 219 */
+2 -7
drivers/net/ethernet/broadcom/tg3.c
··· 6667 6667 iph = ip_hdr(skb); 6668 6668 tcp_opt_len = tcp_optlen(skb); 6669 6669 6670 - if (skb_is_gso_v6(skb)) { 6671 - hdr_len = skb_headlen(skb) - ETH_HLEN; 6672 - } else { 6673 - u32 ip_tcp_len; 6670 + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; 6674 6671 6675 - ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); 6676 - hdr_len = ip_tcp_len + tcp_opt_len; 6677 - 6672 + if (!skb_is_gso_v6(skb)) { 6678 6673 iph->check = 0; 6679 6674 iph->tot_len = htons(mss + hdr_len); 6680 6675 }
+1 -1
drivers/net/ethernet/cisco/enic/enic.h
··· 32 32 33 33 #define DRV_NAME "enic" 34 34 #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 35 - #define DRV_VERSION "2.1.1.28" 35 + #define DRV_VERSION "2.1.1.31" 36 36 #define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc" 37 37 38 38 #define ENIC_BARS_MAX 6
+22 -16
drivers/net/ethernet/cisco/enic/enic_main.c
··· 57 57 58 58 #define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ 59 59 #define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */ 60 + #define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ 60 61 61 62 /* Supported devices */ 62 63 static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = { 63 64 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, 64 65 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) }, 66 + { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) }, 65 67 { 0, } /* end of table */ 66 68 }; 67 69 ··· 132 130 int enic_sriov_enabled(struct enic *enic) 133 131 { 134 132 return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0; 133 + } 134 + 135 + static int enic_is_sriov_vf(struct enic *enic) 136 + { 137 + return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF; 135 138 } 136 139 137 140 int enic_is_valid_vf(struct enic *enic, int vf) ··· 444 437 445 438 if (mtu && mtu != enic->port_mtu) { 446 439 enic->port_mtu = mtu; 447 - if (enic_is_dynamic(enic)) { 440 + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { 448 441 mtu = max_t(int, ENIC_MIN_MTU, 449 442 min_t(int, ENIC_MAX_MTU, mtu)); 450 443 if (mtu != netdev->mtu) ··· 856 849 { 857 850 struct enic *enic = netdev_priv(netdev); 858 851 859 - if (enic_is_dynamic(enic)) { 852 + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { 860 853 if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr)) 861 854 return -EADDRNOTAVAIL; 862 855 } else { ··· 1615 1608 for (i = 0; i < enic->rq_count; i++) 1616 1609 vnic_rq_enable(&enic->rq[i]); 1617 1610 1618 - if (!enic_is_dynamic(enic)) 1611 + if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) 1619 1612 enic_dev_add_station_addr(enic); 1620 1613 1621 1614 enic_set_rx_mode(netdev); ··· 1666 1659 netif_carrier_off(netdev); 1667 1660 netif_tx_disable(netdev); 1668 1661 1669 - if (!enic_is_dynamic(enic)) 1662 + if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) 1670 1663 enic_dev_del_station_addr(enic); 1671 
1664 1672 1665 for (i = 0; i < enic->wq_count; i++) { ··· 1703 1696 if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU) 1704 1697 return -EINVAL; 1705 1698 1706 - if (enic_is_dynamic(enic)) 1699 + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) 1707 1700 return -EOPNOTSUPP; 1708 1701 1709 1702 if (running) ··· 2270 2263 int using_dac = 0; 2271 2264 unsigned int i; 2272 2265 int err; 2273 - int num_pps = 1; 2274 2266 #ifdef CONFIG_PCI_IOV 2275 2267 int pos = 0; 2276 2268 #endif 2269 + int num_pps = 1; 2277 2270 2278 2271 /* Allocate net device structure and initialize. Private 2279 2272 * instance data is initialized to zero. ··· 2383 2376 num_pps = enic->num_vfs; 2384 2377 } 2385 2378 } 2386 - 2387 2379 #endif 2380 + 2388 2381 /* Allocate structure for port profiles */ 2389 2382 enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL); 2390 2383 if (!enic->pp) { 2391 2384 pr_err("port profile alloc failed, aborting\n"); 2392 2385 err = -ENOMEM; 2393 - goto err_out_disable_sriov; 2386 + goto err_out_disable_sriov_pp; 2394 2387 } 2395 2388 2396 2389 /* Issue device open to get device in known state ··· 2399 2392 err = enic_dev_open(enic); 2400 2393 if (err) { 2401 2394 dev_err(dev, "vNIC dev open failed, aborting\n"); 2402 - goto err_out_free_pp; 2395 + goto err_out_disable_sriov; 2403 2396 } 2404 2397 2405 2398 /* Setup devcmd lock ··· 2433 2426 * called later by an upper layer. 
2434 2427 */ 2435 2428 2436 - if (!enic_is_dynamic(enic)) { 2429 + if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) { 2437 2430 err = vnic_dev_init(enic->vdev, 0); 2438 2431 if (err) { 2439 2432 dev_err(dev, "vNIC dev init failed, aborting\n"); ··· 2467 2460 (void)enic_change_mtu(netdev, enic->port_mtu); 2468 2461 2469 2462 #ifdef CONFIG_PCI_IOV 2470 - if (enic_is_dynamic(enic) && pdev->is_virtfn && 2471 - is_zero_ether_addr(enic->mac_addr)) 2463 + if (enic_is_sriov_vf(enic) && is_zero_ether_addr(enic->mac_addr)) 2472 2464 random_ether_addr(enic->mac_addr); 2473 2465 #endif 2474 2466 ··· 2480 2474 enic->tx_coalesce_usecs = enic->config.intr_timer_usec; 2481 2475 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs; 2482 2476 2483 - if (enic_is_dynamic(enic)) 2477 + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) 2484 2478 netdev->netdev_ops = &enic_netdev_dynamic_ops; 2485 2479 else 2486 2480 netdev->netdev_ops = &enic_netdev_ops; ··· 2522 2516 enic_dev_deinit(enic); 2523 2517 err_out_dev_close: 2524 2518 vnic_dev_close(enic->vdev); 2525 - err_out_free_pp: 2526 - kfree(enic->pp); 2527 2519 err_out_disable_sriov: 2520 + kfree(enic->pp); 2521 + err_out_disable_sriov_pp: 2528 2522 #ifdef CONFIG_PCI_IOV 2529 2523 if (enic_sriov_enabled(enic)) { 2530 2524 pci_disable_sriov(pdev); 2531 2525 enic->priv_flags &= ~ENIC_SRIOV_ENABLED; 2532 2526 } 2533 2527 err_out_vnic_unregister: 2534 - vnic_dev_unregister(enic->vdev); 2535 2528 #endif 2529 + vnic_dev_unregister(enic->vdev); 2536 2530 err_out_iounmap: 2537 2531 enic_iounmap(enic); 2538 2532 err_out_release_regions:
+1 -2
drivers/net/ethernet/emulex/benet/be_main.c
··· 1786 1786 static u32 be_num_rxqs_want(struct be_adapter *adapter) 1787 1787 { 1788 1788 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && 1789 - !sriov_enabled(adapter) && be_physfn(adapter) && 1790 - !be_is_mc(adapter)) { 1789 + !sriov_enabled(adapter) && be_physfn(adapter)) { 1791 1790 return 1 + MAX_RSS_QS; /* one default non-RSS queue */ 1792 1791 } else { 1793 1792 dev_warn(&adapter->pdev->dev,
+1
drivers/net/ethernet/faraday/ftgmac100.c
··· 25 25 #include <linux/etherdevice.h> 26 26 #include <linux/ethtool.h> 27 27 #include <linux/init.h> 28 + #include <linux/interrupt.h> 28 29 #include <linux/io.h> 29 30 #include <linux/module.h> 30 31 #include <linux/netdevice.h>
+1
drivers/net/ethernet/faraday/ftmac100.c
··· 25 25 #include <linux/etherdevice.h> 26 26 #include <linux/ethtool.h> 27 27 #include <linux/init.h> 28 + #include <linux/interrupt.h> 28 29 #include <linux/io.h> 29 30 #include <linux/mii.h> 30 31 #include <linux/module.h>
+1 -1
drivers/net/ethernet/intel/igb/Makefile
··· 1 1 ################################################################################ 2 2 # 3 3 # Intel 82575 PCI-Express Ethernet Linux driver 4 - # Copyright(c) 1999 - 2011 Intel Corporation. 4 + # Copyright(c) 1999 - 2012 Intel Corporation. 5 5 # 6 6 # This program is free software; you can redistribute it and/or modify it 7 7 # under the terms and conditions of the GNU General Public License,
+1 -1
drivers/net/ethernet/intel/igb/e1000_82575.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2007-2011 Intel Corporation. 4 + Copyright(c) 2007-2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License,
+1 -1
drivers/net/ethernet/intel/igb/e1000_82575.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2007-2011 Intel Corporation. 4 + Copyright(c) 2007-2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License,
+1 -1
drivers/net/ethernet/intel/igb/e1000_defines.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2007-2011 Intel Corporation. 4 + Copyright(c) 2007-2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License,
+1 -1
drivers/net/ethernet/intel/igb/e1000_hw.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2007-2011 Intel Corporation. 4 + Copyright(c) 2007-2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License,
+2 -2
drivers/net/ethernet/intel/igb/e1000_mac.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2007-2011 Intel Corporation. 4 + Copyright(c) 2007-2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 151 151 * Writes value at the given offset in the register array which stores 152 152 * the VLAN filter table. 153 153 **/ 154 - void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) 154 + static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) 155 155 { 156 156 int i; 157 157
+1 -1
drivers/net/ethernet/intel/igb/e1000_mac.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2007-2011 Intel Corporation. 4 + Copyright(c) 2007-2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License,
+1 -1
drivers/net/ethernet/intel/igb/e1000_mbx.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2007-2011 Intel Corporation. 4 + Copyright(c) 2007-2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License,
+1 -1
drivers/net/ethernet/intel/igb/e1000_mbx.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2007-2011 Intel Corporation. 4 + Copyright(c) 2007-2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License,
+1 -1
drivers/net/ethernet/intel/igb/e1000_nvm.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2007-2011 Intel Corporation. 4 + Copyright(c) 2007-2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License,
+1 -1
drivers/net/ethernet/intel/igb/e1000_nvm.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2011 Intel Corporation. 4 + Copyright(c) 2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License,
+1 -1
drivers/net/ethernet/intel/igb/e1000_phy.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2007-2011 Intel Corporation. 4 + Copyright(c) 2007-2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License,
+1 -1
drivers/net/ethernet/intel/igb/e1000_phy.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2007-2011 Intel Corporation. 4 + Copyright(c) 2007-2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License,
+1 -1
drivers/net/ethernet/intel/igb/e1000_regs.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2007-2011 Intel Corporation. 4 + Copyright(c) 2007-2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License,
+1 -1
drivers/net/ethernet/intel/igb/igb.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2007-2011 Intel Corporation. 4 + Copyright(c) 2007-2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License,
+1 -1
drivers/net/ethernet/intel/igb/igb_ethtool.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2007-2011 Intel Corporation. 4 + Copyright(c) 2007-2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License,
+5 -5
drivers/net/ethernet/intel/igb/igb_main.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel(R) Gigabit Ethernet Linux driver 4 - Copyright(c) 2007-2011 Intel Corporation. 4 + Copyright(c) 2007-2012 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 68 68 char igb_driver_version[] = DRV_VERSION; 69 69 static const char igb_driver_string[] = 70 70 "Intel(R) Gigabit Ethernet Network Driver"; 71 - static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation."; 71 + static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation."; 72 72 73 73 static const struct e1000_info *igb_info_tbl[] = { 74 74 [board_82575] = &e1000_82575_info, ··· 4003 4003 } 4004 4004 } 4005 4005 4006 - void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, 4007 - u32 type_tucmd, u32 mss_l4len_idx) 4006 + static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, 4007 + u32 type_tucmd, u32 mss_l4len_idx) 4008 4008 { 4009 4009 struct e1000_adv_tx_context_desc *context_desc; 4010 4010 u16 i = tx_ring->next_to_use; ··· 5623 5623 return IRQ_HANDLED; 5624 5624 } 5625 5625 5626 - void igb_ring_irq_enable(struct igb_q_vector *q_vector) 5626 + static void igb_ring_irq_enable(struct igb_q_vector *q_vector) 5627 5627 { 5628 5628 struct igb_adapter *adapter = q_vector->adapter; 5629 5629 struct e1000_hw *hw = &adapter->hw;
+1 -2
drivers/net/ethernet/intel/igbvf/ethtool.c
··· 468 468 469 469 void igbvf_set_ethtool_ops(struct net_device *netdev) 470 470 { 471 - /* have to "undeclare" const on this struct to remove warnings */ 472 - SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops); 471 + SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops); 473 472 }
-5
drivers/net/ethernet/intel/igbvf/netdev.c
··· 1194 1194 struct igbvf_adapter *adapter = netdev_priv(netdev); 1195 1195 struct e1000_hw *hw = &adapter->hw; 1196 1196 1197 - igbvf_irq_disable(adapter); 1198 - 1199 - if (!test_bit(__IGBVF_DOWN, &adapter->state)) 1200 - igbvf_irq_enable(adapter); 1201 - 1202 1197 if (hw->mac.ops.set_vfta(hw, vid, false)) { 1203 1198 dev_err(&adapter->pdev->dev, 1204 1199 "Failed to remove vlan id %d\n", vid);
+16 -16
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
··· 161 161 162 162 /* Receive DMA Registers */ 163 163 #define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ 164 - (0x0D000 + ((_i - 64) * 0x40))) 164 + (0x0D000 + (((_i) - 64) * 0x40))) 165 165 #define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ 166 - (0x0D004 + ((_i - 64) * 0x40))) 166 + (0x0D004 + (((_i) - 64) * 0x40))) 167 167 #define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ 168 - (0x0D008 + ((_i - 64) * 0x40))) 168 + (0x0D008 + (((_i) - 64) * 0x40))) 169 169 #define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ 170 - (0x0D010 + ((_i - 64) * 0x40))) 170 + (0x0D010 + (((_i) - 64) * 0x40))) 171 171 #define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ 172 - (0x0D018 + ((_i - 64) * 0x40))) 172 + (0x0D018 + (((_i) - 64) * 0x40))) 173 173 #define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ 174 - (0x0D028 + ((_i - 64) * 0x40))) 174 + (0x0D028 + (((_i) - 64) * 0x40))) 175 175 #define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ 176 - (0x0D02C + ((_i - 64) * 0x40))) 176 + (0x0D02C + (((_i) - 64) * 0x40))) 177 177 #define IXGBE_RSCDBU 0x03028 178 178 #define IXGBE_RDDCC 0x02F20 179 179 #define IXGBE_RXMEMWRAP 0x03190 ··· 186 186 */ 187 187 #define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ 188 188 (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ 189 - (0x0D014 + ((_i - 64) * 0x40)))) 189 + (0x0D014 + (((_i) - 64) * 0x40)))) 190 190 /* 191 191 * Rx DCA Control Register: 192 192 * 00-15 : 0x02200 + n*4 ··· 195 195 */ 196 196 #define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ 197 197 (((_i) < 64) ? 
(0x0100C + ((_i) * 0x40)) : \ 198 - (0x0D00C + ((_i - 64) * 0x40)))) 198 + (0x0D00C + (((_i) - 64) * 0x40)))) 199 199 #define IXGBE_RDRXCTL 0x02F00 200 200 #define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) 201 201 /* 8 of these 0x03C00 - 0x03C1C */ ··· 344 344 345 345 #define IXGBE_WUPL 0x05900 346 346 #define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ 347 - #define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */ 348 - #define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host 349 - * Filter Table */ 347 + #define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */ 348 + #define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host 349 + * Filter Table */ 350 350 351 351 #define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 352 352 #define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 ··· 1485 1485 #define IXGBE_LED_BLINK_BASE 0x00000080 1486 1486 #define IXGBE_LED_MODE_MASK_BASE 0x0000000F 1487 1487 #define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) 1488 - #define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) 1488 + #define IXGBE_LED_MODE_SHIFT(_i) (8 * (_i)) 1489 1489 #define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) 1490 1490 #define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) 1491 1491 #define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) ··· 2068 2068 2069 2069 /* SR-IOV specific macros */ 2070 2070 #define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) 2071 - #define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4)) 2072 - #define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600)) 2073 - #define IXGBE_VFLREC(_i) (0x00700 + (_i * 4)) 2071 + #define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4)) 2072 + #define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600)) 2073 + #define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) 2074 2074 2075 2075 enum ixgbe_fdir_pballoc_type { 2076 2076 IXGBE_FDIR_PBALLOC_NONE = 0,
+3 -2
drivers/net/ethernet/intel/ixgbevf/ethtool.c
··· 56 56 offsetof(struct ixgbevf_adapter, m), \ 57 57 offsetof(struct ixgbevf_adapter, b), \ 58 58 offsetof(struct ixgbevf_adapter, r) 59 - static struct ixgbe_stats ixgbe_gstrings_stats[] = { 59 + 60 + static const struct ixgbe_stats ixgbe_gstrings_stats[] = { 60 61 {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc, 61 62 stats.saved_reset_vfgprc)}, 62 63 {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc, ··· 672 671 return 0; 673 672 } 674 673 675 - static struct ethtool_ops ixgbevf_ethtool_ops = { 674 + static const struct ethtool_ops ixgbevf_ethtool_ops = { 676 675 .get_settings = ixgbevf_get_settings, 677 676 .get_drvinfo = ixgbevf_get_drvinfo, 678 677 .get_regs_len = ixgbevf_get_regs_len,
+4 -4
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
··· 279 279 board_X540_vf, 280 280 }; 281 281 282 - extern struct ixgbevf_info ixgbevf_82599_vf_info; 283 - extern struct ixgbevf_info ixgbevf_X540_vf_info; 284 - extern struct ixgbe_mbx_operations ixgbevf_mbx_ops; 282 + extern const struct ixgbevf_info ixgbevf_82599_vf_info; 283 + extern const struct ixgbevf_info ixgbevf_X540_vf_info; 284 + extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; 285 285 286 286 /* needed by ethtool.c */ 287 - extern char ixgbevf_driver_name[]; 287 + extern const char ixgbevf_driver_name[]; 288 288 extern const char ixgbevf_driver_version[]; 289 289 290 290 extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
+22 -19
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 53 53 54 54 #include "ixgbevf.h" 55 55 56 - char ixgbevf_driver_name[] = "ixgbevf"; 56 + const char ixgbevf_driver_name[] = "ixgbevf"; 57 57 static const char ixgbevf_driver_string[] = 58 58 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 59 59 ··· 917 917 struct ixgbe_hw *hw = &adapter->hw; 918 918 u32 eicr; 919 919 u32 msg; 920 + bool got_ack = false; 920 921 921 922 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS); 922 923 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr); 923 924 924 - if (!hw->mbx.ops.check_for_ack(hw)) { 925 - /* 926 - * checking for the ack clears the PFACK bit. Place 927 - * it back in the v2p_mailbox cache so that anyone 928 - * polling for an ack will not miss it. Also 929 - * avoid the read below because the code to read 930 - * the mailbox will also clear the ack bit. This was 931 - * causing lost acks. Just cache the bit and exit 932 - * the IRQ handler. 933 - */ 934 - hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; 925 + if (!hw->mbx.ops.check_for_ack(hw)) 926 + got_ack = true; 927 + 928 + if (!hw->mbx.ops.check_for_msg(hw)) { 929 + hw->mbx.ops.read(hw, &msg, 1); 930 + 931 + if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) 932 + mod_timer(&adapter->watchdog_timer, 933 + round_jiffies(jiffies + 1)); 934 + 935 + if (msg & IXGBE_VT_MSGTYPE_NACK) 936 + pr_warn("Last Request of type %2.2x to PF Nacked\n", 937 + msg & 0xFF); 935 938 goto out; 936 939 } 937 940 938 - /* Not an ack interrupt, go ahead and read the message */ 939 - hw->mbx.ops.read(hw, &msg, 1); 940 - 941 - if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) 942 - mod_timer(&adapter->watchdog_timer, 943 - round_jiffies(jiffies + 1)); 944 - 941 + /* 942 + * checking for the ack clears the PFACK bit. Place 943 + * it back in the v2p_mailbox cache so that anyone 944 + * polling for an ack will not miss it 945 + */ 946 + if (got_ack) 947 + hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; 945 948 out: 946 949 return IRQ_HANDLED; 947 950 }
+2 -1
drivers/net/ethernet/intel/ixgbevf/mbx.c
··· 26 26 *******************************************************************************/ 27 27 28 28 #include "mbx.h" 29 + #include "ixgbevf.h" 29 30 30 31 /** 31 32 * ixgbevf_poll_for_msg - Wait for message notification ··· 329 328 return 0; 330 329 } 331 330 332 - struct ixgbe_mbx_operations ixgbevf_mbx_ops = { 331 + const struct ixgbe_mbx_operations ixgbevf_mbx_ops = { 333 332 .init_params = ixgbevf_init_mbx_params_vf, 334 333 .read = ixgbevf_read_mbx_vf, 335 334 .write = ixgbevf_write_mbx_vf,
+4 -3
drivers/net/ethernet/intel/ixgbevf/vf.c
··· 26 26 *******************************************************************************/ 27 27 28 28 #include "vf.h" 29 + #include "ixgbevf.h" 29 30 30 31 /** 31 32 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx ··· 402 401 return 0; 403 402 } 404 403 405 - static struct ixgbe_mac_operations ixgbevf_mac_ops = { 404 + static const struct ixgbe_mac_operations ixgbevf_mac_ops = { 406 405 .init_hw = ixgbevf_init_hw_vf, 407 406 .reset_hw = ixgbevf_reset_hw_vf, 408 407 .start_hw = ixgbevf_start_hw_vf, ··· 416 415 .set_vfta = ixgbevf_set_vfta_vf, 417 416 }; 418 417 419 - struct ixgbevf_info ixgbevf_82599_vf_info = { 418 + const struct ixgbevf_info ixgbevf_82599_vf_info = { 420 419 .mac = ixgbe_mac_82599_vf, 421 420 .mac_ops = &ixgbevf_mac_ops, 422 421 }; 423 422 424 - struct ixgbevf_info ixgbevf_X540_vf_info = { 423 + const struct ixgbevf_info ixgbevf_X540_vf_info = { 425 424 .mac = ixgbe_mac_X540_vf, 426 425 .mac_ops = &ixgbevf_mac_ops, 427 426 };
+1 -1
drivers/net/ethernet/intel/ixgbevf/vf.h
··· 167 167 168 168 struct ixgbevf_info { 169 169 enum ixgbe_mac_type mac; 170 - struct ixgbe_mac_operations *mac_ops; 170 + const struct ixgbe_mac_operations *mac_ops; 171 171 }; 172 172 173 173 #endif /* __IXGBE_VF_H__ */
+14
drivers/net/ethernet/marvell/mv643xx_eth.c
··· 136 136 #define INT_MASK 0x0068 137 137 #define INT_MASK_EXT 0x006c 138 138 #define TX_FIFO_URGENT_THRESHOLD 0x0074 139 + #define RX_DISCARD_FRAME_CNT 0x0084 140 + #define RX_OVERRUN_FRAME_CNT 0x0088 139 141 #define TXQ_FIX_PRIO_CONF_MOVED 0x00dc 140 142 #define TX_BW_RATE_MOVED 0x00e0 141 143 #define TX_BW_MTU_MOVED 0x00e8 ··· 336 334 u32 bad_crc_event; 337 335 u32 collision; 338 336 u32 late_collision; 337 + /* Non MIB hardware counters */ 338 + u32 rx_discard; 339 + u32 rx_overrun; 339 340 }; 340 341 341 342 struct lro_counters { ··· 1230 1225 1231 1226 for (i = 0; i < 0x80; i += 4) 1232 1227 mib_read(mp, i); 1228 + 1229 + /* Clear non MIB hw counters also */ 1230 + rdlp(mp, RX_DISCARD_FRAME_CNT); 1231 + rdlp(mp, RX_OVERRUN_FRAME_CNT); 1233 1232 } 1234 1233 1235 1234 static void mib_counters_update(struct mv643xx_eth_private *mp) ··· 1271 1262 p->bad_crc_event += mib_read(mp, 0x74); 1272 1263 p->collision += mib_read(mp, 0x78); 1273 1264 p->late_collision += mib_read(mp, 0x7c); 1265 + /* Non MIB hardware counters */ 1266 + p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); 1267 + p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); 1274 1268 spin_unlock_bh(&mp->mib_counters_lock); 1275 1269 1276 1270 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); ··· 1425 1413 MIBSTAT(bad_crc_event), 1426 1414 MIBSTAT(collision), 1427 1415 MIBSTAT(late_collision), 1416 + MIBSTAT(rx_discard), 1417 + MIBSTAT(rx_overrun), 1428 1418 LROSTAT(lro_aggregated), 1429 1419 LROSTAT(lro_flushed), 1430 1420 LROSTAT(lro_no_desc),
+83 -26
drivers/net/ethernet/marvell/skge.c
··· 931 931 } 932 932 933 933 /* Allocate and setup a new buffer for receiving */ 934 - static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, 935 - struct sk_buff *skb, unsigned int bufsize) 934 + static int skge_rx_setup(struct pci_dev *pdev, 935 + struct skge_element *e, 936 + struct sk_buff *skb, unsigned int bufsize) 936 937 { 937 938 struct skge_rx_desc *rd = e->desc; 938 - u64 map; 939 + dma_addr_t map; 939 940 940 - map = pci_map_single(skge->hw->pdev, skb->data, bufsize, 941 + map = pci_map_single(pdev, skb->data, bufsize, 941 942 PCI_DMA_FROMDEVICE); 943 + if (pci_dma_mapping_error(pdev, map)) 944 + goto mapping_error; 942 945 943 - rd->dma_lo = map; 944 - rd->dma_hi = map >> 32; 946 + rd->dma_lo = lower_32_bits(map); 947 + rd->dma_hi = upper_32_bits(map); 945 948 e->skb = skb; 946 949 rd->csum1_start = ETH_HLEN; 947 950 rd->csum2_start = ETH_HLEN; ··· 956 953 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; 957 954 dma_unmap_addr_set(e, mapaddr, map); 958 955 dma_unmap_len_set(e, maplen, bufsize); 956 + return 0; 957 + 958 + mapping_error: 959 + if (net_ratelimit()) 960 + dev_warn(&pdev->dev, "%s: rx mapping error\n", 961 + skb->dev->name); 962 + return -EIO; 959 963 } 960 964 961 965 /* Resume receiving using existing skb, ··· 1024 1014 return -ENOMEM; 1025 1015 1026 1016 skb_reserve(skb, NET_IP_ALIGN); 1027 - skge_rx_setup(skge, e, skb, skge->rx_buf_size); 1017 + if (skge_rx_setup(skge->hw->pdev, e, skb, skge->rx_buf_size)) { 1018 + kfree_skb(skb); 1019 + return -ENOMEM; 1020 + } 1021 + 1028 1022 } while ((e = e->next) != ring->start); 1029 1023 1030 1024 ring->to_clean = ring->start; ··· 2590 2576 } 2591 2577 2592 2578 /* Initialize MAC */ 2579 + netif_carrier_off(dev); 2593 2580 spin_lock_bh(&hw->phy_lock); 2594 2581 if (is_genesis(hw)) 2595 2582 genesis_mac_init(hw, port); ··· 2743 2728 struct skge_tx_desc *td; 2744 2729 int i; 2745 2730 u32 control, len; 2746 - u64 map; 2731 + dma_addr_t map; 2747 2732 
2748 2733 if (skb_padto(skb, ETH_ZLEN)) 2749 2734 return NETDEV_TX_OK; ··· 2757 2742 e->skb = skb; 2758 2743 len = skb_headlen(skb); 2759 2744 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); 2745 + if (pci_dma_mapping_error(hw->pdev, map)) 2746 + goto mapping_error; 2747 + 2760 2748 dma_unmap_addr_set(e, mapaddr, map); 2761 2749 dma_unmap_len_set(e, maplen, len); 2762 2750 2763 - td->dma_lo = map; 2764 - td->dma_hi = map >> 32; 2751 + td->dma_lo = lower_32_bits(map); 2752 + td->dma_hi = upper_32_bits(map); 2765 2753 2766 2754 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2767 2755 const int offset = skb_checksum_start_offset(skb); ··· 2795 2777 2796 2778 map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, 2797 2779 skb_frag_size(frag), DMA_TO_DEVICE); 2780 + if (dma_mapping_error(&hw->pdev->dev, map)) 2781 + goto mapping_unwind; 2798 2782 2799 2783 e = e->next; 2800 2784 e->skb = skb; 2801 2785 tf = e->desc; 2802 2786 BUG_ON(tf->control & BMU_OWN); 2803 2787 2804 - tf->dma_lo = map; 2805 - tf->dma_hi = (u64) map >> 32; 2788 + tf->dma_lo = lower_32_bits(map); 2789 + tf->dma_hi = upper_32_bits(map); 2806 2790 dma_unmap_addr_set(e, mapaddr, map); 2807 2791 dma_unmap_len_set(e, maplen, skb_frag_size(frag)); 2808 2792 ··· 2816 2796 wmb(); 2817 2797 td->control = BMU_OWN | BMU_SW | BMU_STF | control | len; 2818 2798 wmb(); 2799 + 2800 + netdev_sent_queue(dev, skb->len); 2819 2801 2820 2802 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START); 2821 2803 ··· 2834 2812 } 2835 2813 2836 2814 return NETDEV_TX_OK; 2815 + 2816 + mapping_unwind: 2817 + /* unroll any pages that were already mapped. 
*/ 2818 + if (e != skge->tx_ring.to_use) { 2819 + struct skge_element *u; 2820 + 2821 + for (u = skge->tx_ring.to_use->next; u != e; u = u->next) 2822 + pci_unmap_page(hw->pdev, dma_unmap_addr(u, mapaddr), 2823 + dma_unmap_len(u, maplen), 2824 + PCI_DMA_TODEVICE); 2825 + e = skge->tx_ring.to_use; 2826 + } 2827 + /* undo the mapping for the skb header */ 2828 + pci_unmap_single(hw->pdev, dma_unmap_addr(e, mapaddr), 2829 + dma_unmap_len(e, maplen), 2830 + PCI_DMA_TODEVICE); 2831 + mapping_error: 2832 + /* mapping error causes error message and packet to be discarded. */ 2833 + if (net_ratelimit()) 2834 + dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); 2835 + dev_kfree_skb(skb); 2836 + return NETDEV_TX_OK; 2837 2837 } 2838 2838 2839 2839 2840 2840 /* Free resources associated with this reing element */ 2841 - static void skge_tx_free(struct skge_port *skge, struct skge_element *e, 2842 - u32 control) 2841 + static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e, 2842 + u32 control) 2843 2843 { 2844 - struct pci_dev *pdev = skge->hw->pdev; 2845 - 2846 2844 /* skb header vs. 
fragment */ 2847 2845 if (control & BMU_STF) 2848 2846 pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr), ··· 2872 2830 pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr), 2873 2831 dma_unmap_len(e, maplen), 2874 2832 PCI_DMA_TODEVICE); 2875 - 2876 - if (control & BMU_EOF) { 2877 - netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev, 2878 - "tx done slot %td\n", e - skge->tx_ring.start); 2879 - 2880 - dev_kfree_skb(e->skb); 2881 - } 2882 2833 } 2883 2834 2884 2835 /* Free all buffers in transmit ring */ ··· 2882 2847 2883 2848 for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { 2884 2849 struct skge_tx_desc *td = e->desc; 2885 - skge_tx_free(skge, e, td->control); 2850 + 2851 + skge_tx_unmap(skge->hw->pdev, e, td->control); 2852 + 2853 + if (td->control & BMU_EOF) 2854 + dev_kfree_skb(e->skb); 2886 2855 td->control = 0; 2887 2856 } 2888 2857 2858 + netdev_reset_queue(dev); 2889 2859 skge->tx_ring.to_clean = e; 2890 2860 } 2891 2861 ··· 3099 3059 if (!nskb) 3100 3060 goto resubmit; 3101 3061 3062 + if (unlikely(skge_rx_setup(skge->hw->pdev, e, nskb, skge->rx_buf_size))) { 3063 + dev_kfree_skb(nskb); 3064 + goto resubmit; 3065 + } 3066 + 3102 3067 pci_unmap_single(skge->hw->pdev, 3103 3068 dma_unmap_addr(e, mapaddr), 3104 3069 dma_unmap_len(e, maplen), 3105 3070 PCI_DMA_FROMDEVICE); 3106 3071 skb = e->skb; 3107 3072 prefetch(skb->data); 3108 - skge_rx_setup(skge, e, nskb, skge->rx_buf_size); 3109 3073 } 3110 3074 3111 3075 skb_put(skb, len); ··· 3155 3111 struct skge_port *skge = netdev_priv(dev); 3156 3112 struct skge_ring *ring = &skge->tx_ring; 3157 3113 struct skge_element *e; 3114 + unsigned int bytes_compl = 0, pkts_compl = 0; 3158 3115 3159 3116 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); 3160 3117 ··· 3165 3120 if (control & BMU_OWN) 3166 3121 break; 3167 3122 3168 - skge_tx_free(skge, e, control); 3123 + skge_tx_unmap(skge->hw->pdev, e, control); 3124 + 3125 + if (control & BMU_EOF) { 3126 + 
netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev, 3127 + "tx done slot %td\n", 3128 + e - skge->tx_ring.start); 3129 + 3130 + pkts_compl++; 3131 + bytes_compl += e->skb->len; 3132 + 3133 + dev_kfree_skb(e->skb); 3134 + } 3169 3135 } 3136 + netdev_completed_queue(dev, pkts_compl, bytes_compl); 3170 3137 skge->tx_ring.to_clean = e; 3171 3138 3172 3139 /* Can run lockless until we need to synchronize to restart queue. */
+8 -1
drivers/net/ethernet/mellanox/mlx4/cmd.c
··· 1247 1247 u32 reply; 1248 1248 u32 slave_status = 0; 1249 1249 u8 is_going_down = 0; 1250 + int i; 1250 1251 1251 1252 slave_state[slave].comm_toggle ^= 1; 1252 1253 reply = (u32) slave_state[slave].comm_toggle << 31; ··· 1259 1258 if (cmd == MLX4_COMM_CMD_RESET) { 1260 1259 mlx4_warn(dev, "Received reset from slave:%d\n", slave); 1261 1260 slave_state[slave].active = false; 1261 + for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) { 1262 + slave_state[slave].event_eq[i].eqn = -1; 1263 + slave_state[slave].event_eq[i].token = 0; 1264 + } 1262 1265 /*check if we are in the middle of FLR process, 1263 1266 if so return "retry" status to the slave*/ 1264 1267 if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { ··· 1457 1452 { 1458 1453 struct mlx4_priv *priv = mlx4_priv(dev); 1459 1454 struct mlx4_slave_state *s_state; 1460 - int i, err, port; 1455 + int i, j, err, port; 1461 1456 1462 1457 priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, 1463 1458 &priv->mfunc.vhcr_dma, ··· 1490 1485 for (i = 0; i < dev->num_slaves; ++i) { 1491 1486 s_state = &priv->mfunc.master.slave_state[i]; 1492 1487 s_state->last_cmd = MLX4_COMM_CMD_RESET; 1488 + for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j) 1489 + s_state->event_eq[j].eqn = -1; 1493 1490 __raw_writel((__force u32) 0, 1494 1491 &priv->mfunc.comm[i].slave_write); 1495 1492 __raw_writel((__force u32) 0,
+2 -2
drivers/net/ethernet/mellanox/mlx4/cq.c
··· 96 96 static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 97 97 int cq_num) 98 98 { 99 - return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0, 99 + return mlx4_cmd(dev, mailbox->dma, cq_num, 0, 100 100 MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A, 101 101 MLX4_CMD_WRAPPED); 102 102 } ··· 111 111 static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 112 112 int cq_num) 113 113 { 114 - return mlx4_cmd_box(dev, dev->caps.function, mailbox ? mailbox->dma : 0, 114 + return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, 115 115 cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ, 116 116 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 117 117 }
+143 -16
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
··· 183 183 static int mlx4_en_get_sset_count(struct net_device *dev, int sset) 184 184 { 185 185 struct mlx4_en_priv *priv = netdev_priv(dev); 186 + int bit_count = hweight64(priv->stats_bitmap); 186 187 187 188 switch (sset) { 188 189 case ETH_SS_STATS: 189 - return NUM_ALL_STATS + 190 + return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) + 190 191 (priv->tx_ring_num + priv->rx_ring_num) * 2; 191 192 case ETH_SS_TEST: 192 193 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags ··· 202 201 { 203 202 struct mlx4_en_priv *priv = netdev_priv(dev); 204 203 int index = 0; 205 - int i; 204 + int i, j = 0; 206 205 207 206 spin_lock_bh(&priv->stats_lock); 208 207 209 - for (i = 0; i < NUM_MAIN_STATS; i++) 210 - data[index++] = ((unsigned long *) &priv->stats)[i]; 211 - for (i = 0; i < NUM_PORT_STATS; i++) 212 - data[index++] = ((unsigned long *) &priv->port_stats)[i]; 208 + if (!(priv->stats_bitmap)) { 209 + for (i = 0; i < NUM_MAIN_STATS; i++) 210 + data[index++] = 211 + ((unsigned long *) &priv->stats)[i]; 212 + for (i = 0; i < NUM_PORT_STATS; i++) 213 + data[index++] = 214 + ((unsigned long *) &priv->port_stats)[i]; 215 + for (i = 0; i < NUM_PKT_STATS; i++) 216 + data[index++] = 217 + ((unsigned long *) &priv->pkstats)[i]; 218 + } else { 219 + for (i = 0; i < NUM_MAIN_STATS; i++) { 220 + if ((priv->stats_bitmap >> j) & 1) 221 + data[index++] = 222 + ((unsigned long *) &priv->stats)[i]; 223 + j++; 224 + } 225 + for (i = 0; i < NUM_PORT_STATS; i++) { 226 + if ((priv->stats_bitmap >> j) & 1) 227 + data[index++] = 228 + ((unsigned long *) &priv->port_stats)[i]; 229 + j++; 230 + } 231 + } 213 232 for (i = 0; i < priv->tx_ring_num; i++) { 214 233 data[index++] = priv->tx_ring[i].packets; 215 234 data[index++] = priv->tx_ring[i].bytes; ··· 238 217 data[index++] = priv->rx_ring[i].packets; 239 218 data[index++] = priv->rx_ring[i].bytes; 240 219 } 241 - for (i = 0; i < NUM_PKT_STATS; i++) 242 - data[index++] = ((unsigned long *) &priv->pkstats)[i]; 243 220 
spin_unlock_bh(&priv->stats_lock); 244 221 245 222 } ··· 266 247 267 248 case ETH_SS_STATS: 268 249 /* Add main counters */ 269 - for (i = 0; i < NUM_MAIN_STATS; i++) 270 - strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]); 271 - for (i = 0; i< NUM_PORT_STATS; i++) 272 - strcpy(data + (index++) * ETH_GSTRING_LEN, 273 - main_strings[i + NUM_MAIN_STATS]); 250 + if (!priv->stats_bitmap) { 251 + for (i = 0; i < NUM_MAIN_STATS; i++) 252 + strcpy(data + (index++) * ETH_GSTRING_LEN, 253 + main_strings[i]); 254 + for (i = 0; i < NUM_PORT_STATS; i++) 255 + strcpy(data + (index++) * ETH_GSTRING_LEN, 256 + main_strings[i + 257 + NUM_MAIN_STATS]); 258 + for (i = 0; i < NUM_PKT_STATS; i++) 259 + strcpy(data + (index++) * ETH_GSTRING_LEN, 260 + main_strings[i + 261 + NUM_MAIN_STATS + 262 + NUM_PORT_STATS]); 263 + } else 264 + for (i = 0; i < NUM_MAIN_STATS + NUM_PORT_STATS; i++) { 265 + if ((priv->stats_bitmap >> i) & 1) { 266 + strcpy(data + 267 + (index++) * ETH_GSTRING_LEN, 268 + main_strings[i]); 269 + } 270 + if (!(priv->stats_bitmap >> i)) 271 + break; 272 + } 274 273 for (i = 0; i < priv->tx_ring_num; i++) { 275 274 sprintf(data + (index++) * ETH_GSTRING_LEN, 276 275 "tx%d_packets", i); ··· 301 264 sprintf(data + (index++) * ETH_GSTRING_LEN, 302 265 "rx%d_bytes", i); 303 266 } 304 - for (i = 0; i< NUM_PKT_STATS; i++) 305 - strcpy(data + (index++) * ETH_GSTRING_LEN, 306 - main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]); 307 267 break; 308 268 } 309 269 } ··· 513 479 param->tx_pending = priv->tx_ring[0].size; 514 480 } 515 481 482 + static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev) 483 + { 484 + struct mlx4_en_priv *priv = netdev_priv(dev); 485 + 486 + return priv->rx_ring_num; 487 + } 488 + 489 + static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index) 490 + { 491 + struct mlx4_en_priv *priv = netdev_priv(dev); 492 + struct mlx4_en_rss_map *rss_map = &priv->rss_map; 493 + int rss_rings; 494 + size_t n = priv->rx_ring_num; 495 + 
int err = 0; 496 + 497 + rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num; 498 + 499 + while (n--) { 500 + ring_index[n] = rss_map->qps[n % rss_rings].qpn - 501 + rss_map->base_qpn; 502 + } 503 + 504 + return err; 505 + } 506 + 507 + static int mlx4_en_set_rxfh_indir(struct net_device *dev, 508 + const u32 *ring_index) 509 + { 510 + struct mlx4_en_priv *priv = netdev_priv(dev); 511 + struct mlx4_en_dev *mdev = priv->mdev; 512 + int port_up = 0; 513 + int err = 0; 514 + int i; 515 + int rss_rings = 0; 516 + 517 + /* Calculate RSS table size and make sure flows are spread evenly 518 + * between rings 519 + */ 520 + for (i = 0; i < priv->rx_ring_num; i++) { 521 + if (i > 0 && !ring_index[i] && !rss_rings) 522 + rss_rings = i; 523 + 524 + if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num))) 525 + return -EINVAL; 526 + } 527 + 528 + if (!rss_rings) 529 + rss_rings = priv->rx_ring_num; 530 + 531 + /* RSS table size must be an order of 2 */ 532 + if (!is_power_of_2(rss_rings)) 533 + return -EINVAL; 534 + 535 + mutex_lock(&mdev->state_lock); 536 + if (priv->port_up) { 537 + port_up = 1; 538 + mlx4_en_stop_port(dev); 539 + } 540 + 541 + priv->prof->rss_rings = rss_rings; 542 + 543 + if (port_up) { 544 + err = mlx4_en_start_port(dev); 545 + if (err) 546 + en_err(priv, "Failed starting port\n"); 547 + } 548 + 549 + mutex_unlock(&mdev->state_lock); 550 + return err; 551 + } 552 + 553 + static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 554 + u32 *rule_locs) 555 + { 556 + struct mlx4_en_priv *priv = netdev_priv(dev); 557 + int err = 0; 558 + 559 + switch (cmd->cmd) { 560 + case ETHTOOL_GRXRINGS: 561 + cmd->data = priv->rx_ring_num; 562 + break; 563 + default: 564 + err = -EOPNOTSUPP; 565 + break; 566 + } 567 + 568 + return err; 569 + } 570 + 516 571 const struct ethtool_ops mlx4_en_ethtool_ops = { 517 572 .get_drvinfo = mlx4_en_get_drvinfo, 518 573 .get_settings = mlx4_en_get_settings, ··· 621 498 .set_pauseparam = 
mlx4_en_set_pauseparam, 622 499 .get_ringparam = mlx4_en_get_ringparam, 623 500 .set_ringparam = mlx4_en_set_ringparam, 501 + .get_rxnfc = mlx4_en_get_rxnfc, 502 + .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, 503 + .get_rxfh_indir = mlx4_en_get_rxfh_indir, 504 + .set_rxfh_indir = mlx4_en_set_rxfh_indir, 624 505 }; 625 506 626 507
+1 -5
drivers/net/ethernet/mellanox/mlx4/en_main.c
··· 62 62 * Device scope module parameters 63 63 */ 64 64 65 - 66 - /* Enable RSS TCP traffic */ 67 - MLX4_EN_PARM_INT(tcp_rss, 1, 68 - "Enable RSS for incomming TCP traffic or disabled (0)"); 69 65 /* Enable RSS UDP traffic */ 70 66 MLX4_EN_PARM_INT(udp_rss, 1, 71 67 "Enable RSS for incomming UDP traffic or disabled (0)"); ··· 100 104 struct mlx4_en_profile *params = &mdev->profile; 101 105 int i; 102 106 103 - params->tcp_rss = tcp_rss; 104 107 params->udp_rss = udp_rss; 105 108 if (params->udp_rss && !(mdev->dev->caps.flags 106 109 & MLX4_DEV_CAP_FLAG_UDP_RSS)) { ··· 115 120 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; 116 121 params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS + 117 122 (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS; 123 + params->prof[i].rss_rings = 0; 118 124 } 119 125 120 126 return 0;
+30 -16
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 702 702 /* Schedule multicast task to populate multicast list */ 703 703 queue_work(mdev->workqueue, &priv->mcast_task); 704 704 705 + mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); 706 + 705 707 priv->port_up = true; 706 708 netif_tx_start_all_queues(dev); 707 709 return 0; ··· 809 807 mutex_unlock(&mdev->state_lock); 810 808 } 811 809 810 + static void mlx4_en_clear_stats(struct net_device *dev) 811 + { 812 + struct mlx4_en_priv *priv = netdev_priv(dev); 813 + struct mlx4_en_dev *mdev = priv->mdev; 814 + int i; 815 + 816 + if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) 817 + en_dbg(HW, priv, "Failed dumping statistics\n"); 818 + 819 + memset(&priv->stats, 0, sizeof(priv->stats)); 820 + memset(&priv->pstats, 0, sizeof(priv->pstats)); 821 + memset(&priv->pkstats, 0, sizeof(priv->pkstats)); 822 + memset(&priv->port_stats, 0, sizeof(priv->port_stats)); 823 + 824 + for (i = 0; i < priv->tx_ring_num; i++) { 825 + priv->tx_ring[i].bytes = 0; 826 + priv->tx_ring[i].packets = 0; 827 + priv->tx_ring[i].tx_csum = 0; 828 + } 829 + for (i = 0; i < priv->rx_ring_num; i++) { 830 + priv->rx_ring[i].bytes = 0; 831 + priv->rx_ring[i].packets = 0; 832 + priv->rx_ring[i].csum_ok = 0; 833 + priv->rx_ring[i].csum_none = 0; 834 + } 835 + } 812 836 813 837 static int mlx4_en_open(struct net_device *dev) 814 838 { 815 839 struct mlx4_en_priv *priv = netdev_priv(dev); 816 840 struct mlx4_en_dev *mdev = priv->mdev; 817 - int i; 818 841 int err = 0; 819 842 820 843 mutex_lock(&mdev->state_lock); ··· 850 823 goto out; 851 824 } 852 825 853 - /* Reset HW statistics and performance counters */ 854 - if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) 855 - en_dbg(HW, priv, "Failed dumping statistics\n"); 856 - 857 - memset(&priv->stats, 0, sizeof(priv->stats)); 858 - memset(&priv->pstats, 0, sizeof(priv->pstats)); 859 - 860 - for (i = 0; i < priv->tx_ring_num; i++) { 861 - priv->tx_ring[i].bytes = 0; 862 - priv->tx_ring[i].packets = 0; 863 - } 864 - for (i = 0; i < 
priv->rx_ring_num; i++) { 865 - priv->rx_ring[i].bytes = 0; 866 - priv->rx_ring[i].packets = 0; 867 - } 826 + /* Reset HW statistics and SW counters */ 827 + mlx4_en_clear_stats(dev); 868 828 869 829 err = mlx4_en_start_port(dev); 870 830 if (err)
+7 -1
drivers/net/ethernet/mellanox/mlx4/en_rx.c
··· 853 853 struct mlx4_en_rss_map *rss_map = &priv->rss_map; 854 854 struct mlx4_qp_context context; 855 855 struct mlx4_rss_context *rss_context; 856 + int rss_rings; 856 857 void *ptr; 857 858 u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 | 858 859 MLX4_RSS_TCP_IPV6); ··· 894 893 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, 895 894 priv->rx_ring[0].cqn, &context); 896 895 896 + if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num) 897 + rss_rings = priv->rx_ring_num; 898 + else 899 + rss_rings = priv->prof->rss_rings; 900 + 897 901 ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path) 898 902 + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH; 899 903 rss_context = ptr; 900 - rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 | 904 + rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 | 901 905 (rss_map->base_qpn)); 902 906 rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn); 903 907 if (priv->mdev->profile.udp_rss) {
+9 -12
drivers/net/ethernet/mellanox/mlx4/eq.c
··· 513 513 { 514 514 struct mlx4_priv *priv = mlx4_priv(dev); 515 515 struct mlx4_slave_event_eq_info *event_eq = 516 - &priv->mfunc.master.slave_state[slave].event_eq; 516 + priv->mfunc.master.slave_state[slave].event_eq; 517 517 u32 in_modifier = vhcr->in_modifier; 518 518 u32 eqn = in_modifier & 0x1FF; 519 519 u64 in_param = vhcr->in_param; 520 520 int err = 0; 521 + int i; 521 522 522 523 if (slave == dev->caps.function) 523 524 err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn, 524 525 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, 525 526 MLX4_CMD_NATIVE); 526 - if (!err) { 527 - if (in_modifier >> 31) { 528 - /* unmap */ 529 - event_eq->event_type &= ~in_param; 530 - } else { 531 - event_eq->eqn = eqn; 532 - event_eq->event_type = in_param; 533 - } 534 - } 527 + if (!err) 528 + for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) 529 + if (in_param & (1LL << i)) 530 + event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn; 531 + 535 532 return err; 536 533 } 537 534 ··· 543 546 static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 544 547 int eq_num) 545 548 { 546 - return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0, 549 + return mlx4_cmd(dev, mailbox->dma, eq_num, 0, 547 550 MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A, 548 551 MLX4_CMD_WRAPPED); 549 552 } ··· 551 554 static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 552 555 int eq_num) 553 556 { 554 - return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num, 557 + return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 555 558 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A, 556 559 MLX4_CMD_WRAPPED); 557 560 }
-7
drivers/net/ethernet/mellanox/mlx4/fw.c
··· 158 158 159 159 #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 160 160 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 161 - #define QUERY_FUNC_CAP_FUNCTION_OFFSET 0x3 162 161 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 163 162 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10 164 163 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14 ··· 180 181 } else if (vhcr->op_modifier == 0) { 181 182 field = 1 << 7; /* enable only ethernet interface */ 182 183 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); 183 - 184 - field = slave; 185 - MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FUNCTION_OFFSET); 186 184 187 185 field = dev->caps.num_ports; 188 186 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); ··· 244 248 err = -EPROTONOSUPPORT; 245 249 goto out; 246 250 } 247 - 248 - MLX4_GET(field, outbox, QUERY_FUNC_CAP_FUNCTION_OFFSET); 249 - func_cap->function = field; 250 251 251 252 MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); 252 253 func_cap->num_ports = field;
-1
drivers/net/ethernet/mellanox/mlx4/fw.h
··· 119 119 }; 120 120 121 121 struct mlx4_func_cap { 122 - u8 function; 123 122 u8 num_ports; 124 123 u8 flags; 125 124 u32 pf_context_behaviour;
+1 -2
drivers/net/ethernet/mellanox/mlx4/main.c
··· 108 108 .num_cq = 1 << 16, 109 109 .num_mcg = 1 << 13, 110 110 .num_mpt = 1 << 19, 111 - .num_mtt = 1 << 20, 111 + .num_mtt = 1 << 20, /* It is really num mtt segements */ 112 112 }; 113 113 114 114 static int log_num_mac = 7; ··· 471 471 return -ENOSYS; 472 472 } 473 473 474 - dev->caps.function = func_cap.function; 475 474 dev->caps.num_ports = func_cap.num_ports; 476 475 dev->caps.num_qps = func_cap.qp_quota; 477 476 dev->caps.num_srqs = func_cap.srq_quota;
+5 -3
drivers/net/ethernet/mellanox/mlx4/mlx4.h
··· 388 388 }; 389 389 390 390 struct mlx4_slave_event_eq_info { 391 - u32 eqn; 391 + int eqn; 392 392 u16 token; 393 - u64 event_type; 394 393 }; 395 394 396 395 struct mlx4_profile { ··· 448 449 struct list_head duplicates; 449 450 }; 450 451 452 + #define MLX4_EVENT_TYPES_NUM 64 453 + 451 454 struct mlx4_slave_state { 452 455 u8 comm_toggle; 453 456 u8 last_cmd; ··· 462 461 struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES]; 463 462 struct list_head mcast_filters[MLX4_MAX_PORTS + 1]; 464 463 struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1]; 465 - struct mlx4_slave_event_eq_info event_eq; 464 + /* event type to eq number lookup */ 465 + struct mlx4_slave_event_eq_info event_eq[MLX4_EVENT_TYPES_NUM]; 466 466 u16 eq_pi; 467 467 u16 eq_ci; 468 468 spinlock_t lock;
+2 -1
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
··· 325 325 u8 rx_ppp; 326 326 u8 tx_pause; 327 327 u8 tx_ppp; 328 + int rss_rings; 328 329 }; 329 330 330 331 struct mlx4_en_profile { 331 332 int rss_xor; 332 - int tcp_rss; 333 333 int udp_rss; 334 334 u8 rss_mask; 335 335 u32 active_ports; ··· 476 476 struct mlx4_en_perf_stats pstats; 477 477 struct mlx4_en_pkt_stats pkstats; 478 478 struct mlx4_en_port_stats port_stats; 479 + u64 stats_bitmap; 479 480 char *mc_addrs; 480 481 int mc_addrs_cnt; 481 482 struct mlx4_en_stat_out_mbox hw_stats;
+1 -1
drivers/net/ethernet/mellanox/mlx4/mr.c
··· 291 291 static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 292 292 int mpt_index) 293 293 { 294 - return mlx4_cmd(dev, mailbox->dma | dev->caps.function , mpt_index, 294 + return mlx4_cmd(dev, mailbox->dma, mpt_index, 295 295 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B, 296 296 MLX4_CMD_WRAPPED); 297 297 }
+1 -2
drivers/net/ethernet/mellanox/mlx4/pd.c
··· 52 52 *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap); 53 53 if (*pdn == -1) 54 54 return -ENOMEM; 55 - if (mlx4_is_mfunc(dev)) 56 - *pdn |= (dev->caps.function + 1) << NOT_MASKED_PD_BITS; 55 + 57 56 return 0; 58 57 } 59 58 EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
+23
drivers/net/ethernet/mellanox/mlx4/port.c
··· 44 44 #define MLX4_VLAN_VALID (1u << 31) 45 45 #define MLX4_VLAN_MASK 0xfff 46 46 47 + #define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL 48 + #define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL 49 + #define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL 50 + #define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL 51 + 47 52 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) 48 53 { 49 54 int i; ··· 903 898 struct mlx4_cmd_mailbox *outbox, 904 899 struct mlx4_cmd_info *cmd) 905 900 { 901 + if (slave != dev->caps.function) 902 + return 0; 906 903 return mlx4_common_dump_eth_stats(dev, slave, 907 904 vhcr->in_modifier, outbox); 908 905 } 906 + 907 + void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap) 908 + { 909 + if (!mlx4_is_mfunc(dev)) { 910 + *stats_bitmap = 0; 911 + return; 912 + } 913 + 914 + *stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK | 915 + MLX4_STATS_TRAFFIC_DROPS_MASK | 916 + MLX4_STATS_PORT_COUNTERS_MASK); 917 + 918 + if (mlx4_is_master(dev)) 919 + *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK; 920 + } 921 + EXPORT_SYMBOL(mlx4_set_stats_bitmap);
+1 -1
drivers/net/ethernet/mellanox/mlx4/profile.c
··· 110 110 profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); 111 111 profile[MLX4_RES_DMPT].num = request->num_mpt; 112 112 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 113 - profile[MLX4_RES_MTT].num = request->num_mtt; 113 + profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg); 114 114 profile[MLX4_RES_MCG].num = request->num_mcg; 115 115 116 116 for (i = 0; i < MLX4_RES_NUM; ++i) {
+1 -1
drivers/net/ethernet/mellanox/mlx4/qp.c
··· 162 162 ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn = 163 163 cpu_to_be32(qp->qpn); 164 164 165 - ret = mlx4_cmd(dev, mailbox->dma | dev->caps.function, 165 + ret = mlx4_cmd(dev, mailbox->dma, 166 166 qp->qpn | (!!sqd_event << 31), 167 167 new_state == MLX4_QP_STATE_RST ? 2 : 0, 168 168 op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
+2 -37
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
··· 1561 1561 return be32_to_cpu(mpt->mtt_sz); 1562 1562 } 1563 1563 1564 - static int mr_get_pdn(struct mlx4_mpt_entry *mpt) 1565 - { 1566 - return be32_to_cpu(mpt->pd_flags) & 0xffffff; 1567 - } 1568 - 1569 1564 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc) 1570 1565 { 1571 1566 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; ··· 1595 1600 page_shift); 1596 1601 1597 1602 return total_pages; 1598 - } 1599 - 1600 - static int qp_get_pdn(struct mlx4_qp_context *qpc) 1601 - { 1602 - return be32_to_cpu(qpc->pd) & 0xffffff; 1603 - } 1604 - 1605 - static int pdn2slave(int pdn) 1606 - { 1607 - return (pdn >> NOT_MASKED_PD_BITS) - 1; 1608 1603 } 1609 1604 1610 1605 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, ··· 1639 1654 goto ex_put; 1640 1655 1641 1656 mpt->mtt = mtt; 1642 - } 1643 - 1644 - if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) { 1645 - err = -EPERM; 1646 - goto ex_put; 1647 1657 } 1648 1658 1649 1659 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); ··· 1771 1791 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); 1772 1792 if (err) 1773 1793 goto ex_put_mtt; 1774 - 1775 - if (pdn2slave(qp_get_pdn(qpc)) != slave) { 1776 - err = -EPERM; 1777 - goto ex_put_mtt; 1778 - } 1779 1794 1780 1795 err = get_res(dev, slave, rcqn, RES_CQ, &rcq); 1781 1796 if (err) ··· 2023 2048 if (!priv->mfunc.master.slave_state) 2024 2049 return -EINVAL; 2025 2050 2026 - event_eq = &priv->mfunc.master.slave_state[slave].event_eq; 2051 + event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; 2027 2052 2028 2053 /* Create the event only if the slave is registered */ 2029 - if ((event_eq->event_type & (1 << eqe->type)) == 0) 2054 + if (event_eq->eqn < 0) 2030 2055 return 0; 2031 2056 2032 2057 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); ··· 2264 2289 return err; 2265 2290 } 2266 2291 2267 - static int srq_get_pdn(struct mlx4_srq_context *srqc) 2268 - { 2269 - return be32_to_cpu(srqc->pd) & 0xffffff; 
2270 - } 2271 - 2272 2292 static int srq_get_mtt_size(struct mlx4_srq_context *srqc) 2273 2293 { 2274 2294 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf; ··· 2302 2332 mtt); 2303 2333 if (err) 2304 2334 goto ex_put_mtt; 2305 - 2306 - if (pdn2slave(srq_get_pdn(srqc)) != slave) { 2307 - err = -EPERM; 2308 - goto ex_put_mtt; 2309 - } 2310 2335 2311 2336 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2312 2337 if (err)
+1 -1
drivers/net/ethernet/mellanox/mlx4/srq.c
··· 67 67 static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 68 68 int srq_num) 69 69 { 70 - return mlx4_cmd(dev, mailbox->dma | dev->caps.function, srq_num, 0, 70 + return mlx4_cmd(dev, mailbox->dma, srq_num, 0, 71 71 MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A, 72 72 MLX4_CMD_WRAPPED); 73 73 }
+14 -3
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
··· 1745 1745 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; 1746 1746 int err; 1747 1747 1748 + /* Ensure we have a valid MAC */ 1749 + if (!is_valid_ether_addr(adapter->hw.mac.addr)) { 1750 + pr_err("Error: Invalid MAC address\n"); 1751 + return -EINVAL; 1752 + } 1753 + 1748 1754 /* hardware has been reset, we need to reload some things */ 1749 1755 pch_gbe_set_multi(netdev); 1750 1756 ··· 2474 2468 2475 2469 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 2476 2470 if (!is_valid_ether_addr(netdev->dev_addr)) { 2477 - dev_err(&pdev->dev, "Invalid MAC Address\n"); 2478 - ret = -EIO; 2479 - goto err_free_adapter; 2471 + /* 2472 + * If the MAC is invalid (or just missing), display a warning 2473 + * but do not abort setting up the device. pch_gbe_up will 2474 + * prevent the interface from being brought up until a valid MAC 2475 + * is set. 2476 + */ 2477 + dev_err(&pdev->dev, "Invalid MAC address, " 2478 + "interface disabled.\n"); 2480 2479 } 2481 2480 setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog, 2482 2481 (unsigned long)adapter);
+1 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
··· 154 154 else 155 155 irqlist = priv->mii_irq; 156 156 157 - new_bus->name = "STMMAC MII Bus"; 157 + new_bus->name = "stmmac"; 158 158 new_bus->read = &stmmac_mdio_read; 159 159 new_bus->write = &stmmac_mdio_write; 160 160 new_bus->reset = &stmmac_mdio_reset;
+3 -3
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
··· 170 170 #define STMMAC_DEVICE_ID 0x1108 171 171 172 172 static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = { 173 - { 174 - PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, { 175 - } 173 + {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, 174 + {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)}, 175 + {} 176 176 }; 177 177 178 178 MODULE_DEVICE_TABLE(pci, stmmac_id_table);
+3 -2
drivers/net/hyperv/netvsc_drv.c
··· 68 68 69 69 nvdev = hv_get_drvdata(ndevctx->device_ctx); 70 70 if (nvdev == NULL) 71 - return; 71 + goto out; 72 72 73 73 rdev = nvdev->extension; 74 74 if (rdev == NULL) 75 - return; 75 + goto out; 76 76 77 77 if (net->flags & IFF_PROMISC) 78 78 rndis_filter_set_packet_filter(rdev, ··· 83 83 NDIS_PACKET_TYPE_ALL_MULTICAST | 84 84 NDIS_PACKET_TYPE_DIRECTED); 85 85 86 + out: 86 87 kfree(w); 87 88 } 88 89
+1
drivers/net/macvlan.c
··· 173 173 skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN); 174 174 if (!skb) 175 175 return RX_HANDLER_CONSUMED; 176 + eth = eth_hdr(skb); 176 177 src = macvlan_hash_lookup(port, eth->h_source); 177 178 if (!src) 178 179 /* frame comes from an external address */
+2 -3
drivers/net/phy/mdio_bus.c
··· 38 38 39 39 /** 40 40 * mdiobus_alloc_size - allocate a mii_bus structure 41 + * @size: extra amount of memory to allocate for private storage. 42 + * If non-zero, then bus->priv is points to that memory. 41 43 * 42 44 * Description: called by a bus driver to allocate an mii_bus 43 45 * structure to fill in. 44 - * 45 - * 'size' is an an extra amount of memory to allocate for private storage. 46 - * If non-zero, then bus->priv is points to that memory. 47 46 */ 48 47 struct mii_bus *mdiobus_alloc_size(size_t size) 49 48 {
+90 -46
drivers/net/team/team.c
··· 92 92 return NULL; 93 93 } 94 94 95 - int team_options_register(struct team *team, 96 - const struct team_option *option, 97 - size_t option_count) 95 + int __team_options_register(struct team *team, 96 + const struct team_option *option, 97 + size_t option_count) 98 98 { 99 99 int i; 100 100 struct team_option **dst_opts; ··· 116 116 } 117 117 } 118 118 119 - for (i = 0; i < option_count; i++) 119 + for (i = 0; i < option_count; i++) { 120 + dst_opts[i]->changed = true; 121 + dst_opts[i]->removed = false; 120 122 list_add_tail(&dst_opts[i]->list, &team->option_list); 123 + } 121 124 122 125 kfree(dst_opts); 123 126 return 0; ··· 133 130 return err; 134 131 } 135 132 136 - EXPORT_SYMBOL(team_options_register); 133 + static void __team_options_mark_removed(struct team *team, 134 + const struct team_option *option, 135 + size_t option_count) 136 + { 137 + int i; 137 138 138 - static void __team_options_change_check(struct team *team, 139 - struct team_option *changed_option); 139 + for (i = 0; i < option_count; i++, option++) { 140 + struct team_option *del_opt; 141 + 142 + del_opt = __team_find_option(team, option->name); 143 + if (del_opt) { 144 + del_opt->changed = true; 145 + del_opt->removed = true; 146 + } 147 + } 148 + } 140 149 141 150 static void __team_options_unregister(struct team *team, 142 151 const struct team_option *option, ··· 167 152 } 168 153 } 169 154 155 + static void __team_options_change_check(struct team *team); 156 + 157 + int team_options_register(struct team *team, 158 + const struct team_option *option, 159 + size_t option_count) 160 + { 161 + int err; 162 + 163 + err = __team_options_register(team, option, option_count); 164 + if (err) 165 + return err; 166 + __team_options_change_check(team); 167 + return 0; 168 + } 169 + EXPORT_SYMBOL(team_options_register); 170 + 170 171 void team_options_unregister(struct team *team, 171 172 const struct team_option *option, 172 173 size_t option_count) 173 174 { 175 + 
__team_options_mark_removed(team, option, option_count); 176 + __team_options_change_check(team); 174 177 __team_options_unregister(team, option, option_count); 175 - __team_options_change_check(team, NULL); 176 178 } 177 179 EXPORT_SYMBOL(team_options_unregister); 178 180 ··· 208 176 if (err) 209 177 return err; 210 178 211 - __team_options_change_check(team, option); 179 + option->changed = true; 180 + __team_options_change_check(team); 212 181 return err; 213 182 } 214 183 ··· 686 653 return -ENOENT; 687 654 } 688 655 656 + port->removed = true; 689 657 __team_port_change_check(port, false); 690 658 team_port_list_del_port(team, port); 691 659 team_adjust_ops(team); ··· 1234 1200 return err; 1235 1201 } 1236 1202 1237 - static int team_nl_fill_options_get_changed(struct sk_buff *skb, 1238 - u32 pid, u32 seq, int flags, 1239 - struct team *team, 1240 - struct team_option *changed_option) 1203 + static int team_nl_fill_options_get(struct sk_buff *skb, 1204 + u32 pid, u32 seq, int flags, 1205 + struct team *team, bool fillall) 1241 1206 { 1242 1207 struct nlattr *option_list; 1243 1208 void *hdr; ··· 1256 1223 struct nlattr *option_item; 1257 1224 long arg; 1258 1225 1226 + /* Include only changed options if fill all mode is not on */ 1227 + if (!fillall && !option->changed) 1228 + continue; 1259 1229 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); 1260 1230 if (!option_item) 1261 1231 goto nla_put_failure; 1262 1232 NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name); 1263 - if (option == changed_option) 1233 + if (option->changed) { 1264 1234 NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED); 1235 + option->changed = false; 1236 + } 1237 + if (option->removed) 1238 + NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_REMOVED); 1265 1239 switch (option->type) { 1266 1240 case TEAM_OPTION_TYPE_U32: 1267 1241 NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32); ··· 1295 1255 return -EMSGSIZE; 1296 1256 } 1297 1257 1298 - static int team_nl_fill_options_get(struct sk_buff *skb, 
1299 - struct genl_info *info, int flags, 1300 - struct team *team) 1258 + static int team_nl_fill_options_get_all(struct sk_buff *skb, 1259 + struct genl_info *info, int flags, 1260 + struct team *team) 1301 1261 { 1302 - return team_nl_fill_options_get_changed(skb, info->snd_pid, 1303 - info->snd_seq, NLM_F_ACK, 1304 - team, NULL); 1262 + return team_nl_fill_options_get(skb, info->snd_pid, 1263 + info->snd_seq, NLM_F_ACK, 1264 + team, true); 1305 1265 } 1306 1266 1307 1267 static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) ··· 1313 1273 if (!team) 1314 1274 return -EINVAL; 1315 1275 1316 - err = team_nl_send_generic(info, team, team_nl_fill_options_get); 1276 + err = team_nl_send_generic(info, team, team_nl_fill_options_get_all); 1317 1277 1318 1278 team_nl_team_put(team); 1319 1279 ··· 1405 1365 return err; 1406 1366 } 1407 1367 1408 - static int team_nl_fill_port_list_get_changed(struct sk_buff *skb, 1409 - u32 pid, u32 seq, int flags, 1410 - struct team *team, 1411 - struct team_port *changed_port) 1368 + static int team_nl_fill_port_list_get(struct sk_buff *skb, 1369 + u32 pid, u32 seq, int flags, 1370 + struct team *team, 1371 + bool fillall) 1412 1372 { 1413 1373 struct nlattr *port_list; 1414 1374 void *hdr; ··· 1427 1387 list_for_each_entry(port, &team->port_list, list) { 1428 1388 struct nlattr *port_item; 1429 1389 1390 + /* Include only changed ports if fill all mode is not on */ 1391 + if (!fillall && !port->changed) 1392 + continue; 1430 1393 port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); 1431 1394 if (!port_item) 1432 1395 goto nla_put_failure; 1433 1396 NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex); 1434 - if (port == changed_port) 1397 + if (port->changed) { 1435 1398 NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED); 1399 + port->changed = false; 1400 + } 1401 + if (port->removed) 1402 + NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_REMOVED); 1436 1403 if (port->linkup) 1437 1404 NLA_PUT_FLAG(skb, 
TEAM_ATTR_PORT_LINKUP); 1438 1405 NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed); ··· 1455 1408 return -EMSGSIZE; 1456 1409 } 1457 1410 1458 - static int team_nl_fill_port_list_get(struct sk_buff *skb, 1459 - struct genl_info *info, int flags, 1460 - struct team *team) 1411 + static int team_nl_fill_port_list_get_all(struct sk_buff *skb, 1412 + struct genl_info *info, int flags, 1413 + struct team *team) 1461 1414 { 1462 - return team_nl_fill_port_list_get_changed(skb, info->snd_pid, 1463 - info->snd_seq, NLM_F_ACK, 1464 - team, NULL); 1415 + return team_nl_fill_port_list_get(skb, info->snd_pid, 1416 + info->snd_seq, NLM_F_ACK, 1417 + team, true); 1465 1418 } 1466 1419 1467 1420 static int team_nl_cmd_port_list_get(struct sk_buff *skb, ··· 1474 1427 if (!team) 1475 1428 return -EINVAL; 1476 1429 1477 - err = team_nl_send_generic(info, team, team_nl_fill_port_list_get); 1430 + err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all); 1478 1431 1479 1432 team_nl_team_put(team); 1480 1433 ··· 1511 1464 .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, 1512 1465 }; 1513 1466 1514 - static int team_nl_send_event_options_get(struct team *team, 1515 - struct team_option *changed_option) 1467 + static int team_nl_send_event_options_get(struct team *team) 1516 1468 { 1517 1469 struct sk_buff *skb; 1518 1470 int err; ··· 1521 1475 if (!skb) 1522 1476 return -ENOMEM; 1523 1477 1524 - err = team_nl_fill_options_get_changed(skb, 0, 0, 0, team, 1525 - changed_option); 1478 + err = team_nl_fill_options_get(skb, 0, 0, 0, team, false); 1526 1479 if (err < 0) 1527 1480 goto err_fill; 1528 1481 ··· 1534 1489 return err; 1535 1490 } 1536 1491 1537 - static int team_nl_send_event_port_list_get(struct team_port *port) 1492 + static int team_nl_send_event_port_list_get(struct team *team) 1538 1493 { 1539 1494 struct sk_buff *skb; 1540 1495 int err; 1541 - struct net *net = dev_net(port->team->dev); 1496 + struct net *net = dev_net(team->dev); 1542 1497 1543 1498 skb = 
nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1544 1499 if (!skb) 1545 1500 return -ENOMEM; 1546 1501 1547 - err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0, 1548 - port->team, port); 1502 + err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false); 1549 1503 if (err < 0) 1550 1504 goto err_fill; 1551 1505 ··· 1588 1544 * Change checkers 1589 1545 ******************/ 1590 1546 1591 - static void __team_options_change_check(struct team *team, 1592 - struct team_option *changed_option) 1547 + static void __team_options_change_check(struct team *team) 1593 1548 { 1594 1549 int err; 1595 1550 1596 - err = team_nl_send_event_options_get(team, changed_option); 1551 + err = team_nl_send_event_options_get(team); 1597 1552 if (err) 1598 1553 netdev_warn(team->dev, "Failed to send options change via netlink\n"); 1599 1554 } ··· 1602 1559 { 1603 1560 int err; 1604 1561 1605 - if (port->linkup == linkup) 1562 + if (!port->removed && port->linkup == linkup) 1606 1563 return; 1607 1564 1565 + port->changed = true; 1608 1566 port->linkup = linkup; 1609 1567 if (linkup) { 1610 1568 struct ethtool_cmd ecmd; ··· 1621 1577 port->duplex = 0; 1622 1578 1623 1579 send_event: 1624 - err = team_nl_send_event_port_list_get(port); 1580 + err = team_nl_send_event_port_list_get(port->team); 1625 1581 if (err) 1626 1582 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n", 1627 1583 port->dev->name);
+6
drivers/net/wireless/b43/Kconfig
··· 31 31 depends on B43 && BCMA 32 32 default y 33 33 34 + config B43_BCMA_EXTRA 35 + bool "Hardware support that overlaps with the brcmsmac driver" 36 + depends on B43_BCMA 37 + default n if BRCMSMAC || BRCMSMAC_MODULE 38 + default y 39 + 34 40 config B43_SSB 35 41 bool 36 42 depends on B43 && SSB
+2
drivers/net/wireless/b43/main.c
··· 116 116 #ifdef CONFIG_B43_BCMA 117 117 static const struct bcma_device_id b43_bcma_tbl[] = { 118 118 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS), 119 + #ifdef CONFIG_B43_BCMA_EXTRA 119 120 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS), 120 121 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS), 122 + #endif 121 123 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1D, BCMA_ANY_CLASS), 122 124 BCMA_CORETABLE_END 123 125 };
+9 -1
drivers/net/wireless/brcm80211/brcmsmac/main.c
··· 7981 7981 7982 7982 void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop) 7983 7983 { 7984 + int timeout = 20; 7985 + 7984 7986 /* flush packet queue when requested */ 7985 7987 if (drop) 7986 7988 brcmu_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL); 7987 7989 7988 7990 /* wait for queue and DMA fifos to run dry */ 7989 - while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0) 7991 + while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0) { 7990 7992 brcms_msleep(wlc->wl, 1); 7993 + 7994 + if (--timeout == 0) 7995 + break; 7996 + } 7997 + 7998 + WARN_ON_ONCE(timeout == 0); 7991 7999 } 7992 8000 7993 8001 void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
+10
include/linux/if_team.h
··· 46 46 u32 speed; 47 47 u8 duplex; 48 48 49 + /* Custom gennetlink interface related flags */ 50 + bool changed; 51 + bool removed; 52 + 49 53 struct rcu_head rcu; 50 54 }; 51 55 ··· 76 72 enum team_option_type type; 77 73 int (*getter)(struct team *team, void *arg); 78 74 int (*setter)(struct team *team, void *arg); 75 + 76 + /* Custom gennetlink interface related flags */ 77 + bool changed; 78 + bool removed; 79 79 }; 80 80 81 81 struct team_mode { ··· 215 207 TEAM_ATTR_OPTION_CHANGED, /* flag */ 216 208 TEAM_ATTR_OPTION_TYPE, /* u8 */ 217 209 TEAM_ATTR_OPTION_DATA, /* dynamic */ 210 + TEAM_ATTR_OPTION_REMOVED, /* flag */ 218 211 219 212 __TEAM_ATTR_OPTION_MAX, 220 213 TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1, ··· 236 227 TEAM_ATTR_PORT_LINKUP, /* flag */ 237 228 TEAM_ATTR_PORT_SPEED, /* u32 */ 238 229 TEAM_ATTR_PORT_DUPLEX, /* u8 */ 230 + TEAM_ATTR_PORT_REMOVED, /* flag */ 239 231 240 232 __TEAM_ATTR_PORT_MAX, 241 233 TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1,
+1
include/linux/mlx4/device.h
··· 621 621 int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); 622 622 int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn); 623 623 void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn); 624 + void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap); 624 625 625 626 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); 626 627 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
+10 -1
include/linux/res_counter.h
··· 109 109 * 110 110 * returns 0 on success and <0 if the counter->usage will exceed the 111 111 * counter->limit _locked call expects the counter->lock to be taken 112 + * 113 + * charge_nofail works the same, except that it charges the resource 114 + * counter unconditionally, and returns < 0 if the after the current 115 + * charge we are over limit. 112 116 */ 113 117 114 118 int __must_check res_counter_charge_locked(struct res_counter *counter, 115 119 unsigned long val); 116 120 int __must_check res_counter_charge(struct res_counter *counter, 121 + unsigned long val, struct res_counter **limit_fail_at); 122 + int __must_check res_counter_charge_nofail(struct res_counter *counter, 117 123 unsigned long val, struct res_counter **limit_fail_at); 118 124 119 125 /* ··· 148 142 unsigned long flags; 149 143 150 144 spin_lock_irqsave(&cnt->lock, flags); 151 - margin = cnt->limit - cnt->usage; 145 + if (cnt->limit > cnt->usage) 146 + margin = cnt->limit - cnt->usage; 147 + else 148 + margin = 0; 152 149 spin_unlock_irqrestore(&cnt->lock, flags); 153 150 return margin; 154 151 }
-1
include/linux/snmp.h
··· 192 192 LINUX_MIB_TCPPARTIALUNDO, /* TCPPartialUndo */ 193 193 LINUX_MIB_TCPDSACKUNDO, /* TCPDSACKUndo */ 194 194 LINUX_MIB_TCPLOSSUNDO, /* TCPLossUndo */ 195 - LINUX_MIB_TCPLOSS, /* TCPLoss */ 196 195 LINUX_MIB_TCPLOSTRETRANSMIT, /* TCPLostRetransmit */ 197 196 LINUX_MIB_TCPRENOFAILURES, /* TCPRenoFailures */ 198 197 LINUX_MIB_TCPSACKFAILURES, /* TCPSackFailures */
+1 -1
include/net/bluetooth/hci.h
··· 1388 1388 }; 1389 1389 #define IREQ_CACHE_FLUSH 0x0001 1390 1390 1391 - extern int enable_hs; 1391 + extern bool enable_hs; 1392 1392 1393 1393 #endif /* __HCI_H */
-1
include/net/netprio_cgroup.h
··· 13 13 14 14 #ifndef _NETPRIO_CGROUP_H 15 15 #define _NETPRIO_CGROUP_H 16 - #include <linux/module.h> 17 16 #include <linux/cgroup.h> 18 17 #include <linux/hardirq.h> 19 18 #include <linux/rcupdate.h>
+6 -7
include/net/sock.h
··· 226 226 * @sk_ack_backlog: current listen backlog 227 227 * @sk_max_ack_backlog: listen backlog set in listen() 228 228 * @sk_priority: %SO_PRIORITY setting 229 + * @sk_cgrp_prioidx: socket group's priority map index 229 230 * @sk_type: socket type (%SOCK_STREAM, etc) 230 231 * @sk_protocol: which protocol this socket belongs in this network family 231 232 * @sk_peer_pid: &struct pid for this socket's peer ··· 922 921 #define sk_refcnt_debug_release(sk) do { } while (0) 923 922 #endif /* SOCK_REFCNT_DEBUG */ 924 923 925 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 924 + #if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET) 926 925 extern struct jump_label_key memcg_socket_limit_enabled; 927 926 static inline struct cg_proto *parent_cg_proto(struct proto *proto, 928 927 struct cg_proto *cg_proto) ··· 1008 1007 struct res_counter *fail; 1009 1008 int ret; 1010 1009 1011 - ret = res_counter_charge(prot->memory_allocated, 1012 - amt << PAGE_SHIFT, &fail); 1013 - 1010 + ret = res_counter_charge_nofail(prot->memory_allocated, 1011 + amt << PAGE_SHIFT, &fail); 1014 1012 if (ret < 0) 1015 1013 *parent_status = OVER_LIMIT; 1016 1014 } ··· 1053 1053 } 1054 1054 1055 1055 static inline void 1056 - sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status) 1056 + sk_memory_allocated_sub(struct sock *sk, int amt) 1057 1057 { 1058 1058 struct proto *prot = sk->sk_prot; 1059 1059 1060 - if (mem_cgroup_sockets_enabled && sk->sk_cgrp && 1061 - parent_status != OVER_LIMIT) /* Otherwise was uncharged already */ 1060 + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) 1062 1061 memcg_memory_allocated_sub(sk->sk_cgrp, amt); 1063 1062 1064 1063 atomic_long_sub(amt, prot->memory_allocated);
+25
kernel/res_counter.c
··· 66 66 return ret; 67 67 } 68 68 69 + int res_counter_charge_nofail(struct res_counter *counter, unsigned long val, 70 + struct res_counter **limit_fail_at) 71 + { 72 + int ret, r; 73 + unsigned long flags; 74 + struct res_counter *c; 75 + 76 + r = ret = 0; 77 + *limit_fail_at = NULL; 78 + local_irq_save(flags); 79 + for (c = counter; c != NULL; c = c->parent) { 80 + spin_lock(&c->lock); 81 + r = res_counter_charge_locked(c, val); 82 + if (r) 83 + c->usage += val; 84 + spin_unlock(&c->lock); 85 + if (r < 0 && ret == 0) { 86 + *limit_fail_at = c; 87 + ret = r; 88 + } 89 + } 90 + local_irq_restore(flags); 91 + 92 + return ret; 93 + } 69 94 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val) 70 95 { 71 96 if (WARN_ON(counter->usage < val))
+2 -2
mm/memcontrol.c
··· 379 379 static bool mem_cgroup_is_root(struct mem_cgroup *memcg); 380 380 void sock_update_memcg(struct sock *sk) 381 381 { 382 - if (static_branch(&memcg_socket_limit_enabled)) { 382 + if (mem_cgroup_sockets_enabled) { 383 383 struct mem_cgroup *memcg; 384 384 385 385 BUG_ON(!sk->sk_prot->proto_cgroup); ··· 411 411 412 412 void sock_release_memcg(struct sock *sk) 413 413 { 414 - if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) { 414 + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { 415 415 struct mem_cgroup *memcg; 416 416 WARN_ON(!sk->sk_cgrp->memcg); 417 417 memcg = sk->sk_cgrp->memcg;
+1 -1
net/bluetooth/hci_core.c
··· 55 55 56 56 #define AUTO_OFF_TIMEOUT 2000 57 57 58 - int enable_hs; 58 + bool enable_hs; 59 59 60 60 static void hci_rx_work(struct work_struct *work); 61 61 static void hci_cmd_work(struct work_struct *work);
+1
net/core/ethtool.c
··· 1311 1311 case ETHTOOL_GRXCSUM: 1312 1312 case ETHTOOL_GTXCSUM: 1313 1313 case ETHTOOL_GSG: 1314 + case ETHTOOL_GSSET_INFO: 1314 1315 case ETHTOOL_GSTRINGS: 1315 1316 case ETHTOOL_GTSO: 1316 1317 case ETHTOOL_GPERMADDR:
+1
net/core/flow_dissector.c
··· 1 1 #include <linux/skbuff.h> 2 + #include <linux/export.h> 2 3 #include <linux/ip.h> 3 4 #include <linux/ipv6.h> 4 5 #include <linux/if_vlan.h>
+2 -2
net/core/pktgen.c
··· 767 767 return i; 768 768 } 769 769 770 - static unsigned long num_arg(const char __user * user_buffer, 771 - unsigned long maxlen, unsigned long *num) 770 + static long num_arg(const char __user *user_buffer, unsigned long maxlen, 771 + unsigned long *num) 772 772 { 773 773 int i; 774 774 *num = 0;
+2 -2
net/core/sock.c
··· 1827 1827 /* Alas. Undo changes. */ 1828 1828 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; 1829 1829 1830 - sk_memory_allocated_sub(sk, amt, parent_status); 1830 + sk_memory_allocated_sub(sk, amt); 1831 1831 1832 1832 return 0; 1833 1833 } ··· 1840 1840 void __sk_mem_reclaim(struct sock *sk) 1841 1841 { 1842 1842 sk_memory_allocated_sub(sk, 1843 - sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0); 1843 + sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT); 1844 1844 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; 1845 1845 1846 1846 if (sk_under_memory_pressure(sk) &&
-1
net/ipv4/proc.c
··· 216 216 SNMP_MIB_ITEM("TCPPartialUndo", LINUX_MIB_TCPPARTIALUNDO), 217 217 SNMP_MIB_ITEM("TCPDSACKUndo", LINUX_MIB_TCPDSACKUNDO), 218 218 SNMP_MIB_ITEM("TCPLossUndo", LINUX_MIB_TCPLOSSUNDO), 219 - SNMP_MIB_ITEM("TCPLoss", LINUX_MIB_TCPLOSS), 220 219 SNMP_MIB_ITEM("TCPLostRetransmit", LINUX_MIB_TCPLOSTRETRANSMIT), 221 220 SNMP_MIB_ITEM("TCPRenoFailures", LINUX_MIB_TCPRENOFAILURES), 222 221 SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES),
+7 -4
net/ipv4/tcp_bic.c
··· 63 63 { 64 64 ca->cnt = 0; 65 65 ca->last_max_cwnd = 0; 66 - ca->loss_cwnd = 0; 67 66 ca->last_cwnd = 0; 68 67 ca->last_time = 0; 69 68 ca->epoch_start = 0; ··· 71 72 72 73 static void bictcp_init(struct sock *sk) 73 74 { 74 - bictcp_reset(inet_csk_ca(sk)); 75 + struct bictcp *ca = inet_csk_ca(sk); 76 + 77 + bictcp_reset(ca); 78 + ca->loss_cwnd = 0; 79 + 75 80 if (initial_ssthresh) 76 81 tcp_sk(sk)->snd_ssthresh = initial_ssthresh; 77 82 } ··· 130 127 } 131 128 132 129 /* if in slow start or link utilization is very low */ 133 - if (ca->loss_cwnd == 0) { 130 + if (ca->last_max_cwnd == 0) { 134 131 if (ca->cnt > 20) /* increase cwnd 5% per RTT */ 135 132 ca->cnt = 20; 136 133 } ··· 188 185 { 189 186 const struct tcp_sock *tp = tcp_sk(sk); 190 187 const struct bictcp *ca = inet_csk_ca(sk); 191 - return max(tp->snd_cwnd, ca->last_max_cwnd); 188 + return max(tp->snd_cwnd, ca->loss_cwnd); 192 189 } 193 190 194 191 static void bictcp_state(struct sock *sk, u8 new_state)
+6 -4
net/ipv4/tcp_cubic.c
··· 107 107 { 108 108 ca->cnt = 0; 109 109 ca->last_max_cwnd = 0; 110 - ca->loss_cwnd = 0; 111 110 ca->last_cwnd = 0; 112 111 ca->last_time = 0; 113 112 ca->bic_origin_point = 0; ··· 141 142 142 143 static void bictcp_init(struct sock *sk) 143 144 { 144 - bictcp_reset(inet_csk_ca(sk)); 145 + struct bictcp *ca = inet_csk_ca(sk); 146 + 147 + bictcp_reset(ca); 148 + ca->loss_cwnd = 0; 145 149 146 150 if (hystart) 147 151 bictcp_hystart_reset(sk); ··· 277 275 * The initial growth of cubic function may be too conservative 278 276 * when the available bandwidth is still unknown. 279 277 */ 280 - if (ca->loss_cwnd == 0 && ca->cnt > 20) 278 + if (ca->last_max_cwnd == 0 && ca->cnt > 20) 281 279 ca->cnt = 20; /* increase cwnd 5% per RTT */ 282 280 283 281 /* TCP Friendly */ ··· 344 342 { 345 343 struct bictcp *ca = inet_csk_ca(sk); 346 344 347 - return max(tcp_sk(sk)->snd_cwnd, ca->last_max_cwnd); 345 + return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); 348 346 } 349 347 350 348 static void bictcp_state(struct sock *sk, u8 new_state)
+15 -26
net/ipv4/tcp_input.c
··· 105 105 #define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */ 106 106 #define FLAG_DATA_SACKED 0x20 /* New SACK. */ 107 107 #define FLAG_ECE 0x40 /* ECE in this ACK */ 108 - #define FLAG_DATA_LOST 0x80 /* SACK detected data lossage. */ 109 108 #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ 110 109 #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ 111 110 #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ ··· 1039 1040 * These 6 states form finite state machine, controlled by the following events: 1040 1041 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue()) 1041 1042 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue()) 1042 - * 3. Loss detection event of one of three flavors: 1043 + * 3. Loss detection event of two flavors: 1043 1044 * A. Scoreboard estimator decided the packet is lost. 1044 1045 * A'. Reno "three dupacks" marks head of queue lost. 1045 - * A''. Its FACK modfication, head until snd.fack is lost. 1046 - * B. SACK arrives sacking data transmitted after never retransmitted 1047 - * hole was sent out. 1048 - * C. SACK arrives sacking SND.NXT at the moment, when the 1046 + * A''. Its FACK modification, head until snd.fack is lost. 1047 + * B. SACK arrives sacking SND.NXT at the moment, when the 1049 1048 * segment was retransmitted. 1050 1049 * 4. D-SACK added new rule: D-SACK changes any tag to S. 1051 1050 * ··· 1150 1153 } 1151 1154 1152 1155 /* Check for lost retransmit. This superb idea is borrowed from "ratehalving". 1153 - * Event "C". Later note: FACK people cheated me again 8), we have to account 1156 + * Event "B". Later note: FACK people cheated me again 8), we have to account 1154 1157 * for reordering! Ugly, but should help. 
1155 1158 * 1156 1159 * Search retransmitted skbs from write_queue that were sent when snd_nxt was ··· 1841 1844 if (found_dup_sack && ((i + 1) == first_sack_index)) 1842 1845 next_dup = &sp[i + 1]; 1843 1846 1844 - /* Event "B" in the comment above. */ 1845 - if (after(end_seq, tp->high_seq)) 1846 - state.flag |= FLAG_DATA_LOST; 1847 - 1848 1847 /* Skip too early cached blocks */ 1849 1848 while (tcp_sack_cache_ok(tp, cache) && 1850 1849 !before(start_seq, cache->end_seq)) ··· 2508 2515 tcp_verify_left_out(tp); 2509 2516 } 2510 2517 2511 - /* Mark head of queue up as lost. With RFC3517 SACK, the packets is 2512 - * is against sacked "cnt", otherwise it's against facked "cnt" 2518 + /* Detect loss in event "A" above by marking head of queue up as lost. 2519 + * For FACK or non-SACK(Reno) senders, the first "packets" number of segments 2520 + * are considered lost. For RFC3517 SACK, a segment is considered lost if it 2521 + * has at least tp->reordering SACKed seqments above it; "packets" refers to 2522 + * the maximum SACKed segments to pass before reaching this limit. 2513 2523 */ 2514 2524 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) 2515 2525 { ··· 2521 2525 int cnt, oldcnt; 2522 2526 int err; 2523 2527 unsigned int mss; 2528 + /* Use SACK to deduce losses of new sequences sent during recovery */ 2529 + const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq; 2524 2530 2525 2531 WARN_ON(packets > tp->packets_out); 2526 2532 if (tp->lost_skb_hint) { ··· 2544 2546 tp->lost_skb_hint = skb; 2545 2547 tp->lost_cnt_hint = cnt; 2546 2548 2547 - if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq)) 2549 + if (after(TCP_SKB_CB(skb)->end_seq, loss_high)) 2548 2550 break; 2549 2551 2550 2552 oldcnt = cnt; ··· 3031 3033 if (tcp_check_sack_reneging(sk, flag)) 3032 3034 return; 3033 3035 3034 - /* C. Process data loss notification, provided it is valid. 
*/ 3035 - if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) && 3036 - before(tp->snd_una, tp->high_seq) && 3037 - icsk->icsk_ca_state != TCP_CA_Open && 3038 - tp->fackets_out > tp->reordering) { 3039 - tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0); 3040 - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS); 3041 - } 3042 - 3043 - /* D. Check consistency of the current state. */ 3036 + /* C. Check consistency of the current state. */ 3044 3037 tcp_verify_left_out(tp); 3045 3038 3046 - /* E. Check state exit conditions. State can be terminated 3039 + /* D. Check state exit conditions. State can be terminated 3047 3040 * when high_seq is ACKed. */ 3048 3041 if (icsk->icsk_ca_state == TCP_CA_Open) { 3049 3042 WARN_ON(tp->retrans_out != 0); ··· 3066 3077 } 3067 3078 } 3068 3079 3069 - /* F. Process state. */ 3080 + /* E. Process state. */ 3070 3081 switch (icsk->icsk_ca_state) { 3071 3082 case TCP_CA_Recovery: 3072 3083 if (!(flag & FLAG_SND_UNA_ADVANCED)) {
+1 -1
net/ipv4/tcp_ipv4.c
··· 631 631 arg.iov[0].iov_len = sizeof(rep.th); 632 632 633 633 #ifdef CONFIG_TCP_MD5SIG 634 - key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL; 634 + key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL; 635 635 if (key) { 636 636 rep.opt[0] = htonl((TCPOPT_NOP << 24) | 637 637 (TCPOPT_NOP << 16) |
+40 -21
net/ipv6/addrconf.c
··· 502 502 rcu_read_unlock(); 503 503 } 504 504 505 - static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old) 505 + static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) 506 506 { 507 507 struct net *net; 508 + int old; 509 + 510 + if (!rtnl_trylock()) 511 + return restart_syscall(); 508 512 509 513 net = (struct net *)table->extra2; 510 - if (p == &net->ipv6.devconf_dflt->forwarding) 511 - return 0; 514 + old = *p; 515 + *p = newf; 512 516 513 - if (!rtnl_trylock()) { 514 - /* Restore the original values before restarting */ 515 - *p = old; 516 - return restart_syscall(); 517 + if (p == &net->ipv6.devconf_dflt->forwarding) { 518 + rtnl_unlock(); 519 + return 0; 517 520 } 518 521 519 522 if (p == &net->ipv6.devconf_all->forwarding) { 520 - __s32 newf = net->ipv6.devconf_all->forwarding; 521 523 net->ipv6.devconf_dflt->forwarding = newf; 522 524 addrconf_forward_change(net, newf); 523 - } else if ((!*p) ^ (!old)) 525 + } else if ((!newf) ^ (!old)) 524 526 dev_forward_change((struct inet6_dev *)table->extra1); 525 527 rtnl_unlock(); 526 528 527 - if (*p) 529 + if (newf) 528 530 rt6_purge_dflt_routers(net); 529 531 return 1; 530 532 } ··· 4262 4260 int *valp = ctl->data; 4263 4261 int val = *valp; 4264 4262 loff_t pos = *ppos; 4263 + ctl_table lctl; 4265 4264 int ret; 4266 4265 4267 - ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 4266 + /* 4267 + * ctl->data points to idev->cnf.forwarding, we should 4268 + * not modify it until we get the rtnl lock. 
4269 + */ 4270 + lctl = *ctl; 4271 + lctl.data = &val; 4272 + 4273 + ret = proc_dointvec(&lctl, write, buffer, lenp, ppos); 4268 4274 4269 4275 if (write) 4270 4276 ret = addrconf_fixup_forwarding(ctl, valp, val); ··· 4310 4300 rcu_read_unlock(); 4311 4301 } 4312 4302 4313 - static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old) 4303 + static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf) 4314 4304 { 4315 4305 struct net *net; 4306 + int old; 4307 + 4308 + if (!rtnl_trylock()) 4309 + return restart_syscall(); 4316 4310 4317 4311 net = (struct net *)table->extra2; 4312 + old = *p; 4313 + *p = newf; 4318 4314 4319 - if (p == &net->ipv6.devconf_dflt->disable_ipv6) 4315 + if (p == &net->ipv6.devconf_dflt->disable_ipv6) { 4316 + rtnl_unlock(); 4320 4317 return 0; 4321 - 4322 - if (!rtnl_trylock()) { 4323 - /* Restore the original values before restarting */ 4324 - *p = old; 4325 - return restart_syscall(); 4326 4318 } 4327 4319 4328 4320 if (p == &net->ipv6.devconf_all->disable_ipv6) { 4329 - __s32 newf = net->ipv6.devconf_all->disable_ipv6; 4330 4321 net->ipv6.devconf_dflt->disable_ipv6 = newf; 4331 4322 addrconf_disable_change(net, newf); 4332 - } else if ((!*p) ^ (!old)) 4323 + } else if ((!newf) ^ (!old)) 4333 4324 dev_disable_change((struct inet6_dev *)table->extra1); 4334 4325 4335 4326 rtnl_unlock(); ··· 4344 4333 int *valp = ctl->data; 4345 4334 int val = *valp; 4346 4335 loff_t pos = *ppos; 4336 + ctl_table lctl; 4347 4337 int ret; 4348 4338 4349 - ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 4339 + /* 4340 + * ctl->data points to idev->cnf.disable_ipv6, we should 4341 + * not modify it until we get the rtnl lock. 4342 + */ 4343 + lctl = *ctl; 4344 + lctl.data = &val; 4345 + 4346 + ret = proc_dointvec(&lctl, write, buffer, lenp, ppos); 4350 4347 4351 4348 if (write) 4352 4349 ret = addrconf_disable_ipv6(ctl, valp, val);
+1 -1
net/ipv6/tcp_ipv6.c
··· 1083 1083 1084 1084 #ifdef CONFIG_TCP_MD5SIG 1085 1085 if (sk) 1086 - key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr); 1086 + key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr); 1087 1087 #endif 1088 1088 1089 1089 if (th->ack)
+5
net/llc/af_llc.c
··· 713 713 struct sk_buff *skb = NULL; 714 714 struct sock *sk = sock->sk; 715 715 struct llc_sock *llc = llc_sk(sk); 716 + unsigned long cpu_flags; 716 717 size_t copied = 0; 717 718 u32 peek_seq = 0; 718 719 u32 *seq; ··· 839 838 goto copy_uaddr; 840 839 841 840 if (!(flags & MSG_PEEK)) { 841 + spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); 842 842 sk_eat_skb(sk, skb, 0); 843 + spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); 843 844 *seq = 0; 844 845 } 845 846 ··· 862 859 llc_cmsg_rcv(msg, skb); 863 860 864 861 if (!(flags & MSG_PEEK)) { 862 + spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); 865 863 sk_eat_skb(sk, skb, 0); 864 + spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); 866 865 *seq = 0; 867 866 } 868 867
+4 -3
net/mac80211/debugfs_key.c
··· 225 225 key, &key_##name##_ops); 226 226 227 227 void ieee80211_debugfs_key_add(struct ieee80211_key *key) 228 - { 228 + { 229 229 static int keycount; 230 - char buf[50]; 230 + char buf[100]; 231 231 struct sta_info *sta; 232 232 233 233 if (!key->local->debugfs.keys) ··· 244 244 245 245 sta = key->sta; 246 246 if (sta) { 247 - sprintf(buf, "../../stations/%pM", sta->sta.addr); 247 + sprintf(buf, "../../netdev:%s/stations/%pM", 248 + sta->sdata->name, sta->sta.addr); 248 249 key->debugfs.stalink = 249 250 debugfs_create_symlink("station", key->debugfs.dir, buf); 250 251 }
+4 -4
net/mac80211/mesh_hwmp.c
··· 119 119 int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) + 120 120 sizeof(mgmt->u.action.u.mesh_action); 121 121 122 - skb = dev_alloc_skb(local->hw.extra_tx_headroom + 122 + skb = dev_alloc_skb(local->tx_headroom + 123 123 hdr_len + 124 124 2 + 37); /* max HWMP IE */ 125 125 if (!skb) 126 126 return -1; 127 - skb_reserve(skb, local->hw.extra_tx_headroom); 127 + skb_reserve(skb, local->tx_headroom); 128 128 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); 129 129 memset(mgmt, 0, hdr_len); 130 130 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | ··· 250 250 if (time_before(jiffies, ifmsh->next_perr)) 251 251 return -EAGAIN; 252 252 253 - skb = dev_alloc_skb(local->hw.extra_tx_headroom + 253 + skb = dev_alloc_skb(local->tx_headroom + 254 254 hdr_len + 255 255 2 + 15 /* PERR IE */); 256 256 if (!skb) 257 257 return -1; 258 - skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom); 258 + skb_reserve(skb, local->tx_headroom); 259 259 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); 260 260 memset(mgmt, 0, hdr_len); 261 261 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+2 -2
net/mac80211/mesh_plink.c
··· 172 172 int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) + 173 173 sizeof(mgmt->u.action.u.self_prot); 174 174 175 - skb = dev_alloc_skb(local->hw.extra_tx_headroom + 175 + skb = dev_alloc_skb(local->tx_headroom + 176 176 hdr_len + 177 177 2 + /* capability info */ 178 178 2 + /* AID */ ··· 186 186 sdata->u.mesh.ie_len); 187 187 if (!skb) 188 188 return -1; 189 - skb_reserve(skb, local->hw.extra_tx_headroom); 189 + skb_reserve(skb, local->tx_headroom); 190 190 mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); 191 191 memset(mgmt, 0, hdr_len); 192 192 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+27 -11
net/mac80211/mlme.c
··· 2750 2750 { 2751 2751 struct ieee80211_local *local = sdata->local; 2752 2752 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2753 - struct ieee80211_work *wk; 2754 2753 u8 bssid[ETH_ALEN]; 2755 2754 bool assoc_bss = false; 2756 2755 ··· 2762 2763 assoc_bss = true; 2763 2764 } else { 2764 2765 bool not_auth_yet = false; 2766 + struct ieee80211_work *tmp, *wk = NULL; 2765 2767 2766 2768 mutex_unlock(&ifmgd->mtx); 2767 2769 2768 2770 mutex_lock(&local->mtx); 2769 - list_for_each_entry(wk, &local->work_list, list) { 2770 - if (wk->sdata != sdata) 2771 + list_for_each_entry(tmp, &local->work_list, list) { 2772 + if (tmp->sdata != sdata) 2771 2773 continue; 2772 2774 2773 - if (wk->type != IEEE80211_WORK_DIRECT_PROBE && 2774 - wk->type != IEEE80211_WORK_AUTH && 2775 - wk->type != IEEE80211_WORK_ASSOC && 2776 - wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT) 2775 + if (tmp->type != IEEE80211_WORK_DIRECT_PROBE && 2776 + tmp->type != IEEE80211_WORK_AUTH && 2777 + tmp->type != IEEE80211_WORK_ASSOC && 2778 + tmp->type != IEEE80211_WORK_ASSOC_BEACON_WAIT) 2777 2779 continue; 2778 2780 2779 - if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN)) 2781 + if (memcmp(req->bss->bssid, tmp->filter_ta, ETH_ALEN)) 2780 2782 continue; 2781 2783 2782 - not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE; 2783 - list_del_rcu(&wk->list); 2784 - free_work(wk); 2784 + not_auth_yet = tmp->type == IEEE80211_WORK_DIRECT_PROBE; 2785 + list_del_rcu(&tmp->list); 2786 + synchronize_rcu(); 2787 + wk = tmp; 2785 2788 break; 2786 2789 } 2787 2790 mutex_unlock(&local->mtx); 2791 + 2792 + if (wk && wk->type == IEEE80211_WORK_ASSOC) { 2793 + /* clean up dummy sta & TX sync */ 2794 + sta_info_destroy_addr(wk->sdata, wk->filter_ta); 2795 + if (wk->assoc.synced) 2796 + drv_finish_tx_sync(local, wk->sdata, 2797 + wk->filter_ta, 2798 + IEEE80211_TX_SYNC_ASSOC); 2799 + } else if (wk && wk->type == IEEE80211_WORK_AUTH) { 2800 + if (wk->probe_auth.synced) 2801 + drv_finish_tx_sync(local, wk->sdata, 
2802 + wk->filter_ta, 2803 + IEEE80211_TX_SYNC_AUTH); 2804 + } 2805 + kfree(wk); 2788 2806 2789 2807 /* 2790 2808 * If somebody requests authentication and we haven't
+8 -12
net/rds/af_rds.c
··· 68 68 { 69 69 struct sock *sk = sock->sk; 70 70 struct rds_sock *rs; 71 - unsigned long flags; 72 71 73 72 if (!sk) 74 73 goto out; ··· 93 94 rds_rdma_drop_keys(rs); 94 95 rds_notify_queue_get(rs, NULL); 95 96 96 - spin_lock_irqsave(&rds_sock_lock, flags); 97 + spin_lock_bh(&rds_sock_lock); 97 98 list_del_init(&rs->rs_item); 98 99 rds_sock_count--; 99 - spin_unlock_irqrestore(&rds_sock_lock, flags); 100 + spin_unlock_bh(&rds_sock_lock); 100 101 101 102 rds_trans_put(rs->rs_transport); 102 103 ··· 408 409 409 410 static int __rds_create(struct socket *sock, struct sock *sk, int protocol) 410 411 { 411 - unsigned long flags; 412 412 struct rds_sock *rs; 413 413 414 414 sock_init_data(sock, sk); ··· 424 426 spin_lock_init(&rs->rs_rdma_lock); 425 427 rs->rs_rdma_keys = RB_ROOT; 426 428 427 - spin_lock_irqsave(&rds_sock_lock, flags); 429 + spin_lock_bh(&rds_sock_lock); 428 430 list_add_tail(&rs->rs_item, &rds_sock_list); 429 431 rds_sock_count++; 430 - spin_unlock_irqrestore(&rds_sock_lock, flags); 432 + spin_unlock_bh(&rds_sock_lock); 431 433 432 434 return 0; 433 435 } ··· 469 471 { 470 472 struct rds_sock *rs; 471 473 struct rds_incoming *inc; 472 - unsigned long flags; 473 474 unsigned int total = 0; 474 475 475 476 len /= sizeof(struct rds_info_message); 476 477 477 - spin_lock_irqsave(&rds_sock_lock, flags); 478 + spin_lock_bh(&rds_sock_lock); 478 479 479 480 list_for_each_entry(rs, &rds_sock_list, rs_item) { 480 481 read_lock(&rs->rs_recv_lock); ··· 489 492 read_unlock(&rs->rs_recv_lock); 490 493 } 491 494 492 - spin_unlock_irqrestore(&rds_sock_lock, flags); 495 + spin_unlock_bh(&rds_sock_lock); 493 496 494 497 lens->nr = total; 495 498 lens->each = sizeof(struct rds_info_message); ··· 501 504 { 502 505 struct rds_info_socket sinfo; 503 506 struct rds_sock *rs; 504 - unsigned long flags; 505 507 506 508 len /= sizeof(struct rds_info_socket); 507 509 508 - spin_lock_irqsave(&rds_sock_lock, flags); 510 + spin_lock_bh(&rds_sock_lock); 509 511 510 512 if (len < 
rds_sock_count) 511 513 goto out; ··· 525 529 lens->nr = rds_sock_count; 526 530 lens->each = sizeof(struct rds_info_socket); 527 531 528 - spin_unlock_irqrestore(&rds_sock_lock, flags); 532 + spin_unlock_bh(&rds_sock_lock); 529 533 } 530 534 531 535 static void rds_exit(void)
+1 -1
net/sched/sch_netem.c
··· 419 419 420 420 cb = netem_skb_cb(skb); 421 421 if (q->gap == 0 || /* not doing reordering */ 422 - q->counter < q->gap || /* inside last reordering gap */ 422 + q->counter < q->gap - 1 || /* inside last reordering gap */ 423 423 q->reorder < get_crandom(&q->reorder_cor)) { 424 424 psched_time_t now; 425 425 psched_tdiff_t delay;