Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) nfnetlink timestamp taken from wrong skb, fix from Florian Westphal.

2) Revert some msleep conversions in rtlwifi as these spots are in
atomic context, from Larry Finger.

3) Validate that NFTA_SET_TABLE attribute is actually specified when we
call nf_tables_getset(). From Phil Turnbull.

4) Don't do mdio_reset in stmmac driver with spinlock held as that can
sleep, from Vincent Palatin.

5) sk_filter() does things other than run a BPF filter, so we should
not elide its call just because sk->sk_filter is NULL. Fix from
Eric Dumazet.

6) Fix missing backlog updates in several packet schedulers, from Cong
Wang.

7) bnx2x driver should allow VLAN add/remove while the interface is
down, from Michal Schmidt.

8) Several RDS/TCP race fixes from Sowmini Varadhan.

9) fq_codel scheduler doesn't return correct queue length in dumps,
from Eric Dumazet.

10) Fix TCP stats for tail loss probe and early retransmit in ipv6, from
Yuchung Cheng.

11) Properly initialize udp_tunnel_socket_cfg in l2tp_tunnel_create(),
from Guillaume Nault.

12) qfq scheduler leaks SKBs if a kzalloc fails, fix from Florian
Westphal.

13) sock_fprog passed into PACKET_FANOUT_DATA needs compat handling,
from Willem de Bruijn.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (85 commits)
vmxnet3: segCnt can be 1 for LRO packets
packet: compat support for sock_fprog
stmmac: fix parameter to dwmac4_set_umac_addr()
net/mlx5e: Fix blue flame quota logic
net/mlx5e: Use ndo_stop explicitly at shutdown flow
net/mlx5: E-Switch, always set mc_promisc for allmulti vports
net/mlx5: E-Switch, Modify node guid on vf set MAC
net/mlx5: E-Switch, Fix vport enable flow
net/mlx5: E-Switch, Use the correct error check on returned pointers
net/mlx5: E-Switch, Use the correct free() function
net/mlx5: Fix E-Switch flow steering capabilities check
net/mlx5: Fix flow steering NIC capabilities check
net/mlx5: Fix root flow table update
net/mlx5: Fix MLX5_CMD_OP_MAX to be defined correctly
net/mlx5: Fix masking of reserved bits in XRCD number
net/mlx5: Fix the size of modify QP mailbox
mlxsw: spectrum: Don't sleep during ndo_get_phys_port_name()
mlxsw: spectrum: Make split flow match firmware requirements
wext: Fix 32 bit iwpriv compatibility issue with 64 bit Kernel
cfg80211: remove get/set antenna and tx power warnings
...

+830 -462
+1
MAINTAINERS
··· 8009 8009 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git 8010 8010 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git 8011 8011 S: Maintained 8012 + F: Documentation/devicetree/bindings/net/wireless/ 8012 8013 F: drivers/net/wireless/ 8013 8014 8014 8015 NETXEN (1/10) GbE SUPPORT
+62 -89
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 12895 12895 return rc; 12896 12896 } 12897 12897 12898 - int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) 12898 + static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp) 12899 12899 { 12900 12900 struct bnx2x_vlan_entry *vlan; 12901 12901 int rc = 0; 12902 12902 12903 - if (!bp->vlan_cnt) { 12904 - DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n"); 12905 - return 0; 12906 - } 12907 - 12903 + /* Configure all non-configured entries */ 12908 12904 list_for_each_entry(vlan, &bp->vlan_reg, link) { 12909 - /* Prepare for cleanup in case of errors */ 12910 - if (rc) { 12911 - vlan->hw = false; 12912 - continue; 12913 - } 12914 - 12915 - if (!vlan->hw) 12905 + if (vlan->hw) 12916 12906 continue; 12917 12907 12918 - DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid); 12908 + if (bp->vlan_cnt >= bp->vlan_credit) 12909 + return -ENOBUFS; 12919 12910 12920 12911 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); 12921 12912 if (rc) { 12922 - BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid); 12923 - vlan->hw = false; 12924 - rc = -EINVAL; 12925 - continue; 12913 + BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid); 12914 + return rc; 12926 12915 } 12916 + 12917 + DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid); 12918 + vlan->hw = true; 12919 + bp->vlan_cnt++; 12927 12920 } 12928 12921 12929 - return rc; 12922 + return 0; 12923 + } 12924 + 12925 + static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode) 12926 + { 12927 + bool need_accept_any_vlan; 12928 + 12929 + need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp); 12930 + 12931 + if (bp->accept_any_vlan != need_accept_any_vlan) { 12932 + bp->accept_any_vlan = need_accept_any_vlan; 12933 + DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n", 12934 + bp->accept_any_vlan ? 
"raised" : "cleared"); 12935 + if (set_rx_mode) { 12936 + if (IS_PF(bp)) 12937 + bnx2x_set_rx_mode_inner(bp); 12938 + else 12939 + bnx2x_vfpf_storm_rx_mode(bp); 12940 + } 12941 + } 12942 + } 12943 + 12944 + int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) 12945 + { 12946 + struct bnx2x_vlan_entry *vlan; 12947 + 12948 + /* The hw forgot all entries after reload */ 12949 + list_for_each_entry(vlan, &bp->vlan_reg, link) 12950 + vlan->hw = false; 12951 + bp->vlan_cnt = 0; 12952 + 12953 + /* Don't set rx mode here. Our caller will do it. */ 12954 + bnx2x_vlan_configure(bp, false); 12955 + 12956 + return 0; 12930 12957 } 12931 12958 12932 12959 static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 12933 12960 { 12934 12961 struct bnx2x *bp = netdev_priv(dev); 12935 12962 struct bnx2x_vlan_entry *vlan; 12936 - bool hw = false; 12937 - int rc = 0; 12938 - 12939 - if (!netif_running(bp->dev)) { 12940 - DP(NETIF_MSG_IFUP, 12941 - "Ignoring VLAN configuration the interface is down\n"); 12942 - return -EFAULT; 12943 - } 12944 12963 12945 12964 DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); 12946 12965 ··· 12967 12948 if (!vlan) 12968 12949 return -ENOMEM; 12969 12950 12970 - bp->vlan_cnt++; 12971 - if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) { 12972 - DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n"); 12973 - bp->accept_any_vlan = true; 12974 - if (IS_PF(bp)) 12975 - bnx2x_set_rx_mode_inner(bp); 12976 - else 12977 - bnx2x_vfpf_storm_rx_mode(bp); 12978 - } else if (bp->vlan_cnt <= bp->vlan_credit) { 12979 - rc = __bnx2x_vlan_configure_vid(bp, vid, true); 12980 - hw = true; 12981 - } 12982 - 12983 12951 vlan->vid = vid; 12984 - vlan->hw = hw; 12952 + vlan->hw = false; 12953 + list_add_tail(&vlan->link, &bp->vlan_reg); 12985 12954 12986 - if (!rc) { 12987 - list_add(&vlan->link, &bp->vlan_reg); 12988 - } else { 12989 - bp->vlan_cnt--; 12990 - kfree(vlan); 12991 - } 12955 + if (netif_running(dev)) 12956 + bnx2x_vlan_configure(bp, true); 12992 
12957 12993 - DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc); 12994 - 12995 - return rc; 12958 + return 0; 12996 12959 } 12997 12960 12998 12961 static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) 12999 12962 { 13000 12963 struct bnx2x *bp = netdev_priv(dev); 13001 12964 struct bnx2x_vlan_entry *vlan; 12965 + bool found = false; 13002 12966 int rc = 0; 13003 - 13004 - if (!netif_running(bp->dev)) { 13005 - DP(NETIF_MSG_IFUP, 13006 - "Ignoring VLAN configuration the interface is down\n"); 13007 - return -EFAULT; 13008 - } 13009 12967 13010 12968 DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); 13011 12969 13012 - if (!bp->vlan_cnt) { 13013 - BNX2X_ERR("Unable to kill VLAN %d\n", vid); 13014 - return -EINVAL; 13015 - } 13016 - 13017 12970 list_for_each_entry(vlan, &bp->vlan_reg, link) 13018 - if (vlan->vid == vid) 12971 + if (vlan->vid == vid) { 12972 + found = true; 13019 12973 break; 12974 + } 13020 12975 13021 - if (vlan->vid != vid) { 12976 + if (!found) { 13022 12977 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); 13023 12978 return -EINVAL; 13024 12979 } 13025 12980 13026 - if (vlan->hw) 12981 + if (netif_running(dev) && vlan->hw) { 13027 12982 rc = __bnx2x_vlan_configure_vid(bp, vid, false); 12983 + DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid); 12984 + bp->vlan_cnt--; 12985 + } 13028 12986 13029 12987 list_del(&vlan->link); 13030 12988 kfree(vlan); 13031 12989 13032 - bp->vlan_cnt--; 13033 - 13034 - if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) { 13035 - /* Configure all non-configured entries */ 13036 - list_for_each_entry(vlan, &bp->vlan_reg, link) { 13037 - if (vlan->hw) 13038 - continue; 13039 - 13040 - rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); 13041 - if (rc) { 13042 - BNX2X_ERR("Unable to config VLAN %d\n", 13043 - vlan->vid); 13044 - continue; 13045 - } 13046 - DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", 13047 - vlan->vid); 13048 - vlan->hw = true; 13049 - } 13050 - 
DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n"); 13051 - bp->accept_any_vlan = false; 13052 - if (IS_PF(bp)) 13053 - bnx2x_set_rx_mode_inner(bp); 13054 - else 13055 - bnx2x_vfpf_storm_rx_mode(bp); 13056 - } 12990 + if (netif_running(dev)) 12991 + bnx2x_vlan_configure(bp, true); 13057 12992 13058 12993 DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); 13059 12994
+25 -21
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 286 286 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); 287 287 txr->tx_prod = prod; 288 288 289 + tx_buf->is_push = 1; 289 290 netdev_tx_sent_queue(txq, skb->len); 291 + wmb(); /* Sync is_push and byte queue before pushing data */ 290 292 291 293 push_len = (length + sizeof(*tx_push) + 7) / 8; 292 294 if (push_len > 16) { ··· 300 298 push_len); 301 299 } 302 300 303 - tx_buf->is_push = 1; 304 301 goto tx_done; 305 302 } 306 303 ··· 1113 1112 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1114 1113 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1115 1114 1116 - if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { 1117 - netdev_features_t features = skb->dev->features; 1115 + if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && 1116 + (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1118 1117 u16 vlan_proto = tpa_info->metadata >> 1119 1118 RX_CMP_FLAGS2_METADATA_TPID_SFT; 1119 + u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK; 1120 1120 1121 - if (((features & NETIF_F_HW_VLAN_CTAG_RX) && 1122 - vlan_proto == ETH_P_8021Q) || 1123 - ((features & NETIF_F_HW_VLAN_STAG_RX) && 1124 - vlan_proto == ETH_P_8021AD)) { 1125 - __vlan_hwaccel_put_tag(skb, htons(vlan_proto), 1126 - tpa_info->metadata & 1127 - RX_CMP_FLAGS2_METADATA_VID_MASK); 1128 - } 1121 + __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1129 1122 } 1130 1123 1131 1124 skb_checksum_none_assert(skb); ··· 1272 1277 1273 1278 skb->protocol = eth_type_trans(skb, dev); 1274 1279 1275 - if (rxcmp1->rx_cmp_flags2 & 1276 - cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) { 1277 - netdev_features_t features = skb->dev->features; 1280 + if ((rxcmp1->rx_cmp_flags2 & 1281 + cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && 1282 + (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1278 1283 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1284 + u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK; 1279 1285 u16 vlan_proto = meta_data >> 
RX_CMP_FLAGS2_METADATA_TPID_SFT; 1280 1286 1281 - if (((features & NETIF_F_HW_VLAN_CTAG_RX) && 1282 - vlan_proto == ETH_P_8021Q) || 1283 - ((features & NETIF_F_HW_VLAN_STAG_RX) && 1284 - vlan_proto == ETH_P_8021AD)) 1285 - __vlan_hwaccel_put_tag(skb, htons(vlan_proto), 1286 - meta_data & 1287 - RX_CMP_FLAGS2_METADATA_VID_MASK); 1287 + __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1288 1288 } 1289 1289 1290 1290 skb_checksum_none_assert(skb); ··· 5456 5466 5457 5467 if (!bnxt_rfs_capable(bp)) 5458 5468 features &= ~NETIF_F_NTUPLE; 5469 + 5470 + /* Both CTAG and STAG VLAN accelaration on the RX side have to be 5471 + * turned on or off together. 5472 + */ 5473 + if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != 5474 + (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { 5475 + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 5476 + features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 5477 + NETIF_F_HW_VLAN_STAG_RX); 5478 + else 5479 + features |= NETIF_F_HW_VLAN_CTAG_RX | 5480 + NETIF_F_HW_VLAN_STAG_RX; 5481 + } 5482 + 5459 5483 return features; 5460 5484 } 5461 5485
+1
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
··· 144 144 CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ 145 145 CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ 146 146 CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ 147 + CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */ 147 148 CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ 148 149 CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ 149 150 CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
+4 -3
drivers/net/ethernet/ethoc.c
··· 1195 1195 priv->mdio = mdiobus_alloc(); 1196 1196 if (!priv->mdio) { 1197 1197 ret = -ENOMEM; 1198 - goto free; 1198 + goto free2; 1199 1199 } 1200 1200 1201 1201 priv->mdio->name = "ethoc-mdio"; ··· 1208 1208 ret = mdiobus_register(priv->mdio); 1209 1209 if (ret) { 1210 1210 dev_err(&netdev->dev, "failed to register MDIO bus\n"); 1211 - goto free; 1211 + goto free2; 1212 1212 } 1213 1213 1214 1214 ret = ethoc_mdio_probe(netdev); ··· 1241 1241 error: 1242 1242 mdiobus_unregister(priv->mdio); 1243 1243 mdiobus_free(priv->mdio); 1244 - free: 1244 + free2: 1245 1245 if (priv->clk) 1246 1246 clk_disable_unprepare(priv->clk); 1247 + free: 1247 1248 free_netdev(netdev); 1248 1249 out: 1249 1250 return ret;
+4 -4
drivers/net/ethernet/freescale/fec_main.c
··· 2416 2416 return -EOPNOTSUPP; 2417 2417 2418 2418 if (ec->rx_max_coalesced_frames > 255) { 2419 - pr_err("Rx coalesced frames exceed hardware limiation"); 2419 + pr_err("Rx coalesced frames exceed hardware limitation\n"); 2420 2420 return -EINVAL; 2421 2421 } 2422 2422 2423 2423 if (ec->tx_max_coalesced_frames > 255) { 2424 - pr_err("Tx coalesced frame exceed hardware limiation"); 2424 + pr_err("Tx coalesced frame exceed hardware limitation\n"); 2425 2425 return -EINVAL; 2426 2426 } 2427 2427 2428 2428 cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); 2429 2429 if (cycle > 0xFFFF) { 2430 - pr_err("Rx coalesed usec exceeed hardware limiation"); 2430 + pr_err("Rx coalesced usec exceed hardware limitation\n"); 2431 2431 return -EINVAL; 2432 2432 } 2433 2433 2434 2434 cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); 2435 2435 if (cycle > 0xFFFF) { 2436 - pr_err("Rx coalesed usec exceeed hardware limiation"); 2436 + pr_err("Rx coalesced usec exceed hardware limitation\n"); 2437 2437 return -EINVAL; 2438 2438 } 2439 2439
+2 -1
drivers/net/ethernet/freescale/gianfar.c
··· 2440 2440 tx_queue->tx_ring_size); 2441 2441 2442 2442 if (likely(!nr_frags)) { 2443 - lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 2443 + if (likely(!do_tstamp)) 2444 + lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 2444 2445 } else { 2445 2446 u32 lstatus_start = lstatus; 2446 2447
+34 -6
drivers/net/ethernet/mediatek/mtk_eth_soc.c
··· 133 133 static void mtk_phy_link_adjust(struct net_device *dev) 134 134 { 135 135 struct mtk_mac *mac = netdev_priv(dev); 136 + u16 lcl_adv = 0, rmt_adv = 0; 137 + u8 flowctrl; 136 138 u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | 137 139 MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | 138 140 MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | ··· 152 150 if (mac->phy_dev->link) 153 151 mcr |= MAC_MCR_FORCE_LINK; 154 152 155 - if (mac->phy_dev->duplex) 153 + if (mac->phy_dev->duplex) { 156 154 mcr |= MAC_MCR_FORCE_DPX; 157 155 158 - if (mac->phy_dev->pause) 159 - mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC; 156 + if (mac->phy_dev->pause) 157 + rmt_adv = LPA_PAUSE_CAP; 158 + if (mac->phy_dev->asym_pause) 159 + rmt_adv |= LPA_PAUSE_ASYM; 160 + 161 + if (mac->phy_dev->advertising & ADVERTISED_Pause) 162 + lcl_adv |= ADVERTISE_PAUSE_CAP; 163 + if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause) 164 + lcl_adv |= ADVERTISE_PAUSE_ASYM; 165 + 166 + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); 167 + 168 + if (flowctrl & FLOW_CTRL_TX) 169 + mcr |= MAC_MCR_FORCE_TX_FC; 170 + if (flowctrl & FLOW_CTRL_RX) 171 + mcr |= MAC_MCR_FORCE_RX_FC; 172 + 173 + netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n", 174 + flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled", 175 + flowctrl & FLOW_CTRL_TX ? 
"enabled" : "disabled"); 176 + } 160 177 161 178 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); 162 179 ··· 229 208 u32 val, ge_mode; 230 209 231 210 np = of_parse_phandle(mac->of_node, "phy-handle", 0); 211 + if (!np && of_phy_is_fixed_link(mac->of_node)) 212 + if (!of_phy_register_fixed_link(mac->of_node)) 213 + np = of_node_get(mac->of_node); 232 214 if (!np) 233 215 return -ENODEV; 234 216 235 217 switch (of_get_phy_mode(np)) { 218 + case PHY_INTERFACE_MODE_RGMII_TXID: 219 + case PHY_INTERFACE_MODE_RGMII_RXID: 220 + case PHY_INTERFACE_MODE_RGMII_ID: 236 221 case PHY_INTERFACE_MODE_RGMII: 237 222 ge_mode = 0; 238 223 break; ··· 263 236 mac->phy_dev->autoneg = AUTONEG_ENABLE; 264 237 mac->phy_dev->speed = 0; 265 238 mac->phy_dev->duplex = 0; 266 - mac->phy_dev->supported &= PHY_BASIC_FEATURES; 239 + mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | 240 + SUPPORTED_Asym_Pause; 267 241 mac->phy_dev->advertising = mac->phy_dev->supported | 268 242 ADVERTISED_Autoneg; 269 243 phy_start_aneg(mac->phy_dev); ··· 308 280 return 0; 309 281 310 282 err_free_bus: 311 - kfree(eth->mii_bus); 283 + mdiobus_free(eth->mii_bus); 312 284 313 285 err_put_node: 314 286 of_node_put(mii_np); ··· 323 295 324 296 mdiobus_unregister(eth->mii_bus); 325 297 of_node_put(eth->mii_bus->dev.of_node); 326 - kfree(eth->mii_bus); 298 + mdiobus_free(eth->mii_bus); 327 299 } 328 300 329 301 static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
+1 -4
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 3192 3192 flush_workqueue(priv->wq); 3193 3193 if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { 3194 3194 netif_device_detach(netdev); 3195 - mutex_lock(&priv->state_lock); 3196 - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 3197 - mlx5e_close_locked(netdev); 3198 - mutex_unlock(&priv->state_lock); 3195 + mlx5e_close(netdev); 3199 3196 } else { 3200 3197 unregister_netdev(netdev); 3201 3198 }
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 317 317 while ((sq->pc & wq->sz_m1) > sq->edge) 318 318 mlx5e_send_nop(sq, false); 319 319 320 - sq->bf_budget = bf ? sq->bf_budget - 1 : 0; 320 + if (bf) 321 + sq->bf_budget--; 321 322 322 323 sq->stats.packets++; 323 324 sq->stats.bytes += num_bytes;
+43 -26
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 383 383 match_v, 384 384 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 385 385 0, &dest); 386 - if (IS_ERR_OR_NULL(flow_rule)) { 386 + if (IS_ERR(flow_rule)) { 387 387 pr_warn( 388 388 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", 389 389 dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); ··· 457 457 458 458 table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); 459 459 fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0); 460 - if (IS_ERR_OR_NULL(fdb)) { 460 + if (IS_ERR(fdb)) { 461 461 err = PTR_ERR(fdb); 462 462 esw_warn(dev, "Failed to create FDB Table err %d\n", err); 463 463 goto out; ··· 474 474 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); 475 475 eth_broadcast_addr(dmac); 476 476 g = mlx5_create_flow_group(fdb, flow_group_in); 477 - if (IS_ERR_OR_NULL(g)) { 477 + if (IS_ERR(g)) { 478 478 err = PTR_ERR(g); 479 479 esw_warn(dev, "Failed to create flow group err(%d)\n", err); 480 480 goto out; ··· 489 489 eth_zero_addr(dmac); 490 490 dmac[0] = 0x01; 491 491 g = mlx5_create_flow_group(fdb, flow_group_in); 492 - if (IS_ERR_OR_NULL(g)) { 492 + if (IS_ERR(g)) { 493 493 err = PTR_ERR(g); 494 494 esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); 495 495 goto out; ··· 506 506 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); 507 507 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); 508 508 g = mlx5_create_flow_group(fdb, flow_group_in); 509 - if (IS_ERR_OR_NULL(g)) { 509 + if (IS_ERR(g)) { 510 510 err = PTR_ERR(g); 511 511 esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); 512 512 goto out; ··· 529 529 } 530 530 } 531 531 532 - kfree(flow_group_in); 532 + kvfree(flow_group_in); 533 533 return err; 534 534 } 535 535 ··· 651 651 esw_fdb_set_vport_rule(esw, 652 652 mac, 653 653 vport_idx); 654 + iter_vaddr->mc_promisc = true; 654 655 break; 655 656 case MLX5_ACTION_DEL: 656 657 if (!iter_vaddr) ··· 1061 
1060 return; 1062 1061 1063 1062 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); 1064 - if (IS_ERR_OR_NULL(acl)) { 1063 + if (IS_ERR(acl)) { 1065 1064 err = PTR_ERR(acl); 1066 1065 esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n", 1067 1066 vport->vport, err); ··· 1076 1075 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); 1077 1076 1078 1077 vlan_grp = mlx5_create_flow_group(acl, flow_group_in); 1079 - if (IS_ERR_OR_NULL(vlan_grp)) { 1078 + if (IS_ERR(vlan_grp)) { 1080 1079 err = PTR_ERR(vlan_grp); 1081 1080 esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n", 1082 1081 vport->vport, err); ··· 1087 1086 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); 1088 1087 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); 1089 1088 drop_grp = mlx5_create_flow_group(acl, flow_group_in); 1090 - if (IS_ERR_OR_NULL(drop_grp)) { 1089 + if (IS_ERR(drop_grp)) { 1091 1090 err = PTR_ERR(drop_grp); 1092 1091 esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n", 1093 1092 vport->vport, err); ··· 1098 1097 vport->egress.drop_grp = drop_grp; 1099 1098 vport->egress.allowed_vlans_grp = vlan_grp; 1100 1099 out: 1101 - kfree(flow_group_in); 1100 + kvfree(flow_group_in); 1102 1101 if (err && !IS_ERR_OR_NULL(vlan_grp)) 1103 1102 mlx5_destroy_flow_group(vlan_grp); 1104 1103 if (err && !IS_ERR_OR_NULL(acl)) ··· 1175 1174 return; 1176 1175 1177 1176 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); 1178 - if (IS_ERR_OR_NULL(acl)) { 1177 + if (IS_ERR(acl)) { 1179 1178 err = PTR_ERR(acl); 1180 1179 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n", 1181 1180 vport->vport, err); ··· 1193 1192 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); 1194 1193 1195 1194 g = mlx5_create_flow_group(acl, flow_group_in); 1196 - if (IS_ERR_OR_NULL(g)) { 
1195 + if (IS_ERR(g)) { 1197 1196 err = PTR_ERR(g); 1198 1197 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n", 1199 1198 vport->vport, err); ··· 1208 1207 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); 1209 1208 1210 1209 g = mlx5_create_flow_group(acl, flow_group_in); 1211 - if (IS_ERR_OR_NULL(g)) { 1210 + if (IS_ERR(g)) { 1212 1211 err = PTR_ERR(g); 1213 1212 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n", 1214 1213 vport->vport, err); ··· 1224 1223 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); 1225 1224 1226 1225 g = mlx5_create_flow_group(acl, flow_group_in); 1227 - if (IS_ERR_OR_NULL(g)) { 1226 + if (IS_ERR(g)) { 1228 1227 err = PTR_ERR(g); 1229 1228 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n", 1230 1229 vport->vport, err); ··· 1237 1236 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); 1238 1237 1239 1238 g = mlx5_create_flow_group(acl, flow_group_in); 1240 - if (IS_ERR_OR_NULL(g)) { 1239 + if (IS_ERR(g)) { 1241 1240 err = PTR_ERR(g); 1242 1241 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n", 1243 1242 vport->vport, err); ··· 1260 1259 mlx5_destroy_flow_table(vport->ingress.acl); 1261 1260 } 1262 1261 1263 - kfree(flow_group_in); 1262 + kvfree(flow_group_in); 1264 1263 } 1265 1264 1266 1265 static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, ··· 1364 1363 match_v, 1365 1364 MLX5_FLOW_CONTEXT_ACTION_ALLOW, 1366 1365 0, NULL); 1367 - if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) { 1366 + if (IS_ERR(vport->ingress.allow_rule)) { 1368 1367 err = PTR_ERR(vport->ingress.allow_rule); 1369 1368 pr_warn("vport[%d] configure ingress allow rule, err(%d)\n", 1370 1369 vport->vport, err); ··· 1381 1380 match_v, 1382 1381 MLX5_FLOW_CONTEXT_ACTION_DROP, 1383 1382 0, NULL); 1384 - if 
(IS_ERR_OR_NULL(vport->ingress.drop_rule)) { 1383 + if (IS_ERR(vport->ingress.drop_rule)) { 1385 1384 err = PTR_ERR(vport->ingress.drop_rule); 1386 1385 pr_warn("vport[%d] configure ingress drop rule, err(%d)\n", 1387 1386 vport->vport, err); ··· 1440 1439 match_v, 1441 1440 MLX5_FLOW_CONTEXT_ACTION_ALLOW, 1442 1441 0, NULL); 1443 - if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) { 1442 + if (IS_ERR(vport->egress.allowed_vlan)) { 1444 1443 err = PTR_ERR(vport->egress.allowed_vlan); 1445 1444 pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n", 1446 1445 vport->vport, err); ··· 1458 1457 match_v, 1459 1458 MLX5_FLOW_CONTEXT_ACTION_DROP, 1460 1459 0, NULL); 1461 - if (IS_ERR_OR_NULL(vport->egress.drop_rule)) { 1460 + if (IS_ERR(vport->egress.drop_rule)) { 1462 1461 err = PTR_ERR(vport->egress.drop_rule); 1463 1462 pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n", 1464 1463 vport->vport, err); ··· 1492 1491 1493 1492 /* Sync with current vport context */ 1494 1493 vport->enabled_events = enable_events; 1495 - esw_vport_change_handle_locked(vport); 1496 - 1497 1494 vport->enabled = true; 1498 1495 1499 1496 /* only PF is trusted by default */ 1500 1497 vport->trusted = (vport_num) ? 
false : true; 1501 - 1502 - arm_vport_context_events_cmd(esw->dev, vport_num, enable_events); 1498 + esw_vport_change_handle_locked(vport); 1503 1499 1504 1500 esw->enabled_vports++; 1505 1501 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); ··· 1726 1728 (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) 1727 1729 #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) 1728 1730 1731 + static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) 1732 + { 1733 + ((u8 *)node_guid)[7] = mac[0]; 1734 + ((u8 *)node_guid)[6] = mac[1]; 1735 + ((u8 *)node_guid)[5] = mac[2]; 1736 + ((u8 *)node_guid)[4] = 0xff; 1737 + ((u8 *)node_guid)[3] = 0xfe; 1738 + ((u8 *)node_guid)[2] = mac[3]; 1739 + ((u8 *)node_guid)[1] = mac[4]; 1740 + ((u8 *)node_guid)[0] = mac[5]; 1741 + } 1742 + 1729 1743 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, 1730 1744 int vport, u8 mac[ETH_ALEN]) 1731 1745 { 1732 - int err = 0; 1733 1746 struct mlx5_vport *evport; 1747 + u64 node_guid; 1748 + int err = 0; 1734 1749 1735 1750 if (!ESW_ALLOWED(esw)) 1736 1751 return -EPERM; ··· 1767 1756 return err; 1768 1757 } 1769 1758 1759 + node_guid_gen_from_mac(&node_guid, mac); 1760 + err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid); 1761 + if (err) 1762 + mlx5_core_warn(esw->dev, 1763 + "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n", 1764 + vport, err); 1765 + 1770 1766 mutex_lock(&esw->state_lock); 1771 1767 if (evport->enabled) 1772 1768 err = esw_vport_ingress_config(esw, evport); 1773 1769 mutex_unlock(&esw->state_lock); 1774 - 1775 1770 return err; 1776 1771 } 1777 1772
+25 -15
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 1292 1292 ft->id); 1293 1293 return err; 1294 1294 } 1295 - root->root_ft = new_root_ft; 1296 1295 } 1296 + root->root_ft = new_root_ft; 1297 1297 return 0; 1298 1298 } 1299 1299 ··· 1767 1767 1768 1768 void mlx5_cleanup_fs(struct mlx5_core_dev *dev) 1769 1769 { 1770 + if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 1771 + return; 1772 + 1770 1773 cleanup_root_ns(dev); 1771 1774 cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); 1772 1775 cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); ··· 1831 1828 { 1832 1829 int err = 0; 1833 1830 1831 + if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 1832 + return 0; 1833 + 1834 1834 err = mlx5_init_fc_stats(dev); 1835 1835 if (err) 1836 1836 return err; 1837 1837 1838 - if (MLX5_CAP_GEN(dev, nic_flow_table)) { 1838 + if (MLX5_CAP_GEN(dev, nic_flow_table) && 1839 + MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) { 1839 1840 err = init_root_ns(dev); 1840 1841 if (err) 1841 1842 goto err; 1842 1843 } 1844 + 1843 1845 if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { 1844 - err = init_fdb_root_ns(dev); 1845 - if (err) 1846 - goto err; 1847 - } 1848 - if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { 1849 - err = init_egress_acl_root_ns(dev); 1850 - if (err) 1851 - goto err; 1852 - } 1853 - if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { 1854 - err = init_ingress_acl_root_ns(dev); 1855 - if (err) 1856 - goto err; 1846 + if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { 1847 + err = init_fdb_root_ns(dev); 1848 + if (err) 1849 + goto err; 1850 + } 1851 + if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { 1852 + err = init_egress_acl_root_ns(dev); 1853 + if (err) 1854 + goto err; 1855 + } 1856 + if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { 1857 + err = init_ingress_acl_root_ns(dev); 1858 + if (err) 1859 + goto err; 1860 + } 1857 1861 } 1858 1862 1859 1863 return 0;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/qp.c
··· 418 418 if (out.hdr.status) 419 419 err = mlx5_cmd_status_to_err(&out.hdr); 420 420 else 421 - *xrcdn = be32_to_cpu(out.xrcdn); 421 + *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff; 422 422 423 423 return err; 424 424 }
+38
drivers/net/ethernet/mellanox/mlx5/core/vport.c
··· 508 508 } 509 509 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); 510 510 511 + int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, 512 + u32 vport, u64 node_guid) 513 + { 514 + int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); 515 + void *nic_vport_context; 516 + u8 *guid; 517 + void *in; 518 + int err; 519 + 520 + if (!vport) 521 + return -EINVAL; 522 + if (!MLX5_CAP_GEN(mdev, vport_group_manager)) 523 + return -EACCES; 524 + if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) 525 + return -ENOTSUPP; 526 + 527 + in = mlx5_vzalloc(inlen); 528 + if (!in) 529 + return -ENOMEM; 530 + 531 + MLX5_SET(modify_nic_vport_context_in, in, 532 + field_select.node_guid, 1); 533 + MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); 534 + MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport); 535 + 536 + nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in, 537 + in, nic_vport_context); 538 + guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context, 539 + node_guid); 540 + MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid); 541 + 542 + err = mlx5_modify_nic_vport_context(mdev, in, inlen); 543 + 544 + kvfree(in); 545 + 546 + return err; 547 + } 548 + 511 549 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, 512 550 u16 *qkey_viol_cntr) 513 551 {
+112 -91
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 247 247 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); 248 248 } 249 249 250 + static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port, 251 + u8 swid) 252 + { 253 + char pspa_pl[MLXSW_REG_PSPA_LEN]; 254 + 255 + mlxsw_reg_pspa_pack(pspa_pl, swid, local_port); 256 + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 257 + } 258 + 250 259 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) 251 260 { 252 261 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 253 - char pspa_pl[MLXSW_REG_PSPA_LEN]; 254 262 255 - mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); 256 - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); 263 + return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port, 264 + swid); 257 265 } 258 266 259 267 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, ··· 313 305 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); 314 306 } 315 307 316 - static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, 317 - u8 local_port, u8 *p_module, 318 - u8 *p_width, u8 *p_lane) 308 + static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, 309 + u8 local_port, u8 *p_module, 310 + u8 *p_width, u8 *p_lane) 319 311 { 320 312 char pmlp_pl[MLXSW_REG_PMLP_LEN]; 321 313 int err; ··· 328 320 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); 329 321 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); 330 322 return 0; 331 - } 332 - 333 - static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, 334 - u8 local_port, u8 *p_module, 335 - u8 *p_width) 336 - { 337 - u8 lane; 338 - 339 - return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module, 340 - p_width, &lane); 341 323 } 342 324 343 325 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, ··· 947 949 size_t len) 948 950 { 949 951 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 950 - u8 module, 
width, lane; 952 + u8 module = mlxsw_sp_port->mapping.module; 953 + u8 width = mlxsw_sp_port->mapping.width; 954 + u8 lane = mlxsw_sp_port->mapping.lane; 951 955 int err; 952 - 953 - err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp, 954 - mlxsw_sp_port->local_port, 955 - &module, &width, &lane); 956 - if (err) { 957 - netdev_err(dev, "Failed to retrieve module information\n"); 958 - return err; 959 - } 960 956 961 957 if (!mlxsw_sp_port->split) 962 958 err = snprintf(name, len, "p%d", module + 1); ··· 1673 1681 return 0; 1674 1682 } 1675 1683 1676 - static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 1677 - bool split, u8 module, u8 width) 1684 + static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 1685 + bool split, u8 module, u8 width, u8 lane) 1678 1686 { 1679 1687 struct mlxsw_sp_port *mlxsw_sp_port; 1680 1688 struct net_device *dev; ··· 1689 1697 mlxsw_sp_port->mlxsw_sp = mlxsw_sp; 1690 1698 mlxsw_sp_port->local_port = local_port; 1691 1699 mlxsw_sp_port->split = split; 1700 + mlxsw_sp_port->mapping.module = module; 1701 + mlxsw_sp_port->mapping.width = width; 1702 + mlxsw_sp_port->mapping.lane = lane; 1692 1703 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); 1693 1704 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); 1694 1705 if (!mlxsw_sp_port->active_vlans) { ··· 1834 1839 return err; 1835 1840 } 1836 1841 1837 - static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, 1838 - bool split, u8 module, u8 width, u8 lane) 1839 - { 1840 - int err; 1841 - 1842 - err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, 1843 - lane); 1844 - if (err) 1845 - return err; 1846 - 1847 - err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module, 1848 - width); 1849 - if (err) 1850 - goto err_port_create; 1851 - 1852 - return 0; 1853 - 1854 - err_port_create: 1855 - mlxsw_sp_port_module_unmap(mlxsw_sp, local_port); 1856 - return err; 1857 - } 1858 - 1859 1842 static void 
mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) 1860 1843 { 1861 1844 struct net_device *dev = mlxsw_sp_port->dev; ··· 1882 1909 1883 1910 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) 1884 1911 { 1912 + u8 module, width, lane; 1885 1913 size_t alloc_size; 1886 - u8 module, width; 1887 1914 int i; 1888 1915 int err; 1889 1916 ··· 1894 1921 1895 1922 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { 1896 1923 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, 1897 - &width); 1924 + &width, &lane); 1898 1925 if (err) 1899 1926 goto err_port_module_info_get; 1900 1927 if (!width) 1901 1928 continue; 1902 1929 mlxsw_sp->port_to_module[i] = module; 1903 - err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width); 1930 + err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width, 1931 + lane); 1904 1932 if (err) 1905 1933 goto err_port_create; 1906 1934 } ··· 1922 1948 return local_port - offset; 1923 1949 } 1924 1950 1951 + static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, 1952 + u8 module, unsigned int count) 1953 + { 1954 + u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 1955 + int err, i; 1956 + 1957 + for (i = 0; i < count; i++) { 1958 + err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module, 1959 + width, i * width); 1960 + if (err) 1961 + goto err_port_module_map; 1962 + } 1963 + 1964 + for (i = 0; i < count; i++) { 1965 + err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0); 1966 + if (err) 1967 + goto err_port_swid_set; 1968 + } 1969 + 1970 + for (i = 0; i < count; i++) { 1971 + err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, 1972 + module, width, i * width); 1973 + if (err) 1974 + goto err_port_create; 1975 + } 1976 + 1977 + return 0; 1978 + 1979 + err_port_create: 1980 + for (i--; i >= 0; i--) 1981 + mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 1982 + i = count; 1983 + err_port_swid_set: 1984 + for (i--; i >= 0; i--) 1985 + __mlxsw_sp_port_swid_set(mlxsw_sp, base_port 
+ i, 1986 + MLXSW_PORT_SWID_DISABLED_PORT); 1987 + i = count; 1988 + err_port_module_map: 1989 + for (i--; i >= 0; i--) 1990 + mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i); 1991 + return err; 1992 + } 1993 + 1994 + static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, 1995 + u8 base_port, unsigned int count) 1996 + { 1997 + u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; 1998 + int i; 1999 + 2000 + /* Split by four means we need to re-create two ports, otherwise 2001 + * only one. 2002 + */ 2003 + count = count / 2; 2004 + 2005 + for (i = 0; i < count; i++) { 2006 + local_port = base_port + i * 2; 2007 + module = mlxsw_sp->port_to_module[local_port]; 2008 + 2009 + mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, 2010 + 0); 2011 + } 2012 + 2013 + for (i = 0; i < count; i++) 2014 + __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0); 2015 + 2016 + for (i = 0; i < count; i++) { 2017 + local_port = base_port + i * 2; 2018 + module = mlxsw_sp->port_to_module[local_port]; 2019 + 2020 + mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, 2021 + width, 0); 2022 + } 2023 + } 2024 + 1925 2025 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 1926 2026 unsigned int count) 1927 2027 { 1928 2028 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 1929 2029 struct mlxsw_sp_port *mlxsw_sp_port; 1930 - u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; 1931 2030 u8 module, cur_width, base_port; 1932 2031 int i; 1933 2032 int err; ··· 2012 1965 return -EINVAL; 2013 1966 } 2014 1967 1968 + module = mlxsw_sp_port->mapping.module; 1969 + cur_width = mlxsw_sp_port->mapping.width; 1970 + 2015 1971 if (count != 2 && count != 4) { 2016 1972 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); 2017 1973 return -EINVAL; 2018 - } 2019 - 2020 - err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module, 2021 - &cur_width); 2022 - if (err) { 2023 - 
netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n"); 2024 - return err; 2025 1974 } 2026 1975 2027 1976 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { ··· 2044 2001 for (i = 0; i < count; i++) 2045 2002 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2046 2003 2047 - for (i = 0; i < count; i++) { 2048 - err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, 2049 - module, width, i * width); 2050 - if (err) { 2051 - dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n"); 2052 - goto err_port_create; 2053 - } 2004 + err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); 2005 + if (err) { 2006 + dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); 2007 + goto err_port_split_create; 2054 2008 } 2055 2009 2056 2010 return 0; 2057 2011 2058 - err_port_create: 2059 - for (i--; i >= 0; i--) 2060 - mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2061 - for (i = 0; i < count / 2; i++) { 2062 - module = mlxsw_sp->port_to_module[base_port + i * 2]; 2063 - mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false, 2064 - module, MLXSW_PORT_MODULE_MAX_WIDTH, 0); 2065 - } 2012 + err_port_split_create: 2013 + mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 2066 2014 return err; 2067 2015 } 2068 2016 ··· 2061 2027 { 2062 2028 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2063 2029 struct mlxsw_sp_port *mlxsw_sp_port; 2064 - u8 module, cur_width, base_port; 2030 + u8 cur_width, base_port; 2065 2031 unsigned int count; 2066 2032 int i; 2067 - int err; 2068 2033 2069 2034 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 2070 2035 if (!mlxsw_sp_port) { ··· 2077 2044 return -EINVAL; 2078 2045 } 2079 2046 2080 - err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module, 2081 - &cur_width); 2082 - if (err) { 2083 - netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n"); 2084 - return err; 2085 - } 2047 + cur_width = mlxsw_sp_port->mapping.width; 2086 2048 count = cur_width == 1 ? 
4 : 2; 2087 2049 2088 2050 base_port = mlxsw_sp_cluster_base_port_get(local_port); ··· 2089 2061 for (i = 0; i < count; i++) 2090 2062 mlxsw_sp_port_remove(mlxsw_sp, base_port + i); 2091 2063 2092 - for (i = 0; i < count / 2; i++) { 2093 - module = mlxsw_sp->port_to_module[base_port + i * 2]; 2094 - err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false, 2095 - module, MLXSW_PORT_MODULE_MAX_WIDTH, 2096 - 0); 2097 - if (err) 2098 - dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n"); 2099 - } 2064 + mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); 2100 2065 2101 2066 return 0; 2102 2067 }
+5
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
··· 229 229 struct ieee_maxrate *maxrate; 230 230 struct ieee_pfc *pfc; 231 231 } dcb; 232 + struct { 233 + u8 module; 234 + u8 width; 235 + u8 lane; 236 + } mapping; 232 237 /* 802.1Q bridge VLANs */ 233 238 unsigned long *active_vlans; 234 239 unsigned long *untagged_vlans;
+36 -9
drivers/net/ethernet/qlogic/qed/qed_main.c
··· 1105 1105 return port_type; 1106 1106 } 1107 1107 1108 + static int qed_get_link_data(struct qed_hwfn *hwfn, 1109 + struct qed_mcp_link_params *params, 1110 + struct qed_mcp_link_state *link, 1111 + struct qed_mcp_link_capabilities *link_caps) 1112 + { 1113 + void *p; 1114 + 1115 + if (!IS_PF(hwfn->cdev)) { 1116 + qed_vf_get_link_params(hwfn, params); 1117 + qed_vf_get_link_state(hwfn, link); 1118 + qed_vf_get_link_caps(hwfn, link_caps); 1119 + 1120 + return 0; 1121 + } 1122 + 1123 + p = qed_mcp_get_link_params(hwfn); 1124 + if (!p) 1125 + return -ENXIO; 1126 + memcpy(params, p, sizeof(*params)); 1127 + 1128 + p = qed_mcp_get_link_state(hwfn); 1129 + if (!p) 1130 + return -ENXIO; 1131 + memcpy(link, p, sizeof(*link)); 1132 + 1133 + p = qed_mcp_get_link_capabilities(hwfn); 1134 + if (!p) 1135 + return -ENXIO; 1136 + memcpy(link_caps, p, sizeof(*link_caps)); 1137 + 1138 + return 0; 1139 + } 1140 + 1108 1141 static void qed_fill_link(struct qed_hwfn *hwfn, 1109 1142 struct qed_link_output *if_link) 1110 1143 { ··· 1149 1116 memset(if_link, 0, sizeof(*if_link)); 1150 1117 1151 1118 /* Prepare source inputs */ 1152 - if (IS_PF(hwfn->cdev)) { 1153 - memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params)); 1154 - memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); 1155 - memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn), 1156 - sizeof(link_caps)); 1157 - } else { 1158 - qed_vf_get_link_params(hwfn, &params); 1159 - qed_vf_get_link_state(hwfn, &link); 1160 - qed_vf_get_link_caps(hwfn, &link_caps); 1119 + if (qed_get_link_data(hwfn, &params, &link, &link_caps)) { 1120 + dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); 1121 + return; 1161 1122 } 1162 1123 1163 1124 /* Set the link parameters to pass to protocol driver */
+3 -1
drivers/net/ethernet/qlogic/qed/qed_sriov.h
··· 12 12 #include "qed_vf.h" 13 13 #define QED_VF_ARRAY_LENGTH (3) 14 14 15 + #ifdef CONFIG_QED_SRIOV 15 16 #define IS_VF(cdev) ((cdev)->b_is_vf) 16 17 #define IS_PF(cdev) (!((cdev)->b_is_vf)) 17 - #ifdef CONFIG_QED_SRIOV 18 18 #define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info)) 19 19 #else 20 + #define IS_VF(cdev) (0) 21 + #define IS_PF(cdev) (1) 20 22 #define IS_PF_SRIOV(p_hwfn) (0) 21 23 #endif 22 24 #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
+2
drivers/net/ethernet/qlogic/qede/qede_main.c
··· 87 87 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF}, 88 88 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF}, 89 89 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, 90 + #ifdef CONFIG_QED_SRIOV 90 91 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, 92 + #endif 91 93 { 0 } 92 94 }; 93 95
+4 -3
drivers/net/ethernet/sfc/mcdi_port.c
··· 189 189 190 190 case MC_CMD_MEDIA_XFP: 191 191 case MC_CMD_MEDIA_SFP_PLUS: 192 - result |= SUPPORTED_FIBRE; 193 - break; 194 - 195 192 case MC_CMD_MEDIA_QSFP_PLUS: 196 193 result |= SUPPORTED_FIBRE; 194 + if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) 195 + result |= SUPPORTED_1000baseT_Full; 196 + if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) 197 + result |= SUPPORTED_10000baseT_Full; 197 198 if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) 198 199 result |= SUPPORTED_40000baseCR4_Full; 199 200 break;
+1 -1
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
··· 156 156 struct netdev_hw_addr *ha; 157 157 158 158 netdev_for_each_uc_addr(ha, dev) { 159 - dwmac4_set_umac_addr(ioaddr, ha->addr, reg); 159 + dwmac4_set_umac_addr(hw, ha->addr, reg); 160 160 reg++; 161 161 } 162 162 }
+4 -2
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 3450 3450 if (!netif_running(ndev)) 3451 3451 return 0; 3452 3452 3453 - spin_lock_irqsave(&priv->lock, flags); 3454 - 3455 3453 /* Power Down bit, into the PM register, is cleared 3456 3454 * automatically as soon as a magic packet or a Wake-up frame 3457 3455 * is received. Anyway, it's better to manually clear ··· 3457 3459 * from another devices (e.g. serial console). 3458 3460 */ 3459 3461 if (device_may_wakeup(priv->device)) { 3462 + spin_lock_irqsave(&priv->lock, flags); 3460 3463 priv->hw->mac->pmt(priv->hw, 0); 3464 + spin_unlock_irqrestore(&priv->lock, flags); 3461 3465 priv->irq_wake = 0; 3462 3466 } else { 3463 3467 pinctrl_pm_select_default_state(priv->device); ··· 3472 3472 } 3473 3473 3474 3474 netif_device_attach(ndev); 3475 + 3476 + spin_lock_irqsave(&priv->lock, flags); 3475 3477 3476 3478 priv->cur_rx = 0; 3477 3479 priv->dirty_rx = 0;
+1 -1
drivers/net/ethernet/ti/cpsw.c
··· 1339 1339 if (priv->coal_intvl != 0) { 1340 1340 struct ethtool_coalesce coal; 1341 1341 1342 - coal.rx_coalesce_usecs = (priv->coal_intvl << 4); 1342 + coal.rx_coalesce_usecs = priv->coal_intvl; 1343 1343 cpsw_set_coalesce(ndev, &coal); 1344 1344 } 1345 1345
+1 -1
drivers/net/vmxnet3/vmxnet3_drv.c
··· 1369 1369 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; 1370 1370 1371 1371 segCnt = rcdlro->segCnt; 1372 - BUG_ON(segCnt <= 1); 1372 + WARN_ON_ONCE(segCnt == 0); 1373 1373 mss = rcdlro->mss; 1374 1374 if (unlikely(segCnt <= 1)) 1375 1375 segCnt = 0;
+2 -2
drivers/net/vmxnet3/vmxnet3_int.h
··· 69 69 /* 70 70 * Version numbers 71 71 */ 72 - #define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k" 72 + #define VMXNET3_DRIVER_VERSION_STRING "1.4.8.0-k" 73 73 74 74 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 75 - #define VMXNET3_DRIVER_VERSION_NUM 0x01040700 75 + #define VMXNET3_DRIVER_VERSION_NUM 0x01040800 76 76 77 77 #if defined(CONFIG_PCI_MSI) 78 78 /* RSS only makes sense if MSI-X is supported. */
+16
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
··· 2540 2540 const u8 *mac, struct station_info *sinfo) 2541 2541 { 2542 2542 struct brcmf_if *ifp = netdev_priv(ndev); 2543 + struct brcmf_scb_val_le scb_val; 2543 2544 s32 err = 0; 2544 2545 struct brcmf_sta_info_le sta_info_le; 2545 2546 u32 sta_flags; 2546 2547 u32 is_tdls_peer; 2547 2548 s32 total_rssi; 2548 2549 s32 count_rssi; 2550 + int rssi; 2549 2551 u32 i; 2550 2552 2551 2553 brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac); ··· 2631 2629 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); 2632 2630 total_rssi /= count_rssi; 2633 2631 sinfo->signal = total_rssi; 2632 + } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED, 2633 + &ifp->vif->sme_state)) { 2634 + memset(&scb_val, 0, sizeof(scb_val)); 2635 + err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI, 2636 + &scb_val, sizeof(scb_val)); 2637 + if (err) { 2638 + brcmf_err("Could not get rssi (%d)\n", err); 2639 + goto done; 2640 + } else { 2641 + rssi = le32_to_cpu(scb_val.val); 2642 + sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); 2643 + sinfo->signal = rssi; 2644 + brcmf_dbg(CONN, "RSSI %d dBm\n", rssi); 2645 + } 2634 2646 } 2635 2647 } 2636 2648 done:
+2
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
··· 1157 1157 brcmu_pkt_buf_free_skb(skb); 1158 1158 return; 1159 1159 } 1160 + 1161 + skb->protocol = eth_type_trans(skb, ifp->ndev); 1160 1162 brcmf_netif_rx(ifp, skb); 1161 1163 } 1162 1164
+1
drivers/net/wireless/mac80211_hwsim.c
··· 2776 2776 if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || 2777 2777 !info->attrs[HWSIM_ATTR_FLAGS] || 2778 2778 !info->attrs[HWSIM_ATTR_COOKIE] || 2779 + !info->attrs[HWSIM_ATTR_SIGNAL] || 2779 2780 !info->attrs[HWSIM_ATTR_TX_INFO]) 2780 2781 goto out; 2781 2782
+3 -3
drivers/net/wireless/realtek/rtlwifi/core.c
··· 54 54 void rtl_addr_delay(u32 addr) 55 55 { 56 56 if (addr == 0xfe) 57 - msleep(50); 57 + mdelay(50); 58 58 else if (addr == 0xfd) 59 59 msleep(5); 60 60 else if (addr == 0xfc) ··· 75 75 rtl_addr_delay(addr); 76 76 } else { 77 77 rtl_set_rfreg(hw, rfpath, addr, mask, data); 78 - usleep_range(1, 2); 78 + udelay(1); 79 79 } 80 80 } 81 81 EXPORT_SYMBOL(rtl_rfreg_delay); ··· 86 86 rtl_addr_delay(addr); 87 87 } else { 88 88 rtl_set_bbreg(hw, addr, MASKDWORD, data); 89 - usleep_range(1, 2); 89 + udelay(1); 90 90 } 91 91 } 92 92 EXPORT_SYMBOL(rtl_bb_delay);
+6 -2
include/linux/mlx5/device.h
··· 1240 1240 u8 rsvd[8]; 1241 1241 }; 1242 1242 1243 - #define MLX5_CMD_OP_MAX 0x920 1244 - 1245 1243 enum { 1246 1244 VPORT_STATE_DOWN = 0x0, 1247 1245 VPORT_STATE_UP = 0x1, ··· 1366 1368 1367 1369 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ 1368 1370 MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) 1371 + 1372 + #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ 1373 + MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) 1374 + 1375 + #define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \ 1376 + MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap) 1369 1377 1370 1378 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ 1371 1379 MLX5_GET(flow_table_eswitch_cap, \
+9 -3
include/linux/mlx5/mlx5_ifc.h
··· 205 205 MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, 206 206 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, 207 207 MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, 208 - MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c 208 + MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, 209 + MLX5_CMD_OP_MAX 209 210 }; 210 211 211 212 struct mlx5_ifc_flow_table_fields_supported_bits { ··· 501 500 u8 vport_svlan_insert[0x1]; 502 501 u8 vport_cvlan_insert_if_not_exist[0x1]; 503 502 u8 vport_cvlan_insert_overwrite[0x1]; 504 - u8 reserved_at_5[0x1b]; 503 + u8 reserved_at_5[0x19]; 504 + u8 nic_vport_node_guid_modify[0x1]; 505 + u8 nic_vport_port_guid_modify[0x1]; 505 506 506 507 u8 reserved_at_20[0x7e0]; 507 508 }; ··· 4586 4583 }; 4587 4584 4588 4585 struct mlx5_ifc_modify_nic_vport_field_select_bits { 4589 - u8 reserved_at_0[0x19]; 4586 + u8 reserved_at_0[0x16]; 4587 + u8 node_guid[0x1]; 4588 + u8 port_guid[0x1]; 4589 + u8 reserved_at_18[0x1]; 4590 4590 u8 mtu[0x1]; 4591 4591 u8 change_event[0x1]; 4592 4592 u8 promisc[0x1];
+1
include/linux/mlx5/qp.h
··· 559 559 __be32 optparam; 560 560 u8 rsvd0[4]; 561 561 struct mlx5_qp_context ctx; 562 + u8 rsvd2[16]; 562 563 }; 563 564 564 565 struct mlx5_modify_qp_mbox_out {
+2
include/linux/mlx5/vport.h
··· 50 50 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, 51 51 u64 *system_image_guid); 52 52 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); 53 + int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, 54 + u32 vport, u64 node_guid); 53 55 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, 54 56 u16 *qkey_viol_cntr); 55 57 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+1
include/net/compat.h
··· 42 42 43 43 int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, 44 44 struct sockaddr __user **, struct iovec **); 45 + struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval); 45 46 asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *, 46 47 unsigned int); 47 48 asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
+1 -1
include/net/ip_vs.h
··· 1232 1232 const char *ip_vs_state_name(__u16 proto, int state); 1233 1233 1234 1234 void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp); 1235 - int ip_vs_check_template(struct ip_vs_conn *ct); 1235 + int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest); 1236 1236 void ip_vs_random_dropentry(struct netns_ipvs *ipvs); 1237 1237 int ip_vs_conn_init(void); 1238 1238 void ip_vs_conn_cleanup(void);
+2 -2
include/net/netfilter/nf_queue.h
··· 28 28 struct nf_hook_ops *ops); 29 29 }; 30 30 31 - void nf_register_queue_handler(const struct nf_queue_handler *qh); 32 - void nf_unregister_queue_handler(void); 31 + void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh); 32 + void nf_unregister_queue_handler(struct net *net); 33 33 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); 34 34 35 35 void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
+2
include/net/netns/netfilter.h
··· 5 5 6 6 struct proc_dir_entry; 7 7 struct nf_logger; 8 + struct nf_queue_handler; 8 9 9 10 struct netns_nf { 10 11 #if defined CONFIG_PROC_FS 11 12 struct proc_dir_entry *proc_netfilter; 12 13 #endif 14 + const struct nf_queue_handler __rcu *queue_handler; 13 15 const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO]; 14 16 #ifdef CONFIG_SYSCTL 15 17 struct ctl_table_header *nf_log_dir_header;
+7 -3
include/net/pkt_cls.h
··· 392 392 }; 393 393 }; 394 394 395 - static inline bool tc_should_offload(struct net_device *dev, u32 flags) 395 + static inline bool tc_should_offload(const struct net_device *dev, 396 + const struct tcf_proto *tp, u32 flags) 396 397 { 398 + const struct Qdisc *sch = tp->q; 399 + const struct Qdisc_class_ops *cops = sch->ops->cl_ops; 400 + 397 401 if (!(dev->features & NETIF_F_HW_TC)) 398 402 return false; 399 - 400 403 if (flags & TCA_CLS_FLAGS_SKIP_HW) 401 404 return false; 402 - 403 405 if (!dev->netdev_ops->ndo_setup_tc) 404 406 return false; 407 + if (cops && cops->tcf_cl_offload) 408 + return cops->tcf_cl_offload(tp->classid); 405 409 406 410 return true; 407 411 }
+5 -1
include/net/sch_generic.h
··· 168 168 169 169 /* Filter manipulation */ 170 170 struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long); 171 + bool (*tcf_cl_offload)(u32 classid); 171 172 unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, 172 173 u32 classid); 173 174 void (*unbind_tcf)(struct Qdisc *, unsigned long); ··· 692 691 /* we can reuse ->gso_skb because peek isn't called for root qdiscs */ 693 692 if (!sch->gso_skb) { 694 693 sch->gso_skb = sch->dequeue(sch); 695 - if (sch->gso_skb) 694 + if (sch->gso_skb) { 696 695 /* it's still part of the queue */ 696 + qdisc_qstats_backlog_inc(sch, sch->gso_skb); 697 697 sch->q.qlen++; 698 + } 698 699 } 699 700 700 701 return sch->gso_skb; ··· 709 706 710 707 if (skb) { 711 708 sch->gso_skb = NULL; 709 + qdisc_qstats_backlog_dec(sch, skb); 712 710 sch->q.qlen--; 713 711 } else { 714 712 skb = sch->dequeue(sch);
+1 -1
include/uapi/linux/gtp.h
··· 1 1 #ifndef _UAPI_LINUX_GTP_H_ 2 - #define _UAPI_LINUX_GTP_H__ 2 + #define _UAPI_LINUX_GTP_H_ 3 3 4 4 enum gtp_genl_cmds { 5 5 GTP_CMD_NEWPDP,
+2 -2
kernel/trace/bpf_trace.c
··· 198 198 if (unlikely(index >= array->map.max_entries)) 199 199 return -E2BIG; 200 200 201 - file = (struct file *)array->ptrs[index]; 201 + file = READ_ONCE(array->ptrs[index]); 202 202 if (unlikely(!file)) 203 203 return -ENOENT; 204 204 ··· 247 247 if (unlikely(index >= array->map.max_entries)) 248 248 return -E2BIG; 249 249 250 - file = (struct file *)array->ptrs[index]; 250 + file = READ_ONCE(array->ptrs[index]); 251 251 if (unlikely(!file)) 252 252 return -ENOENT; 253 253
+2
net/bridge/br_fdb.c
··· 279 279 * change from under us. 280 280 */ 281 281 list_for_each_entry(v, &vg->vlan_list, vlist) { 282 + if (!br_vlan_should_use(v)) 283 + continue; 282 284 f = __br_fdb_get(br, br->dev->dev_addr, v->vid); 283 285 if (f && f->is_local && !f->dst) 284 286 fdb_delete_local(br, NULL, f);
+17 -3
net/compat.c
··· 309 309 __scm_destroy(scm); 310 310 } 311 311 312 - static int do_set_attach_filter(struct socket *sock, int level, int optname, 313 - char __user *optval, unsigned int optlen) 312 + /* allocate a 64-bit sock_fprog on the user stack for duration of syscall. */ 313 + struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval) 314 314 { 315 315 struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval; 316 316 struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog)); ··· 323 323 __get_user(ptr, &fprog32->filter) || 324 324 __put_user(len, &kfprog->len) || 325 325 __put_user(compat_ptr(ptr), &kfprog->filter)) 326 + return NULL; 327 + 328 + return kfprog; 329 + } 330 + EXPORT_SYMBOL_GPL(get_compat_bpf_fprog); 331 + 332 + static int do_set_attach_filter(struct socket *sock, int level, int optname, 333 + char __user *optval, unsigned int optlen) 334 + { 335 + struct sock_fprog __user *kfprog; 336 + 337 + kfprog = get_compat_bpf_fprog(optval); 338 + if (!kfprog) 326 339 return -EFAULT; 327 340 328 341 return sock_setsockopt(sock, level, optname, (char __user *)kfprog, ··· 367 354 static int compat_sock_setsockopt(struct socket *sock, int level, int optname, 368 355 char __user *optval, unsigned int optlen) 369 356 { 370 - if (optname == SO_ATTACH_FILTER) 357 + if (optname == SO_ATTACH_FILTER || 358 + optname == SO_ATTACH_REUSEPORT_CBPF) 371 359 return do_set_attach_filter(sock, level, optname, 372 360 optval, optlen); 373 361 if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+2
net/core/gen_stats.c
··· 47 47 * @xstats_type: TLV type for backward compatibility xstats TLV 48 48 * @lock: statistics lock 49 49 * @d: dumping handle 50 + * @padattr: padding attribute 50 51 * 51 52 * Initializes the dumping handle, grabs the statistic lock and appends 52 53 * an empty TLV header to the socket buffer for use a container for all ··· 88 87 * @type: TLV type for top level statistic TLV 89 88 * @lock: statistics lock 90 89 * @d: dumping handle 90 + * @padattr: padding attribute 91 91 * 92 92 * Initializes the dumping handle, grabs the statistic lock and appends 93 93 * an empty TLV header to the socket buffer for use a container for all
+1
net/core/net-sysfs.c
··· 24 24 #include <linux/jiffies.h> 25 25 #include <linux/pm_runtime.h> 26 26 #include <linux/of.h> 27 + #include <linux/of_net.h> 27 28 28 29 #include "net-sysfs.h" 29 30
+5 -5
net/ipv4/udp.c
··· 1618 1618 } 1619 1619 } 1620 1620 1621 - if (rcu_access_pointer(sk->sk_filter)) { 1622 - if (udp_lib_checksum_complete(skb)) 1621 + if (rcu_access_pointer(sk->sk_filter) && 1622 + udp_lib_checksum_complete(skb)) 1623 1623 goto csum_error; 1624 - if (sk_filter(sk, skb)) 1625 - goto drop; 1626 - } 1624 + 1625 + if (sk_filter(sk, skb)) 1626 + goto drop; 1627 1627 1628 1628 udp_csum_pull_header(skb); 1629 1629 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
+3
net/ipv6/ip6_gre.c
··· 1256 1256 if (ret) 1257 1257 return ret; 1258 1258 1259 + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1260 + 1259 1261 tunnel = netdev_priv(dev); 1260 1262 1261 1263 ip6gre_tnl_link_config(tunnel, 1); ··· 1291 1289 1292 1290 dev->features |= NETIF_F_NETNS_LOCAL; 1293 1291 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1292 + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1294 1293 } 1295 1294 1296 1295 static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
+3 -8
net/ipv6/ip6_output.c
··· 1071 1071 const struct in6_addr *final_dst) 1072 1072 { 1073 1073 struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); 1074 - int err; 1075 1074 1076 1075 dst = ip6_sk_dst_check(sk, dst, fl6); 1076 + if (!dst) 1077 + dst = ip6_dst_lookup_flow(sk, fl6, final_dst); 1077 1078 1078 - err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6); 1079 - if (err) 1080 - return ERR_PTR(err); 1081 - if (final_dst) 1082 - fl6->daddr = *final_dst; 1083 - 1084 - return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); 1079 + return dst; 1085 1080 } 1086 1081 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); 1087 1082
+1
net/ipv6/netfilter/nf_dup_ipv6.c
··· 33 33 fl6.daddr = *gw; 34 34 fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) | 35 35 (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]); 36 + fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH; 36 37 dst = ip6_route_output(net, NULL, &fl6); 37 38 if (dst->error) { 38 39 dst_release(dst);
+3 -1
net/ipv6/tcp_ipv6.c
··· 1721 1721 destp = ntohs(inet->inet_dport); 1722 1722 srcp = ntohs(inet->inet_sport); 1723 1723 1724 - if (icsk->icsk_pending == ICSK_TIME_RETRANS) { 1724 + if (icsk->icsk_pending == ICSK_TIME_RETRANS || 1725 + icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || 1726 + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { 1725 1727 timer_active = 1; 1726 1728 timer_expires = icsk->icsk_timeout; 1727 1729 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
+6 -6
net/ipv6/udp.c
··· 653 653 } 654 654 } 655 655 656 - if (rcu_access_pointer(sk->sk_filter)) { 657 - if (udp_lib_checksum_complete(skb)) 658 - goto csum_error; 659 - if (sk_filter(sk, skb)) 660 - goto drop; 661 - } 656 + if (rcu_access_pointer(sk->sk_filter) && 657 + udp_lib_checksum_complete(skb)) 658 + goto csum_error; 659 + 660 + if (sk_filter(sk, skb)) 661 + goto drop; 662 662 663 663 udp_csum_pull_header(skb); 664 664 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
+1 -1
net/l2tp/l2tp_core.c
··· 1581 1581 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ 1582 1582 tunnel->encap = encap; 1583 1583 if (encap == L2TP_ENCAPTYPE_UDP) { 1584 - struct udp_tunnel_sock_cfg udp_cfg; 1584 + struct udp_tunnel_sock_cfg udp_cfg = { }; 1585 1585 1586 1586 udp_cfg.sk_user_data = tunnel; 1587 1587 udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
+4
net/mac80211/mesh.c
··· 161 161 del_timer_sync(&sta->mesh->plink_timer); 162 162 } 163 163 164 + /* make sure no readers can access nexthop sta from here on */ 165 + mesh_path_flush_by_nexthop(sta); 166 + synchronize_net(); 167 + 164 168 if (changed) 165 169 ieee80211_mbss_info_change_notify(sdata, changed); 166 170 }
+1 -1
net/mac80211/sta_info.h
··· 280 280 u8 sa_offs, da_offs, pn_offs; 281 281 u8 band; 282 282 u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV + 283 - sizeof(rfc1042_header)]; 283 + sizeof(rfc1042_header)] __aligned(2); 284 284 285 285 struct rcu_head rcu_head; 286 286 };
+3 -2
net/netfilter/ipvs/ip_vs_conn.c
··· 762 762 * If available, return 1, otherwise invalidate this connection 763 763 * template and return 0. 764 764 */ 765 - int ip_vs_check_template(struct ip_vs_conn *ct) 765 + int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest) 766 766 { 767 767 struct ip_vs_dest *dest = ct->dest; 768 768 struct netns_ipvs *ipvs = ct->ipvs; ··· 772 772 */ 773 773 if ((dest == NULL) || 774 774 !(dest->flags & IP_VS_DEST_F_AVAILABLE) || 775 - expire_quiescent_template(ipvs, dest)) { 775 + expire_quiescent_template(ipvs, dest) || 776 + (cdest && (dest != cdest))) { 776 777 IP_VS_DBG_BUF(9, "check_template: dest not available for " 777 778 "protocol %s s:%s:%d v:%s:%d " 778 779 "-> d:%s:%d\n",
+3 -2
net/netfilter/ipvs/ip_vs_core.c
··· 321 321 322 322 /* Check if a template already exists */ 323 323 ct = ip_vs_ct_in_get(&param); 324 - if (!ct || !ip_vs_check_template(ct)) { 324 + if (!ct || !ip_vs_check_template(ct, NULL)) { 325 325 struct ip_vs_scheduler *sched; 326 326 327 327 /* ··· 1154 1154 vport, &param) < 0) 1155 1155 return NULL; 1156 1156 ct = ip_vs_ct_in_get(&param); 1157 - if (!ct) { 1157 + /* check if template exists and points to the same dest */ 1158 + if (!ct || !ip_vs_check_template(ct, dest)) { 1158 1159 ct = ip_vs_conn_new(&param, dest->af, daddr, dport, 1159 1160 IP_VS_CONN_F_TEMPLATE, dest, 0); 1160 1161 if (!ct) {
+1
net/netfilter/nf_conntrack_ftp.c
··· 632 632 if (ret) { 633 633 pr_err("failed to register helper for pf: %d port: %d\n", 634 634 ftp[i][j].tuple.src.l3num, ports[i]); 635 + ports_c = i; 635 636 nf_conntrack_ftp_fini(); 636 637 return ret; 637 638 }
+4 -5
net/netfilter/nf_conntrack_helper.c
··· 361 361 362 362 int nf_conntrack_helper_register(struct nf_conntrack_helper *me) 363 363 { 364 - int ret = 0; 365 - struct nf_conntrack_helper *cur; 364 + struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) }; 366 365 unsigned int h = helper_hash(&me->tuple); 366 + struct nf_conntrack_helper *cur; 367 + int ret = 0; 367 368 368 369 BUG_ON(me->expect_policy == NULL); 369 370 BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES); ··· 372 371 373 372 mutex_lock(&nf_ct_helper_mutex); 374 373 hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) { 375 - if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 && 376 - cur->tuple.src.l3num == me->tuple.src.l3num && 377 - cur->tuple.dst.protonum == me->tuple.dst.protonum) { 374 + if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) { 378 375 ret = -EEXIST; 379 376 goto out; 380 377 }
+1
net/netfilter/nf_conntrack_irc.c
··· 271 271 if (ret) { 272 272 pr_err("failed to register helper for pf: %u port: %u\n", 273 273 irc[i].tuple.src.l3num, ports[i]); 274 + ports_c = i; 274 275 nf_conntrack_irc_fini(); 275 276 return ret; 276 277 }
+1
net/netfilter/nf_conntrack_sane.c
··· 223 223 if (ret) { 224 224 pr_err("failed to register helper for pf: %d port: %d\n", 225 225 sane[i][j].tuple.src.l3num, ports[i]); 226 + ports_c = i; 226 227 nf_conntrack_sane_fini(); 227 228 return ret; 228 229 }
+1
net/netfilter/nf_conntrack_sip.c
··· 1669 1669 if (ret) { 1670 1670 pr_err("failed to register helper for pf: %u port: %u\n", 1671 1671 sip[i][j].tuple.src.l3num, ports[i]); 1672 + ports_c = i; 1672 1673 nf_conntrack_sip_fini(); 1673 1674 return ret; 1674 1675 }
-2
net/netfilter/nf_conntrack_standalone.c
··· 487 487 { } 488 488 }; 489 489 490 - #define NET_NF_CONNTRACK_MAX 2089 491 - 492 490 static struct ctl_table nf_ct_netfilter_table[] = { 493 491 { 494 492 .procname = "nf_conntrack_max",
+1
net/netfilter/nf_conntrack_tftp.c
··· 142 142 if (ret) { 143 143 pr_err("failed to register helper for pf: %u port: %u\n", 144 144 tftp[i][j].tuple.src.l3num, ports[i]); 145 + ports_c = i; 145 146 nf_conntrack_tftp_fini(); 146 147 return ret; 147 148 }
+8 -9
net/netfilter/nf_queue.c
··· 26 26 * Once the queue is registered it must reinject all packets it 27 27 * receives, no matter what. 28 28 */ 29 - static const struct nf_queue_handler __rcu *queue_handler __read_mostly; 30 29 31 30 /* return EBUSY when somebody else is registered, return EEXIST if the 32 31 * same handler is registered, return 0 in case of success. */ 33 - void nf_register_queue_handler(const struct nf_queue_handler *qh) 32 + void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh) 34 33 { 35 34 /* should never happen, we only have one queueing backend in kernel */ 36 - WARN_ON(rcu_access_pointer(queue_handler)); 37 - rcu_assign_pointer(queue_handler, qh); 35 + WARN_ON(rcu_access_pointer(net->nf.queue_handler)); 36 + rcu_assign_pointer(net->nf.queue_handler, qh); 38 37 } 39 38 EXPORT_SYMBOL(nf_register_queue_handler); 40 39 41 40 /* The caller must flush their queue before this */ 42 - void nf_unregister_queue_handler(void) 41 + void nf_unregister_queue_handler(struct net *net) 43 42 { 44 - RCU_INIT_POINTER(queue_handler, NULL); 45 - synchronize_rcu(); 43 + RCU_INIT_POINTER(net->nf.queue_handler, NULL); 46 44 } 47 45 EXPORT_SYMBOL(nf_unregister_queue_handler); 48 46 ··· 101 103 const struct nf_queue_handler *qh; 102 104 103 105 rcu_read_lock(); 104 - qh = rcu_dereference(queue_handler); 106 + qh = rcu_dereference(net->nf.queue_handler); 105 107 if (qh) 106 108 qh->nf_hook_drop(net, ops); 107 109 rcu_read_unlock(); ··· 120 122 struct nf_queue_entry *entry = NULL; 121 123 const struct nf_afinfo *afinfo; 122 124 const struct nf_queue_handler *qh; 125 + struct net *net = state->net; 123 126 124 127 /* QUEUE == DROP if no one is waiting, to be safe. */ 125 - qh = rcu_dereference(queue_handler); 128 + qh = rcu_dereference(net->nf.queue_handler); 126 129 if (!qh) { 127 130 status = -ESRCH; 128 131 goto err;
+2
net/netfilter/nf_tables_api.c
··· 2647 2647 /* Only accept unspec with dump */ 2648 2648 if (nfmsg->nfgen_family == NFPROTO_UNSPEC) 2649 2649 return -EAFNOSUPPORT; 2650 + if (!nla[NFTA_SET_TABLE]) 2651 + return -EINVAL; 2650 2652 2651 2653 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); 2652 2654 if (IS_ERR(set))
+2 -2
net/netfilter/x_tables.c
··· 612 612 return -EINVAL; 613 613 614 614 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && 615 - target_offset + sizeof(struct compat_xt_standard_target) != next_offset) 615 + COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset) 616 616 return -EINVAL; 617 617 618 618 /* compat_xt_entry match has less strict aligment requirements, ··· 694 694 return -EINVAL; 695 695 696 696 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && 697 - target_offset + sizeof(struct xt_standard_target) != next_offset) 697 + XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset) 698 698 return -EINVAL; 699 699 700 700 return xt_check_entry_match(elems, base + target_offset,
+25
net/packet/af_packet.c
··· 93 93 #include <net/inet_common.h> 94 94 #endif 95 95 #include <linux/bpf.h> 96 + #include <net/compat.h> 96 97 97 98 #include "internal.h" 98 99 ··· 3941 3940 } 3942 3941 3943 3942 3943 + #ifdef CONFIG_COMPAT 3944 + static int compat_packet_setsockopt(struct socket *sock, int level, int optname, 3945 + char __user *optval, unsigned int optlen) 3946 + { 3947 + struct packet_sock *po = pkt_sk(sock->sk); 3948 + 3949 + if (level != SOL_PACKET) 3950 + return -ENOPROTOOPT; 3951 + 3952 + if (optname == PACKET_FANOUT_DATA && 3953 + po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) { 3954 + optval = (char __user *)get_compat_bpf_fprog(optval); 3955 + if (!optval) 3956 + return -EFAULT; 3957 + optlen = sizeof(struct sock_fprog); 3958 + } 3959 + 3960 + return packet_setsockopt(sock, level, optname, optval, optlen); 3961 + } 3962 + #endif 3963 + 3944 3964 static int packet_notifier(struct notifier_block *this, 3945 3965 unsigned long msg, void *ptr) 3946 3966 { ··· 4438 4416 .shutdown = sock_no_shutdown, 4439 4417 .setsockopt = packet_setsockopt, 4440 4418 .getsockopt = packet_getsockopt, 4419 + #ifdef CONFIG_COMPAT 4420 + .compat_setsockopt = compat_packet_setsockopt, 4421 + #endif 4441 4422 .sendmsg = packet_sendmsg, 4442 4423 .recvmsg = packet_recvmsg, 4443 4424 .mmap = packet_mmap,
+2
net/rds/rds.h
··· 74 74 RDS_CONN_CONNECTING, 75 75 RDS_CONN_DISCONNECTING, 76 76 RDS_CONN_UP, 77 + RDS_CONN_RESETTING, 77 78 RDS_CONN_ERROR, 78 79 }; 79 80 ··· 814 813 void rds_shutdown_worker(struct work_struct *); 815 814 void rds_send_worker(struct work_struct *); 816 815 void rds_recv_worker(struct work_struct *); 816 + void rds_connect_path_complete(struct rds_connection *conn, int curr); 817 817 void rds_connect_complete(struct rds_connection *conn); 818 818 819 819 /* transport.c */
+2
net/rds/recv.c
··· 561 561 minfo.fport = inc->i_hdr.h_dport; 562 562 } 563 563 564 + minfo.flags = 0; 565 + 564 566 rds_info_copy(iter, &minfo, sizeof(minfo)); 565 567 }
+1
net/rds/send.c
··· 99 99 list_splice_init(&conn->c_retrans, &conn->c_send_queue); 100 100 spin_unlock_irqrestore(&conn->c_lock, flags); 101 101 } 102 + EXPORT_SYMBOL_GPL(rds_send_reset); 102 103 103 104 static int acquire_in_xmit(struct rds_connection *conn) 104 105 {
+75 -3
net/rds/tcp.c
··· 126 126 } 127 127 128 128 /* 129 - * This is the only path that sets tc->t_sock. Send and receive trust that 130 - * it is set. The RDS_CONN_UP bit protects those paths from being 131 - * called while it isn't set. 129 + * rds_tcp_reset_callbacks() switches the to the new sock and 130 + * returns the existing tc->t_sock. 131 + * 132 + * The only functions that set tc->t_sock are rds_tcp_set_callbacks 133 + * and rds_tcp_reset_callbacks. Send and receive trust that 134 + * it is set. The absence of RDS_CONN_UP bit protects those paths 135 + * from being called while it isn't set. 136 + */ 137 + void rds_tcp_reset_callbacks(struct socket *sock, 138 + struct rds_connection *conn) 139 + { 140 + struct rds_tcp_connection *tc = conn->c_transport_data; 141 + struct socket *osock = tc->t_sock; 142 + 143 + if (!osock) 144 + goto newsock; 145 + 146 + /* Need to resolve a duelling SYN between peers. 147 + * We have an outstanding SYN to this peer, which may 148 + * potentially have transitioned to the RDS_CONN_UP state, 149 + * so we must quiesce any send threads before resetting 150 + * c_transport_data. We quiesce these threads by setting 151 + * c_state to something other than RDS_CONN_UP, and then 152 + * waiting for any existing threads in rds_send_xmit to 153 + * complete release_in_xmit(). (Subsequent threads entering 154 + * rds_send_xmit() will bail on !rds_conn_up(). 155 + * 156 + * However an incoming syn-ack at this point would end up 157 + * marking the conn as RDS_CONN_UP, and would again permit 158 + * rds_send_xmi() threads through, so ideally we would 159 + * synchronize on RDS_CONN_UP after lock_sock(), but cannot 160 + * do that: waiting on !RDS_IN_XMIT after lock_sock() may 161 + * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT 162 + * would not get set.
As a result, we set c_state to 163 + * RDS_CONN_RESETTTING, to ensure that rds_tcp_state_change 164 + * cannot mark rds_conn_path_up() in the window before lock_sock() 165 + */ 166 + atomic_set(&conn->c_state, RDS_CONN_RESETTING); 167 + wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags)); 168 + lock_sock(osock->sk); 169 + /* reset receive side state for rds_tcp_data_recv() for osock */ 170 + if (tc->t_tinc) { 171 + rds_inc_put(&tc->t_tinc->ti_inc); 172 + tc->t_tinc = NULL; 173 + } 174 + tc->t_tinc_hdr_rem = sizeof(struct rds_header); 175 + tc->t_tinc_data_rem = 0; 176 + tc->t_sock = NULL; 177 + 178 + write_lock_bh(&osock->sk->sk_callback_lock); 179 + 180 + osock->sk->sk_user_data = NULL; 181 + osock->sk->sk_data_ready = tc->t_orig_data_ready; 182 + osock->sk->sk_write_space = tc->t_orig_write_space; 183 + osock->sk->sk_state_change = tc->t_orig_state_change; 184 + write_unlock_bh(&osock->sk->sk_callback_lock); 185 + release_sock(osock->sk); 186 + sock_release(osock); 187 + newsock: 188 + rds_send_reset(conn); 189 + lock_sock(sock->sk); 190 + write_lock_bh(&sock->sk->sk_callback_lock); 191 + tc->t_sock = sock; 192 + sock->sk->sk_user_data = conn; 193 + sock->sk->sk_data_ready = rds_tcp_data_ready; 194 + sock->sk->sk_write_space = rds_tcp_write_space; 195 + sock->sk->sk_state_change = rds_tcp_state_change; 196 + 197 + write_unlock_bh(&sock->sk->sk_callback_lock); 198 + release_sock(sock->sk); 199 + } 200 + 201 + /* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments 202 + * above rds_tcp_reset_callbacks for notes about synchronization 203 + * with data path 132 204 */ 133 205 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn) 134 206 {
+1
net/rds/tcp.h
··· 50 50 void rds_tcp_tune(struct socket *sock); 51 51 void rds_tcp_nonagle(struct socket *sock); 52 52 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn); 53 + void rds_tcp_reset_callbacks(struct socket *sock, struct rds_connection *conn); 53 54 void rds_tcp_restore_callbacks(struct socket *sock, 54 55 struct rds_tcp_connection *tc); 55 56 u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
+1 -1
net/rds/tcp_connect.c
··· 60 60 case TCP_SYN_RECV: 61 61 break; 62 62 case TCP_ESTABLISHED: 63 - rds_connect_complete(conn); 63 + rds_connect_path_complete(conn, RDS_CONN_CONNECTING); 64 64 break; 65 65 case TCP_CLOSE_WAIT: 66 66 case TCP_CLOSE:
+7 -13
net/rds/tcp_listen.c
··· 78 78 struct inet_sock *inet; 79 79 struct rds_tcp_connection *rs_tcp = NULL; 80 80 int conn_state; 81 - struct sock *nsk; 82 81 83 82 if (!sock) /* module unload or netns delete in progress */ 84 83 return -ENETUNREACH; ··· 135 136 !conn->c_outgoing) { 136 137 goto rst_nsk; 137 138 } else { 138 - atomic_set(&conn->c_state, RDS_CONN_CONNECTING); 139 - wait_event(conn->c_waitq, 140 - !test_bit(RDS_IN_XMIT, &conn->c_flags)); 141 - rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp); 139 + rds_tcp_reset_callbacks(new_sock, conn); 142 140 conn->c_outgoing = 0; 141 + /* rds_connect_path_complete() marks RDS_CONN_UP */ 142 + rds_connect_path_complete(conn, RDS_CONN_DISCONNECTING); 143 143 } 144 + } else { 145 + rds_tcp_set_callbacks(new_sock, conn); 146 + rds_connect_path_complete(conn, RDS_CONN_CONNECTING); 144 147 } 145 - rds_tcp_set_callbacks(new_sock, conn); 146 - rds_connect_complete(conn); /* marks RDS_CONN_UP */ 147 148 new_sock = NULL; 148 149 ret = 0; 149 150 goto out; 150 151 rst_nsk: 151 152 /* reset the newly returned accept sock and bail */ 152 - nsk = new_sock->sk; 153 - rds_tcp_stats_inc(s_tcp_listen_closed_stale); 154 - nsk->sk_user_data = NULL; 155 - nsk->sk_prot->disconnect(nsk, 0); 156 - tcp_done(nsk); 157 - new_sock = NULL; 153 + kernel_sock_shutdown(new_sock, SHUT_RDWR); 158 154 ret = 0; 159 155 out: 160 156 if (rs_tcp)
+8 -2
net/rds/threads.c
··· 71 71 struct workqueue_struct *rds_wq; 72 72 EXPORT_SYMBOL_GPL(rds_wq); 73 73 74 - void rds_connect_complete(struct rds_connection *conn) 74 + void rds_connect_path_complete(struct rds_connection *conn, int curr) 75 75 { 76 - if (!rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP)) { 76 + if (!rds_conn_transition(conn, curr, RDS_CONN_UP)) { 77 77 printk(KERN_WARNING "%s: Cannot transition to state UP, " 78 78 "current state is %d\n", 79 79 __func__, ··· 89 89 set_bit(0, &conn->c_map_queued); 90 90 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 91 91 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 92 + } 93 + EXPORT_SYMBOL_GPL(rds_connect_path_complete); 94 + 95 + void rds_connect_complete(struct rds_connection *conn) 96 + { 97 + rds_connect_path_complete(conn, RDS_CONN_CONNECTING); 92 98 } 93 99 EXPORT_SYMBOL_GPL(rds_connect_complete); 94 100
+1 -3
net/rxrpc/rxkad.c
··· 1162 1162 /* pin the cipher we need so that the crypto layer doesn't invoke 1163 1163 * keventd to go get it */ 1164 1164 rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); 1165 - if (IS_ERR(rxkad_ci)) 1166 - return PTR_ERR(rxkad_ci); 1167 - return 0; 1165 + return PTR_ERR_OR_ZERO(rxkad_ci); 1168 1166 } 1169 1167 1170 1168 /*
+11 -22
net/sched/act_police.c
··· 38 38 bool peak_present; 39 39 }; 40 40 #define to_police(pc) \ 41 - container_of(pc, struct tcf_police, common) 41 + container_of(pc->priv, struct tcf_police, common) 42 42 43 43 #define POL_TAB_MASK 15 44 44 ··· 119 119 struct nlattr *est, struct tc_action *a, 120 120 int ovr, int bind) 121 121 { 122 - unsigned int h; 123 122 int ret = 0, err; 124 123 struct nlattr *tb[TCA_POLICE_MAX + 1]; 125 124 struct tc_police *parm; 126 125 struct tcf_police *police; 127 126 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; 128 127 struct tc_action_net *tn = net_generic(net, police_net_id); 129 - struct tcf_hashinfo *hinfo = tn->hinfo; 130 128 int size; 131 129 132 130 if (nla == NULL) ··· 143 145 144 146 if (parm->index) { 145 147 if (tcf_hash_search(tn, a, parm->index)) { 146 - police = to_police(a->priv); 148 + police = to_police(a); 147 149 if (bind) { 148 150 police->tcf_bindcnt += 1; 149 151 police->tcf_refcnt += 1; ··· 154 156 /* not replacing */ 155 157 return -EEXIST; 156 158 } 159 + } else { 160 + ret = tcf_hash_create(tn, parm->index, NULL, a, 161 + sizeof(*police), bind, false); 162 + if (ret) 163 + return ret; 164 + ret = ACT_P_CREATED; 157 165 } 158 166 159 - police = kzalloc(sizeof(*police), GFP_KERNEL); 160 - if (police == NULL) 161 - return -ENOMEM; 162 - ret = ACT_P_CREATED; 163 - police->tcf_refcnt = 1; 164 - spin_lock_init(&police->tcf_lock); 165 - if (bind) 166 - police->tcf_bindcnt = 1; 167 + police = to_police(a); 167 168 override: 168 169 if (parm->rate.rate) { 169 170 err = -ENOMEM; ··· 234 237 return ret; 235 238 236 239 police->tcfp_t_c = ktime_get_ns(); 237 - police->tcf_index = parm->index ?
parm->index : 238 - tcf_hash_new_index(tn); 239 - police->tcf_tm.install = jiffies; 240 - police->tcf_tm.lastuse = jiffies; 241 - h = tcf_hash(police->tcf_index, POL_TAB_MASK); 242 - spin_lock_bh(&hinfo->lock); 243 - hlist_add_head(&police->tcf_head, &hinfo->htab[h]); 244 - spin_unlock_bh(&hinfo->lock); 240 + tcf_hash_insert(tn, a); 245 241 246 - a->priv = police; 247 242 return ret; 248 243 249 244 failure_unlock: ··· 244 255 qdisc_put_rtab(P_tab); 245 256 qdisc_put_rtab(R_tab); 246 257 if (ret == ACT_P_CREATED) 247 - kfree(police); 258 + tcf_hash_cleanup(a, est); 248 259 return err; 249 260 } 250 261
+3 -3
net/sched/cls_flower.c
··· 171 171 struct tc_cls_flower_offload offload = {0}; 172 172 struct tc_to_netdev tc; 173 173 174 - if (!tc_should_offload(dev, 0)) 174 + if (!tc_should_offload(dev, tp, 0)) 175 175 return; 176 176 177 177 offload.command = TC_CLSFLOWER_DESTROY; ··· 194 194 struct tc_cls_flower_offload offload = {0}; 195 195 struct tc_to_netdev tc; 196 196 197 - if (!tc_should_offload(dev, flags)) 197 + if (!tc_should_offload(dev, tp, flags)) 198 198 return; 199 199 200 200 offload.command = TC_CLSFLOWER_REPLACE; ··· 216 216 struct tc_cls_flower_offload offload = {0}; 217 217 struct tc_to_netdev tc; 218 218 219 - if (!tc_should_offload(dev, 0)) 219 + if (!tc_should_offload(dev, tp, 0)) 220 220 return; 221 221 222 222 offload.command = TC_CLSFLOWER_STATS;
+42 -34
net/sched/cls_u32.c
··· 440 440 offload.type = TC_SETUP_CLSU32; 441 441 offload.cls_u32 = &u32_offload; 442 442 443 - if (tc_should_offload(dev, 0)) { 443 + if (tc_should_offload(dev, tp, 0)) { 444 444 offload.cls_u32->command = TC_CLSU32_DELETE_KNODE; 445 445 offload.cls_u32->knode.handle = handle; 446 446 dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, ··· 457 457 struct tc_to_netdev offload; 458 458 int err; 459 459 460 + if (!tc_should_offload(dev, tp, flags)) 461 + return tc_skip_sw(flags) ? -EINVAL : 0; 462 + 460 463 offload.type = TC_SETUP_CLSU32; 461 464 offload.cls_u32 = &u32_offload; 462 465 463 - if (tc_should_offload(dev, flags)) { 464 - offload.cls_u32->command = TC_CLSU32_NEW_HNODE; 465 - offload.cls_u32->hnode.divisor = h->divisor; 466 - offload.cls_u32->hnode.handle = h->handle; 467 - offload.cls_u32->hnode.prio = h->prio; 466 + offload.cls_u32->command = TC_CLSU32_NEW_HNODE; 467 + offload.cls_u32->hnode.divisor = h->divisor; 468 + offload.cls_u32->hnode.handle = h->handle; 469 + offload.cls_u32->hnode.prio = h->prio; 468 470 469 - err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, 470 - tp->protocol, &offload); 471 - if (tc_skip_sw(flags)) 472 - return err; 473 - } 471 + err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, 472 + tp->protocol, &offload); 473 + if (tc_skip_sw(flags)) 474 + return err; 474 475 475 476 return 0; 476 477 } ··· 485 484 offload.type = TC_SETUP_CLSU32; 486 485 offload.cls_u32 = &u32_offload; 487 486 488 - if (tc_should_offload(dev, 0)) { 487 + if (tc_should_offload(dev, tp, 0)) { 489 488 offload.cls_u32->command = TC_CLSU32_DELETE_HNODE; 490 489 offload.cls_u32->hnode.divisor = h->divisor; 491 490 offload.cls_u32->hnode.handle = h->handle; ··· 508 507 offload.type = TC_SETUP_CLSU32; 509 508 offload.cls_u32 = &u32_offload; 510 509 511 - if (tc_should_offload(dev, flags)) { 512 - offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE; 513 - offload.cls_u32->knode.handle = n->handle; 514 - offload.cls_u32->knode.fshift = n->fshift; 515 -
#ifdef CONFIG_CLS_U32_MARK 516 - offload.cls_u32->knode.val = n->val; 517 - offload.cls_u32->knode.mask = n->mask; 518 - #else 519 - offload.cls_u32->knode.val = 0; 520 - offload.cls_u32->knode.mask = 0; 521 - #endif 522 - offload.cls_u32->knode.sel = &n->sel; 523 - offload.cls_u32->knode.exts = &n->exts; 524 - if (n->ht_down) 525 - offload.cls_u32->knode.link_handle = n->ht_down->handle; 510 + if (!tc_should_offload(dev, tp, flags)) 511 + return tc_skip_sw(flags) ? -EINVAL : 0; 526 512 527 - err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, 528 - tp->protocol, &offload); 529 - if (tc_skip_sw(flags)) 530 - return err; 531 - } 513 + offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE; 514 + offload.cls_u32->knode.handle = n->handle; 515 + offload.cls_u32->knode.fshift = n->fshift; 516 + #ifdef CONFIG_CLS_U32_MARK 517 + offload.cls_u32->knode.val = n->val; 518 + offload.cls_u32->knode.mask = n->mask; 519 + #else 520 + offload.cls_u32->knode.val = 0; 521 + offload.cls_u32->knode.mask = 0; 522 + #endif 523 + offload.cls_u32->knode.sel = &n->sel; 524 + offload.cls_u32->knode.exts = &n->exts; 525 + if (n->ht_down) 526 + offload.cls_u32->knode.link_handle = n->ht_down->handle; 527 + 528 + err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, 529 + tp->protocol, &offload); 530 + if (tc_skip_sw(flags)) 531 + return err; 532 532 533 533 return 0; 534 534 } ··· 865 863 if (tb[TCA_U32_FLAGS]) { 866 864 flags = nla_get_u32(tb[TCA_U32_FLAGS]); 867 865 if (!tc_flags_valid(flags)) 868 - return err; 866 + return -EINVAL; 869 867 } 870 868 871 869 n = (struct tc_u_knode *)*arg; ··· 923 921 ht->divisor = divisor; 924 922 ht->handle = handle; 925 923 ht->prio = tp->prio; 924 + 925 + err = u32_replace_hw_hnode(tp, ht, flags); 926 + if (err) { 927 + kfree(ht); 928 + return err; 929 + } 930 + 926 931 RCU_INIT_POINTER(ht->next, tp_c->hlist); 927 932 rcu_assign_pointer(tp_c->hlist, ht); 928 933 *arg = (unsigned long)ht; 929 934 930 - u32_replace_hw_hnode(tp, ht, flags); 931 935
return 0; 932 936 } 933 937
+4
net/sched/sch_drr.c
··· 375 375 cl->deficit = cl->quantum; 376 376 } 377 377 378 + qdisc_qstats_backlog_inc(sch, skb); 378 379 sch->q.qlen++; 379 380 return err; 380 381 } ··· 408 407 409 408 bstats_update(&cl->bstats, skb); 410 409 qdisc_bstats_update(sch, skb); 410 + qdisc_qstats_backlog_dec(sch, skb); 411 411 sch->q.qlen--; 412 412 return skb; 413 413 } ··· 430 428 if (cl->qdisc->ops->drop) { 431 429 len = cl->qdisc->ops->drop(cl->qdisc); 432 430 if (len > 0) { 431 + sch->qstats.backlog -= len; 433 432 sch->q.qlen--; 434 433 if (cl->qdisc->q.qlen == 0) 435 434 list_del(&cl->alist); ··· 466 463 qdisc_reset(cl->qdisc); 467 464 } 468 465 } 466 + sch->qstats.backlog = 0; 469 467 sch->q.qlen = 0; 470 468 } 471 469
+19 -7
net/sched/sch_fq_codel.c
··· 199 199 unsigned int idx, prev_backlog, prev_qlen; 200 200 struct fq_codel_flow *flow; 201 201 int uninitialized_var(ret); 202 + unsigned int pkt_len; 202 203 bool memory_limited; 203 204 204 205 idx = fq_codel_classify(skb, sch, &ret); ··· 231 230 prev_backlog = sch->qstats.backlog; 232 231 prev_qlen = sch->q.qlen; 233 232 233 + /* save this packet length as it might be dropped by fq_codel_drop() */ 234 + pkt_len = qdisc_pkt_len(skb); 234 235 /* fq_codel_drop() is quite expensive, as it performs a linear search 235 236 * in q->backlogs[] to find a fat flow. 236 237 * So instead of dropping a single packet, drop half of its backlog ··· 240 237 */ 241 238 ret = fq_codel_drop(sch, q->drop_batch_size); 242 239 243 - q->drop_overlimit += prev_qlen - sch->q.qlen; 240 + prev_qlen -= sch->q.qlen; 241 + prev_backlog -= sch->qstats.backlog; 242 + q->drop_overlimit += prev_qlen; 244 243 if (memory_limited) 245 - q->drop_overmemory += prev_qlen - sch->q.qlen; 246 - /* As we dropped packet(s), better let upper stack know this */ 247 - qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen, 248 - prev_backlog - sch->qstats.backlog); 244 + q->drop_overmemory += prev_qlen; 249 245 250 - return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS; 246 + /* As we dropped packet(s), better let upper stack know this. 247 + * If we dropped a packet for this flow, return NET_XMIT_CN, 248 + * but in this case, our parents wont increase their backlogs.
249 + */ 250 + if (ret == idx) { 251 + qdisc_tree_reduce_backlog(sch, prev_qlen - 1, 252 + prev_backlog - pkt_len); 253 + return NET_XMIT_CN; 254 + } 255 + qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog); 256 + return NET_XMIT_SUCCESS; 251 257 } 252 258 253 259 /* This is the specific function called from codel_dequeue() ··· 661 649 qs.backlog = q->backlogs[idx]; 662 650 qs.drops = flow->dropped; 663 651 } 664 - if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0) 652 + if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0) 665 653 return -1; 666 654 if (idx < q->flows_cnt) 667 655 return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+2
net/sched/sch_generic.c
··· 49 49 { 50 50 q->gso_skb = skb; 51 51 q->qstats.requeues++; 52 + qdisc_qstats_backlog_inc(q, skb); 52 53 q->q.qlen++; /* it's still part of the queue */ 53 54 __netif_schedule(q); 54 55 ··· 93 92 txq = skb_get_tx_queue(txq->dev, skb); 94 93 if (!netif_xmit_frozen_or_stopped(txq)) { 95 94 q->gso_skb = NULL; 95 + qdisc_qstats_backlog_dec(q, skb); 96 96 q->q.qlen--; 97 97 } else 98 98 skb = NULL;
+4 -8
net/sched/sch_hfsc.c
··· 1529 1529 q->eligible = RB_ROOT; 1530 1530 INIT_LIST_HEAD(&q->droplist); 1531 1531 qdisc_watchdog_cancel(&q->watchdog); 1532 + sch->qstats.backlog = 0; 1532 1533 sch->q.qlen = 0; 1533 1534 } 1534 1535 ··· 1560 1559 struct hfsc_sched *q = qdisc_priv(sch); 1561 1560 unsigned char *b = skb_tail_pointer(skb); 1562 1561 struct tc_hfsc_qopt qopt; 1563 - struct hfsc_class *cl; 1564 - unsigned int i; 1565 - 1566 - sch->qstats.backlog = 0; 1567 - for (i = 0; i < q->clhash.hashsize; i++) { 1568 - hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) 1569 - sch->qstats.backlog += cl->qdisc->qstats.backlog; 1570 - } 1571 1562 1572 1563 qopt.defcls = q->defcls; 1573 1564 if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) ··· 1597 1604 if (cl->qdisc->q.qlen == 1) 1598 1605 set_active(cl, qdisc_pkt_len(skb)); 1599 1606 1607 + qdisc_qstats_backlog_inc(sch, skb); 1600 1608 sch->q.qlen++; 1601 1609 1602 1610 return NET_XMIT_SUCCESS; ··· 1666 1672 1667 1673 qdisc_unthrottled(sch); 1668 1674 qdisc_bstats_update(sch, skb); 1675 + qdisc_qstats_backlog_dec(sch, skb); 1669 1676 sch->q.qlen--; 1670 1677 1671 1678 return skb; ··· 1690 1695 } 1691 1696 cl->qstats.drops++; 1692 1697 qdisc_qstats_drop(sch); 1698 + sch->qstats.backlog -= len; 1693 1699 sch->q.qlen--; 1694 1700 return len; 1695 1701 }
+12
net/sched/sch_ingress.c
··· 27 27 return TC_H_MIN(classid) + 1; 28 28 } 29 29 30 + static bool ingress_cl_offload(u32 classid) 31 + { 32 + return true; 33 + } 34 + 30 35 static unsigned long ingress_bind_filter(struct Qdisc *sch, 31 36 unsigned long parent, u32 classid) 32 37 { ··· 91 86 .put = ingress_put, 92 87 .walk = ingress_walk, 93 88 .tcf_chain = ingress_find_tcf, 89 + .tcf_cl_offload = ingress_cl_offload, 94 90 .bind_tcf = ingress_bind_filter, 95 91 .unbind_tcf = ingress_put, 96 92 }; ··· 114 108 default: 115 109 return 0; 116 110 } 111 + } 112 + 113 + static bool clsact_cl_offload(u32 classid) 114 + { 115 + return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS); 117 116 } 118 117 119 118 static unsigned long clsact_bind_filter(struct Qdisc *sch, ··· 169 158 .put = ingress_put, 170 159 .walk = ingress_walk, 171 160 .tcf_chain = clsact_find_tcf, 161 + .tcf_cl_offload = clsact_cl_offload, 172 162 .bind_tcf = clsact_bind_filter, 173 163 .unbind_tcf = ingress_put, 174 164 };
+4
net/sched/sch_prio.c
··· 85 85 86 86 ret = qdisc_enqueue(skb, qdisc); 87 87 if (ret == NET_XMIT_SUCCESS) { 88 + qdisc_qstats_backlog_inc(sch, skb); 88 89 sch->q.qlen++; 89 90 return NET_XMIT_SUCCESS; 90 91 } ··· 118 117 struct sk_buff *skb = qdisc_dequeue_peeked(qdisc); 119 118 if (skb) { 120 119 qdisc_bstats_update(sch, skb); 120 + qdisc_qstats_backlog_dec(sch, skb); 121 121 sch->q.qlen--; 122 122 return skb; 123 123 } ··· 137 135 for (prio = q->bands-1; prio >= 0; prio--) { 138 136 qdisc = q->queues[prio]; 139 137 if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) { 138 + sch->qstats.backlog -= len; 140 139 sch->q.qlen--; 141 140 return len; 142 141 } ··· 154 151 155 152 for (prio = 0; prio < q->bands; prio++) 156 153 qdisc_reset(q->queues[prio]); 154 + sch->qstats.backlog = 0; 157 155 sch->q.qlen = 0; 158 156 } 159 157
+4 -2
net/sched/sch_qfq.c
··· 1235 1235 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid); 1236 1236 err = qfq_change_agg(sch, cl, cl->agg->class_weight, 1237 1237 qdisc_pkt_len(skb)); 1238 - if (err) 1239 - return err; 1238 + if (err) { 1239 + cl->qstats.drops++; 1240 + return qdisc_drop(skb, sch); 1241 + } 1240 1242 } 1241 1243 1242 1244 err = qdisc_enqueue(skb, cl->qdisc);
+4
net/sched/sch_red.c
··· 97 97 98 98 ret = qdisc_enqueue(skb, child); 99 99 if (likely(ret == NET_XMIT_SUCCESS)) { 100 + qdisc_qstats_backlog_inc(sch, skb); 100 101 sch->q.qlen++; 101 102 } else if (net_xmit_drop_count(ret)) { 102 103 q->stats.pdrop++; ··· 119 118 skb = child->dequeue(child); 120 119 if (skb) { 121 120 qdisc_bstats_update(sch, skb); 121 + qdisc_qstats_backlog_dec(sch, skb); 122 122 sch->q.qlen--; 123 123 } else { 124 124 if (!red_is_idling(&q->vars)) ··· 145 143 if (child->ops->drop && (len = child->ops->drop(child)) > 0) { 146 144 q->stats.other++; 147 145 qdisc_qstats_drop(sch); 146 + sch->qstats.backlog -= len; 148 147 sch->q.qlen--; 149 148 return len; 150 149 } ··· 161 158 struct red_sched_data *q = qdisc_priv(sch); 162 159 163 160 qdisc_reset(q->qdisc); 161 + sch->qstats.backlog = 0; 164 162 sch->q.qlen = 0; 165 163 red_restart(&q->vars); 166 164 }
+4
net/sched/sch_tbf.c
··· 207 207 return ret; 208 208 } 209 209 210 + qdisc_qstats_backlog_inc(sch, skb); 210 211 sch->q.qlen++; 211 212 return NET_XMIT_SUCCESS; 212 213 } ··· 218 217 unsigned int len = 0; 219 218 220 219 if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { 220 + sch->qstats.backlog -= len; 221 221 sch->q.qlen--; 222 222 qdisc_qstats_drop(sch); 223 223 } ··· 265 263 q->t_c = now; 266 264 q->tokens = toks; 267 265 q->ptokens = ptoks; 266 + qdisc_qstats_backlog_dec(sch, skb); 268 267 sch->q.qlen--; 269 268 qdisc_unthrottled(sch); 270 269 qdisc_bstats_update(sch, skb); ··· 297 294 struct tbf_sched_data *q = qdisc_priv(sch); 298 295 299 296 qdisc_reset(q->qdisc); 297 + sch->qstats.backlog = 0; 300 298 sch->q.qlen = 0; 301 299 q->t_c = ktime_get_ns(); 302 300 q->tokens = q->buffer;
-2
net/wireless/core.c
··· 363 363 WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel); 364 364 WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch); 365 365 WARN_ON(ops->add_tx_ts && !ops->del_tx_ts); 366 - WARN_ON(ops->set_tx_power && !ops->get_tx_power); 367 - WARN_ON(ops->set_antenna && !ops->get_antenna); 368 366 369 367 alloc_size = sizeof(*rdev) + sizeof_priv; 370 368
+23 -2
net/wireless/wext-core.c
··· 958 958 return private(dev, iwr, cmd, info, handler); 959 959 } 960 960 /* Old driver API : call driver ioctl handler */ 961 - if (dev->netdev_ops->ndo_do_ioctl) 962 - return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); 961 + if (dev->netdev_ops->ndo_do_ioctl) { 962 + #ifdef CONFIG_COMPAT 963 + if (info->flags & IW_REQUEST_FLAG_COMPAT) { 964 + int ret = 0; 965 + struct iwreq iwr_lcl; 966 + struct compat_iw_point *iwp_compat = (void *) &iwr->u.data; 967 + 968 + memcpy(&iwr_lcl, iwr, sizeof(struct iwreq)); 969 + iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer); 970 + iwr_lcl.u.data.length = iwp_compat->length; 971 + iwr_lcl.u.data.flags = iwp_compat->flags; 972 + 973 + ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd); 974 + 975 + iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer); 976 + iwp_compat->length = iwr_lcl.u.data.length; 977 + iwp_compat->flags = iwr_lcl.u.data.flags; 978 + 979 + return ret; 980 + } else 981 + #endif 982 + return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); 983 + } 963 984 return -EOPNOTSUPP; 964 985 } 965 986
+5 -5
tools/testing/selftests/net/reuseport_bpf.c
··· 111 111 memset(&attr, 0, sizeof(attr)); 112 112 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 113 113 attr.insn_cnt = ARRAY_SIZE(prog); 114 - attr.insns = (uint64_t)prog; 115 - attr.license = (uint64_t)bpf_license; 116 - attr.log_buf = (uint64_t)bpf_log_buf; 114 + attr.insns = (unsigned long) &prog; 115 + attr.license = (unsigned long) &bpf_license; 116 + attr.log_buf = (unsigned long) &bpf_log_buf; 117 117 attr.log_size = sizeof(bpf_log_buf); 118 118 attr.log_level = 1; 119 119 attr.kern_version = 0; ··· 351 351 memset(&eprog, 0, sizeof(eprog)); 352 352 eprog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 353 353 eprog.insn_cnt = ARRAY_SIZE(ecode); 354 - eprog.insns = (uint64_t)ecode; 355 - eprog.license = (uint64_t)bpf_license; 354 + eprog.insns = (unsigned long) &ecode; 355 + eprog.license = (unsigned long) &bpf_license; 356 356 eprog.kern_version = 0; 357 357 358 358 memset(&cprog, 0, sizeof(cprog));