Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-02-22

This series contains updates to the ice driver only.

Bruce adds the __always_unused attribute to a parameter to avoid
compiler warnings when using -Wunused-parameter. Fixed unnecessary
type-casting and the use of sizeof(). Fix the allocation of structs
that have become memory hogs, so allocate them in heaps and fix all the
associated references. Fixed the "possible" numeric overflow issues
that were caught with static analysis.

Maciej fixes the maximum MTU calculation by taking into account double
VLAN tagging and ensuring that the operations are done in the correct
order.

Victor fixes the supported node calculation: if there is already space
to add the new VSI or intermediate node above a given layer, then it is
not required to continue the calculation for the layers below. Also
added a check for leaf node presence for a given VSI, which is needed
before removing a VSI.

Jake fixes an issue where the VSI list is shared, so simply removing a
VSI from the list will cause issues for the other users who reference
the list. Since we also free the memory, this could lead to
segmentation faults.

Brett fixes an issue where driver unload could cause a system reboot
when intel_iommu=on parameter is set. The issue is that we are not
clearing the CAUSE_ENA bit for the appropriate control queues register
when freeing the miscellaneous interrupt vector.

Mitch is so kind, he prevented spamming the VF with link messages when
the link status really has not changed. Also updates the driver to use
the absolute vector ID, not the per-PF vector ID, for the VF MSI-X
vector allocation.

Lukasz fixes the ethtool pause parameter for the ice driver, which was
originally based off the link status but is now based off the PHY
configuration. This is to resolve an issue where pause parameters could
be set while link was down.

Jesse updates the string that reports statistics so the string does not
get modified at runtime and cause reports of string truncation.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+364 -199
+1 -1
drivers/net/ethernet/intel/ice/ice.h
··· 83 83 #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 84 84 85 85 #define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \ 86 - ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) 86 + (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))) 87 87 88 88 #define ICE_UP_TABLE_TRANSLATE(val, i) \ 89 89 (((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
+19 -2
drivers/net/ethernet/intel/ice/ice_common.c
··· 2450 2450 { 2451 2451 struct ice_aqc_dis_txqs *cmd; 2452 2452 struct ice_aq_desc desc; 2453 + enum ice_status status; 2453 2454 u16 i, sz = 0; 2454 2455 2455 2456 cmd = &desc.params.dis_txqs; ··· 2486 2485 break; 2487 2486 } 2488 2487 2488 + /* flush pipe on time out */ 2489 + cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; 2489 2490 /* If no queue group info, we are in a reset flow. Issue the AQ */ 2490 2491 if (!qg_list) 2491 2492 goto do_aq; ··· 2513 2510 return ICE_ERR_PARAM; 2514 2511 2515 2512 do_aq: 2516 - return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 2513 + status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 2514 + if (status) { 2515 + if (!qg_list) 2516 + ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", 2517 + vmvf_num, hw->adminq.sq_last_status); 2518 + else 2519 + ice_debug(hw, ICE_DBG_SCHED, "disable Q %d failed %d\n", 2520 + le16_to_cpu(qg_list[0].q_id[0]), 2521 + hw->adminq.sq_last_status); 2522 + } 2523 + return status; 2517 2524 } 2518 2525 2519 2526 /* End of FW Admin Queue command wrappers */ ··· 2809 2796 2810 2797 /* add the lan q */ 2811 2798 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); 2812 - if (status) 2799 + if (status) { 2800 + ice_debug(hw, ICE_DBG_SCHED, "enable Q %d failed %d\n", 2801 + le16_to_cpu(buf->txqs[0].txq_id), 2802 + hw->adminq.sq_last_status); 2813 2803 goto ena_txq_exit; 2804 + } 2814 2805 2815 2806 node.node_teid = buf->txqs[0].q_teid; 2816 2807 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
+73 -59
drivers/net/ethernet/intel/ice/ice_ethtool.c
··· 63 63 * is queried on the base PF netdev. 64 64 */ 65 65 static const struct ice_stats ice_gstrings_pf_stats[] = { 66 - ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes), 67 - ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes), 68 - ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast), 69 - ICE_PF_STAT("rx_unicast", stats.eth.rx_unicast), 70 - ICE_PF_STAT("tx_multicast", stats.eth.tx_multicast), 71 - ICE_PF_STAT("rx_multicast", stats.eth.rx_multicast), 72 - ICE_PF_STAT("tx_broadcast", stats.eth.tx_broadcast), 73 - ICE_PF_STAT("rx_broadcast", stats.eth.rx_broadcast), 74 - ICE_PF_STAT("tx_errors", stats.eth.tx_errors), 75 - ICE_PF_STAT("tx_size_64", stats.tx_size_64), 76 - ICE_PF_STAT("rx_size_64", stats.rx_size_64), 77 - ICE_PF_STAT("tx_size_127", stats.tx_size_127), 78 - ICE_PF_STAT("rx_size_127", stats.rx_size_127), 79 - ICE_PF_STAT("tx_size_255", stats.tx_size_255), 80 - ICE_PF_STAT("rx_size_255", stats.rx_size_255), 81 - ICE_PF_STAT("tx_size_511", stats.tx_size_511), 82 - ICE_PF_STAT("rx_size_511", stats.rx_size_511), 83 - ICE_PF_STAT("tx_size_1023", stats.tx_size_1023), 84 - ICE_PF_STAT("rx_size_1023", stats.rx_size_1023), 85 - ICE_PF_STAT("tx_size_1522", stats.tx_size_1522), 86 - ICE_PF_STAT("rx_size_1522", stats.rx_size_1522), 87 - ICE_PF_STAT("tx_size_big", stats.tx_size_big), 88 - ICE_PF_STAT("rx_size_big", stats.rx_size_big), 89 - ICE_PF_STAT("link_xon_tx", stats.link_xon_tx), 90 - ICE_PF_STAT("link_xon_rx", stats.link_xon_rx), 91 - ICE_PF_STAT("link_xoff_tx", stats.link_xoff_tx), 92 - ICE_PF_STAT("link_xoff_rx", stats.link_xoff_rx), 93 - ICE_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), 94 - ICE_PF_STAT("rx_undersize", stats.rx_undersize), 95 - ICE_PF_STAT("rx_fragments", stats.rx_fragments), 96 - ICE_PF_STAT("rx_oversize", stats.rx_oversize), 97 - ICE_PF_STAT("rx_jabber", stats.rx_jabber), 98 - ICE_PF_STAT("rx_csum_bad", hw_csum_rx_error), 99 - ICE_PF_STAT("rx_length_errors", stats.rx_len_errors), 100 - ICE_PF_STAT("rx_dropped", stats.eth.rx_discards), 
101 - ICE_PF_STAT("rx_crc_errors", stats.crc_errors), 102 - ICE_PF_STAT("illegal_bytes", stats.illegal_bytes), 103 - ICE_PF_STAT("mac_local_faults", stats.mac_local_faults), 104 - ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults), 66 + ICE_PF_STAT("port.tx_bytes", stats.eth.tx_bytes), 67 + ICE_PF_STAT("port.rx_bytes", stats.eth.rx_bytes), 68 + ICE_PF_STAT("port.tx_unicast", stats.eth.tx_unicast), 69 + ICE_PF_STAT("port.rx_unicast", stats.eth.rx_unicast), 70 + ICE_PF_STAT("port.tx_multicast", stats.eth.tx_multicast), 71 + ICE_PF_STAT("port.rx_multicast", stats.eth.rx_multicast), 72 + ICE_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast), 73 + ICE_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast), 74 + ICE_PF_STAT("port.tx_errors", stats.eth.tx_errors), 75 + ICE_PF_STAT("port.tx_size_64", stats.tx_size_64), 76 + ICE_PF_STAT("port.rx_size_64", stats.rx_size_64), 77 + ICE_PF_STAT("port.tx_size_127", stats.tx_size_127), 78 + ICE_PF_STAT("port.rx_size_127", stats.rx_size_127), 79 + ICE_PF_STAT("port.tx_size_255", stats.tx_size_255), 80 + ICE_PF_STAT("port.rx_size_255", stats.rx_size_255), 81 + ICE_PF_STAT("port.tx_size_511", stats.tx_size_511), 82 + ICE_PF_STAT("port.rx_size_511", stats.rx_size_511), 83 + ICE_PF_STAT("port.tx_size_1023", stats.tx_size_1023), 84 + ICE_PF_STAT("port.rx_size_1023", stats.rx_size_1023), 85 + ICE_PF_STAT("port.tx_size_1522", stats.tx_size_1522), 86 + ICE_PF_STAT("port.rx_size_1522", stats.rx_size_1522), 87 + ICE_PF_STAT("port.tx_size_big", stats.tx_size_big), 88 + ICE_PF_STAT("port.rx_size_big", stats.rx_size_big), 89 + ICE_PF_STAT("port.link_xon_tx", stats.link_xon_tx), 90 + ICE_PF_STAT("port.link_xon_rx", stats.link_xon_rx), 91 + ICE_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx), 92 + ICE_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx), 93 + ICE_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down), 94 + ICE_PF_STAT("port.rx_undersize", stats.rx_undersize), 95 + ICE_PF_STAT("port.rx_fragments", 
stats.rx_fragments), 96 + ICE_PF_STAT("port.rx_oversize", stats.rx_oversize), 97 + ICE_PF_STAT("port.rx_jabber", stats.rx_jabber), 98 + ICE_PF_STAT("port.rx_csum_bad", hw_csum_rx_error), 99 + ICE_PF_STAT("port.rx_length_errors", stats.rx_len_errors), 100 + ICE_PF_STAT("port.rx_dropped", stats.eth.rx_discards), 101 + ICE_PF_STAT("port.rx_crc_errors", stats.crc_errors), 102 + ICE_PF_STAT("port.illegal_bytes", stats.illegal_bytes), 103 + ICE_PF_STAT("port.mac_local_faults", stats.mac_local_faults), 104 + ICE_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults), 105 105 }; 106 106 107 107 static const u32 ice_regs_dump_list[] = { ··· 304 304 return; 305 305 306 306 for (i = 0; i < ICE_PF_STATS_LEN; i++) { 307 - snprintf(p, ETH_GSTRING_LEN, "port.%s", 307 + snprintf(p, ETH_GSTRING_LEN, "%s", 308 308 ice_gstrings_pf_stats[i].stat_string); 309 309 p += ETH_GSTRING_LEN; 310 310 } ··· 1084 1084 * current PHY type, get what is supported by the NVM and intersect 1085 1085 * them to get what is truly supported 1086 1086 */ 1087 - memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings)); 1087 + memset(&cap_ksettings, 0, sizeof(cap_ksettings)); 1088 1088 ice_phy_type_to_ethtool(netdev, &cap_ksettings); 1089 1089 ethtool_intersect_link_masks(ks, &cap_ksettings); 1090 1090 ··· 1416 1416 return -EOPNOTSUPP; 1417 1417 1418 1418 /* copy the ksettings to copy_ks to avoid modifying the original */ 1419 - memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings)); 1419 + memcpy(&copy_ks, ks, sizeof(copy_ks)); 1420 1420 1421 1421 /* save autoneg out of ksettings */ 1422 1422 autoneg = copy_ks.base.autoneg; ··· 1435 1435 return -EINVAL; 1436 1436 1437 1437 /* get our own copy of the bits to check against */ 1438 - memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings)); 1438 + memset(&safe_ks, 0, sizeof(safe_ks)); 1439 1439 safe_ks.base.cmd = copy_ks.base.cmd; 1440 1440 safe_ks.base.link_mode_masks_nwords = 1441 1441 copy_ks.base.link_mode_masks_nwords; ··· 1449 1449 /* 
If copy_ks.base and safe_ks.base are not the same now, then they are 1450 1450 * trying to set something that we do not support. 1451 1451 */ 1452 - if (memcmp(&copy_ks.base, &safe_ks.base, 1453 - sizeof(struct ethtool_link_settings))) 1452 + if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base))) 1454 1453 return -EOPNOTSUPP; 1455 1454 1456 1455 while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { ··· 1473 1474 } 1474 1475 1475 1476 /* Copy abilities to config in case autoneg is not set below */ 1476 - memset(&config, 0, sizeof(struct ice_aqc_set_phy_cfg_data)); 1477 + memset(&config, 0, sizeof(config)); 1477 1478 config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE; 1478 1479 if (abilities->caps & ICE_AQC_PHY_AN_MODE) 1479 1480 config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; ··· 1667 1668 vsi->tx_rings[0]->count, new_tx_cnt); 1668 1669 1669 1670 tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, 1670 - sizeof(struct ice_ring), GFP_KERNEL); 1671 + sizeof(*tx_rings), GFP_KERNEL); 1671 1672 if (!tx_rings) { 1672 1673 err = -ENOMEM; 1673 1674 goto done; ··· 1699 1700 vsi->rx_rings[0]->count, new_rx_cnt); 1700 1701 1701 1702 rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, 1702 - sizeof(struct ice_ring), GFP_KERNEL); 1703 + sizeof(*rx_rings), GFP_KERNEL); 1703 1704 if (!rx_rings) { 1704 1705 err = -ENOMEM; 1705 1706 goto done; ··· 1818 1819 ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) 1819 1820 { 1820 1821 struct ice_netdev_priv *np = netdev_priv(netdev); 1821 - struct ice_port_info *pi; 1822 + struct ice_port_info *pi = np->vsi->port_info; 1823 + struct ice_aqc_get_phy_caps_data *pcaps; 1824 + struct ice_vsi *vsi = np->vsi; 1825 + enum ice_status status; 1822 1826 1823 - pi = np->vsi->port_info; 1824 - pause->autoneg = 1825 - ((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ? 
1826 - AUTONEG_ENABLE : AUTONEG_DISABLE); 1827 + /* Initialize pause params */ 1828 + pause->rx_pause = 0; 1829 + pause->tx_pause = 0; 1827 1830 1828 - if (pi->fc.current_mode == ICE_FC_RX_PAUSE) { 1829 - pause->rx_pause = 1; 1830 - } else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) { 1831 + pcaps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*pcaps), 1832 + GFP_KERNEL); 1833 + if (!pcaps) 1834 + return; 1835 + 1836 + /* Get current phy config */ 1837 + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, 1838 + NULL); 1839 + if (status) 1840 + goto out; 1841 + 1842 + pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ? 1843 + AUTONEG_ENABLE : AUTONEG_DISABLE); 1844 + 1845 + if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 1831 1846 pause->tx_pause = 1; 1832 - } else if (pi->fc.current_mode == ICE_FC_FULL) { 1847 + if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 1833 1848 pause->rx_pause = 1; 1834 - pause->tx_pause = 1; 1835 - } 1849 + 1850 + out: 1851 + devm_kfree(&vsi->back->pdev->dev, pcaps); 1836 1852 } 1837 1853 1838 1854 /**
+1
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
··· 30 30 #define PF_FW_ATQLEN_ATQVFE_M BIT(28) 31 31 #define PF_FW_ATQLEN_ATQOVFL_M BIT(29) 32 32 #define PF_FW_ATQLEN_ATQCRIT_M BIT(30) 33 + #define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4)) 33 34 #define PF_FW_ATQLEN_ATQENABLE_M BIT(31) 34 35 #define PF_FW_ATQT 0x00080400 35 36 #define PF_MBX_ARQBAH 0x0022E400
+70 -42
drivers/net/ethernet/intel/ice/ice_lib.c
··· 249 249 250 250 /* allocate memory for both Tx and Rx ring pointers */ 251 251 vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, 252 - sizeof(struct ice_ring *), GFP_KERNEL); 252 + sizeof(*vsi->tx_rings), GFP_KERNEL); 253 253 if (!vsi->tx_rings) 254 254 goto err_txrings; 255 255 256 256 vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, 257 - sizeof(struct ice_ring *), GFP_KERNEL); 257 + sizeof(*vsi->rx_rings), GFP_KERNEL); 258 258 if (!vsi->rx_rings) 259 259 goto err_rxrings; 260 260 ··· 262 262 /* allocate memory for q_vector pointers */ 263 263 vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, 264 264 vsi->num_q_vectors, 265 - sizeof(struct ice_q_vector *), 265 + sizeof(*vsi->q_vectors), 266 266 GFP_KERNEL); 267 267 if (!vsi->q_vectors) 268 268 goto err_vectors; ··· 348 348 void ice_vsi_delete(struct ice_vsi *vsi) 349 349 { 350 350 struct ice_pf *pf = vsi->back; 351 - struct ice_vsi_ctx ctxt; 351 + struct ice_vsi_ctx *ctxt; 352 352 enum ice_status status; 353 353 354 + ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL); 355 + if (!ctxt) 356 + return; 357 + 354 358 if (vsi->type == ICE_VSI_VF) 355 - ctxt.vf_num = vsi->vf_id; 356 - ctxt.vsi_num = vsi->vsi_num; 359 + ctxt->vf_num = vsi->vf_id; 360 + ctxt->vsi_num = vsi->vsi_num; 357 361 358 - memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props)); 362 + memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info)); 359 363 360 - status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL); 364 + status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); 361 365 if (status) 362 366 dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", 363 367 vsi->vsi_num); 368 + 369 + devm_kfree(&pf->pdev->dev, ctxt); 364 370 } 365 371 366 372 /** ··· 914 908 */ 915 909 static int ice_vsi_init(struct ice_vsi *vsi) 916 910 { 917 - struct ice_vsi_ctx ctxt = { 0 }; 918 911 struct ice_pf *pf = vsi->back; 919 912 struct ice_hw *hw = &pf->hw; 913 + struct ice_vsi_ctx *ctxt; 920 914 int ret = 0; 915 
+ 916 + ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL); 917 + if (!ctxt) 918 + return -ENOMEM; 921 919 922 920 switch (vsi->type) { 923 921 case ICE_VSI_PF: 924 - ctxt.flags = ICE_AQ_VSI_TYPE_PF; 922 + ctxt->flags = ICE_AQ_VSI_TYPE_PF; 925 923 break; 926 924 case ICE_VSI_VF: 927 - ctxt.flags = ICE_AQ_VSI_TYPE_VF; 925 + ctxt->flags = ICE_AQ_VSI_TYPE_VF; 928 926 /* VF number here is the absolute VF number (0-255) */ 929 - ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; 927 + ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id; 930 928 break; 931 929 default: 932 930 return -ENODEV; 933 931 } 934 932 935 - ice_set_dflt_vsi_ctx(&ctxt); 933 + ice_set_dflt_vsi_ctx(ctxt); 936 934 /* if the switch is in VEB mode, allow VSI loopback */ 937 935 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) 938 - ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 936 + ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 939 937 940 938 /* Set LUT type and HASH type if RSS is enabled */ 941 939 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) 942 - ice_set_rss_vsi_ctx(&ctxt, vsi); 940 + ice_set_rss_vsi_ctx(ctxt, vsi); 943 941 944 - ctxt.info.sw_id = vsi->port_info->sw_id; 945 - ice_vsi_setup_q_map(vsi, &ctxt); 942 + ctxt->info.sw_id = vsi->port_info->sw_id; 943 + ice_vsi_setup_q_map(vsi, ctxt); 946 944 947 - ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL); 945 + ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); 948 946 if (ret) { 949 947 dev_err(&pf->pdev->dev, 950 948 "Add VSI failed, err %d\n", ret); ··· 956 946 } 957 947 958 948 /* keep context for update VSI operations */ 959 - vsi->info = ctxt.info; 949 + vsi->info = ctxt->info; 960 950 961 951 /* record VSI number returned */ 962 - vsi->vsi_num = ctxt.vsi_num; 952 + vsi->vsi_num = ctxt->vsi_num; 963 953 954 + devm_kfree(&pf->pdev->dev, ctxt); 964 955 return ret; 965 956 } 966 957 ··· 1631 1620 u16 buf_len, i, pf_q; 1632 1621 int err = 0, tc; 1633 1622 1634 - buf_len = sizeof(struct ice_aqc_add_tx_qgrp); 1623 + buf_len = 
sizeof(*qg_buf); 1635 1624 qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); 1636 1625 if (!qg_buf) 1637 1626 return -ENOMEM; ··· 1834 1823 { 1835 1824 struct device *dev = &vsi->back->pdev->dev; 1836 1825 struct ice_hw *hw = &vsi->back->hw; 1837 - struct ice_vsi_ctx ctxt = { 0 }; 1826 + struct ice_vsi_ctx *ctxt; 1838 1827 enum ice_status status; 1828 + int ret = 0; 1829 + 1830 + ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); 1831 + if (!ctxt) 1832 + return -ENOMEM; 1839 1833 1840 1834 /* Here we are configuring the VSI to let the driver add VLAN tags by 1841 1835 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag 1842 1836 * insertion happens in the Tx hot path, in ice_tx_map. 1843 1837 */ 1844 - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; 1838 + ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; 1845 1839 1846 - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 1840 + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 1847 1841 1848 - status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 1842 + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 1849 1843 if (status) { 1850 1844 dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", 1851 1845 status, hw->adminq.sq_last_status); 1852 - return -EIO; 1846 + ret = -EIO; 1847 + goto out; 1853 1848 } 1854 1849 1855 - vsi->info.vlan_flags = ctxt.info.vlan_flags; 1856 - return 0; 1850 + vsi->info.vlan_flags = ctxt->info.vlan_flags; 1851 + out: 1852 + devm_kfree(dev, ctxt); 1853 + return ret; 1857 1854 } 1858 1855 1859 1856 /** ··· 1873 1854 { 1874 1855 struct device *dev = &vsi->back->pdev->dev; 1875 1856 struct ice_hw *hw = &vsi->back->hw; 1876 - struct ice_vsi_ctx ctxt = { 0 }; 1857 + struct ice_vsi_ctx *ctxt; 1877 1858 enum ice_status status; 1859 + int ret = 0; 1860 + 1861 + ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); 1862 + if (!ctxt) 1863 + return -ENOMEM; 1878 1864 1879 1865 /* Here we are configuring what the VSI 
should do with the VLAN tag in 1880 1866 * the Rx packet. We can either leave the tag in the packet or put it in 1881 1867 * the Rx descriptor. 1882 1868 */ 1883 - if (ena) { 1869 + if (ena) 1884 1870 /* Strip VLAN tag from Rx packet and put it in the desc */ 1885 - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; 1886 - } else { 1871 + ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; 1872 + else 1887 1873 /* Disable stripping. Leave tag in packet */ 1888 - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; 1889 - } 1874 + ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; 1890 1875 1891 1876 /* Allow all packets untagged/tagged */ 1892 - ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; 1877 + ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; 1893 1878 1894 - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 1879 + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 1895 1880 1896 - status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 1881 + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 1897 1882 if (status) { 1898 1883 dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n", 1899 1884 ena, status, hw->adminq.sq_last_status); 1900 - return -EIO; 1885 + ret = -EIO; 1886 + goto out; 1901 1887 } 1902 1888 1903 - vsi->info.vlan_flags = ctxt.info.vlan_flags; 1904 - return 0; 1889 + vsi->info.vlan_flags = ctxt->info.vlan_flags; 1890 + out: 1891 + devm_kfree(dev, ctxt); 1892 + return ret; 1905 1893 } 1906 1894 1907 1895 /** ··· 2518 2492 */ 2519 2493 int ice_vsi_release(struct ice_vsi *vsi) 2520 2494 { 2495 + struct ice_vf *vf = NULL; 2521 2496 struct ice_pf *pf; 2522 - struct ice_vf *vf; 2523 2497 2524 2498 if (!vsi->back) 2525 2499 return -ENODEV; 2526 2500 pf = vsi->back; 2527 - vf = &pf->vf[vsi->vf_id]; 2501 + 2502 + if (vsi->type == ICE_VSI_VF) 2503 + vf = &pf->vf[vsi->vf_id]; 2528 2504 /* do not unregister and free netdevs while driver is in the reset 2529 2505 * recovery pending state. 
Since reset/rebuild happens through PF 2530 2506 * service task workqueue, its not a good idea to unregister netdev
+90 -43
drivers/net/ethernet/intel/ice/ice_main.c
··· 609 609 } 610 610 } 611 611 612 - ice_vc_notify_link_state(pf); 612 + if (!new_link_same_as_old && pf->num_alloc_vfs) 613 + ice_vc_notify_link_state(pf); 613 614 614 615 return 0; 615 616 } ··· 1357 1356 } 1358 1357 1359 1358 /** 1359 + * ice_dis_ctrlq_interrupts - disable control queue interrupts 1360 + * @hw: pointer to HW structure 1361 + */ 1362 + static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) 1363 + { 1364 + /* disable Admin queue Interrupt causes */ 1365 + wr32(hw, PFINT_FW_CTL, 1366 + rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M); 1367 + 1368 + /* disable Mailbox queue Interrupt causes */ 1369 + wr32(hw, PFINT_MBX_CTL, 1370 + rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); 1371 + 1372 + /* disable Control queue Interrupt causes */ 1373 + wr32(hw, PFINT_OICR_CTL, 1374 + rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); 1375 + 1376 + ice_flush(hw); 1377 + } 1378 + 1379 + /** 1360 1380 * ice_free_irq_msix_misc - Unroll misc vector setup 1361 1381 * @pf: board private structure 1362 1382 */ 1363 1383 static void ice_free_irq_msix_misc(struct ice_pf *pf) 1364 1384 { 1385 + struct ice_hw *hw = &pf->hw; 1386 + 1387 + ice_dis_ctrlq_interrupts(hw); 1388 + 1365 1389 /* disable OICR interrupt */ 1366 - wr32(&pf->hw, PFINT_OICR_ENA, 0); 1367 - ice_flush(&pf->hw); 1390 + wr32(hw, PFINT_OICR_ENA, 0); 1391 + ice_flush(hw); 1368 1392 1369 1393 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { 1370 1394 synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector); ··· 1404 1378 } 1405 1379 1406 1380 /** 1381 + * ice_ena_ctrlq_interrupts - enable control queue interrupts 1382 + * @hw: pointer to HW structure 1383 + * @v_idx: HW vector index to associate the control queue interrupts with 1384 + */ 1385 + static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 v_idx) 1386 + { 1387 + u32 val; 1388 + 1389 + val = ((v_idx & PFINT_OICR_CTL_MSIX_INDX_M) | 1390 + PFINT_OICR_CTL_CAUSE_ENA_M); 1391 + wr32(hw, PFINT_OICR_CTL, val); 1392 
+ 1393 + /* enable Admin queue Interrupt causes */ 1394 + val = ((v_idx & PFINT_FW_CTL_MSIX_INDX_M) | 1395 + PFINT_FW_CTL_CAUSE_ENA_M); 1396 + wr32(hw, PFINT_FW_CTL, val); 1397 + 1398 + /* enable Mailbox queue Interrupt causes */ 1399 + val = ((v_idx & PFINT_MBX_CTL_MSIX_INDX_M) | 1400 + PFINT_MBX_CTL_CAUSE_ENA_M); 1401 + wr32(hw, PFINT_MBX_CTL, val); 1402 + 1403 + ice_flush(hw); 1404 + } 1405 + 1406 + /** 1407 1407 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events 1408 1408 * @pf: board private structure 1409 1409 * ··· 1441 1389 { 1442 1390 struct ice_hw *hw = &pf->hw; 1443 1391 int oicr_idx, err = 0; 1444 - u32 val; 1445 1392 1446 1393 if (!pf->int_name[0]) 1447 1394 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", ··· 1489 1438 skip_req_irq: 1490 1439 ice_ena_misc_vector(pf); 1491 1440 1492 - val = ((pf->hw_oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | 1493 - PFINT_OICR_CTL_CAUSE_ENA_M); 1494 - wr32(hw, PFINT_OICR_CTL, val); 1495 - 1496 - /* This enables Admin queue Interrupt causes */ 1497 - val = ((pf->hw_oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | 1498 - PFINT_FW_CTL_CAUSE_ENA_M); 1499 - wr32(hw, PFINT_FW_CTL, val); 1500 - 1501 - /* This enables Mailbox queue Interrupt causes */ 1502 - val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) | 1503 - PFINT_MBX_CTL_CAUSE_ENA_M); 1504 - wr32(hw, PFINT_MBX_CTL, val); 1505 - 1441 + ice_ena_ctrlq_interrupts(hw, pf->hw_oicr_idx); 1506 1442 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx), 1507 1443 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); 1508 1444 ··· 1551 1513 u8 mac_addr[ETH_ALEN]; 1552 1514 int err; 1553 1515 1554 - netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv), 1555 - vsi->alloc_txq, vsi->alloc_rxq); 1516 + netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, 1517 + vsi->alloc_rxq); 1556 1518 if (!netdev) 1557 1519 return -ENOMEM; 1558 1520 ··· 1905 1867 v_left -= pf->num_lan_msix; 1906 1868 1907 1869 pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget, 1908 
- sizeof(struct msix_entry), GFP_KERNEL); 1870 + sizeof(*pf->msix_entries), GFP_KERNEL); 1909 1871 1910 1872 if (!pf->msix_entries) { 1911 1873 err = -ENOMEM; ··· 1993 1955 static int ice_init_interrupt_scheme(struct ice_pf *pf) 1994 1956 { 1995 1957 int vectors = 0, hw_vectors = 0; 1996 - ssize_t size; 1997 1958 1998 1959 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 1999 1960 vectors = ice_ena_msix_range(pf); ··· 2003 1966 return vectors; 2004 1967 2005 1968 /* set up vector assignment tracking */ 2006 - size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors); 2007 - 2008 - pf->sw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); 1969 + pf->sw_irq_tracker = 1970 + devm_kzalloc(&pf->pdev->dev, sizeof(*pf->sw_irq_tracker) + 1971 + (sizeof(u16) * vectors), GFP_KERNEL); 2009 1972 if (!pf->sw_irq_tracker) { 2010 1973 ice_dis_msix(pf); 2011 1974 return -ENOMEM; ··· 2017 1980 2018 1981 /* set up HW vector assignment tracking */ 2019 1982 hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; 2020 - size = sizeof(struct ice_res_tracker) + (sizeof(u16) * hw_vectors); 2021 - 2022 - pf->hw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); 1983 + pf->hw_irq_tracker = 1984 + devm_kzalloc(&pf->pdev->dev, sizeof(*pf->hw_irq_tracker) + 1985 + (sizeof(u16) * hw_vectors), GFP_KERNEL); 2023 1986 if (!pf->hw_irq_tracker) { 2024 1987 ice_clear_interrupt_scheme(pf); 2025 1988 return -ENOMEM; ··· 2153 2116 } 2154 2117 2155 2118 pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi, 2156 - sizeof(struct ice_vsi *), GFP_KERNEL); 2119 + sizeof(*pf->vsi), GFP_KERNEL); 2157 2120 if (!pf->vsi) { 2158 2121 err = -ENOMEM; 2159 2122 goto err_init_pf_unroll; ··· 2185 2148 } 2186 2149 2187 2150 /* create switch struct for the switch element created by FW on boot */ 2188 - pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw), 2151 + pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(*pf->first_sw), 2189 2152 GFP_KERNEL); 2190 2153 if (!pf->first_sw) { 2191 
2154 err = -ENOMEM; ··· 2472 2435 * @addr: the MAC address entry being added 2473 2436 * @vid: VLAN id 2474 2437 * @flags: instructions from stack about fdb operation 2438 + * @extack: netlink extended ack 2475 2439 */ 2476 - static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], 2477 - struct net_device *dev, const unsigned char *addr, 2478 - u16 vid, u16 flags, 2479 - struct netlink_ext_ack *extack) 2440 + static int 2441 + ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], 2442 + struct net_device *dev, const unsigned char *addr, u16 vid, 2443 + u16 flags, struct netlink_ext_ack __always_unused *extack) 2480 2444 { 2481 2445 int err; 2482 2446 ··· 3745 3707 struct device *dev = &vsi->back->pdev->dev; 3746 3708 struct ice_aqc_vsi_props *vsi_props; 3747 3709 struct ice_hw *hw = &vsi->back->hw; 3748 - struct ice_vsi_ctx ctxt = { 0 }; 3710 + struct ice_vsi_ctx *ctxt; 3749 3711 enum ice_status status; 3712 + int ret = 0; 3750 3713 3751 3714 vsi_props = &vsi->info; 3752 - ctxt.info = vsi->info; 3715 + 3716 + ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); 3717 + if (!ctxt) 3718 + return -ENOMEM; 3719 + 3720 + ctxt->info = vsi->info; 3753 3721 3754 3722 if (bmode == BRIDGE_MODE_VEB) 3755 3723 /* change from VEPA to VEB mode */ 3756 - ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 3724 + ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 3757 3725 else 3758 3726 /* change from VEB to VEPA mode */ 3759 - ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 3760 - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); 3727 + ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; 3728 + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); 3761 3729 3762 - status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 3730 + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 3763 3731 if (status) { 3764 3732 dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n", 3765 3733 bmode, status, 
hw->adminq.sq_last_status); 3766 - return -EIO; 3734 + ret = -EIO; 3735 + goto out; 3767 3736 } 3768 3737 /* Update sw flags for book keeping */ 3769 - vsi_props->sw_flags = ctxt.info.sw_flags; 3738 + vsi_props->sw_flags = ctxt->info.sw_flags; 3770 3739 3771 - return 0; 3740 + out: 3741 + devm_kfree(dev, ctxt); 3742 + return ret; 3772 3743 } 3773 3744 3774 3745 /**
+4 -3
drivers/net/ethernet/intel/ice/ice_nvm.c
··· 152 152 */ 153 153 off_w = offset % ICE_SR_SECTOR_SIZE_IN_WORDS; 154 154 read_size = off_w ? 155 - min(*words, 156 - (u16)(ICE_SR_SECTOR_SIZE_IN_WORDS - off_w)) : 157 - min((*words - words_read), ICE_SR_SECTOR_SIZE_IN_WORDS); 155 + min_t(u16, *words, 156 + (ICE_SR_SECTOR_SIZE_IN_WORDS - off_w)) : 157 + min_t(u16, (*words - words_read), 158 + ICE_SR_SECTOR_SIZE_IN_WORDS); 158 159 159 160 /* Check if this is last command, if so set proper flag */ 160 161 if ((words_read + read_size) >= *words)
+34 -7
drivers/net/ethernet/intel/ice/ice_sched.c
··· 1066 1066 hw->max_children[i] = le16_to_cpu(max_sibl); 1067 1067 } 1068 1068 1069 - hw->layer_info = (struct ice_aqc_layer_props *) 1070 - devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, 1071 - (hw->num_tx_sched_layers * 1072 - sizeof(*hw->layer_info)), 1073 - GFP_KERNEL); 1069 + hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, 1070 + (hw->num_tx_sched_layers * 1071 + sizeof(*hw->layer_info)), 1072 + GFP_KERNEL); 1074 1073 if (!hw->layer_info) { 1075 1074 status = ICE_ERR_NO_MEMORY; 1076 1075 goto sched_query_out; ··· 1343 1344 node = node->sibling; 1344 1345 } 1345 1346 1347 + /* tree has one intermediate node to add this new VSI. 1348 + * So no need to calculate supported nodes for below 1349 + * layers. 1350 + */ 1351 + if (node) 1352 + break; 1346 1353 /* all the nodes are full, allocate a new one */ 1347 - if (!node) 1348 - num_nodes[i]++; 1354 + num_nodes[i]++; 1349 1355 } 1350 1356 } 1351 1357 ··· 1616 1612 } 1617 1613 1618 1614 /** 1615 + * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree 1616 + * @node: pointer to the sub-tree node 1617 + * 1618 + * This function checks for a leaf node presence in a given sub-tree node. 
1619 + */ 1620 + static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node) 1621 + { 1622 + u8 i; 1623 + 1624 + for (i = 0; i < node->num_children; i++) 1625 + if (ice_sched_is_leaf_node_present(node->children[i])) 1626 + return true; 1627 + /* check for a leaf node */ 1628 + return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF); 1629 + } 1630 + 1631 + /** 1619 1632 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes 1620 1633 * @pi: port information structure 1621 1634 * @vsi_handle: software VSI handle ··· 1666 1645 if (!vsi_node) 1667 1646 continue; 1668 1647 1648 + if (ice_sched_is_leaf_node_present(vsi_node)) { 1649 + ice_debug(pi->hw, ICE_DBG_SCHED, 1650 + "VSI has leaf nodes in TC %d\n", i); 1651 + status = ICE_ERR_IN_USE; 1652 + goto exit_sched_rm_vsi_cfg; 1653 + } 1669 1654 while (j < vsi_node->num_children) { 1670 1655 if (vsi_node->children[j]->owner == owner) { 1671 1656 ice_free_sched_node(pi, vsi_node->children[j]);
+1
drivers/net/ethernet/intel/ice/ice_status.h
··· 22 22 ICE_ERR_OUT_OF_RANGE = -13, 23 23 ICE_ERR_ALREADY_EXISTS = -14, 24 24 ICE_ERR_DOES_NOT_EXIST = -15, 25 + ICE_ERR_IN_USE = -16, 25 26 ICE_ERR_MAX_LIMIT = -17, 26 27 ICE_ERR_RESET_ONGOING = -18, 27 28 ICE_ERR_BUF_TOO_SHORT = -52,
+14 -3
drivers/net/ethernet/intel/ice/ice_switch.c
··· 98 98 u8 i; 99 99 100 100 recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES, 101 - sizeof(struct ice_sw_recipe), GFP_KERNEL); 101 + sizeof(*recps), GFP_KERNEL); 102 102 if (!recps) 103 103 return ICE_ERR_NO_MEMORY; 104 104 ··· 1538 1538 } else if (!list_elem->vsi_list_info) { 1539 1539 status = ICE_ERR_DOES_NOT_EXIST; 1540 1540 goto exit; 1541 + } else if (list_elem->vsi_list_info->ref_cnt > 1) { 1542 + /* a ref_cnt > 1 indicates that the vsi_list is being 1543 + * shared by multiple rules. Decrement the ref_cnt and 1544 + * remove this rule, but do not modify the list, as it 1545 + * is in-use by other rules. 1546 + */ 1547 + list_elem->vsi_list_info->ref_cnt--; 1548 + remove_rule = true; 1541 1549 } else { 1542 - if (list_elem->vsi_list_info->ref_cnt > 1) 1543 - list_elem->vsi_list_info->ref_cnt--; 1550 + /* a ref_cnt of 1 indicates the vsi_list is only used 1551 + * by one rule. However, the original removal request is only 1552 + * for a single VSI. Update the vsi_list first, and only 1553 + * remove the rule if there are no further VSIs in this list. 1554 + */ 1544 1555 vsi_handle = f_entry->fltr_info.vsi_handle; 1545 1556 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem); 1546 1557 if (status)
+11 -15
drivers/net/ethernet/intel/ice/ice_txrx.c
··· 48 48 */ 49 49 void ice_clean_tx_ring(struct ice_ring *tx_ring) 50 50 { 51 - unsigned long size; 52 51 u16 i; 53 52 54 53 /* ring already cleared, nothing to do */ ··· 58 59 for (i = 0; i < tx_ring->count; i++) 59 60 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); 60 61 61 - size = sizeof(struct ice_tx_buf) * tx_ring->count; 62 - memset(tx_ring->tx_buf, 0, size); 62 + memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count); 63 63 64 64 /* Zero out the descriptor ring */ 65 65 memset(tx_ring->desc, 0, tx_ring->size); ··· 224 226 int ice_setup_tx_ring(struct ice_ring *tx_ring) 225 227 { 226 228 struct device *dev = tx_ring->dev; 227 - int bi_size; 228 229 229 230 if (!dev) 230 231 return -ENOMEM; 231 232 232 233 /* warn if we are about to overwrite the pointer */ 233 234 WARN_ON(tx_ring->tx_buf); 234 - bi_size = sizeof(struct ice_tx_buf) * tx_ring->count; 235 - tx_ring->tx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL); 235 + tx_ring->tx_buf = 236 + devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count, 237 + GFP_KERNEL); 236 238 if (!tx_ring->tx_buf) 237 239 return -ENOMEM; 238 240 239 241 /* round up to nearest 4K */ 240 - tx_ring->size = tx_ring->count * sizeof(struct ice_tx_desc); 241 - tx_ring->size = ALIGN(tx_ring->size, 4096); 242 + tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), 243 + 4096); 242 244 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, 243 245 GFP_KERNEL); 244 246 if (!tx_ring->desc) { ··· 265 267 void ice_clean_rx_ring(struct ice_ring *rx_ring) 266 268 { 267 269 struct device *dev = rx_ring->dev; 268 - unsigned long size; 269 270 u16 i; 270 271 271 272 /* ring already cleared, nothing to do */ ··· 289 292 rx_buf->page_offset = 0; 290 293 } 291 294 292 - size = sizeof(struct ice_rx_buf) * rx_ring->count; 293 - memset(rx_ring->rx_buf, 0, size); 295 + memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count); 294 296 295 297 /* Zero out the descriptor ring */ 296 298 
memset(rx_ring->desc, 0, rx_ring->size); ··· 327 331 int ice_setup_rx_ring(struct ice_ring *rx_ring) 328 332 { 329 333 struct device *dev = rx_ring->dev; 330 - int bi_size; 331 334 332 335 if (!dev) 333 336 return -ENOMEM; 334 337 335 338 /* warn if we are about to overwrite the pointer */ 336 339 WARN_ON(rx_ring->rx_buf); 337 - bi_size = sizeof(struct ice_rx_buf) * rx_ring->count; 338 - rx_ring->rx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL); 340 + rx_ring->rx_buf = 341 + devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count, 342 + GFP_KERNEL); 339 343 if (!rx_ring->rx_buf) 340 344 return -ENOMEM; 341 345 ··· 1169 1173 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 1170 1174 ice_update_ena_itr(vsi, q_vector); 1171 1175 1172 - return min(work_done, budget - 1); 1176 + return min_t(int, work_done, budget - 1); 1173 1177 } 1174 1178 1175 1179 /* helper function for building cmd/type/offset */
+46 -24
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
··· 173 173 wr32(hw, VPINT_ALLOC(vf->vf_id), 0); 174 174 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); 175 175 176 - first = vf->first_vector_idx; 176 + first = vf->first_vector_idx + 177 + hw->func_caps.common_cap.msix_vector_first_id; 177 178 last = first + pf->num_vf_msix - 1; 178 179 for (v = first; v <= last; v++) { 179 180 u32 reg; ··· 311 310 */ 312 311 clear_bit(ICE_VF_STATE_INIT, vf->vf_states); 313 312 313 + /* Clear the VF's ARQLEN register. This is how the VF detects reset, 314 + * since the VFGEN_RSTAT register doesn't stick at 0 after reset. 315 + */ 316 + wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0); 317 + 314 318 /* In the case of a VFLR, the HW has already reset the VF and we 315 319 * just need to clean up, so don't hit the VFRTRIG register. 316 320 */ ··· 351 345 { 352 346 struct device *dev = &vsi->back->pdev->dev; 353 347 struct ice_hw *hw = &vsi->back->hw; 354 - struct ice_vsi_ctx ctxt = { 0 }; 348 + struct ice_vsi_ctx *ctxt; 355 349 enum ice_status status; 350 + int ret = 0; 356 351 357 - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED | 358 - ICE_AQ_VSI_PVLAN_INSERT_PVID | 359 - ICE_AQ_VSI_VLAN_EMOD_STR; 360 - ctxt.info.pvid = cpu_to_le16(vid); 361 - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 352 + ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); 353 + if (!ctxt) 354 + return -ENOMEM; 362 355 363 - status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); 356 + ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED | 357 + ICE_AQ_VSI_PVLAN_INSERT_PVID | 358 + ICE_AQ_VSI_VLAN_EMOD_STR); 359 + ctxt->info.pvid = cpu_to_le16(vid); 360 + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 361 + 362 + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); 364 363 if (status) { 365 364 dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", 366 365 status, hw->adminq.sq_last_status); 367 - return -EIO; 366 + ret = -EIO; 367 + goto out; 368 368 } 369 369 370 - vsi->info.pvid = ctxt.info.pvid; 371 - 
vsi->info.vlan_flags = ctxt.info.vlan_flags; 372 - return 0; 370 + vsi->info.pvid = ctxt->info.pvid; 371 + vsi->info.vlan_flags = ctxt->info.vlan_flags; 372 + out: 373 + devm_kfree(dev, ctxt); 374 + return ret; 373 375 } 374 376 375 377 /** ··· 524 510 525 511 hw = &pf->hw; 526 512 vsi = pf->vsi[vf->lan_vsi_idx]; 527 - first = vf->first_vector_idx; 513 + first = vf->first_vector_idx + 514 + hw->func_caps.common_cap.msix_vector_first_id; 528 515 last = (first + pf->num_vf_msix) - 1; 529 516 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; 530 517 ··· 2494 2479 int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) 2495 2480 { 2496 2481 struct ice_netdev_priv *np = netdev_priv(netdev); 2497 - struct ice_vsi_ctx ctx = { 0 }; 2498 2482 struct ice_vsi *vsi = np->vsi; 2499 2483 struct ice_pf *pf = vsi->back; 2484 + struct ice_vsi_ctx *ctx; 2485 + enum ice_status status; 2500 2486 struct ice_vf *vf; 2501 - int status; 2487 + int ret = 0; 2502 2488 2503 2489 /* validate the request */ 2504 2490 if (vf_id >= pf->num_alloc_vfs) { ··· 2519 2503 return 0; 2520 2504 } 2521 2505 2522 - ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); 2506 + ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL); 2507 + if (!ctx) 2508 + return -ENOMEM; 2509 + 2510 + ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); 2523 2511 2524 2512 if (ena) { 2525 - ctx.info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; 2526 - ctx.info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M; 2513 + ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; 2514 + ctx->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M; 2527 2515 } 2528 2516 2529 - status = ice_update_vsi(&pf->hw, vsi->idx, &ctx, NULL); 2517 + status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); 2530 2518 if (status) { 2531 2519 dev_dbg(&pf->pdev->dev, 2532 2520 "Error %d, failed to update VSI* parameters\n", status); 2533 - return -EIO; 2521 + ret = -EIO; 2522 + goto out; 
2534 2523 } 2535 2524 2536 2525 vf->spoofchk = ena; 2537 - vsi->info.sec_flags = ctx.info.sec_flags; 2538 - vsi->info.sw_flags2 = ctx.info.sw_flags2; 2539 - 2540 - return status; 2526 + vsi->info.sec_flags = ctx->info.sec_flags; 2527 + vsi->info.sw_flags2 = ctx->info.sw_flags2; 2528 + out: 2529 + devm_kfree(&pf->pdev->dev, ctx); 2530 + return ret; 2541 2531 } 2542 2532 2543 2533 /**