Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '200GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2025-01-24 (idpf, ice, iavf)

For idpf:

Emil adds a memory barrier when accessing control queue descriptors and
restores the call to idpf_vc_xn_shutdown() when resetting.

Manoj Vishwanathan expands transaction lock to properly protect xn->salt
value and adds additional debugging information.

Marco Leogrande converts workqueues to be unbound.

For ice:

Przemek fixes incorrect size use for array.

Mateusz removes reporting of an invalid equalizer parameter and its value.

For iavf:

Michal adjusts some VLAN state changes to occur without a call to the PF,
avoiding timing issues with those calls.

* '200GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
iavf: allow changing VLAN state without calling PF
ice: remove invalid parameter of equalizer
ice: fix ice_parser_rt::bst_key array size
idpf: add more info during virtchnl transaction timeout/salt mismatch
idpf: convert workqueues to unbound
idpf: Acquire the lock before accessing the xn->salt
idpf: fix transaction timeouts on reset
idpf: add read memory barrier when checking descriptor done bit
====================

Link: https://patch.msgid.link/20250124213213.1328775-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+59 -27
+17 -2
drivers/net/ethernet/intel/iavf/iavf_main.c
··· 773 773 f->state = IAVF_VLAN_ADD; 774 774 adapter->num_vlan_filters++; 775 775 iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER); 776 + } else if (f->state == IAVF_VLAN_REMOVE) { 777 + /* IAVF_VLAN_REMOVE means that VLAN wasn't yet removed. 778 + * We can safely only change the state here. 779 + */ 780 + f->state = IAVF_VLAN_ACTIVE; 776 781 } 777 782 778 783 clearout: ··· 798 793 799 794 f = iavf_find_vlan(adapter, vlan); 800 795 if (f) { 801 - f->state = IAVF_VLAN_REMOVE; 802 - iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER); 796 + /* IAVF_ADD_VLAN means that VLAN wasn't even added yet. 797 + * Remove it from the list. 798 + */ 799 + if (f->state == IAVF_VLAN_ADD) { 800 + list_del(&f->list); 801 + kfree(f); 802 + adapter->num_vlan_filters--; 803 + } else { 804 + f->state = IAVF_VLAN_REMOVE; 805 + iavf_schedule_aq_request(adapter, 806 + IAVF_FLAG_AQ_DEL_VLAN_FILTER); 807 + } 803 808 } 804 809 805 810 spin_unlock_bh(&adapter->mac_vlan_list_lock);
-1
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
··· 1498 1498 #define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT) 1499 1499 #define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT) 1500 1500 #define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT) 1501 - #define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT) 1502 1501 #define ICE_AQC_RX_EQU_CTLE_GAINHF (0x20 << ICE_AQC_RX_EQU_SHIFT) 1503 1502 #define ICE_AQC_RX_EQU_CTLE_GAINLF (0x21 << ICE_AQC_RX_EQU_SHIFT) 1504 1503 #define ICE_AQC_RX_EQU_CTLE_GAINDC (0x22 << ICE_AQC_RX_EQU_SHIFT)
-1
drivers/net/ethernet/intel/ice/ice_ethtool.c
··· 710 710 { ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 }, 711 711 { ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf }, 712 712 { ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf }, 713 - { ICE_AQC_RX_EQU_DRATE, rx, &ptr->rx_equ_drate }, 714 713 { ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf }, 715 714 { ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf }, 716 715 { ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
-1
drivers/net/ethernet/intel/ice/ice_ethtool.h
··· 15 15 int rx_equ_post1; 16 16 int rx_equ_bflf; 17 17 int rx_equ_bfhf; 18 - int rx_equ_drate; 19 18 int rx_equ_ctle_gainhf; 20 19 int rx_equ_ctle_gainlf; 21 20 int rx_equ_ctle_gaindc;
+2 -4
drivers/net/ethernet/intel/ice/ice_parser.h
··· 257 257 /*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/ 258 258 #define ICE_BST_TCAM_TABLE_SIZE 256 259 259 #define ICE_BST_TCAM_KEY_SIZE 20 260 - #define ICE_BST_KEY_TCAM_SIZE 19 261 260 262 261 /* Boost TCAM item */ 263 262 struct ice_bst_tcam_item { ··· 400 401 #define ICE_PARSER_GPR_NUM 128 401 402 #define ICE_PARSER_FLG_NUM 64 402 403 #define ICE_PARSER_ERR_NUM 16 403 - #define ICE_BST_KEY_SIZE 10 404 404 #define ICE_MARKER_ID_SIZE 9 405 405 #define ICE_MARKER_MAX_SIZE \ 406 406 (ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1) ··· 429 431 u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV]; 430 432 u16 pkt_len; 431 433 u16 po; 432 - u8 bst_key[ICE_BST_KEY_SIZE]; 434 + u8 bst_key[ICE_BST_TCAM_KEY_SIZE]; 433 435 struct ice_pg_cam_key pg_key; 436 + u8 pg_prio; 434 437 struct ice_alu *alu0; 435 438 struct ice_alu *alu1; 436 439 struct ice_alu *alu2; 437 440 struct ice_pg_cam_action *action; 438 - u8 pg_prio; 439 441 struct ice_gpr_pu pu; 440 442 u8 markers[ICE_MARKER_ID_SIZE]; 441 443 bool protocols[ICE_PO_PAIR_SIZE];
+5 -7
drivers/net/ethernet/intel/ice/ice_parser_rt.c
··· 125 125 else 126 126 key[idd] = imem->b_kb.prio; 127 127 128 - idd = ICE_BST_KEY_TCAM_SIZE - 1; 128 + idd = ICE_BST_TCAM_KEY_SIZE - 2; 129 129 for (i = idd; i >= 0; i--) { 130 130 int j; 131 131 132 132 j = ho + idd - i; 133 133 if (j < ICE_PARSER_MAX_PKT_LEN) 134 - key[i] = rt->pkt_buf[ho + idd - i]; 134 + key[i] = rt->pkt_buf[j]; 135 135 else 136 136 key[i] = 0; 137 137 } 138 138 139 - ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n"); 140 - ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", 141 - key[0], key[1], key[2], key[3], key[4], 142 - key[5], key[6], key[7], key[8], key[9]); 143 - ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n"); 139 + ice_debug_array_w_prefix(rt->psr->hw, ICE_DBG_PARSER, 140 + KBUILD_MODNAME ": Generated Boost TCAM Key", 141 + key, ICE_BST_TCAM_KEY_SIZE); 144 142 } 145 143 146 144 static u16 ice_bit_rev_u16(u16 v, int len)
+6
drivers/net/ethernet/intel/idpf/idpf_controlq.c
··· 376 376 if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD)) 377 377 break; 378 378 379 + /* Ensure no other fields are read until DD flag is checked */ 380 + dma_rmb(); 381 + 379 382 /* strip off FW internal code */ 380 383 desc_err = le16_to_cpu(desc->ret_val) & 0xff; 381 384 ··· 565 562 566 563 if (!(flags & IDPF_CTLQ_FLAG_DD)) 567 564 break; 565 + 566 + /* Ensure no other fields are read until DD flag is checked */ 567 + dma_rmb(); 568 568 569 569 q_msg[i].vmvf_type = (flags & 570 570 (IDPF_CTLQ_FLAG_FTYPE_VM |
+10 -5
drivers/net/ethernet/intel/idpf/idpf_main.c
··· 174 174 pci_set_master(pdev); 175 175 pci_set_drvdata(pdev, adapter); 176 176 177 - adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0, 177 + adapter->init_wq = alloc_workqueue("%s-%s-init", 178 + WQ_UNBOUND | WQ_MEM_RECLAIM, 0, 178 179 dev_driver_string(dev), 179 180 dev_name(dev)); 180 181 if (!adapter->init_wq) { ··· 184 183 goto err_free; 185 184 } 186 185 187 - adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0, 186 + adapter->serv_wq = alloc_workqueue("%s-%s-service", 187 + WQ_UNBOUND | WQ_MEM_RECLAIM, 0, 188 188 dev_driver_string(dev), 189 189 dev_name(dev)); 190 190 if (!adapter->serv_wq) { ··· 194 192 goto err_serv_wq_alloc; 195 193 } 196 194 197 - adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0, 195 + adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 196 + WQ_UNBOUND | WQ_MEM_RECLAIM, 0, 198 197 dev_driver_string(dev), 199 198 dev_name(dev)); 200 199 if (!adapter->mbx_wq) { ··· 204 201 goto err_mbx_wq_alloc; 205 202 } 206 203 207 - adapter->stats_wq = alloc_workqueue("%s-%s-stats", 0, 0, 204 + adapter->stats_wq = alloc_workqueue("%s-%s-stats", 205 + WQ_UNBOUND | WQ_MEM_RECLAIM, 0, 208 206 dev_driver_string(dev), 209 207 dev_name(dev)); 210 208 if (!adapter->stats_wq) { ··· 214 210 goto err_stats_wq_alloc; 215 211 } 216 212 217 - adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0, 213 + adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 214 + WQ_UNBOUND | WQ_MEM_RECLAIM, 0, 218 215 dev_driver_string(dev), 219 216 dev_name(dev)); 220 217 if (!adapter->vc_event_wq) {
+19 -6
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
··· 517 517 retval = -ENXIO; 518 518 goto only_unlock; 519 519 case IDPF_VC_XN_WAITING: 520 - dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n", 521 - params->vc_op, params->timeout_ms); 520 + dev_notice_ratelimited(&adapter->pdev->dev, 521 + "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n", 522 + params->vc_op, cookie, xn->vc_op, 523 + xn->salt, params->timeout_ms); 522 524 retval = -ETIME; 523 525 break; 524 526 case IDPF_VC_XN_COMPLETED_SUCCESS: ··· 614 612 return -EINVAL; 615 613 } 616 614 xn = &adapter->vcxn_mngr->ring[xn_idx]; 615 + idpf_vc_xn_lock(xn); 617 616 salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info); 618 617 if (xn->salt != salt) { 619 - dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n", 620 - xn->salt, salt); 618 + dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n", 619 + xn->vc_op, xn->salt, xn->state, 620 + ctlq_msg->cookie.mbx.chnl_opcode, salt); 621 + idpf_vc_xn_unlock(xn); 621 622 return -EINVAL; 622 623 } 623 624 624 - idpf_vc_xn_lock(xn); 625 625 switch (xn->state) { 626 626 case IDPF_VC_XN_WAITING: 627 627 /* success */ ··· 3081 3077 */ 3082 3078 void idpf_vc_core_deinit(struct idpf_adapter *adapter) 3083 3079 { 3080 + bool remove_in_prog; 3081 + 3084 3082 if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags)) 3085 3083 return; 3086 3084 3085 + /* Avoid transaction timeouts when called during reset */ 3086 + remove_in_prog = test_bit(IDPF_REMOVE_IN_PROG, adapter->flags); 3087 + if (!remove_in_prog) 3088 + idpf_vc_xn_shutdown(adapter->vcxn_mngr); 3089 + 3087 3090 idpf_deinit_task(adapter); 3088 3091 idpf_intr_rel(adapter); 3089 - idpf_vc_xn_shutdown(adapter->vcxn_mngr); 3092 + 3093 + if (remove_in_prog) 3094 + idpf_vc_xn_shutdown(adapter->vcxn_mngr); 3090 3095 3091 3096 cancel_delayed_work_sync(&adapter->serv_task); 3092 3097 cancel_delayed_work_sync(&adapter->mbx_task);