Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2025-08-25 (ice, ixgbe)

For ice:
Emil adds a check to ensure the auxiliary device was created before tear
down to prevent a NULL pointer dereference.

Jake reworks flow for failed Tx scheduler configuration to allow for
proper recovery and operation. He also adjusts ice_adapter index for
E825C devices as use of DSN is incompatible with this device.

Michal corrects tracking of buffer allocation failure in
ice_clean_rx_irq().

For ixgbe:
Jedrzej adds the __packed attribute to ixgbe_orom_civd_info for
compatibility with device OROM data.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
ixgbe: fix ixgbe_orom_civd_info struct layout
ice: fix incorrect counter for buffer allocation failures
ice: use fixed adapter index for E825C embedded devices
ice: don't leave device non-functional if Tx scheduler config fails
ice: fix NULL pointer dereference in ice_unplug_aux_dev() on reset
====================

Link: https://patch.msgid.link/20250825215019.3442873-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+93 -37
+1
drivers/net/ethernet/intel/ice/ice.h
··· 510 510 ICE_FLAG_LINK_LENIENT_MODE_ENA, 511 511 ICE_FLAG_PLUG_AUX_DEV, 512 512 ICE_FLAG_UNPLUG_AUX_DEV, 513 + ICE_FLAG_AUX_DEV_CREATED, 513 514 ICE_FLAG_MTU_CHANGED, 514 515 ICE_FLAG_GNSS, /* GNSS successfully initialized */ 515 516 ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */
+38 -11
drivers/net/ethernet/intel/ice/ice_adapter.c
··· 13 13 static DEFINE_XARRAY(ice_adapters); 14 14 static DEFINE_MUTEX(ice_adapters_mutex); 15 15 16 - static unsigned long ice_adapter_index(u64 dsn) 16 + #define ICE_ADAPTER_FIXED_INDEX BIT_ULL(63) 17 + 18 + #define ICE_ADAPTER_INDEX_E825C \ 19 + (ICE_DEV_ID_E825C_BACKPLANE | ICE_ADAPTER_FIXED_INDEX) 20 + 21 + static u64 ice_adapter_index(struct pci_dev *pdev) 17 22 { 23 + switch (pdev->device) { 24 + case ICE_DEV_ID_E825C_BACKPLANE: 25 + case ICE_DEV_ID_E825C_QSFP: 26 + case ICE_DEV_ID_E825C_SFP: 27 + case ICE_DEV_ID_E825C_SGMII: 28 + /* E825C devices have multiple NACs which are connected to the 29 + * same clock source, and which must share the same 30 + * ice_adapter structure. We can't use the serial number since 31 + * each NAC has its own NVM generated with its own unique 32 + * Device Serial Number. Instead, rely on the embedded nature 33 + * of the E825C devices, and use a fixed index. This relies on 34 + * the fact that all E825C physical functions in a given 35 + * system are part of the same overall device. 
36 + */ 37 + return ICE_ADAPTER_INDEX_E825C; 38 + default: 39 + return pci_get_dsn(pdev) & ~ICE_ADAPTER_FIXED_INDEX; 40 + } 41 + } 42 + 43 + static unsigned long ice_adapter_xa_index(struct pci_dev *pdev) 44 + { 45 + u64 index = ice_adapter_index(pdev); 46 + 18 47 #if BITS_PER_LONG == 64 19 - return dsn; 48 + return index; 20 49 #else 21 - return (u32)dsn ^ (u32)(dsn >> 32); 50 + return (u32)index ^ (u32)(index >> 32); 22 51 #endif 23 52 } 24 53 25 - static struct ice_adapter *ice_adapter_new(u64 dsn) 54 + static struct ice_adapter *ice_adapter_new(struct pci_dev *pdev) 26 55 { 27 56 struct ice_adapter *adapter; 28 57 ··· 59 30 if (!adapter) 60 31 return NULL; 61 32 62 - adapter->device_serial_number = dsn; 33 + adapter->index = ice_adapter_index(pdev); 63 34 spin_lock_init(&adapter->ptp_gltsyn_time_lock); 64 35 spin_lock_init(&adapter->txq_ctx_lock); 65 36 refcount_set(&adapter->refcount, 1); ··· 93 64 */ 94 65 struct ice_adapter *ice_adapter_get(struct pci_dev *pdev) 95 66 { 96 - u64 dsn = pci_get_dsn(pdev); 97 67 struct ice_adapter *adapter; 98 68 unsigned long index; 99 69 int err; 100 70 101 - index = ice_adapter_index(dsn); 71 + index = ice_adapter_xa_index(pdev); 102 72 scoped_guard(mutex, &ice_adapters_mutex) { 103 73 err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL); 104 74 if (err == -EBUSY) { 105 75 adapter = xa_load(&ice_adapters, index); 106 76 refcount_inc(&adapter->refcount); 107 - WARN_ON_ONCE(adapter->device_serial_number != dsn); 77 + WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev)); 108 78 return adapter; 109 79 } 110 80 if (err) 111 81 return ERR_PTR(err); 112 82 113 - adapter = ice_adapter_new(dsn); 83 + adapter = ice_adapter_new(pdev); 114 84 if (!adapter) 115 85 return ERR_PTR(-ENOMEM); 116 86 xa_store(&ice_adapters, index, adapter, GFP_KERNEL); ··· 128 100 */ 129 101 void ice_adapter_put(struct pci_dev *pdev) 130 102 { 131 - u64 dsn = pci_get_dsn(pdev); 132 103 struct ice_adapter *adapter; 133 104 unsigned long index; 134 105 
135 - index = ice_adapter_index(dsn); 106 + index = ice_adapter_xa_index(pdev); 136 107 scoped_guard(mutex, &ice_adapters_mutex) { 137 108 adapter = xa_load(&ice_adapters, index); 138 109 if (WARN_ON(!adapter))
+2 -2
drivers/net/ethernet/intel/ice/ice_adapter.h
··· 33 33 * @txq_ctx_lock: Spinlock protecting access to the GLCOMM_QTX_CNTX_CTL register 34 34 * @ctrl_pf: Control PF of the adapter 35 35 * @ports: Ports list 36 - * @device_serial_number: DSN cached for collision detection on 32bit systems 36 + * @index: 64-bit index cached for collision detection on 32bit systems 37 37 */ 38 38 struct ice_adapter { 39 39 refcount_t refcount; ··· 44 44 45 45 struct ice_pf *ctrl_pf; 46 46 struct ice_port_list ports; 47 - u64 device_serial_number; 47 + u64 index; 48 48 }; 49 49 50 50 struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
+32 -12
drivers/net/ethernet/intel/ice/ice_ddp.c
··· 2377 2377 * The function will apply the new Tx topology from the package buffer 2378 2378 * if available. 2379 2379 * 2380 - * Return: zero when update was successful, negative values otherwise. 2380 + * Return: 2381 + * * 0 - Successfully applied topology configuration. 2382 + * * -EBUSY - Failed to acquire global configuration lock. 2383 + * * -EEXIST - Topology configuration has already been applied. 2384 + * * -EIO - Unable to apply topology configuration. 2385 + * * -ENODEV - Failed to re-initialize device after applying configuration. 2386 + * * Other negative error codes indicate unexpected failures. 2381 2387 */ 2382 2388 int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len) 2383 2389 { ··· 2416 2410 2417 2411 if (status) { 2418 2412 ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n"); 2419 - return status; 2413 + return -EIO; 2420 2414 } 2421 2415 2422 2416 /* Is default topology already applied ? */ ··· 2503 2497 ICE_GLOBAL_CFG_LOCK_TIMEOUT); 2504 2498 if (status) { 2505 2499 ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n"); 2506 - return status; 2500 + return -EBUSY; 2507 2501 } 2508 2502 2509 2503 /* Check if reset was triggered already. */ 2510 2504 reg = rd32(hw, GLGEN_RSTAT); 2511 2505 if (reg & GLGEN_RSTAT_DEVSTATE_M) { 2512 - /* Reset is in progress, re-init the HW again */ 2513 2506 ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. 
Layer topology might be applied already\n"); 2514 2507 ice_check_reset(hw); 2515 - return 0; 2508 + /* Reset is in progress, re-init the HW again */ 2509 + goto reinit_hw; 2516 2510 } 2517 2511 2518 2512 /* Set new topology */ 2519 2513 status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true); 2520 2514 if (status) { 2521 - ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n"); 2522 - return status; 2515 + ice_debug(hw, ICE_DBG_INIT, "Failed to set Tx topology, status %pe\n", 2516 + ERR_PTR(status)); 2517 + /* only report -EIO here as the caller checks the error value 2518 + * and reports an informational error message informing that 2519 + * the driver failed to program Tx topology. 2520 + */ 2521 + status = -EIO; 2523 2522 } 2524 2523 2525 - /* New topology is updated, delay 1 second before issuing the CORER */ 2524 + /* Even if Tx topology config failed, we need to CORE reset here to 2525 + * clear the global configuration lock. Delay 1 second to allow 2526 + * hardware to settle then issue a CORER 2527 + */ 2526 2528 msleep(1000); 2527 2529 ice_reset(hw, ICE_RESET_CORER); 2528 - /* CORER will clear the global lock, so no explicit call 2529 - * required for release. 2530 - */ 2530 + ice_check_reset(hw); 2531 2531 2532 - return 0; 2532 + reinit_hw: 2533 + /* Since we triggered a CORER, re-initialize hardware */ 2534 + ice_deinit_hw(hw); 2535 + if (ice_init_hw(hw)) { 2536 + ice_debug(hw, ICE_DBG_INIT, "Failed to re-init hardware after setting Tx topology\n"); 2537 + return -ENODEV; 2538 + } 2539 + 2540 + return status; 2533 2541 }
+6 -4
drivers/net/ethernet/intel/ice/ice_idc.c
··· 336 336 mutex_lock(&pf->adev_mutex); 337 337 cdev->adev = adev; 338 338 mutex_unlock(&pf->adev_mutex); 339 + set_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags); 339 340 340 341 return 0; 341 342 } ··· 348 347 { 349 348 struct auxiliary_device *adev; 350 349 350 + if (!test_and_clear_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags)) 351 + return; 352 + 351 353 mutex_lock(&pf->adev_mutex); 352 354 adev = pf->cdev_info->adev; 353 355 pf->cdev_info->adev = NULL; 354 356 mutex_unlock(&pf->adev_mutex); 355 357 356 - if (adev) { 357 - auxiliary_device_delete(adev); 358 - auxiliary_device_uninit(adev); 359 - } 358 + auxiliary_device_delete(adev); 359 + auxiliary_device_uninit(adev); 360 360 } 361 361 362 362 /**
+11 -5
drivers/net/ethernet/intel/ice/ice_main.c
··· 4536 4536 dev_info(dev, "Tx scheduling layers switching feature disabled\n"); 4537 4537 else 4538 4538 dev_info(dev, "Tx scheduling layers switching feature enabled\n"); 4539 - /* if there was a change in topology ice_cfg_tx_topo triggered 4540 - * a CORER and we need to re-init hw 4539 + return 0; 4540 + } else if (err == -ENODEV) { 4541 + /* If we failed to re-initialize the device, we can no longer 4542 + * continue loading. 4541 4543 */ 4542 - ice_deinit_hw(hw); 4543 - err = ice_init_hw(hw); 4544 - 4544 + dev_warn(dev, "Failed to initialize hardware after applying Tx scheduling configuration.\n"); 4545 4545 return err; 4546 4546 } else if (err == -EIO) { 4547 4547 dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n"); 4548 + return 0; 4549 + } else if (err == -EEXIST) { 4550 + return 0; 4548 4551 } 4549 4552 4553 + /* Do not treat this as a fatal error. */ 4554 + dev_info(dev, "Failed to apply Tx scheduling configuration, err %pe\n", 4555 + ERR_PTR(err)); 4550 4556 return 0; 4551 4557 } 4552 4558
+1 -1
drivers/net/ethernet/intel/ice/ice_txrx.c
··· 1352 1352 skb = ice_construct_skb(rx_ring, xdp); 1353 1353 /* exit if we failed to retrieve a buffer */ 1354 1354 if (!skb) { 1355 - rx_ring->ring_stats->rx_stats.alloc_page_failed++; 1355 + rx_ring->ring_stats->rx_stats.alloc_buf_failed++; 1356 1356 xdp_verdict = ICE_XDP_CONSUMED; 1357 1357 } 1358 1358 ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
··· 3125 3125 if (err) 3126 3126 return err; 3127 3127 3128 - combo_ver = le32_to_cpu(civd.combo_ver); 3128 + combo_ver = get_unaligned_le32(&civd.combo_ver); 3129 3129 3130 3130 orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver); 3131 3131 orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver);
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
··· 932 932 __le32 combo_ver; /* Combo Image Version number */ 933 933 u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */ 934 934 __le16 combo_name[32]; /* Unicode string representing the Combo Image version */ 935 - }; 935 + } __packed; 936 936 937 937 /* Function specific capabilities */ 938 938 struct ixgbe_hw_func_caps {