Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
ice: cleanups and preparation for live migration

Jacob Keller says:

Various cleanups and preparation to the ice driver code for supporting
SR-IOV live migration.

The logic for unpacking Rx queue context data is added. This is the inverse
of the existing packing logic. Thanks to <linux/packing.h> this is trivial
to add.

Code to enable both reading and writing the Tx queue context for a queue
over a shared hardware register interface is added. Thanks to ice_adapter,
this is locked across all PFs that need to use it, preventing concurrency
issues with multiple PFs.

The RSS hash configuration requested by a VF is cached within the VF
structure. This will be used to track and restore the same configuration
during migration load.

ice_sriov_set_msix_vec_count() is updated to use pci_iov_vf_id() instead of
open-coding a worse equivalent, and checks to avoid rebuilding MSI-X if the
current request is for the existing amount of vectors.

A new ice_get_vf_by_dev() helper function is added to simplify accessing a
VF from its PCI device structure. This will be used more heavily within the
live migration code itself.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
ice: introduce ice_get_vf_by_dev() wrapper
ice: avoid rebuilding if MSI-X vector count is unchanged
ice: use pci_iov_vf_id() to get VF ID
ice: expose VF functions used by live migration
ice: move ice_vsi_update_l2tsel to ice_lib.c
ice: save RSS hash configuration for migration
ice: add functions to get and set Tx queue context
ice: add support for reading and unpacking Rx queue context
====================

Link: https://patch.msgid.link/20250710214518.1824208-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+378 -69
+1
drivers/net/ethernet/intel/ice/ice_adapter.c
··· 32 32 33 33 adapter->device_serial_number = dsn; 34 34 spin_lock_init(&adapter->ptp_gltsyn_time_lock); 35 + spin_lock_init(&adapter->txq_ctx_lock); 35 36 refcount_set(&adapter->refcount, 1); 36 37 37 38 mutex_init(&adapter->ports.lock);
+4 -1
drivers/net/ethernet/intel/ice/ice_adapter.h
··· 27 27 28 28 /** 29 29 * struct ice_adapter - PCI adapter resources shared across PFs 30 + * @refcount: Reference count. struct ice_pf objects hold the references. 30 31 * @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME 31 32 * register of the PTP clock. 32 - * @refcount: Reference count. struct ice_pf objects hold the references. 33 + * @txq_ctx_lock: Spinlock protecting access to the GLCOMM_QTX_CNTX_CTL register 33 34 * @ctrl_pf: Control PF of the adapter 34 35 * @ports: Ports list 35 36 * @device_serial_number: DSN cached for collision detection on 32bit systems ··· 39 38 refcount_t refcount; 40 39 /* For access to the GLTSYN_TIME register */ 41 40 spinlock_t ptp_gltsyn_time_lock; 41 + /* For access to GLCOMM_QTX_CNTX_CTL register */ 42 + spinlock_t txq_ctx_lock; 42 43 43 44 struct ice_pf *ctrl_pf; 44 45 struct ice_port_list ports;
+13 -1
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
··· 14 14 15 15 #define ICE_RXQ_CTX_SIZE_DWORDS 8 16 16 #define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) 17 - #define ICE_TXQ_CTX_SZ 22 18 17 19 18 typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t; 19 + 20 + /* The Tx queue context is 40 bytes, and includes some internal state. The 21 + * Admin Queue buffers don't include the internal state, so only include the 22 + * first 22 bytes of the context. 23 + */ 24 + #define ICE_TXQ_CTX_SZ 22 25 + 20 26 typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t; 27 + 28 + #define ICE_TXQ_CTX_FULL_SIZE_DWORDS 10 29 + #define ICE_TXQ_CTX_FULL_SZ \ 30 + (ICE_TXQ_CTX_FULL_SIZE_DWORDS * sizeof(u32)) 31 + 32 + typedef struct __packed { u8 buf[ICE_TXQ_CTX_FULL_SZ]; } ice_txq_ctx_buf_full_t; 21 33 22 34 struct ice_aqc_generic { 23 35 __le32 param0;
+230 -3
drivers/net/ethernet/intel/ice/ice_common.c
··· 1342 1342 } 1343 1343 } 1344 1344 1345 + /** 1346 + * ice_copy_rxq_ctx_from_hw - Copy packed Rx Queue context from HW registers 1347 + * @hw: pointer to the hardware structure 1348 + * @rxq_ctx: pointer to the packed Rx queue context 1349 + * @rxq_index: the index of the Rx queue 1350 + */ 1351 + static void ice_copy_rxq_ctx_from_hw(struct ice_hw *hw, 1352 + ice_rxq_ctx_buf_t *rxq_ctx, 1353 + u32 rxq_index) 1354 + { 1355 + u32 *ctx = (u32 *)rxq_ctx; 1356 + 1357 + /* Copy each dword separately from HW */ 1358 + for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++, ctx++) { 1359 + *ctx = rd32(hw, QRX_CONTEXT(i, rxq_index)); 1360 + 1361 + ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx); 1362 + } 1363 + } 1364 + 1345 1365 #define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \ 1346 1366 PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field) 1347 1367 ··· 1406 1386 } 1407 1387 1408 1388 /** 1389 + * ice_unpack_rxq_ctx - Unpack Rx queue context from a HW buffer 1390 + * @buf: the HW buffer to unpack from 1391 + * @ctx: the Rx queue context to unpack 1392 + * 1393 + * Unpack the Rx queue context from the HW buffer into the CPU-friendly 1394 + * structure. 
1395 + */ 1396 + static void ice_unpack_rxq_ctx(const ice_rxq_ctx_buf_t *buf, 1397 + struct ice_rlan_ctx *ctx) 1398 + { 1399 + unpack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields, 1400 + QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST); 1401 + } 1402 + 1403 + /** 1409 1404 * ice_write_rxq_ctx - Write Rx Queue context to hardware 1410 1405 * @hw: pointer to the hardware structure 1411 1406 * @rlan_ctx: pointer to the unpacked Rx queue context ··· 1441 1406 1442 1407 ice_pack_rxq_ctx(rlan_ctx, &buf); 1443 1408 ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index); 1409 + 1410 + return 0; 1411 + } 1412 + 1413 + /** 1414 + * ice_read_rxq_ctx - Read Rx queue context from HW 1415 + * @hw: pointer to the hardware structure 1416 + * @rlan_ctx: pointer to the Rx queue context 1417 + * @rxq_index: the index of the Rx queue 1418 + * 1419 + * Read the Rx queue context from the hardware registers, and unpack it into 1420 + * the sparse Rx queue context structure. 1421 + * 1422 + * Returns: 0 on success, or -EINVAL if the Rx queue index is invalid. 1423 + */ 1424 + int ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, 1425 + u32 rxq_index) 1426 + { 1427 + ice_rxq_ctx_buf_t buf = {}; 1428 + 1429 + if (rxq_index > QRX_CTRL_MAX_INDEX) 1430 + return -EINVAL; 1431 + 1432 + ice_copy_rxq_ctx_from_hw(hw, &buf, rxq_index); 1433 + ice_unpack_rxq_ctx(&buf, rlan_ctx); 1444 1434 1445 1435 return 0; 1446 1436 } ··· 1503 1443 }; 1504 1444 1505 1445 /** 1506 - * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer 1446 + * ice_pack_txq_ctx - Pack Tx queue context into Admin Queue buffer 1507 1447 * @ctx: the Tx queue context to pack 1508 - * @buf: the HW buffer to pack into 1448 + * @buf: the Admin Queue HW buffer to pack into 1509 1449 * 1510 1450 * Pack the Tx queue context from the CPU-friendly unpacked buffer into its 1511 - * bit-packed HW layout. 1451 + * bit-packed Admin Queue layout. 
1512 1452 */ 1513 1453 void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf) 1514 1454 { 1515 1455 pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields, 1516 1456 QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST); 1457 + } 1458 + 1459 + /** 1460 + * ice_pack_txq_ctx_full - Pack Tx queue context into a HW buffer 1461 + * @ctx: the Tx queue context to pack 1462 + * @buf: the HW buffer to pack into 1463 + * 1464 + * Pack the Tx queue context from the CPU-friendly unpacked buffer into its 1465 + * bit-packed HW layout, including the internal data portion. 1466 + */ 1467 + static void ice_pack_txq_ctx_full(const struct ice_tlan_ctx *ctx, 1468 + ice_txq_ctx_buf_full_t *buf) 1469 + { 1470 + pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields, 1471 + QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST); 1472 + } 1473 + 1474 + /** 1475 + * ice_unpack_txq_ctx_full - Unpack Tx queue context from a HW buffer 1476 + * @buf: the HW buffer to unpack from 1477 + * @ctx: the Tx queue context to unpack 1478 + * 1479 + * Unpack the Tx queue context from the HW buffer (including the full internal 1480 + * state) into the CPU-friendly structure. 
1481 + */ 1482 + static void ice_unpack_txq_ctx_full(const ice_txq_ctx_buf_full_t *buf, 1483 + struct ice_tlan_ctx *ctx) 1484 + { 1485 + unpack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields, 1486 + QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST); 1487 + } 1488 + 1489 + /** 1490 + * ice_copy_txq_ctx_from_hw - Copy Tx Queue context from HW registers 1491 + * @hw: pointer to the hardware structure 1492 + * @txq_ctx: pointer to the packed Tx queue context, including internal state 1493 + * @txq_index: the index of the Tx queue 1494 + * 1495 + * Copy Tx Queue context from HW register space to dense structure 1496 + */ 1497 + static void ice_copy_txq_ctx_from_hw(struct ice_hw *hw, 1498 + ice_txq_ctx_buf_full_t *txq_ctx, 1499 + u32 txq_index) 1500 + { 1501 + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); 1502 + u32 *ctx = (u32 *)txq_ctx; 1503 + u32 txq_base, reg; 1504 + 1505 + /* Get Tx queue base within card space */ 1506 + txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id)); 1507 + txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base); 1508 + 1509 + reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M, 1510 + GLCOMM_QTX_CNTX_CTL_CMD_READ) | 1511 + FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M, 1512 + txq_base + txq_index) | 1513 + GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M; 1514 + 1515 + /* Prevent other PFs on the same adapter from accessing the Tx queue 1516 + * context interface concurrently. 
1517 + */ 1518 + spin_lock(&pf->adapter->txq_ctx_lock); 1519 + 1520 + wr32(hw, GLCOMM_QTX_CNTX_CTL, reg); 1521 + ice_flush(hw); 1522 + 1523 + /* Copy each dword separately from HW */ 1524 + for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++, ctx++) { 1525 + *ctx = rd32(hw, GLCOMM_QTX_CNTX_DATA(i)); 1526 + 1527 + ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, *ctx); 1528 + } 1529 + 1530 + spin_unlock(&pf->adapter->txq_ctx_lock); 1531 + } 1532 + 1533 + /** 1534 + * ice_copy_txq_ctx_to_hw - Copy Tx Queue context into HW registers 1535 + * @hw: pointer to the hardware structure 1536 + * @txq_ctx: pointer to the packed Tx queue context, including internal state 1537 + * @txq_index: the index of the Tx queue 1538 + */ 1539 + static void ice_copy_txq_ctx_to_hw(struct ice_hw *hw, 1540 + const ice_txq_ctx_buf_full_t *txq_ctx, 1541 + u32 txq_index) 1542 + { 1543 + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); 1544 + u32 txq_base, reg; 1545 + 1546 + /* Get Tx queue base within card space */ 1547 + txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id)); 1548 + txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base); 1549 + 1550 + reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M, 1551 + GLCOMM_QTX_CNTX_CTL_CMD_WRITE_NO_DYN) | 1552 + FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M, 1553 + txq_base + txq_index) | 1554 + GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M; 1555 + 1556 + /* Prevent other PFs on the same adapter from accessing the Tx queue 1557 + * context interface concurrently. 
1558 + */ 1559 + spin_lock(&pf->adapter->txq_ctx_lock); 1560 + 1561 + /* Copy each dword separately to HW */ 1562 + for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++) { 1563 + u32 ctx = ((const u32 *)txq_ctx)[i]; 1564 + 1565 + wr32(hw, GLCOMM_QTX_CNTX_DATA(i), ctx); 1566 + 1567 + ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, ctx); 1568 + } 1569 + 1570 + wr32(hw, GLCOMM_QTX_CNTX_CTL, reg); 1571 + ice_flush(hw); 1572 + 1573 + spin_unlock(&pf->adapter->txq_ctx_lock); 1574 + } 1575 + 1576 + /** 1577 + * ice_read_txq_ctx - Read Tx queue context from HW 1578 + * @hw: pointer to the hardware structure 1579 + * @tlan_ctx: pointer to the Tx queue context 1580 + * @txq_index: the index of the Tx queue 1581 + * 1582 + * Read the Tx queue context from the HW registers, then unpack it into the 1583 + * ice_tlan_ctx structure for use. 1584 + * 1585 + * Returns: 0 on success, or -EINVAL on an invalid Tx queue index. 1586 + */ 1587 + int ice_read_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx, 1588 + u32 txq_index) 1589 + { 1590 + ice_txq_ctx_buf_full_t buf = {}; 1591 + 1592 + if (txq_index > QTX_COMM_HEAD_MAX_INDEX) 1593 + return -EINVAL; 1594 + 1595 + ice_copy_txq_ctx_from_hw(hw, &buf, txq_index); 1596 + ice_unpack_txq_ctx_full(&buf, tlan_ctx); 1597 + 1598 + return 0; 1599 + } 1600 + 1601 + /** 1602 + * ice_write_txq_ctx - Write Tx queue context to HW 1603 + * @hw: pointer to the hardware structure 1604 + * @tlan_ctx: pointer to the Tx queue context 1605 + * @txq_index: the index of the Tx queue 1606 + * 1607 + * Pack the Tx queue context into the dense HW layout, then write it into the 1608 + * HW registers. 1609 + * 1610 + * Returns: 0 on success, or -EINVAL on an invalid Tx queue index. 
1611 + */ 1612 + int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx, 1613 + u32 txq_index) 1614 + { 1615 + ice_txq_ctx_buf_full_t buf = {}; 1616 + 1617 + if (txq_index > QTX_COMM_HEAD_MAX_INDEX) 1618 + return -EINVAL; 1619 + 1620 + ice_pack_txq_ctx_full(tlan_ctx, &buf); 1621 + ice_copy_txq_ctx_to_hw(hw, &buf, txq_index); 1622 + 1623 + return 0; 1517 1624 } 1518 1625 1519 1626 /* Sideband Queue command wrappers */
+6
drivers/net/ethernet/intel/ice/ice_common.h
··· 118 118 119 119 int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, 120 120 u32 rxq_index); 121 + int ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, 122 + u32 rxq_index); 123 + int ice_read_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx, 124 + u32 txq_index); 125 + int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx, 126 + u32 txq_index); 121 127 122 128 int 123 129 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
+12
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
··· 16 16 #define GLCOMM_QUANTA_PROF_MAX_DESC_M ICE_M(0x3F, 24) 17 17 #define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) 18 18 #define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4)) 19 + #define QTX_COMM_HEAD_MAX_INDEX 16383 19 20 #define QTX_COMM_HEAD_HEAD_S 0 20 21 #define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0) 21 22 #define PF_FW_ARQBAH 0x00080180 ··· 273 272 #define VPINT_ALLOC_PCI_VALID_M BIT(31) 274 273 #define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4)) 275 274 #define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30) 275 + #define PFLAN_TX_QALLOC(_PF) (0x001D2580 + ((_PF) * 4)) 276 + #define PFLAN_TX_QALLOC_FIRSTQ_M GENMASK(13, 0) 276 277 #define GLLAN_RCTL_0 0x002941F8 277 278 #define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4)) 278 279 #define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4)) ··· 379 376 #define GLNVM_ULD_POR_DONE_1_M BIT(8) 380 377 #define GLNVM_ULD_PCIER_DONE_2_M BIT(9) 381 378 #define GLNVM_ULD_PE_DONE_M BIT(10) 379 + #define GLCOMM_QTX_CNTX_CTL 0x002D2DC8 380 + #define GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M GENMASK(13, 0) 381 + #define GLCOMM_QTX_CNTX_CTL_CMD_M GENMASK(18, 16) 382 + #define GLCOMM_QTX_CNTX_CTL_CMD_READ 0 383 + #define GLCOMM_QTX_CNTX_CTL_CMD_WRITE 1 384 + #define GLCOMM_QTX_CNTX_CTL_CMD_RESET 3 385 + #define GLCOMM_QTX_CNTX_CTL_CMD_WRITE_NO_DYN 4 386 + #define GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M BIT(19) 387 + #define GLCOMM_QTX_CNTX_DATA(_i) (0x002D2D40 + ((_i) * 4)) 382 388 #define GLPCI_CNF2 0x000BE004 383 389 #define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1) 384 390 #define PF_FUNC_RID 0x0009E880
+35
drivers/net/ethernet/intel/ice/ice_lib.c
··· 4020 4020 vsi->info = ctx.info; 4021 4021 return 0; 4022 4022 } 4023 + 4024 + /** 4025 + * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI 4026 + * @vsi: VSI used to update l2tsel on 4027 + * @l2tsel: l2tsel setting requested 4028 + * 4029 + * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel. 4030 + * This will modify which descriptor field the first offloaded VLAN will be 4031 + * stripped into. 4032 + */ 4033 + void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel) 4034 + { 4035 + struct ice_hw *hw = &vsi->back->hw; 4036 + u32 l2tsel_bit; 4037 + int i; 4038 + 4039 + if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND) 4040 + l2tsel_bit = 0; 4041 + else 4042 + l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET); 4043 + 4044 + for (i = 0; i < vsi->alloc_rxq; i++) { 4045 + u16 pfq = vsi->rxq_map[i]; 4046 + u32 qrx_context_offset; 4047 + u32 regval; 4048 + 4049 + qrx_context_offset = 4050 + QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq); 4051 + 4052 + regval = rd32(hw, qrx_context_offset); 4053 + regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET); 4054 + regval |= l2tsel_bit; 4055 + wr32(hw, qrx_context_offset, regval); 4056 + } 4057 + }
+8
drivers/net/ethernet/intel/ice/ice_lib.h
··· 11 11 #define ICE_VSI_FLAG_INIT BIT(0) 12 12 #define ICE_VSI_FLAG_NO_INIT 0 13 13 14 + #define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3 15 + #define ICE_L2TSEL_BIT_OFFSET 23 16 + enum ice_l2tsel { 17 + ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND, 18 + ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1, 19 + }; 20 + 14 21 const char *ice_vsi_type_str(enum ice_vsi_type vsi_type); 15 22 16 23 bool ice_pf_state_is_nominal(struct ice_pf *pf); ··· 123 116 void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f); 124 117 void ice_init_feature_support(struct ice_pf *pf); 125 118 bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi); 119 + void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel); 126 120 #endif /* !_ICE_LIB_H_ */
+7 -12
drivers/net/ethernet/intel/ice/ice_sriov.c
··· 933 933 bool needs_rebuild = false; 934 934 struct ice_vsi *vsi; 935 935 struct ice_vf *vf; 936 - int id; 937 936 938 937 if (!ice_get_num_vfs(pf)) 939 938 return -ENOENT; ··· 951 952 if (msix_vec_count < ICE_MIN_INTR_PER_VF) 952 953 return -EINVAL; 953 954 954 - /* Transition of PCI VF function number to function_id */ 955 - for (id = 0; id < pci_num_vf(pdev); id++) { 956 - if (vf_dev->devfn == pci_iov_virtfn_devfn(pdev, id)) 957 - break; 958 - } 959 - 960 - if (id == pci_num_vf(pdev)) 961 - return -ENOENT; 962 - 963 - vf = ice_get_vf_by_id(pf, id); 964 - 955 + vf = ice_get_vf_by_dev(pf, vf_dev); 965 956 if (!vf) 966 957 return -ENOENT; 967 958 ··· 959 970 if (!vsi) { 960 971 ice_put_vf(vf); 961 972 return -ENOENT; 973 + } 974 + 975 + /* No need to rebuild if we're setting to the same value */ 976 + if (msix_vec_count == vf->num_msix) { 977 + ice_put_vf(vf); 978 + return 0; 962 979 } 963 980 964 981 prev_msix = vf->num_msix;
+7
drivers/net/ethernet/intel/ice/ice_sriov.h
··· 64 64 ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto); 65 65 u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev); 66 66 int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count); 67 + int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id); 67 68 #else /* CONFIG_PCI_IOV */ 68 69 static inline void ice_process_vflr_event(struct ice_pf *pf) { } 69 70 static inline void ice_free_vfs(struct ice_pf *pf) { } ··· 162 161 163 162 static inline int 164 163 ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) 164 + { 165 + return -EOPNOTSUPP; 166 + } 167 + 168 + static inline int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, 169 + struct ice_vsi *vsi, u16 q_id) 165 170 { 166 171 return -EOPNOTSUPP; 167 172 }
+3
drivers/net/ethernet/intel/ice/ice_vf_lib.c
··· 1022 1022 vf->num_msix = vfs->num_msix_per; 1023 1023 vf->num_vf_qs = vfs->num_qps_per; 1024 1024 1025 + /* set default RSS hash configuration */ 1026 + vf->rss_hashcfg = ICE_DEFAULT_RSS_HASHCFG; 1027 + 1025 1028 /* ctrl_vsi_idx will be set to a valid value only when iAVF 1026 1029 * creates its first fdir rule. 1027 1030 */
+23 -3
drivers/net/ethernet/intel/ice/ice_vf_lib.h
··· 106 106 u16 ctrl_vsi_idx; 107 107 struct ice_vf_fdir fdir; 108 108 struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS]; 109 - /* first vector index of this VF in the PF space */ 110 - int first_vector_idx; 109 + u64 rss_hashcfg; /* RSS hash configuration */ 111 110 struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ 112 111 struct virtchnl_version_info vf_ver; 113 112 u32 driver_caps; /* reported by VF driver */ ··· 125 126 u8 link_up:1; /* only valid if VF link is forced */ 126 127 u8 lldp_tx_ena:1; 127 128 129 + u16 num_msix; /* num of MSI-X configured on this VF */ 130 + 128 131 u32 ptp_caps; 129 132 130 133 unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */ 131 134 unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */ 135 + /* first vector index of this VF in the PF space */ 136 + int first_vector_idx; 132 137 DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ 133 138 134 139 unsigned long vf_caps; /* VF's adv. capabilities */ ··· 157 154 u16 lldp_recipe_id; 158 155 u16 lldp_rule_id; 159 156 160 - u16 num_msix; /* num of MSI-X configured on this VF */ 161 157 struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF]; 162 158 }; 163 159 ··· 239 237 240 238 #ifdef CONFIG_PCI_IOV 241 239 struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id); 240 + 241 + static inline struct ice_vf *ice_get_vf_by_dev(struct ice_pf *pf, 242 + struct pci_dev *vf_dev) 243 + { 244 + int vf_id = pci_iov_vf_id(vf_dev); 245 + 246 + if (vf_id < 0) 247 + return NULL; 248 + 249 + return ice_get_vf_by_id(pf, vf_id); 250 + } 251 + 242 252 void ice_put_vf(struct ice_vf *vf); 243 253 bool ice_has_vfs(struct ice_pf *pf); 244 254 u16 ice_get_num_vfs(struct ice_pf *pf); ··· 273 259 bool incr); 274 260 #else /* CONFIG_PCI_IOV */ 275 261 static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id) 262 + { 263 + return NULL; 264 + } 265 + 266 + static inline struct ice_vf *ice_get_vf_by_dev(struct ice_pf
*pf, 267 + struct pci_dev *vf_dev) 276 268 { 277 269 return NULL; 278 270 }
+10 -49
drivers/net/ethernet/intel/ice/ice_virtchnl.c
··· 1427 1427 * @vsi: VSI of the VF to configure 1428 1428 * @q_idx: VF queue index used to determine the queue in the PF's space 1429 1429 */ 1430 - static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx) 1430 + void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx) 1431 1431 { 1432 1432 struct ice_hw *hw = &vsi->back->hw; 1433 1433 u32 pfq = vsi->txq_map[q_idx]; ··· 1450 1450 * @vsi: VSI of the VF to configure 1451 1451 * @q_idx: VF queue index used to determine the queue in the PF's space 1452 1452 */ 1453 - static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx) 1453 + void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx) 1454 1454 { 1455 1455 struct ice_hw *hw = &vsi->back->hw; 1456 1456 u32 pfq = vsi->rxq_map[q_idx]; ··· 1566 1566 * disabled then clear q_id bit in the enabled queues bitmap and return 1567 1567 * success. Otherwise return error. 1568 1568 */ 1569 - static int 1570 - ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id) 1569 + int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id) 1571 1570 { 1572 1571 struct ice_txq_meta txq_meta = { 0 }; 1573 1572 struct ice_tx_ring *ring; ··· 2620 2621 * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed 2621 2622 * @vf: VF used to determine if VLAN promiscuous config is allowed 2622 2623 */ 2623 - static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf) 2624 + bool ice_is_vlan_promisc_allowed(struct ice_vf *vf) 2624 2625 { 2625 2626 if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || 2626 2627 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) && ··· 2639 2640 * This function should only be called if VLAN promiscuous mode is allowed, 2640 2641 * which can be determined via ice_is_vlan_promisc_allowed(). 
2641 2642 */ 2642 - static int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi, 2643 - struct ice_vlan *vlan) 2643 + int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi, 2644 + struct ice_vlan *vlan) 2644 2645 { 2645 2646 u8 promisc_m = 0; 2646 2647 int status; ··· 3092 3093 status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hashcfg); 3093 3094 v_ret = ice_err_to_virt_err(status); 3094 3095 } 3096 + 3097 + /* save the requested VF configuration */ 3098 + if (!v_ret) 3099 + vf->rss_hashcfg = vrh->hashcfg; 3095 3100 3096 3101 /* send the response to the VF */ 3097 3102 err: ··· 3857 3854 return err; 3858 3855 3859 3856 return 0; 3860 - } 3861 - 3862 - #define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3 3863 - #define ICE_L2TSEL_BIT_OFFSET 23 3864 - enum ice_l2tsel { 3865 - ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND, 3866 - ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1, 3867 - }; 3868 - 3869 - /** 3870 - * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI 3871 - * @vsi: VSI used to update l2tsel on 3872 - * @l2tsel: l2tsel setting requested 3873 - * 3874 - * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel. 3875 - * This will modify which descriptor field the first offloaded VLAN will be 3876 - * stripped into. 
3877 - */ 3878 - static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel) 3879 - { 3880 - struct ice_hw *hw = &vsi->back->hw; 3881 - u32 l2tsel_bit; 3882 - int i; 3883 - 3884 - if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND) 3885 - l2tsel_bit = 0; 3886 - else 3887 - l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET); 3888 - 3889 - for (i = 0; i < vsi->alloc_rxq; i++) { 3890 - u16 pfq = vsi->rxq_map[i]; 3891 - u32 qrx_context_offset; 3892 - u32 regval; 3893 - 3894 - qrx_context_offset = 3895 - QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq); 3896 - 3897 - regval = rd32(hw, qrx_context_offset); 3898 - regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET); 3899 - regval |= l2tsel_bit; 3900 - wr32(hw, qrx_context_offset, regval); 3901 - } 3902 3857 } 3903 3858 3904 3859 /**
+19
drivers/net/ethernet/intel/ice/ice_virtchnl.h
··· 92 92 bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id); 93 93 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event, 94 94 struct ice_mbx_data *mbxdata); 95 + void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx); 96 + void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx); 97 + int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi, 98 + struct ice_vlan *vlan); 99 + bool ice_is_vlan_promisc_allowed(struct ice_vf *vf); 95 100 #else /* CONFIG_PCI_IOV */ 96 101 static inline void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) { } 97 102 static inline void ice_virtchnl_set_repr_ops(struct ice_vf *vf) { } 98 103 static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { } 99 104 static inline void ice_vc_notify_link_state(struct ice_pf *pf) { } 100 105 static inline void ice_vc_notify_reset(struct ice_pf *pf) { } 106 + static inline void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx) { } 107 + static inline void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx) { } 108 + 109 + static inline int ice_vf_ena_vlan_promisc(struct ice_vf *vf, 110 + struct ice_vsi *vsi, 111 + struct ice_vlan *vlan) 112 + { 113 + return -EOPNOTSUPP; 114 + } 115 + 116 + static inline bool ice_is_vlan_promisc_allowed(struct ice_vf *vf) 117 + { 118 + return false; 119 + } 101 120 102 121 static inline int 103 122 ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,