Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ice: add E830 Earliest TxTime First Offload support

E830 supports Earliest TxTime First (ETF) hardware offload, which is
configured via the ETF Qdisc on a per-queue basis (see tc-etf(8)). ETF
introduces a new Tx flow mechanism that utilizes a timestamp ring
(tstamp_ring) alongside the standard Tx ring. This timestamp ring is
used to indicate when hardware will transmit a packet. Tx Time is
supported on the first 2048 Tx queues of the device, and the NVM image
limits the maximum number of Tx queues to 2048 for the device.

The allocation and initialization of the timestamp ring occur when the
feature is enabled on a specific Tx queue via tc-etf. The requested Tx
Time queue index must be less than the number of Tx queues
(vsi->num_txq).

To support ETF, the following flags and bitmap are introduced:

- ICE_F_TXTIME: Device feature flag set for E830 NICs, indicating ETF
support.
- txtime_txqs: PF-level bitmap set when ETF is enabled and cleared
when disabled for a specific Tx queue. It is used by
ice_is_txtime_ena() to check if ETF is allocated and configured on
any Tx queue, which is checked during Tx ring allocation.
- ICE_TX_FLAGS_TXTIME: Per Tx ring flag set when ETF is allocated and
configured for a specific Tx queue. It determines ETF status during
packet transmission and is checked by ice_is_txtime_cfg() to verify
if ETF is configured on the Tx ring.

Due to a hardware issue that can result in a malicious driver detection
event, additional timestamp descriptors are required when wrapping
around the timestamp ring. Up to 64 additional timestamp descriptors
are reserved, reducing the available Tx descriptors.

To accommodate this, ICE_MAX_NUM_DESC_BY_MAC is introduced, defining:

- E830: Maximum Tx descriptor count of 8096 (8K - 32 - 64 for timestamp
fetch descriptors).
- E810 and E82X: Maximum Tx descriptor count of 8160 (8K - 32).

Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
Co-developed-by: Alice Michael <alice.michael@intel.com>
Signed-off-by: Alice Michael <alice.michael@intel.com>
Signed-off-by: Paul Greenwalt <paul.greenwalt@intel.com>
Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>

authored by

Paul Greenwalt and committed by
Tony Nguyen
ccde82e9 3b860619

+722 -48
+32 -1
drivers/net/ethernet/intel/ice/ice.h
··· 84 84 #define ICE_BAR0 0 85 85 #define ICE_REQ_DESC_MULTIPLE 32 86 86 #define ICE_MIN_NUM_DESC 64 87 - #define ICE_MAX_NUM_DESC 8160 87 + #define ICE_MAX_NUM_DESC_E810 8160 88 + #define ICE_MAX_NUM_DESC_E830 8096 89 + #define ICE_MAX_NUM_DESC_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \ 90 + ICE_MAX_NUM_DESC_E830 : \ 91 + ICE_MAX_NUM_DESC_E810) 88 92 #define ICE_DFLT_MIN_RX_DESC 512 89 93 #define ICE_DFLT_NUM_TX_DESC 256 90 94 #define ICE_DFLT_NUM_RX_DESC 2048 ··· 204 200 ICE_F_SMA_CTRL, 205 201 ICE_F_CGU, 206 202 ICE_F_GNSS, 203 + ICE_F_TXTIME, 207 204 ICE_F_GCS, 208 205 ICE_F_ROCE_LAG, 209 206 ICE_F_SRIOV_LAG, ··· 580 575 DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS); 581 576 unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */ 582 577 unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */ 578 + unsigned long *txtime_txqs; /* bitmap to track PF Tx Time queue */ 583 579 unsigned long serv_tmr_period; 584 580 unsigned long serv_tmr_prev; 585 581 struct timer_list serv_tmr; ··· 752 746 static inline void ice_set_ring_xdp(struct ice_tx_ring *ring) 753 747 { 754 748 ring->flags |= ICE_TX_FLAGS_RING_XDP; 749 + } 750 + 751 + /** 752 + * ice_is_txtime_ena - check if Tx Time is enabled on the Tx ring 753 + * @ring: pointer to Tx ring 754 + * 755 + * Return: true if the Tx ring has Tx Time enabled, false otherwise. 756 + */ 757 + static inline bool ice_is_txtime_ena(const struct ice_tx_ring *ring) 758 + { 759 + struct ice_vsi *vsi = ring->vsi; 760 + struct ice_pf *pf = vsi->back; 761 + 762 + return test_bit(ring->q_index, pf->txtime_txqs); 763 + } 764 + 765 + /** 766 + * ice_is_txtime_cfg - check if Tx Time is configured on the Tx ring 767 + * @ring: pointer to Tx ring 768 + * 769 + * Return: true if the Tx ring is configured for Tx ring, false otherwise. 770 + */ 771 + static inline bool ice_is_txtime_cfg(const struct ice_tx_ring *ring) 772 + { 773 + return !!(ring->flags & ICE_TX_FLAGS_TXTIME); 755 774 } 756 775 757 776 /**
+35
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
··· 33 33 34 34 typedef struct __packed { u8 buf[ICE_TXQ_CTX_FULL_SZ]; } ice_txq_ctx_buf_full_t; 35 35 36 + #define ICE_TXTIME_CTX_SZ 25 37 + 38 + typedef struct __packed { u8 buf[ICE_TXTIME_CTX_SZ]; } ice_txtime_ctx_buf_t; 39 + 36 40 /* Queue Shutdown (direct 0x0003) */ 37 41 struct ice_aqc_q_shutdown { 38 42 u8 driver_unloading; ··· 2121 2117 struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[]; 2122 2118 }; 2123 2119 2120 + /* Set Tx Time LAN Queue (indirect 0x0C35) */ 2121 + struct ice_aqc_set_txtimeqs { 2122 + __le16 q_id; 2123 + __le16 q_amount; 2124 + u8 reserved[4]; 2125 + __le32 addr_high; 2126 + __le32 addr_low; 2127 + }; 2128 + 2129 + /* This is the descriptor of each queue entry for the Set Tx Time Queue 2130 + * command (0x0C35). Only used within struct ice_aqc_set_txtime_qgrp. 2131 + */ 2132 + struct ice_aqc_set_txtimeqs_perq { 2133 + u8 reserved[4]; 2134 + ice_txtime_ctx_buf_t txtime_ctx; 2135 + u8 reserved1[3]; 2136 + }; 2137 + 2138 + /* The format of the command buffer for Set Tx Time Queue (0x0C35) 2139 + * is an array of the following structs. Please note that the length of 2140 + * each struct ice_aqc_set_txtime_qgrp is variable due to the variable 2141 + * number of queues in each group! 2142 + */ 2143 + struct ice_aqc_set_txtime_qgrp { 2144 + u8 reserved[8]; 2145 + struct ice_aqc_set_txtimeqs_perq txtimeqs[]; 2146 + }; 2147 + 2124 2148 /* Download Package (indirect 0x0C40) */ 2125 2149 /* Also used for Update Package (indirect 0x0C41 and 0x0C42) */ 2126 2150 struct ice_aqc_download_pkg { ··· 2645 2613 ice_aqc_opc_dis_txqs = 0x0C31, 2646 2614 ice_aqc_opc_cfg_txqs = 0x0C32, 2647 2615 ice_aqc_opc_add_rdma_qset = 0x0C33, 2616 + 2617 + /* Tx Time queue commands */ 2618 + ice_aqc_opc_set_txtimeqs = 0x0C35, 2648 2619 2649 2620 /* package commands */ 2650 2621 ice_aqc_opc_download_pkg = 0x0C40,
+208 -37
drivers/net/ethernet/intel/ice/ice_base.c
··· 242 242 * @ring: ring to get the absolute queue index 243 243 * @tc: traffic class number 244 244 */ 245 - static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc) 245 + static u16 246 + ice_calc_txq_handle(const struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc) 246 247 { 247 248 WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n"); 248 249 ··· 279 278 } 280 279 281 280 /** 282 - * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance 283 - * @ring: The Tx ring to configure 284 - * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized 285 - * @pf_q: queue index in the PF space 281 + * ice_set_txq_ctx_vmvf - set queue context VM/VF type and number by VSI type 282 + * @ring: the Tx ring to configure 283 + * @vmvf_type: VM/VF type 284 + * @vmvf_num: VM/VF number 286 285 * 287 - * Configure the Tx descriptor ring in TLAN context. 286 + * Return: 0 on success and a negative value on error. 288 287 */ 289 - static void 290 - ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) 288 + static int 289 + ice_set_txq_ctx_vmvf(struct ice_tx_ring *ring, u8 *vmvf_type, u16 *vmvf_num) 291 290 { 292 291 struct ice_vsi *vsi = ring->vsi; 293 - struct ice_hw *hw = &vsi->back->hw; 292 + struct ice_hw *hw; 294 293 295 - tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; 296 - 297 - tlan_ctx->port_num = vsi->port_info->lport; 298 - 299 - /* Transmit Queue Length */ 300 - tlan_ctx->qlen = ring->count; 301 - 302 - ice_set_cgd_num(tlan_ctx, ring->dcb_tc); 303 - 304 - /* PF number */ 305 - tlan_ctx->pf_num = hw->pf_id; 294 + hw = &vsi->back->hw; 306 295 307 296 /* queue belongs to a specific VSI type 308 297 * VF / VM index should be programmed per vmvf_type setting: ··· 305 314 case ICE_VSI_CTRL: 306 315 case ICE_VSI_PF: 307 316 if (ring->ch) 308 - tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ; 317 + *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ; 309 318 else 310 - 
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; 319 + *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; 311 320 break; 312 321 case ICE_VSI_VF: 313 322 /* Firmware expects vmvf_num to be absolute VF ID */ 314 - tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id; 315 - tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; 323 + *vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id; 324 + *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; 316 325 break; 317 326 case ICE_VSI_SF: 318 - tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ; 327 + *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ; 319 328 break; 320 329 default: 321 - return; 330 + dev_info(ice_pf_to_dev(vsi->back), 331 + "Unable to set VMVF type for VSI type %d\n", 332 + vsi->type); 333 + return -EINVAL; 322 334 } 335 + return 0; 336 + } 337 + 338 + /** 339 + * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance 340 + * @ring: the Tx ring to configure 341 + * @tlan_ctx: pointer to the Tx LAN queue context structure to be initialized 342 + * @pf_q: queue index in the PF space 343 + * 344 + * Configure the Tx descriptor ring in TLAN context. 345 + * 346 + * Return: 0 on success and a negative value on error. 
347 + */ 348 + static int 349 + ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) 350 + { 351 + struct ice_vsi *vsi = ring->vsi; 352 + struct ice_hw *hw; 353 + int err; 354 + 355 + hw = &vsi->back->hw; 356 + tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; 357 + tlan_ctx->port_num = vsi->port_info->lport; 358 + 359 + /* Transmit Queue Length */ 360 + tlan_ctx->qlen = ring->count; 361 + 362 + ice_set_cgd_num(tlan_ctx, ring->dcb_tc); 363 + 364 + /* PF number */ 365 + tlan_ctx->pf_num = hw->pf_id; 366 + 367 + err = ice_set_txq_ctx_vmvf(ring, &tlan_ctx->vmvf_type, 368 + &tlan_ctx->vmvf_num); 369 + if (err) 370 + return err; 323 371 324 372 /* make sure the context is associated with the right VSI */ 325 373 if (ring->ch) ··· 385 355 * 1: Legacy Host Interface 386 356 */ 387 357 tlan_ctx->legacy_int = ICE_TX_LEGACY; 358 + 359 + return 0; 360 + } 361 + 362 + /** 363 + * ice_setup_txtime_ctx - setup a struct ice_txtime_ctx instance 364 + * @ring: the tstamp ring to configure 365 + * @txtime_ctx: pointer to the Tx time queue context structure to be initialized 366 + * 367 + * Return: 0 on success and a negative value on error. 
368 + */ 369 + static int 370 + ice_setup_txtime_ctx(const struct ice_tstamp_ring *ring, 371 + struct ice_txtime_ctx *txtime_ctx) 372 + { 373 + struct ice_tx_ring *tx_ring = ring->tx_ring; 374 + struct ice_vsi *vsi = tx_ring->vsi; 375 + struct ice_hw *hw = &vsi->back->hw; 376 + int err; 377 + 378 + txtime_ctx->base = ring->dma >> ICE_TXTIME_CTX_BASE_S; 379 + 380 + /* Tx time Queue Length */ 381 + txtime_ctx->qlen = ring->count; 382 + txtime_ctx->txtime_ena_q = 1; 383 + 384 + /* PF number */ 385 + txtime_ctx->pf_num = hw->pf_id; 386 + 387 + err = ice_set_txq_ctx_vmvf(tx_ring, &txtime_ctx->vmvf_type, 388 + &txtime_ctx->vmvf_num); 389 + if (err) 390 + return err; 391 + 392 + /* make sure the context is associated with the right VSI */ 393 + if (tx_ring->ch) 394 + txtime_ctx->src_vsi = tx_ring->ch->vsi_num; 395 + else 396 + txtime_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx); 397 + 398 + txtime_ctx->ts_res = ICE_TXTIME_CTX_RESOLUTION_128NS; 399 + txtime_ctx->drbell_mode_32 = ICE_TXTIME_CTX_DRBELL_MODE_32; 400 + txtime_ctx->ts_fetch_prof_id = ICE_TXTIME_CTX_FETCH_PROF_ID_0; 401 + 402 + return 0; 403 + } 404 + 405 + /** 406 + * ice_calc_ts_ring_count - calculate the number of Tx time stamp descriptors 407 + * @tx_ring: Tx ring to calculate the count for 408 + * 409 + * Return: the number of Tx time stamp descriptors. 
410 + */ 411 + u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring) 412 + { 413 + u16 prof = ICE_TXTIME_CTX_FETCH_PROF_ID_0; 414 + struct ice_vsi *vsi = tx_ring->vsi; 415 + struct ice_hw *hw = &vsi->back->hw; 416 + u16 max_fetch_desc = 0, fetch, i; 417 + u32 reg; 418 + 419 + for (i = 0; i < ICE_TXTIME_FETCH_PROFILE_CNT; i++) { 420 + reg = rd32(hw, E830_GLTXTIME_FETCH_PROFILE(prof, 0)); 421 + fetch = FIELD_GET(E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M, 422 + reg); 423 + max_fetch_desc = max(fetch, max_fetch_desc); 424 + } 425 + 426 + if (!max_fetch_desc) 427 + max_fetch_desc = ICE_TXTIME_FETCH_TS_DESC_DFLT; 428 + 429 + max_fetch_desc = ALIGN(max_fetch_desc, ICE_REQ_DESC_MULTIPLE); 430 + 431 + return tx_ring->count + max_fetch_desc; 388 432 } 389 433 390 434 /** ··· 986 882 } 987 883 988 884 /** 885 + * ice_cfg_tstamp - Configure Tx time stamp queue 886 + * @tx_ring: Tx ring to be configured with timestamping 887 + * 888 + * Return: 0 on success and a negative value on error. 889 + */ 890 + static int 891 + ice_cfg_tstamp(struct ice_tx_ring *tx_ring) 892 + { 893 + DEFINE_RAW_FLEX(struct ice_aqc_set_txtime_qgrp, txtime_qg_buf, 894 + txtimeqs, 1); 895 + u8 txtime_buf_len = struct_size(txtime_qg_buf, txtimeqs, 1); 896 + struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring; 897 + struct ice_txtime_ctx txtime_ctx = {}; 898 + struct ice_vsi *vsi = tx_ring->vsi; 899 + struct ice_pf *pf = vsi->back; 900 + struct ice_hw *hw = &pf->hw; 901 + u16 pf_q = tx_ring->reg_idx; 902 + int err; 903 + 904 + err = ice_setup_txtime_ctx(tstamp_ring, &txtime_ctx); 905 + if (err) { 906 + dev_err(ice_pf_to_dev(pf), "Failed to setup Tx time queue context for queue %d, error: %d\n", 907 + pf_q, err); 908 + return err; 909 + } 910 + ice_pack_txtime_ctx(&txtime_ctx, 911 + &txtime_qg_buf->txtimeqs[0].txtime_ctx); 912 + 913 + tstamp_ring->tail = hw->hw_addr + E830_GLQTX_TXTIME_DBELL_LSB(pf_q); 914 + return ice_aq_set_txtimeq(hw, pf_q, 1, txtime_qg_buf, 915 + txtime_buf_len, NULL); 916 + } 
917 + 918 + /** 989 919 * ice_vsi_cfg_txq - Configure single Tx queue 990 920 * @vsi: the VSI that queue belongs to 991 921 * @ring: Tx ring to be configured 992 922 * @qg_buf: queue group buffer 923 + * 924 + * Return: 0 on success and a negative value on error. 993 925 */ 994 926 static int 995 - ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, 927 + ice_vsi_cfg_txq(const struct ice_vsi *vsi, struct ice_tx_ring *ring, 996 928 struct ice_aqc_add_tx_qgrp *qg_buf) 997 929 { 998 930 u8 buf_len = struct_size(qg_buf, txqs, 1); ··· 1037 897 struct ice_channel *ch = ring->ch; 1038 898 struct ice_pf *pf = vsi->back; 1039 899 struct ice_hw *hw = &pf->hw; 900 + u32 pf_q, vsi_idx; 1040 901 int status; 1041 - u16 pf_q; 1042 902 u8 tc; 1043 903 1044 904 /* Configure XPS */ 1045 905 ice_cfg_xps_tx_ring(ring); 1046 906 1047 907 pf_q = ring->reg_idx; 1048 - ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); 908 + status = ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); 909 + if (status) { 910 + dev_err(ice_pf_to_dev(pf), "Failed to setup Tx context for queue %d, error: %d\n", 911 + pf_q, status); 912 + return status; 913 + } 1049 914 /* copy context contents into the qg_buf */ 1050 915 qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); 1051 916 ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx); ··· 1070 925 */ 1071 926 ring->q_handle = ice_calc_txq_handle(vsi, ring, tc); 1072 927 1073 - if (ch) 1074 - status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0, 1075 - ring->q_handle, 1, qg_buf, buf_len, 1076 - NULL); 1077 - else 1078 - status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, 1079 - ring->q_handle, 1, qg_buf, buf_len, 1080 - NULL); 928 + if (ch) { 929 + tc = 0; 930 + vsi_idx = ch->ch_vsi->idx; 931 + } else { 932 + vsi_idx = vsi->idx; 933 + } 934 + 935 + status = ice_ena_vsi_txq(vsi->port_info, vsi_idx, tc, ring->q_handle, 936 + 1, qg_buf, buf_len, NULL); 1081 937 if (status) { 1082 938 dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n", 1083 939 
status); ··· 1093 947 if (pf_q == le16_to_cpu(txq->txq_id)) 1094 948 ring->txq_teid = le32_to_cpu(txq->q_teid); 1095 949 950 + if (ice_is_txtime_ena(ring)) { 951 + status = ice_alloc_setup_tstamp_ring(ring); 952 + if (status) { 953 + dev_err(ice_pf_to_dev(pf), 954 + "Failed to allocate Tx timestamp ring, error: %d\n", 955 + status); 956 + goto err_setup_tstamp; 957 + } 958 + 959 + status = ice_cfg_tstamp(ring); 960 + if (status) { 961 + dev_err(ice_pf_to_dev(pf), "Failed to set Tx Time queue context, error: %d\n", 962 + status); 963 + goto err_cfg_tstamp; 964 + } 965 + } 1096 966 return 0; 967 + 968 + err_cfg_tstamp: 969 + ice_free_tx_tstamp_ring(ring); 970 + err_setup_tstamp: 971 + ice_dis_vsi_txq(vsi->port_info, vsi_idx, tc, 1, &ring->q_handle, 972 + &ring->reg_idx, &ring->txq_teid, ICE_NO_RESET, 973 + tlan_ctx.vmvf_num, NULL); 974 + 975 + return status; 1097 976 } 1098 977 1099 978 int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+1
drivers/net/ethernet/intel/ice/ice_base.h
··· 34 34 struct ice_txq_meta *txq_meta); 35 35 int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx); 36 36 int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx); 37 + u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring); 37 38 #endif /* _ICE_BASE_H_ */
+78
drivers/net/ethernet/intel/ice/ice_common.c
··· 1733 1733 return 0; 1734 1734 } 1735 1735 1736 + /* Tx time Queue Context */ 1737 + static const struct packed_field_u8 ice_txtime_ctx_fields[] = { 1738 + /* Field Width LSB */ 1739 + ICE_CTX_STORE(ice_txtime_ctx, base, 57, 0), 1740 + ICE_CTX_STORE(ice_txtime_ctx, pf_num, 3, 57), 1741 + ICE_CTX_STORE(ice_txtime_ctx, vmvf_num, 10, 60), 1742 + ICE_CTX_STORE(ice_txtime_ctx, vmvf_type, 2, 70), 1743 + ICE_CTX_STORE(ice_txtime_ctx, src_vsi, 10, 72), 1744 + ICE_CTX_STORE(ice_txtime_ctx, cpuid, 8, 82), 1745 + ICE_CTX_STORE(ice_txtime_ctx, tphrd_desc, 1, 90), 1746 + ICE_CTX_STORE(ice_txtime_ctx, qlen, 13, 91), 1747 + ICE_CTX_STORE(ice_txtime_ctx, timer_num, 1, 104), 1748 + ICE_CTX_STORE(ice_txtime_ctx, txtime_ena_q, 1, 105), 1749 + ICE_CTX_STORE(ice_txtime_ctx, drbell_mode_32, 1, 106), 1750 + ICE_CTX_STORE(ice_txtime_ctx, ts_res, 4, 107), 1751 + ICE_CTX_STORE(ice_txtime_ctx, ts_round_type, 2, 111), 1752 + ICE_CTX_STORE(ice_txtime_ctx, ts_pacing_slot, 3, 113), 1753 + ICE_CTX_STORE(ice_txtime_ctx, merging_ena, 1, 116), 1754 + ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_prof_id, 4, 117), 1755 + ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_cache_line_aln_thld, 4, 121), 1756 + ICE_CTX_STORE(ice_txtime_ctx, tx_pipe_delay_mode, 1, 125), 1757 + }; 1758 + 1759 + /** 1760 + * ice_pack_txtime_ctx - pack Tx time queue context into a HW buffer 1761 + * @ctx: the Tx time queue context to pack 1762 + * @buf: the HW buffer to pack into 1763 + * 1764 + * Pack the Tx time queue context from the CPU-friendly unpacked buffer into 1765 + * its bit-packed HW layout. 
1766 + */ 1767 + void ice_pack_txtime_ctx(const struct ice_txtime_ctx *ctx, 1768 + ice_txtime_ctx_buf_t *buf) 1769 + { 1770 + pack_fields(buf, sizeof(*buf), ctx, ice_txtime_ctx_fields, 1771 + QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST); 1772 + } 1773 + 1736 1774 /* Sideband Queue command wrappers */ 1737 1775 1738 1776 /** ··· 4882 4844 cmd->num_qset_grps = num_qset_grps; 4883 4845 4884 4846 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); 4847 + } 4848 + 4849 + /** 4850 + * ice_aq_set_txtimeq - set Tx time queues 4851 + * @hw: pointer to the hardware structure 4852 + * @txtimeq: first Tx time queue id to configure 4853 + * @q_count: number of queues to configure 4854 + * @txtime_qg: queue group to be set 4855 + * @buf_size: size of buffer for indirect command 4856 + * @cd: pointer to command details structure or NULL 4857 + * 4858 + * Set Tx Time queue (0x0C35) 4859 + * Return: 0 on success or negative value on failure. 4860 + */ 4861 + int 4862 + ice_aq_set_txtimeq(struct ice_hw *hw, u16 txtimeq, u8 q_count, 4863 + struct ice_aqc_set_txtime_qgrp *txtime_qg, u16 buf_size, 4864 + struct ice_sq_cd *cd) 4865 + { 4866 + struct ice_aqc_set_txtimeqs *cmd; 4867 + struct libie_aq_desc desc; 4868 + u16 size; 4869 + 4870 + if (!txtime_qg || txtimeq > ICE_TXTIME_MAX_QUEUE || 4871 + q_count < 1 || q_count > ICE_SET_TXTIME_MAX_Q_AMOUNT) 4872 + return -EINVAL; 4873 + 4874 + size = struct_size(txtime_qg, txtimeqs, q_count); 4875 + if (buf_size != size) 4876 + return -EINVAL; 4877 + 4878 + cmd = libie_aq_raw(&desc); 4879 + 4880 + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_txtimeqs); 4881 + 4882 + desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD); 4883 + 4884 + cmd->q_id = cpu_to_le16(txtimeq); 4885 + cmd->q_amount = cpu_to_le16(q_count); 4886 + return ice_aq_send_cmd(hw, &desc, txtime_qg, buf_size, cd); 4885 4887 } 4886 4888 4887 4889 /* End of FW Admin Queue command wrappers */
+6
drivers/net/ethernet/intel/ice/ice_common.h
··· 275 275 void ice_replay_post(struct ice_hw *hw); 276 276 struct ice_q_ctx * 277 277 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); 278 + int 279 + ice_aq_set_txtimeq(struct ice_hw *hw, u16 txtimeq, u8 q_count, 280 + struct ice_aqc_set_txtime_qgrp *txtime_qg, 281 + u16 buf_size, struct ice_sq_cd *cd); 282 + void ice_pack_txtime_ctx(const struct ice_txtime_ctx *ctx, 283 + ice_txtime_ctx_buf_t *buf); 278 284 int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag); 279 285 int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx, 280 286 struct ice_cgu_input_measure *meas,
+9 -5
drivers/net/ethernet/intel/ice/ice_ethtool.c
··· 3147 3147 { 3148 3148 struct ice_netdev_priv *np = netdev_priv(netdev); 3149 3149 struct ice_vsi *vsi = np->vsi; 3150 + struct ice_hw *hw; 3150 3151 3151 - ring->rx_max_pending = ICE_MAX_NUM_DESC; 3152 - ring->tx_max_pending = ICE_MAX_NUM_DESC; 3152 + hw = &vsi->back->hw; 3153 + ring->rx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw); 3154 + ring->tx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw); 3153 3155 if (vsi->tx_rings && vsi->rx_rings) { 3154 3156 ring->rx_pending = vsi->rx_rings[0]->count; 3155 3157 ring->tx_pending = vsi->tx_rings[0]->count; ··· 3179 3177 struct ice_vsi *vsi = np->vsi; 3180 3178 struct ice_pf *pf = vsi->back; 3181 3179 int i, timeout = 50, err = 0; 3180 + struct ice_hw *hw = &pf->hw; 3182 3181 u16 new_rx_cnt, new_tx_cnt; 3183 3182 3184 - if (ring->tx_pending > ICE_MAX_NUM_DESC || 3183 + if (ring->tx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) || 3185 3184 ring->tx_pending < ICE_MIN_NUM_DESC || 3186 - ring->rx_pending > ICE_MAX_NUM_DESC || 3185 + ring->rx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) || 3187 3186 ring->rx_pending < ICE_MIN_NUM_DESC) { 3188 3187 netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n", 3189 3188 ring->tx_pending, ring->rx_pending, 3190 - ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC, 3189 + ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC_BY_MAC(hw), 3191 3190 ICE_REQ_DESC_MULTIPLE); 3192 3191 return -EINVAL; 3193 3192 } ··· 3261 3258 tx_rings[i].count = new_tx_cnt; 3262 3259 tx_rings[i].desc = NULL; 3263 3260 tx_rings[i].tx_buf = NULL; 3261 + tx_rings[i].tstamp_ring = NULL; 3264 3262 tx_rings[i].tx_tstamps = &pf->ptp.port.tx; 3265 3263 err = ice_setup_tx_ring(&tx_rings[i]); 3266 3264 if (err) {
+3
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
··· 19 19 #define QTX_COMM_HEAD_MAX_INDEX 16383 20 20 #define QTX_COMM_HEAD_HEAD_S 0 21 21 #define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0) 22 + #define E830_GLQTX_TXTIME_DBELL_LSB(_DBQM) (0x002E0000 + ((_DBQM) * 8)) 22 23 #define PF_FW_ARQBAH 0x00080180 23 24 #define PF_FW_ARQBAL 0x00080080 24 25 #define PF_FW_ARQH 0x00080380 ··· 572 571 #define E830_PFPTM_SEM_BUSY_M BIT(0) 573 572 #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) 574 573 #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) 574 + #define E830_GLTXTIME_FETCH_PROFILE(_i, _j) (0x002D3500 + ((_i) * 4 + (_j) * 64)) 575 + #define E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M ICE_M(0x1FF, 0) 575 576 #define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000 576 577 #define E830_MBX_VF_DEC_TRIG(_VF) (0x00233800 + (_VF) * 4) 577 578 #define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF) (0x00233000 + (_VF) * 4)
+41
drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
··· 569 569 u8 pkt_shaper_prof_idx; 570 570 }; 571 571 572 + #define ICE_TXTIME_TX_DESC_IDX_M GENMASK(12, 0) 573 + #define ICE_TXTIME_STAMP_M GENMASK(31, 13) 574 + 575 + /* Tx time stamp descriptor */ 576 + struct ice_ts_desc { 577 + __le32 tx_desc_idx_tstamp; 578 + }; 579 + 580 + #define ICE_TS_DESC(R, i) (&(((struct ice_ts_desc *)((R)->desc))[i])) 581 + 582 + #define ICE_TXTIME_MAX_QUEUE 2047 583 + #define ICE_SET_TXTIME_MAX_Q_AMOUNT 127 584 + #define ICE_TXTIME_FETCH_TS_DESC_DFLT 8 585 + #define ICE_TXTIME_FETCH_PROFILE_CNT 16 586 + 587 + /* Tx Time queue context data */ 588 + struct ice_txtime_ctx { 589 + #define ICE_TXTIME_CTX_BASE_S 7 590 + u64 base; /* base is defined in 128-byte units */ 591 + u8 pf_num; 592 + u16 vmvf_num; 593 + u8 vmvf_type; 594 + u16 src_vsi; 595 + u8 cpuid; 596 + u8 tphrd_desc; 597 + u16 qlen; 598 + u8 timer_num; 599 + u8 txtime_ena_q; 600 + u8 drbell_mode_32; 601 + #define ICE_TXTIME_CTX_DRBELL_MODE_32 1 602 + u8 ts_res; 603 + #define ICE_TXTIME_CTX_RESOLUTION_128NS 7 604 + u8 ts_round_type; 605 + u8 ts_pacing_slot; 606 + #define ICE_TXTIME_CTX_FETCH_PROF_ID_0 0 607 + u8 merging_ena; 608 + u8 ts_fetch_prof_id; 609 + u8 ts_fetch_cache_line_aln_thld; 610 + u8 tx_pipe_delay_mode; 611 + }; 612 + 572 613 #endif /* _ICE_LAN_TX_RX_H_ */
+1
drivers/net/ethernet/intel/ice/ice_lib.c
··· 3950 3950 if (pf->hw.mac_type == ICE_MAC_E830) { 3951 3951 ice_set_feature_support(pf, ICE_F_MBX_LIMIT); 3952 3952 ice_set_feature_support(pf, ICE_F_GCS); 3953 + ice_set_feature_support(pf, ICE_F_TXTIME); 3953 3954 } 3954 3955 } 3955 3956
+108 -1
drivers/net/ethernet/intel/ice/ice_main.c
··· 3969 3969 pf->avail_rxqs = NULL; 3970 3970 } 3971 3971 3972 + if (pf->txtime_txqs) { 3973 + bitmap_free(pf->txtime_txqs); 3974 + pf->txtime_txqs = NULL; 3975 + } 3976 + 3972 3977 if (pf->ptp.clock) 3973 3978 ptp_clock_unregister(pf->ptp.clock); 3974 3979 ··· 4064 4059 if (!pf->avail_rxqs) { 4065 4060 bitmap_free(pf->avail_txqs); 4066 4061 pf->avail_txqs = NULL; 4062 + return -ENOMEM; 4063 + } 4064 + 4065 + pf->txtime_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); 4066 + if (!pf->txtime_txqs) { 4067 + bitmap_free(pf->avail_txqs); 4068 + pf->avail_txqs = NULL; 4069 + bitmap_free(pf->avail_rxqs); 4070 + pf->avail_rxqs = NULL; 4067 4071 return -ENOMEM; 4068 4072 } 4069 4073 ··· 7498 7484 if (err) 7499 7485 goto err_setup_rx; 7500 7486 7501 - ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); 7487 + if (bitmap_empty(pf->txtime_txqs, pf->max_pf_txqs)) 7488 + ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); 7502 7489 7503 7490 if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) { 7504 7491 /* Notify the stack of the actual queue counts. */ ··· 9288 9273 return ret; 9289 9274 } 9290 9275 9276 + /** 9277 + * ice_cfg_txtime - configure Tx Time for the Tx ring 9278 + * @tx_ring: pointer to the Tx ring structure 9279 + * 9280 + * Return: 0 on success, negative value on failure. 9281 + */ 9282 + static int ice_cfg_txtime(struct ice_tx_ring *tx_ring) 9283 + { 9284 + int err, timeout = 50; 9285 + struct ice_vsi *vsi; 9286 + struct device *dev; 9287 + struct ice_pf *pf; 9288 + u32 queue; 9289 + 9290 + if (!tx_ring) 9291 + return -EINVAL; 9292 + 9293 + vsi = tx_ring->vsi; 9294 + pf = vsi->back; 9295 + while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { 9296 + timeout--; 9297 + if (!timeout) 9298 + return -EBUSY; 9299 + usleep_range(1000, 2000); 9300 + } 9301 + 9302 + queue = tx_ring->q_index; 9303 + dev = ice_pf_to_dev(pf); 9304 + 9305 + /* Ignore return value, and always attempt to enable queue. 
*/ 9306 + ice_qp_dis(vsi, queue); 9307 + 9308 + err = ice_qp_ena(vsi, queue); 9309 + if (err) 9310 + dev_err(dev, "Failed to enable Tx queue %d for TxTime configuration\n", 9311 + queue); 9312 + 9313 + clear_bit(ICE_CFG_BUSY, pf->state); 9314 + return err; 9315 + } 9316 + 9317 + /** 9318 + * ice_offload_txtime - set earliest TxTime first 9319 + * @netdev: network interface device structure 9320 + * @qopt_off: etf queue option offload from the skb to set 9321 + * 9322 + * Return: 0 on success, negative value on failure. 9323 + */ 9324 + static int ice_offload_txtime(struct net_device *netdev, 9325 + void *qopt_off) 9326 + { 9327 + struct ice_netdev_priv *np = netdev_priv(netdev); 9328 + struct ice_pf *pf = np->vsi->back; 9329 + struct tc_etf_qopt_offload *qopt; 9330 + struct ice_vsi *vsi = np->vsi; 9331 + struct ice_tx_ring *tx_ring; 9332 + int ret = 0; 9333 + 9334 + if (!ice_is_feature_supported(pf, ICE_F_TXTIME)) 9335 + return -EOPNOTSUPP; 9336 + 9337 + qopt = qopt_off; 9338 + if (!qopt_off || qopt->queue < 0 || qopt->queue >= vsi->num_txq) 9339 + return -EINVAL; 9340 + 9341 + if (qopt->enable) 9342 + set_bit(qopt->queue, pf->txtime_txqs); 9343 + else 9344 + clear_bit(qopt->queue, pf->txtime_txqs); 9345 + 9346 + if (netif_running(vsi->netdev)) { 9347 + tx_ring = vsi->tx_rings[qopt->queue]; 9348 + ret = ice_cfg_txtime(tx_ring); 9349 + if (ret) 9350 + goto err; 9351 + } 9352 + 9353 + netdev_info(netdev, "%s TxTime on queue: %i\n", 9354 + str_enable_disable(qopt->enable), qopt->queue); 9355 + return 0; 9356 + 9357 + err: 9358 + netdev_err(netdev, "Failed to %s TxTime on queue: %i\n", 9359 + str_enable_disable(qopt->enable), qopt->queue); 9360 + 9361 + if (qopt->enable) 9362 + clear_bit(qopt->queue, pf->txtime_txqs); 9363 + return ret; 9364 + } 9365 + 9291 9366 static LIST_HEAD(ice_block_cb_list); 9292 9367 9293 9368 static int ··· 9441 9336 mutex_unlock(&pf->adev_mutex); 9442 9337 } 9443 9338 return err; 9339 + case TC_SETUP_QDISC_ETF: 9340 + return 
ice_offload_txtime(netdev, type_data); 9444 9341 default: 9445 9342 return -EOPNOTSUPP; 9446 9343 }
+170 -3
drivers/net/ethernet/intel/ice/ice_txrx.c
··· 144 144 } 145 145 146 146 /** 147 + * ice_clean_tstamp_ring - clean time stamp ring 148 + * @tx_ring: Tx ring to clean the Time Stamp ring for 149 + */ 150 + static void ice_clean_tstamp_ring(struct ice_tx_ring *tx_ring) 151 + { 152 + struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring; 153 + u32 size; 154 + 155 + if (!tstamp_ring->desc) 156 + return; 157 + 158 + size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc), 159 + PAGE_SIZE); 160 + memset(tstamp_ring->desc, 0, size); 161 + tstamp_ring->next_to_use = 0; 162 + } 163 + 164 + /** 165 + * ice_free_tstamp_ring - free time stamp resources per queue 166 + * @tx_ring: Tx ring to free the Time Stamp ring for 167 + */ 168 + void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring) 169 + { 170 + struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring; 171 + u32 size; 172 + 173 + if (!tstamp_ring->desc) 174 + return; 175 + 176 + ice_clean_tstamp_ring(tx_ring); 177 + size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc), 178 + PAGE_SIZE); 179 + dmam_free_coherent(tx_ring->dev, size, tstamp_ring->desc, 180 + tstamp_ring->dma); 181 + tstamp_ring->desc = NULL; 182 + } 183 + 184 + /** 185 + * ice_free_tx_tstamp_ring - free time stamp resources per Tx ring 186 + * @tx_ring: Tx ring to free the Time Stamp ring for 187 + */ 188 + void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring) 189 + { 190 + ice_free_tstamp_ring(tx_ring); 191 + kfree_rcu(tx_ring->tstamp_ring, rcu); 192 + tx_ring->tstamp_ring = NULL; 193 + tx_ring->flags &= ~ICE_TX_FLAGS_TXTIME; 194 + } 195 + 196 + /** 147 197 * ice_clean_tx_ring - Free any empty Tx buffers 148 198 * @tx_ring: ring to be cleaned 149 199 */ ··· 231 181 232 182 /* cleanup Tx queue statistics */ 233 183 netdev_tx_reset_queue(txring_txq(tx_ring)); 184 + 185 + if (ice_is_txtime_cfg(tx_ring)) 186 + ice_free_tx_tstamp_ring(tx_ring); 234 187 } 235 188 236 189 /** ··· 382 329 } 383 330 384 331 return !!budget; 332 + } 333 + 334 + /** 335 + * ice_alloc_tstamp_ring - 
 * ice_alloc_tstamp_ring - allocate the Time Stamp ring
 * @tx_ring: Tx ring to allocate the Time Stamp ring for
 *
 * Return: 0 on success, negative on error
 */
static int ice_alloc_tstamp_ring(struct ice_tx_ring *tx_ring)
{
	struct ice_tstamp_ring *tstamp_ring;

	/* allocate with kzalloc(), free with kfree_rcu() */
	tstamp_ring = kzalloc(sizeof(*tstamp_ring), GFP_KERNEL);
	if (!tstamp_ring)
		return -ENOMEM;

	/* Link the TS ring and its parent Tx ring in both directions */
	tstamp_ring->tx_ring = tx_ring;
	tx_ring->tstamp_ring = tstamp_ring;
	/* Descriptor memory is DMA-allocated later, in ice_setup_tstamp_ring() */
	tstamp_ring->desc = NULL;
	tstamp_ring->count = ice_calc_ts_ring_count(tx_ring);
	/* Mark the Tx ring as Tx Time (ETF offload) capable/configured */
	tx_ring->flags |= ICE_TX_FLAGS_TXTIME;
	return 0;
}

/**
 * ice_setup_tstamp_ring - allocate the Time Stamp ring
 * @tx_ring: Tx ring to set up the Time Stamp ring for
 *
 * Return: 0 on success, negative on error
 */
static int ice_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
{
	struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
	struct device *dev = tx_ring->dev;
	u32 size;

	/* round up to nearest page */
	size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
		     PAGE_SIZE);
	/* Device-managed allocation: freed automatically on driver detach */
	tstamp_ring->desc = dmam_alloc_coherent(dev, size, &tstamp_ring->dma,
						GFP_KERNEL);
	if (!tstamp_ring->desc) {
		dev_err(dev, "Unable to allocate memory for Time stamp Ring, size=%d\n",
			size);
		return -ENOMEM;
	}

	tstamp_ring->next_to_use = 0;
	return 0;
}

/**
 * ice_alloc_setup_tstamp_ring - Allocate and setup the Time Stamp ring
 * @tx_ring: Tx ring to allocate and setup the Time Stamp ring for
 *
 * Combined entry point: allocates the TS ring control structure, then its
 * DMA descriptor memory; unwinds the allocation if the DMA setup fails.
 *
 * Return: 0 on success, negative on error
 */
int ice_alloc_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int err;

	err = ice_alloc_tstamp_ring(tx_ring);
	if (err) {
		dev_err(dev, "Unable to allocate Time stamp ring for Tx ring %d\n",
			tx_ring->q_index);
		return err;
	}

	err = ice_setup_tstamp_ring(tx_ring);
	if (err) {
		dev_err(dev, "Unable to setup Time stamp ring for Tx ring %d\n",
			tx_ring->q_index);
		ice_free_tx_tstamp_ring(tx_ring);
		return err;
	}
	return 0;
}

/* ... (diff hunk boundary: unchanged code elided between hunks; the lines
 * below are the tail of ice_tx_map()) ...
 */

	/* notify HW of packet */
	kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
				      netdev_xmit_more());
	if (!kick)
		return;

	if (ice_is_txtime_cfg(tx_ring)) {
		struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
		u32 tstamp_count = tstamp_ring->count;
		u32 j = tstamp_ring->next_to_use;
		struct ice_ts_desc *ts_desc;
		struct timespec64 ts;
		u32 tstamp;

		/* Convert the skb launch time to the units the TS descriptor
		 * expects (shift by ICE_TXTIME_CTX_RESOLUTION_128NS --
		 * presumably 128 ns granularity; confirm against HW spec).
		 */
		ts = ktime_to_timespec64(first->skb->tstamp);
		tstamp = ts.tv_nsec >> ICE_TXTIME_CTX_RESOLUTION_128NS;

		/* Pair the last Tx descriptor index (i) with its Tx time */
		ts_desc = ICE_TS_DESC(tstamp_ring, j);
		ts_desc->tx_desc_idx_tstamp = ice_build_tstamp_desc(i, tstamp);

		j++;
		if (j == tstamp_count) {
			u32 fetch = tstamp_count - tx_ring->count;

			j = 0;

			/* To prevent an MDD, when wrapping the tstamp ring
			 * create additional TS descriptors equal to the number
			 * of the fetch TS descriptors value. HW will merge the
			 * TS descriptors with the same timestamp value into a
			 * single descriptor.
			 */
			for (; j < fetch; j++) {
				ts_desc = ICE_TS_DESC(tstamp_ring, j);
				ts_desc->tx_desc_idx_tstamp =
					ice_build_tstamp_desc(i, tstamp);
			}
		}
		tstamp_ring->next_to_use = j;
		/* Bump the TS ring tail; HW transmits at the given Tx time */
		writel_relaxed(j, tstamp_ring->tail);
	} else {
		/* No ETF offload on this queue: regular tail bump */
		writel_relaxed(i, tx_ring->tail);
	}
	return;

dma_error:
+15
drivers/net/ethernet/intel/ice/ice_txrx.h
··· 310 310 #define ICE_TX_LEGACY 1 311 311 312 312 /* descriptor ring, associated with a VSI */ 313 + struct ice_tstamp_ring { 314 + struct ice_tx_ring *tx_ring; /* Backreference to associated Tx ring */ 315 + dma_addr_t dma; /* physical address of ring */ 316 + struct rcu_head rcu; /* to avoid race on free */ 317 + u8 __iomem *tail; 318 + void *desc; 319 + u16 next_to_use; 320 + u16 count; 321 + } ____cacheline_internodealigned_in_smp; 322 + 313 323 struct ice_rx_ring { 314 324 /* CL1 - 1st cacheline starts here */ 315 325 void *desc; /* Descriptor ring memory */ ··· 412 402 spinlock_t tx_lock; 413 403 u32 txq_teid; /* Added Tx queue TEID */ 414 404 /* CL4 - 4th cacheline starts here */ 405 + struct ice_tstamp_ring *tstamp_ring; 415 406 #define ICE_TX_FLAGS_RING_XDP BIT(0) 416 407 #define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1) 417 408 #define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2) 409 + #define ICE_TX_FLAGS_TXTIME BIT(3) 418 410 u8 flags; 419 411 u8 dcb_tc; /* Traffic class of ring */ 420 412 u16 quanta_prof_id; ··· 512 500 void ice_clean_rx_ring(struct ice_rx_ring *rx_ring); 513 501 int ice_setup_tx_ring(struct ice_tx_ring *tx_ring); 514 502 int ice_setup_rx_ring(struct ice_rx_ring *rx_ring); 503 + int ice_alloc_setup_tstamp_ring(struct ice_tx_ring *tx_ring); 515 504 void ice_free_tx_ring(struct ice_tx_ring *tx_ring); 516 505 void ice_free_rx_ring(struct ice_rx_ring *rx_ring); 517 506 int ice_napi_poll(struct napi_struct *napi, int budget); ··· 521 508 u8 *raw_packet); 522 509 void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring); 523 510 void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring); 511 + void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring); 512 + void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring); 524 513 #endif /* _ICE_TXRX_H_ */
+14
drivers/net/ethernet/intel/ice/ice_txrx_lib.h
··· 54 54 } 55 55 56 56 /** 57 + * ice_build_tstamp_desc - build Tx time stamp descriptor 58 + * @tx_desc: Tx LAN descriptor index 59 + * @tstamp: time stamp 60 + * 61 + * Return: Tx time stamp descriptor 62 + */ 63 + static inline __le32 64 + ice_build_tstamp_desc(u16 tx_desc, u32 tstamp) 65 + { 66 + return cpu_to_le32(FIELD_PREP(ICE_TXTIME_TX_DESC_IDX_M, tx_desc) | 67 + FIELD_PREP(ICE_TXTIME_STAMP_M, tstamp)); 68 + } 69 + 70 + /** 57 71 * ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor 58 72 * @rx_desc: Rx 32b flex descriptor with RXDID=2 59 73 *
+1 -1
drivers/net/ethernet/intel/ice/virt/queues.c
··· 54 54 { 55 55 return ring_len == 0 || 56 56 (ring_len >= ICE_MIN_NUM_DESC && 57 - ring_len <= ICE_MAX_NUM_DESC && 57 + ring_len <= ICE_MAX_NUM_DESC_E810 && 58 58 !(ring_len % ICE_REQ_DESC_MULTIPLE)); 59 59 } 60 60