Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
100GbE Intel Wired LAN Driver Updates 2021-06-11

Jacob Keller says:

Extend the ice driver to support basic PTP clock functionality for E810
devices.

This includes some tangential work required to setup the sideband queue and
driver shared parameters as well.

This series only supports E810-based devices. This is because other devices
based on the E822 MAC use a different and more complex PHY.

The low level device functionality is kept within ice_ptp_hw.c and is
designed to be extensible for supporting E822 devices in a future series.

This series also only supports very basic functionality including the
ptp_clock device and timestamping. Support for configuring periodic outputs
and external input timestamps will be implemented in a future series.

There are a couple of potential "what? why?" bits in this series I want to
point out:

1) the PTP hardware functionality is shared between multiple functions. This
means that the same clock registers are shared across multiple PFs. In order
to avoid contention or clashing between PFs, firmware assigns "ownership" to
one PF, while other PFs are merely "associated" with the timer. Because we
share the hardware resource, only the clock owner will allocate and register
a PTP clock device. Other PFs determine the appropriate PTP clock index to
report by using a firmware interface to read a shared parameter that is set
by the owning PF.

2) the ice driver uses its own kthread instead of using do_aux_work. This is
because the periodic and asynchronous tasks are necessary for all PFs, but
only one PF will allocate the clock.

The series is broken up into functional pieces to allow easy review.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+2962 -7
+1
drivers/net/ethernet/intel/Kconfig
··· 299 299 select DIMLIB 300 300 select NET_DEVLINK 301 301 select PLDMFW 302 + imply PTP_1588_CLOCK 302 303 help 303 304 This driver supports Intel(R) Ethernet Connection E800 Series of 304 305 devices. For more information on how to identify your adapter, go
+1
drivers/net/ethernet/intel/ice/Makefile
··· 29 29 ice_ethtool.o 30 30 ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o 31 31 ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o 32 + ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o 32 33 ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o 33 34 ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o 34 35 ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
+7 -1
drivers/net/ethernet/intel/ice/ice.h
··· 59 59 #include "ice_idc_int.h" 60 60 #include "ice_virtchnl_pf.h" 61 61 #include "ice_sriov.h" 62 + #include "ice_ptp.h" 62 63 #include "ice_fdir.h" 63 64 #include "ice_xsk.h" 64 65 #include "ice_arfs.h" ··· 75 74 76 75 #define ICE_DFLT_TRAFFIC_CLASS BIT(0) 77 76 #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) 78 - #define ICE_AQ_LEN 64 77 + #define ICE_AQ_LEN 192 79 78 #define ICE_MBXSQ_LEN 64 79 + #define ICE_SBQ_LEN 64 80 80 #define ICE_MIN_LAN_TXRX_MSIX 1 81 81 #define ICE_MIN_LAN_OICR_MSIX 1 82 82 #define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX) ··· 229 227 ICE_STATE_NOMINAL_CHECK_BITS, 230 228 ICE_ADMINQ_EVENT_PENDING, 231 229 ICE_MAILBOXQ_EVENT_PENDING, 230 + ICE_SIDEBANDQ_EVENT_PENDING, 232 231 ICE_MDD_EVENT_PENDING, 233 232 ICE_VFLR_EVENT_PENDING, 234 233 ICE_FLTR_OVERFLOW_PROMISC, ··· 390 387 ICE_FLAG_DCB_CAPABLE, 391 388 ICE_FLAG_DCB_ENA, 392 389 ICE_FLAG_FD_ENA, 390 + ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */ 391 + ICE_FLAG_PTP, /* PTP is enabled by software */ 393 392 ICE_FLAG_AUX_ENA, 394 393 ICE_FLAG_ADV_FEATURES, 395 394 ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, ··· 454 449 struct mutex sw_mutex; /* lock for protecting VSI alloc flow */ 455 450 struct mutex tc_mutex; /* lock to protect TC changes */ 456 451 u32 msg_enable; 452 + struct ice_ptp ptp; 457 453 u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */ 458 454 u16 rdma_base_vector; 459 455
+41
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
··· 108 108 #define ICE_AQC_CAPS_TXQS 0x0042 109 109 #define ICE_AQC_CAPS_MSIX 0x0043 110 110 #define ICE_AQC_CAPS_FD 0x0045 111 + #define ICE_AQC_CAPS_1588 0x0046 111 112 #define ICE_AQC_CAPS_MAX_MTU 0x0047 112 113 #define ICE_AQC_CAPS_NVM_VER 0x0048 113 114 #define ICE_AQC_CAPS_PENDING_NVM_VER 0x0049 ··· 1612 1611 __le32 addr_low; 1613 1612 }; 1614 1613 1614 + /* Sideband Control Interface Commands */ 1615 + /* Neighbor Device Request (indirect 0x0C00); also used for the response. */ 1616 + struct ice_aqc_neigh_dev_req { 1617 + __le16 sb_data_len; 1618 + u8 reserved[6]; 1619 + __le32 addr_high; 1620 + __le32 addr_low; 1621 + }; 1622 + 1615 1623 /* Add Tx LAN Queues (indirect 0x0C30) */ 1616 1624 struct ice_aqc_add_txqs { 1617 1625 u8 num_qgrps; ··· 1853 1843 struct ice_aqc_get_pkg_info pkg_info[]; 1854 1844 }; 1855 1845 1846 + /* Driver Shared Parameters (direct, 0x0C90) */ 1847 + struct ice_aqc_driver_shared_params { 1848 + u8 set_or_get_op; 1849 + #define ICE_AQC_DRIVER_PARAM_OP_MASK BIT(0) 1850 + #define ICE_AQC_DRIVER_PARAM_SET 0 1851 + #define ICE_AQC_DRIVER_PARAM_GET 1 1852 + u8 param_indx; 1853 + #define ICE_AQC_DRIVER_PARAM_MAX_IDX 15 1854 + u8 rsvd[2]; 1855 + __le32 param_val; 1856 + __le32 addr_high; 1857 + __le32 addr_low; 1858 + }; 1859 + 1860 + enum ice_aqc_driver_params { 1861 + /* OS clock index for PTP timer Domain 0 */ 1862 + ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0 = 0, 1863 + /* OS clock index for PTP timer Domain 1 */ 1864 + ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1, 1865 + 1866 + /* Add new parameters above */ 1867 + ICE_AQC_DRIVER_PARAM_MAX = 16, 1868 + }; 1869 + 1856 1870 /* Lan Queue Overflow Event (direct, 0x1001) */ 1857 1871 struct ice_aqc_event_lan_overflow { 1858 1872 __le32 prtdcb_ruptq; ··· 1945 1911 struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl; 1946 1912 struct ice_aqc_get_set_rss_lut get_set_rss_lut; 1947 1913 struct ice_aqc_get_set_rss_key get_set_rss_key; 1914 + struct ice_aqc_neigh_dev_req neigh_dev; 1948 1915 struct ice_aqc_add_txqs 
add_txqs; 1949 1916 struct ice_aqc_dis_txqs dis_txqs; 1950 1917 struct ice_aqc_add_rdma_qset add_rdma_qset; ··· 1954 1919 struct ice_aqc_fw_logging fw_logging; 1955 1920 struct ice_aqc_get_clear_fw_log get_clear_fw_log; 1956 1921 struct ice_aqc_download_pkg download_pkg; 1922 + struct ice_aqc_driver_shared_params drv_shared_params; 1957 1923 struct ice_aqc_set_mac_lb set_mac_lb; 1958 1924 struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; 1959 1925 struct ice_aqc_set_mac_cfg set_mac_cfg; ··· 2095 2059 ice_aqc_opc_get_rss_key = 0x0B04, 2096 2060 ice_aqc_opc_get_rss_lut = 0x0B05, 2097 2061 2062 + /* Sideband Control Interface commands */ 2063 + ice_aqc_opc_neighbour_device_request = 0x0C00, 2064 + 2098 2065 /* Tx queue handling commands/events */ 2099 2066 ice_aqc_opc_add_txqs = 0x0C30, 2100 2067 ice_aqc_opc_dis_txqs = 0x0C31, ··· 2107 2068 ice_aqc_opc_download_pkg = 0x0C40, 2108 2069 ice_aqc_opc_update_pkg = 0x0C42, 2109 2070 ice_aqc_opc_get_pkg_info_list = 0x0C43, 2071 + 2072 + ice_aqc_opc_driver_shared_params = 0x0C90, 2110 2073 2111 2074 /* Standalone Commands/Events */ 2112 2075 ice_aqc_opc_event_lan_overflow = 0x1001,
+12 -2
drivers/net/ethernet/intel/ice/ice_base.c
··· 287 287 /* make sure the context is associated with the right VSI */ 288 288 tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx); 289 289 290 + /* Restrict Tx timestamps to the PF VSI */ 291 + switch (vsi->type) { 292 + case ICE_VSI_PF: 293 + tlan_ctx->tsyn_ena = 1; 294 + break; 295 + default: 296 + break; 297 + } 298 + 290 299 tlan_ctx->tso_ena = ICE_TX_LEGACY; 291 300 tlan_ctx->tso_qnum = pf_q; 292 301 ··· 402 393 * of same priority 403 394 */ 404 395 if (vsi->type != ICE_VSI_VF) 405 - ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3); 396 + ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true); 406 397 else 407 - ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3); 398 + ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3, 399 + false); 408 400 409 401 /* Absolute queue number out of 2K needs to be passed */ 410 402 err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
+243
drivers/net/ethernet/intel/ice/ice_common.c
··· 59 59 } 60 60 61 61 /** 62 + * ice_is_e810 63 + * @hw: pointer to the hardware structure 64 + * 65 + * returns true if the device is E810 based, false if not. 66 + */ 67 + bool ice_is_e810(struct ice_hw *hw) 68 + { 69 + return hw->mac_type == ICE_MAC_E810; 70 + } 71 + 72 + /** 62 73 * ice_clear_pf_cfg - Clear PF configuration 63 74 * @hw: pointer to the hardware structure 64 75 * ··· 1304 1293 { 0 } 1305 1294 }; 1306 1295 1296 + /* Sideband Queue command wrappers */ 1297 + 1298 + /** 1299 + * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue 1300 + * @hw: pointer to the HW struct 1301 + * @desc: descriptor describing the command 1302 + * @buf: buffer to use for indirect commands (NULL for direct commands) 1303 + * @buf_size: size of buffer for indirect commands (0 for direct commands) 1304 + * @cd: pointer to command details structure 1305 + */ 1306 + static int 1307 + ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc, 1308 + void *buf, u16 buf_size, struct ice_sq_cd *cd) 1309 + { 1310 + return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw), 1311 + (struct ice_aq_desc *)desc, 1312 + buf, buf_size, cd)); 1313 + } 1314 + 1315 + /** 1316 + * ice_sbq_rw_reg - Fill Sideband Queue command 1317 + * @hw: pointer to the HW struct 1318 + * @in: message info to be filled in descriptor 1319 + */ 1320 + int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in) 1321 + { 1322 + struct ice_sbq_cmd_desc desc = {0}; 1323 + struct ice_sbq_msg_req msg = {0}; 1324 + u16 msg_len; 1325 + int status; 1326 + 1327 + msg_len = sizeof(msg); 1328 + 1329 + msg.dest_dev = in->dest_dev; 1330 + msg.opcode = in->opcode; 1331 + msg.flags = ICE_SBQ_MSG_FLAGS; 1332 + msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE; 1333 + msg.msg_addr_low = cpu_to_le16(in->msg_addr_low); 1334 + msg.msg_addr_high = cpu_to_le32(in->msg_addr_high); 1335 + 1336 + if (in->opcode) 1337 + msg.data = cpu_to_le32(in->data); 1338 + else 1339 + /* data read comes back in completion, so 
shorten the struct by 1340 + * sizeof(msg.data) 1341 + */ 1342 + msg_len -= sizeof(msg.data); 1343 + 1344 + desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); 1345 + desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req); 1346 + desc.param0.cmd_len = cpu_to_le16(msg_len); 1347 + status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL); 1348 + if (!status && !in->opcode) 1349 + in->data = le32_to_cpu 1350 + (((struct ice_sbq_msg_cmpl *)&msg)->data); 1351 + return status; 1352 + } 1353 + 1307 1354 /* FW Admin Queue command wrappers */ 1308 1355 1309 1356 /* Software lock/mutex that is meant to be held while the Global Config Lock ··· 2104 2035 } 2105 2036 2106 2037 /** 2038 + * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2039 + * @hw: pointer to the HW struct 2040 + * @func_p: pointer to function capabilities structure 2041 + * @cap: pointer to the capability element to parse 2042 + * 2043 + * Extract function capabilities for ICE_AQC_CAPS_1588. 2044 + */ 2045 + static void 2046 + ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2047 + struct ice_aqc_list_caps_elem *cap) 2048 + { 2049 + struct ice_ts_func_info *info = &func_p->ts_func_info; 2050 + u32 number = le32_to_cpu(cap->number); 2051 + 2052 + info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2053 + func_p->common_cap.ieee_1588 = info->ena; 2054 + 2055 + info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2056 + info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2057 + info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2058 + info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2059 + 2060 + info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S; 2061 + info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2062 + 2063 + ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n", 2064 + func_p->common_cap.ieee_1588); 2065 + ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n", 2066 + info->src_tmr_owned); 2067 + 
ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n", 2068 + info->tmr_ena); 2069 + ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n", 2070 + info->tmr_index_owned); 2071 + ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n", 2072 + info->tmr_index_assoc); 2073 + ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n", 2074 + info->clk_freq); 2075 + ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n", 2076 + info->clk_src); 2077 + } 2078 + 2079 + /** 2107 2080 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps 2108 2081 * @hw: pointer to the HW struct 2109 2082 * @func_p: pointer to function capabilities structure ··· 2210 2099 break; 2211 2100 case ICE_AQC_CAPS_VSI: 2212 2101 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2102 + break; 2103 + case ICE_AQC_CAPS_1588: 2104 + ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2213 2105 break; 2214 2106 case ICE_AQC_CAPS_FD: 2215 2107 ice_parse_fdir_func_caps(hw, func_p); ··· 2287 2173 } 2288 2174 2289 2175 /** 2176 + * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2177 + * @hw: pointer to the HW struct 2178 + * @dev_p: pointer to device capabilities structure 2179 + * @cap: capability element to parse 2180 + * 2181 + * Parse ICE_AQC_CAPS_1588 for device capabilities. 
2182 + */ 2183 + static void 2184 + ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2185 + struct ice_aqc_list_caps_elem *cap) 2186 + { 2187 + struct ice_ts_dev_info *info = &dev_p->ts_dev_info; 2188 + u32 logical_id = le32_to_cpu(cap->logical_id); 2189 + u32 phys_id = le32_to_cpu(cap->phys_id); 2190 + u32 number = le32_to_cpu(cap->number); 2191 + 2192 + info->ena = ((number & ICE_TS_DEV_ENA_M) != 0); 2193 + dev_p->common_cap.ieee_1588 = info->ena; 2194 + 2195 + info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M; 2196 + info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0); 2197 + info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0); 2198 + 2199 + info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S; 2200 + info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); 2201 + info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); 2202 + 2203 + info->ena_ports = logical_id; 2204 + info->tmr_own_map = phys_id; 2205 + 2206 + ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n", 2207 + dev_p->common_cap.ieee_1588); 2208 + ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n", 2209 + info->tmr0_owner); 2210 + ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n", 2211 + info->tmr0_owned); 2212 + ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n", 2213 + info->tmr0_ena); 2214 + ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n", 2215 + info->tmr1_owner); 2216 + ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n", 2217 + info->tmr1_owned); 2218 + ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n", 2219 + info->tmr1_ena); 2220 + ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", 2221 + info->ena_ports); 2222 + ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", 2223 + info->tmr_own_map); 2224 + } 2225 + 2226 + /** 2290 2227 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 2291 2228 * @hw: pointer to the HW struct 2292 2229 * @dev_p: pointer to device capabilities 
structure ··· 2397 2232 break; 2398 2233 case ICE_AQC_CAPS_VSI: 2399 2234 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2235 + break; 2236 + case ICE_AQC_CAPS_1588: 2237 + ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); 2400 2238 break; 2401 2239 case ICE_AQC_CAPS_FD: 2402 2240 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); ··· 4689 4521 if (status || num_elem_ret != 1) 4690 4522 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); 4691 4523 return status; 4524 + } 4525 + 4526 + /** 4527 + * ice_aq_set_driver_param - Set driver parameter to share via firmware 4528 + * @hw: pointer to the HW struct 4529 + * @idx: parameter index to set 4530 + * @value: the value to set the parameter to 4531 + * @cd: pointer to command details structure or NULL 4532 + * 4533 + * Set the value of one of the software defined parameters. All PFs connected 4534 + * to this device can read the value using ice_aq_get_driver_param. 4535 + * 4536 + * Note that firmware provides no synchronization or locking, and will not 4537 + * save the parameter value during a device reset. It is expected that 4538 + * a single PF will write the parameter value, while all other PFs will only 4539 + * read it. 
4540 + */ 4541 + int 4542 + ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, 4543 + u32 value, struct ice_sq_cd *cd) 4544 + { 4545 + struct ice_aqc_driver_shared_params *cmd; 4546 + struct ice_aq_desc desc; 4547 + 4548 + if (idx >= ICE_AQC_DRIVER_PARAM_MAX) 4549 + return -EIO; 4550 + 4551 + cmd = &desc.params.drv_shared_params; 4552 + 4553 + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params); 4554 + 4555 + cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET; 4556 + cmd->param_indx = idx; 4557 + cmd->param_val = cpu_to_le32(value); 4558 + 4559 + return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd)); 4560 + } 4561 + 4562 + /** 4563 + * ice_aq_get_driver_param - Get driver parameter shared via firmware 4564 + * @hw: pointer to the HW struct 4565 + * @idx: parameter index to set 4566 + * @value: storage to return the shared parameter 4567 + * @cd: pointer to command details structure or NULL 4568 + * 4569 + * Get the value of one of the software defined parameters. 4570 + * 4571 + * Note that firmware provides no synchronization or locking. It is expected 4572 + * that only a single PF will write a given parameter. 4573 + */ 4574 + int 4575 + ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, 4576 + u32 *value, struct ice_sq_cd *cd) 4577 + { 4578 + struct ice_aqc_driver_shared_params *cmd; 4579 + struct ice_aq_desc desc; 4580 + enum ice_status status; 4581 + 4582 + if (idx >= ICE_AQC_DRIVER_PARAM_MAX) 4583 + return -EIO; 4584 + 4585 + cmd = &desc.params.drv_shared_params; 4586 + 4587 + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params); 4588 + 4589 + cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET; 4590 + cmd->param_indx = idx; 4591 + 4592 + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 4593 + if (status) 4594 + return ice_status_to_errno(status); 4595 + 4596 + *value = le32_to_cpu(cmd->param_val); 4597 + 4598 + return 0; 4692 4599 } 4693 4600 4694 4601 /**
+10
drivers/net/ethernet/intel/ice/ice_common.h
··· 40 40 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, 41 41 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 42 42 enum ice_adminq_opc opc, struct ice_sq_cd *cd); 43 + bool ice_is_sbq_supported(struct ice_hw *hw); 44 + struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw); 43 45 enum ice_status 44 46 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, 45 47 struct ice_aq_desc *desc, void *buf, u16 buf_size, ··· 99 97 enum ice_status 100 98 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 101 99 struct ice_sq_cd *cd); 100 + bool ice_is_e810(struct ice_hw *hw); 102 101 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); 103 102 enum ice_status 104 103 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, ··· 176 173 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); 177 174 struct ice_q_ctx * 178 175 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); 176 + int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in); 179 177 void 180 178 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, 181 179 u64 *prev_stat, u64 *cur_stat); ··· 186 182 enum ice_status 187 183 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, 188 184 struct ice_aqc_txsched_elem_data *buf); 185 + int 186 + ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, 187 + u32 value, struct ice_sq_cd *cd); 188 + int 189 + ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, 190 + u32 *value, struct ice_sq_cd *cd); 189 191 enum ice_status 190 192 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, 191 193 struct ice_sq_cd *cd);
+62
drivers/net/ethernet/intel/ice/ice_controlq.c
··· 52 52 } 53 53 54 54 /** 55 + * ice_sb_init_regs - Initialize Sideband registers 56 + * @hw: pointer to the hardware structure 57 + * 58 + * This assumes the alloc_sq and alloc_rq functions have already been called 59 + */ 60 + static void ice_sb_init_regs(struct ice_hw *hw) 61 + { 62 + struct ice_ctl_q_info *cq = &hw->sbq; 63 + 64 + ICE_CQ_INIT_REGS(cq, PF_SB); 65 + } 66 + 67 + /** 55 68 * ice_check_sq_alive 56 69 * @hw: pointer to the HW struct 57 70 * @cq: pointer to the specific Control queue ··· 622 609 ice_adminq_init_regs(hw); 623 610 cq = &hw->adminq; 624 611 break; 612 + case ICE_CTL_Q_SB: 613 + ice_sb_init_regs(hw); 614 + cq = &hw->sbq; 615 + break; 625 616 case ICE_CTL_Q_MAILBOX: 626 617 ice_mailbox_init_regs(hw); 627 618 cq = &hw->mailboxq; ··· 663 646 } 664 647 665 648 /** 649 + * ice_is_sbq_supported - is the sideband queue supported 650 + * @hw: pointer to the hardware structure 651 + * 652 + * Returns true if the sideband control queue interface is 653 + * supported for the device, false otherwise 654 + */ 655 + bool ice_is_sbq_supported(struct ice_hw *hw) 656 + { 657 + /* The device sideband queue is only supported on devices with the 658 + * generic MAC type. 
659 + */ 660 + return hw->mac_type == ICE_MAC_GENERIC; 661 + } 662 + 663 + /** 664 + * ice_get_sbq - returns the right control queue to use for sideband 665 + * @hw: pointer to the hardware structure 666 + */ 667 + struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw) 668 + { 669 + if (ice_is_sbq_supported(hw)) 670 + return &hw->sbq; 671 + return &hw->adminq; 672 + } 673 + 674 + /** 666 675 * ice_shutdown_ctrlq - shutdown routine for any control queue 667 676 * @hw: pointer to the hardware structure 668 677 * @q_type: specific Control queue type ··· 704 661 cq = &hw->adminq; 705 662 if (ice_check_sq_alive(hw, cq)) 706 663 ice_aq_q_shutdown(hw, true); 664 + break; 665 + case ICE_CTL_Q_SB: 666 + cq = &hw->sbq; 707 667 break; 708 668 case ICE_CTL_Q_MAILBOX: 709 669 cq = &hw->mailboxq; ··· 731 685 { 732 686 /* Shutdown FW admin queue */ 733 687 ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); 688 + /* Shutdown PHY Sideband */ 689 + if (ice_is_sbq_supported(hw)) 690 + ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB); 734 691 /* Shutdown PF-VF Mailbox */ 735 692 ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); 736 693 } ··· 773 724 774 725 if (status) 775 726 return status; 727 + /* sideband control queue (SBQ) interface is not supported on some 728 + * devices. 
Initialize if supported, else fallback to the admin queue 729 + * interface 730 + */ 731 + if (ice_is_sbq_supported(hw)) { 732 + status = ice_init_ctrlq(hw, ICE_CTL_Q_SB); 733 + if (status) 734 + return status; 735 + } 776 736 /* Init Mailbox queue */ 777 737 return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX); 778 738 } ··· 817 759 enum ice_status ice_create_all_ctrlq(struct ice_hw *hw) 818 760 { 819 761 ice_init_ctrlq_locks(&hw->adminq); 762 + if (ice_is_sbq_supported(hw)) 763 + ice_init_ctrlq_locks(&hw->sbq); 820 764 ice_init_ctrlq_locks(&hw->mailboxq); 821 765 822 766 return ice_init_all_ctrlq(hw); ··· 851 791 ice_shutdown_all_ctrlq(hw); 852 792 853 793 ice_destroy_ctrlq_locks(&hw->adminq); 794 + if (ice_is_sbq_supported(hw)) 795 + ice_destroy_ctrlq_locks(&hw->sbq); 854 796 ice_destroy_ctrlq_locks(&hw->mailboxq); 855 797 } 856 798
+2
drivers/net/ethernet/intel/ice/ice_controlq.h
··· 9 9 /* Maximum buffer lengths for all control queue types */ 10 10 #define ICE_AQ_MAX_BUF_LEN 4096 11 11 #define ICE_MBXQ_MAX_BUF_LEN 4096 12 + #define ICE_SBQ_MAX_BUF_LEN 512 12 13 13 14 #define ICE_CTL_Q_DESC(R, i) \ 14 15 (&(((struct ice_aq_desc *)((R).desc_buf.va))[i])) ··· 30 29 ICE_CTL_Q_UNKNOWN = 0, 31 30 ICE_CTL_Q_ADMIN, 32 31 ICE_CTL_Q_MAILBOX, 32 + ICE_CTL_Q_SB, 33 33 }; 34 34 35 35 /* Control Queue timeout settings - max delay 1s */
+26 -1
drivers/net/ethernet/intel/ice/ice_ethtool.c
··· 3195 3195 return 0; 3196 3196 } 3197 3197 3198 + static int 3199 + ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) 3200 + { 3201 + struct ice_pf *pf = ice_netdev_to_pf(dev); 3202 + 3203 + /* only report timestamping if PTP is enabled */ 3204 + if (!test_bit(ICE_FLAG_PTP, pf->flags)) 3205 + return ethtool_op_get_ts_info(dev, info); 3206 + 3207 + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 3208 + SOF_TIMESTAMPING_RX_SOFTWARE | 3209 + SOF_TIMESTAMPING_SOFTWARE | 3210 + SOF_TIMESTAMPING_TX_HARDWARE | 3211 + SOF_TIMESTAMPING_RX_HARDWARE | 3212 + SOF_TIMESTAMPING_RAW_HARDWARE; 3213 + 3214 + info->phc_index = ice_get_ptp_clock_index(pf); 3215 + 3216 + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); 3217 + 3218 + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); 3219 + 3220 + return 0; 3221 + } 3222 + 3198 3223 /** 3199 3224 * ice_get_max_txq - return the maximum number of Tx queues for in a PF 3200 3225 * @pf: PF structure ··· 4011 3986 .set_rxfh = ice_set_rxfh, 4012 3987 .get_channels = ice_get_channels, 4013 3988 .set_channels = ice_set_channels, 4014 - .get_ts_info = ethtool_op_get_ts_info, 3989 + .get_ts_info = ice_get_ts_info, 4015 3990 .get_per_queue_coalesce = ice_get_per_q_coalesce, 4016 3991 .set_per_queue_coalesce = ice_set_per_q_coalesce, 4017 3992 .get_fecparam = ice_get_fecparam,
+69
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
··· 52 52 #define PF_MBX_ATQLEN_ATQCRIT_M BIT(30) 53 53 #define PF_MBX_ATQLEN_ATQENABLE_M BIT(31) 54 54 #define PF_MBX_ATQT 0x0022E300 55 + #define PF_SB_ARQBAH 0x0022FF00 56 + #define PF_SB_ARQBAH_ARQBAH_S 0 57 + #define PF_SB_ARQBAH_ARQBAH_M ICE_M(0xFFFFFFFF, 0) 58 + #define PF_SB_ARQBAL 0x0022FE80 59 + #define PF_SB_ARQBAL_ARQBAL_LSB_S 0 60 + #define PF_SB_ARQBAL_ARQBAL_LSB_M ICE_M(0x3F, 0) 61 + #define PF_SB_ARQBAL_ARQBAL_S 6 62 + #define PF_SB_ARQBAL_ARQBAL_M ICE_M(0x3FFFFFF, 6) 63 + #define PF_SB_ARQH 0x00230000 64 + #define PF_SB_ARQH_ARQH_S 0 65 + #define PF_SB_ARQH_ARQH_M ICE_M(0x3FF, 0) 66 + #define PF_SB_ARQLEN 0x0022FF80 67 + #define PF_SB_ARQLEN_ARQLEN_S 0 68 + #define PF_SB_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0) 69 + #define PF_SB_ARQLEN_ARQVFE_S 28 70 + #define PF_SB_ARQLEN_ARQVFE_M BIT(28) 71 + #define PF_SB_ARQLEN_ARQOVFL_S 29 72 + #define PF_SB_ARQLEN_ARQOVFL_M BIT(29) 73 + #define PF_SB_ARQLEN_ARQCRIT_S 30 74 + #define PF_SB_ARQLEN_ARQCRIT_M BIT(30) 75 + #define PF_SB_ARQLEN_ARQENABLE_S 31 76 + #define PF_SB_ARQLEN_ARQENABLE_M BIT(31) 77 + #define PF_SB_ARQT 0x00230080 78 + #define PF_SB_ARQT_ARQT_S 0 79 + #define PF_SB_ARQT_ARQT_M ICE_M(0x3FF, 0) 80 + #define PF_SB_ATQBAH 0x0022FC80 81 + #define PF_SB_ATQBAH_ATQBAH_S 0 82 + #define PF_SB_ATQBAH_ATQBAH_M ICE_M(0xFFFFFFFF, 0) 83 + #define PF_SB_ATQBAL 0x0022FC00 84 + #define PF_SB_ATQBAL_ATQBAL_S 6 85 + #define PF_SB_ATQBAL_ATQBAL_M ICE_M(0x3FFFFFF, 6) 86 + #define PF_SB_ATQH 0x0022FD80 87 + #define PF_SB_ATQH_ATQH_S 0 88 + #define PF_SB_ATQH_ATQH_M ICE_M(0x3FF, 0) 89 + #define PF_SB_ATQLEN 0x0022FD00 90 + #define PF_SB_ATQLEN_ATQLEN_S 0 91 + #define PF_SB_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0) 92 + #define PF_SB_ATQLEN_ATQVFE_S 28 93 + #define PF_SB_ATQLEN_ATQVFE_M BIT(28) 94 + #define PF_SB_ATQLEN_ATQOVFL_S 29 95 + #define PF_SB_ATQLEN_ATQOVFL_M BIT(29) 96 + #define PF_SB_ATQLEN_ATQCRIT_S 30 97 + #define PF_SB_ATQLEN_ATQCRIT_M BIT(30) 98 + #define PF_SB_ATQLEN_ATQENABLE_S 31 99 + #define 
PF_SB_ATQLEN_ATQENABLE_M BIT(31) 100 + #define PF_SB_ATQT 0x0022FE00 101 + #define PF_SB_ATQT_ATQT_S 0 102 + #define PF_SB_ATQT_ATQT_M ICE_M(0x3FF, 0) 55 103 #define PRTDCB_GENC 0x00083000 56 104 #define PRTDCB_GENC_PFCLDA_S 16 57 105 #define PRTDCB_GENC_PFCLDA_M ICE_M(0xFFFF, 16) ··· 202 154 #define PFINT_MBX_CTL_ITR_INDX_M ICE_M(0x3, 11) 203 155 #define PFINT_MBX_CTL_CAUSE_ENA_M BIT(30) 204 156 #define PFINT_OICR 0x0016CA00 157 + #define PFINT_OICR_TSYN_TX_M BIT(11) 205 158 #define PFINT_OICR_ECC_ERR_M BIT(16) 206 159 #define PFINT_OICR_MAL_DETECT_M BIT(19) 207 160 #define PFINT_OICR_GRST_M BIT(20) ··· 218 169 #define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, 11) 219 170 #define PFINT_OICR_CTL_CAUSE_ENA_M BIT(30) 220 171 #define PFINT_OICR_ENA 0x0016C900 172 + #define PFINT_SB_CTL 0x0016B600 173 + #define PFINT_SB_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) 174 + #define PFINT_SB_CTL_CAUSE_ENA_M BIT(30) 221 175 #define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4)) 222 176 #define QINT_RQCTL_MSIX_INDX_S 0 223 177 #define QINT_RQCTL_MSIX_INDX_M ICE_M(0x7FF, 0) ··· 434 382 #define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) 435 383 #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) 436 384 #define PRTRPB_RDPC 0x000AC260 385 + #define GLTSYN_CMD 0x00088810 386 + #define GLTSYN_CMD_SYNC 0x00088814 387 + #define GLTSYN_ENA(_i) (0x00088808 + ((_i) * 4)) 388 + #define GLTSYN_ENA_TSYN_ENA_M BIT(0) 389 + #define GLTSYN_INCVAL_H(_i) (0x00088920 + ((_i) * 4)) 390 + #define GLTSYN_INCVAL_L(_i) (0x00088918 + ((_i) * 4)) 391 + #define GLTSYN_SHADJ_H(_i) (0x00088910 + ((_i) * 4)) 392 + #define GLTSYN_SHADJ_L(_i) (0x00088908 + ((_i) * 4)) 393 + #define GLTSYN_SHTIME_0(_i) (0x000888E0 + ((_i) * 4)) 394 + #define GLTSYN_SHTIME_H(_i) (0x000888F0 + ((_i) * 4)) 395 + #define GLTSYN_SHTIME_L(_i) (0x000888E8 + ((_i) * 4)) 396 + #define GLTSYN_STAT(_i) (0x000888C0 + ((_i) * 4)) 397 + #define GLTSYN_SYNC_DLAY 0x00088818 398 + #define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4)) 399 + #define GLTSYN_TIME_L(_i) 
(0x000888D0 + ((_i) * 4)) 400 + #define PFTSYN_SEM 0x00088880 401 + #define PFTSYN_SEM_BUSY_M BIT(0) 437 402 #define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) 438 403 #define VSIQF_FD_CNT_FD_GCNT_S 0 439 404 #define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
+18 -2
drivers/net/ethernet/intel/ice/ice_lib.c
··· 1298 1298 ring->reg_idx = vsi->txq_map[i]; 1299 1299 ring->ring_active = false; 1300 1300 ring->vsi = vsi; 1301 + ring->tx_tstamps = &pf->ptp.port.tx; 1301 1302 ring->dev = dev; 1302 1303 ring->count = vsi->num_tx_desc; 1303 1304 WRITE_ONCE(vsi->tx_rings[i], ring); ··· 1676 1675 * @pf_q: index of the Rx queue in the PF's queue space 1677 1676 * @rxdid: flexible descriptor RXDID 1678 1677 * @prio: priority for the RXDID for this queue 1678 + * @ena_ts: true to enable timestamp and false to disable timestamp 1679 1679 */ 1680 1680 void 1681 - ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio) 1681 + ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, 1682 + bool ena_ts) 1682 1683 { 1683 1684 int regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); 1684 1685 ··· 1694 1691 1695 1692 regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) & 1696 1693 QRXFLXP_CNTXT_RXDID_PRIO_M; 1694 + 1695 + if (ena_ts) 1696 + /* Enable TimeSync on this queue */ 1697 + regval |= QRXFLXP_CNTXT_TS_M; 1697 1698 1698 1699 wr32(hw, QRXFLXP_CNTXT(pf_q), regval); 1699 1700 } ··· 3399 3392 case ICE_ERR_DOES_NOT_EXIST: 3400 3393 return -ENOENT; 3401 3394 case ICE_ERR_OUT_OF_RANGE: 3402 - return -ENOTTY; 3395 + case ICE_ERR_AQ_ERROR: 3396 + case ICE_ERR_AQ_TIMEOUT: 3397 + case ICE_ERR_AQ_EMPTY: 3398 + case ICE_ERR_AQ_FW_CRITICAL: 3399 + return -EIO; 3403 3400 case ICE_ERR_PARAM: 3401 + case ICE_ERR_INVAL_SIZE: 3404 3402 return -EINVAL; 3405 3403 case ICE_ERR_NO_MEMORY: 3406 3404 return -ENOMEM; 3407 3405 case ICE_ERR_MAX_LIMIT: 3408 3406 return -EAGAIN; 3407 + case ICE_ERR_RESET_ONGOING: 3408 + return -EBUSY; 3409 + case ICE_ERR_AQ_FULL: 3410 + return -ENOSPC; 3409 3411 default: 3410 3412 return -EINVAL; 3411 3413 }
+2 -1
drivers/net/ethernet/intel/ice/ice_lib.h
··· 80 80 int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout); 81 81 82 82 void 83 - ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio); 83 + ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, 84 + bool ena_ts); 84 85 85 86 void ice_vsi_dis_irq(struct ice_vsi *vsi); 86 87
+95
drivers/net/ethernet/intel/ice/ice_main.c
··· 471 471 /* disable the VSIs and their queues that are not already DOWN */ 472 472 ice_pf_dis_all_vsi(pf, false); 473 473 474 + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 475 + ice_ptp_release(pf); 476 + 474 477 if (hw->port_info) 475 478 ice_sched_clear_port(hw->port_info); 476 479 ··· 1234 1231 cq = &hw->adminq; 1235 1232 qtype = "Admin"; 1236 1233 break; 1234 + case ICE_CTL_Q_SB: 1235 + cq = &hw->sbq; 1236 + qtype = "Sideband"; 1237 + break; 1237 1238 case ICE_CTL_Q_MAILBOX: 1238 1239 cq = &hw->mailboxq; 1239 1240 qtype = "Mailbox"; ··· 1407 1400 1408 1401 if (ice_ctrlq_pending(hw, &hw->mailboxq)) 1409 1402 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); 1403 + 1404 + ice_flush(hw); 1405 + } 1406 + 1407 + /** 1408 + * ice_clean_sbq_subtask - clean the Sideband Queue rings 1409 + * @pf: board private structure 1410 + */ 1411 + static void ice_clean_sbq_subtask(struct ice_pf *pf) 1412 + { 1413 + struct ice_hw *hw = &pf->hw; 1414 + 1415 + /* Nothing to do here if sideband queue is not supported */ 1416 + if (!ice_is_sbq_supported(hw)) { 1417 + clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); 1418 + return; 1419 + } 1420 + 1421 + if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state)) 1422 + return; 1423 + 1424 + if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB)) 1425 + return; 1426 + 1427 + clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); 1428 + 1429 + if (ice_ctrlq_pending(hw, &hw->sbq)) 1430 + __ice_clean_ctrlq(pf, ICE_CTL_Q_SB); 1410 1431 1411 1432 ice_flush(hw); 1412 1433 } ··· 2141 2106 2142 2107 ice_process_vflr_event(pf); 2143 2108 ice_clean_mailboxq_subtask(pf); 2109 + ice_clean_sbq_subtask(pf); 2144 2110 ice_sync_arfs_fltrs(pf); 2145 2111 ice_flush_fdir_ctx(pf); 2146 2112 ··· 2157 2121 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || 2158 2122 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || 2159 2123 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || 2124 + test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || 2160 2125 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) 
2161 2126 mod_timer(&pf->serv_tmr, jiffies); 2162 2127 } ··· 2176 2139 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; 2177 2140 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2178 2141 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; 2142 + hw->sbq.num_rq_entries = ICE_SBQ_LEN; 2143 + hw->sbq.num_sq_entries = ICE_SBQ_LEN; 2144 + hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2145 + hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; 2179 2146 } 2180 2147 2181 2148 /** ··· 2720 2679 dev = ice_pf_to_dev(pf); 2721 2680 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); 2722 2681 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); 2682 + set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); 2723 2683 2724 2684 oicr = rd32(hw, PFINT_OICR); 2725 2685 ena_mask = rd32(hw, PFINT_OICR_ENA); ··· 2792 2750 } 2793 2751 } 2794 2752 2753 + if (oicr & PFINT_OICR_TSYN_TX_M) { 2754 + ena_mask &= ~PFINT_OICR_TSYN_TX_M; 2755 + ice_ptp_process_ts(pf); 2756 + } 2757 + 2795 2758 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) 2796 2759 if (oicr & ICE_AUX_CRIT_ERR) { 2797 2760 struct iidc_event *event; ··· 2846 2799 /* disable Mailbox queue Interrupt causes */ 2847 2800 wr32(hw, PFINT_MBX_CTL, 2848 2801 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); 2802 + 2803 + wr32(hw, PFINT_SB_CTL, 2804 + rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M); 2849 2805 2850 2806 /* disable Control queue Interrupt causes */ 2851 2807 wr32(hw, PFINT_OICR_CTL, ··· 2903 2853 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | 2904 2854 PFINT_MBX_CTL_CAUSE_ENA_M); 2905 2855 wr32(hw, PFINT_MBX_CTL, val); 2856 + 2857 + /* This enables Sideband queue Interrupt causes */ 2858 + val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | 2859 + PFINT_SB_CTL_CAUSE_ENA_M); 2860 + wr32(hw, PFINT_SB_CTL, val); 2906 2861 2907 2862 ice_flush(hw); 2908 2863 } ··· 3372 3317 bitmap_free(pf->avail_rxqs); 3373 3318 pf->avail_rxqs = NULL; 3374 3319 } 3320 + 3321 + if (pf->ptp.clock) 3322 + 
ptp_clock_unregister(pf->ptp.clock); 3375 3323 } 3376 3324 3377 3325 /** ··· 3420 3362 ice_alloc_fd_shrd_item(&pf->hw, &unused, 3421 3363 func_caps->fd_fltr_best_effort); 3422 3364 } 3365 + 3366 + clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 3367 + if (func_caps->common_cap.ieee_1588) 3368 + set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); 3423 3369 3424 3370 pf->max_pf_txqs = func_caps->common_cap.num_txq; 3425 3371 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; ··· 4407 4345 } 4408 4346 4409 4347 /* initialize DDP driven features */ 4348 + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 4349 + ice_ptp_init(pf); 4410 4350 4411 4351 /* Note: Flow director init failure is non-fatal to load */ 4412 4352 if (ice_init_fdir(pf)) ··· 4576 4512 4577 4513 mutex_destroy(&(&pf->hw)->fdir_fltr_lock); 4578 4514 ice_deinit_lag(pf); 4515 + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 4516 + ice_ptp_release(pf); 4579 4517 if (!ice_is_safe_mode(pf)) 4580 4518 ice_remove_arfs(pf); 4581 4519 ice_setup_mc_magic_wake(pf); ··· 6369 6303 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) 6370 6304 ice_dcb_rebuild(pf); 6371 6305 6306 + /* If the PF previously had enabled PTP, PTP init needs to happen before 6307 + * the VSI rebuild. If not, this causes the PTP link status events to 6308 + * fail. 
6309 + */ 6310 + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) 6311 + ice_ptp_init(pf); 6312 + 6372 6313 /* rebuild PF VSI */ 6373 6314 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); 6374 6315 if (err) { ··· 6522 6449 kfree(event); 6523 6450 6524 6451 return err; 6452 + } 6453 + 6454 + /** 6455 + * ice_do_ioctl - Access the hwtstamp interface 6456 + * @netdev: network interface device structure 6457 + * @ifr: interface request data 6458 + * @cmd: ioctl command 6459 + */ 6460 + static int ice_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 6461 + { 6462 + struct ice_netdev_priv *np = netdev_priv(netdev); 6463 + struct ice_pf *pf = np->vsi->back; 6464 + 6465 + switch (cmd) { 6466 + case SIOCGHWTSTAMP: 6467 + return ice_ptp_get_ts_config(pf, ifr); 6468 + case SIOCSHWTSTAMP: 6469 + return ice_ptp_set_ts_config(pf, ifr); 6470 + default: 6471 + return -EOPNOTSUPP; 6472 + } 6525 6473 } 6526 6474 6527 6475 /** ··· 7195 7101 .ndo_change_mtu = ice_change_mtu, 7196 7102 .ndo_get_stats64 = ice_get_stats64, 7197 7103 .ndo_set_tx_maxrate = ice_set_tx_maxrate, 7104 + .ndo_do_ioctl = ice_do_ioctl, 7198 7105 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, 7199 7106 .ndo_set_vf_mac = ice_set_vf_mac, 7200 7107 .ndo_get_vf_config = ice_get_vf_cfg,
+1269
drivers/net/ethernet/intel/ice/ice_ptp.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2021, Intel Corporation. */ 3 + 4 + #include "ice.h" 5 + #include "ice_lib.h" 6 + 7 + /** 8 + * ice_set_tx_tstamp - Enable or disable Tx timestamping 9 + * @pf: The PF pointer to search in 10 + * @on: bool value for whether timestamps are enabled or disabled 11 + */ 12 + static void ice_set_tx_tstamp(struct ice_pf *pf, bool on) 13 + { 14 + struct ice_vsi *vsi; 15 + u32 val; 16 + u16 i; 17 + 18 + vsi = ice_get_main_vsi(pf); 19 + if (!vsi) 20 + return; 21 + 22 + /* Set the timestamp enable flag for all the Tx rings */ 23 + ice_for_each_rxq(vsi, i) { 24 + if (!vsi->tx_rings[i]) 25 + continue; 26 + vsi->tx_rings[i]->ptp_tx = on; 27 + } 28 + 29 + /* Configure the Tx timestamp interrupt */ 30 + val = rd32(&pf->hw, PFINT_OICR_ENA); 31 + if (on) 32 + val |= PFINT_OICR_TSYN_TX_M; 33 + else 34 + val &= ~PFINT_OICR_TSYN_TX_M; 35 + wr32(&pf->hw, PFINT_OICR_ENA, val); 36 + } 37 + 38 + /** 39 + * ice_set_rx_tstamp - Enable or disable Rx timestamping 40 + * @pf: The PF pointer to search in 41 + * @on: bool value for whether timestamps are enabled or disabled 42 + */ 43 + static void ice_set_rx_tstamp(struct ice_pf *pf, bool on) 44 + { 45 + struct ice_vsi *vsi; 46 + u16 i; 47 + 48 + vsi = ice_get_main_vsi(pf); 49 + if (!vsi) 50 + return; 51 + 52 + /* Set the timestamp flag for all the Rx rings */ 53 + ice_for_each_rxq(vsi, i) { 54 + if (!vsi->rx_rings[i]) 55 + continue; 56 + vsi->rx_rings[i]->ptp_rx = on; 57 + } 58 + } 59 + 60 + /** 61 + * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit 62 + * @pf: Board private structure 63 + * @ena: bool value to enable or disable time stamp 64 + * 65 + * This function will configure timestamping during PTP initialization 66 + * and deinitialization 67 + */ 68 + static void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) 69 + { 70 + ice_set_tx_tstamp(pf, ena); 71 + ice_set_rx_tstamp(pf, ena); 72 + 73 + if (ena) { 74 + pf->ptp.tstamp_config.rx_filter = 
HWTSTAMP_FILTER_ALL; 75 + pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON; 76 + } else { 77 + pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 78 + pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF; 79 + } 80 + } 81 + 82 + /** 83 + * ice_get_ptp_clock_index - Get the PTP clock index 84 + * @pf: the PF pointer 85 + * 86 + * Determine the clock index of the PTP clock associated with this device. If 87 + * this is the PF controlling the clock, just use the local access to the 88 + * clock device pointer. 89 + * 90 + * Otherwise, read from the driver shared parameters to determine the clock 91 + * index value. 92 + * 93 + * Returns: the index of the PTP clock associated with this device, or -1 if 94 + * there is no associated clock. 95 + */ 96 + int ice_get_ptp_clock_index(struct ice_pf *pf) 97 + { 98 + struct device *dev = ice_pf_to_dev(pf); 99 + enum ice_aqc_driver_params param_idx; 100 + struct ice_hw *hw = &pf->hw; 101 + u8 tmr_idx; 102 + u32 value; 103 + int err; 104 + 105 + /* Use the ptp_clock structure if we're the main PF */ 106 + if (pf->ptp.clock) 107 + return ptp_clock_index(pf->ptp.clock); 108 + 109 + tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; 110 + if (!tmr_idx) 111 + param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0; 112 + else 113 + param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1; 114 + 115 + err = ice_aq_get_driver_param(hw, param_idx, &value, NULL); 116 + if (err) { 117 + dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n", 118 + err, ice_aq_str(hw->adminq.sq_last_status)); 119 + return -1; 120 + } 121 + 122 + /* The PTP clock index is an integer, and will be between 0 and 123 + * INT_MAX. The highest bit of the driver shared parameter is used to 124 + * indicate whether or not the currently stored clock index is valid. 
125 + */ 126 + if (!(value & PTP_SHARED_CLK_IDX_VALID)) 127 + return -1; 128 + 129 + return value & ~PTP_SHARED_CLK_IDX_VALID; 130 + } 131 + 132 + /** 133 + * ice_set_ptp_clock_index - Set the PTP clock index 134 + * @pf: the PF pointer 135 + * 136 + * Set the PTP clock index for this device into the shared driver parameters, 137 + * so that other PFs associated with this device can read it. 138 + * 139 + * If the PF is unable to store the clock index, it will log an error, but 140 + * will continue operating PTP. 141 + */ 142 + static void ice_set_ptp_clock_index(struct ice_pf *pf) 143 + { 144 + struct device *dev = ice_pf_to_dev(pf); 145 + enum ice_aqc_driver_params param_idx; 146 + struct ice_hw *hw = &pf->hw; 147 + u8 tmr_idx; 148 + u32 value; 149 + int err; 150 + 151 + if (!pf->ptp.clock) 152 + return; 153 + 154 + tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; 155 + if (!tmr_idx) 156 + param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0; 157 + else 158 + param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1; 159 + 160 + value = (u32)ptp_clock_index(pf->ptp.clock); 161 + if (value > INT_MAX) { 162 + dev_err(dev, "PTP Clock index is too large to store\n"); 163 + return; 164 + } 165 + value |= PTP_SHARED_CLK_IDX_VALID; 166 + 167 + err = ice_aq_set_driver_param(hw, param_idx, value, NULL); 168 + if (err) { 169 + dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n", 170 + err, ice_aq_str(hw->adminq.sq_last_status)); 171 + } 172 + } 173 + 174 + /** 175 + * ice_clear_ptp_clock_index - Clear the PTP clock index 176 + * @pf: the PF pointer 177 + * 178 + * Clear the PTP clock index for this device. Must be called when 179 + * unregistering the PTP clock, in order to ensure other PFs stop reporting 180 + * a clock object that no longer exists. 
181 + */ 182 + static void ice_clear_ptp_clock_index(struct ice_pf *pf) 183 + { 184 + struct device *dev = ice_pf_to_dev(pf); 185 + enum ice_aqc_driver_params param_idx; 186 + struct ice_hw *hw = &pf->hw; 187 + u8 tmr_idx; 188 + int err; 189 + 190 + /* Do not clear the index if we don't own the timer */ 191 + if (!hw->func_caps.ts_func_info.src_tmr_owned) 192 + return; 193 + 194 + tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; 195 + if (!tmr_idx) 196 + param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0; 197 + else 198 + param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1; 199 + 200 + err = ice_aq_set_driver_param(hw, param_idx, 0, NULL); 201 + if (err) { 202 + dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n", 203 + err, ice_aq_str(hw->adminq.sq_last_status)); 204 + } 205 + } 206 + 207 + /** 208 + * ice_ptp_read_src_clk_reg - Read the source clock register 209 + * @pf: Board private structure 210 + * @sts: Optional parameter for holding a pair of system timestamps from 211 + * the system clock. Will be ignored if NULL is given. 
212 + */ 213 + static u64 214 + ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts) 215 + { 216 + struct ice_hw *hw = &pf->hw; 217 + u32 hi, lo, lo2; 218 + u8 tmr_idx; 219 + 220 + tmr_idx = ice_get_ptp_src_clock_index(hw); 221 + /* Read the system timestamp pre PHC read */ 222 + if (sts) 223 + ptp_read_system_prets(sts); 224 + 225 + lo = rd32(hw, GLTSYN_TIME_L(tmr_idx)); 226 + 227 + /* Read the system timestamp post PHC read */ 228 + if (sts) 229 + ptp_read_system_postts(sts); 230 + 231 + hi = rd32(hw, GLTSYN_TIME_H(tmr_idx)); 232 + lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx)); 233 + 234 + if (lo2 < lo) { 235 + /* if TIME_L rolled over read TIME_L again and update 236 + * system timestamps 237 + */ 238 + if (sts) 239 + ptp_read_system_prets(sts); 240 + lo = rd32(hw, GLTSYN_TIME_L(tmr_idx)); 241 + if (sts) 242 + ptp_read_system_postts(sts); 243 + hi = rd32(hw, GLTSYN_TIME_H(tmr_idx)); 244 + } 245 + 246 + return ((u64)hi << 32) | lo; 247 + } 248 + 249 + /** 250 + * ice_ptp_update_cached_phctime - Update the cached PHC time values 251 + * @pf: Board specific private structure 252 + * 253 + * This function updates the system time values which are cached in the PF 254 + * structure and the Rx rings. 255 + * 256 + * This function must be called periodically to ensure that the cached value 257 + * is never more than 2 seconds old. It must also be called whenever the PHC 258 + * time has been changed. 
259 + */ 260 + static void ice_ptp_update_cached_phctime(struct ice_pf *pf) 261 + { 262 + u64 systime; 263 + int i; 264 + 265 + /* Read the current PHC time */ 266 + systime = ice_ptp_read_src_clk_reg(pf, NULL); 267 + 268 + /* Update the cached PHC time stored in the PF structure */ 269 + WRITE_ONCE(pf->ptp.cached_phc_time, systime); 270 + 271 + ice_for_each_vsi(pf, i) { 272 + struct ice_vsi *vsi = pf->vsi[i]; 273 + int j; 274 + 275 + if (!vsi) 276 + continue; 277 + 278 + if (vsi->type != ICE_VSI_PF) 279 + continue; 280 + 281 + ice_for_each_rxq(vsi, j) { 282 + if (!vsi->rx_rings[j]) 283 + continue; 284 + WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime); 285 + } 286 + } 287 + } 288 + 289 + /** 290 + * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b 291 + * @cached_phc_time: recently cached copy of PHC time 292 + * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value 293 + * 294 + * Hardware captures timestamps which contain only 32 bits of nominal 295 + * nanoseconds, as opposed to the 64bit timestamps that the stack expects. 296 + * Note that the captured timestamp values may be 40 bits, but the lower 297 + * 8 bits are sub-nanoseconds and generally discarded. 298 + * 299 + * Extend the 32bit nanosecond timestamp using the following algorithm and 300 + * assumptions: 301 + * 302 + * 1) have a recently cached copy of the PHC time 303 + * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1 304 + * seconds) before or after the PHC time was captured. 305 + * 3) calculate the delta between the cached time and the timestamp 306 + * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was 307 + * captured after the PHC time. In this case, the full timestamp is just 308 + * the cached PHC time plus the delta. 309 + * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the 310 + * timestamp was captured *before* the PHC time, i.e. 
because the PHC 311 + * cache was updated after the timestamp was captured by hardware. In this 312 + * case, the full timestamp is the cached time minus the inverse delta. 313 + * 314 + * This algorithm works even if the PHC time was updated after a Tx timestamp 315 + * was requested, but before the Tx timestamp event was reported from 316 + * hardware. 317 + * 318 + * This calculation primarily relies on keeping the cached PHC time up to 319 + * date. If the timestamp was captured more than 2^31 nanoseconds after the 320 + * PHC time, it is possible that the lower 32bits of PHC time have 321 + * overflowed more than once, and we might generate an incorrect timestamp. 322 + * 323 + * This is prevented by (a) periodically updating the cached PHC time once 324 + * a second, and (b) discarding any Tx timestamp packet if it has waited for 325 + * a timestamp for more than one second. 326 + */ 327 + static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp) 328 + { 329 + u32 delta, phc_time_lo; 330 + u64 ns; 331 + 332 + /* Extract the lower 32 bits of the PHC time */ 333 + phc_time_lo = (u32)cached_phc_time; 334 + 335 + /* Calculate the delta between the lower 32bits of the cached PHC 336 + * time and the in_tstamp value 337 + */ 338 + delta = (in_tstamp - phc_time_lo); 339 + 340 + /* Do not assume that the in_tstamp is always more recent than the 341 + * cached PHC time. If the delta is large, it indicates that the 342 + * in_tstamp was taken in the past, and should be converted 343 + * forward. 
344 + */ 345 + if (delta > (U32_MAX / 2)) { 346 + /* reverse the delta calculation here */ 347 + delta = (phc_time_lo - in_tstamp); 348 + ns = cached_phc_time - delta; 349 + } else { 350 + ns = cached_phc_time + delta; 351 + } 352 + 353 + return ns; 354 + } 355 + 356 + /** 357 + * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds 358 + * @pf: Board private structure 359 + * @in_tstamp: Ingress/egress 40b timestamp value 360 + * 361 + * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal 362 + * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit. 363 + * 364 + * *--------------------------------------------------------------* 365 + * | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v | 366 + * *--------------------------------------------------------------* 367 + * 368 + * The low bit is an indicator of whether the timestamp is valid. The next 369 + * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow, 370 + * and the remaining 32 bits are the lower 32 bits of the PHC timer. 371 + * 372 + * It is assumed that the caller verifies the timestamp is valid prior to 373 + * calling this function. 374 + * 375 + * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC 376 + * time stored in the device private PTP structure as the basis for timestamp 377 + * extension. 378 + * 379 + * See ice_ptp_extend_32b_ts for a detailed explanation of the extension 380 + * algorithm. 381 + */ 382 + static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp) 383 + { 384 + const u64 mask = GENMASK_ULL(31, 0); 385 + 386 + return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time, 387 + (in_tstamp >> 8) & mask); 388 + } 389 + 390 + /** 391 + * ice_ptp_read_time - Read the time from the device 392 + * @pf: Board private structure 393 + * @ts: timespec structure to hold the current time value 394 + * @sts: Optional parameter for holding a pair of system timestamps from 395 + * the system clock. 
Will be ignored if NULL is given. 396 + * 397 + * This function reads the source clock registers and stores them in a timespec. 398 + * However, since the registers are 64 bits of nanoseconds, we must convert the 399 + * result to a timespec before we can return. 400 + */ 401 + static void 402 + ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts, 403 + struct ptp_system_timestamp *sts) 404 + { 405 + u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts); 406 + 407 + *ts = ns_to_timespec64(time_ns); 408 + } 409 + 410 + /** 411 + * ice_ptp_write_init - Set PHC time to provided value 412 + * @pf: Board private structure 413 + * @ts: timespec structure that holds the new time value 414 + * 415 + * Set the PHC time to the specified time provided in the timespec. 416 + */ 417 + static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts) 418 + { 419 + u64 ns = timespec64_to_ns(ts); 420 + struct ice_hw *hw = &pf->hw; 421 + 422 + return ice_ptp_init_time(hw, ns); 423 + } 424 + 425 + /** 426 + * ice_ptp_write_adj - Adjust PHC clock time atomically 427 + * @pf: Board private structure 428 + * @adj: Adjustment in nanoseconds 429 + * 430 + * Perform an atomic adjustment of the PHC time by the specified number of 431 + * nanoseconds. 432 + */ 433 + static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj) 434 + { 435 + struct ice_hw *hw = &pf->hw; 436 + 437 + return ice_ptp_adj_clock(hw, adj); 438 + } 439 + 440 + /** 441 + * ice_ptp_adjfine - Adjust clock increment rate 442 + * @info: the driver's PTP info structure 443 + * @scaled_ppm: Parts per million with 16-bit fractional field 444 + * 445 + * Adjust the frequency of the clock by the indicated scaled ppm from the 446 + * base frequency. 
447 + */ 448 + static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) 449 + { 450 + struct ice_pf *pf = ptp_info_to_pf(info); 451 + u64 freq, divisor = 1000000ULL; 452 + struct ice_hw *hw = &pf->hw; 453 + s64 incval, diff; 454 + int neg_adj = 0; 455 + int err; 456 + 457 + incval = ICE_PTP_NOMINAL_INCVAL_E810; 458 + 459 + if (scaled_ppm < 0) { 460 + neg_adj = 1; 461 + scaled_ppm = -scaled_ppm; 462 + } 463 + 464 + while ((u64)scaled_ppm > div_u64(U64_MAX, incval)) { 465 + /* handle overflow by scaling down the scaled_ppm and 466 + * the divisor, losing some precision 467 + */ 468 + scaled_ppm >>= 2; 469 + divisor >>= 2; 470 + } 471 + 472 + freq = (incval * (u64)scaled_ppm) >> 16; 473 + diff = div_u64(freq, divisor); 474 + 475 + if (neg_adj) 476 + incval -= diff; 477 + else 478 + incval += diff; 479 + 480 + err = ice_ptp_write_incval_locked(hw, incval); 481 + if (err) { 482 + dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n", 483 + err); 484 + return -EIO; 485 + } 486 + 487 + return 0; 488 + } 489 + 490 + /** 491 + * ice_ptp_gettimex64 - Get the time of the clock 492 + * @info: the driver's PTP info structure 493 + * @ts: timespec64 structure to hold the current time value 494 + * @sts: Optional parameter for holding a pair of system timestamps from 495 + * the system clock. Will be ignored if NULL is given. 496 + * 497 + * Read the device clock and return the correct value on ns, after converting it 498 + * into a timespec struct. 
499 + */ 500 + static int 501 + ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts, 502 + struct ptp_system_timestamp *sts) 503 + { 504 + struct ice_pf *pf = ptp_info_to_pf(info); 505 + struct ice_hw *hw = &pf->hw; 506 + 507 + if (!ice_ptp_lock(hw)) { 508 + dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n"); 509 + return -EBUSY; 510 + } 511 + 512 + ice_ptp_read_time(pf, ts, sts); 513 + ice_ptp_unlock(hw); 514 + 515 + return 0; 516 + } 517 + 518 + /** 519 + * ice_ptp_settime64 - Set the time of the clock 520 + * @info: the driver's PTP info structure 521 + * @ts: timespec64 structure that holds the new time value 522 + * 523 + * Set the device clock to the user input value. The conversion from timespec 524 + * to ns happens in the write function. 525 + */ 526 + static int 527 + ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) 528 + { 529 + struct ice_pf *pf = ptp_info_to_pf(info); 530 + struct timespec64 ts64 = *ts; 531 + struct ice_hw *hw = &pf->hw; 532 + int err; 533 + 534 + if (!ice_ptp_lock(hw)) { 535 + err = -EBUSY; 536 + goto exit; 537 + } 538 + 539 + err = ice_ptp_write_init(pf, &ts64); 540 + ice_ptp_unlock(hw); 541 + 542 + if (!err) 543 + ice_ptp_update_cached_phctime(pf); 544 + 545 + exit: 546 + if (err) { 547 + dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err); 548 + return err; 549 + } 550 + 551 + return 0; 552 + } 553 + 554 + /** 555 + * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment 556 + * @info: the driver's PTP info structure 557 + * @delta: Offset in nanoseconds to adjust the time by 558 + */ 559 + static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta) 560 + { 561 + struct timespec64 now, then; 562 + 563 + then = ns_to_timespec64(delta); 564 + ice_ptp_gettimex64(info, &now, NULL); 565 + now = timespec64_add(now, then); 566 + 567 + return ice_ptp_settime64(info, (const struct timespec64 *)&now); 568 + } 569 + 570 + /** 571 + * ice_ptp_adjtime - Adjust 
the time of the clock by the indicated delta 572 + * @info: the driver's PTP info structure 573 + * @delta: Offset in nanoseconds to adjust the time by 574 + */ 575 + static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) 576 + { 577 + struct ice_pf *pf = ptp_info_to_pf(info); 578 + struct ice_hw *hw = &pf->hw; 579 + struct device *dev; 580 + int err; 581 + 582 + dev = ice_pf_to_dev(pf); 583 + 584 + /* Hardware only supports atomic adjustments using signed 32-bit 585 + * integers. For any adjustment outside this range, perform 586 + * a non-atomic get->adjust->set flow. 587 + */ 588 + if (delta > S32_MAX || delta < S32_MIN) { 589 + dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta); 590 + return ice_ptp_adjtime_nonatomic(info, delta); 591 + } 592 + 593 + if (!ice_ptp_lock(hw)) { 594 + dev_err(dev, "PTP failed to acquire semaphore in adjtime\n"); 595 + return -EBUSY; 596 + } 597 + 598 + err = ice_ptp_write_adj(pf, delta); 599 + 600 + ice_ptp_unlock(hw); 601 + 602 + if (err) { 603 + dev_err(dev, "PTP failed to adjust time, err %d\n", err); 604 + return err; 605 + } 606 + 607 + ice_ptp_update_cached_phctime(pf); 608 + 609 + return 0; 610 + } 611 + 612 + /** 613 + * ice_ptp_get_ts_config - ioctl interface to read the timestamping config 614 + * @pf: Board private structure 615 + * @ifr: ioctl data 616 + * 617 + * Copy the timestamping config to user buffer 618 + */ 619 + int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) 620 + { 621 + struct hwtstamp_config *config; 622 + 623 + if (!test_bit(ICE_FLAG_PTP, pf->flags)) 624 + return -EIO; 625 + 626 + config = &pf->ptp.tstamp_config; 627 + 628 + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? 
629 + -EFAULT : 0; 630 + } 631 + 632 + /** 633 + * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode 634 + * @pf: Board private structure 635 + * @config: hwtstamp settings requested or saved 636 + */ 637 + static int 638 + ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config) 639 + { 640 + /* Reserved for future extensions. */ 641 + if (config->flags) 642 + return -EINVAL; 643 + 644 + switch (config->tx_type) { 645 + case HWTSTAMP_TX_OFF: 646 + ice_set_tx_tstamp(pf, false); 647 + break; 648 + case HWTSTAMP_TX_ON: 649 + ice_set_tx_tstamp(pf, true); 650 + break; 651 + default: 652 + return -ERANGE; 653 + } 654 + 655 + switch (config->rx_filter) { 656 + case HWTSTAMP_FILTER_NONE: 657 + ice_set_rx_tstamp(pf, false); 658 + break; 659 + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 660 + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 661 + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 662 + case HWTSTAMP_FILTER_PTP_V2_EVENT: 663 + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 664 + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 665 + case HWTSTAMP_FILTER_PTP_V2_SYNC: 666 + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 667 + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 668 + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 669 + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 670 + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 671 + case HWTSTAMP_FILTER_NTP_ALL: 672 + case HWTSTAMP_FILTER_ALL: 673 + config->rx_filter = HWTSTAMP_FILTER_ALL; 674 + ice_set_rx_tstamp(pf, true); 675 + break; 676 + default: 677 + return -ERANGE; 678 + } 679 + 680 + return 0; 681 + } 682 + 683 + /** 684 + * ice_ptp_set_ts_config - ioctl interface to control the timestamping 685 + * @pf: Board private structure 686 + * @ifr: ioctl data 687 + * 688 + * Get the user config and store it 689 + */ 690 + int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) 691 + { 692 + struct hwtstamp_config config; 693 + int err; 694 + 695 + if (!test_bit(ICE_FLAG_PTP, pf->flags)) 696 + return -EAGAIN; 697 + 698 + if 
(copy_from_user(&config, ifr->ifr_data, sizeof(config))) 699 + return -EFAULT; 700 + 701 + err = ice_ptp_set_timestamp_mode(pf, &config); 702 + if (err) 703 + return err; 704 + 705 + /* Save these settings for future reference */ 706 + pf->ptp.tstamp_config = config; 707 + 708 + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 709 + -EFAULT : 0; 710 + } 711 + 712 + /** 713 + * ice_ptp_rx_hwtstamp - Check for an Rx timestamp 714 + * @rx_ring: Ring to get the VSI info 715 + * @rx_desc: Receive descriptor 716 + * @skb: Particular skb to send timestamp with 717 + * 718 + * The driver receives a notification in the receive descriptor with timestamp. 719 + * The timestamp is in ns, so we must convert the result first. 720 + */ 721 + void 722 + ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, 723 + union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) 724 + { 725 + u32 ts_high; 726 + u64 ts_ns; 727 + 728 + /* Populate timesync data into skb */ 729 + if (rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID) { 730 + struct skb_shared_hwtstamps *hwtstamps; 731 + 732 + /* Use ice_ptp_extend_32b_ts directly, using the ring-specific 733 + * cached PHC value, rather than accessing the PF. This also 734 + * allows us to simply pass the upper 32bits of nanoseconds 735 + * directly. Calling ice_ptp_extend_40b_ts is unnecessary as 736 + * it would just discard these bits itself. 
737 + */ 738 + ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high); 739 + ts_ns = ice_ptp_extend_32b_ts(rx_ring->cached_phctime, ts_high); 740 + 741 + hwtstamps = skb_hwtstamps(skb); 742 + memset(hwtstamps, 0, sizeof(*hwtstamps)); 743 + hwtstamps->hwtstamp = ns_to_ktime(ts_ns); 744 + } 745 + } 746 + 747 + /** 748 + * ice_ptp_set_caps - Set PTP capabilities 749 + * @pf: Board private structure 750 + */ 751 + static void ice_ptp_set_caps(struct ice_pf *pf) 752 + { 753 + struct ptp_clock_info *info = &pf->ptp.info; 754 + struct device *dev = ice_pf_to_dev(pf); 755 + 756 + snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk", 757 + dev_driver_string(dev), dev_name(dev)); 758 + info->owner = THIS_MODULE; 759 + info->max_adj = 999999999; 760 + info->adjtime = ice_ptp_adjtime; 761 + info->adjfine = ice_ptp_adjfine; 762 + info->gettimex64 = ice_ptp_gettimex64; 763 + info->settime64 = ice_ptp_settime64; 764 + } 765 + 766 + /** 767 + * ice_ptp_create_clock - Create PTP clock device for userspace 768 + * @pf: Board private structure 769 + * 770 + * This function creates a new PTP clock device. It only creates one if we 771 + * don't already have one. Will return error if it can't create one, but success 772 + * if we already have a device. Should be used by ice_ptp_init to create clock 773 + * initially, and prevent global resets from creating new clock devices. 774 + */ 775 + static long ice_ptp_create_clock(struct ice_pf *pf) 776 + { 777 + struct ptp_clock_info *info; 778 + struct ptp_clock *clock; 779 + struct device *dev; 780 + 781 + /* No need to create a clock device if we already have one */ 782 + if (pf->ptp.clock) 783 + return 0; 784 + 785 + ice_ptp_set_caps(pf); 786 + 787 + info = &pf->ptp.info; 788 + dev = ice_pf_to_dev(pf); 789 + 790 + /* Attempt to register the clock before enabling the hardware. 
*/ 791 + clock = ptp_clock_register(info, dev); 792 + if (IS_ERR(clock)) 793 + return PTR_ERR(clock); 794 + 795 + pf->ptp.clock = clock; 796 + 797 + return 0; 798 + } 799 + 800 + /** 801 + * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port 802 + * @work: pointer to the kthread_work struct 803 + * 804 + * Process timestamps captured by the PHY associated with this port. To do 805 + * this, loop over each index with a waiting skb. 806 + * 807 + * If a given index has a valid timestamp, perform the following steps: 808 + * 809 + * 1) copy the timestamp out of the PHY register 810 + * 4) clear the timestamp valid bit in the PHY register 811 + * 5) unlock the index by clearing the associated in_use bit. 812 + * 2) extend the 40b timestamp value to get a 64bit timestamp 813 + * 3) send that timestamp to the stack 814 + * 815 + * After looping, if we still have waiting SKBs, then re-queue the work. This 816 + * may cause us effectively poll even when not strictly necessary. We do this 817 + * because it's possible a new timestamp was requested around the same time as 818 + * the interrupt. In some cases hardware might not interrupt us again when the 819 + * timestamp is captured. 820 + * 821 + * Note that we only take the tracking lock when clearing the bit and when 822 + * checking if we need to re-queue this task. The only place where bits can be 823 + * set is the hard xmit routine where an SKB has a request flag set. The only 824 + * places where we clear bits are this work function, or the periodic cleanup 825 + * thread. If the cleanup thread clears a bit we're processing we catch it 826 + * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread 827 + * starts a new timestamp, we might not begin processing it right away but we 828 + * will notice it at the end when we re-queue the work item. 
If a Tx thread 829 + * starts a new timestamp just after this function exits without re-queuing, 830 + * the interrupt when the timestamp finishes should trigger. Avoiding holding 831 + * the lock for the entire function is important in order to ensure that Tx 832 + * threads do not get blocked while waiting for the lock. 833 + */ 834 + static void ice_ptp_tx_tstamp_work(struct kthread_work *work) 835 + { 836 + struct ice_ptp_port *ptp_port; 837 + struct ice_ptp_tx *tx; 838 + struct ice_pf *pf; 839 + struct ice_hw *hw; 840 + u8 idx; 841 + 842 + tx = container_of(work, struct ice_ptp_tx, work); 843 + if (!tx->init) 844 + return; 845 + 846 + ptp_port = container_of(tx, struct ice_ptp_port, tx); 847 + pf = ptp_port_to_pf(ptp_port); 848 + hw = &pf->hw; 849 + 850 + for_each_set_bit(idx, tx->in_use, tx->len) { 851 + struct skb_shared_hwtstamps shhwtstamps = {}; 852 + u8 phy_idx = idx + tx->quad_offset; 853 + u64 raw_tstamp, tstamp; 854 + struct sk_buff *skb; 855 + int err; 856 + 857 + err = ice_read_phy_tstamp(hw, tx->quad, phy_idx, 858 + &raw_tstamp); 859 + if (err) 860 + continue; 861 + 862 + /* Check if the timestamp is valid */ 863 + if (!(raw_tstamp & ICE_PTP_TS_VALID)) 864 + continue; 865 + 866 + /* clear the timestamp register, so that it won't show valid 867 + * again when re-used. 868 + */ 869 + ice_clear_phy_tstamp(hw, tx->quad, phy_idx); 870 + 871 + /* The timestamp is valid, so we'll go ahead and clear this 872 + * index and then send the timestamp up to the stack. 873 + */ 874 + spin_lock(&tx->lock); 875 + clear_bit(idx, tx->in_use); 876 + skb = tx->tstamps[idx].skb; 877 + tx->tstamps[idx].skb = NULL; 878 + spin_unlock(&tx->lock); 879 + 880 + /* it's (unlikely but) possible we raced with the cleanup 881 + * thread for discarding old timestamp requests. 
882 + */ 883 + if (!skb) 884 + continue; 885 + 886 + /* Extend the timestamp using cached PHC time */ 887 + tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp); 888 + shhwtstamps.hwtstamp = ns_to_ktime(tstamp); 889 + 890 + skb_tstamp_tx(skb, &shhwtstamps); 891 + dev_kfree_skb_any(skb); 892 + } 893 + 894 + /* Check if we still have work to do. If so, re-queue this task to 895 + * poll for remaining timestamps. 896 + */ 897 + spin_lock(&tx->lock); 898 + if (!bitmap_empty(tx->in_use, tx->len)) 899 + kthread_queue_work(pf->ptp.kworker, &tx->work); 900 + spin_unlock(&tx->lock); 901 + } 902 + 903 + /** 904 + * ice_ptp_request_ts - Request an available Tx timestamp index 905 + * @tx: the PTP Tx timestamp tracker to request from 906 + * @skb: the SKB to associate with this timestamp request 907 + */ 908 + s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) 909 + { 910 + u8 idx; 911 + 912 + /* Check if this tracker is initialized */ 913 + if (!tx->init) 914 + return -1; 915 + 916 + spin_lock(&tx->lock); 917 + /* Find and set the first available index */ 918 + idx = find_first_zero_bit(tx->in_use, tx->len); 919 + if (idx < tx->len) { 920 + /* We got a valid index that no other thread could have set. Store 921 + * a reference to the skb and the start time to allow discarding old 922 + * requests. 923 + */ 924 + set_bit(idx, tx->in_use); 925 + tx->tstamps[idx].start = jiffies; 926 + tx->tstamps[idx].skb = skb_get(skb); 927 + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 928 + } 929 + 930 + spin_unlock(&tx->lock); 931 + 932 + /* return the appropriate PHY timestamp register index, -1 if no 933 + * indexes were available. 934 + */ 935 + if (idx >= tx->len) 936 + return -1; 937 + else 938 + return idx + tx->quad_offset; 939 + } 940 + 941 + /** 942 + * ice_ptp_process_ts - Spawn kthread work to handle timestamps 943 + * @pf: Board private structure 944 + * 945 + * Queue work required to process the PTP Tx timestamps outside of interrupt 946 + * context. 
947 + */ 948 + void ice_ptp_process_ts(struct ice_pf *pf) 949 + { 950 + if (pf->ptp.port.tx.init) 951 + kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work); 952 + } 953 + 954 + /** 955 + * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps 956 + * @tx: Tx tracking structure to initialize 957 + * 958 + * Assumes that the length has already been initialized. Do not call directly, 959 + * use the ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead. 960 + */ 961 + static int 962 + ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx) 963 + { 964 + tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL); 965 + if (!tx->tstamps) 966 + return -ENOMEM; 967 + 968 + tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL); 969 + if (!tx->in_use) { 970 + kfree(tx->tstamps); 971 + tx->tstamps = NULL; 972 + return -ENOMEM; 973 + } 974 + 975 + spin_lock_init(&tx->lock); 976 + kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work); 977 + 978 + tx->init = 1; 979 + 980 + return 0; 981 + } 982 + 983 + /** 984 + * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker 985 + * @pf: Board private structure 986 + * @tx: the tracker to flush 987 + */ 988 + static void 989 + ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) 990 + { 991 + u8 idx; 992 + 993 + for (idx = 0; idx < tx->len; idx++) { 994 + u8 phy_idx = idx + tx->quad_offset; 995 + 996 + /* Clear any potential residual timestamp in the PHY block */ 997 + if (!pf->hw.reset_ongoing) 998 + ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx); 999 + 1000 + if (tx->tstamps[idx].skb) { 1001 + dev_kfree_skb_any(tx->tstamps[idx].skb); 1002 + tx->tstamps[idx].skb = NULL; 1003 + } 1004 + } 1005 + } 1006 + 1007 + /** 1008 + * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker 1009 + * @pf: Board private structure 1010 + * @tx: Tx tracking structure to release 1011 + * 1012 + * Free memory associated with the Tx timestamp tracker. 
1013 + */ 1014 + static void 1015 + ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) 1016 + { 1017 + tx->init = 0; 1018 + 1019 + kthread_cancel_work_sync(&tx->work); 1020 + 1021 + ice_ptp_flush_tx_tracker(pf, tx); 1022 + 1023 + kfree(tx->tstamps); 1024 + tx->tstamps = NULL; 1025 + 1026 + kfree(tx->in_use); 1027 + tx->in_use = NULL; 1028 + 1029 + tx->len = 0; 1030 + } 1031 + 1032 + /** 1033 + * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps 1034 + * @pf: Board private structure 1035 + * @tx: the Tx tracking structure to initialize 1036 + * 1037 + * Initialize the Tx timestamp tracker for this PF. For E810 devices, each 1038 + * port has its own block of timestamps, independent of the other ports. 1039 + */ 1040 + static int 1041 + ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx) 1042 + { 1043 + tx->quad = pf->hw.port_info->lport; 1044 + tx->quad_offset = 0; 1045 + tx->len = INDEX_PER_QUAD; 1046 + 1047 + return ice_ptp_alloc_tx_tracker(tx); 1048 + } 1049 + 1050 + /** 1051 + * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped 1052 + * @tx: PTP Tx tracker to clean up 1053 + * 1054 + * Loop through the Tx timestamp requests and see if any of them have been 1055 + * waiting for a long time. Discard any SKBs that have been waiting for more 1056 + * than 2 seconds. This is long enough to be reasonably sure that the 1057 + * timestamp will never be captured. This might happen if the packet gets 1058 + * discarded before it reaches the PHY timestamping block. 
1059 + */ 1060 + static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx) 1061 + { 1062 + u8 idx; 1063 + 1064 + if (!tx->init) 1065 + return; 1066 + 1067 + for_each_set_bit(idx, tx->in_use, tx->len) { 1068 + struct sk_buff *skb; 1069 + 1070 + /* Check if this SKB has been waiting for too long */ 1071 + if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ)) 1072 + continue; 1073 + 1074 + spin_lock(&tx->lock); 1075 + skb = tx->tstamps[idx].skb; 1076 + tx->tstamps[idx].skb = NULL; 1077 + clear_bit(idx, tx->in_use); 1078 + spin_unlock(&tx->lock); 1079 + 1080 + /* Free the SKB after we've cleared the bit */ 1081 + dev_kfree_skb_any(skb); 1082 + } 1083 + } 1084 + 1085 + static void ice_ptp_periodic_work(struct kthread_work *work) 1086 + { 1087 + struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work); 1088 + struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp); 1089 + 1090 + if (!test_bit(ICE_FLAG_PTP, pf->flags)) 1091 + return; 1092 + 1093 + ice_ptp_update_cached_phctime(pf); 1094 + 1095 + ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx); 1096 + 1097 + /* Run twice a second */ 1098 + kthread_queue_delayed_work(ptp->kworker, &ptp->work, 1099 + msecs_to_jiffies(500)); 1100 + } 1101 + 1102 + /** 1103 + * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device 1104 + * @pf: Board private structure 1105 + * 1106 + * Setup and initialize a PTP clock device that represents the device hardware 1107 + * clock. Save the clock index for other functions connected to the same 1108 + * hardware resource. 
1109 + */ 1110 + static int ice_ptp_init_owner(struct ice_pf *pf) 1111 + { 1112 + struct device *dev = ice_pf_to_dev(pf); 1113 + struct ice_hw *hw = &pf->hw; 1114 + struct timespec64 ts; 1115 + u8 src_idx; 1116 + int err; 1117 + 1118 + wr32(hw, GLTSYN_SYNC_DLAY, 0); 1119 + 1120 + /* Clear some HW residue and enable source clock */ 1121 + src_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1122 + 1123 + /* Enable source clocks */ 1124 + wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M); 1125 + 1126 + /* Enable PHY time sync */ 1127 + err = ice_ptp_init_phy_e810(hw); 1128 + if (err) 1129 + goto err_exit; 1130 + 1131 + /* Clear event status indications for auxiliary pins */ 1132 + (void)rd32(hw, GLTSYN_STAT(src_idx)); 1133 + 1134 + /* Acquire the global hardware lock */ 1135 + if (!ice_ptp_lock(hw)) { 1136 + err = -EBUSY; 1137 + goto err_exit; 1138 + } 1139 + 1140 + /* Write the increment time value to PHY and LAN */ 1141 + err = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810); 1142 + if (err) { 1143 + ice_ptp_unlock(hw); 1144 + goto err_exit; 1145 + } 1146 + 1147 + ts = ktime_to_timespec64(ktime_get_real()); 1148 + /* Write the initial Time value to PHY and LAN */ 1149 + err = ice_ptp_write_init(pf, &ts); 1150 + if (err) { 1151 + ice_ptp_unlock(hw); 1152 + goto err_exit; 1153 + } 1154 + 1155 + /* Release the global hardware lock */ 1156 + ice_ptp_unlock(hw); 1157 + 1158 + /* Ensure we have a clock device */ 1159 + err = ice_ptp_create_clock(pf); 1160 + if (err) 1161 + goto err_clk; 1162 + 1163 + /* Store the PTP clock index for other PFs */ 1164 + ice_set_ptp_clock_index(pf); 1165 + 1166 + return 0; 1167 + 1168 + err_clk: 1169 + pf->ptp.clock = NULL; 1170 + err_exit: 1171 + dev_err(dev, "PTP failed to register clock, err %d\n", err); 1172 + 1173 + return err; 1174 + } 1175 + 1176 + /** 1177 + * ice_ptp_init - Initialize the PTP support after device probe or reset 1178 + * @pf: Board private structure 1179 + * 1180 + * This function sets device up for PTP 
support. The first time it is run, it 1181 + * will create a clock device. It does not create a clock device if one 1182 + * already exists. It also reconfigures the device after a reset. 1183 + */ 1184 + void ice_ptp_init(struct ice_pf *pf) 1185 + { 1186 + struct device *dev = ice_pf_to_dev(pf); 1187 + struct kthread_worker *kworker; 1188 + struct ice_hw *hw = &pf->hw; 1189 + int err; 1190 + 1191 + /* PTP is currently only supported on E810 devices */ 1192 + if (!ice_is_e810(hw)) 1193 + return; 1194 + 1195 + /* Check if this PF owns the source timer */ 1196 + if (hw->func_caps.ts_func_info.src_tmr_owned) { 1197 + err = ice_ptp_init_owner(pf); 1198 + if (err) 1199 + return; 1200 + } 1201 + 1202 + /* Disable timestamping for both Tx and Rx */ 1203 + ice_ptp_cfg_timestamp(pf, false); 1204 + 1205 + /* Initialize the PTP port Tx timestamp tracker */ 1206 + ice_ptp_init_tx_e810(pf, &pf->ptp.port.tx); 1207 + 1208 + /* Initialize work functions */ 1209 + kthread_init_delayed_work(&pf->ptp.work, ice_ptp_periodic_work); 1210 + 1211 + /* Allocate a kworker for handling work required for the ports 1212 + * connected to the PTP hardware clock. 
1213 + */ 1214 + kworker = kthread_create_worker(0, "ice-ptp-%s", dev_name(dev)); 1215 + if (IS_ERR(kworker)) { 1216 + err = PTR_ERR(kworker); 1217 + goto err_kworker; 1218 + } 1219 + pf->ptp.kworker = kworker; 1220 + 1221 + set_bit(ICE_FLAG_PTP, pf->flags); 1222 + 1223 + /* Start periodic work going */ 1224 + kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0); 1225 + 1226 + dev_info(dev, "PTP init successful\n"); 1227 + return; 1228 + 1229 + err_kworker: 1230 + /* If we registered a PTP clock, release it */ 1231 + if (pf->ptp.clock) { 1232 + ptp_clock_unregister(pf->ptp.clock); 1233 + pf->ptp.clock = NULL; 1234 + } 1235 + dev_err(dev, "PTP failed %d\n", err); 1236 + } 1237 + 1238 + /** 1239 + * ice_ptp_release - Disable the driver/HW support and unregister the clock 1240 + * @pf: Board private structure 1241 + * 1242 + * This function handles the cleanup work required from the initialization by 1243 + * clearing out the important information and unregistering the clock 1244 + */ 1245 + void ice_ptp_release(struct ice_pf *pf) 1246 + { 1247 + /* Disable timestamping for both Tx and Rx */ 1248 + ice_ptp_cfg_timestamp(pf, false); 1249 + 1250 + ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); 1251 + 1252 + clear_bit(ICE_FLAG_PTP, pf->flags); 1253 + 1254 + kthread_cancel_delayed_work_sync(&pf->ptp.work); 1255 + 1256 + if (pf->ptp.kworker) { 1257 + kthread_destroy_worker(pf->ptp.kworker); 1258 + pf->ptp.kworker = NULL; 1259 + } 1260 + 1261 + if (!pf->ptp.clock) 1262 + return; 1263 + 1264 + ice_clear_ptp_clock_index(pf); 1265 + ptp_clock_unregister(pf->ptp.clock); 1266 + pf->ptp.clock = NULL; 1267 + 1268 + dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n"); 1269 + }
+161
drivers/net/ethernet/intel/ice/ice_ptp.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (C) 2021, Intel Corporation. */ 3 + 4 + #ifndef _ICE_PTP_H_ 5 + #define _ICE_PTP_H_ 6 + 7 + #include <linux/ptp_clock_kernel.h> 8 + #include <linux/kthread.h> 9 + 10 + #include "ice_ptp_hw.h" 11 + 12 + /* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp 13 + * is stored in a buffer of registers. Depending on the specific hardware, 14 + * this buffer might be shared across multiple PHY ports. 15 + * 16 + * On transmit of a packet to be timestamped, software is responsible for 17 + * selecting an open index. Hardware makes no attempt to lock or prevent 18 + * re-use of an index for multiple packets. 19 + * 20 + * To handle this, timestamp indexes must be tracked by software to ensure 21 + * that an index is not re-used for multiple transmitted packets. The 22 + * structures and functions declared in this file track the available Tx 23 + * register indexes, as well as provide storage for the SKB pointers. 24 + * 25 + * To allow multiple ports to access the shared register block independently, 26 + * the blocks are split up so that indexes are assigned to each port based on 27 + * hardware logical port number. 28 + */ 29 + 30 + /** 31 + * struct ice_tx_tstamp - Tracking for a single Tx timestamp 32 + * @skb: pointer to the SKB for this timestamp request 33 + * @start: jiffies when the timestamp was first requested 34 + * 35 + * This structure tracks a single timestamp request. The SKB pointer is 36 + * provided when initiating a request. The start time is used to ensure that 37 + * we discard old requests that were not fulfilled within a 2 second time 38 + * window. 
39 + */ 40 + struct ice_tx_tstamp { 41 + struct sk_buff *skb; 42 + unsigned long start; 43 + }; 44 + 45 + /** 46 + * struct ice_ptp_tx - Tracking structure for all Tx timestamp requests on a port 47 + * @work: work function to handle processing of Tx timestamps 48 + * @lock: lock to prevent concurrent write to in_use bitmap 49 + * @tstamps: array of len to store outstanding requests 50 + * @in_use: bitmap of len to indicate which slots are in use 51 + * @quad: which quad the timestamps are captured in 52 + * @quad_offset: offset into timestamp block of the quad to get the real index 53 + * @len: length of the tstamps and in_use fields. 54 + * @init: if true, the tracker is initialized; 55 + */ 56 + struct ice_ptp_tx { 57 + struct kthread_work work; 58 + spinlock_t lock; /* lock protecting in_use bitmap */ 59 + struct ice_tx_tstamp *tstamps; 60 + unsigned long *in_use; 61 + u8 quad; 62 + u8 quad_offset; 63 + u8 len; 64 + u8 init; 65 + }; 66 + 67 + /* Quad and port information for initializing timestamp blocks */ 68 + #define INDEX_PER_QUAD 64 69 + #define INDEX_PER_PORT (INDEX_PER_QUAD / ICE_PORTS_PER_QUAD) 70 + 71 + /** 72 + * struct ice_ptp_port - data used to initialize an external port for PTP 73 + * 74 + * This structure contains PTP data related to the external ports. Currently 75 + * it is used for tracking the Tx timestamps of a port. In the future this 76 + * structure will also hold information for the E822 port initialization 77 + * logic. 
78 + * 79 + * @tx: Tx timestamp tracking for this port 80 + */ 81 + struct ice_ptp_port { 82 + struct ice_ptp_tx tx; 83 + }; 84 + 85 + /** 86 + * struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK 87 + * @port: data for the PHY port initialization procedure 88 + * @work: delayed work function for periodic tasks 89 + * @cached_phc_time: a cached copy of the PHC time for timestamp extension 90 + * @kworker: kwork thread for handling periodic work 91 + * @info: structure defining PTP hardware capabilities 92 + * @clock: pointer to registered PTP clock device 93 + * @tstamp_config: hardware timestamping configuration 94 + */ 95 + struct ice_ptp { 96 + struct ice_ptp_port port; 97 + struct kthread_delayed_work work; 98 + u64 cached_phc_time; 99 + struct kthread_worker *kworker; 100 + struct ptp_clock_info info; 101 + struct ptp_clock *clock; 102 + struct hwtstamp_config tstamp_config; 103 + }; 104 + 105 + #define __ptp_port_to_ptp(p) \ 106 + container_of((p), struct ice_ptp, port) 107 + #define ptp_port_to_pf(p) \ 108 + container_of(__ptp_port_to_ptp((p)), struct ice_pf, ptp) 109 + 110 + #define __ptp_info_to_ptp(i) \ 111 + container_of((i), struct ice_ptp, info) 112 + #define ptp_info_to_pf(i) \ 113 + container_of(__ptp_info_to_ptp((i)), struct ice_pf, ptp) 114 + 115 + #define PTP_SHARED_CLK_IDX_VALID BIT(31) 116 + #define ICE_PTP_TS_VALID BIT(0) 117 + 118 + #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) 119 + struct ice_pf; 120 + int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr); 121 + int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr); 122 + int ice_get_ptp_clock_index(struct ice_pf *pf); 123 + 124 + s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); 125 + void ice_ptp_process_ts(struct ice_pf *pf); 126 + 127 + void 128 + ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, 129 + union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb); 130 + void ice_ptp_init(struct ice_pf *pf); 131 + void ice_ptp_release(struct ice_pf 
*pf); 132 + #else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ 133 + static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) 134 + { 135 + return -EOPNOTSUPP; 136 + } 137 + 138 + static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) 139 + { 140 + return -EOPNOTSUPP; 141 + } 142 + 143 + static inline int ice_get_ptp_clock_index(struct ice_pf *pf) 144 + { 145 + return -1; 146 + } 147 + 148 + static inline 149 + ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) 150 + { 151 + return -1; 152 + } 153 + 154 + static inline void ice_ptp_process_ts(struct ice_pf *pf) { } 155 + static inline void 156 + ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, 157 + union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { } 158 + static inline void ice_ptp_init(struct ice_pf *pf) { } 159 + static inline void ice_ptp_release(struct ice_pf *pf) { } 160 + #endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ 161 + #endif /* _ICE_PTP_H_ */
+653
drivers/net/ethernet/intel/ice/ice_ptp_hw.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2021, Intel Corporation. */ 3 + 4 + #include "ice_common.h" 5 + #include "ice_ptp_hw.h" 6 + 7 + /* Low level functions for interacting with and managing the device clock used 8 + * for the Precision Time Protocol. 9 + * 10 + * The ice hardware represents the current time using three registers: 11 + * 12 + * GLTSYN_TIME_H GLTSYN_TIME_L GLTSYN_TIME_R 13 + * +---------------+ +---------------+ +---------------+ 14 + * | 32 bits | | 32 bits | | 32 bits | 15 + * +---------------+ +---------------+ +---------------+ 16 + * 17 + * The registers are incremented every clock tick using a 40bit increment 18 + * value defined over two registers: 19 + * 20 + * GLTSYN_INCVAL_H GLTSYN_INCVAL_L 21 + * +---------------+ +---------------+ 22 + * | 8 bit s | | 32 bits | 23 + * +---------------+ +---------------+ 24 + * 25 + * The increment value is added to the GLSTYN_TIME_R and GLSTYN_TIME_L 26 + * registers every clock source tick. Depending on the specific device 27 + * configuration, the clock source frequency could be one of a number of 28 + * values. 29 + * 30 + * For E810 devices, the increment frequency is 812.5 MHz 31 + * 32 + * The hardware captures timestamps in the PHY for incoming packets, and for 33 + * outgoing packets on request. To support this, the PHY maintains a timer 34 + * that matches the lower 64 bits of the global source timer. 35 + * 36 + * In order to ensure that the PHY timers and the source timer are equivalent, 37 + * shadow registers are used to prepare the desired initial values. A special 38 + * sync command is issued to trigger copying from the shadow registers into 39 + * the appropriate source and PHY registers simultaneously. 40 + */ 41 + 42 + /** 43 + * ice_get_ptp_src_clock_index - determine source clock index 44 + * @hw: pointer to HW struct 45 + * 46 + * Determine the source clock index currently in use, based on device 47 + * capabilities reported during initialization. 
48 + */ 49 + u8 ice_get_ptp_src_clock_index(struct ice_hw *hw) 50 + { 51 + return hw->func_caps.ts_func_info.tmr_index_assoc; 52 + } 53 + 54 + /* E810 functions 55 + * 56 + * The following functions operate on the E810 series devices which use 57 + * a separate external PHY. 58 + */ 59 + 60 + /** 61 + * ice_read_phy_reg_e810 - Read register from external PHY on E810 62 + * @hw: pointer to the HW struct 63 + * @addr: the address to read from 64 + * @val: On return, the value read from the PHY 65 + * 66 + * Read a register from the external PHY on the E810 device. 67 + */ 68 + static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val) 69 + { 70 + struct ice_sbq_msg_input msg = {0}; 71 + int status; 72 + 73 + msg.msg_addr_low = lower_16_bits(addr); 74 + msg.msg_addr_high = upper_16_bits(addr); 75 + msg.opcode = ice_sbq_msg_rd; 76 + msg.dest_dev = rmn_0; 77 + 78 + status = ice_sbq_rw_reg(hw, &msg); 79 + if (status) { 80 + ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n", 81 + status); 82 + return status; 83 + } 84 + 85 + *val = msg.data; 86 + 87 + return 0; 88 + } 89 + 90 + /** 91 + * ice_write_phy_reg_e810 - Write register on external PHY on E810 92 + * @hw: pointer to the HW struct 93 + * @addr: the address to writem to 94 + * @val: the value to write to the PHY 95 + * 96 + * Write a value to a register of the external PHY on the E810 device. 
97 + */ 98 + static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val) 99 + { 100 + struct ice_sbq_msg_input msg = {0}; 101 + int status; 102 + 103 + msg.msg_addr_low = lower_16_bits(addr); 104 + msg.msg_addr_high = upper_16_bits(addr); 105 + msg.opcode = ice_sbq_msg_wr; 106 + msg.dest_dev = rmn_0; 107 + msg.data = val; 108 + 109 + status = ice_sbq_rw_reg(hw, &msg); 110 + if (status) { 111 + ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n", 112 + status); 113 + return status; 114 + } 115 + 116 + return 0; 117 + } 118 + 119 + /** 120 + * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY 121 + * @hw: pointer to the HW struct 122 + * @lport: the lport to read from 123 + * @idx: the timestamp index to read 124 + * @tstamp: on return, the 40bit timestamp value 125 + * 126 + * Read a 40bit timestamp value out of the timestamp block of the external PHY 127 + * on the E810 device. 128 + */ 129 + static int 130 + ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp) 131 + { 132 + u32 lo_addr, hi_addr, lo, hi; 133 + int status; 134 + 135 + lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx); 136 + hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx); 137 + 138 + status = ice_read_phy_reg_e810(hw, lo_addr, &lo); 139 + if (status) { 140 + ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n", 141 + status); 142 + return status; 143 + } 144 + 145 + status = ice_read_phy_reg_e810(hw, hi_addr, &hi); 146 + if (status) { 147 + ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n", 148 + status); 149 + return status; 150 + } 151 + 152 + /* For E810 devices, the timestamp is reported with the lower 32 bits 153 + * in the low register, and the upper 8 bits in the high register. 
154 + */ 155 + *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M); 156 + 157 + return 0; 158 + } 159 + 160 + /** 161 + * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY 162 + * @hw: pointer to the HW struct 163 + * @lport: the lport to read from 164 + * @idx: the timestamp index to reset 165 + * 166 + * Clear a timestamp, resetting its valid bit, from the timestamp block of the 167 + * external PHY on the E810 device. 168 + */ 169 + static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx) 170 + { 171 + u32 lo_addr, hi_addr; 172 + int status; 173 + 174 + lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx); 175 + hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx); 176 + 177 + status = ice_write_phy_reg_e810(hw, lo_addr, 0); 178 + if (status) { 179 + ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, status %d\n", 180 + status); 181 + return status; 182 + } 183 + 184 + status = ice_write_phy_reg_e810(hw, hi_addr, 0); 185 + if (status) { 186 + ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, status %d\n", 187 + status); 188 + return status; 189 + } 190 + 191 + return 0; 192 + } 193 + 194 + /** 195 + * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY 196 + * @hw: pointer to HW struct 197 + * 198 + * Enable the timesync PTP functionality for the external PHY connected to 199 + * this function. 
200 + */ 201 + int ice_ptp_init_phy_e810(struct ice_hw *hw) 202 + { 203 + int status; 204 + u8 tmr_idx; 205 + 206 + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 207 + status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx), 208 + GLTSYN_ENA_TSYN_ENA_M); 209 + if (status) 210 + ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n", 211 + status); 212 + 213 + return status; 214 + } 215 + 216 + /** 217 + * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time 218 + * @hw: Board private structure 219 + * @time: Time to initialize the PHY port clock to 220 + * 221 + * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation setting the 222 + * initial clock time. The time will not actually be programmed until the 223 + * driver issues an INIT_TIME command. 224 + * 225 + * The time value is the upper 32 bits of the PHY timer, usually in units of 226 + * nominal nanoseconds. 227 + */ 228 + static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time) 229 + { 230 + int status; 231 + u8 tmr_idx; 232 + 233 + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 234 + status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0); 235 + if (status) { 236 + ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, status %d\n", 237 + status); 238 + return status; 239 + } 240 + 241 + status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time); 242 + if (status) { 243 + ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, status %d\n", 244 + status); 245 + return status; 246 + } 247 + 248 + return 0; 249 + } 250 + 251 + /** 252 + * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment 253 + * @hw: pointer to HW struct 254 + * @adj: adjustment value to program 255 + * 256 + * Prepare the PHY port for an atomic adjustment by programming the PHY 257 + * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment 258 + * is completed by issuing an ADJ_TIME sync command. 
259 + * 260 + * The adjustment value only contains the portion used for the upper 32bits of 261 + * the PHY timer, usually in units of nominal nanoseconds. Negative 262 + * adjustments are supported using 2s complement arithmetic. 263 + */ 264 + static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj) 265 + { 266 + int status; 267 + u8 tmr_idx; 268 + 269 + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 270 + 271 + /* Adjustments are represented as signed 2's complement values in 272 + * nanoseconds. Sub-nanosecond adjustment is not supported. 273 + */ 274 + status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0); 275 + if (status) { 276 + ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, status %d\n", 277 + status); 278 + return status; 279 + } 280 + 281 + status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj); 282 + if (status) { 283 + ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, status %d\n", 284 + status); 285 + return status; 286 + } 287 + 288 + return 0; 289 + } 290 + 291 + /** 292 + * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change 293 + * @hw: pointer to HW struct 294 + * @incval: The new 40bit increment value to prepare 295 + * 296 + * Prepare the PHY port for a new increment value by programming the PHY 297 + * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is 298 + * completed by issuing an INIT_INCVAL command. 
299 + */ 300 + static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval) 301 + { 302 + u32 high, low; 303 + int status; 304 + u8 tmr_idx; 305 + 306 + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 307 + low = lower_32_bits(incval); 308 + high = upper_32_bits(incval); 309 + 310 + status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low); 311 + if (status) { 312 + ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, status %d\n", 313 + status); 314 + return status; 315 + } 316 + 317 + status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high); 318 + if (status) { 319 + ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, status %d\n", 320 + status); 321 + return status; 322 + } 323 + 324 + return 0; 325 + } 326 + 327 + /** 328 + * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command 329 + * @hw: pointer to HW struct 330 + * @cmd: Command to be sent to the port 331 + * 332 + * Prepare the external PHYs connected to this device for a timer sync 333 + * command. 
334 + */ 335 + static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) 336 + { 337 + u32 cmd_val, val; 338 + int status; 339 + 340 + switch (cmd) { 341 + case INIT_TIME: 342 + cmd_val = GLTSYN_CMD_INIT_TIME; 343 + break; 344 + case INIT_INCVAL: 345 + cmd_val = GLTSYN_CMD_INIT_INCVAL; 346 + break; 347 + case ADJ_TIME: 348 + cmd_val = GLTSYN_CMD_ADJ_TIME; 349 + break; 350 + case READ_TIME: 351 + cmd_val = GLTSYN_CMD_READ_TIME; 352 + break; 353 + case ADJ_TIME_AT_TIME: 354 + cmd_val = GLTSYN_CMD_ADJ_INIT_TIME; 355 + break; 356 + } 357 + 358 + /* Read, modify, write */ 359 + status = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val); 360 + if (status) { 361 + ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, status %d\n", status); 362 + return status; 363 + } 364 + 365 + /* Modify necessary bits only and perform write */ 366 + val &= ~TS_CMD_MASK_E810; 367 + val |= cmd_val; 368 + 369 + status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val); 370 + if (status) { 371 + ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, status %d\n", status); 372 + return status; 373 + } 374 + 375 + return 0; 376 + } 377 + 378 + /* Device agnostic functions 379 + * 380 + * The following functions implement useful behavior to hide the differences 381 + * between E810 and other devices. They call the device-specific 382 + * implementations where necessary. 383 + * 384 + * Currently, the driver only supports E810, but future work will enable 385 + * support for E822-based devices. 386 + */ 387 + 388 + /** 389 + * ice_ptp_lock - Acquire PTP global semaphore register lock 390 + * @hw: pointer to the HW struct 391 + * 392 + * Acquire the global PTP hardware semaphore lock. Returns true if the lock 393 + * was acquired, false otherwise. 394 + * 395 + * The PFTSYN_SEM register sets the busy bit on read, returning the previous 396 + * value. 
If software sees the busy bit cleared, this means that this function 397 + * acquired the lock (and the busy bit is now set). If software sees the busy 398 + * bit set, it means that another function acquired the lock. 399 + * 400 + * Software must clear the busy bit with a write to release the lock for other 401 + * functions when done. 402 + */ 403 + bool ice_ptp_lock(struct ice_hw *hw) 404 + { 405 + u32 hw_lock; 406 + int i; 407 + 408 + #define MAX_TRIES 5 409 + 410 + for (i = 0; i < MAX_TRIES; i++) { 411 + hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); 412 + hw_lock = hw_lock & PFTSYN_SEM_BUSY_M; 413 + if (hw_lock) { 414 + /* Somebody is holding the lock */ 415 + usleep_range(10000, 20000); 416 + continue; 417 + } else { 418 + break; 419 + } 420 + } 421 + 422 + return !hw_lock; 423 + } 424 + 425 + /** 426 + * ice_ptp_unlock - Release PTP global semaphore register lock 427 + * @hw: pointer to the HW struct 428 + * 429 + * Release the global PTP hardware semaphore lock. This is done by writing to 430 + * the PFTSYN_SEM register. 431 + */ 432 + void ice_ptp_unlock(struct ice_hw *hw) 433 + { 434 + wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0); 435 + } 436 + 437 + /** 438 + * ice_ptp_src_cmd - Prepare source timer for a timer command 439 + * @hw: pointer to HW structure 440 + * @cmd: Timer command 441 + * 442 + * Prepare the source timer for an upcoming timer sync command. 
443 + */ 444 + static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) 445 + { 446 + u32 cmd_val; 447 + u8 tmr_idx; 448 + 449 + tmr_idx = ice_get_ptp_src_clock_index(hw); 450 + cmd_val = tmr_idx << SEL_CPK_SRC; 451 + 452 + switch (cmd) { 453 + case INIT_TIME: 454 + cmd_val |= GLTSYN_CMD_INIT_TIME; 455 + break; 456 + case INIT_INCVAL: 457 + cmd_val |= GLTSYN_CMD_INIT_INCVAL; 458 + break; 459 + case ADJ_TIME: 460 + cmd_val |= GLTSYN_CMD_ADJ_TIME; 461 + break; 462 + case ADJ_TIME_AT_TIME: 463 + cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME; 464 + break; 465 + case READ_TIME: 466 + cmd_val |= GLTSYN_CMD_READ_TIME; 467 + break; 468 + } 469 + 470 + wr32(hw, GLTSYN_CMD, cmd_val); 471 + } 472 + 473 + /** 474 + * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command 475 + * @hw: pointer to HW struct 476 + * @cmd: the command to issue 477 + * 478 + * Prepare the source timer and PHY timers and then trigger the requested 479 + * command. This causes the shadow registers previously written in preparation 480 + * for the command to be synchronously applied to both the source and PHY 481 + * timers. 
482 + */ 483 + static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) 484 + { 485 + int status; 486 + 487 + /* First, prepare the source timer */ 488 + ice_ptp_src_cmd(hw, cmd); 489 + 490 + /* Next, prepare the ports */ 491 + status = ice_ptp_port_cmd_e810(hw, cmd); 492 + if (status) { 493 + ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, status %d\n", 494 + cmd, status); 495 + return status; 496 + } 497 + 498 + /* Write the sync command register to drive both source and PHY timer commands 499 + * synchronously 500 + */ 501 + wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); 502 + 503 + return 0; 504 + } 505 + 506 + /** 507 + * ice_ptp_init_time - Initialize device time to provided value 508 + * @hw: pointer to HW struct 509 + * @time: 64bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H) 510 + * 511 + * Initialize the device to the specified time provided. This requires a three 512 + * step process: 513 + * 514 + * 1) write the new init time to the source timer shadow registers 515 + * 2) write the new init time to the PHY timer shadow registers 516 + * 3) issue an init_time timer command to synchronously switch both the source 517 + * and port timers to the new init time value at the next clock cycle. 
518 + */ 519 + int ice_ptp_init_time(struct ice_hw *hw, u64 time) 520 + { 521 + int status; 522 + u8 tmr_idx; 523 + 524 + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 525 + 526 + /* Source timers */ 527 + wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time)); 528 + wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time)); 529 + wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0); 530 + 531 + /* PHY timers */ 532 + /* Fill Rx and Tx ports and send msg to PHY */ 533 + status = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF); 534 + if (status) 535 + return status; 536 + 537 + return ice_ptp_tmr_cmd(hw, INIT_TIME); 538 + } 539 + 540 + /** 541 + * ice_ptp_write_incval - Program PHC with new increment value 542 + * @hw: pointer to HW struct 543 + * @incval: Source timer increment value per clock cycle 544 + * 545 + * Program the PHC with a new increment value. This requires a three-step 546 + * process: 547 + * 548 + * 1) Write the increment value to the source timer shadow registers 549 + * 2) Write the increment value to the PHY timer shadow registers 550 + * 3) Issue an INIT_INCVAL timer command to synchronously switch both the 551 + * source and port timers to the new increment value at the next clock 552 + * cycle. 
553 + */ 554 + int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) 555 + { 556 + int status; 557 + u8 tmr_idx; 558 + 559 + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 560 + 561 + /* Shadow Adjust */ 562 + wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval)); 563 + wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval)); 564 + 565 + status = ice_ptp_prep_phy_incval_e810(hw, incval); 566 + if (status) 567 + return status; 568 + 569 + return ice_ptp_tmr_cmd(hw, INIT_INCVAL); 570 + } 571 + 572 + /** 573 + * ice_ptp_write_incval_locked - Program new incval while holding semaphore 574 + * @hw: pointer to HW struct 575 + * @incval: Source timer increment value per clock cycle 576 + * 577 + * Program a new PHC incval while holding the PTP semaphore. 578 + */ 579 + int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval) 580 + { 581 + int status; 582 + 583 + if (!ice_ptp_lock(hw)) 584 + return -EBUSY; 585 + 586 + status = ice_ptp_write_incval(hw, incval); 587 + 588 + ice_ptp_unlock(hw); 589 + 590 + return status; 591 + } 592 + 593 + /** 594 + * ice_ptp_adj_clock - Adjust PHC clock time atomically 595 + * @hw: pointer to HW struct 596 + * @adj: Adjustment in nanoseconds 597 + * 598 + * Perform an atomic adjustment of the PHC time by the specified number of 599 + * nanoseconds. This requires a three-step process: 600 + * 601 + * 1) Write the adjustment to the source timer shadow registers 602 + * 2) Write the adjustment to the PHY timer shadow registers 603 + * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to 604 + * both the source and port timers at the next clock cycle. 605 + */ 606 + int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) 607 + { 608 + int status; 609 + u8 tmr_idx; 610 + 611 + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 612 + 613 + /* Write the desired clock adjustment into the GLTSYN_SHADJ register. 
614 + * For an ADJ_TIME command, this set of registers represents the value 615 + * to add to the clock time. It supports subtraction by interpreting 616 + * the value as a 2's complement integer. 617 + */ 618 + wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0); 619 + wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj); 620 + 621 + status = ice_ptp_prep_phy_adj_e810(hw, adj); 622 + if (status) 623 + return status; 624 + 625 + return ice_ptp_tmr_cmd(hw, ADJ_TIME); 626 + } 627 + 628 + /** 629 + * ice_read_phy_tstamp - Read a PHY timestamp from the timestamo block 630 + * @hw: pointer to the HW struct 631 + * @block: the block to read from 632 + * @idx: the timestamp index to read 633 + * @tstamp: on return, the 40bit timestamp value 634 + * 635 + * Read a 40bit timestamp value out of the timestamp block. 636 + */ 637 + int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) 638 + { 639 + return ice_read_phy_tstamp_e810(hw, block, idx, tstamp); 640 + } 641 + 642 + /** 643 + * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block 644 + * @hw: pointer to the HW struct 645 + * @block: the block to read from 646 + * @idx: the timestamp index to reset 647 + * 648 + * Clear a timestamp, resetting its valid bit, from the timestamp block. 649 + */ 650 + int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) 651 + { 652 + return ice_clear_phy_tstamp_e810(hw, block, idx); 653 + }
+79
drivers/net/ethernet/intel/ice/ice_ptp_hw.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (C) 2021, Intel Corporation. */ 3 + 4 + #ifndef _ICE_PTP_HW_H_ 5 + #define _ICE_PTP_HW_H_ 6 + 7 + enum ice_ptp_tmr_cmd { 8 + INIT_TIME, 9 + INIT_INCVAL, 10 + ADJ_TIME, 11 + ADJ_TIME_AT_TIME, 12 + READ_TIME 13 + }; 14 + 15 + /* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for 16 + * the E810 devices. Based off of a PLL with an 812.5 MHz frequency. 17 + */ 18 + #define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL 19 + 20 + /* Device agnostic functions */ 21 + u8 ice_get_ptp_src_clock_index(struct ice_hw *hw); 22 + bool ice_ptp_lock(struct ice_hw *hw); 23 + void ice_ptp_unlock(struct ice_hw *hw); 24 + int ice_ptp_init_time(struct ice_hw *hw, u64 time); 25 + int ice_ptp_write_incval(struct ice_hw *hw, u64 incval); 26 + int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval); 27 + int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj); 28 + int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp); 29 + int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx); 30 + 31 + /* E810 family functions */ 32 + int ice_ptp_init_phy_e810(struct ice_hw *hw); 33 + 34 + #define PFTSYN_SEM_BYTES 4 35 + 36 + /* PHY timer commands */ 37 + #define SEL_CPK_SRC 8 38 + 39 + /* Time Sync command Definitions */ 40 + #define GLTSYN_CMD_INIT_TIME BIT(0) 41 + #define GLTSYN_CMD_INIT_INCVAL BIT(1) 42 + #define GLTSYN_CMD_ADJ_TIME BIT(2) 43 + #define GLTSYN_CMD_ADJ_INIT_TIME (BIT(2) | BIT(3)) 44 + #define GLTSYN_CMD_READ_TIME BIT(7) 45 + 46 + #define TS_CMD_MASK_E810 0xFF 47 + #define SYNC_EXEC_CMD 0x3 48 + 49 + /* E810 timesync enable register */ 50 + #define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4)) 51 + 52 + /* E810 shadow init time registers */ 53 + #define ETH_GLTSYN_SHTIME_0(i) (0x03000368 + ((i) * 32)) 54 + #define ETH_GLTSYN_SHTIME_L(i) (0x0300036C + ((i) * 32)) 55 + 56 + /* E810 shadow time adjust registers */ 57 + #define ETH_GLTSYN_SHADJ_L(_i) (0x03000378 + ((_i) * 32)) 58 + 
#define ETH_GLTSYN_SHADJ_H(_i) (0x0300037C + ((_i) * 32)) 59 + 60 + /* E810 timer command register */ 61 + #define ETH_GLTSYN_CMD 0x03000344 62 + 63 + /* Source timer incval macros */ 64 + #define INCVAL_HIGH_M 0xFF 65 + 66 + /* Timestamp block macros */ 67 + #define TS_LOW_M 0xFFFFFFFF 68 + #define TS_HIGH_S 32 69 + 70 + #define BYTES_PER_IDX_ADDR_L_U 8 71 + 72 + /* External PHY timestamp address */ 73 + #define TS_EXT(a, port, idx) ((a) + (0x1000 * (port)) + \ 74 + ((idx) * BYTES_PER_IDX_ADDR_L_U)) 75 + 76 + #define LOW_TX_MEMORY_BANK_START 0x03090000 77 + #define HIGH_TX_MEMORY_BANK_START 0x03090004 78 + 79 + #endif /* _ICE_PTP_HW_H_ */
+92
drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (C) 2021, Intel Corporation. */ 3 + 4 + #ifndef _ICE_SBQ_CMD_H_ 5 + #define _ICE_SBQ_CMD_H_ 6 + 7 + /* This header file defines the Sideband Queue commands, error codes and 8 + * descriptor format. It is shared between Firmware and Software. 9 + */ 10 + 11 + /* Sideband Queue command structure and opcodes */ 12 + enum ice_sbq_opc { 13 + /* Sideband Queue commands */ 14 + ice_sbq_opc_neigh_dev_req = 0x0C00, 15 + ice_sbq_opc_neigh_dev_ev = 0x0C01 16 + }; 17 + 18 + /* Sideband Queue descriptor. Indirect command 19 + * and non posted 20 + */ 21 + struct ice_sbq_cmd_desc { 22 + __le16 flags; 23 + __le16 opcode; 24 + __le16 datalen; 25 + __le16 cmd_retval; 26 + 27 + /* Opaque message data */ 28 + __le32 cookie_high; 29 + __le32 cookie_low; 30 + 31 + union { 32 + __le16 cmd_len; 33 + __le16 cmpl_len; 34 + } param0; 35 + 36 + u8 reserved[6]; 37 + __le32 addr_high; 38 + __le32 addr_low; 39 + }; 40 + 41 + struct ice_sbq_evt_desc { 42 + __le16 flags; 43 + __le16 opcode; 44 + __le16 datalen; 45 + __le16 cmd_retval; 46 + u8 data[24]; 47 + }; 48 + 49 + enum ice_sbq_msg_dev { 50 + rmn_0 = 0x02, 51 + rmn_1 = 0x03, 52 + rmn_2 = 0x04, 53 + cgu = 0x06 54 + }; 55 + 56 + enum ice_sbq_msg_opcode { 57 + ice_sbq_msg_rd = 0x00, 58 + ice_sbq_msg_wr = 0x01 59 + }; 60 + 61 + #define ICE_SBQ_MSG_FLAGS 0x40 62 + #define ICE_SBQ_MSG_SBE_FBE 0x0F 63 + 64 + struct ice_sbq_msg_req { 65 + u8 dest_dev; 66 + u8 src_dev; 67 + u8 opcode; 68 + u8 flags; 69 + u8 sbe_fbe; 70 + u8 func_id; 71 + __le16 msg_addr_low; 72 + __le32 msg_addr_high; 73 + __le32 data; 74 + }; 75 + 76 + struct ice_sbq_msg_cmpl { 77 + u8 dest_dev; 78 + u8 src_dev; 79 + u8 opcode; 80 + u8 flags; 81 + __le32 data; 82 + }; 83 + 84 + /* Internal struct */ 85 + struct ice_sbq_msg_input { 86 + u8 dest_dev; 87 + u8 opcode; 88 + u16 msg_addr_low; 89 + u32 msg_addr_high; 90 + u32 data; 91 + }; 92 + #endif /* _ICE_SBQ_CMD_H_ */
+37
drivers/net/ethernet/intel/ice/ice_txrx.c
··· 2137 2137 } 2138 2138 2139 2139 /** 2140 + * ice_tstamp - set up context descriptor for hardware timestamp 2141 + * @tx_ring: pointer to the Tx ring to send buffer on 2142 + * @skb: pointer to the SKB we're sending 2143 + * @first: Tx buffer 2144 + * @off: Tx offload parameters 2145 + */ 2146 + static void 2147 + ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb, 2148 + struct ice_tx_buf *first, struct ice_tx_offload_params *off) 2149 + { 2150 + s8 idx; 2151 + 2152 + /* only timestamp the outbound packet if the user has requested it */ 2153 + if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) 2154 + return; 2155 + 2156 + if (!tx_ring->ptp_tx) 2157 + return; 2158 + 2159 + /* Tx timestamps cannot be sampled when doing TSO */ 2160 + if (first->tx_flags & ICE_TX_FLAGS_TSO) 2161 + return; 2162 + 2163 + /* Grab an open timestamp slot */ 2164 + idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb); 2165 + if (idx < 0) 2166 + return; 2167 + 2168 + off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2169 + (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) | 2170 + ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S)); 2171 + first->tx_flags |= ICE_TX_FLAGS_TSYN; 2172 + } 2173 + 2174 + /** 2140 2175 * ice_xmit_frame_ring - Sends buffer on Tx ring 2141 2176 * @skb: send buffer 2142 2177 * @tx_ring: ring to send buffer on ··· 2239 2204 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | 2240 2205 ICE_TX_CTX_DESC_SWTCH_UPLINK << 2241 2206 ICE_TXD_CTX_QW1_CMD_S); 2207 + 2208 + ice_tstamp(tx_ring, skb, first, &offload); 2242 2209 2243 2210 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { 2244 2211 struct ice_tx_ctx_desc *cdesc;
+5
drivers/net/ethernet/intel/ice/ice_txrx.h
··· 118 118 * freed instead of returned like skb packets. 119 119 */ 120 120 #define ICE_TX_FLAGS_DUMMY_PKT BIT(3) 121 + #define ICE_TX_FLAGS_TSYN BIT(4) 121 122 #define ICE_TX_FLAGS_IPV4 BIT(5) 122 123 #define ICE_TX_FLAGS_IPV6 BIT(6) 123 124 #define ICE_TX_FLAGS_TUNNEL BIT(7) ··· 312 311 u32 txq_teid; /* Added Tx queue TEID */ 313 312 u16 rx_buf_len; 314 313 u8 dcb_tc; /* Traffic class of ring */ 314 + struct ice_ptp_tx *tx_tstamps; 315 + u64 cached_phctime; 316 + u8 ptp_rx:1; 317 + u8 ptp_tx:1; 315 318 } ____cacheline_internodealigned_in_smp; 316 319 317 320 static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
+3
drivers/net/ethernet/intel/ice/ice_txrx_lib.c
··· 175 175 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 176 176 177 177 ice_rx_csum(rx_ring, skb, rx_desc, ptype); 178 + 179 + if (rx_ring->ptp_rx) 180 + ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); 178 181 } 179 182 180 183 /**
+62
drivers/net/ethernet/intel/ice/ice_type.h
··· 14 14 #include "ice_lan_tx_rx.h" 15 15 #include "ice_flex_type.h" 16 16 #include "ice_protocol_type.h" 17 + #include "ice_sbq_cmd.h" 17 18 18 19 static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) 19 20 { ··· 49 48 #define ICE_DBG_RDMA BIT_ULL(15) 50 49 #define ICE_DBG_PKG BIT_ULL(16) 51 50 #define ICE_DBG_RES BIT_ULL(17) 51 + #define ICE_DBG_PTP BIT_ULL(19) 52 52 #define ICE_DBG_AQ_MSG BIT_ULL(24) 53 53 #define ICE_DBG_AQ_DESC BIT_ULL(25) 54 54 #define ICE_DBG_AQ_DESC_BUF BIT_ULL(26) ··· 266 264 u8 rss_table_entry_width; /* RSS Entry width in bits */ 267 265 268 266 u8 dcb; 267 + u8 ieee_1588; 269 268 u8 rdma; 270 269 271 270 bool nvm_update_pending_nvm; ··· 279 276 #define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3) 280 277 }; 281 278 279 + /* IEEE 1588 TIME_SYNC specific info */ 280 + /* Function specific definitions */ 281 + #define ICE_TS_FUNC_ENA_M BIT(0) 282 + #define ICE_TS_SRC_TMR_OWND_M BIT(1) 283 + #define ICE_TS_TMR_ENA_M BIT(2) 284 + #define ICE_TS_TMR_IDX_OWND_S 4 285 + #define ICE_TS_TMR_IDX_OWND_M BIT(4) 286 + #define ICE_TS_CLK_FREQ_S 16 287 + #define ICE_TS_CLK_FREQ_M ICE_M(0x7, ICE_TS_CLK_FREQ_S) 288 + #define ICE_TS_CLK_SRC_S 20 289 + #define ICE_TS_CLK_SRC_M BIT(20) 290 + #define ICE_TS_TMR_IDX_ASSOC_S 24 291 + #define ICE_TS_TMR_IDX_ASSOC_M BIT(24) 292 + 293 + struct ice_ts_func_info { 294 + /* Function specific info */ 295 + u32 clk_freq; 296 + u8 clk_src; 297 + u8 tmr_index_assoc; 298 + u8 ena; 299 + u8 tmr_index_owned; 300 + u8 src_tmr_owned; 301 + u8 tmr_ena; 302 + }; 303 + 304 + /* Device specific definitions */ 305 + #define ICE_TS_TMR0_OWNR_M 0x7 306 + #define ICE_TS_TMR0_OWND_M BIT(3) 307 + #define ICE_TS_TMR1_OWNR_S 4 308 + #define ICE_TS_TMR1_OWNR_M ICE_M(0x7, ICE_TS_TMR1_OWNR_S) 309 + #define ICE_TS_TMR1_OWND_M BIT(7) 310 + #define ICE_TS_DEV_ENA_M BIT(24) 311 + #define ICE_TS_TMR0_ENA_M BIT(25) 312 + #define ICE_TS_TMR1_ENA_M BIT(26) 313 + 314 + struct ice_ts_dev_info { 315 + /* Device specific info */ 316 + u32 
ena_ports; 317 + u32 tmr_own_map; 318 + u32 tmr0_owner; 319 + u32 tmr1_owner; 320 + u8 tmr0_owned; 321 + u8 tmr1_owned; 322 + u8 ena; 323 + u8 tmr0_ena; 324 + u8 tmr1_ena; 325 + }; 326 + 282 327 /* Function specific capabilities */ 283 328 struct ice_hw_func_caps { 284 329 struct ice_hw_common_caps common_cap; ··· 335 284 u32 guar_num_vsi; 336 285 u32 fd_fltr_guar; /* Number of filters guaranteed */ 337 286 u32 fd_fltr_best_effort; /* Number of best effort filters */ 287 + struct ice_ts_func_info ts_func_info; 338 288 }; 339 289 340 290 /* Device wide capabilities */ ··· 344 292 u32 num_vfs_exposed; /* Total number of VFs exposed */ 345 293 u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */ 346 294 u32 num_flow_director_fltr; /* Number of FD filters available */ 295 + struct ice_ts_dev_info ts_dev_info; 347 296 u32 num_funcs; 348 297 }; 349 298 ··· 807 754 808 755 /* Control Queue info */ 809 756 struct ice_ctl_q_info adminq; 757 + struct ice_ctl_q_info sbq; 810 758 struct ice_ctl_q_info mailboxq; 811 759 812 760 u8 api_branch; /* API branch version */ ··· 842 788 u8 intrl_gran; 843 789 844 790 u8 ucast_shared; /* true if VSIs can share unicast addr */ 791 + 792 + #define ICE_PHY_PER_NAC 1 793 + #define ICE_MAX_QUAD 2 794 + #define ICE_NUM_QUAD_TYPE 2 795 + #define ICE_PORTS_PER_QUAD 4 796 + #define ICE_PHY_0_LAST_QUAD 1 797 + #define ICE_PORTS_PER_PHY 8 798 + #define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY 845 799 846 800 /* Active package version (currently active) */ 847 801 struct ice_pkg_ver active_pkg_ver;
+12
include/linux/kernel.h
/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((u32)((n) & 0xffffffff))

/**
 * upper_16_bits - return bits 16-31 of a number
 * @n: the number we're accessing
 */
#define upper_16_bits(n) ((u16)((n) >> 16))

/**
 * lower_16_bits - return bits 0-15 of a number
 * @n: the number we're accessing
 */
#define lower_16_bits(n) ((u16)((n) & 0xffff))

struct completion;
struct pt_regs;
struct user;