Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2021, Intel Corporation. */
3
4#include "ice.h"
5#include "ice_lib.h"
6
7#define E810_OUT_PROP_DELAY_NS 1
8
9#define UNKNOWN_INCVAL_E822 0x100000000ULL
10
/* Pin descriptor table for the E810-T SMA/U.FL connectors.
 * ice_get_sma_config_e810t() copies these entries by array position, so the
 * row order must match the pin index constants (GNSS, SMA1, UFL1, SMA2, UFL2).
 */
static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
	/* name    idx   func         chan */
	{ "GNSS",  GNSS, PTP_PF_EXTTS, 0, { 0, } },
	{ "SMA1",  SMA1, PTP_PF_NONE, 1, { 0, } },
	{ "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
	{ "SMA2",  SMA2, PTP_PF_NONE, 2, { 0, } },
	{ "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
};
19
/**
 * ice_get_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure (array of
 *            NUM_PTP_PINS_E810T entries filled in by this function)
 *
 * Read the configuration of the SMA control logic and put it into the
 * ptp_pin_desc structure.
 *
 * Returns 0 on success, or the error from reading the SMA control register.
 */
static int
ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
{
	u8 data, i;
	int status;

	/* Read initial pin state */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* initialize with defaults copied from the static pin table */
	for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
		snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
			 "%s", ice_pin_desc_e810t[i].name);
		ptp_pins[i].index = ice_pin_desc_e810t[i].index;
		ptp_pins[i].func = ice_pin_desc_e810t[i].func;
		ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
	}

	/* Parse SMA1/UFL1: decode the DIR_EN/TX_EN bit pair */
	switch (data & ICE_SMA1_MASK_E810T) {
	case ICE_SMA1_MASK_E810T:
	default:
		/* both bits set: pair disabled */
		ptp_pins[SMA1].func = PTP_PF_NONE;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_DIR_EN_E810T:
		/* SMA1 configured as output */
		ptp_pins[SMA1].func = PTP_PF_PEROUT;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_TX_EN_E810T:
		/* SMA1 configured as input */
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case 0:
		/* no bits set: SMA1 input plus U.FL1 output */
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_PEROUT;
		break;
	}

	/* Parse SMA2/UFL2: decode the DIR_EN/TX_EN/UFL2_RX_DIS bits */
	switch (data & ICE_SMA2_MASK_E810T) {
	case ICE_SMA2_MASK_E810T:
	default:
		/* all bits set: pair disabled */
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		/* SMA2 input, U.FL2 receive disabled */
		ptp_pins[SMA2].func = PTP_PF_EXTTS;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		/* SMA2 output, U.FL2 receive disabled */
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
		/* U.FL2 input only */
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	case ICE_SMA2_DIR_EN_E810T:
		/* SMA2 output plus U.FL2 input */
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	}

	return 0;
}
96
/**
 * ice_ptp_set_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Set the configuration of the SMA control logic based on the configuration in
 * the ptp_pins parameter.
 *
 * Returns 0 on success, -EINVAL for an inconsistent pin combination, or the
 * error from reading/writing the SMA control register.
 */
static int
ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
			     const struct ptp_pin_desc *ptp_pins)
{
	int status;
	u8 data;

	/* SMA1 and UFL1 cannot be set to TX at the same time */
	if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
	    ptp_pins[UFL1].func == PTP_PF_PEROUT)
		return -EINVAL;

	/* SMA2 and UFL2 cannot be set to RX at the same time */
	if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
	    ptp_pins[UFL2].func == PTP_PF_EXTTS)
		return -EINVAL;

	/* Read initial pin state value */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* Set the right state based on the desired configuration.
	 * Start from a cleared SMA1 bit field and OR in the bits matching
	 * the requested function pair.
	 */
	data &= ~ICE_SMA1_MASK_E810T;
	if (ptp_pins[SMA1].func == PTP_PF_NONE &&
	    ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
		data |= ICE_SMA1_MASK_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX");
		data |= ICE_SMA1_TX_EN_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		/* U.FL 1 TX will always enable SMA 1 RX */
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		/* all SMA1 bits cleared selects this combination */
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 TX");
		data |= ICE_SMA1_DIR_EN_E810T;
	}

	/* Same approach for the SMA2/U.FL2 bit field */
	data &= ~ICE_SMA2_MASK_E810T;
	if (ptp_pins[SMA2].func == PTP_PF_NONE &&
	    ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
		data |= ICE_SMA2_MASK_E810T;
	} else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 RX");
		data |= (ICE_SMA2_TX_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "UFL2 RX");
		data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX");
		data |= (ICE_SMA2_DIR_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
		data |= ICE_SMA2_DIR_EN_E810T;
	}

	return ice_write_sma_ctrl_e810t(hw, data);
}
177
178/**
179 * ice_ptp_set_sma_e810t
180 * @info: the driver's PTP info structure
181 * @pin: pin index in kernel structure
182 * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
183 *
184 * Set the configuration of a single SMA pin
185 */
186static int
187ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
188 enum ptp_pin_function func)
189{
190 struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
191 struct ice_pf *pf = ptp_info_to_pf(info);
192 struct ice_hw *hw = &pf->hw;
193 int err;
194
195 if (pin < SMA1 || func > PTP_PF_PEROUT)
196 return -EOPNOTSUPP;
197
198 err = ice_get_sma_config_e810t(hw, ptp_pins);
199 if (err)
200 return err;
201
202 /* Disable the same function on the other pin sharing the channel */
203 if (pin == SMA1 && ptp_pins[UFL1].func == func)
204 ptp_pins[UFL1].func = PTP_PF_NONE;
205 if (pin == UFL1 && ptp_pins[SMA1].func == func)
206 ptp_pins[SMA1].func = PTP_PF_NONE;
207
208 if (pin == SMA2 && ptp_pins[UFL2].func == func)
209 ptp_pins[UFL2].func = PTP_PF_NONE;
210 if (pin == UFL2 && ptp_pins[SMA2].func == func)
211 ptp_pins[SMA2].func = PTP_PF_NONE;
212
213 /* Set up new pin function in the temp table */
214 ptp_pins[pin].func = func;
215
216 return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
217}
218
219/**
220 * ice_verify_pin_e810t
221 * @info: the driver's PTP info structure
222 * @pin: Pin index
223 * @func: Assigned function
224 * @chan: Assigned channel
225 *
226 * Verify if pin supports requested pin function. If the Check pins consistency.
227 * Reconfigure the SMA logic attached to the given pin to enable its
228 * desired functionality
229 */
230static int
231ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
232 enum ptp_pin_function func, unsigned int chan)
233{
234 /* Don't allow channel reassignment */
235 if (chan != ice_pin_desc_e810t[pin].chan)
236 return -EOPNOTSUPP;
237
238 /* Check if functions are properly assigned */
239 switch (func) {
240 case PTP_PF_NONE:
241 break;
242 case PTP_PF_EXTTS:
243 if (pin == UFL1)
244 return -EOPNOTSUPP;
245 break;
246 case PTP_PF_PEROUT:
247 if (pin == UFL2 || pin == GNSS)
248 return -EOPNOTSUPP;
249 break;
250 case PTP_PF_PHYSYNC:
251 return -EOPNOTSUPP;
252 }
253
254 return ice_ptp_set_sma_e810t(info, pin, func);
255}
256
257/**
258 * ice_set_tx_tstamp - Enable or disable Tx timestamping
259 * @pf: The PF pointer to search in
260 * @on: bool value for whether timestamps are enabled or disabled
261 */
262static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
263{
264 struct ice_vsi *vsi;
265 u32 val;
266 u16 i;
267
268 vsi = ice_get_main_vsi(pf);
269 if (!vsi)
270 return;
271
272 /* Set the timestamp enable flag for all the Tx rings */
273 ice_for_each_txq(vsi, i) {
274 if (!vsi->tx_rings[i])
275 continue;
276 vsi->tx_rings[i]->ptp_tx = on;
277 }
278
279 /* Configure the Tx timestamp interrupt */
280 val = rd32(&pf->hw, PFINT_OICR_ENA);
281 if (on)
282 val |= PFINT_OICR_TSYN_TX_M;
283 else
284 val &= ~PFINT_OICR_TSYN_TX_M;
285 wr32(&pf->hw, PFINT_OICR_ENA, val);
286
287 pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
288}
289
290/**
291 * ice_set_rx_tstamp - Enable or disable Rx timestamping
292 * @pf: The PF pointer to search in
293 * @on: bool value for whether timestamps are enabled or disabled
294 */
295static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
296{
297 struct ice_vsi *vsi;
298 u16 i;
299
300 vsi = ice_get_main_vsi(pf);
301 if (!vsi)
302 return;
303
304 /* Set the timestamp flag for all the Rx rings */
305 ice_for_each_rxq(vsi, i) {
306 if (!vsi->rx_rings[i])
307 continue;
308 vsi->rx_rings[i]->ptp_rx = on;
309 }
310
311 pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
312 HWTSTAMP_FILTER_NONE;
313}
314
/**
 * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit
 * @pf: Board private structure
 * @ena: bool value to enable or disable time stamp
 *
 * This function will configure timestamping during PTP initialization
 * and deinitialization. Enables (or disables) both Tx and Rx timestamping
 * together; the two helpers also update pf->ptp.tstamp_config.
 */
void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
{
	ice_set_tx_tstamp(pf, ena);
	ice_set_rx_tstamp(pf, ena);
}
328
329/**
330 * ice_get_ptp_clock_index - Get the PTP clock index
331 * @pf: the PF pointer
332 *
333 * Determine the clock index of the PTP clock associated with this device. If
334 * this is the PF controlling the clock, just use the local access to the
335 * clock device pointer.
336 *
337 * Otherwise, read from the driver shared parameters to determine the clock
338 * index value.
339 *
340 * Returns: the index of the PTP clock associated with this device, or -1 if
341 * there is no associated clock.
342 */
343int ice_get_ptp_clock_index(struct ice_pf *pf)
344{
345 struct device *dev = ice_pf_to_dev(pf);
346 enum ice_aqc_driver_params param_idx;
347 struct ice_hw *hw = &pf->hw;
348 u8 tmr_idx;
349 u32 value;
350 int err;
351
352 /* Use the ptp_clock structure if we're the main PF */
353 if (pf->ptp.clock)
354 return ptp_clock_index(pf->ptp.clock);
355
356 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
357 if (!tmr_idx)
358 param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
359 else
360 param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
361
362 err = ice_aq_get_driver_param(hw, param_idx, &value, NULL);
363 if (err) {
364 dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n",
365 err, ice_aq_str(hw->adminq.sq_last_status));
366 return -1;
367 }
368
369 /* The PTP clock index is an integer, and will be between 0 and
370 * INT_MAX. The highest bit of the driver shared parameter is used to
371 * indicate whether or not the currently stored clock index is valid.
372 */
373 if (!(value & PTP_SHARED_CLK_IDX_VALID))
374 return -1;
375
376 return value & ~PTP_SHARED_CLK_IDX_VALID;
377}
378
379/**
380 * ice_set_ptp_clock_index - Set the PTP clock index
381 * @pf: the PF pointer
382 *
383 * Set the PTP clock index for this device into the shared driver parameters,
384 * so that other PFs associated with this device can read it.
385 *
386 * If the PF is unable to store the clock index, it will log an error, but
387 * will continue operating PTP.
388 */
389static void ice_set_ptp_clock_index(struct ice_pf *pf)
390{
391 struct device *dev = ice_pf_to_dev(pf);
392 enum ice_aqc_driver_params param_idx;
393 struct ice_hw *hw = &pf->hw;
394 u8 tmr_idx;
395 u32 value;
396 int err;
397
398 if (!pf->ptp.clock)
399 return;
400
401 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
402 if (!tmr_idx)
403 param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
404 else
405 param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
406
407 value = (u32)ptp_clock_index(pf->ptp.clock);
408 if (value > INT_MAX) {
409 dev_err(dev, "PTP Clock index is too large to store\n");
410 return;
411 }
412 value |= PTP_SHARED_CLK_IDX_VALID;
413
414 err = ice_aq_set_driver_param(hw, param_idx, value, NULL);
415 if (err) {
416 dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n",
417 err, ice_aq_str(hw->adminq.sq_last_status));
418 }
419}
420
421/**
422 * ice_clear_ptp_clock_index - Clear the PTP clock index
423 * @pf: the PF pointer
424 *
425 * Clear the PTP clock index for this device. Must be called when
426 * unregistering the PTP clock, in order to ensure other PFs stop reporting
427 * a clock object that no longer exists.
428 */
429static void ice_clear_ptp_clock_index(struct ice_pf *pf)
430{
431 struct device *dev = ice_pf_to_dev(pf);
432 enum ice_aqc_driver_params param_idx;
433 struct ice_hw *hw = &pf->hw;
434 u8 tmr_idx;
435 int err;
436
437 /* Do not clear the index if we don't own the timer */
438 if (!hw->func_caps.ts_func_info.src_tmr_owned)
439 return;
440
441 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
442 if (!tmr_idx)
443 param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
444 else
445 param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
446
447 err = ice_aq_set_driver_param(hw, param_idx, 0, NULL);
448 if (err) {
449 dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n",
450 err, ice_aq_str(hw->adminq.sq_last_status));
451 }
452}
453
/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * Returns the 64-bit source timer value assembled from the TIME_L/TIME_H
 * register pair. The low word is read first (bracketed by the optional
 * system timestamps), then the high word, then the low word again to
 * detect a rollover between the two reads.
 */
static u64
ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* lo2 < lo means TIME_L wrapped between the two reads, so the hi
	 * value may belong to the next second; redo the bracketed sequence
	 */
	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}
491
492/**
493 * ice_ptp_update_cached_phctime - Update the cached PHC time values
494 * @pf: Board specific private structure
495 *
496 * This function updates the system time values which are cached in the PF
497 * structure and the Rx rings.
498 *
499 * This function must be called periodically to ensure that the cached value
500 * is never more than 2 seconds old. It must also be called whenever the PHC
501 * time has been changed.
502 */
503static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
504{
505 u64 systime;
506 int i;
507
508 /* Read the current PHC time */
509 systime = ice_ptp_read_src_clk_reg(pf, NULL);
510
511 /* Update the cached PHC time stored in the PF structure */
512 WRITE_ONCE(pf->ptp.cached_phc_time, systime);
513
514 ice_for_each_vsi(pf, i) {
515 struct ice_vsi *vsi = pf->vsi[i];
516 int j;
517
518 if (!vsi)
519 continue;
520
521 if (vsi->type != ICE_VSI_PF)
522 continue;
523
524 ice_for_each_rxq(vsi, j) {
525 if (!vsi->rx_rings[j])
526 continue;
527 WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
528 }
529 }
530}
531
532/**
533 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
534 * @cached_phc_time: recently cached copy of PHC time
535 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
536 *
537 * Hardware captures timestamps which contain only 32 bits of nominal
538 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
539 * Note that the captured timestamp values may be 40 bits, but the lower
540 * 8 bits are sub-nanoseconds and generally discarded.
541 *
542 * Extend the 32bit nanosecond timestamp using the following algorithm and
543 * assumptions:
544 *
545 * 1) have a recently cached copy of the PHC time
546 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
547 * seconds) before or after the PHC time was captured.
548 * 3) calculate the delta between the cached time and the timestamp
549 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
550 * captured after the PHC time. In this case, the full timestamp is just
551 * the cached PHC time plus the delta.
552 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
553 * timestamp was captured *before* the PHC time, i.e. because the PHC
554 * cache was updated after the timestamp was captured by hardware. In this
555 * case, the full timestamp is the cached time minus the inverse delta.
556 *
557 * This algorithm works even if the PHC time was updated after a Tx timestamp
558 * was requested, but before the Tx timestamp event was reported from
559 * hardware.
560 *
561 * This calculation primarily relies on keeping the cached PHC time up to
562 * date. If the timestamp was captured more than 2^31 nanoseconds after the
563 * PHC time, it is possible that the lower 32bits of PHC time have
564 * overflowed more than once, and we might generate an incorrect timestamp.
565 *
566 * This is prevented by (a) periodically updating the cached PHC time once
567 * a second, and (b) discarding any Tx timestamp packet if it has waited for
568 * a timestamp for more than one second.
569 */
570static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
571{
572 u32 delta, phc_time_lo;
573 u64 ns;
574
575 /* Extract the lower 32 bits of the PHC time */
576 phc_time_lo = (u32)cached_phc_time;
577
578 /* Calculate the delta between the lower 32bits of the cached PHC
579 * time and the in_tstamp value
580 */
581 delta = (in_tstamp - phc_time_lo);
582
583 /* Do not assume that the in_tstamp is always more recent than the
584 * cached PHC time. If the delta is large, it indicates that the
585 * in_tstamp was taken in the past, and should be converted
586 * forward.
587 */
588 if (delta > (U32_MAX / 2)) {
589 /* reverse the delta calculation here */
590 delta = (phc_time_lo - in_tstamp);
591 ns = cached_phc_time - delta;
592 } else {
593 ns = cached_phc_time + delta;
594 }
595
596 return ns;
597}
598
599/**
600 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
601 * @pf: Board private structure
602 * @in_tstamp: Ingress/egress 40b timestamp value
603 *
604 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
605 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
606 *
607 * *--------------------------------------------------------------*
608 * | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
609 * *--------------------------------------------------------------*
610 *
611 * The low bit is an indicator of whether the timestamp is valid. The next
612 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
613 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
614 *
615 * It is assumed that the caller verifies the timestamp is valid prior to
616 * calling this function.
617 *
618 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
619 * time stored in the device private PTP structure as the basis for timestamp
620 * extension.
621 *
622 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
623 * algorithm.
624 */
625static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
626{
627 const u64 mask = GENMASK_ULL(31, 0);
628
629 return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
630 (in_tstamp >> 8) & mask);
631}
632
633/**
634 * ice_ptp_read_time - Read the time from the device
635 * @pf: Board private structure
636 * @ts: timespec structure to hold the current time value
637 * @sts: Optional parameter for holding a pair of system timestamps from
638 * the system clock. Will be ignored if NULL is given.
639 *
640 * This function reads the source clock registers and stores them in a timespec.
641 * However, since the registers are 64 bits of nanoseconds, we must convert the
642 * result to a timespec before we can return.
643 */
644static void
645ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
646 struct ptp_system_timestamp *sts)
647{
648 u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);
649
650 *ts = ns_to_timespec64(time_ns);
651}
652
653/**
654 * ice_ptp_write_init - Set PHC time to provided value
655 * @pf: Board private structure
656 * @ts: timespec structure that holds the new time value
657 *
658 * Set the PHC time to the specified time provided in the timespec.
659 */
660static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
661{
662 u64 ns = timespec64_to_ns(ts);
663 struct ice_hw *hw = &pf->hw;
664
665 return ice_ptp_init_time(hw, ns);
666}
667
668/**
669 * ice_ptp_write_adj - Adjust PHC clock time atomically
670 * @pf: Board private structure
671 * @adj: Adjustment in nanoseconds
672 *
673 * Perform an atomic adjustment of the PHC time by the specified number of
674 * nanoseconds.
675 */
676static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
677{
678 struct ice_hw *hw = &pf->hw;
679
680 return ice_ptp_adj_clock(hw, adj);
681}
682
683/**
684 * ice_base_incval - Get base timer increment value
685 * @pf: Board private structure
686 *
687 * Look up the base timer increment value for this device. The base increment
688 * value is used to define the nominal clock tick rate. This increment value
689 * is programmed during device initialization. It is also used as the basis
690 * for calculating adjustments using scaled_ppm.
691 */
692static u64 ice_base_incval(struct ice_pf *pf)
693{
694 struct ice_hw *hw = &pf->hw;
695 u64 incval;
696
697 if (ice_is_e810(hw))
698 incval = ICE_PTP_NOMINAL_INCVAL_E810;
699 else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
700 incval = ice_e822_nominal_incval(ice_e822_time_ref(hw));
701 else
702 incval = UNKNOWN_INCVAL_E822;
703
704 dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
705 incval);
706
707 return incval;
708}
709
/**
 * ice_ptp_reset_ts_memory_quad - Reset timestamp memory for one quad
 * @pf: The PF private data structure
 * @quad: The quad (0-4)
 *
 * Pulse the timestamp control bit for the quad: write the mask high and
 * then low. The two writes must happen in this order.
 */
static void ice_ptp_reset_ts_memory_quad(struct ice_pf *pf, int quad)
{
	struct ice_hw *hw = &pf->hw;

	ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
	ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
}
722
723/**
724 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
725 * @port: PTP port for which Tx FIFO is checked
726 */
727static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
728{
729 int quad = port->port_num / ICE_PORTS_PER_QUAD;
730 int offs = port->port_num % ICE_PORTS_PER_QUAD;
731 struct ice_pf *pf;
732 struct ice_hw *hw;
733 u32 val, phy_sts;
734 int err;
735
736 pf = ptp_port_to_pf(port);
737 hw = &pf->hw;
738
739 if (port->tx_fifo_busy_cnt == FIFO_OK)
740 return 0;
741
742 /* need to read FIFO state */
743 if (offs == 0 || offs == 1)
744 err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS,
745 &val);
746 else
747 err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS,
748 &val);
749
750 if (err) {
751 dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
752 port->port_num, err);
753 return err;
754 }
755
756 if (offs & 0x1)
757 phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S;
758 else
759 phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S;
760
761 if (phy_sts & FIFO_EMPTY) {
762 port->tx_fifo_busy_cnt = FIFO_OK;
763 return 0;
764 }
765
766 port->tx_fifo_busy_cnt++;
767
768 dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
769 port->tx_fifo_busy_cnt, port->port_num);
770
771 if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
772 dev_dbg(ice_pf_to_dev(pf),
773 "Port %d Tx FIFO still not empty; resetting quad %d\n",
774 port->port_num, quad);
775 ice_ptp_reset_ts_memory_quad(pf, quad);
776 port->tx_fifo_busy_cnt = FIFO_OK;
777 return 0;
778 }
779
780 return -EAGAIN;
781}
782
783/**
784 * ice_ptp_check_tx_offset_valid - Check if the Tx PHY offset is valid
785 * @port: the PTP port to check
786 *
787 * Checks whether the Tx offset for the PHY associated with this port is
788 * valid. Returns 0 if the offset is valid, and a non-zero error code if it is
789 * not.
790 */
791static int ice_ptp_check_tx_offset_valid(struct ice_ptp_port *port)
792{
793 struct ice_pf *pf = ptp_port_to_pf(port);
794 struct device *dev = ice_pf_to_dev(pf);
795 struct ice_hw *hw = &pf->hw;
796 u32 val;
797 int err;
798
799 err = ice_ptp_check_tx_fifo(port);
800 if (err)
801 return err;
802
803 err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_TX_OV_STATUS,
804 &val);
805 if (err) {
806 dev_err(dev, "Failed to read TX_OV_STATUS for port %d, err %d\n",
807 port->port_num, err);
808 return -EAGAIN;
809 }
810
811 if (!(val & P_REG_TX_OV_STATUS_OV_M))
812 return -EAGAIN;
813
814 return 0;
815}
816
817/**
818 * ice_ptp_check_rx_offset_valid - Check if the Rx PHY offset is valid
819 * @port: the PTP port to check
820 *
821 * Checks whether the Rx offset for the PHY associated with this port is
822 * valid. Returns 0 if the offset is valid, and a non-zero error code if it is
823 * not.
824 */
825static int ice_ptp_check_rx_offset_valid(struct ice_ptp_port *port)
826{
827 struct ice_pf *pf = ptp_port_to_pf(port);
828 struct device *dev = ice_pf_to_dev(pf);
829 struct ice_hw *hw = &pf->hw;
830 int err;
831 u32 val;
832
833 err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_RX_OV_STATUS,
834 &val);
835 if (err) {
836 dev_err(dev, "Failed to read RX_OV_STATUS for port %d, err %d\n",
837 port->port_num, err);
838 return err;
839 }
840
841 if (!(val & P_REG_RX_OV_STATUS_OV_M))
842 return -EAGAIN;
843
844 return 0;
845}
846
847/**
848 * ice_ptp_check_offset_valid - Check port offset valid bit
849 * @port: Port for which offset valid bit is checked
850 *
851 * Returns 0 if both Tx and Rx offset are valid, and -EAGAIN if one of the
852 * offset is not ready.
853 */
854static int ice_ptp_check_offset_valid(struct ice_ptp_port *port)
855{
856 int tx_err, rx_err;
857
858 /* always check both Tx and Rx offset validity */
859 tx_err = ice_ptp_check_tx_offset_valid(port);
860 rx_err = ice_ptp_check_rx_offset_valid(port);
861
862 if (tx_err || rx_err)
863 return -EAGAIN;
864
865 return 0;
866}
867
868/**
869 * ice_ptp_wait_for_offset_valid - Check for valid Tx and Rx offsets
870 * @work: Pointer to the kthread_work structure for this task
871 *
872 * Check whether both the Tx and Rx offsets are valid for enabling the vernier
873 * calibration.
874 *
875 * Once we have valid offsets from hardware, update the total Tx and Rx
876 * offsets, and exit bypass mode. This enables more precise timestamps using
877 * the extra data measured during the vernier calibration process.
878 */
879static void ice_ptp_wait_for_offset_valid(struct kthread_work *work)
880{
881 struct ice_ptp_port *port;
882 int err;
883 struct device *dev;
884 struct ice_pf *pf;
885 struct ice_hw *hw;
886
887 port = container_of(work, struct ice_ptp_port, ov_work.work);
888 pf = ptp_port_to_pf(port);
889 hw = &pf->hw;
890 dev = ice_pf_to_dev(pf);
891
892 if (ice_ptp_check_offset_valid(port)) {
893 /* Offsets not ready yet, try again later */
894 kthread_queue_delayed_work(pf->ptp.kworker,
895 &port->ov_work,
896 msecs_to_jiffies(100));
897 return;
898 }
899
900 /* Offsets are valid, so it is safe to exit bypass mode */
901 err = ice_phy_exit_bypass_e822(hw, port->port_num);
902 if (err) {
903 dev_warn(dev, "Failed to exit bypass mode for PHY port %u, err %d\n",
904 port->port_num, err);
905 return;
906 }
907}
908
909/**
910 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
911 * @ptp_port: PTP port to stop
912 */
913static int
914ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
915{
916 struct ice_pf *pf = ptp_port_to_pf(ptp_port);
917 u8 port = ptp_port->port_num;
918 struct ice_hw *hw = &pf->hw;
919 int err;
920
921 if (ice_is_e810(hw))
922 return 0;
923
924 mutex_lock(&ptp_port->ps_lock);
925
926 kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
927
928 err = ice_stop_phy_timer_e822(hw, port, true);
929 if (err)
930 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
931 port, err);
932
933 mutex_unlock(&ptp_port->ps_lock);
934
935 return err;
936}
937
/**
 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
 * @ptp_port: PTP port for which the PHY start is set
 *
 * Start the PHY timestamping block, and initiate Vernier timestamping
 * calibration. If timestamping cannot be calibrated (such as if link is down)
 * then disable the timestamping block instead.
 *
 * Returns 0 on success or a negative error code from starting/stopping the
 * PHY timer.
 */
static int
ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* Nothing to restart on E810-based devices */
	if (ice_is_e810(hw))
		return 0;

	/* Without link there is nothing to calibrate against */
	if (!ptp_port->link_up)
		return ice_ptp_port_phy_stop(ptp_port);

	mutex_lock(&ptp_port->ps_lock);

	/* Stop any in-flight offset polling before reconfiguring */
	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

	/* temporarily disable Tx timestamps while calibrating PHY offset */
	ptp_port->tx.calibrating = true;
	ptp_port->tx_fifo_busy_cnt = 0;

	/* Start the PHY timer in bypass mode */
	err = ice_start_phy_timer_e822(hw, port, true);
	if (err)
		goto out_unlock;

	/* Enable Tx timestamps right away */
	ptp_port->tx.calibrating = false;

	/* Kick off polling for valid offsets so bypass mode can be exited */
	kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);

out_unlock:
	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}
987
988/**
989 * ice_ptp_link_change - Set or clear port registers for timestamping
990 * @pf: Board private structure
991 * @port: Port for which the PHY start is set
992 * @linkup: Link is up or down
993 */
994int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
995{
996 struct ice_ptp_port *ptp_port;
997
998 if (!test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
999 return 0;
1000
1001 if (port >= ICE_NUM_EXTERNAL_PORTS)
1002 return -EINVAL;
1003
1004 ptp_port = &pf->ptp.port;
1005 if (ptp_port->port_num != port)
1006 return -EINVAL;
1007
1008 /* Update cached link err for this port immediately */
1009 ptp_port->link_up = linkup;
1010
1011 if (!test_bit(ICE_FLAG_PTP, pf->flags))
1012 /* PTP is not setup */
1013 return -EAGAIN;
1014
1015 return ice_ptp_port_phy_restart(ptp_port);
1016}
1017
1018/**
1019 * ice_ptp_reset_ts_memory - Reset timestamp memory for all quads
1020 * @pf: The PF private data structure
1021 */
1022static void ice_ptp_reset_ts_memory(struct ice_pf *pf)
1023{
1024 int quad;
1025
1026 quad = pf->hw.port_info->lport / ICE_PORTS_PER_QUAD;
1027 ice_ptp_reset_ts_memory_quad(pf, quad);
1028}
1029
/**
 * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt
 * @pf: PF private structure
 * @ena: bool value to enable or disable interrupt
 * @threshold: Minimum number of packets at which intr is triggered
 *
 * Utility function to enable or disable Tx timestamp interrupt and threshold.
 * Applies the configuration to every quad; stops and returns the first
 * register read/write error encountered, or 0 on success.
 */
static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
{
	struct ice_hw *hw = &pf->hw;
	int err = 0;
	int quad;
	u32 val;

	/* Clear stale timestamp state before reconfiguring the interrupt */
	ice_ptp_reset_ts_memory(pf);

	for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
		err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
					     &val);
		if (err)
			break;

		if (ena) {
			val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
			/* Replace any previous threshold with the new one */
			val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
			val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) &
				Q_REG_TX_MEM_GBL_CFG_INTR_THR_M);
		} else {
			val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
		}

		err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
					      val);
		if (err)
			break;
	}

	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n",
			err);
	return err;
}
1073
/**
 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
 * @pf: Board private structure
 *
 * Restart the PHY timestamping block for the single port owned by this PF
 * by delegating to ice_ptp_port_phy_restart.
 */
static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
{
	ice_ptp_port_phy_restart(&pf->ptp.port);
}
1082
/**
 * ice_ptp_adjfine - Adjust clock increment rate
 * @info: the driver's PTP info structure
 * @scaled_ppm: Parts per million with 16-bit fractional field
 *
 * Adjust the frequency of the clock by the indicated scaled ppm from the
 * base frequency.
 *
 * Return: 0 on success, -EIO if the new increment value cannot be written.
 */
static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	u64 freq, divisor = 1000000ULL;
	struct ice_hw *hw = &pf->hw;
	s64 incval, diff;
	int neg_adj = 0;
	int err;

	incval = ice_base_incval(pf);

	/* Work with a positive magnitude; remember the sign for later */
	if (scaled_ppm < 0) {
		neg_adj = 1;
		scaled_ppm = -scaled_ppm;
	}

	while ((u64)scaled_ppm > div64_u64(U64_MAX, incval)) {
		/* handle overflow by scaling down the scaled_ppm and
		 * the divisor, losing some precision
		 */
		scaled_ppm >>= 2;
		divisor >>= 2;
	}

	/* scaled_ppm is ppm with 16 fractional bits, so
	 * (incval * scaled_ppm) >> 16 gives incval scaled by whole ppm, and
	 * dividing by the (equally scaled) 10^6 divisor yields the
	 * proportional increment delta.
	 */
	freq = (incval * (u64)scaled_ppm) >> 16;
	diff = div_u64(freq, divisor);

	if (neg_adj)
		incval -= diff;
	else
		incval += diff;

	err = ice_ptp_write_incval_locked(hw, incval);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
			err);
		return -EIO;
	}

	return 0;
}
1132
1133/**
1134 * ice_ptp_extts_work - Workqueue task function
1135 * @work: external timestamp work structure
1136 *
1137 * Service for PTP external clock event
1138 */
1139static void ice_ptp_extts_work(struct kthread_work *work)
1140{
1141 struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work);
1142 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
1143 struct ptp_clock_event event;
1144 struct ice_hw *hw = &pf->hw;
1145 u8 chan, tmr_idx;
1146 u32 hi, lo;
1147
1148 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1149 /* Event time is captured by one of the two matched registers
1150 * GLTSYN_EVNT_L: 32 LSB of sampled time event
1151 * GLTSYN_EVNT_H: 32 MSB of sampled time event
1152 * Event is defined in GLTSYN_EVNT_0 register
1153 */
1154 for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
1155 /* Check if channel is enabled */
1156 if (pf->ptp.ext_ts_irq & (1 << chan)) {
1157 lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
1158 hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
1159 event.timestamp = (((u64)hi) << 32) | lo;
1160 event.type = PTP_CLOCK_EXTTS;
1161 event.index = chan;
1162
1163 /* Fire event */
1164 ptp_clock_event(pf->ptp.clock, &event);
1165 pf->ptp.ext_ts_irq &= ~(1 << chan);
1166 }
1167 }
1168}
1169
1170/**
1171 * ice_ptp_cfg_extts - Configure EXTTS pin and channel
1172 * @pf: Board private structure
1173 * @ena: true to enable; false to disable
1174 * @chan: GPIO channel (0-3)
1175 * @gpio_pin: GPIO pin
1176 * @extts_flags: request flags from the ptp_extts_request.flags
1177 */
1178static int
1179ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
1180 unsigned int extts_flags)
1181{
1182 u32 func, aux_reg, gpio_reg, irq_reg;
1183 struct ice_hw *hw = &pf->hw;
1184 u8 tmr_idx;
1185
1186 if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
1187 return -EINVAL;
1188
1189 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1190
1191 irq_reg = rd32(hw, PFINT_OICR_ENA);
1192
1193 if (ena) {
1194 /* Enable the interrupt */
1195 irq_reg |= PFINT_OICR_TSYN_EVNT_M;
1196 aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
1197
1198#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE BIT(0)
1199#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1)
1200
1201 /* set event level to requested edge */
1202 if (extts_flags & PTP_FALLING_EDGE)
1203 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
1204 if (extts_flags & PTP_RISING_EDGE)
1205 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
1206
1207 /* Write GPIO CTL reg.
1208 * 0x1 is input sampled by EVENT register(channel)
1209 * + num_in_channels * tmr_idx
1210 */
1211 func = 1 + chan + (tmr_idx * 3);
1212 gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
1213 GLGEN_GPIO_CTL_PIN_FUNC_M);
1214 pf->ptp.ext_ts_chan |= (1 << chan);
1215 } else {
1216 /* clear the values we set to reset defaults */
1217 aux_reg = 0;
1218 gpio_reg = 0;
1219 pf->ptp.ext_ts_chan &= ~(1 << chan);
1220 if (!pf->ptp.ext_ts_chan)
1221 irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
1222 }
1223
1224 wr32(hw, PFINT_OICR_ENA, irq_reg);
1225 wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
1226 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
1227
1228 return 0;
1229}
1230
/**
 * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
 * @pf: Board private structure
 * @chan: GPIO channel (0-3)
 * @config: desired periodic clk configuration. NULL will disable channel
 * @store: If set to true the values will be stored
 *
 * Configure the internal clock generator modules to generate the clock wave of
 * specified period.
 *
 * Return: 0 on success, -EFAULT on any configuration failure.
 */
static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
			      struct ice_perout_channel *config, bool store)
{
	u64 current_time, period, start_time, phase;
	struct ice_hw *hw = &pf->hw;
	u32 func, val, gpio_pin;
	u8 tmr_idx;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	/* 0. Reset mode & out_en in AUX_OUT */
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);

	/* If we're disabling the output, clear out CLKO and TGT and keep
	 * output level low
	 */
	if (!config || !config->ena) {
		wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);

		/* Leave the pin as a plain output driven low */
		val = GLGEN_GPIO_CTL_PIN_DIR_M;
		gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
		wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

		/* Store the value if requested */
		if (store)
			memset(&pf->ptp.perout_channels[chan], 0,
			       sizeof(struct ice_perout_channel));

		return 0;
	}
	period = config->period;
	start_time = config->start_time;
	/* phase = offset of start_time within one period, so a restart can
	 * keep the same alignment (see the store block at the end)
	 */
	div64_u64_rem(start_time, period, &phase);
	gpio_pin = config->gpio_pin;

	/* 1. Write clkout with half of required period value */
	if (period & 0x1) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
		goto err;
	}

	period >>= 1;

	/* For proper operation, the GLTSYN_CLKO must be larger than clock tick
	 */
#define MIN_PULSE 3
	if (period <= MIN_PULSE || period > U32_MAX) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
			MIN_PULSE * 2);
		goto err;
	}

	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));

	/* Allow time for programming before start_time is hit */
	current_time = ice_ptp_read_src_clk_reg(pf, NULL);

	/* if start time is in the past start the timer at the nearest second
	 * maintaining phase
	 */
	if (start_time < current_time)
		start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
				       NSEC_PER_SEC) * NSEC_PER_SEC + phase;

	/* Compensate for the output propagation delay of the device family */
	if (ice_is_e810(hw))
		start_time -= E810_OUT_PROP_DELAY_NS;
	else
		start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw));

	/* 2. Write TARGET time */
	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));

	/* 3. Write AUX_OUT register */
	val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);

	/* 4. write GPIO CTL reg */
	func = 8 + chan + (tmr_idx * 4);
	val = GLGEN_GPIO_CTL_PIN_DIR_M |
	      ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

	/* Store the value if requested */
	if (store) {
		memcpy(&pf->ptp.perout_channels[chan], config,
		       sizeof(struct ice_perout_channel));
		/* Keep only the phase so that a future re-enable (e.g. via
		 * ice_ptp_enable_all_clkout) preserves the alignment rather
		 * than an absolute, possibly stale, start time
		 */
		pf->ptp.perout_channels[chan].start_time = phase;
	}

	return 0;
err:
	dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
	return -EFAULT;
}
1338
1339/**
1340 * ice_ptp_disable_all_clkout - Disable all currently configured outputs
1341 * @pf: pointer to the PF structure
1342 *
1343 * Disable all currently configured clock outputs. This is necessary before
1344 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to
1345 * re-enable the clocks again.
1346 */
1347static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
1348{
1349 uint i;
1350
1351 for (i = 0; i < pf->ptp.info.n_per_out; i++)
1352 if (pf->ptp.perout_channels[i].ena)
1353 ice_ptp_cfg_clkout(pf, i, NULL, false);
1354}
1355
1356/**
1357 * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
1358 * @pf: pointer to the PF structure
1359 *
1360 * Enable all currently configured clock outputs. Use this after
1361 * ice_ptp_disable_all_clkout to reconfigure the output signals according to
1362 * their configuration.
1363 */
1364static void ice_ptp_enable_all_clkout(struct ice_pf *pf)
1365{
1366 uint i;
1367
1368 for (i = 0; i < pf->ptp.info.n_per_out; i++)
1369 if (pf->ptp.perout_channels[i].ena)
1370 ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i],
1371 false);
1372}
1373
1374/**
1375 * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
1376 * @info: the driver's PTP info structure
1377 * @rq: The requested feature to change
1378 * @on: Enable/disable flag
1379 */
1380static int
1381ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
1382 struct ptp_clock_request *rq, int on)
1383{
1384 struct ice_pf *pf = ptp_info_to_pf(info);
1385 struct ice_perout_channel clk_cfg = {0};
1386 bool sma_pres = false;
1387 unsigned int chan;
1388 u32 gpio_pin;
1389 int err;
1390
1391 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
1392 sma_pres = true;
1393
1394 switch (rq->type) {
1395 case PTP_CLK_REQ_PEROUT:
1396 chan = rq->perout.index;
1397 if (sma_pres) {
1398 if (chan == ice_pin_desc_e810t[SMA1].chan)
1399 clk_cfg.gpio_pin = GPIO_20;
1400 else if (chan == ice_pin_desc_e810t[SMA2].chan)
1401 clk_cfg.gpio_pin = GPIO_22;
1402 else
1403 return -1;
1404 } else if (ice_is_e810t(&pf->hw)) {
1405 if (chan == 0)
1406 clk_cfg.gpio_pin = GPIO_20;
1407 else
1408 clk_cfg.gpio_pin = GPIO_22;
1409 } else if (chan == PPS_CLK_GEN_CHAN) {
1410 clk_cfg.gpio_pin = PPS_PIN_INDEX;
1411 } else {
1412 clk_cfg.gpio_pin = chan;
1413 }
1414
1415 clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
1416 rq->perout.period.nsec);
1417 clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
1418 rq->perout.start.nsec);
1419 clk_cfg.ena = !!on;
1420
1421 err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
1422 break;
1423 case PTP_CLK_REQ_EXTTS:
1424 chan = rq->extts.index;
1425 if (sma_pres) {
1426 if (chan < ice_pin_desc_e810t[SMA2].chan)
1427 gpio_pin = GPIO_21;
1428 else
1429 gpio_pin = GPIO_23;
1430 } else if (ice_is_e810t(&pf->hw)) {
1431 if (chan == 0)
1432 gpio_pin = GPIO_21;
1433 else
1434 gpio_pin = GPIO_23;
1435 } else {
1436 gpio_pin = chan;
1437 }
1438
1439 err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
1440 rq->extts.flags);
1441 break;
1442 default:
1443 return -EOPNOTSUPP;
1444 }
1445
1446 return err;
1447}
1448
1449/**
1450 * ice_ptp_gettimex64 - Get the time of the clock
1451 * @info: the driver's PTP info structure
1452 * @ts: timespec64 structure to hold the current time value
1453 * @sts: Optional parameter for holding a pair of system timestamps from
1454 * the system clock. Will be ignored if NULL is given.
1455 *
1456 * Read the device clock and return the correct value on ns, after converting it
1457 * into a timespec struct.
1458 */
1459static int
1460ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
1461 struct ptp_system_timestamp *sts)
1462{
1463 struct ice_pf *pf = ptp_info_to_pf(info);
1464 struct ice_hw *hw = &pf->hw;
1465
1466 if (!ice_ptp_lock(hw)) {
1467 dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
1468 return -EBUSY;
1469 }
1470
1471 ice_ptp_read_time(pf, ts, sts);
1472 ice_ptp_unlock(hw);
1473
1474 return 0;
1475}
1476
/**
 * ice_ptp_settime64 - Set the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure that holds the new time value
 *
 * Set the device clock to the user input value. The conversion from timespec
 * to ns happens in the write function.
 */
static int
ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct timespec64 ts64 = *ts;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* For Vernier mode, we need to recalibrate after new settime
	 * Start with disabling timestamp block
	 */
	if (pf->ptp.port.link_up)
		ice_ptp_port_phy_stop(&pf->ptp.port);

	if (!ice_ptp_lock(hw)) {
		/* NOTE(review): on this path the PHY timestamp block was
		 * stopped above but is not restarted before returning —
		 * confirm whether a restart belongs here.
		 */
		err = -EBUSY;
		goto exit;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	err = ice_ptp_write_init(pf, &ts64);
	ice_ptp_unlock(hw);

	if (!err)
		ice_ptp_update_cached_phctime(pf);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_clkout(pf);

	/* Recalibrate and re-enable timestamp block */
	if (pf->ptp.port.link_up)
		ice_ptp_port_phy_restart(&pf->ptp.port);
exit:
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
		return err;
	}

	return 0;
}
1527
1528/**
1529 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
1530 * @info: the driver's PTP info structure
1531 * @delta: Offset in nanoseconds to adjust the time by
1532 */
1533static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
1534{
1535 struct timespec64 now, then;
1536 int ret;
1537
1538 then = ns_to_timespec64(delta);
1539 ret = ice_ptp_gettimex64(info, &now, NULL);
1540 if (ret)
1541 return ret;
1542 now = timespec64_add(now, then);
1543
1544 return ice_ptp_settime64(info, (const struct timespec64 *)&now);
1545}
1546
1547/**
1548 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
1549 * @info: the driver's PTP info structure
1550 * @delta: Offset in nanoseconds to adjust the time by
1551 */
1552static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
1553{
1554 struct ice_pf *pf = ptp_info_to_pf(info);
1555 struct ice_hw *hw = &pf->hw;
1556 struct device *dev;
1557 int err;
1558
1559 dev = ice_pf_to_dev(pf);
1560
1561 /* Hardware only supports atomic adjustments using signed 32-bit
1562 * integers. For any adjustment outside this range, perform
1563 * a non-atomic get->adjust->set flow.
1564 */
1565 if (delta > S32_MAX || delta < S32_MIN) {
1566 dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
1567 return ice_ptp_adjtime_nonatomic(info, delta);
1568 }
1569
1570 if (!ice_ptp_lock(hw)) {
1571 dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
1572 return -EBUSY;
1573 }
1574
1575 /* Disable periodic outputs */
1576 ice_ptp_disable_all_clkout(pf);
1577
1578 err = ice_ptp_write_adj(pf, delta);
1579
1580 /* Reenable periodic outputs */
1581 ice_ptp_enable_all_clkout(pf);
1582
1583 ice_ptp_unlock(hw);
1584
1585 if (err) {
1586 dev_err(dev, "PTP failed to adjust time, err %d\n", err);
1587 return err;
1588 }
1589
1590 ice_ptp_update_cached_phctime(pf);
1591
1592 return 0;
1593}
1594
1595#ifdef CONFIG_ICE_HWTS
/**
 * ice_ptp_get_syncdevicetime - Get the cross time stamp info
 * @device: Current device time
 * @system: System counter value read synchronously with device time
 * @ctx: Context provided by timekeeping code
 *
 * Read device and system (ART) clock simultaneously and return the corrected
 * clock values in ns.
 *
 * Return: 0 on success, -EFAULT if the HW lock is busy, -ETIMEDOUT if the
 * hardware never completes the sync sequence.
 */
static int
ice_ptp_get_syncdevicetime(ktime_t *device,
			   struct system_counterval_t *system,
			   void *ctx)
{
	struct ice_pf *pf = (struct ice_pf *)ctx;
	struct ice_hw *hw = &pf->hw;
	u32 hh_lock, hh_art_ctl;
	int i;

	/* Get the HW lock */
	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
	if (hh_lock & PFHH_SEM_BUSY_M) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
		return -EFAULT;
	}

	/* Start the ART and device clock sync sequence */
	hh_art_ctl = rd32(hw, GLHH_ART_CTL);
	hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
	wr32(hw, GLHH_ART_CTL, hh_art_ctl);

#define MAX_HH_LOCK_TRIES 100

	for (i = 0; i < MAX_HH_LOCK_TRIES; i++) {
		/* Wait for sync to complete; the ACTIVE bit clears once the
		 * paired timestamps have been latched
		 */
		hh_art_ctl = rd32(hw, GLHH_ART_CTL);
		if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
			udelay(1);
			continue;
		} else {
			u32 hh_ts_lo, hh_ts_hi, tmr_idx;
			u64 hh_ts;

			tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
			/* Read ART time */
			hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
			hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
			*system = convert_art_ns_to_tsc(hh_ts);
			/* Read Device source clock time */
			hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
			hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
			*device = ns_to_ktime(hh_ts);
			break;
		}
	}
	/* Release HW lock */
	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
	hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
	wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);

	/* Loop exhausted without the hardware finishing the sync */
	if (i == MAX_HH_LOCK_TRIES)
		return -ETIMEDOUT;

	return 0;
}
1663
/**
 * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp
 * @info: the driver's PTP info structure
 * @cts: The memory to fill the cross timestamp info
 *
 * Capture a cross timestamp between the ART and the device PTP hardware
 * clock. Fill the cross timestamp information and report it back to the
 * caller.
 *
 * This is only valid for E822 devices which have support for generating the
 * cross timestamp via PCIe PTM.
 *
 * In order to correctly correlate the ART timestamp back to the TSC time, the
 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
 */
static int
ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info,
			    struct system_device_crosststamp *cts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);

	/* Delegate to the timekeeping core, which calls back into
	 * ice_ptp_get_syncdevicetime to capture the paired readings
	 */
	return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
					     pf, NULL, cts);
}
1688#endif /* CONFIG_ICE_HWTS */
1689
1690/**
1691 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
1692 * @pf: Board private structure
1693 * @ifr: ioctl data
1694 *
1695 * Copy the timestamping config to user buffer
1696 */
1697int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
1698{
1699 struct hwtstamp_config *config;
1700
1701 if (!test_bit(ICE_FLAG_PTP, pf->flags))
1702 return -EIO;
1703
1704 config = &pf->ptp.tstamp_config;
1705
1706 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
1707 -EFAULT : 0;
1708}
1709
1710/**
1711 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
1712 * @pf: Board private structure
1713 * @config: hwtstamp settings requested or saved
1714 */
1715static int
1716ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
1717{
1718 switch (config->tx_type) {
1719 case HWTSTAMP_TX_OFF:
1720 ice_set_tx_tstamp(pf, false);
1721 break;
1722 case HWTSTAMP_TX_ON:
1723 ice_set_tx_tstamp(pf, true);
1724 break;
1725 default:
1726 return -ERANGE;
1727 }
1728
1729 switch (config->rx_filter) {
1730 case HWTSTAMP_FILTER_NONE:
1731 ice_set_rx_tstamp(pf, false);
1732 break;
1733 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1734 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1735 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1736 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1737 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1738 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1739 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1740 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1741 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1742 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1743 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1744 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1745 case HWTSTAMP_FILTER_NTP_ALL:
1746 case HWTSTAMP_FILTER_ALL:
1747 ice_set_rx_tstamp(pf, true);
1748 break;
1749 default:
1750 return -ERANGE;
1751 }
1752
1753 return 0;
1754}
1755
1756/**
1757 * ice_ptp_set_ts_config - ioctl interface to control the timestamping
1758 * @pf: Board private structure
1759 * @ifr: ioctl data
1760 *
1761 * Get the user config and store it
1762 */
1763int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
1764{
1765 struct hwtstamp_config config;
1766 int err;
1767
1768 if (!test_bit(ICE_FLAG_PTP, pf->flags))
1769 return -EAGAIN;
1770
1771 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1772 return -EFAULT;
1773
1774 err = ice_ptp_set_timestamp_mode(pf, &config);
1775 if (err)
1776 return err;
1777
1778 /* Return the actual configuration set */
1779 config = pf->ptp.tstamp_config;
1780
1781 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1782 -EFAULT : 0;
1783}
1784
1785/**
1786 * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
1787 * @rx_ring: Ring to get the VSI info
1788 * @rx_desc: Receive descriptor
1789 * @skb: Particular skb to send timestamp with
1790 *
1791 * The driver receives a notification in the receive descriptor with timestamp.
1792 * The timestamp is in ns, so we must convert the result first.
1793 */
1794void
1795ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
1796 union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
1797{
1798 u32 ts_high;
1799 u64 ts_ns;
1800
1801 /* Populate timesync data into skb */
1802 if (rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID) {
1803 struct skb_shared_hwtstamps *hwtstamps;
1804
1805 /* Use ice_ptp_extend_32b_ts directly, using the ring-specific
1806 * cached PHC value, rather than accessing the PF. This also
1807 * allows us to simply pass the upper 32bits of nanoseconds
1808 * directly. Calling ice_ptp_extend_40b_ts is unnecessary as
1809 * it would just discard these bits itself.
1810 */
1811 ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
1812 ts_ns = ice_ptp_extend_32b_ts(rx_ring->cached_phctime, ts_high);
1813
1814 hwtstamps = skb_hwtstamps(skb);
1815 memset(hwtstamps, 0, sizeof(*hwtstamps));
1816 hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
1817 }
1818}
1819
1820/**
1821 * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
1822 * @pf: pointer to the PF structure
1823 * @info: PTP clock info structure
1824 *
1825 * Disable the OS access to the SMA pins. Called to clear out the OS
1826 * indications of pin support when we fail to setup the E810-T SMA control
1827 * register.
1828 */
1829static void
1830ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
1831{
1832 struct device *dev = ice_pf_to_dev(pf);
1833
1834 dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
1835
1836 info->enable = NULL;
1837 info->verify = NULL;
1838 info->n_pins = 0;
1839 info->n_ext_ts = 0;
1840 info->n_per_out = 0;
1841}
1842
1843/**
1844 * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
1845 * @pf: pointer to the PF structure
1846 * @info: PTP clock info structure
1847 *
1848 * Finish setting up the SMA pins by allocating pin_config, and setting it up
1849 * according to the current status of the SMA. On failure, disable all of the
1850 * extended SMA pin support.
1851 */
1852static void
1853ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
1854{
1855 struct device *dev = ice_pf_to_dev(pf);
1856 int err;
1857
1858 /* Allocate memory for kernel pins interface */
1859 info->pin_config = devm_kcalloc(dev, info->n_pins,
1860 sizeof(*info->pin_config), GFP_KERNEL);
1861 if (!info->pin_config) {
1862 ice_ptp_disable_sma_pins_e810t(pf, info);
1863 return;
1864 }
1865
1866 /* Read current SMA status */
1867 err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
1868 if (err)
1869 ice_ptp_disable_sma_pins_e810t(pf, info);
1870}
1871
1872/**
1873 * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
1874 * @pf: pointer to the PF instance
1875 * @info: PTP clock capabilities
1876 */
1877static void
1878ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
1879{
1880 /* Check if SMA controller is in the netlist */
1881 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
1882 !ice_is_pca9575_present(&pf->hw))
1883 ice_clear_feature_support(pf, ICE_F_SMA_CTRL);
1884
1885 if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
1886 info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
1887 info->n_per_out = N_PER_OUT_E810T_NO_SMA;
1888 return;
1889 }
1890
1891 info->n_per_out = N_PER_OUT_E810T;
1892 info->n_ext_ts = N_EXT_TS_E810;
1893 info->n_pins = NUM_PTP_PINS_E810T;
1894 info->verify = ice_verify_pin_e810t;
1895
1896 /* Complete setup of the SMA pins */
1897 ice_ptp_setup_sma_pins_e810t(pf, info);
1898}
1899
1900/**
1901 * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
1902 * @info: PTP clock capabilities
1903 */
1904static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
1905{
1906 info->n_per_out = N_PER_OUT_E810;
1907 info->n_ext_ts = N_EXT_TS_E810;
1908}
1909
/**
 * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E822 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for E822
 * devices.
 */
static void
ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info)
{
#ifdef CONFIG_ICE_HWTS
	/* Cross timestamping needs both ART and a TSC whose frequency the
	 * kernel knows, so it can correlate the two clocks
	 */
	if (boot_cpu_has(X86_FEATURE_ART) &&
	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
		info->getcrosststamp = ice_ptp_getcrosststamp_e822;
#endif /* CONFIG_ICE_HWTS */
}
1929
1930/**
1931 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
1932 * @pf: Board private structure
1933 * @info: PTP info to fill
1934 *
1935 * Assign functions to the PTP capabiltiies structure for E810 devices.
1936 * Functions which operate across all device families should be set directly
1937 * in ice_ptp_set_caps. Only add functions here which are distinct for e810
1938 * devices.
1939 */
1940static void
1941ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
1942{
1943 info->enable = ice_ptp_gpio_enable_e810;
1944
1945 if (ice_is_e810t(&pf->hw))
1946 ice_ptp_setup_pins_e810t(pf, info);
1947 else
1948 ice_ptp_setup_pins_e810(info);
1949}
1950
/**
 * ice_ptp_set_caps - Set PTP capabilities
 * @pf: Board private structure
 *
 * Fill the common ptp_clock_info fields, then let the family-specific
 * helper install the callbacks that differ between E810 and E822.
 */
static void ice_ptp_set_caps(struct ice_pf *pf)
{
	struct ptp_clock_info *info = &pf->ptp.info;
	struct device *dev = ice_pf_to_dev(pf);

	/* snprintf always NUL-terminates; the "- 1" merely shortens the
	 * usable width by one byte and is harmless
	 */
	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
		 dev_driver_string(dev), dev_name(dev));
	info->owner = THIS_MODULE;
	info->max_adj = 999999999;
	info->adjtime = ice_ptp_adjtime;
	info->adjfine = ice_ptp_adjfine;
	info->gettimex64 = ice_ptp_gettimex64;
	info->settime64 = ice_ptp_settime64;

	/* Family-specific callbacks: GPIO pins, cross timestamp, ... */
	if (ice_is_e810(&pf->hw))
		ice_ptp_set_funcs_e810(pf, info);
	else
		ice_ptp_set_funcs_e822(pf, info);
}
1974
1975/**
1976 * ice_ptp_create_clock - Create PTP clock device for userspace
1977 * @pf: Board private structure
1978 *
1979 * This function creates a new PTP clock device. It only creates one if we
1980 * don't already have one. Will return error if it can't create one, but success
1981 * if we already have a device. Should be used by ice_ptp_init to create clock
1982 * initially, and prevent global resets from creating new clock devices.
1983 */
1984static long ice_ptp_create_clock(struct ice_pf *pf)
1985{
1986 struct ptp_clock_info *info;
1987 struct ptp_clock *clock;
1988 struct device *dev;
1989
1990 /* No need to create a clock device if we already have one */
1991 if (pf->ptp.clock)
1992 return 0;
1993
1994 ice_ptp_set_caps(pf);
1995
1996 info = &pf->ptp.info;
1997 dev = ice_pf_to_dev(pf);
1998
1999 /* Attempt to register the clock before enabling the hardware. */
2000 clock = ptp_clock_register(info, dev);
2001 if (IS_ERR(clock))
2002 return PTR_ERR(clock);
2003
2004 pf->ptp.clock = clock;
2005
2006 return 0;
2007}
2008
/**
 * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port
 * @work: pointer to the kthread_work struct
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) copy the timestamp out of the PHY register
 * 2) clear the timestamp valid bit in the PHY register
 * 3) unlock the index by clearing the associated in_use bit
 * 4) extend the 40b timestamp value to get a 64bit timestamp
 * 5) send that timestamp to the stack
 *
 * After looping, if we still have waiting SKBs, then re-queue the work. This
 * may cause us effectively poll even when not strictly necessary. We do this
 * because it's possible a new timestamp was requested around the same time as
 * the interrupt. In some cases hardware might not interrupt us again when the
 * timestamp is captured.
 *
 * Note that we only take the tracking lock when clearing the bit and when
 * checking if we need to re-queue this task. The only place where bits can be
 * set is the hard xmit routine where an SKB has a request flag set. The only
 * places where we clear bits are this work function, or the periodic cleanup
 * thread. If the cleanup thread clears a bit we're processing we catch it
 * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
 * starts a new timestamp, we might not begin processing it right away but we
 * will notice it at the end when we re-queue the work item. If a Tx thread
 * starts a new timestamp just after this function exits without re-queuing,
 * the interrupt when the timestamp finishes should trigger. Avoiding holding
 * the lock for the entire function is important in order to ensure that Tx
 * threads do not get blocked while waiting for the lock.
 */
static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
{
	struct ice_ptp_port *ptp_port;
	struct ice_ptp_tx *tx;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u8 idx;

	tx = container_of(work, struct ice_ptp_tx, work);
	if (!tx->init)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	/* Walk only the indices with an outstanding timestamp request */
	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->quad_offset;
		u64 raw_tstamp, tstamp;
		struct sk_buff *skb;
		int err;

		err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
					  &raw_tstamp);
		if (err)
			continue;

		/* Check if the timestamp is invalid or stale */
		if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* The timestamp is valid, so we'll go ahead and clear this
		 * index and then send the timestamp up to the stack.
		 */
		spin_lock(&tx->lock);
		tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		spin_unlock(&tx->lock);

		/* it's (unlikely but) possible we raced with the cleanup
		 * thread for discarding old timestamp requests.
		 */
		if (!skb)
			continue;

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}

	/* Check if we still have work to do. If so, re-queue this task to
	 * poll for remaining timestamps.
	 */
	spin_lock(&tx->lock);
	if (!bitmap_empty(tx->in_use, tx->len))
		kthread_queue_work(pf->ptp.kworker, &tx->work);
	spin_unlock(&tx->lock);
}
2108
2109/**
2110 * ice_ptp_request_ts - Request an available Tx timestamp index
2111 * @tx: the PTP Tx timestamp tracker to request from
2112 * @skb: the SKB to associate with this timestamp request
2113 */
2114s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
2115{
2116 u8 idx;
2117
2118 /* Check if this tracker is initialized */
2119 if (!tx->init || tx->calibrating)
2120 return -1;
2121
2122 spin_lock(&tx->lock);
2123 /* Find and set the first available index */
2124 idx = find_first_zero_bit(tx->in_use, tx->len);
2125 if (idx < tx->len) {
2126 /* We got a valid index that no other thread could have set. Store
2127 * a reference to the skb and the start time to allow discarding old
2128 * requests.
2129 */
2130 set_bit(idx, tx->in_use);
2131 tx->tstamps[idx].start = jiffies;
2132 tx->tstamps[idx].skb = skb_get(skb);
2133 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2134 }
2135
2136 spin_unlock(&tx->lock);
2137
2138 /* return the appropriate PHY timestamp register index, -1 if no
2139 * indexes were available.
2140 */
2141 if (idx >= tx->len)
2142 return -1;
2143 else
2144 return idx + tx->quad_offset;
2145}
2146
2147/**
2148 * ice_ptp_process_ts - Spawn kthread work to handle timestamps
2149 * @pf: Board private structure
2150 *
2151 * Queue work required to process the PTP Tx timestamps outside of interrupt
2152 * context.
2153 */
2154void ice_ptp_process_ts(struct ice_pf *pf)
2155{
2156 if (pf->ptp.port.tx.init)
2157 kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work);
2158}
2159
2160/**
2161 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
2162 * @tx: Tx tracking structure to initialize
2163 *
2164 * Assumes that the length has already been initialized. Do not call directly,
2165 * use the ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
2166 */
2167static int
2168ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
2169{
2170 tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
2171 if (!tx->tstamps)
2172 return -ENOMEM;
2173
2174 tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
2175 if (!tx->in_use) {
2176 kfree(tx->tstamps);
2177 tx->tstamps = NULL;
2178 return -ENOMEM;
2179 }
2180
2181 spin_lock_init(&tx->lock);
2182 kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);
2183
2184 tx->init = 1;
2185
2186 return 0;
2187}
2188
2189/**
2190 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
2191 * @pf: Board private structure
2192 * @tx: the tracker to flush
2193 */
2194static void
2195ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
2196{
2197 u8 idx;
2198
2199 for (idx = 0; idx < tx->len; idx++) {
2200 u8 phy_idx = idx + tx->quad_offset;
2201
2202 spin_lock(&tx->lock);
2203 if (tx->tstamps[idx].skb) {
2204 dev_kfree_skb_any(tx->tstamps[idx].skb);
2205 tx->tstamps[idx].skb = NULL;
2206 }
2207 clear_bit(idx, tx->in_use);
2208 spin_unlock(&tx->lock);
2209
2210 /* Clear any potential residual timestamp in the PHY block */
2211 if (!pf->hw.reset_ongoing)
2212 ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
2213 }
2214}
2215
/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker. The ordering below
 * matters: the tracker is marked uninitialized first so no new work runs
 * against it, then in-flight work is awaited before the backing memory is
 * flushed and freed.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	/* Prevent new timestamp requests and work from using the tracker */
	tx->init = 0;

	/* Wait for any already-running timestamp work to complete */
	kthread_cancel_work_sync(&tx->work);

	/* Drop outstanding skbs and residual PHY timestamps */
	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	tx->len = 0;
}
2240
2241/**
2242 * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
2243 * @pf: Board private structure
2244 * @tx: the Tx tracking structure to initialize
2245 * @port: the port this structure tracks
2246 *
2247 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
2248 * the timestamp block is shared for all ports in the same quad. To avoid
2249 * ports using the same timestamp index, logically break the block of
2250 * registers into chunks based on the port number.
2251 */
2252static int
2253ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
2254{
2255 tx->quad = port / ICE_PORTS_PER_QUAD;
2256 tx->quad_offset = tx->quad * INDEX_PER_PORT;
2257 tx->len = INDEX_PER_PORT;
2258
2259 return ice_ptp_alloc_tx_tracker(tx);
2260}
2261
2262/**
2263 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
2264 * @pf: Board private structure
2265 * @tx: the Tx tracking structure to initialize
2266 *
2267 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
2268 * port has its own block of timestamps, independent of the other ports.
2269 */
2270static int
2271ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
2272{
2273 tx->quad = pf->hw.port_info->lport;
2274 tx->quad_offset = 0;
2275 tx->len = INDEX_PER_QUAD;
2276
2277 return ice_ptp_alloc_tx_tracker(tx);
2278}
2279
2280/**
2281 * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
2282 * @tx: PTP Tx tracker to clean up
2283 *
2284 * Loop through the Tx timestamp requests and see if any of them have been
2285 * waiting for a long time. Discard any SKBs that have been waiting for more
2286 * than 2 seconds. This is long enough to be reasonably sure that the
2287 * timestamp will never be captured. This might happen if the packet gets
2288 * discarded before it reaches the PHY timestamping block.
2289 */
2290static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
2291{
2292 u8 idx;
2293
2294 if (!tx->init)
2295 return;
2296
2297 for_each_set_bit(idx, tx->in_use, tx->len) {
2298 struct sk_buff *skb;
2299
2300 /* Check if this SKB has been waiting for too long */
2301 if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
2302 continue;
2303
2304 spin_lock(&tx->lock);
2305 skb = tx->tstamps[idx].skb;
2306 tx->tstamps[idx].skb = NULL;
2307 clear_bit(idx, tx->in_use);
2308 spin_unlock(&tx->lock);
2309
2310 /* Free the SKB after we've cleared the bit */
2311 dev_kfree_skb_any(skb);
2312 }
2313}
2314
2315static void ice_ptp_periodic_work(struct kthread_work *work)
2316{
2317 struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
2318 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
2319
2320 if (!test_bit(ICE_FLAG_PTP, pf->flags))
2321 return;
2322
2323 ice_ptp_update_cached_phctime(pf);
2324
2325 ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);
2326
2327 /* Run twice a second */
2328 kthread_queue_delayed_work(ptp->kworker, &ptp->work,
2329 msecs_to_jiffies(500));
2330}
2331
/**
 * ice_ptp_reset - Initialize PTP hardware clock support after reset
 * @pf: Board private structure
 *
 * Re-initialize PTP state after a device reset. For a PF reset (PFR) only
 * the software Tx tracking structures need rebuilding; for deeper resets the
 * source timer owner must also reprogram the PHC increment value and time
 * before the PHY timestamping block is restarted.
 */
void ice_ptp_reset(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err, itr = 1;
	u64 time_diff;

	/* A PFR does not tear down the clock hardware; skip straight to
	 * rebuilding the Tx tracking structures.
	 */
	if (test_bit(ICE_PFR_REQ, pf->state))
		goto pfr;

	/* Only the source timer owner reprograms the shared clock */
	if (!hw->func_caps.ts_func_info.src_tmr_owned)
		goto reset_ts;

	err = ice_ptp_init_phc(hw);
	if (err)
		goto err;

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err) {
		ice_ptp_unlock(hw);
		goto err;
	}

	/* Write the initial Time value to PHY and LAN using the cached PHC
	 * time before the reset and time difference between stopping and
	 * starting the clock. Fall back to system time if no cached value
	 * is available.
	 */
	if (ptp->cached_phc_time) {
		time_diff = ktime_get_real_ns() - ptp->reset_time;
		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
	} else {
		ts = ktime_to_timespec64(ktime_get_real());
	}
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	if (!ice_is_e810(hw)) {
		/* Enable quad interrupts */
		err = ice_ptp_tx_ena_intr(pf, true, itr);
		if (err)
			goto err;
	}

reset_ts:
	/* Restart the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

pfr:
	/* Init Tx structures */
	if (ice_is_e810(&pf->hw)) {
		err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
	} else {
		kthread_init_delayed_work(&ptp->port.ov_work,
					  ice_ptp_wait_for_offset_valid);
		err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,
					   ptp->port.port_num);
	}
	if (err)
		goto err;

	set_bit(ICE_FLAG_PTP, pf->flags);

	/* Start periodic work going */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);

	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
	return;

err:
	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
2421
/**
 * ice_ptp_prepare_for_reset - Prepare PTP for reset
 * @pf: Board private structure
 *
 * Stop all PTP activity ahead of a reset. For resets deeper than a PFR the
 * Tx tracker is also torn down, the periodic outputs and source clock are
 * disabled, and the current system time is captured so ice_ptp_reset() can
 * restore the PHC afterwards.
 */
void ice_ptp_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	u8 src_tmr;

	clear_bit(ICE_FLAG_PTP, pf->flags);

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_cfg_timestamp(pf, false);

	/* Stop the periodic and extts work items before touching the state
	 * they operate on
	 */
	kthread_cancel_delayed_work_sync(&ptp->work);
	kthread_cancel_work_sync(&ptp->extts_work);

	/* A PF reset leaves the clock hardware intact; nothing more to do */
	if (test_bit(ICE_PFR_REQ, pf->state))
		return;

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);

	/* Disable source clock */
	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);

	/* Acquire PHC and system timer to restore after reset */
	ptp->reset_time = ktime_get_real_ns();
}
2455
/**
 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
 * @pf: Board private structure
 *
 * Setup and initialize a PTP clock device that represents the device hardware
 * clock. Save the clock index for other functions connected to the same
 * hardware resource.
 *
 * Returns 0 on success, -EBUSY if the hardware semaphore cannot be taken, or
 * a negative error code from the underlying initialization steps.
 */
static int ice_ptp_init_owner(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err, itr = 1;

	err = ice_ptp_init_phc(hw);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
			err);
		return err;
	}

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err_exit;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	/* Seed the hardware clock from the current system time */
	ts = ktime_to_timespec64(ktime_get_real());
	/* Write the initial Time value to PHY and LAN */
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	if (!ice_is_e810(hw)) {
		/* Enable quad interrupts */
		err = ice_ptp_tx_ena_intr(pf, true, itr);
		if (err)
			goto err_exit;
	}

	/* Ensure we have a clock device */
	err = ice_ptp_create_clock(pf);
	if (err)
		goto err_clk;

	/* Store the PTP clock index for other PFs */
	ice_set_ptp_clock_index(pf);

	return 0;

err_clk:
	pf->ptp.clock = NULL;
err_exit:
	return err;
}
2523
2524/**
2525 * ice_ptp_init_work - Initialize PTP work threads
2526 * @pf: Board private structure
2527 * @ptp: PF PTP structure
2528 */
2529static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
2530{
2531 struct kthread_worker *kworker;
2532
2533 /* Initialize work functions */
2534 kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
2535 kthread_init_work(&ptp->extts_work, ice_ptp_extts_work);
2536
2537 /* Allocate a kworker for handling work required for the ports
2538 * connected to the PTP hardware clock.
2539 */
2540 kworker = kthread_create_worker(0, "ice-ptp-%s",
2541 dev_name(ice_pf_to_dev(pf)));
2542 if (IS_ERR(kworker))
2543 return PTR_ERR(kworker);
2544
2545 ptp->kworker = kworker;
2546
2547 /* Start periodic work going */
2548 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
2549
2550 return 0;
2551}
2552
2553/**
2554 * ice_ptp_init_port - Initialize PTP port structure
2555 * @pf: Board private structure
2556 * @ptp_port: PTP port structure
2557 */
2558static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
2559{
2560 mutex_init(&ptp_port->ps_lock);
2561
2562 if (ice_is_e810(&pf->hw))
2563 return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
2564
2565 kthread_init_delayed_work(&ptp_port->ov_work,
2566 ice_ptp_wait_for_offset_valid);
2567 return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num);
2568}
2569
2570/**
2571 * ice_ptp_init - Initialize PTP hardware clock support
2572 * @pf: Board private structure
2573 *
2574 * Set up the device for interacting with the PTP hardware clock for all
2575 * functions, both the function that owns the clock hardware, and the
2576 * functions connected to the clock hardware.
2577 *
2578 * The clock owner will allocate and register a ptp_clock with the
2579 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
2580 * items used for asynchronous work such as Tx timestamps and periodic work.
2581 */
2582void ice_ptp_init(struct ice_pf *pf)
2583{
2584 struct ice_ptp *ptp = &pf->ptp;
2585 struct ice_hw *hw = &pf->hw;
2586 int err;
2587
2588 /* If this function owns the clock hardware, it must allocate and
2589 * configure the PTP clock device to represent it.
2590 */
2591 if (hw->func_caps.ts_func_info.src_tmr_owned) {
2592 err = ice_ptp_init_owner(pf);
2593 if (err)
2594 goto err;
2595 }
2596
2597 ptp->port.port_num = hw->pf_id;
2598 err = ice_ptp_init_port(pf, &ptp->port);
2599 if (err)
2600 goto err;
2601
2602 /* Start the PHY timestamping block */
2603 ice_ptp_reset_phy_timestamping(pf);
2604
2605 set_bit(ICE_FLAG_PTP, pf->flags);
2606 err = ice_ptp_init_work(pf, ptp);
2607 if (err)
2608 goto err;
2609
2610 dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
2611 return;
2612
2613err:
2614 /* If we registered a PTP clock, release it */
2615 if (pf->ptp.clock) {
2616 ptp_clock_unregister(ptp->clock);
2617 pf->ptp.clock = NULL;
2618 }
2619 clear_bit(ICE_FLAG_PTP, pf->flags);
2620 dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
2621}
2622
/**
 * ice_ptp_release - Disable the driver/HW support and unregister the clock
 * @pf: Board private structure
 *
 * This function handles the cleanup work required from the initialization by
 * clearing out the important information and unregistering the clock
 */
void ice_ptp_release(struct ice_pf *pf)
{
	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_cfg_timestamp(pf, false);

	/* Flush and free any outstanding Tx timestamp requests */
	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	clear_bit(ICE_FLAG_PTP, pf->flags);

	/* Stop the periodic work before destroying the worker it runs on */
	kthread_cancel_delayed_work_sync(&pf->ptp.work);

	ice_ptp_port_phy_stop(&pf->ptp.port);
	mutex_destroy(&pf->ptp.port.ps_lock);
	if (pf->ptp.kworker) {
		kthread_destroy_worker(pf->ptp.kworker);
		pf->ptp.kworker = NULL;
	}

	/* Functions that do not own the clock hardware are done here */
	if (!pf->ptp.clock)
		return;

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	ice_clear_ptp_clock_index(pf);
	ptp_clock_unregister(pf->ptp.clock);
	pf->ptp.clock = NULL;

	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}