1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_trace.h"
7
8 #define E810_OUT_PROP_DELAY_NS 1
9
10 #define UNKNOWN_INCVAL_E822 0x100000000ULL
11
/* Fixed pin layout of the E810-T SMA/U.FL connectors. Used both as the
 * source of default names/indices/channels when building the pin table
 * reported to user space and for validating channel assignments.
 */
static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
	/* name    idx   func         chan */
	{ "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
	{ "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } },
	{ "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
	{ "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } },
	{ "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
};
20
/**
 * ice_get_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Read the configuration of the SMA control logic and put it into the
 * ptp_pin_desc structure
 */
static int
ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
{
	u8 data, i;
	int status;

	/* Read initial pin state */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* initialize with defaults from the static pin table */
	for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
		snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
			 "%s", ice_pin_desc_e810t[i].name);
		ptp_pins[i].index = ice_pin_desc_e810t[i].index;
		ptp_pins[i].func = ice_pin_desc_e810t[i].func;
		ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
	}

	/* Parse SMA1/UFL1: decode the SMA1 control bits into pin functions */
	switch (data & ICE_SMA1_MASK_E810T) {
	case ICE_SMA1_MASK_E810T:
	default:
		/* All SMA1 control bits set: both pins disabled */
		ptp_pins[SMA1].func = PTP_PF_NONE;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_DIR_EN_E810T:
		/* Direction enable only: SMA1 drives output */
		ptp_pins[SMA1].func = PTP_PF_PEROUT;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_TX_EN_E810T:
		/* TX disable bit set without direction: SMA1 receives */
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case 0:
		/* No bits set: SMA1 RX with U.FL1 TX active */
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_PEROUT;
		break;
	}

	/* Parse SMA2/UFL2: decode the SMA2 control bits into pin functions */
	switch (data & ICE_SMA2_MASK_E810T) {
	case ICE_SMA2_MASK_E810T:
	default:
		/* All SMA2 control bits set: both pins disabled */
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_EXTTS;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	case ICE_SMA2_DIR_EN_E810T:
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	}

	return 0;
}
97
/**
 * ice_ptp_set_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Set the configuration of the SMA control logic based on the configuration in
 * the ptp_pins parameter
 */
static int
ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
			     const struct ptp_pin_desc *ptp_pins)
{
	int status;
	u8 data;

	/* SMA1 and UFL1 cannot be set to TX at the same time */
	if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
	    ptp_pins[UFL1].func == PTP_PF_PEROUT)
		return -EINVAL;

	/* SMA2 and UFL2 cannot be set to RX at the same time */
	if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
	    ptp_pins[UFL2].func == PTP_PF_EXTTS)
		return -EINVAL;

	/* Read initial pin state value */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* Set the right state based on the desired configuration.
	 * SMA1/UFL1 bits are cleared first, then only the bits needed
	 * for the requested mode are set.
	 */
	data &= ~ICE_SMA1_MASK_E810T;
	if (ptp_pins[SMA1].func == PTP_PF_NONE &&
	    ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
		data |= ICE_SMA1_MASK_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX");
		data |= ICE_SMA1_TX_EN_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		/* U.FL 1 TX will always enable SMA 1 RX */
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		/* Same hardware state as above: all SMA1 bits stay clear */
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 TX");
		data |= ICE_SMA1_DIR_EN_E810T;
	}

	/* Same approach for the SMA2/UFL2 bit field */
	data &= ~ICE_SMA2_MASK_E810T;
	if (ptp_pins[SMA2].func == PTP_PF_NONE &&
	    ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
		data |= ICE_SMA2_MASK_E810T;
	} else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
			ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 RX");
		data |= (ICE_SMA2_TX_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "UFL2 RX");
		data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX");
		data |= (ICE_SMA2_DIR_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
		data |= ICE_SMA2_DIR_EN_E810T;
	}

	return ice_write_sma_ctrl_e810t(hw, data);
}
178
179 /**
180 * ice_ptp_set_sma_e810t
181 * @info: the driver's PTP info structure
182 * @pin: pin index in kernel structure
183 * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
184 *
185 * Set the configuration of a single SMA pin
186 */
187 static int
ice_ptp_set_sma_e810t(struct ptp_clock_info * info,unsigned int pin,enum ptp_pin_function func)188 ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
189 enum ptp_pin_function func)
190 {
191 struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
192 struct ice_pf *pf = ptp_info_to_pf(info);
193 struct ice_hw *hw = &pf->hw;
194 int err;
195
196 if (pin < SMA1 || func > PTP_PF_PEROUT)
197 return -EOPNOTSUPP;
198
199 err = ice_get_sma_config_e810t(hw, ptp_pins);
200 if (err)
201 return err;
202
203 /* Disable the same function on the other pin sharing the channel */
204 if (pin == SMA1 && ptp_pins[UFL1].func == func)
205 ptp_pins[UFL1].func = PTP_PF_NONE;
206 if (pin == UFL1 && ptp_pins[SMA1].func == func)
207 ptp_pins[SMA1].func = PTP_PF_NONE;
208
209 if (pin == SMA2 && ptp_pins[UFL2].func == func)
210 ptp_pins[UFL2].func = PTP_PF_NONE;
211 if (pin == UFL2 && ptp_pins[SMA2].func == func)
212 ptp_pins[SMA2].func = PTP_PF_NONE;
213
214 /* Set up new pin function in the temp table */
215 ptp_pins[pin].func = func;
216
217 return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
218 }
219
220 /**
221 * ice_verify_pin_e810t
222 * @info: the driver's PTP info structure
223 * @pin: Pin index
224 * @func: Assigned function
225 * @chan: Assigned channel
226 *
227 * Verify if pin supports requested pin function. If the Check pins consistency.
228 * Reconfigure the SMA logic attached to the given pin to enable its
229 * desired functionality
230 */
231 static int
ice_verify_pin_e810t(struct ptp_clock_info * info,unsigned int pin,enum ptp_pin_function func,unsigned int chan)232 ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
233 enum ptp_pin_function func, unsigned int chan)
234 {
235 /* Don't allow channel reassignment */
236 if (chan != ice_pin_desc_e810t[pin].chan)
237 return -EOPNOTSUPP;
238
239 /* Check if functions are properly assigned */
240 switch (func) {
241 case PTP_PF_NONE:
242 break;
243 case PTP_PF_EXTTS:
244 if (pin == UFL1)
245 return -EOPNOTSUPP;
246 break;
247 case PTP_PF_PEROUT:
248 if (pin == UFL2 || pin == GNSS)
249 return -EOPNOTSUPP;
250 break;
251 case PTP_PF_PHYSYNC:
252 return -EOPNOTSUPP;
253 }
254
255 return ice_ptp_set_sma_e810t(info, pin, func);
256 }
257
258 /**
259 * ice_set_tx_tstamp - Enable or disable Tx timestamping
260 * @pf: The PF pointer to search in
261 * @on: bool value for whether timestamps are enabled or disabled
262 */
ice_set_tx_tstamp(struct ice_pf * pf,bool on)263 static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
264 {
265 struct ice_vsi *vsi;
266 u32 val;
267 u16 i;
268
269 vsi = ice_get_main_vsi(pf);
270 if (!vsi)
271 return;
272
273 /* Set the timestamp enable flag for all the Tx rings */
274 ice_for_each_txq(vsi, i) {
275 if (!vsi->tx_rings[i])
276 continue;
277 vsi->tx_rings[i]->ptp_tx = on;
278 }
279
280 /* Configure the Tx timestamp interrupt */
281 val = rd32(&pf->hw, PFINT_OICR_ENA);
282 if (on)
283 val |= PFINT_OICR_TSYN_TX_M;
284 else
285 val &= ~PFINT_OICR_TSYN_TX_M;
286 wr32(&pf->hw, PFINT_OICR_ENA, val);
287
288 pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
289 }
290
291 /**
292 * ice_set_rx_tstamp - Enable or disable Rx timestamping
293 * @pf: The PF pointer to search in
294 * @on: bool value for whether timestamps are enabled or disabled
295 */
ice_set_rx_tstamp(struct ice_pf * pf,bool on)296 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
297 {
298 struct ice_vsi *vsi;
299 u16 i;
300
301 vsi = ice_get_main_vsi(pf);
302 if (!vsi)
303 return;
304
305 /* Set the timestamp flag for all the Rx rings */
306 ice_for_each_rxq(vsi, i) {
307 if (!vsi->rx_rings[i])
308 continue;
309 vsi->rx_rings[i]->ptp_rx = on;
310 }
311
312 pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
313 HWTSTAMP_FILTER_NONE;
314 }
315
/**
 * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit
 * @pf: Board private structure
 * @ena: bool value to enable or disable time stamp
 *
 * This function will configure timestamping during PTP initialization
 * and deinitialization. It enables or disables Tx and Rx timestamping
 * together.
 */
void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
{
	ice_set_tx_tstamp(pf, ena);
	ice_set_rx_tstamp(pf, ena);
}
329
330 /**
331 * ice_get_ptp_clock_index - Get the PTP clock index
332 * @pf: the PF pointer
333 *
334 * Determine the clock index of the PTP clock associated with this device. If
335 * this is the PF controlling the clock, just use the local access to the
336 * clock device pointer.
337 *
338 * Otherwise, read from the driver shared parameters to determine the clock
339 * index value.
340 *
341 * Returns: the index of the PTP clock associated with this device, or -1 if
342 * there is no associated clock.
343 */
ice_get_ptp_clock_index(struct ice_pf * pf)344 int ice_get_ptp_clock_index(struct ice_pf *pf)
345 {
346 struct device *dev = ice_pf_to_dev(pf);
347 enum ice_aqc_driver_params param_idx;
348 struct ice_hw *hw = &pf->hw;
349 u8 tmr_idx;
350 u32 value;
351 int err;
352
353 /* Use the ptp_clock structure if we're the main PF */
354 if (pf->ptp.clock)
355 return ptp_clock_index(pf->ptp.clock);
356
357 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
358 if (!tmr_idx)
359 param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
360 else
361 param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
362
363 err = ice_aq_get_driver_param(hw, param_idx, &value, NULL);
364 if (err) {
365 dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n",
366 err, ice_aq_str(hw->adminq.sq_last_status));
367 return -1;
368 }
369
370 /* The PTP clock index is an integer, and will be between 0 and
371 * INT_MAX. The highest bit of the driver shared parameter is used to
372 * indicate whether or not the currently stored clock index is valid.
373 */
374 if (!(value & PTP_SHARED_CLK_IDX_VALID))
375 return -1;
376
377 return value & ~PTP_SHARED_CLK_IDX_VALID;
378 }
379
380 /**
381 * ice_set_ptp_clock_index - Set the PTP clock index
382 * @pf: the PF pointer
383 *
384 * Set the PTP clock index for this device into the shared driver parameters,
385 * so that other PFs associated with this device can read it.
386 *
387 * If the PF is unable to store the clock index, it will log an error, but
388 * will continue operating PTP.
389 */
ice_set_ptp_clock_index(struct ice_pf * pf)390 static void ice_set_ptp_clock_index(struct ice_pf *pf)
391 {
392 struct device *dev = ice_pf_to_dev(pf);
393 enum ice_aqc_driver_params param_idx;
394 struct ice_hw *hw = &pf->hw;
395 u8 tmr_idx;
396 u32 value;
397 int err;
398
399 if (!pf->ptp.clock)
400 return;
401
402 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
403 if (!tmr_idx)
404 param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
405 else
406 param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
407
408 value = (u32)ptp_clock_index(pf->ptp.clock);
409 if (value > INT_MAX) {
410 dev_err(dev, "PTP Clock index is too large to store\n");
411 return;
412 }
413 value |= PTP_SHARED_CLK_IDX_VALID;
414
415 err = ice_aq_set_driver_param(hw, param_idx, value, NULL);
416 if (err) {
417 dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n",
418 err, ice_aq_str(hw->adminq.sq_last_status));
419 }
420 }
421
422 /**
423 * ice_clear_ptp_clock_index - Clear the PTP clock index
424 * @pf: the PF pointer
425 *
426 * Clear the PTP clock index for this device. Must be called when
427 * unregistering the PTP clock, in order to ensure other PFs stop reporting
428 * a clock object that no longer exists.
429 */
ice_clear_ptp_clock_index(struct ice_pf * pf)430 static void ice_clear_ptp_clock_index(struct ice_pf *pf)
431 {
432 struct device *dev = ice_pf_to_dev(pf);
433 enum ice_aqc_driver_params param_idx;
434 struct ice_hw *hw = &pf->hw;
435 u8 tmr_idx;
436 int err;
437
438 /* Do not clear the index if we don't own the timer */
439 if (!hw->func_caps.ts_func_info.src_tmr_owned)
440 return;
441
442 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
443 if (!tmr_idx)
444 param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
445 else
446 param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
447
448 err = ice_aq_set_driver_param(hw, param_idx, 0, NULL);
449 if (err) {
450 dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n",
451 err, ice_aq_str(hw->adminq.sq_last_status));
452 }
453 }
454
/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * Returns the 64-bit PHC time assembled from the TIME_H/TIME_L register
 * pair. The low register is re-read after the high register to detect a
 * rollover of the low 32 bits between the two reads; the register ordering
 * here must not be changed.
 */
static u64
ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}
492
493 /**
494 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
495 * @cached_phc_time: recently cached copy of PHC time
496 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
497 *
498 * Hardware captures timestamps which contain only 32 bits of nominal
499 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
500 * Note that the captured timestamp values may be 40 bits, but the lower
501 * 8 bits are sub-nanoseconds and generally discarded.
502 *
503 * Extend the 32bit nanosecond timestamp using the following algorithm and
504 * assumptions:
505 *
506 * 1) have a recently cached copy of the PHC time
507 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
508 * seconds) before or after the PHC time was captured.
509 * 3) calculate the delta between the cached time and the timestamp
510 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
511 * captured after the PHC time. In this case, the full timestamp is just
512 * the cached PHC time plus the delta.
513 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
514 * timestamp was captured *before* the PHC time, i.e. because the PHC
515 * cache was updated after the timestamp was captured by hardware. In this
516 * case, the full timestamp is the cached time minus the inverse delta.
517 *
518 * This algorithm works even if the PHC time was updated after a Tx timestamp
519 * was requested, but before the Tx timestamp event was reported from
520 * hardware.
521 *
522 * This calculation primarily relies on keeping the cached PHC time up to
523 * date. If the timestamp was captured more than 2^31 nanoseconds after the
524 * PHC time, it is possible that the lower 32bits of PHC time have
525 * overflowed more than once, and we might generate an incorrect timestamp.
526 *
527 * This is prevented by (a) periodically updating the cached PHC time once
528 * a second, and (b) discarding any Tx timestamp packet if it has waited for
529 * a timestamp for more than one second.
530 */
ice_ptp_extend_32b_ts(u64 cached_phc_time,u32 in_tstamp)531 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
532 {
533 u32 delta, phc_time_lo;
534 u64 ns;
535
536 /* Extract the lower 32 bits of the PHC time */
537 phc_time_lo = (u32)cached_phc_time;
538
539 /* Calculate the delta between the lower 32bits of the cached PHC
540 * time and the in_tstamp value
541 */
542 delta = (in_tstamp - phc_time_lo);
543
544 /* Do not assume that the in_tstamp is always more recent than the
545 * cached PHC time. If the delta is large, it indicates that the
546 * in_tstamp was taken in the past, and should be converted
547 * forward.
548 */
549 if (delta > (U32_MAX / 2)) {
550 /* reverse the delta calculation here */
551 delta = (phc_time_lo - in_tstamp);
552 ns = cached_phc_time - delta;
553 } else {
554 ns = cached_phc_time + delta;
555 }
556
557 return ns;
558 }
559
/**
 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
 * @pf: Board private structure
 * @in_tstamp: Ingress/egress 40b timestamp value
 *
 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
 *
 *  *--------------------------------------------------------------*
 *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
 *  *--------------------------------------------------------------*
 *
 * The low bit is an indicator of whether the timestamp is valid. The next
 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
 *
 * It is assumed that the caller verifies the timestamp is valid prior to
 * calling this function.
 *
 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
 * time stored in the device private PTP structure as the basis for timestamp
 * extension.
 *
 * Returns 0 (and counts a discarded timestamp) when the cached PHC time is
 * too old to safely extend the value.
 *
 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
 * algorithm.
 */
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
	const u64 mask = GENMASK_ULL(31, 0);
	unsigned long discard_time;

	/* Discard the hardware timestamp if the cached PHC time is too old */
	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (time_is_before_jiffies(discard_time)) {
		pf->ptp.tx_hwtstamp_discarded++;
		return 0;
	}

	/* Drop the valid bit and sub-nanosecond bits before extending */
	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
				     (in_tstamp >> 8) & mask);
}
601
/**
 * ice_ptp_tx_tstamp - Process Tx timestamps for a port
 * @tx: the PTP Tx timestamp tracker
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) copy the timestamp out of the PHY register
 * 2) clear the timestamp valid bit in the PHY register
 * 3) unlock the index by clearing the associated in_use bit
 * 4) extend the 40b timestamp value to get a 64bit timestamp
 * 5) send that timestamp to the stack
 *
 * Returns true if all timestamps were handled, and false if any slots remain
 * without a timestamp.
 *
 * After looping, if we still have waiting SKBs, return false. This may cause
 * us effectively poll even when not strictly necessary. We do this because
 * it's possible a new timestamp was requested around the same time as the
 * interrupt. In some cases hardware might not interrupt us again when the
 * timestamp is captured.
 *
 * Note that we only take the tracking lock when clearing the bit and when
 * checking if we need to re-queue this task. The only place where bits can be
 * set is the hard xmit routine where an SKB has a request flag set. The only
 * places where we clear bits are this work function, or the periodic cleanup
 * thread. If the cleanup thread clears a bit we're processing we catch it
 * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
 * starts a new timestamp, we might not begin processing it right away but we
 * will notice it at the end when we re-queue the task. If a Tx thread starts
 * a new timestamp just after this function exits without re-queuing,
 * the interrupt when the timestamp finishes should trigger. Avoiding holding
 * the lock for the entire function is important in order to ensure that Tx
 * threads do not get blocked while waiting for the lock.
 */
static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
{
	struct ice_ptp_port *ptp_port;
	bool ts_handled = true;
	struct ice_pf *pf;
	u8 idx;

	/* Tracker not initialized: nothing outstanding to process */
	if (!tx->init)
		return true;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->quad_offset;
		u64 raw_tstamp, tstamp;
		struct sk_buff *skb;
		int err;

		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

		err = ice_read_phy_tstamp(&pf->hw, tx->quad, phy_idx,
					  &raw_tstamp);
		if (err)
			continue;

		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

		/* Check if the timestamp is invalid or stale */
		if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* The timestamp is valid, so we'll go ahead and clear this
		 * index and then send the timestamp up to the stack.
		 */
		spin_lock(&tx->lock);
		tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		spin_unlock(&tx->lock);

		/* it's (unlikely but) possible we raced with the cleanup
		 * thread for discarding old timestamp requests.
		 */
		if (!skb)
			continue;

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		if (tstamp) {
			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
			ice_trace(tx_tstamp_complete, skb, idx);
		}

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}

	/* Check if we still have work to do. If so, re-queue this task to
	 * poll for remaining timestamps.
	 */
	spin_lock(&tx->lock);
	if (!bitmap_empty(tx->in_use, tx->len))
		ts_handled = false;
	spin_unlock(&tx->lock);

	return ts_handled;
}
710
711 /**
712 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
713 * @tx: Tx tracking structure to initialize
714 *
715 * Assumes that the length has already been initialized. Do not call directly,
716 * use the ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
717 */
718 static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx * tx)719 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
720 {
721 tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
722 if (!tx->tstamps)
723 return -ENOMEM;
724
725 tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
726 if (!tx->in_use) {
727 kfree(tx->tstamps);
728 tx->tstamps = NULL;
729 return -ENOMEM;
730 }
731
732 spin_lock_init(&tx->lock);
733
734 tx->init = 1;
735
736 return 0;
737 }
738
/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 *
 * Frees every pending SKB (counting each as flushed), clears the in-use
 * bits, and clears any residual timestamp left in the PHY block unless a
 * reset is in progress.
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	u8 idx;

	for (idx = 0; idx < tx->len; idx++) {
		u8 phy_idx = idx + tx->quad_offset;

		/* Take the lock so we don't race with the interrupt/work
		 * path that may be delivering this timestamp concurrently.
		 */
		spin_lock(&tx->lock);
		if (tx->tstamps[idx].skb) {
			dev_kfree_skb_any(tx->tstamps[idx].skb);
			tx->tstamps[idx].skb = NULL;
			pf->ptp.tx_hwtstamp_flushed++;
		}
		clear_bit(idx, tx->in_use);
		spin_unlock(&tx->lock);

		/* Clear any potential residual timestamp in the PHY block */
		if (!pf->hw.reset_ongoing)
			ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
	}
}
766
/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker. The tracker is
 * marked uninitialized first so the processing path stops touching it,
 * then any outstanding requests are flushed before the storage is freed.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	/* Stop the processing path before tearing down storage */
	tx->init = 0;

	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	tx->len = 0;
}
789
790 /**
791 * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
792 * @pf: Board private structure
793 * @tx: the Tx tracking structure to initialize
794 * @port: the port this structure tracks
795 *
796 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
797 * the timestamp block is shared for all ports in the same quad. To avoid
798 * ports using the same timestamp index, logically break the block of
799 * registers into chunks based on the port number.
800 */
801 static int
ice_ptp_init_tx_e822(struct ice_pf * pf,struct ice_ptp_tx * tx,u8 port)802 ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
803 {
804 tx->quad = port / ICE_PORTS_PER_QUAD;
805 tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
806 tx->len = INDEX_PER_PORT;
807
808 return ice_ptp_alloc_tx_tracker(tx);
809 }
810
811 /**
812 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
813 * @pf: Board private structure
814 * @tx: the Tx tracking structure to initialize
815 *
816 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
817 * port has its own block of timestamps, independent of the other ports.
818 */
819 static int
ice_ptp_init_tx_e810(struct ice_pf * pf,struct ice_ptp_tx * tx)820 ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
821 {
822 tx->quad = pf->hw.port_info->lport;
823 tx->quad_offset = 0;
824 tx->len = INDEX_PER_QUAD;
825
826 return ice_ptp_alloc_tx_tracker(tx);
827 }
828
/**
 * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
 * @pf: pointer to the PF struct
 * @tx: PTP Tx tracker to clean up
 *
 * Loop through the Tx timestamp requests and see if any of them have been
 * waiting for a long time. Discard any SKBs that have been waiting for more
 * than 2 seconds. This is long enough to be reasonably sure that the
 * timestamp will never be captured. This might happen if the packet gets
 * discarded before it reaches the PHY timestamping block.
 */
static void ice_ptp_tx_tstamp_cleanup(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	struct ice_hw *hw = &pf->hw;
	u8 idx;

	/* Nothing to clean up if the tracker isn't live */
	if (!tx->init)
		return;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct sk_buff *skb;
		u64 raw_tstamp;

		/* Check if this SKB has been waiting for too long */
		if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
			continue;

		/* Read tstamp to be able to use this register again */
		ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
				    &raw_tstamp);

		/* Lock while detaching the SKB so we don't race with the
		 * normal delivery path.
		 */
		spin_lock(&tx->lock);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		spin_unlock(&tx->lock);

		/* Count the number of Tx timestamps which have timed out */
		pf->ptp.tx_hwtstamp_timeouts++;

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}
873
/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old.
 *
 * Note that the cached copy in the PF PTP structure is always updated, even
 * if we can't update the copy in the Rx rings.
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long update_before;
	u64 systime;
	int i;

	/* Warn (and count) if the previous cache update was over 2s ago */
	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (pf->ptp.cached_phc_time &&
	    time_is_before_jiffies(update_before)) {
		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;

		dev_warn(dev, "%u msecs passed between update to cached PHC time\n",
			 jiffies_to_msecs(time_taken));
		pf->ptp.late_cached_phc_updates++;
	}

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);

	/* The Rx ring copies are only updated when no other thread holds
	 * the configuration-busy flag; the PF copy above is still fresh.
	 */
	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}
938
939 /**
940 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
941 * @pf: Board specific private structure
942 *
943 * This function must be called when the cached PHC time is no longer valid,
944 * such as after a time adjustment. It discards any outstanding Tx timestamps,
945 * and updates the cached PHC time for both the PF and Rx rings. If updating
946 * the PHC time cannot be done immediately, a warning message is logged and
947 * the work item is scheduled.
948 *
949 * These steps are required in order to ensure that we do not accidentally
950 * report a timestamp extended by the wrong PHC cached copy. Note that we
951 * do not directly update the cached timestamp here because it is possible
952 * this might produce an error when ICE_CFG_BUSY is set. If this occurred, we
953 * would have to try again. During that time window, timestamps might be
954 * requested and returned with an invalid extension. Thus, on failure to
955 * immediately update the cached PHC time we would need to zero the value
956 * anyways. For this reason, we just zero the value immediately and queue the
957 * update work item.
958 */
static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Update the cached PHC time immediately if possible, otherwise
	 * schedule the work item to execute soon.
	 */
	err = ice_ptp_update_cached_phctime(pf);
	if (err) {
		/* If another thread is updating the Rx rings, we won't
		 * properly reset them here. This could lead to reporting of
		 * invalid timestamps, but there isn't much we can do.
		 */
		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
			 __func__);

		/* Queue the work item to update the Rx rings when possible */
		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
					   msecs_to_jiffies(10));
	}

	/* Flush any outstanding Tx timestamps; they were captured against
	 * the old time base and must not be extended with the new one.
	 */
	ice_ptp_flush_tx_tracker(pf, &pf->ptp.port.tx);
}
984
985 /**
986 * ice_ptp_read_time - Read the time from the device
987 * @pf: Board private structure
988 * @ts: timespec structure to hold the current time value
989 * @sts: Optional parameter for holding a pair of system timestamps from
990 * the system clock. Will be ignored if NULL is given.
991 *
992 * This function reads the source clock registers and stores them in a timespec.
993 * However, since the registers are 64 bits of nanoseconds, we must convert the
994 * result to a timespec before we can return.
995 */
996 static void
ice_ptp_read_time(struct ice_pf * pf,struct timespec64 * ts,struct ptp_system_timestamp * sts)997 ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
998 struct ptp_system_timestamp *sts)
999 {
1000 u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);
1001
1002 *ts = ns_to_timespec64(time_ns);
1003 }
1004
1005 /**
1006 * ice_ptp_write_init - Set PHC time to provided value
1007 * @pf: Board private structure
1008 * @ts: timespec structure that holds the new time value
1009 *
1010 * Set the PHC time to the specified time provided in the timespec.
1011 */
ice_ptp_write_init(struct ice_pf * pf,struct timespec64 * ts)1012 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
1013 {
1014 u64 ns = timespec64_to_ns(ts);
1015 struct ice_hw *hw = &pf->hw;
1016
1017 return ice_ptp_init_time(hw, ns);
1018 }
1019
1020 /**
1021 * ice_ptp_write_adj - Adjust PHC clock time atomically
1022 * @pf: Board private structure
1023 * @adj: Adjustment in nanoseconds
1024 *
1025 * Perform an atomic adjustment of the PHC time by the specified number of
1026 * nanoseconds.
1027 */
static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
{
	/* Request an atomic adjustment of the PHC from the hardware */
	return ice_ptp_adj_clock(&pf->hw, adj);
}
1034
1035 /**
1036 * ice_base_incval - Get base timer increment value
1037 * @pf: Board private structure
1038 *
1039 * Look up the base timer increment value for this device. The base increment
1040 * value is used to define the nominal clock tick rate. This increment value
1041 * is programmed during device initialization. It is also used as the basis
1042 * for calculating adjustments using scaled_ppm.
1043 */
ice_base_incval(struct ice_pf * pf)1044 static u64 ice_base_incval(struct ice_pf *pf)
1045 {
1046 struct ice_hw *hw = &pf->hw;
1047 u64 incval;
1048
1049 if (ice_is_e810(hw))
1050 incval = ICE_PTP_NOMINAL_INCVAL_E810;
1051 else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
1052 incval = ice_e822_nominal_incval(ice_e822_time_ref(hw));
1053 else
1054 incval = UNKNOWN_INCVAL_E822;
1055
1056 dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
1057 incval);
1058
1059 return incval;
1060 }
1061
1062 /**
1063 * ice_ptp_reset_ts_memory_quad - Reset timestamp memory for one quad
1064 * @pf: The PF private data structure
1065 * @quad: The quad (0-4)
1066 */
static void ice_ptp_reset_ts_memory_quad(struct ice_pf *pf, int quad)
{
	struct ice_hw *hw = &pf->hw;

	/* Toggle the timestamp control bits: set all of them, then clear
	 * them, to reset the quad's timestamp memory.
	 */
	ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
	ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
}
1074
1075 /**
1076 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
1077 * @port: PTP port for which Tx FIFO is checked
1078 */
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
	int quad = port->port_num / ICE_PORTS_PER_QUAD;
	int offs = port->port_num % ICE_PORTS_PER_QUAD;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u32 val, phy_sts;
	int err;

	pf = ptp_port_to_pf(port);
	hw = &pf->hw;

	/* Once the FIFO has been observed empty there is nothing to check */
	if (port->tx_fifo_busy_cnt == FIFO_OK)
		return 0;

	/* need to read FIFO state */
	if (offs == 0 || offs == 1)
		err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS,
					     &val);
	else
		err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS,
					     &val);

	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
			port->port_num, err);
		return err;
	}

	/* Each status register covers two ports; select this port's field
	 * based on whether its offset within the quad is odd or even.
	 */
	if (offs & 0x1)
		phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S;
	else
		phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S;

	if (phy_sts & FIFO_EMPTY) {
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	port->tx_fifo_busy_cnt++;

	dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
		port->tx_fifo_busy_cnt, port->port_num);

	/* Give up after ICE_PTP_FIFO_NUM_CHECKS attempts: reset the quad's
	 * timestamp memory rather than waiting indefinitely.
	 */
	if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
		dev_dbg(ice_pf_to_dev(pf),
			"Port %d Tx FIFO still not empty; resetting quad %d\n",
			port->port_num, quad);
		ice_ptp_reset_ts_memory_quad(pf, quad);
		port->tx_fifo_busy_cnt = FIFO_OK;
		return 0;
	}

	return -EAGAIN;
}
1134
1135 /**
1136 * ice_ptp_check_tx_offset_valid - Check if the Tx PHY offset is valid
1137 * @port: the PTP port to check
1138 *
1139 * Checks whether the Tx offset for the PHY associated with this port is
1140 * valid. Returns 0 if the offset is valid, and a non-zero error code if it is
1141 * not.
1142 */
ice_ptp_check_tx_offset_valid(struct ice_ptp_port * port)1143 static int ice_ptp_check_tx_offset_valid(struct ice_ptp_port *port)
1144 {
1145 struct ice_pf *pf = ptp_port_to_pf(port);
1146 struct device *dev = ice_pf_to_dev(pf);
1147 struct ice_hw *hw = &pf->hw;
1148 u32 val;
1149 int err;
1150
1151 err = ice_ptp_check_tx_fifo(port);
1152 if (err)
1153 return err;
1154
1155 err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_TX_OV_STATUS,
1156 &val);
1157 if (err) {
1158 dev_err(dev, "Failed to read TX_OV_STATUS for port %d, err %d\n",
1159 port->port_num, err);
1160 return -EAGAIN;
1161 }
1162
1163 if (!(val & P_REG_TX_OV_STATUS_OV_M))
1164 return -EAGAIN;
1165
1166 return 0;
1167 }
1168
1169 /**
1170 * ice_ptp_check_rx_offset_valid - Check if the Rx PHY offset is valid
1171 * @port: the PTP port to check
1172 *
1173 * Checks whether the Rx offset for the PHY associated with this port is
1174 * valid. Returns 0 if the offset is valid, and a non-zero error code if it is
1175 * not.
1176 */
ice_ptp_check_rx_offset_valid(struct ice_ptp_port * port)1177 static int ice_ptp_check_rx_offset_valid(struct ice_ptp_port *port)
1178 {
1179 struct ice_pf *pf = ptp_port_to_pf(port);
1180 struct device *dev = ice_pf_to_dev(pf);
1181 struct ice_hw *hw = &pf->hw;
1182 int err;
1183 u32 val;
1184
1185 err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_RX_OV_STATUS,
1186 &val);
1187 if (err) {
1188 dev_err(dev, "Failed to read RX_OV_STATUS for port %d, err %d\n",
1189 port->port_num, err);
1190 return err;
1191 }
1192
1193 if (!(val & P_REG_RX_OV_STATUS_OV_M))
1194 return -EAGAIN;
1195
1196 return 0;
1197 }
1198
1199 /**
1200 * ice_ptp_check_offset_valid - Check port offset valid bit
1201 * @port: Port for which offset valid bit is checked
1202 *
1203 * Returns 0 if both Tx and Rx offset are valid, and -EAGAIN if one of the
1204 * offset is not ready.
1205 */
ice_ptp_check_offset_valid(struct ice_ptp_port * port)1206 static int ice_ptp_check_offset_valid(struct ice_ptp_port *port)
1207 {
1208 int tx_err, rx_err;
1209
1210 /* always check both Tx and Rx offset validity */
1211 tx_err = ice_ptp_check_tx_offset_valid(port);
1212 rx_err = ice_ptp_check_rx_offset_valid(port);
1213
1214 if (tx_err || rx_err)
1215 return -EAGAIN;
1216
1217 return 0;
1218 }
1219
1220 /**
1221 * ice_ptp_wait_for_offset_valid - Check for valid Tx and Rx offsets
1222 * @work: Pointer to the kthread_work structure for this task
1223 *
1224 * Check whether both the Tx and Rx offsets are valid for enabling the vernier
1225 * calibration.
1226 *
1227 * Once we have valid offsets from hardware, update the total Tx and Rx
1228 * offsets, and exit bypass mode. This enables more precise timestamps using
1229 * the extra data measured during the vernier calibration process.
1230 */
ice_ptp_wait_for_offset_valid(struct kthread_work * work)1231 static void ice_ptp_wait_for_offset_valid(struct kthread_work *work)
1232 {
1233 struct ice_ptp_port *port;
1234 int err;
1235 struct device *dev;
1236 struct ice_pf *pf;
1237 struct ice_hw *hw;
1238
1239 port = container_of(work, struct ice_ptp_port, ov_work.work);
1240 pf = ptp_port_to_pf(port);
1241 hw = &pf->hw;
1242 dev = ice_pf_to_dev(pf);
1243
1244 if (ice_is_reset_in_progress(pf->state))
1245 return;
1246
1247 if (ice_ptp_check_offset_valid(port)) {
1248 /* Offsets not ready yet, try again later */
1249 kthread_queue_delayed_work(pf->ptp.kworker,
1250 &port->ov_work,
1251 msecs_to_jiffies(100));
1252 return;
1253 }
1254
1255 /* Offsets are valid, so it is safe to exit bypass mode */
1256 err = ice_phy_exit_bypass_e822(hw, port->port_num);
1257 if (err) {
1258 dev_warn(dev, "Failed to exit bypass mode for PHY port %u, err %d\n",
1259 port->port_num, err);
1260 return;
1261 }
1262 }
1263
1264 /**
1265 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
1266 * @ptp_port: PTP port to stop
1267 */
static int
ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* Nothing to do for E810 devices */
	if (ice_is_e810(hw))
		return 0;

	/* ps_lock serializes PHY port start/stop transitions */
	mutex_lock(&ptp_port->ps_lock);

	/* Stop the offset-validity poller before taking the timer down */
	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

	err = ice_stop_phy_timer_e822(hw, port, true);
	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}
1292
1293 /**
1294 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
1295 * @ptp_port: PTP port for which the PHY start is set
1296 *
1297 * Start the PHY timestamping block, and initiate Vernier timestamping
1298 * calibration. If timestamping cannot be calibrated (such as if link is down)
1299 * then disable the timestamping block instead.
1300 */
static int
ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
{
	struct ice_pf *pf = ptp_port_to_pf(ptp_port);
	u8 port = ptp_port->port_num;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* Nothing to do for E810 devices */
	if (ice_is_e810(hw))
		return 0;

	/* Calibration requires link; with link down, stop the timer instead */
	if (!ptp_port->link_up)
		return ice_ptp_port_phy_stop(ptp_port);

	/* ps_lock serializes PHY port start/stop transitions */
	mutex_lock(&ptp_port->ps_lock);

	/* Cancel any in-flight offset-validity poll before restarting */
	kthread_cancel_delayed_work_sync(&ptp_port->ov_work);

	/* temporarily disable Tx timestamps while calibrating PHY offset */
	ptp_port->tx.calibrating = true;
	ptp_port->tx_fifo_busy_cnt = 0;

	/* Start the PHY timer in bypass mode */
	err = ice_start_phy_timer_e822(hw, port, true);
	if (err)
		goto out_unlock;

	/* Enable Tx timestamps right away */
	ptp_port->tx.calibrating = false;

	/* Kick off the poller that waits for valid vernier offsets */
	kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);

out_unlock:
	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
			port, err);

	mutex_unlock(&ptp_port->ps_lock);

	return err;
}
1342
1343 /**
1344 * ice_ptp_link_change - Set or clear port registers for timestamping
1345 * @pf: Board private structure
1346 * @port: Port for which the PHY start is set
1347 * @linkup: Link is up or down
1348 */
int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
{
	struct ice_ptp_port *ptp_port = &pf->ptp.port;

	if (!test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		return 0;

	/* Reject out-of-range ports and ports this PF does not own */
	if (port >= ICE_NUM_EXTERNAL_PORTS || ptp_port->port_num != port)
		return -EINVAL;

	/* Record the new link state for this port immediately */
	ptp_port->link_up = linkup;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		/* PTP is not setup */
		return -EAGAIN;

	return ice_ptp_port_phy_restart(ptp_port);
}
1372
1373 /**
1374 * ice_ptp_reset_ts_memory - Reset timestamp memory for all quads
1375 * @pf: The PF private data structure
1376 */
ice_ptp_reset_ts_memory(struct ice_pf * pf)1377 static void ice_ptp_reset_ts_memory(struct ice_pf *pf)
1378 {
1379 int quad;
1380
1381 quad = pf->hw.port_info->lport / ICE_PORTS_PER_QUAD;
1382 ice_ptp_reset_ts_memory_quad(pf, quad);
1383 }
1384
1385 /**
1386 * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt
1387 * @pf: PF private structure
1388 * @ena: bool value to enable or disable interrupt
1389 * @threshold: Minimum number of packets at which intr is triggered
1390 *
1391 * Utility function to enable or disable Tx timestamp interrupt and threshold
1392 */
static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
{
	struct ice_hw *hw = &pf->hw;
	int err = 0;
	int quad;
	u32 val;

	/* Clear stale timestamp memory before changing the interrupt config */
	ice_ptp_reset_ts_memory(pf);

	/* Apply the same configuration to every quad via read-modify-write */
	for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
		err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
					     &val);
		if (err)
			break;

		if (ena) {
			/* Enable the interrupt and program the threshold */
			val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
			val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
			val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) &
				Q_REG_TX_MEM_GBL_CFG_INTR_THR_M);
		} else {
			val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
		}

		err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
					      val);
		if (err)
			break;
	}

	if (err)
		dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n",
			err);
	return err;
}
1428
1429 /**
1430 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
1431 * @pf: Board private structure
1432 */
static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
{
	/* Restart (and recalibrate) the PHY timer for the PF's port. Any
	 * failure is already logged by ice_ptp_port_phy_restart, so the
	 * return value is not checked here.
	 */
	ice_ptp_port_phy_restart(&pf->ptp.port);
}
1437
1438 /**
1439 * ice_ptp_adjfine - Adjust clock increment rate
1440 * @info: the driver's PTP info structure
1441 * @scaled_ppm: Parts per million with 16-bit fractional field
1442 *
1443 * Adjust the frequency of the clock by the indicated scaled ppm from the
1444 * base frequency.
1445 */
ice_ptp_adjfine(struct ptp_clock_info * info,long scaled_ppm)1446 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
1447 {
1448 struct ice_pf *pf = ptp_info_to_pf(info);
1449 struct ice_hw *hw = &pf->hw;
1450 u64 incval, diff;
1451 int neg_adj = 0;
1452 int err;
1453
1454 incval = ice_base_incval(pf);
1455
1456 if (scaled_ppm < 0) {
1457 neg_adj = 1;
1458 scaled_ppm = -scaled_ppm;
1459 }
1460
1461 diff = mul_u64_u64_div_u64(incval, (u64)scaled_ppm,
1462 1000000ULL << 16);
1463 if (neg_adj)
1464 incval -= diff;
1465 else
1466 incval += diff;
1467
1468 err = ice_ptp_write_incval_locked(hw, incval);
1469 if (err) {
1470 dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
1471 err);
1472 return -EIO;
1473 }
1474
1475 return 0;
1476 }
1477
1478 /**
1479 * ice_ptp_extts_work - Workqueue task function
1480 * @work: external timestamp work structure
1481 *
1482 * Service for PTP external clock event
1483 */
ice_ptp_extts_work(struct kthread_work * work)1484 static void ice_ptp_extts_work(struct kthread_work *work)
1485 {
1486 struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work);
1487 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
1488 struct ptp_clock_event event;
1489 struct ice_hw *hw = &pf->hw;
1490 u8 chan, tmr_idx;
1491 u32 hi, lo;
1492
1493 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1494 /* Event time is captured by one of the two matched registers
1495 * GLTSYN_EVNT_L: 32 LSB of sampled time event
1496 * GLTSYN_EVNT_H: 32 MSB of sampled time event
1497 * Event is defined in GLTSYN_EVNT_0 register
1498 */
1499 for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
1500 /* Check if channel is enabled */
1501 if (pf->ptp.ext_ts_irq & (1 << chan)) {
1502 lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
1503 hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
1504 event.timestamp = (((u64)hi) << 32) | lo;
1505 event.type = PTP_CLOCK_EXTTS;
1506 event.index = chan;
1507
1508 /* Fire event */
1509 ptp_clock_event(pf->ptp.clock, &event);
1510 pf->ptp.ext_ts_irq &= ~(1 << chan);
1511 }
1512 }
1513 }
1514
1515 /**
1516 * ice_ptp_cfg_extts - Configure EXTTS pin and channel
1517 * @pf: Board private structure
1518 * @ena: true to enable; false to disable
1519 * @chan: GPIO channel (0-3)
1520 * @gpio_pin: GPIO pin
1521 * @extts_flags: request flags from the ptp_extts_request.flags
1522 */
1523 static int
ice_ptp_cfg_extts(struct ice_pf * pf,bool ena,unsigned int chan,u32 gpio_pin,unsigned int extts_flags)1524 ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
1525 unsigned int extts_flags)
1526 {
1527 u32 func, aux_reg, gpio_reg, irq_reg;
1528 struct ice_hw *hw = &pf->hw;
1529 u8 tmr_idx;
1530
1531 if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
1532 return -EINVAL;
1533
1534 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1535
1536 irq_reg = rd32(hw, PFINT_OICR_ENA);
1537
1538 if (ena) {
1539 /* Enable the interrupt */
1540 irq_reg |= PFINT_OICR_TSYN_EVNT_M;
1541 aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
1542
1543 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE BIT(0)
1544 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1)
1545
1546 /* set event level to requested edge */
1547 if (extts_flags & PTP_FALLING_EDGE)
1548 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
1549 if (extts_flags & PTP_RISING_EDGE)
1550 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
1551
1552 /* Write GPIO CTL reg.
1553 * 0x1 is input sampled by EVENT register(channel)
1554 * + num_in_channels * tmr_idx
1555 */
1556 func = 1 + chan + (tmr_idx * 3);
1557 gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
1558 GLGEN_GPIO_CTL_PIN_FUNC_M);
1559 pf->ptp.ext_ts_chan |= (1 << chan);
1560 } else {
1561 /* clear the values we set to reset defaults */
1562 aux_reg = 0;
1563 gpio_reg = 0;
1564 pf->ptp.ext_ts_chan &= ~(1 << chan);
1565 if (!pf->ptp.ext_ts_chan)
1566 irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
1567 }
1568
1569 wr32(hw, PFINT_OICR_ENA, irq_reg);
1570 wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
1571 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
1572
1573 return 0;
1574 }
1575
1576 /**
1577 * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
1578 * @pf: Board private structure
1579 * @chan: GPIO channel (0-3)
1580 * @config: desired periodic clk configuration. NULL will disable channel
1581 * @store: If set to true the values will be stored
1582 *
1583 * Configure the internal clock generator modules to generate the clock wave of
1584 * specified period.
1585 */
static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
			      struct ice_perout_channel *config, bool store)
{
	u64 current_time, period, start_time, phase;
	struct ice_hw *hw = &pf->hw;
	u32 func, val, gpio_pin;
	u8 tmr_idx;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	/* 0. Reset mode & out_en in AUX_OUT */
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);

	/* If we're disabling the output, clear out CLKO and TGT and keep
	 * output level low
	 */
	if (!config || !config->ena) {
		wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);

		/* Drive the pin as a plain output at its default level */
		val = GLGEN_GPIO_CTL_PIN_DIR_M;
		gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
		wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

		/* Store the value if requested */
		if (store)
			memset(&pf->ptp.perout_channels[chan], 0,
			       sizeof(struct ice_perout_channel));

		return 0;
	}
	period = config->period;
	start_time = config->start_time;
	/* phase = start_time modulo one period; preserved when the start
	 * time has to be moved forward below
	 */
	div64_u64_rem(start_time, period, &phase);
	gpio_pin = config->gpio_pin;

	/* 1. Write clkout with half of required period value */
	if (period & 0x1) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
		goto err;
	}

	period >>= 1;

	/* For proper operation, the GLTSYN_CLKO must be larger than clock tick
	 */
#define MIN_PULSE 3
	if (period <= MIN_PULSE || period > U32_MAX) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
			MIN_PULSE * 2);
		goto err;
	}

	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));

	/* Allow time for programming before start_time is hit */
	current_time = ice_ptp_read_src_clk_reg(pf, NULL);

	/* if start time is in the past start the timer at the nearest second
	 * maintaining phase
	 */
	if (start_time < current_time)
		start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
				       NSEC_PER_SEC) * NSEC_PER_SEC + phase;

	/* Compensate for the output propagation delay of the device */
	if (ice_is_e810(hw))
		start_time -= E810_OUT_PROP_DELAY_NS;
	else
		start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw));

	/* 2. Write TARGET time */
	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));

	/* 3. Write AUX_OUT register */
	val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);

	/* 4. write GPIO CTL reg */
	func = 8 + chan + (tmr_idx * 4);
	val = GLGEN_GPIO_CTL_PIN_DIR_M |
	      ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

	/* Store the value if requested. Note that only the phase (not the
	 * possibly-adjusted start_time) is kept for later re-enable.
	 */
	if (store) {
		memcpy(&pf->ptp.perout_channels[chan], config,
		       sizeof(struct ice_perout_channel));
		pf->ptp.perout_channels[chan].start_time = phase;
	}

	return 0;
err:
	dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
	return -EFAULT;
}
1683
1684 /**
1685 * ice_ptp_disable_all_clkout - Disable all currently configured outputs
1686 * @pf: pointer to the PF structure
1687 *
1688 * Disable all currently configured clock outputs. This is necessary before
1689 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to
1690 * re-enable the clocks again.
1691 */
ice_ptp_disable_all_clkout(struct ice_pf * pf)1692 static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
1693 {
1694 uint i;
1695
1696 for (i = 0; i < pf->ptp.info.n_per_out; i++)
1697 if (pf->ptp.perout_channels[i].ena)
1698 ice_ptp_cfg_clkout(pf, i, NULL, false);
1699 }
1700
1701 /**
1702 * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
1703 * @pf: pointer to the PF structure
1704 *
1705 * Enable all currently configured clock outputs. Use this after
1706 * ice_ptp_disable_all_clkout to reconfigure the output signals according to
1707 * their configuration.
1708 */
ice_ptp_enable_all_clkout(struct ice_pf * pf)1709 static void ice_ptp_enable_all_clkout(struct ice_pf *pf)
1710 {
1711 uint i;
1712
1713 for (i = 0; i < pf->ptp.info.n_per_out; i++)
1714 if (pf->ptp.perout_channels[i].ena)
1715 ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i],
1716 false);
1717 }
1718
1719 /**
1720 * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
1721 * @info: the driver's PTP info structure
1722 * @rq: The requested feature to change
1723 * @on: Enable/disable flag
1724 */
static int
ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
			 struct ptp_clock_request *rq, int on)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_perout_channel clk_cfg = {0};
	bool sma_pres = false;
	unsigned int chan;
	u32 gpio_pin;
	int err;

	/* Boards with SMA control logic map channels to fixed GPIO pins */
	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
		sma_pres = true;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		chan = rq->perout.index;
		if (sma_pres) {
			if (chan == ice_pin_desc_e810t[SMA1].chan)
				clk_cfg.gpio_pin = GPIO_20;
			else if (chan == ice_pin_desc_e810t[SMA2].chan)
				clk_cfg.gpio_pin = GPIO_22;
			else
				/* NOTE(review): returns bare -1 rather than a
				 * specific errno code - confirm intent.
				 */
				return -1;
		} else if (ice_is_e810t(&pf->hw)) {
			if (chan == 0)
				clk_cfg.gpio_pin = GPIO_20;
			else
				clk_cfg.gpio_pin = GPIO_22;
		} else if (chan == PPS_CLK_GEN_CHAN) {
			clk_cfg.gpio_pin = PPS_PIN_INDEX;
		} else {
			clk_cfg.gpio_pin = chan;
		}

		/* Flatten the request's sec/nsec pairs into nanoseconds */
		clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
				   rq->perout.period.nsec);
		clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
				       rq->perout.start.nsec);
		clk_cfg.ena = !!on;

		err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
		break;
	case PTP_CLK_REQ_EXTTS:
		chan = rq->extts.index;
		if (sma_pres) {
			if (chan < ice_pin_desc_e810t[SMA2].chan)
				gpio_pin = GPIO_21;
			else
				gpio_pin = GPIO_23;
		} else if (ice_is_e810t(&pf->hw)) {
			if (chan == 0)
				gpio_pin = GPIO_21;
			else
				gpio_pin = GPIO_23;
		} else {
			gpio_pin = chan;
		}

		err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
					rq->extts.flags);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}
1793
1794 /**
1795 * ice_ptp_gettimex64 - Get the time of the clock
1796 * @info: the driver's PTP info structure
1797 * @ts: timespec64 structure to hold the current time value
1798 * @sts: Optional parameter for holding a pair of system timestamps from
1799 * the system clock. Will be ignored if NULL is given.
1800 *
1801 * Read the device clock and return the correct value on ns, after converting it
1802 * into a timespec struct.
1803 */
static int
ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
		   struct ptp_system_timestamp *sts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;

	/* The PTP hardware semaphore serializes PHC access; fail with
	 * -EBUSY if it cannot be taken.
	 */
	if (!ice_ptp_lock(hw)) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
		return -EBUSY;
	}

	ice_ptp_read_time(pf, ts, sts);
	ice_ptp_unlock(hw);

	return 0;
}
1821
1822 /**
1823 * ice_ptp_settime64 - Set the time of the clock
1824 * @info: the driver's PTP info structure
1825 * @ts: timespec64 structure that holds the new time value
1826 *
1827 * Set the device clock to the user input value. The conversion from timespec
1828 * to ns happens in the write function.
1829 */
static int
ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct timespec64 ts64 = *ts;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* For Vernier mode, we need to recalibrate after new settime
	 * Start with disabling timestamp block
	 */
	if (pf->ptp.port.link_up)
		ice_ptp_port_phy_stop(&pf->ptp.port);

	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto exit;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	err = ice_ptp_write_init(pf, &ts64);
	ice_ptp_unlock(hw);

	/* Only invalidate the cached PHC time after a successful write */
	if (!err)
		ice_ptp_reset_cached_phctime(pf);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_clkout(pf);

	/* Recalibrate and re-enable timestamp block */
	if (pf->ptp.port.link_up)
		ice_ptp_port_phy_restart(&pf->ptp.port);
exit:
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
		return err;
	}

	return 0;
}
1872
1873 /**
1874 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
1875 * @info: the driver's PTP info structure
1876 * @delta: Offset in nanoseconds to adjust the time by
1877 */
static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
{
	struct timespec64 cur;
	int err;

	/* Read-modify-write the clock. This is not atomic: the clock keeps
	 * running between the read and the write.
	 */
	err = ice_ptp_gettimex64(info, &cur, NULL);
	if (err)
		return err;

	cur = timespec64_add(cur, ns_to_timespec64(delta));

	return ice_ptp_settime64(info, &cur);
}
1891
1892 /**
1893 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
1894 * @info: the driver's PTP info structure
1895 * @delta: Offset in nanoseconds to adjust the time by
1896 */
static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	/* Hardware only supports atomic adjustments using signed 32-bit
	 * integers. For any adjustment outside this range, perform
	 * a non-atomic get->adjust->set flow.
	 */
	if (delta > S32_MAX || delta < S32_MIN) {
		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
		return ice_ptp_adjtime_nonatomic(info, delta);
	}

	if (!ice_ptp_lock(hw)) {
		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
		return -EBUSY;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	err = ice_ptp_write_adj(pf, delta);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_clkout(pf);

	ice_ptp_unlock(hw);

	if (err) {
		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
		return err;
	}

	/* The cached PHC time no longer matches the adjusted clock */
	ice_ptp_reset_cached_phctime(pf);

	return 0;
}
1939
1940 #ifdef CONFIG_ICE_HWTS
1941 /**
1942 * ice_ptp_get_syncdevicetime - Get the cross time stamp info
1943 * @device: Current device time
1944 * @system: System counter value read synchronously with device time
1945 * @ctx: Context provided by timekeeping code
1946 *
1947 * Read device and system (ART) clock simultaneously and return the corrected
1948 * clock values in ns.
1949 */
static int
ice_ptp_get_syncdevicetime(ktime_t *device,
			   struct system_counterval_t *system,
			   void *ctx)
{
	struct ice_pf *pf = (struct ice_pf *)ctx;
	struct ice_hw *hw = &pf->hw;
	u32 hh_lock, hh_art_ctl;
	int i;

	/* Get the HW lock */
	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
	if (hh_lock & PFHH_SEM_BUSY_M) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
		return -EFAULT;
	}

	/* Start the ART and device clock sync sequence */
	hh_art_ctl = rd32(hw, GLHH_ART_CTL);
	hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
	wr32(hw, GLHH_ART_CTL, hh_art_ctl);

#define MAX_HH_LOCK_TRIES 100

	/* Poll until hardware clears the ACTIVE bit, signalling that both
	 * clock values have been captured.
	 */
	for (i = 0; i < MAX_HH_LOCK_TRIES; i++) {
		/* Wait for sync to complete */
		hh_art_ctl = rd32(hw, GLHH_ART_CTL);
		if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
			udelay(1);
			continue;
		} else {
			u32 hh_ts_lo, hh_ts_hi, tmr_idx;
			u64 hh_ts;

			tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
			/* Read ART time */
			hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
			hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
			*system = convert_art_ns_to_tsc(hh_ts);
			/* Read Device source clock time */
			hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
			hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
			*device = ns_to_ktime(hh_ts);
			break;
		}
	}
	/* Release HW lock */
	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
	hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
	wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);

	/* The sync never went idle within the polling budget */
	if (i == MAX_HH_LOCK_TRIES)
		return -ETIMEDOUT;

	return 0;
}
2008
/**
 * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp
 * @info: the driver's PTP info structure
 * @cts: The memory to fill the cross timestamp info
 *
 * Capture a cross timestamp between the ART and the device PTP hardware
 * clock. Fill the cross timestamp information and report it back to the
 * caller.
 *
 * This is only valid for E822 devices which have support for generating the
 * cross timestamp via PCIe PTM.
 *
 * In order to correctly correlate the ART timestamp back to the TSC time, the
 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
 *
 * Return: 0 on success, otherwise the error from the timekeeping core or
 * from ice_ptp_get_syncdevicetime.
 */
static int
ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info,
			    struct system_device_crosststamp *cts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);

	/* The timekeeping core performs the device/system correlation using
	 * our per-device sync callback
	 */
	return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
					     pf, NULL, cts);
}
#endif /* CONFIG_ICE_HWTS */
2034
2035 /**
2036 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
2037 * @pf: Board private structure
2038 * @ifr: ioctl data
2039 *
2040 * Copy the timestamping config to user buffer
2041 */
ice_ptp_get_ts_config(struct ice_pf * pf,struct ifreq * ifr)2042 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2043 {
2044 struct hwtstamp_config *config;
2045
2046 if (!test_bit(ICE_FLAG_PTP, pf->flags))
2047 return -EIO;
2048
2049 config = &pf->ptp.tstamp_config;
2050
2051 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
2052 -EFAULT : 0;
2053 }
2054
2055 /**
2056 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
2057 * @pf: Board private structure
2058 * @config: hwtstamp settings requested or saved
2059 */
2060 static int
ice_ptp_set_timestamp_mode(struct ice_pf * pf,struct hwtstamp_config * config)2061 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
2062 {
2063 switch (config->tx_type) {
2064 case HWTSTAMP_TX_OFF:
2065 ice_set_tx_tstamp(pf, false);
2066 break;
2067 case HWTSTAMP_TX_ON:
2068 ice_set_tx_tstamp(pf, true);
2069 break;
2070 default:
2071 return -ERANGE;
2072 }
2073
2074 switch (config->rx_filter) {
2075 case HWTSTAMP_FILTER_NONE:
2076 ice_set_rx_tstamp(pf, false);
2077 break;
2078 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2079 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2080 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2081 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2082 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2083 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2084 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2085 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2086 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2087 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2088 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2089 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2090 case HWTSTAMP_FILTER_NTP_ALL:
2091 case HWTSTAMP_FILTER_ALL:
2092 ice_set_rx_tstamp(pf, true);
2093 break;
2094 default:
2095 return -ERANGE;
2096 }
2097
2098 return 0;
2099 }
2100
2101 /**
2102 * ice_ptp_set_ts_config - ioctl interface to control the timestamping
2103 * @pf: Board private structure
2104 * @ifr: ioctl data
2105 *
2106 * Get the user config and store it
2107 */
ice_ptp_set_ts_config(struct ice_pf * pf,struct ifreq * ifr)2108 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
2109 {
2110 struct hwtstamp_config config;
2111 int err;
2112
2113 if (!test_bit(ICE_FLAG_PTP, pf->flags))
2114 return -EAGAIN;
2115
2116 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2117 return -EFAULT;
2118
2119 err = ice_ptp_set_timestamp_mode(pf, &config);
2120 if (err)
2121 return err;
2122
2123 /* Return the actual configuration set */
2124 config = pf->ptp.tstamp_config;
2125
2126 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
2127 -EFAULT : 0;
2128 }
2129
2130 /**
2131 * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
2132 * @rx_ring: Ring to get the VSI info
2133 * @rx_desc: Receive descriptor
2134 * @skb: Particular skb to send timestamp with
2135 *
2136 * The driver receives a notification in the receive descriptor with timestamp.
2137 * The timestamp is in ns, so we must convert the result first.
2138 */
2139 void
ice_ptp_rx_hwtstamp(struct ice_rx_ring * rx_ring,union ice_32b_rx_flex_desc * rx_desc,struct sk_buff * skb)2140 ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
2141 union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
2142 {
2143 struct skb_shared_hwtstamps *hwtstamps;
2144 u64 ts_ns, cached_time;
2145 u32 ts_high;
2146
2147 if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
2148 return;
2149
2150 cached_time = READ_ONCE(rx_ring->cached_phctime);
2151
2152 /* Do not report a timestamp if we don't have a cached PHC time */
2153 if (!cached_time)
2154 return;
2155
2156 /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
2157 * PHC value, rather than accessing the PF. This also allows us to
2158 * simply pass the upper 32bits of nanoseconds directly. Calling
2159 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
2160 * bits itself.
2161 */
2162 ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
2163 ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
2164
2165 hwtstamps = skb_hwtstamps(skb);
2166 memset(hwtstamps, 0, sizeof(*hwtstamps));
2167 hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
2168 }
2169
2170 /**
2171 * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
2172 * @pf: pointer to the PF structure
2173 * @info: PTP clock info structure
2174 *
2175 * Disable the OS access to the SMA pins. Called to clear out the OS
2176 * indications of pin support when we fail to setup the E810-T SMA control
2177 * register.
2178 */
2179 static void
ice_ptp_disable_sma_pins_e810t(struct ice_pf * pf,struct ptp_clock_info * info)2180 ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
2181 {
2182 struct device *dev = ice_pf_to_dev(pf);
2183
2184 dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
2185
2186 info->enable = NULL;
2187 info->verify = NULL;
2188 info->n_pins = 0;
2189 info->n_ext_ts = 0;
2190 info->n_per_out = 0;
2191 }
2192
2193 /**
2194 * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
2195 * @pf: pointer to the PF structure
2196 * @info: PTP clock info structure
2197 *
2198 * Finish setting up the SMA pins by allocating pin_config, and setting it up
2199 * according to the current status of the SMA. On failure, disable all of the
2200 * extended SMA pin support.
2201 */
2202 static void
ice_ptp_setup_sma_pins_e810t(struct ice_pf * pf,struct ptp_clock_info * info)2203 ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
2204 {
2205 struct device *dev = ice_pf_to_dev(pf);
2206 int err;
2207
2208 /* Allocate memory for kernel pins interface */
2209 info->pin_config = devm_kcalloc(dev, info->n_pins,
2210 sizeof(*info->pin_config), GFP_KERNEL);
2211 if (!info->pin_config) {
2212 ice_ptp_disable_sma_pins_e810t(pf, info);
2213 return;
2214 }
2215
2216 /* Read current SMA status */
2217 err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
2218 if (err)
2219 ice_ptp_disable_sma_pins_e810t(pf, info);
2220 }
2221
2222 /**
2223 * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
2224 * @pf: pointer to the PF instance
2225 * @info: PTP clock capabilities
2226 */
2227 static void
ice_ptp_setup_pins_e810(struct ice_pf * pf,struct ptp_clock_info * info)2228 ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
2229 {
2230 info->n_per_out = N_PER_OUT_E810;
2231
2232 if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
2233 info->n_ext_ts = N_EXT_TS_E810;
2234
2235 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
2236 info->n_ext_ts = N_EXT_TS_E810;
2237 info->n_pins = NUM_PTP_PINS_E810T;
2238 info->verify = ice_verify_pin_e810t;
2239
2240 /* Complete setup of the SMA pins */
2241 ice_ptp_setup_sma_pins_e810t(pf, info);
2242 }
2243 }
2244
/**
 * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E822 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for E822
 * devices.
 */
static void
ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info)
{
#ifdef CONFIG_ICE_HWTS
	/* Cross timestamping requires the CPU's ART counter and a TSC with a
	 * known frequency so device time can be correlated to system time
	 */
	if (boot_cpu_has(X86_FEATURE_ART) &&
	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
		info->getcrosststamp = ice_ptp_getcrosststamp_e822;
#endif /* CONFIG_ICE_HWTS */
}
2264
/**
 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E810 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for e810
 * devices.
 */
static void
ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
	/* E810 supports GPIO pin control; advertise it and set up the pins */
	info->enable = ice_ptp_gpio_enable_e810;
	ice_ptp_setup_pins_e810(pf, info);
}
2281
2282 /**
2283 * ice_ptp_set_caps - Set PTP capabilities
2284 * @pf: Board private structure
2285 */
ice_ptp_set_caps(struct ice_pf * pf)2286 static void ice_ptp_set_caps(struct ice_pf *pf)
2287 {
2288 struct ptp_clock_info *info = &pf->ptp.info;
2289 struct device *dev = ice_pf_to_dev(pf);
2290
2291 snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
2292 dev_driver_string(dev), dev_name(dev));
2293 info->owner = THIS_MODULE;
2294 info->max_adj = 999999999;
2295 info->adjtime = ice_ptp_adjtime;
2296 info->adjfine = ice_ptp_adjfine;
2297 info->gettimex64 = ice_ptp_gettimex64;
2298 info->settime64 = ice_ptp_settime64;
2299
2300 if (ice_is_e810(&pf->hw))
2301 ice_ptp_set_funcs_e810(pf, info);
2302 else
2303 ice_ptp_set_funcs_e822(pf, info);
2304 }
2305
2306 /**
2307 * ice_ptp_create_clock - Create PTP clock device for userspace
2308 * @pf: Board private structure
2309 *
2310 * This function creates a new PTP clock device. It only creates one if we
2311 * don't already have one. Will return error if it can't create one, but success
2312 * if we already have a device. Should be used by ice_ptp_init to create clock
2313 * initially, and prevent global resets from creating new clock devices.
2314 */
ice_ptp_create_clock(struct ice_pf * pf)2315 static long ice_ptp_create_clock(struct ice_pf *pf)
2316 {
2317 struct ptp_clock_info *info;
2318 struct ptp_clock *clock;
2319 struct device *dev;
2320
2321 /* No need to create a clock device if we already have one */
2322 if (pf->ptp.clock)
2323 return 0;
2324
2325 ice_ptp_set_caps(pf);
2326
2327 info = &pf->ptp.info;
2328 dev = ice_pf_to_dev(pf);
2329
2330 /* Attempt to register the clock before enabling the hardware. */
2331 clock = ptp_clock_register(info, dev);
2332 if (IS_ERR(clock))
2333 return PTR_ERR(clock);
2334
2335 pf->ptp.clock = clock;
2336
2337 return 0;
2338 }
2339
/**
 * ice_ptp_request_ts - Request an available Tx timestamp index
 * @tx: the PTP Tx timestamp tracker to request from
 * @skb: the SKB to associate with this timestamp request
 *
 * Return: the PHY timestamp register index to use for this packet, or -1 if
 * the tracker is not ready or no index is free.
 */
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	u8 idx;

	/* Check if this tracker is initialized */
	if (!tx->init || tx->calibrating)
		return -1;

	spin_lock(&tx->lock);
	/* Find and set the first available index */
	idx = find_first_zero_bit(tx->in_use, tx->len);
	if (idx < tx->len) {
		/* We got a valid index that no other thread could have set. Store
		 * a reference to the skb and the start time to allow discarding old
		 * requests.
		 */
		set_bit(idx, tx->in_use);
		tx->tstamps[idx].start = jiffies;
		/* Hold a reference on the skb until the timestamp completes */
		tx->tstamps[idx].skb = skb_get(skb);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		ice_trace(tx_tstamp_request, skb, idx);
	}

	spin_unlock(&tx->lock);

	/* return the appropriate PHY timestamp register index, -1 if no
	 * indexes were available.
	 */
	if (idx >= tx->len)
		return -1;
	else
		return idx + tx->quad_offset;
}
2378
/**
 * ice_ptp_process_ts - Process the PTP Tx timestamps
 * @pf: Board private structure
 *
 * Returns true if timestamps are processed.
 */
bool ice_ptp_process_ts(struct ice_pf *pf)
{
	/* Delegate to the Tx timestamp tracker for this PF's port */
	return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
}
2389
/**
 * ice_ptp_periodic_work - Periodic PTP maintenance work item
 * @work: the kthread delayed work structure
 *
 * Refresh the cached PHC time used to extend hardware timestamps, and clean
 * up stale Tx timestamp requests. Reschedules itself while PTP is enabled.
 */
static void ice_ptp_periodic_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
	int err;

	/* Stop rescheduling once PTP support has been torn down */
	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	err = ice_ptp_update_cached_phctime(pf);

	ice_ptp_tx_tstamp_cleanup(pf, &pf->ptp.port.tx);

	/* Run twice a second or reschedule if phc update failed */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
				   msecs_to_jiffies(err ? 10 : 500));
}
2407
/**
 * ice_ptp_reset - Initialize PTP hardware clock support after reset
 * @pf: Board private structure
 *
 * Re-initialize PTP after a device reset. For a PF reset only the Tx tracker
 * and periodic work need to be restarted. For deeper resets, the source
 * timer owner also re-initializes the PHC, restoring the time from the
 * cached value captured in ice_ptp_prepare_for_reset when available.
 */
void ice_ptp_reset(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err, itr = 1;
	u64 time_diff;

	/* A PF reset does not disturb the PHC; skip re-programming it */
	if (test_bit(ICE_PFR_REQ, pf->state))
		goto pfr;

	/* Only the source timer owner re-initializes the shared clock */
	if (!hw->func_caps.ts_func_info.src_tmr_owned)
		goto reset_ts;

	err = ice_ptp_init_phc(hw);
	if (err)
		goto err;

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err) {
		ice_ptp_unlock(hw);
		goto err;
	}

	/* Write the initial Time value to PHY and LAN using the cached PHC
	 * time before the reset and time difference between stopping and
	 * starting the clock.
	 */
	if (ptp->cached_phc_time) {
		time_diff = ktime_get_real_ns() - ptp->reset_time;
		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
	} else {
		ts = ktime_to_timespec64(ktime_get_real());
	}
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	if (!ice_is_e810(hw)) {
		/* Enable quad interrupts */
		err = ice_ptp_tx_ena_intr(pf, true, itr);
		if (err)
			goto err;
	}

reset_ts:
	/* Restart the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

pfr:
	/* Init Tx structures */
	if (ice_is_e810(&pf->hw)) {
		err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
	} else {
		kthread_init_delayed_work(&ptp->port.ov_work,
					  ice_ptp_wait_for_offset_valid);
		err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,
					   ptp->port.port_num);
	}
	if (err)
		goto err;

	set_bit(ICE_FLAG_PTP, pf->flags);

	/* Start periodic work going */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);

	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
	return;

err:
	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
2497
/**
 * ice_ptp_prepare_for_reset - Prepare PTP for reset
 * @pf: Board private structure
 *
 * Quiesce PTP activity before a reset: stop timestamping, cancel work items,
 * and — for resets deeper than a PF reset — release the Tx tracker, disable
 * clock outputs, and record the wall-clock time so ice_ptp_reset can restore
 * the PHC afterwards.
 */
void ice_ptp_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	u8 src_tmr;

	clear_bit(ICE_FLAG_PTP, pf->flags);

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_cfg_timestamp(pf, false);

	kthread_cancel_delayed_work_sync(&ptp->work);
	kthread_cancel_work_sync(&ptp->extts_work);

	/* A PF reset keeps the PHC and trackers intact; nothing more to do */
	if (test_bit(ICE_PFR_REQ, pf->state))
		return;

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);

	/* Disable source clock */
	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);

	/* Acquire PHC and system timer to restore after reset */
	ptp->reset_time = ktime_get_real_ns();
}
2531
/**
 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
 * @pf: Board private structure
 *
 * Setup and initialize a PTP clock device that represents the device hardware
 * clock. Save the clock index for other functions connected to the same
 * hardware resource.
 *
 * Return: 0 on success, -EBUSY if the hardware semaphore could not be taken,
 * or an error from PHC initialization or clock registration.
 */
static int ice_ptp_init_owner(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err, itr = 1;

	err = ice_ptp_init_phc(hw);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
			err);
		return err;
	}

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err_exit;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	/* Seed the PHC from the system wall clock */
	ts = ktime_to_timespec64(ktime_get_real());
	/* Write the initial Time value to PHY and LAN */
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	if (!ice_is_e810(hw)) {
		/* Enable quad interrupts */
		err = ice_ptp_tx_ena_intr(pf, true, itr);
		if (err)
			goto err_exit;
	}

	/* Ensure we have a clock device */
	err = ice_ptp_create_clock(pf);
	if (err)
		goto err_clk;

	/* Store the PTP clock index for other PFs */
	ice_set_ptp_clock_index(pf);

	return 0;

err_clk:
	pf->ptp.clock = NULL;
err_exit:
	return err;
}
2599
2600 /**
2601 * ice_ptp_init_work - Initialize PTP work threads
2602 * @pf: Board private structure
2603 * @ptp: PF PTP structure
2604 */
ice_ptp_init_work(struct ice_pf * pf,struct ice_ptp * ptp)2605 static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
2606 {
2607 struct kthread_worker *kworker;
2608
2609 /* Initialize work functions */
2610 kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
2611 kthread_init_work(&ptp->extts_work, ice_ptp_extts_work);
2612
2613 /* Allocate a kworker for handling work required for the ports
2614 * connected to the PTP hardware clock.
2615 */
2616 kworker = kthread_create_worker(0, "ice-ptp-%s",
2617 dev_name(ice_pf_to_dev(pf)));
2618 if (IS_ERR(kworker))
2619 return PTR_ERR(kworker);
2620
2621 ptp->kworker = kworker;
2622
2623 /* Start periodic work going */
2624 kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
2625
2626 return 0;
2627 }
2628
2629 /**
2630 * ice_ptp_init_port - Initialize PTP port structure
2631 * @pf: Board private structure
2632 * @ptp_port: PTP port structure
2633 */
ice_ptp_init_port(struct ice_pf * pf,struct ice_ptp_port * ptp_port)2634 static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
2635 {
2636 mutex_init(&ptp_port->ps_lock);
2637
2638 if (ice_is_e810(&pf->hw))
2639 return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
2640
2641 kthread_init_delayed_work(&ptp_port->ov_work,
2642 ice_ptp_wait_for_offset_valid);
2643 return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num);
2644 }
2645
/**
 * ice_ptp_init - Initialize PTP hardware clock support
 * @pf: Board private structure
 *
 * Set up the device for interacting with the PTP hardware clock for all
 * functions, both the function that owns the clock hardware, and the
 * functions connected to the clock hardware.
 *
 * The clock owner will allocate and register a ptp_clock with the
 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
 * items used for asynchronous work such as Tx timestamps and periodic work.
 */
void ice_ptp_init(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* If this function owns the clock hardware, it must allocate and
	 * configure the PTP clock device to represent it.
	 */
	if (hw->func_caps.ts_func_info.src_tmr_owned) {
		err = ice_ptp_init_owner(pf);
		if (err)
			goto err;
	}

	ptp->port.port_num = hw->pf_id;
	err = ice_ptp_init_port(pf, &ptp->port);
	if (err)
		goto err;

	/* Start the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

	set_bit(ICE_FLAG_PTP, pf->flags);
	err = ice_ptp_init_work(pf, ptp);
	if (err)
		goto err;

	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
	return;

err:
	/* If we registered a PTP clock, release it */
	if (pf->ptp.clock) {
		ptp_clock_unregister(ptp->clock);
		pf->ptp.clock = NULL;
	}
	clear_bit(ICE_FLAG_PTP, pf->flags);
	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
2698
/**
 * ice_ptp_release - Disable the driver/HW support and unregister the clock
 * @pf: Board private structure
 *
 * This function handles the cleanup work required from the initialization by
 * clearing out the important information and unregistering the clock
 */
void ice_ptp_release(struct ice_pf *pf)
{
	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_cfg_timestamp(pf, false);

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	clear_bit(ICE_FLAG_PTP, pf->flags);

	/* Cancel pending work after clearing the flag so it stops
	 * rescheduling itself
	 */
	kthread_cancel_delayed_work_sync(&pf->ptp.work);

	ice_ptp_port_phy_stop(&pf->ptp.port);
	mutex_destroy(&pf->ptp.port.ps_lock);
	if (pf->ptp.kworker) {
		kthread_destroy_worker(pf->ptp.kworker);
		pf->ptp.kworker = NULL;
	}

	/* No clock was registered (e.g. non-owner function); done */
	if (!pf->ptp.clock)
		return;

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	ice_clear_ptp_clock_index(pf);
	ptp_clock_unregister(pf->ptp.clock);
	pf->ptp.clock = NULL;

	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}
2739