1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3
4 #include <linux/delay.h>
5 #include "ice_common.h"
6 #include "ice_ptp_hw.h"
7 #include "ice_ptp_consts.h"
8 #include "ice_cgu_regs.h"
9
10 /* Low level functions for interacting with and managing the device clock used
11 * for the Precision Time Protocol.
12 *
13 * The ice hardware represents the current time using three registers:
14 *
15 * GLTSYN_TIME_H GLTSYN_TIME_L GLTSYN_TIME_R
16 * +---------------+ +---------------+ +---------------+
17 * | 32 bits | | 32 bits | | 32 bits |
18 * +---------------+ +---------------+ +---------------+
19 *
20 * The registers are incremented every clock tick using a 40bit increment
21 * value defined over two registers:
22 *
23 * GLTSYN_INCVAL_H GLTSYN_INCVAL_L
24 * +---------------+ +---------------+
25 * | 8 bits | | 32 bits |
26 * +---------------+ +---------------+
27 *
28 * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
29 * registers every clock source tick. Depending on the specific device
30 * configuration, the clock source frequency could be one of a number of
31 * values.
32 *
33 * For E810 devices, the increment frequency is 812.5 MHz
34 *
35 * For E822 devices the clock can be derived from different sources, and the
36 * increment has an effective frequency of one of the following:
37 * - 823.4375 MHz
38 * - 783.36 MHz
39 * - 796.875 MHz
40 * - 816 MHz
41 * - 830.078125 MHz
42 * - 783.36 MHz
43 *
44 * The hardware captures timestamps in the PHY for incoming packets, and for
45 * outgoing packets on request. To support this, the PHY maintains a timer
46 * that matches the lower 64 bits of the global source timer.
47 *
48 * In order to ensure that the PHY timers and the source timer are equivalent,
49 * shadow registers are used to prepare the desired initial values. A special
50 * sync command is issued to trigger copying from the shadow registers into
51 * the appropriate source and PHY registers simultaneously.
52 *
53 * The driver supports devices which have different PHYs with subtly different
54 * mechanisms to program and control the timers. We divide the devices into
55 * families named after the first major device, E810 and similar devices, and
56 * E822 and similar devices.
57 *
58 * - E822 based devices have additional support for fine grained Vernier
59 * calibration which requires significant setup
60 * - The layout of timestamp data in the PHY register blocks is different
61 * - The way timer synchronization commands are issued is different.
62 *
63 * To support this, very low level functions have an e810 or e822 suffix
64 * indicating what type of device they work on. Higher level abstractions for
65 * tasks that can be done on both devices do not have the suffix and will
66 * correctly look up the appropriate low level function when running.
67 *
68 * Functions which only make sense on a single device family may not have
69 * a suitable generic implementation.
70 */
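
/* Worked example (an editorial sketch, not quoted from hardware docs): the
 * increment value is nominally the number of time units added per clock
 * tick, scaled so that one nanosecond corresponds to 2^32 units, i.e.
 * incval ~= (1e9 / clk_freq_hz) * 2^32. For the 812.5 MHz E810 clock this
 * is (16/13) * 2^32, roughly 0x13B13B13B, so every tick advances the timer
 * by about 1.23 nominal nanoseconds.
 */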
71
72 /**
73 * ice_get_ptp_src_clock_index - determine source clock index
74 * @hw: pointer to HW struct
75 *
76 * Determine the source clock index currently in use, based on device
77 * capabilities reported during initialization.
78 */
79 u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
80 {
81 return hw->func_caps.ts_func_info.tmr_index_assoc;
82 }
83
84 /**
85 * ice_ptp_read_src_incval - Read source timer increment value
86 * @hw: pointer to HW struct
87 *
88 * Read the increment value of the source timer and return it.
89 */
90 static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
91 {
92 u32 lo, hi;
93 u8 tmr_idx;
94
95 tmr_idx = ice_get_ptp_src_clock_index(hw);
96
97 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
98 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
99
100 return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
101 }
102
103 /**
104 * ice_ptp_src_cmd - Prepare source timer for a timer command
105 * @hw: pointer to HW structure
106 * @cmd: Timer command
107 *
108 * Prepare the source timer for an upcoming timer sync command.
109 */
110 static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
111 {
112 u32 cmd_val;
113 u8 tmr_idx;
114
115 tmr_idx = ice_get_ptp_src_clock_index(hw);
116 cmd_val = tmr_idx << SEL_CPK_SRC;
117
118 switch (cmd) {
119 case INIT_TIME:
120 cmd_val |= GLTSYN_CMD_INIT_TIME;
121 break;
122 case INIT_INCVAL:
123 cmd_val |= GLTSYN_CMD_INIT_INCVAL;
124 break;
125 case ADJ_TIME:
126 cmd_val |= GLTSYN_CMD_ADJ_TIME;
127 break;
128 case ADJ_TIME_AT_TIME:
129 cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
130 break;
131 case READ_TIME:
132 cmd_val |= GLTSYN_CMD_READ_TIME;
133 break;
134 }
135
136 wr32(hw, GLTSYN_CMD, cmd_val);
137 }
138
139 /**
140 * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
141 * @hw: pointer to HW struct
142 *
143 * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the
144 * write immediately. This triggers the hardware to begin executing all of the
145 * source and PHY timer commands synchronously.
146 */
147 static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
148 {
149 wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
150 ice_flush(hw);
151 }
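
/* Illustrative call sequence (a sketch, not a helper defined in this file):
 * issuing a timer command is a two step process. The command is first
 * prepared in the source timer (and, on E822, in every PHY port) and is
 * then executed synchronously, e.g.:
 *
 *	ice_ptp_src_cmd(hw, INIT_INCVAL);
 *	ice_ptp_exec_tmr_cmd(hw);
 */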
152
153 /* E822 family functions
154 *
155 * The following functions operate on the E822 family of devices.
156 */
157
158 /**
159 * ice_fill_phy_msg_e822 - Fill message data for a PHY register access
160 * @msg: the PHY message buffer to fill in
161 * @port: the port to access
162 * @offset: the register offset
163 */
164 static void
165 ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
166 {
167 int phy_port, phy, quadtype;
168
169 phy_port = port % ICE_PORTS_PER_PHY;
170 phy = port / ICE_PORTS_PER_PHY;
171 quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE;
172
173 if (quadtype == 0) {
174 msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
175 msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port);
176 } else {
177 msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port);
178 msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
179 }
180
181 if (phy == 0)
182 msg->dest_dev = rmn_0;
183 else if (phy == 1)
184 msg->dest_dev = rmn_1;
185 else
186 msg->dest_dev = rmn_2;
187 }
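
/* Worked example (a sketch, assuming ICE_PORTS_PER_PHY is 8,
 * ICE_PORTS_PER_QUAD is 4 and ICE_NUM_QUAD_TYPE is 2): port 6 yields
 * phy_port = 6, phy = 0 and quadtype = (6 / 4) % 2 = 1, so the message is
 * built from the P_Q1_L/P_Q1_H encodings of P_4_BASE + offset and targets
 * dest_dev rmn_0.
 */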
188
189 /**
190 * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register
191 * @low_addr: the low address to check
192 * @high_addr: on return, contains the high address of the 64bit register
193 *
194 * Checks if the provided low address is one of the known 64bit PHY values
195 * represented as two 32bit registers. If it is, return the appropriate high
196 * register offset to use.
197 */
198 static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
199 {
200 switch (low_addr) {
201 case P_REG_PAR_PCS_TX_OFFSET_L:
202 *high_addr = P_REG_PAR_PCS_TX_OFFSET_U;
203 return true;
204 case P_REG_PAR_PCS_RX_OFFSET_L:
205 *high_addr = P_REG_PAR_PCS_RX_OFFSET_U;
206 return true;
207 case P_REG_PAR_TX_TIME_L:
208 *high_addr = P_REG_PAR_TX_TIME_U;
209 return true;
210 case P_REG_PAR_RX_TIME_L:
211 *high_addr = P_REG_PAR_RX_TIME_U;
212 return true;
213 case P_REG_TOTAL_TX_OFFSET_L:
214 *high_addr = P_REG_TOTAL_TX_OFFSET_U;
215 return true;
216 case P_REG_TOTAL_RX_OFFSET_L:
217 *high_addr = P_REG_TOTAL_RX_OFFSET_U;
218 return true;
219 case P_REG_UIX66_10G_40G_L:
220 *high_addr = P_REG_UIX66_10G_40G_U;
221 return true;
222 case P_REG_UIX66_25G_100G_L:
223 *high_addr = P_REG_UIX66_25G_100G_U;
224 return true;
225 case P_REG_TX_CAPTURE_L:
226 *high_addr = P_REG_TX_CAPTURE_U;
227 return true;
228 case P_REG_RX_CAPTURE_L:
229 *high_addr = P_REG_RX_CAPTURE_U;
230 return true;
231 case P_REG_TX_TIMER_INC_PRE_L:
232 *high_addr = P_REG_TX_TIMER_INC_PRE_U;
233 return true;
234 case P_REG_RX_TIMER_INC_PRE_L:
235 *high_addr = P_REG_RX_TIMER_INC_PRE_U;
236 return true;
237 default:
238 return false;
239 }
240 }
241
242 /**
243 * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register
244 * @low_addr: the low address to check
245 * @high_addr: on return, contains the high address of the 40bit value
246 *
247 * Checks if the provided low address is one of the known 40bit PHY values
248 * split into two registers with the lower 8 bits in the low register and the
249 * upper 32 bits in the high register. If it is, return the appropriate high
250 * register offset to use.
251 */
252 static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
253 {
254 switch (low_addr) {
255 case P_REG_TIMETUS_L:
256 *high_addr = P_REG_TIMETUS_U;
257 return true;
258 case P_REG_PAR_RX_TUS_L:
259 *high_addr = P_REG_PAR_RX_TUS_U;
260 return true;
261 case P_REG_PAR_TX_TUS_L:
262 *high_addr = P_REG_PAR_TX_TUS_U;
263 return true;
264 case P_REG_PCS_RX_TUS_L:
265 *high_addr = P_REG_PCS_RX_TUS_U;
266 return true;
267 case P_REG_PCS_TX_TUS_L:
268 *high_addr = P_REG_PCS_TX_TUS_U;
269 return true;
270 case P_REG_DESK_PAR_RX_TUS_L:
271 *high_addr = P_REG_DESK_PAR_RX_TUS_U;
272 return true;
273 case P_REG_DESK_PAR_TX_TUS_L:
274 *high_addr = P_REG_DESK_PAR_TX_TUS_U;
275 return true;
276 case P_REG_DESK_PCS_RX_TUS_L:
277 *high_addr = P_REG_DESK_PCS_RX_TUS_U;
278 return true;
279 case P_REG_DESK_PCS_TX_TUS_L:
280 *high_addr = P_REG_DESK_PCS_TX_TUS_U;
281 return true;
282 default:
283 return false;
284 }
285 }
286
287 /**
288 * ice_read_phy_reg_e822 - Read a PHY register
289 * @hw: pointer to the HW struct
290 * @port: PHY port to read from
291 * @offset: PHY register offset to read
292 * @val: on return, the contents read from the PHY
293 *
294 * Read a PHY register for the given port over the device sideband queue.
295 */
296 int
297 ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
298 {
299 struct ice_sbq_msg_input msg = {0};
300 int err;
301
302 ice_fill_phy_msg_e822(&msg, port, offset);
303 msg.opcode = ice_sbq_msg_rd;
304
305 err = ice_sbq_rw_reg(hw, &msg);
306 if (err) {
307 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
308 err);
309 return err;
310 }
311
312 *val = msg.data;
313
314 return 0;
315 }
316
317 /**
318 * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers
319 * @hw: pointer to the HW struct
320 * @port: PHY port to read from
321 * @low_addr: offset of the lower register to read from
322 * @val: on return, the contents of the 64bit value from the PHY registers
323 *
324 * Reads the two registers associated with a 64bit value and returns it in the
325 * val pointer. The offset always specifies the lower register offset to use.
326 * The high offset is looked up. This function only operates on registers
327 * known to be two parts of a 64bit value.
328 */
329 static int
330 ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
331 {
332 u32 low, high;
333 u16 high_addr;
334 int err;
335
336 /* Only operate on registers known to be split into two 32bit
337 * registers.
338 */
339 if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
340 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
341 low_addr);
342 return -EINVAL;
343 }
344
345 err = ice_read_phy_reg_e822(hw, port, low_addr, &low);
346 if (err) {
347 ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x, err %d\n",
348 low_addr, err);
349 return err;
350 }
351
352 err = ice_read_phy_reg_e822(hw, port, high_addr, &high);
353 if (err) {
354 ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x, err %d\n",
355 high_addr, err);
356 return err;
357 }
358
359 *val = (u64)high << 32 | low;
360
361 return 0;
362 }
363
364 /**
365 * ice_write_phy_reg_e822 - Write a PHY register
366 * @hw: pointer to the HW struct
367 * @port: PHY port to write to
368 * @offset: PHY register offset to write
369 * @val: The value to write to the register
370 *
371 * Write a PHY register for the given port over the device sideband queue.
372 */
373 int
374 ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
375 {
376 struct ice_sbq_msg_input msg = {0};
377 int err;
378
379 ice_fill_phy_msg_e822(&msg, port, offset);
380 msg.opcode = ice_sbq_msg_wr;
381 msg.data = val;
382
383 err = ice_sbq_rw_reg(hw, &msg);
384 if (err) {
385 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
386 err);
387 return err;
388 }
389
390 return 0;
391 }
392
393 /**
394 * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
395 * @hw: pointer to the HW struct
396 * @port: port to write to
397 * @low_addr: offset of the low register
398 * @val: 40b value to write
399 *
400 * Write the provided 40b value to the two associated registers by splitting
401 * it up into two chunks, the lower 8 bits and the upper 32 bits.
402 */
403 static int
404 ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
405 {
406 u32 low, high;
407 u16 high_addr;
408 int err;
409
410 /* Only operate on registers known to be split into a lower 8 bit
411 * register and an upper 32 bit register.
412 */
413 if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
414 ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
415 low_addr);
416 return -EINVAL;
417 }
418
419 low = (u32)(val & P_REG_40B_LOW_M);
420 high = (u32)(val >> P_REG_40B_HIGH_S);
421
422 err = ice_write_phy_reg_e822(hw, port, low_addr, low);
423 if (err) {
424 ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
425 low_addr, err);
426 return err;
427 }
428
429 err = ice_write_phy_reg_e822(hw, port, high_addr, high);
430 if (err) {
431 ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
432 high_addr, err);
433 return err;
434 }
435
436 return 0;
437 }
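
/* Worked example (sketch): for a 40b value such as 0x13B13B13B, the low
 * register receives the bottom 8 bits (0x3B) and the high register the
 * remaining 32 bits (0x013B13B1), matching the split described above.
 */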
438
439 /**
440 * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
441 * @hw: pointer to the HW struct
442 * @port: PHY port to read from
443 * @low_addr: offset of the lower register to read from
444 * @val: the contents of the 64bit value to write to PHY
445 *
446 * Write the 64bit value to the two associated 32bit PHY registers. The offset
447 * is always specified as the lower register, and the high address is looked
448 * up. This function only operates on registers known to be two parts of
449 * a 64bit value.
450 */
451 static int
452 ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
453 {
454 u32 low, high;
455 u16 high_addr;
456 int err;
457
458 /* Only operate on registers known to be split into two 32bit
459 * registers.
460 */
461 if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
462 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
463 low_addr);
464 return -EINVAL;
465 }
466
467 low = lower_32_bits(val);
468 high = upper_32_bits(val);
469
470 err = ice_write_phy_reg_e822(hw, port, low_addr, low);
471 if (err) {
472 ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
473 low_addr, err);
474 return err;
475 }
476
477 err = ice_write_phy_reg_e822(hw, port, high_addr, high);
478 if (err) {
479 ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
480 high_addr, err);
481 return err;
482 }
483
484 return 0;
485 }
486
487 /**
488 * ice_fill_quad_msg_e822 - Fill message data for quad register access
489 * @msg: the PHY message buffer to fill in
490 * @quad: the quad to access
491 * @offset: the register offset
492 *
493 * Fill a message buffer for accessing a register in a quad shared between
494 * multiple PHYs.
495 */
496 static void
497 ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
498 {
499 u32 addr;
500
501 msg->dest_dev = rmn_0;
502
503 if ((quad % ICE_NUM_QUAD_TYPE) == 0)
504 addr = Q_0_BASE + offset;
505 else
506 addr = Q_1_BASE + offset;
507
508 msg->msg_addr_low = lower_16_bits(addr);
509 msg->msg_addr_high = upper_16_bits(addr);
510 }
511
512 /**
513 * ice_read_quad_reg_e822 - Read a PHY quad register
514 * @hw: pointer to the HW struct
515 * @quad: quad to read from
516 * @offset: quad register offset to read
517 * @val: on return, the contents read from the quad
518 *
519 * Read a quad register over the device sideband queue. Quad registers are
520 * shared between multiple PHYs.
521 */
522 int
523 ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
524 {
525 struct ice_sbq_msg_input msg = {0};
526 int err;
527
528 if (quad >= ICE_MAX_QUAD)
529 return -EINVAL;
530
531 ice_fill_quad_msg_e822(&msg, quad, offset);
532 msg.opcode = ice_sbq_msg_rd;
533
534 err = ice_sbq_rw_reg(hw, &msg);
535 if (err) {
536 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
537 err);
538 return err;
539 }
540
541 *val = msg.data;
542
543 return 0;
544 }
545
546 /**
547 * ice_write_quad_reg_e822 - Write a PHY quad register
548 * @hw: pointer to the HW struct
549 * @quad: quad to write to
550 * @offset: quad register offset to write
551 * @val: The value to write to the register
552 *
553 * Write a quad register over the device sideband queue. Quad registers are
554 * shared between multiple PHYs.
555 */
556 int
557 ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
558 {
559 struct ice_sbq_msg_input msg = {0};
560 int err;
561
562 if (quad >= ICE_MAX_QUAD)
563 return -EINVAL;
564
565 ice_fill_quad_msg_e822(&msg, quad, offset);
566 msg.opcode = ice_sbq_msg_wr;
567 msg.data = val;
568
569 err = ice_sbq_rw_reg(hw, &msg);
570 if (err) {
571 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
572 err);
573 return err;
574 }
575
576 return 0;
577 }
578
579 /**
580 * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
581 * @hw: pointer to the HW struct
582 * @quad: the quad to read from
583 * @idx: the timestamp index to read
584 * @tstamp: on return, the 40bit timestamp value
585 *
586 * Read a 40bit timestamp value out of the two associated registers in the
587 * quad memory block that is shared between the internal PHYs of the E822
588 * family of devices.
589 */
590 static int
591 ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
592 {
593 u16 lo_addr, hi_addr;
594 u32 lo, hi;
595 int err;
596
597 lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
598 hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
599
600 err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
601 if (err) {
602 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
603 err);
604 return err;
605 }
606
607 err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
608 if (err) {
609 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
610 err);
611 return err;
612 }
613
614 /* For E822 based internal PHYs, the timestamp is reported with the
615 * lower 8 bits in the low register, and the upper 32 bits in the high
616 * register.
617 */
618 *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
619
620 return 0;
621 }
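
/* Worked example (a sketch, assuming TS_PHY_HIGH_S shifts by 8 and
 * TS_PHY_LOW_M masks the bottom 8 bits, per the comment above): a high
 * register value of 0x12345678 and a low register value of 0xAB combine
 * into the 40bit timestamp 0x12345678AB.
 */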
622
623 /**
624 * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
625 * @hw: pointer to the HW struct
626 * @quad: the quad to read from
627 * @idx: the timestamp index to reset
628 *
629 * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
630 * shared between the internal PHYs on the E822 devices.
631 */
632 static int
633 ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
634 {
635 u16 lo_addr, hi_addr;
636 int err;
637
638 lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
639 hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
640
641 err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
642 if (err) {
643 ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
644 err);
645 return err;
646 }
647
648 err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
649 if (err) {
650 ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
651 err);
652 return err;
653 }
654
655 return 0;
656 }
657
658 /**
659 * ice_read_cgu_reg_e822 - Read a CGU register
660 * @hw: pointer to the HW struct
661 * @addr: Register address to read
662 * @val: storage for register value read
663 *
664 * Read the contents of a register of the Clock Generation Unit. Only
665 * applicable to E822 devices.
666 */
667 static int
668 ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
669 {
670 struct ice_sbq_msg_input cgu_msg;
671 int err;
672
673 cgu_msg.opcode = ice_sbq_msg_rd;
674 cgu_msg.dest_dev = cgu;
675 cgu_msg.msg_addr_low = addr;
676 cgu_msg.msg_addr_high = 0x0;
677
678 err = ice_sbq_rw_reg(hw, &cgu_msg);
679 if (err) {
680 ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
681 addr, err);
682 return err;
683 }
684
685 *val = cgu_msg.data;
686
687 return err;
688 }
689
690 /**
691 * ice_write_cgu_reg_e822 - Write a CGU register
692 * @hw: pointer to the HW struct
693 * @addr: Register address to write
694 * @val: value to write into the register
695 *
696 * Write the specified value to a register of the Clock Generation Unit. Only
697 * applicable to E822 devices.
698 */
699 static int
700 ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
701 {
702 struct ice_sbq_msg_input cgu_msg;
703 int err;
704
705 cgu_msg.opcode = ice_sbq_msg_wr;
706 cgu_msg.dest_dev = cgu;
707 cgu_msg.msg_addr_low = addr;
708 cgu_msg.msg_addr_high = 0x0;
709 cgu_msg.data = val;
710
711 err = ice_sbq_rw_reg(hw, &cgu_msg);
712 if (err) {
713 ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
714 addr, err);
715 return err;
716 }
717
718 return err;
719 }
720
721 /**
722 * ice_clk_freq_str - Convert time_ref_freq to string
723 * @clk_freq: Clock frequency
724 *
725 * Convert the specified TIME_REF clock frequency to a string.
726 */
727 static const char *ice_clk_freq_str(u8 clk_freq)
728 {
729 switch ((enum ice_time_ref_freq)clk_freq) {
730 case ICE_TIME_REF_FREQ_25_000:
731 return "25 MHz";
732 case ICE_TIME_REF_FREQ_122_880:
733 return "122.88 MHz";
734 case ICE_TIME_REF_FREQ_125_000:
735 return "125 MHz";
736 case ICE_TIME_REF_FREQ_153_600:
737 return "153.6 MHz";
738 case ICE_TIME_REF_FREQ_156_250:
739 return "156.25 MHz";
740 case ICE_TIME_REF_FREQ_245_760:
741 return "245.76 MHz";
742 default:
743 return "Unknown";
744 }
745 }
746
747 /**
748 * ice_clk_src_str - Convert time_ref_src to string
749 * @clk_src: Clock source
750 *
751 * Convert the specified clock source to its string name.
752 */
753 static const char *ice_clk_src_str(u8 clk_src)
754 {
755 switch ((enum ice_clk_src)clk_src) {
756 case ICE_CLK_SRC_TCX0:
757 return "TCX0";
758 case ICE_CLK_SRC_TIME_REF:
759 return "TIME_REF";
760 default:
761 return "Unknown";
762 }
763 }
764
765 /**
766 * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
767 * @hw: pointer to the HW struct
768 * @clk_freq: Clock frequency to program
769 * @clk_src: Clock source to select (TIME_REF, or TCX0)
770 *
771 * Configure the Clock Generation Unit with the desired clock frequency and
772 * time reference, enabling the PLL which drives the PTP hardware clock.
773 */
774 static int
775 ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
776 enum ice_clk_src clk_src)
777 {
778 union tspll_ro_bwm_lf bwm_lf;
779 union nac_cgu_dword19 dw19;
780 union nac_cgu_dword22 dw22;
781 union nac_cgu_dword24 dw24;
782 union nac_cgu_dword9 dw9;
783 int err;
784
785 if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
786 dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
787 clk_freq);
788 return -EINVAL;
789 }
790
791 if (clk_src >= NUM_ICE_CLK_SRC) {
792 dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
793 clk_src);
794 return -EINVAL;
795 }
796
797 if (clk_src == ICE_CLK_SRC_TCX0 &&
798 clk_freq != ICE_TIME_REF_FREQ_25_000) {
799 dev_warn(ice_hw_to_dev(hw),
800 "TCX0 only supports 25 MHz frequency\n");
801 return -EINVAL;
802 }
803
804 err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
805 if (err)
806 return err;
807
808 err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
809 if (err)
810 return err;
811
812 err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
813 if (err)
814 return err;
815
816 /* Log the current clock configuration */
817 ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
818 dw24.field.ts_pll_enable ? "enabled" : "disabled",
819 ice_clk_src_str(dw24.field.time_ref_sel),
820 ice_clk_freq_str(dw9.field.time_ref_freq_sel),
821 bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
822
823 /* Disable the PLL before changing the clock source or frequency */
824 if (dw24.field.ts_pll_enable) {
825 dw24.field.ts_pll_enable = 0;
826
827 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
828 if (err)
829 return err;
830 }
831
832 /* Set the frequency */
833 dw9.field.time_ref_freq_sel = clk_freq;
834 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
835 if (err)
836 return err;
837
838 /* Configure the TS PLL feedback divisor */
839 err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
840 if (err)
841 return err;
842
843 dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
844 dw19.field.tspll_ndivratio = 1;
845
846 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
847 if (err)
848 return err;
849
850 /* Configure the TS PLL post divisor */
851 err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
852 if (err)
853 return err;
854
855 dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
856 dw22.field.time1588clk_sel_div2 = 0;
857
858 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
859 if (err)
860 return err;
861
862 /* Configure the TS PLL pre divisor and clock source */
863 err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
864 if (err)
865 return err;
866
867 dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
868 dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
869 dw24.field.time_ref_sel = clk_src;
870
871 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
872 if (err)
873 return err;
874
875 /* Finally, enable the PLL */
876 dw24.field.ts_pll_enable = 1;
877
878 err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
879 if (err)
880 return err;
881
882 /* Wait to verify if the PLL locks */
883 usleep_range(1000, 5000);
884
885 err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
886 if (err)
887 return err;
888
889 if (!bwm_lf.field.plllock_true_lock_cri) {
890 dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
891 return -EBUSY;
892 }
893
894 /* Log the current clock configuration */
895 ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
896 dw24.field.ts_pll_enable ? "enabled" : "disabled",
897 ice_clk_src_str(dw24.field.time_ref_sel),
898 ice_clk_freq_str(dw9.field.time_ref_freq_sel),
899 bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
900
901 return 0;
902 }
903
904 /**
905 * ice_init_cgu_e822 - Initialize CGU with settings from firmware
906 * @hw: pointer to the HW structure
907 *
908 * Initialize the Clock Generation Unit of the E822 device.
909 */
910 static int ice_init_cgu_e822(struct ice_hw *hw)
911 {
912 struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
913 union tspll_cntr_bist_settings cntr_bist;
914 int err;
915
916 err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
917 &cntr_bist.val);
918 if (err)
919 return err;
920
921 /* Disable sticky lock detection so lock err reported is accurate */
922 cntr_bist.field.i_plllock_sel_0 = 0;
923 cntr_bist.field.i_plllock_sel_1 = 0;
924
925 err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
926 cntr_bist.val);
927 if (err)
928 return err;
929
930 /* Configure the CGU PLL using the parameters from the function
931 * capabilities.
932 */
933 err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
934 (enum ice_clk_src)ts_info->clk_src);
935 if (err)
936 return err;
937
938 return 0;
939 }
940
941 /**
942 * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
943 * @hw: pointer to the HW struct
944 *
945 * Set the window length used for the vernier port calibration process.
946 */
947 static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
948 {
949 u8 port;
950
951 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
952 int err;
953
954 err = ice_write_phy_reg_e822(hw, port, P_REG_WL,
955 PTP_VERNIER_WL);
956 if (err) {
957 ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
958 port, err);
959 return err;
960 }
961 }
962
963 return 0;
964 }
965
966 /**
967 * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
968 * @hw: pointer to HW struct
969 *
970 * Perform PHC initialization steps specific to E822 devices.
971 */
972 static int ice_ptp_init_phc_e822(struct ice_hw *hw)
973 {
974 int err;
975 u32 regval;
976
977 /* Enable reading switch and PHY registers over the sideband queue */
978 #define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
979 #define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
980 regval = rd32(hw, PF_SB_REM_DEV_CTL);
981 regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
982 PF_SB_REM_DEV_CTL_PHY0);
983 wr32(hw, PF_SB_REM_DEV_CTL, regval);
984
985 /* Initialize the Clock Generation Unit */
986 err = ice_init_cgu_e822(hw);
987 if (err)
988 return err;
989
990 /* Set window length for all the ports */
991 return ice_ptp_set_vernier_wl(hw);
992 }
993
994 /**
995 * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
996 * @hw: pointer to the HW struct
997 * @time: Time to initialize the PHY port clocks to
998 *
999 * Program the PHY port registers with a new initial time value. The port
1000 * clock will be initialized once the driver issues an INIT_TIME sync
1001 * command. The time value is the upper 32 bits of the PHY timer, usually in
1002 * units of nominal nanoseconds.
1003 */
1004 static int
1005 ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
1006 {
1007 u64 phy_time;
1008 u8 port;
1009 int err;
1010
1011 /* The time represents the upper 32 bits of the PHY timer, so we need
1012 * to shift to account for this when programming.
1013 */
1014 phy_time = (u64)time << 32;
1015
1016 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1017 /* Tx case */
1018 err = ice_write_64b_phy_reg_e822(hw, port,
1019 P_REG_TX_TIMER_INC_PRE_L,
1020 phy_time);
1021 if (err)
1022 goto exit_err;
1023
1024 /* Rx case */
1025 err = ice_write_64b_phy_reg_e822(hw, port,
1026 P_REG_RX_TIMER_INC_PRE_L,
1027 phy_time);
1028 if (err)
1029 goto exit_err;
1030 }
1031
1032 return 0;
1033
1034 exit_err:
1035 ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
1036 port, err);
1037
1038 return err;
1039 }
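
/* Worked example (sketch): to start the port clocks at 1 second, the
 * caller passes time = 1000000000 (nominal nanoseconds); the shift above
 * produces phy_time = 1000000000 << 32 so the value lands in the upper,
 * nanosecond-counting half of the 64bit port timer.
 */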
1040
1041 /**
1042 * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust
1043 * @hw: pointer to HW struct
1044 * @port: Port number to be programmed
1045 * @time: time in cycles to adjust the port Tx and Rx clocks
1046 *
1047 * Program the port for an atomic adjustment by writing the Tx and Rx timer
1048 * registers. The atomic adjustment won't be completed until the driver issues
1049 * an ADJ_TIME command.
1050 *
1051 * Note that time is not in units of nanoseconds. It is in clock time
1052 * including the lower sub-nanosecond portion of the port timer.
1053 *
1054 * Negative adjustments are supported using 2s complement arithmetic.
1055 */
1056 int
1057 ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
1058 {
1059 u32 l_time, u_time;
1060 int err;
1061
1062 l_time = lower_32_bits(time);
1063 u_time = upper_32_bits(time);
1064
1065 /* Tx case */
1066 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L,
1067 l_time);
1068 if (err)
1069 goto exit_err;
1070
1071 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U,
1072 u_time);
1073 if (err)
1074 goto exit_err;
1075
1076 /* Rx case */
1077 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L,
1078 l_time);
1079 if (err)
1080 goto exit_err;
1081
1082 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U,
1083 u_time);
1084 if (err)
1085 goto exit_err;
1086
1087 return 0;
1088
1089 exit_err:
1090 ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
1091 port, err);
1092 return err;
1093 }
1094
1095 /**
1096 * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
1097 * @hw: pointer to HW struct
1098 * @adj: adjustment in nanoseconds
1099 *
1100 * Prepare the PHY ports for an atomic time adjustment by programming the PHY
1101 * Tx and Rx port registers. The actual adjustment is completed by issuing an
1102 * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
1103 */
1104 static int
1105 ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
1106 {
1107 s64 cycles;
1108 u8 port;
1109
1110 /* The port clock supports adjustment of the sub-nanosecond portion of
1111 * the clock. We shift the provided adjustment in nanoseconds to
1112 * calculate the appropriate adjustment to program into the PHY ports.
1113 */
1114 if (adj > 0)
1115 cycles = (s64)adj << 32;
1116 else
1117 cycles = -(((s64)-adj) << 32);
1118
1119 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1120 int err;
1121
1122 err = ice_ptp_prep_port_adj_e822(hw, port, cycles);
1123 if (err)
1124 return err;
1125 }
1126
1127 return 0;
1128 }
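
/* Worked example (sketch): an adjustment of adj = +5 (nanoseconds) is
 * programmed as cycles = 5 << 32, while adj = -5 becomes -(5 << 32),
 * i.e. the two's complement value written into the port TIMER_INC_PRE
 * registers; the low 32 bits carry the (here zero) sub-nanosecond part.
 */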
1129
1130 /**
1131 * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment
1132 * @hw: pointer to HW struct
1133 * @incval: new increment value to prepare
1134 *
1135 * Prepare each of the PHY ports for a new increment value by programming the
1136 * port's TIMETUS registers. The new increment value will be updated after
1137 * issuing an INIT_INCVAL command.
1138 */
1139 static int
1140 ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
1141 {
1142 int err;
1143 u8 port;
1144
1145 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1146 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
1147 incval);
1148 if (err)
1149 goto exit_err;
1150 }
1151
1152 return 0;
1153
1154 exit_err:
1155 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
1156 port, err);
1157
1158 return err;
1159 }
1160
1161 /**
1162 * ice_ptp_read_port_capture - Read a port's local time capture
1163 * @hw: pointer to HW struct
1164 * @port: Port number to read
1165 * @tx_ts: on return, the Tx port time capture
1166 * @rx_ts: on return, the Rx port time capture
1167 *
1168 * Read the port's Tx and Rx local time capture values.
1169 *
1170 * Note this has no equivalent for the E810 devices.
1171 */
1172 static int
1173 ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
1174 {
1175 int err;
1176
1177 /* Tx case */
1178 err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
1179 if (err) {
1180 ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
1181 err);
1182 return err;
1183 }
1184
1185 ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n",
1186 (unsigned long long)*tx_ts);
1187
1188 /* Rx case */
1189 err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
1190 if (err) {
1191 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
1192 err);
1193 return err;
1194 }
1195
1196 ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n",
1197 (unsigned long long)*rx_ts);
1198
1199 return 0;
1200 }
1201
1202 /**
1203 * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command
1204 * @hw: pointer to HW struct
1205 * @port: Port to which cmd has to be sent
1206 * @cmd: Command to be sent to the port
1207 *
1208 * Prepare the requested port for an upcoming timer sync command.
1209 *
1210 * Note there is no equivalent of this operation on E810, as that device
1211 * always handles all external PHYs internally.
1212 */
1213 static int
1214 ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
1215 {
1216 u32 cmd_val, val;
1217 u8 tmr_idx;
1218 int err;
1219
1220 tmr_idx = ice_get_ptp_src_clock_index(hw);
1221 cmd_val = tmr_idx << SEL_PHY_SRC;
1222 switch (cmd) {
1223 case INIT_TIME:
1224 cmd_val |= PHY_CMD_INIT_TIME;
1225 break;
1226 case INIT_INCVAL:
1227 cmd_val |= PHY_CMD_INIT_INCVAL;
1228 break;
1229 case ADJ_TIME:
1230 cmd_val |= PHY_CMD_ADJ_TIME;
1231 break;
1232 case READ_TIME:
1233 cmd_val |= PHY_CMD_READ_TIME;
1234 break;
1235 case ADJ_TIME_AT_TIME:
1236 cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
1237 break;
1238 }
1239
1240 /* Tx case */
1241 /* Read, modify, write */
1242 err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val);
1243 if (err) {
1244 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
1245 err);
1246 return err;
1247 }
1248
1249 /* Modify necessary bits only and perform write */
1250 val &= ~TS_CMD_MASK;
1251 val |= cmd_val;
1252
1253 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val);
1254 if (err) {
1255 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
1256 err);
1257 return err;
1258 }
1259
1260 /* Rx case */
1261 /* Read, modify, write */
1262 err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val);
1263 if (err) {
1264 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
1265 err);
1266 return err;
1267 }
1268
1269 /* Modify necessary bits only and perform write */
1270 val &= ~TS_CMD_MASK;
1271 val |= cmd_val;
1272
1273 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val);
1274 if (err) {
1275 ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
1276 err);
1277 return err;
1278 }
1279
1280 return 0;
1281 }
1282
1283 /**
1284 * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
1285 * @hw: pointer to the HW struct
1286 * @cmd: timer command to prepare
1287 *
1288 * Prepare all ports connected to this device for an upcoming timer sync
1289 * command.
1290 */
1291 static int
1292 ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
1293 {
1294 u8 port;
1295
1296 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1297 int err;
1298
1299 err = ice_ptp_one_port_cmd(hw, port, cmd);
1300 if (err)
1301 return err;
1302 }
1303
1304 return 0;
1305 }
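
/* Illustrative sequence (a sketch, not a helper defined in this file): a
 * complete E822 timer command prepares the source timer and every PHY port
 * before triggering execution, e.g.:
 *
 *	ice_ptp_src_cmd(hw, ADJ_TIME);
 *	ice_ptp_port_cmd_e822(hw, ADJ_TIME);
 *	ice_ptp_exec_tmr_cmd(hw);
 */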
1306
1307 /* E822 Vernier calibration functions
1308 *
1309 * The following functions are used as part of the vernier calibration of
1310 * a port. This calibration increases the precision of the timestamps on the
1311 * port.
1312 */
1313
1314 /**
1315 * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
1316 * @hw: pointer to HW struct
1317 * @port: the port to read from
1318 * @link_out: if non-NULL, holds link speed on success
1319 * @fec_out: if non-NULL, holds FEC algorithm on success
1320 *
1321 * Read the serdes data for the PHY port and extract the link speed and FEC
1322 * algorithm.
1323 */
1324 static int
1325 ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
1326 enum ice_ptp_link_spd *link_out,
1327 enum ice_ptp_fec_mode *fec_out)
1328 {
1329 enum ice_ptp_link_spd link;
1330 enum ice_ptp_fec_mode fec;
1331 u32 serdes;
1332 int err;
1333
1334 err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
1335 if (err) {
1336 ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
1337 return err;
1338 }
1339
1340 /* Determine the FEC algorithm */
1341 fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
1342
1343 serdes &= P_REG_LINK_SPEED_SERDES_M;
1344
1345 /* Determine the link speed */
1346 if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
1347 switch (serdes) {
1348 case ICE_PTP_SERDES_25G:
1349 link = ICE_PTP_LNK_SPD_25G_RS;
1350 break;
1351 case ICE_PTP_SERDES_50G:
1352 link = ICE_PTP_LNK_SPD_50G_RS;
1353 break;
1354 case ICE_PTP_SERDES_100G:
1355 link = ICE_PTP_LNK_SPD_100G_RS;
1356 break;
1357 default:
1358 return -EIO;
1359 }
1360 } else {
1361 switch (serdes) {
1362 case ICE_PTP_SERDES_1G:
1363 link = ICE_PTP_LNK_SPD_1G;
1364 break;
1365 case ICE_PTP_SERDES_10G:
1366 link = ICE_PTP_LNK_SPD_10G;
1367 break;
1368 case ICE_PTP_SERDES_25G:
1369 link = ICE_PTP_LNK_SPD_25G;
1370 break;
1371 case ICE_PTP_SERDES_40G:
1372 link = ICE_PTP_LNK_SPD_40G;
1373 break;
1374 case ICE_PTP_SERDES_50G:
1375 link = ICE_PTP_LNK_SPD_50G;
1376 break;
1377 default:
1378 return -EIO;
1379 }
1380 }
1381
1382 if (link_out)
1383 *link_out = link;
1384 if (fec_out)
1385 *fec_out = fec;
1386
1387 return 0;
1388 }
1389
1390 /**
1391 * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
1392 * @hw: pointer to HW struct
1393 * @port: the port to configure the quad for
1394 */
1395 static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
1396 {
1397 enum ice_ptp_link_spd link_spd;
1398 int err;
1399 u32 val;
1400 u8 quad;
1401
1402 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
1403 if (err) {
1404 ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
1405 err);
1406 return;
1407 }
1408
1409 quad = port / ICE_PORTS_PER_QUAD;
1410
1411 err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
1412 if (err) {
1413 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GBL_CFG, err %d\n",
1414 err);
1415 return;
1416 }
1417
1418 if (link_spd >= ICE_PTP_LNK_SPD_40G)
1419 val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1420 else
1421 val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1422
1423 err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
1424 if (err) {
1425 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
1426 err);
1427 return;
1428 }
1429 }
1430
1431 /**
1432 * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
1433 * @hw: pointer to the HW structure
1434 * @port: the port to configure
1435 *
1436 * Program the conversion ratio of Serdes clock "unit intervals" (UIs) to PHC
1437 * hardware clock time units (TUs). That is, determine the number of TUs per
1438 * serdes unit interval, and program the UIX registers with this conversion.
1439 *
1440 * This conversion is used as part of the calibration process when determining
1441 * the additional error of a timestamp vs the real time of transmission or
1442 * receipt of the packet.
1443 *
1444 * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
1445 * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks.
1446 *
1447 * To calculate the conversion ratio, we use the following facts:
1448 *
1449 * a) the clock frequency in Hz (cycles per second)
1450 * b) the number of TUs per cycle (the increment value of the clock)
1451 * c) 1 second per 1 billion nanoseconds
1452 * d) the duration of 66 UIs in nanoseconds
1453 *
1454 * Given these facts, we can use the following table to work out what ratios
1455 * to multiply in order to get the number of TUs per 66 UIs:
1456 *
1457 * cycles | 1 second | incval (TUs) | nanoseconds
1458 * -------+--------------+--------------+-------------
1459 * second | 1 billion ns | cycle | 66 UIs
1460 *
1461 * To perform the multiplication using integers without too much loss of
1462 * precision, we can use the following equation:
1463 *
1464 * (freq * incval * 6600 LINE_UI) / (100 * 1 billion)
1465 *
1466 * We scale up to using 6600 UI instead of 66 in order to avoid fractional
1467 * nanosecond UIs (66 UI at 10G/40G is 6.4 ns)
1468 *
1469 * The increment value has a maximum expected range of about 34 bits, while
1470 * the frequency value is about 29 bits. Multiplying these values shouldn't
1471 * overflow the 64 bits. However, we must then further multiply them again by
1472 * the Serdes unit interval duration. To avoid overflow here, we split the
1473 * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
1474 * a divide by 390,625,000. This does lose some precision, but avoids
1475 * miscalculation due to arithmetic overflow.
1476 */
1477 static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
1478 {
1479 u64 cur_freq, clk_incval, tu_per_sec, uix;
1480 int err;
1481
1482 cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1483 clk_incval = ice_ptp_read_src_incval(hw);
1484
1485 /* Calculate TUs per second divided by 256 */
1486 tu_per_sec = (cur_freq * clk_incval) >> 8;
1487
1488 #define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
1489 #define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
1490
1491 /* Program the 10Gb/40Gb conversion ratio */
1492 uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
1493
1494 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
1495 uix);
1496 if (err) {
1497 ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
1498 err);
1499 return err;
1500 }
1501
1502 /* Program the 25Gb/100Gb conversion ratio */
1503 uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
1504
1505 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
1506 uix);
1507 if (err) {
1508 ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
1509 err);
1510 return err;
1511 }
1512
1513 return 0;
1514 }
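
/* Worked example (sketch): with a nominal increment value the product
 * cur_freq * clk_incval is about 1e9 * 2^32 TUs per second, so
 * tu_per_sec >> 8 is roughly 1.678e16. The 10G/40G case then gives
 * uix = 1.678e16 * 640 / 390625000 ~= 2.749e10, which is simply 6.4 ns
 * (the duration of 66 UIs) expressed in 2^-32 ns time units.
 */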
1515
1516 /**
1517 * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
1518 * @hw: pointer to the HW struct
1519 * @port: port to configure
1520 *
1521 * Configure the number of TUs for the PAR and PCS clocks used as part of the
1522 * timestamp calibration process. This depends on the link speed, as the PHY
1523 * uses different markers depending on the speed.
1524 *
1525 * 1Gb/10Gb/25Gb:
1526 * - Tx/Rx PAR/PCS markers
1527 *
1528 * 25Gb RS:
1529 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1530 *
1531 * 40Gb/50Gb:
1532 * - Tx/Rx PAR/PCS markers
1533 * - Rx Deskew PAR/PCS markers
1534 *
1535 * 50G RS and 100G RS:
1536 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1537 * - Rx Deskew PAR/PCS markers
1538 * - Tx PAR/PCS markers
1539 *
1540 * To calculate the conversion, we use the PHC clock frequency (cycles per
1541 * second), the increment value (TUs per cycle), and the related PHY clock
1542 * frequency to calculate the TUs per unit of the PHY link clock. The
1543 * following table shows how the units convert:
1544 *
1545 * cycles | TUs | second
1546 * -------+-------+--------
1547 * second | cycle | cycles
1548 *
1549 * For each conversion register, look up the appropriate frequency from the
1550 * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program
1551 * this to the appropriate register, preparing hardware to perform timestamp
1552 * calibration to calculate the total Tx or Rx offset to adjust the timestamp
1553 * in order to calibrate for the internal PHY delays.
1554 *
1555 * Note that the increment value ranges up to ~34 bits, and the clock
1556 * frequency is ~29 bits, so multiplying them together should fit within the
1557 * 64 bit arithmetic.
1558 */
1559 static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
1560 {
1561 u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
1562 enum ice_ptp_link_spd link_spd;
1563 enum ice_ptp_fec_mode fec_mode;
1564 int err;
1565
1566 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
1567 if (err)
1568 return err;
1569
1570 cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1571 clk_incval = ice_ptp_read_src_incval(hw);
1572
1573 /* Calculate TUs per cycle of the PHC clock */
1574 tu_per_sec = cur_freq * clk_incval;
1575
1576 /* For each PHY conversion register, look up the appropriate link
1577 * speed frequency and determine the TUs per that clock's cycle time.
1578 * Split this into a high and low value and then program the
1579 * appropriate register. If that link speed does not use the
1580 * associated register, write zeros to clear it instead.
1581 */
1582
1583 /* P_REG_PAR_TX_TUS */
1584 if (e822_vernier[link_spd].tx_par_clk)
1585 phy_tus = div_u64(tu_per_sec,
1586 e822_vernier[link_spd].tx_par_clk);
1587 else
1588 phy_tus = 0;
1589
1590 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L,
1591 phy_tus);
1592 if (err)
1593 return err;
1594
1595 /* P_REG_PAR_RX_TUS */
1596 if (e822_vernier[link_spd].rx_par_clk)
1597 phy_tus = div_u64(tu_per_sec,
1598 e822_vernier[link_spd].rx_par_clk);
1599 else
1600 phy_tus = 0;
1601
1602 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L,
1603 phy_tus);
1604 if (err)
1605 return err;
1606
1607 /* P_REG_PCS_TX_TUS */
1608 if (e822_vernier[link_spd].tx_pcs_clk)
1609 phy_tus = div_u64(tu_per_sec,
1610 e822_vernier[link_spd].tx_pcs_clk);
1611 else
1612 phy_tus = 0;
1613
1614 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L,
1615 phy_tus);
1616 if (err)
1617 return err;
1618
1619 /* P_REG_PCS_RX_TUS */
1620 if (e822_vernier[link_spd].rx_pcs_clk)
1621 phy_tus = div_u64(tu_per_sec,
1622 e822_vernier[link_spd].rx_pcs_clk);
1623 else
1624 phy_tus = 0;
1625
1626 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L,
1627 phy_tus);
1628 if (err)
1629 return err;
1630
1631 /* P_REG_DESK_PAR_TX_TUS */
1632 if (e822_vernier[link_spd].tx_desk_rsgb_par)
1633 phy_tus = div_u64(tu_per_sec,
1634 e822_vernier[link_spd].tx_desk_rsgb_par);
1635 else
1636 phy_tus = 0;
1637
1638 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L,
1639 phy_tus);
1640 if (err)
1641 return err;
1642
1643 /* P_REG_DESK_PAR_RX_TUS */
1644 if (e822_vernier[link_spd].rx_desk_rsgb_par)
1645 phy_tus = div_u64(tu_per_sec,
1646 e822_vernier[link_spd].rx_desk_rsgb_par);
1647 else
1648 phy_tus = 0;
1649
1650 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L,
1651 phy_tus);
1652 if (err)
1653 return err;
1654
1655 /* P_REG_DESK_PCS_TX_TUS */
1656 if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
1657 phy_tus = div_u64(tu_per_sec,
1658 e822_vernier[link_spd].tx_desk_rsgb_pcs);
1659 else
1660 phy_tus = 0;
1661
1662 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L,
1663 phy_tus);
1664 if (err)
1665 return err;
1666
1667 /* P_REG_DESK_PCS_RX_TUS */
1668 if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
1669 phy_tus = div_u64(tu_per_sec,
1670 e822_vernier[link_spd].rx_desk_rsgb_pcs);
1671 else
1672 phy_tus = 0;
1673
1674 return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
1675 phy_tus);
1676 }
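
/* Worked example (a sketch, using a hypothetical 257.8125 MHz PAR clock):
 * with tu_per_sec ~= 1e9 * 2^32, phy_tus = tu_per_sec / 257812500 is
 * about 1.666e10 TUs, i.e. roughly 3.88 ns per PAR clock cycle expressed
 * in 2^-32 ns time units.
 */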
1677
1678 /**
1679 * ice_calc_fixed_tx_offset_e822 - Calculate the fixed Tx offset for a port
1680 * @hw: pointer to the HW struct
1681 * @link_spd: the Link speed to calculate for
1682 *
1683 * Calculate the fixed offset due to known static latency data.
1684 */
1685 static u64
1686 ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
1687 {
1688 u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
1689
1690 cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1691 clk_incval = ice_ptp_read_src_incval(hw);
1692
1693 /* Calculate TUs per second */
1694 tu_per_sec = cur_freq * clk_incval;
1695
1696 /* Calculate number of TUs to add for the fixed Tx latency. Since the
1697 * latency measurement is in 1/100th of a nanosecond, we need to
1698 * multiply by tu_per_sec and then divide by 1e11. This calculation
1699 * overflows 64 bit integer arithmetic, so break it up into two
1700 * divisions by 1e4 first then by 1e7.
1701 */
1702 fixed_offset = div_u64(tu_per_sec, 10000);
1703 fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
1704 fixed_offset = div_u64(fixed_offset, 10000000);
1705
1706 return fixed_offset;
1707 }
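
/* Worked example (a sketch with a hypothetical fixed delay of 3.00 ns,
 * i.e. tx_fixed_delay == 300 in 1/100 ns units): with a nominal
 * tu_per_sec of ~1e9 * 2^32, the first division gives ~4.295e14,
 * multiplying by 300 gives ~1.288e17, and the final divide by 1e7 leaves
 * ~1.288e10 TUs, which is 3 ns expressed in 2^-32 ns time units.
 */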
1708
1709 /**
1710 * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
1711 * @hw: pointer to the HW struct
1712 * @port: the PHY port to configure
1713 *
1714 * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
1715 * adjust Tx timestamps by. This is calculated by combining some known static
1716 * latency along with the Vernier offset computations done by hardware.
1717 *
1718 * This function must be called only after the offset registers are valid,
1719 * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
1720 * has measured the offset.
1721 *
1722 * To avoid overflow, when calculating the offset based on the known static
1723 * latency values, we use measurements in 1/100th of a nanosecond, and divide
1724 * the TUs per second up front. This avoids overflow while allowing
1725 * calculation of the adjustment using integer arithmetic.
1726 */
1727 static int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
1728 {
1729 enum ice_ptp_link_spd link_spd;
1730 enum ice_ptp_fec_mode fec_mode;
1731 u64 total_offset, val;
1732 int err;
1733
1734 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
1735 if (err)
1736 return err;
1737
1738 total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
1739
1740 /* Read the first Vernier offset from the PHY register and add it to
1741 * the total offset.
1742 */
1743 if (link_spd == ICE_PTP_LNK_SPD_1G ||
1744 link_spd == ICE_PTP_LNK_SPD_10G ||
1745 link_spd == ICE_PTP_LNK_SPD_25G ||
1746 link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1747 link_spd == ICE_PTP_LNK_SPD_40G ||
1748 link_spd == ICE_PTP_LNK_SPD_50G) {
1749 err = ice_read_64b_phy_reg_e822(hw, port,
1750 P_REG_PAR_PCS_TX_OFFSET_L,
1751 &val);
1752 if (err)
1753 return err;
1754
1755 total_offset += val;
1756 }
1757
1758 /* For Tx, we only need to use the second Vernier offset for
1759 * multi-lane link speeds with RS-FEC. The lanes will always be
1760 * aligned.
1761 */
1762 if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1763 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1764 err = ice_read_64b_phy_reg_e822(hw, port,
1765 P_REG_PAR_TX_TIME_L,
1766 &val);
1767 if (err)
1768 return err;
1769
1770 total_offset += val;
1771 }
1772
1773 /* Now that the total offset has been calculated, program it to the
1774 * PHY and indicate that the Tx offset is ready. After this,
1775 * timestamps will be enabled.
1776 */
1777 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
1778 total_offset);
1779 if (err)
1780 return err;
1781
1782 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
1783 if (err)
1784 return err;
1785
1786 return 0;
1787 }
1788
1789 /**
1790 * ice_phy_cfg_fixed_tx_offset_e822 - Configure Tx offset for bypass mode
1791 * @hw: pointer to the HW struct
1792 * @port: the PHY port to configure
1793 *
1794 * Calculate and program the fixed Tx offset, and indicate that the offset is
1795 * ready. This can be used when operating in bypass mode.
1796 */
1797 static int
1798 ice_phy_cfg_fixed_tx_offset_e822(struct ice_hw *hw, u8 port)
1799 {
1800 enum ice_ptp_link_spd link_spd;
1801 enum ice_ptp_fec_mode fec_mode;
1802 u64 total_offset;
1803 int err;
1804
1805 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
1806 if (err)
1807 return err;
1808
1809 total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
1810
1811 /* Program the fixed Tx offset into the P_REG_TOTAL_TX_OFFSET_L
1812 * register, then indicate that the Tx offset is ready. After this,
1813 * timestamps will be enabled.
1814 *
1815 * Note that this skips including the more precise offsets generated
1816 * by the Vernier calibration.
1817 */
1818 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
1819 total_offset);
1820 if (err)
1821 return err;
1822
1823 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
1824 if (err)
1825 return err;
1826
1827 return 0;
1828 }
1829
1830 /**
1831 * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
1832 * @hw: pointer to the HW struct
1833 * @port: the PHY port to adjust for
1834 * @link_spd: the current link speed of the PHY
1835 * @fec_mode: the current FEC mode of the PHY
1836 * @pmd_adj: on return, the amount to adjust the Rx total offset by
1837 *
1838 * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
1839 * This varies by link speed and FEC mode. The value calculated accounts for
1840 * various delays caused when receiving a packet.
1841 */
1842 static int
1843 ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
1844 enum ice_ptp_link_spd link_spd,
1845 enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
1846 {
1847 u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
1848 u8 pmd_align;
1849 u32 val;
1850 int err;
1851
1852 err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
1853 if (err) {
1854 ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
1855 err);
1856 return err;
1857 }
1858
1859 pmd_align = (u8)val;
1860
1861 cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1862 clk_incval = ice_ptp_read_src_incval(hw);
1863
1864 /* Calculate TUs per second */
1865 tu_per_sec = cur_freq * clk_incval;
1866
1867 /* The PMD alignment adjustment measurement depends on the link speed,
1868 * and whether FEC is enabled. For each link speed, the alignment
1869 * adjustment is calculated by dividing a value by the length of
1870 * a Time Unit in nanoseconds.
1871 *
1872 * 1G: align == 4 ? 10 * 0.8 : ((align + 6) % 10) * 0.8
1873 * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
1874 * 10G w/FEC: align * 0.1 * 32/33
1875 * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
1876 * 25G w/FEC: align * 0.4 * 32/33
1877 * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
1878 * 40G w/FEC: align * 0.1 * 32/33
1879 * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
1880 * 50G w/FEC: align * 0.8 * 32/33
1881 *
1882 * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
1883 *
1884 * To allow for calculating this value using integer arithmetic, we
1885 * instead start with the number of TUs per second, (inverse of the
1886 * length of a Time Unit in nanoseconds), multiply by a value based
1887 * on the PMD alignment register, and then divide by the right value
1888 * calculated based on the table above. To avoid integer overflow this
1889 * division is broken up into a step of dividing by 125 first.
1890 */
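	/* As a worked form of that rearrangement (illustrative only, using the
	 * local variable names from this function):
	 *
	 *   adj [TU] = tu_per_sec * factor(pmd_align) [ns] / 1e9
	 *            = (tu_per_sec / 125) * mult / pmd_adj_divisor
	 *
	 * where mult is derived from pmd_align below, and the per-speed
	 * pmd_adj_divisor from the e822_vernier table is assumed to absorb the
	 * remaining nanosecond conversion together with the fractional scale
	 * factors (0.1, 0.4, 0.8 and 32/33) listed in the table above.
	 */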
1891 if (link_spd == ICE_PTP_LNK_SPD_1G) {
1892 if (pmd_align == 4)
1893 mult = 10;
1894 else
1895 mult = (pmd_align + 6) % 10;
1896 } else if (link_spd == ICE_PTP_LNK_SPD_10G ||
1897 link_spd == ICE_PTP_LNK_SPD_25G ||
1898 link_spd == ICE_PTP_LNK_SPD_40G ||
1899 link_spd == ICE_PTP_LNK_SPD_50G) {
1900 /* If Clause 74 FEC, always calculate PMD adjust */
1901 if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
1902 mult = pmd_align;
1903 else
1904 mult = 0;
1905 } else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1906 link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1907 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1908 if (pmd_align < 17)
1909 mult = pmd_align + 40;
1910 else
1911 mult = pmd_align;
1912 } else {
1913 ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
1914 link_spd);
1915 mult = 0;
1916 }
1917
1918 /* In some cases, there's no need to adjust for the PMD alignment */
1919 if (!mult) {
1920 *pmd_adj = 0;
1921 return 0;
1922 }
1923
1924 /* Calculate the adjustment by multiplying TUs per second by the
1925 * appropriate multiplier and divisor. To avoid overflow, we first
1926 * divide by 125, and then handle remaining divisor based on the link
1927 * speed pmd_adj_divisor value.
1928 */
1929 adj = div_u64(tu_per_sec, 125);
1930 adj *= mult;
1931 adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);
1932
1933 /* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
1934 * cycle count is necessary.
1935 */
1936 if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
1937 u64 cycle_adj;
1938 u8 rx_cycle;
1939
1940 err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
1941 &val);
1942 if (err) {
1943 ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
1944 err);
1945 return err;
1946 }
1947
1948 rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
1949 if (rx_cycle) {
1950 mult = (4 - rx_cycle) * 40;
1951
1952 cycle_adj = div_u64(tu_per_sec, 125);
1953 cycle_adj *= mult;
1954 cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
1955
1956 adj += cycle_adj;
1957 }
1958 } else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
1959 u64 cycle_adj;
1960 u8 rx_cycle;
1961
1962 err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
1963 &val);
1964 if (err) {
1965 ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
1966 err);
1967 return err;
1968 }
1969
1970 rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
1971 if (rx_cycle) {
1972 mult = rx_cycle * 40;
1973
1974 cycle_adj = div_u64(tu_per_sec, 125);
1975 cycle_adj *= mult;
1976 cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
1977
1978 adj += cycle_adj;
1979 }
1980 }
1981
1982 /* Return the calculated adjustment */
1983 *pmd_adj = adj;
1984
1985 return 0;
1986 }
1987
1988 /**
1989 * ice_calc_fixed_rx_offset_e822 - Calculate the fixed Rx offset for a port
1990 * @hw: pointer to HW struct
1991 * @link_spd: the link speed to calculate for
1992 *
1993 * Determine the fixed Rx latency for a given link speed.
1994 */
1995 static u64
1996 ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
1997 {
1998 u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
1999
2000 cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
2001 clk_incval = ice_ptp_read_src_incval(hw);
2002
2003 /* Calculate TUs per second */
2004 tu_per_sec = cur_freq * clk_incval;
2005
2006 /* Calculate number of TUs to add for the fixed Rx latency. Since the
2007 * latency measurement is in 1/100th of a nanosecond, we need to
2008 * multiply by tu_per_sec and then divide by 1e11. This calculation
2009 * overflows 64 bit integer arithmetic, so break it up into two
2010 * divisions by 1e4 first then by 1e7.
2011 */
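	/* Rough, illustrative magnitudes: with a Time Unit of about 1/2^32 ns,
	 * tu_per_sec is on the order of 4e18, so multiplying it directly by a
	 * fixed delay expressed in 1/100ths of a nanosecond would overflow
	 * 64 bits. Dividing by 1e4 first leaves a value around 4e14, which can
	 * then be multiplied by rx_fixed_delay and divided by the remaining
	 * 1e7 without overflowing.
	 */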
2012 fixed_offset = div_u64(tu_per_sec, 10000);
2013 fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
2014 fixed_offset = div_u64(fixed_offset, 10000000);
2015
2016 return fixed_offset;
2017 }
2018
2019 /**
2020 * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset
2021 * @hw: pointer to the HW struct
2022 * @port: the PHY port to configure
2023 *
2024 * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
2025 * adjust Rx timestamps by. This combines the Vernier offset measurements
2026 * taken by hardware with the known fixed delay, and also adjusts for
2027 * multi-lane alignment delay.
2028 *
2029 * This function must be called only after the offset registers are valid,
2030 * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
2031 * has measured the offset.
2032 *
2033 * To avoid overflow, when calculating the offset based on the known static
2034 * latency values, we use measurements in 1/100th of a nanosecond, and divide
2035 * the TUs per second up front. This avoids overflow while allowing
2036 * calculation of the adjustment using integer arithmetic.
2037 */
2038 static int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
2039 {
2040 enum ice_ptp_link_spd link_spd;
2041 enum ice_ptp_fec_mode fec_mode;
2042 u64 total_offset, pmd, val;
2043 int err;
2044
2045 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
2046 if (err)
2047 return err;
2048
2049 total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
2050
2051 /* Read the first Vernier offset from the PHY register and add it to
2052 * the total offset.
2053 */
2054 err = ice_read_64b_phy_reg_e822(hw, port,
2055 P_REG_PAR_PCS_RX_OFFSET_L,
2056 &val);
2057 if (err)
2058 return err;
2059
2060 total_offset += val;
2061
2062 /* For Rx, all multi-lane link speeds include a second Vernier
2063 * calibration, because the lanes might not be aligned.
2064 */
2065 if (link_spd == ICE_PTP_LNK_SPD_40G ||
2066 link_spd == ICE_PTP_LNK_SPD_50G ||
2067 link_spd == ICE_PTP_LNK_SPD_50G_RS ||
2068 link_spd == ICE_PTP_LNK_SPD_100G_RS) {
2069 err = ice_read_64b_phy_reg_e822(hw, port,
2070 P_REG_PAR_RX_TIME_L,
2071 &val);
2072 if (err)
2073 return err;
2074
2075 total_offset += val;
2076 }
2077
2078 /* In addition, Rx must account for the PMD alignment */
2079 err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd);
2080 if (err)
2081 return err;
2082
2083 /* For RS-FEC, this adjustment adds delay, but for other modes, it
2084 * subtracts delay.
2085 */
2086 if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
2087 total_offset += pmd;
2088 else
2089 total_offset -= pmd;
2090
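	/* Illustrative summary of the accumulated value at this point, simply
	 * restating the code above:
	 *
	 *   total_offset = fixed Rx offset for link_spd
	 *                + P_REG_PAR_PCS_RX_OFFSET
	 *                + P_REG_PAR_RX_TIME (multi-lane speeds only)
	 *                +/- pmd             (+ for RS-FEC, - otherwise)
	 */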
2091 /* Now that the total offset has been calculated, program it to the
2092 * PHY and indicate that the Rx offset is ready. After this,
2093 * timestamps will be enabled.
2094 */
2095 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
2096 total_offset);
2097 if (err)
2098 return err;
2099
2100 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
2101 if (err)
2102 return err;
2103
2104 return 0;
2105 }
2106
2107 /**
2108 * ice_phy_cfg_fixed_rx_offset_e822 - Configure fixed Rx offset for bypass mode
2109 * @hw: pointer to the HW struct
2110 * @port: the PHY port to configure
2111 *
2112 * Calculate and program the fixed Rx offset, and indicate that the offset is
2113 * ready. This can be used when operating in bypass mode.
2114 */
2115 static int
2116 ice_phy_cfg_fixed_rx_offset_e822(struct ice_hw *hw, u8 port)
2117 {
2118 enum ice_ptp_link_spd link_spd;
2119 enum ice_ptp_fec_mode fec_mode;
2120 u64 total_offset;
2121 int err;
2122
2123 err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
2124 if (err)
2125 return err;
2126
2127 total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
2128
2129 /* Program the fixed Rx offset into the P_REG_TOTAL_RX_OFFSET_L
2130 * register, then indicate that the Rx offset is ready. After this,
2131 * timestamps will be enabled.
2132 *
2133 * Note that this skips including the more precise offsets generated
2134 * by Vernier calibration.
2135 */
2136 err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
2137 total_offset);
2138 if (err)
2139 return err;
2140
2141 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
2142 if (err)
2143 return err;
2144
2145 return 0;
2146 }
2147
2148 /**
2149 * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time
2150 * @hw: pointer to the HW struct
2151 * @port: the PHY port to read
2152 * @phy_time: on return, the 64bit PHY timer value
2153 * @phc_time: on return, the lower 64bits of PHC time
2154 *
2155 * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC
2156 * timer values.
2157 */
2158 static int
2159 ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
2160 u64 *phc_time)
2161 {
2162 u64 tx_time, rx_time;
2163 u32 zo, lo;
2164 u8 tmr_idx;
2165 int err;
2166
2167 tmr_idx = ice_get_ptp_src_clock_index(hw);
2168
2169 /* Prepare the PHC timer for a READ_TIME capture command */
2170 ice_ptp_src_cmd(hw, READ_TIME);
2171
2172 /* Prepare the PHY timer for a READ_TIME capture command */
2173 err = ice_ptp_one_port_cmd(hw, port, READ_TIME);
2174 if (err)
2175 return err;
2176
2177 /* Issue the sync to start the READ_TIME capture */
2178 ice_ptp_exec_tmr_cmd(hw);
2179
2180 /* Read the captured PHC time from the shadow time registers */
2181 zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
2182 lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
2183 *phc_time = (u64)lo << 32 | zo;
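	/* Illustrative interpretation: the PHY timer counts in Time Units, so
	 * the comparable PHC value assembled here places the low 32 bits of
	 * the nanosecond counter in the upper half and the 32 bit
	 * sub-nanosecond residue captured in SHTIME_0 in the lower half.
	 */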
2184
2185 /* Read the captured PHY time from the PHY shadow registers */
2186 err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
2187 if (err)
2188 return err;
2189
2190 /* If the PHY Tx and Rx timers don't match, log a warning message.
2191 * Note that this should not happen in normal circumstances since the
2192 * driver always programs them together.
2193 */
2194 if (tx_time != rx_time)
2195 dev_warn(ice_hw_to_dev(hw),
2196 "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
2197 port, (unsigned long long)tx_time,
2198 (unsigned long long)rx_time);
2199
2200 *phy_time = tx_time;
2201
2202 return 0;
2203 }
2204
2205 /**
2206 * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer
2207 * @hw: pointer to the HW struct
2208 * @port: the PHY port to synchronize
2209 *
2210 * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
2211 * This is done by issuing a READ_TIME command which triggers a simultaneous
2212 * read of the PHY timer and PHC timer. Then we use the difference to
2213 * calculate an appropriate 2s complement addition to add to the PHY timer in
2214 * order to ensure it reads the same value as the primary PHC timer.
2215 */
2216 static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
2217 {
2218 u64 phc_time, phy_time, difference;
2219 int err;
2220
2221 if (!ice_ptp_lock(hw)) {
2222 ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
2223 return -EBUSY;
2224 }
2225
2226 err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
2227 if (err)
2228 goto err_unlock;
2229
2230 /* Calculate the amount required to add to the port time in order for
2231 * it to match the PHC time.
2232 *
2233 * Note that the port adjustment is done using 2s complement
2234 * arithmetic. This is convenient since it means that we can simply
2235 * calculate the difference between the PHC time and the port time,
2236 * and it will be interpreted correctly.
2237 */
2238 difference = phc_time - phy_time;
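	/* For example (hypothetical values): if phc_time is 0x1000 and
	 * phy_time is 0x1010, the unsigned subtraction yields
	 * 0xFFFFFFFFFFFFFFF0, which the port adjustment interprets as -16 Time
	 * Units and pulls the PHY timer back to match the PHC.
	 */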
2239
2240 err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference);
2241 if (err)
2242 goto err_unlock;
2243
2244 err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME);
2245 if (err)
2246 goto err_unlock;
2247
2248 /* Issue the sync to activate the time adjustment */
2249 ice_ptp_exec_tmr_cmd(hw);
2250
2251 /* Re-capture the timer values to flush the command registers and
2252 * verify that the time was properly adjusted.
2253 */
2254 err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
2255 if (err)
2256 goto err_unlock;
2257
2258 dev_info(ice_hw_to_dev(hw),
2259 "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
2260 port, (unsigned long long)phy_time,
2261 (unsigned long long)phc_time);
2262
2263 ice_ptp_unlock(hw);
2264
2265 return 0;
2266
2267 err_unlock:
2268 ice_ptp_unlock(hw);
2269 return err;
2270 }
2271
2272 /**
2273 * ice_stop_phy_timer_e822 - Stop the PHY clock timer
2274 * @hw: pointer to the HW struct
2275 * @port: the PHY port to stop
2276 * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
2277 *
2278 * Stop the clock of a PHY port. This must be done as part of the flow to
2279 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2280 * initialized or when link speed changes.
2281 */
2282 int
2283 ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
2284 {
2285 int err;
2286 u32 val;
2287
2288 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0);
2289 if (err)
2290 return err;
2291
2292 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0);
2293 if (err)
2294 return err;
2295
2296 err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
2297 if (err)
2298 return err;
2299
2300 val &= ~P_REG_PS_START_M;
2301 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2302 if (err)
2303 return err;
2304
2305 val &= ~P_REG_PS_ENA_CLK_M;
2306 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2307 if (err)
2308 return err;
2309
2310 if (soft_reset) {
2311 val |= P_REG_PS_SFT_RESET_M;
2312 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2313 if (err)
2314 return err;
2315 }
2316
2317 ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
2318
2319 return 0;
2320 }
2321
2322 /**
2323 * ice_start_phy_timer_e822 - Start the PHY clock timer
2324 * @hw: pointer to the HW struct
2325 * @port: the PHY port to start
2326 * @bypass: if true, start the PHY in bypass mode
2327 *
2328 * Start the clock of a PHY port. This must be done as part of the flow to
2329 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2330 * initialized or when link speed changes.
2331 *
2332 * Bypass mode enables timestamps immediately without waiting for Vernier
2333 * calibration to complete. Hardware will still continue taking Vernier
2334 * measurements on Tx or Rx of packets, but they will not be applied to
2335 * timestamps. Use ice_phy_exit_bypass_e822 to exit bypass mode once hardware
2336 * has completed offset calculation.
2337 */
2338 int
2339 ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass)
2340 {
2341 u32 lo, hi, val;
2342 u64 incval;
2343 u8 tmr_idx;
2344 int err;
2345
2346 tmr_idx = ice_get_ptp_src_clock_index(hw);
2347
2348 err = ice_stop_phy_timer_e822(hw, port, false);
2349 if (err)
2350 return err;
2351
2352 ice_phy_cfg_lane_e822(hw, port);
2353
2354 err = ice_phy_cfg_uix_e822(hw, port);
2355 if (err)
2356 return err;
2357
2358 err = ice_phy_cfg_parpcs_e822(hw, port);
2359 if (err)
2360 return err;
2361
2362 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
2363 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
2364 incval = (u64)hi << 32 | lo;
2365
2366 err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
2367 if (err)
2368 return err;
2369
2370 err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
2371 if (err)
2372 return err;
2373
2374 ice_ptp_exec_tmr_cmd(hw);
2375
2376 err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
2377 if (err)
2378 return err;
2379
2380 val |= P_REG_PS_SFT_RESET_M;
2381 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2382 if (err)
2383 return err;
2384
2385 val |= P_REG_PS_START_M;
2386 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2387 if (err)
2388 return err;
2389
2390 val &= ~P_REG_PS_SFT_RESET_M;
2391 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2392 if (err)
2393 return err;
2394
2395 err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
2396 if (err)
2397 return err;
2398
2399 ice_ptp_exec_tmr_cmd(hw);
2400
2401 val |= P_REG_PS_ENA_CLK_M;
2402 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2403 if (err)
2404 return err;
2405
2406 val |= P_REG_PS_LOAD_OFFSET_M;
2407 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2408 if (err)
2409 return err;
2410
2411 ice_ptp_exec_tmr_cmd(hw);
2412
2413 err = ice_sync_phy_timer_e822(hw, port);
2414 if (err)
2415 return err;
2416
2417 if (bypass) {
2418 val |= P_REG_PS_BYPASS_MODE_M;
2419 /* Enter BYPASS mode, enabling timestamps immediately. */
2420 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2421 if (err)
2422 return err;
2423
2424 /* Program the fixed Tx offset */
2425 err = ice_phy_cfg_fixed_tx_offset_e822(hw, port);
2426 if (err)
2427 return err;
2428
2429 /* Program the fixed Rx offset */
2430 err = ice_phy_cfg_fixed_rx_offset_e822(hw, port);
2431 if (err)
2432 return err;
2433 }
2434
2435 ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
2436
2437 return 0;
2438 }
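/* A typical link-up flow, sketched only from the descriptions above (the
 * actual call sites live in the higher level PTP code, not in this file):
 *
 *	err = ice_start_phy_timer_e822(hw, port, true);
 *	...
 *	(wait until at least one packet has been sent and received)
 *	...
 *	err = ice_phy_exit_bypass_e822(hw, port);
 *
 * Starting in bypass mode makes timestamps usable immediately; exiting bypass
 * later folds in the Vernier-calibrated offsets for higher precision.
 */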
2439
2440 /**
2441 * ice_phy_exit_bypass_e822 - Exit bypass mode after Vernier calculations
2442 * @hw: pointer to the HW struct
2443 * @port: the PHY port to configure
2444 *
2445 * After hardware finishes Vernier calculations for the Tx and Rx offsets, this
2446 * function can be used to exit bypass mode by updating the total Tx and Rx
2447 * offsets, and then disabling bypass. This will enable hardware to include
2448 * the more precise offset calibrations, increasing precision of the generated
2449 * timestamps.
2450 *
2451 * This cannot be done until hardware has measured the offsets, which requires
2452 * waiting until at least one packet has been sent and received by the device.
2453 */
2454 int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port)
2455 {
2456 int err;
2457 u32 val;
2458
2459 err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &val);
2460 if (err) {
2461 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
2462 port, err);
2463 return err;
2464 }
2465
2466 if (!(val & P_REG_TX_OV_STATUS_OV_M)) {
2467 ice_debug(hw, ICE_DBG_PTP, "Tx offset is not yet valid for port %u\n",
2468 port);
2469 return -EBUSY;
2470 }
2471
2472 err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &val);
2473 if (err) {
2474 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
2475 port, err);
2476 return err;
2477 }
2478
2479 if (!(val & P_REG_RX_OV_STATUS_OV_M)) {
2480 ice_debug(hw, ICE_DBG_PTP, "Rx offset is not yet valid for port %u\n",
2481 port);
2482 return -EBUSY;
2483 }
2484
2485 err = ice_phy_cfg_tx_offset_e822(hw, port);
2486 if (err) {
2487 ice_debug(hw, ICE_DBG_PTP, "Failed to program total Tx offset for port %u, err %d\n",
2488 port, err);
2489 return err;
2490 }
2491
2492 err = ice_phy_cfg_rx_offset_e822(hw, port);
2493 if (err) {
2494 ice_debug(hw, ICE_DBG_PTP, "Failed to program total Rx offset for port %u, err %d\n",
2495 port, err);
2496 return err;
2497 }
2498
2499 /* Exit bypass mode now that the offset has been updated */
2500 err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
2501 if (err) {
2502 ice_debug(hw, ICE_DBG_PTP, "Failed to read P_REG_PS for port %u, err %d\n",
2503 port, err);
2504 return err;
2505 }
2506
2507 if (!(val & P_REG_PS_BYPASS_MODE_M))
2508 ice_debug(hw, ICE_DBG_PTP, "Port %u not in bypass mode\n",
2509 port);
2510
2511 val &= ~P_REG_PS_BYPASS_MODE_M;
2512 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2513 if (err) {
2514 ice_debug(hw, ICE_DBG_PTP, "Failed to disable bypass for port %u, err %d\n",
2515 port, err);
2516 return err;
2517 }
2518
2519 dev_info(ice_hw_to_dev(hw), "Exiting bypass mode on PHY port %u\n",
2520 port);
2521
2522 return 0;
2523 }
2524
2525 /* E810 functions
2526 *
2527 * The following functions operate on the E810 series devices which use
2528 * a separate external PHY.
2529 */
2530
2531 /**
2532 * ice_read_phy_reg_e810 - Read register from external PHY on E810
2533 * @hw: pointer to the HW struct
2534 * @addr: the address to read from
2535 * @val: On return, the value read from the PHY
2536 *
2537 * Read a register from the external PHY on the E810 device.
2538 */
2539 static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
2540 {
2541 struct ice_sbq_msg_input msg = {0};
2542 int err;
2543
2544 msg.msg_addr_low = lower_16_bits(addr);
2545 msg.msg_addr_high = upper_16_bits(addr);
2546 msg.opcode = ice_sbq_msg_rd;
2547 msg.dest_dev = rmn_0;
2548
2549 err = ice_sbq_rw_reg(hw, &msg);
2550 if (err) {
2551 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2552 err);
2553 return err;
2554 }
2555
2556 *val = msg.data;
2557
2558 return 0;
2559 }
2560
2561 /**
2562 * ice_write_phy_reg_e810 - Write register on external PHY on E810
2563 * @hw: pointer to the HW struct
2564 * @addr: the address to write to
2565 * @val: the value to write to the PHY
2566 *
2567 * Write a value to a register of the external PHY on the E810 device.
2568 */
2569 static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
2570 {
2571 struct ice_sbq_msg_input msg = {0};
2572 int err;
2573
2574 msg.msg_addr_low = lower_16_bits(addr);
2575 msg.msg_addr_high = upper_16_bits(addr);
2576 msg.opcode = ice_sbq_msg_wr;
2577 msg.dest_dev = rmn_0;
2578 msg.data = val;
2579
2580 err = ice_sbq_rw_reg(hw, &msg);
2581 if (err) {
2582 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2583 err);
2584 return err;
2585 }
2586
2587 return 0;
2588 }
2589
2590 /**
2591 * ice_read_phy_tstamp_ll_e810 - Read PHY timestamp registers through the FW
2592 * @hw: pointer to the HW struct
2593 * @idx: the timestamp index to read
2594 * @hi: 8 bit timestamp high value
2595 * @lo: 32 bit timestamp low value
2596 *
2597 * Read an 8 bit timestamp high value and a 32 bit timestamp low value out of the
2598 * timestamp block of the external PHY on the E810 device using the low latency
2599 * timestamp read.
2600 */
2601 static int
2602 ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
2603 {
2604 u32 val;
2605 u8 i;
2606
2607 /* Write the TS index to read into the PF register so the FW can read it */
2608 val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
2609 wr32(hw, PF_SB_ATQBAL, val);
2610
2611 /* Read the register repeatedly until the FW provides us the TS */
2612 for (i = TS_LL_READ_RETRIES; i > 0; i--) {
2613 val = rd32(hw, PF_SB_ATQBAL);
2614
2615 /* When the bit is cleared, the TS is ready in the register */
2616 if (!(FIELD_GET(TS_LL_READ_TS, val))) {
2617 /* High 8 bit value of the TS is on the bits 16:23 */
2618 *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
2619
2620 /* Read the low 32 bit value and set the TS valid bit */
2621 *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
2622 return 0;
2623 }
2624
2625 udelay(10);
2626 }
2627
2628 /* FW failed to provide the TS in time */
2629 ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
2630 return -EINVAL;
2631 }
2632
2633 /**
2634 * ice_read_phy_tstamp_sbq_e810 - Read PHY timestamp registers through the sbq
2635 * @hw: pointer to the HW struct
2636 * @lport: the lport to read from
2637 * @idx: the timestamp index to read
2638 * @hi: 8 bit timestamp high value
2639 * @lo: 32 bit timestamp low value
2640 *
2641 * Read an 8 bit timestamp high value and a 32 bit timestamp low value out of the
2642 * timestamp block of the external PHY on the E810 device using the sideband queue.
2643 */
2644 static int
2645 ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
2646 u32 *lo)
2647 {
2648 u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2649 u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2650 u32 lo_val, hi_val;
2651 int err;
2652
2653 err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
2654 if (err) {
2655 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
2656 err);
2657 return err;
2658 }
2659
2660 err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
2661 if (err) {
2662 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
2663 err);
2664 return err;
2665 }
2666
2667 *lo = lo_val;
2668 *hi = (u8)hi_val;
2669
2670 return 0;
2671 }
2672
2673 /**
2674 * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
2675 * @hw: pointer to the HW struct
2676 * @lport: the lport to read from
2677 * @idx: the timestamp index to read
2678 * @tstamp: on return, the 40bit timestamp value
2679 *
2680 * Read a 40bit timestamp value out of the timestamp block of the external PHY
2681 * on the E810 device.
2682 */
2683 static int
2684 ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
2685 {
2686 u32 lo = 0;
2687 u8 hi = 0;
2688 int err;
2689
2690 if (hw->dev_caps.ts_dev_info.ts_ll_read)
2691 err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
2692 else
2693 err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
2694
2695 if (err)
2696 return err;
2697
2698 /* For E810 devices, the timestamp is reported with the lower 32 bits
2699 * in the low register, and the upper 8 bits in the high register.
2700 */
2701 *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
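	/* For example (hypothetical values, assuming TS_HIGH_S is 32 and
	 * TS_LOW_M covers the low 32 bits): hi = 0x12 and lo = 0x89ABCDEF
	 * combine into the 40 bit timestamp 0x1289ABCDEF.
	 */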
2702
2703 return 0;
2704 }
2705
2706 /**
2707 * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY
2708 * @hw: pointer to the HW struct
2709 * @lport: the lport to read from
2710 * @idx: the timestamp index to reset
2711 *
2712 * Clear a timestamp, resetting its valid bit, from the timestamp block of the
2713 * external PHY on the E810 device.
2714 */
2715 static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
2716 {
2717 u32 lo_addr, hi_addr;
2718 int err;
2719
2720 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2721 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2722
2723 err = ice_write_phy_reg_e810(hw, lo_addr, 0);
2724 if (err) {
2725 ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
2726 err);
2727 return err;
2728 }
2729
2730 err = ice_write_phy_reg_e810(hw, hi_addr, 0);
2731 if (err) {
2732 ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
2733 err);
2734 return err;
2735 }
2736
2737 return 0;
2738 }
2739
2740 /**
2741 * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
2742 * @hw: pointer to HW struct
2743 *
2744 * Enable the timesync PTP functionality for the external PHY connected to
2745 * this function.
2746 */
2747 int ice_ptp_init_phy_e810(struct ice_hw *hw)
2748 {
2749 u8 tmr_idx;
2750 int err;
2751
2752 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2753 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
2754 GLTSYN_ENA_TSYN_ENA_M);
2755 if (err)
2756 ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
2757 err);
2758
2759 return err;
2760 }
2761
2762 /**
2763 * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
2764 * @hw: pointer to HW struct
2765 *
2766 * Perform E810-specific PTP hardware clock initialization steps.
2767 */
2768 static int ice_ptp_init_phc_e810(struct ice_hw *hw)
2769 {
2770 /* Ensure synchronization delay is zero */
2771 wr32(hw, GLTSYN_SYNC_DLAY, 0);
2772
2773 /* Initialize the PHY */
2774 return ice_ptp_init_phy_e810(hw);
2775 }
2776
2777 /**
2778 * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
2779 * @hw: Board private structure
2780 * @time: Time to initialize the PHY port clock to
2781 *
2782 * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for setting the
2783 * initial clock time. The time will not actually be programmed until the
2784 * driver issues an INIT_TIME command.
2785 *
2786 * The time value is the upper 32 bits of the PHY timer, usually in units of
2787 * nominal nanoseconds.
2788 */
2789 static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
2790 {
2791 u8 tmr_idx;
2792 int err;
2793
2794 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2795 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
2796 if (err) {
2797 ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n",
2798 err);
2799 return err;
2800 }
2801
2802 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
2803 if (err) {
2804 ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n",
2805 err);
2806 return err;
2807 }
2808
2809 return 0;
2810 }
2811
2812 /**
2813 * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
2814 * @hw: pointer to HW struct
2815 * @adj: adjustment value to program
2816 *
2817 * Prepare the PHY port for an atomic adjustment by programming the PHY
2818 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
2819 * is completed by issuing an ADJ_TIME sync command.
2820 *
2821 * The adjustment value only contains the portion used for the upper 32bits of
2822 * the PHY timer, usually in units of nominal nanoseconds. Negative
2823 * adjustments are supported using 2s complement arithmetic.
2824 */
2825 static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
2826 {
2827 u8 tmr_idx;
2828 int err;
2829
2830 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2831
2832 /* Adjustments are represented as signed 2's complement values in
2833 * nanoseconds. Sub-nanosecond adjustment is not supported.
2834 */
2835 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
2836 if (err) {
2837 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n",
2838 err);
2839 return err;
2840 }
2841
2842 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
2843 if (err) {
2844 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n",
2845 err);
2846 return err;
2847 }
2848
2849 return 0;
2850 }
2851
2852 /**
2853 * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
2854 * @hw: pointer to HW struct
2855 * @incval: The new 40bit increment value to prepare
2856 *
2857 * Prepare the PHY port for a new increment value by programming the PHY
2858 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
2859 * completed by issuing an INIT_INCVAL command.
2860 */
2861 static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
2862 {
2863 u32 high, low;
2864 u8 tmr_idx;
2865 int err;
2866
2867 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2868 low = lower_32_bits(incval);
2869 high = upper_32_bits(incval);
2870
2871 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
2872 if (err) {
2873 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n",
2874 err);
2875 return err;
2876 }
2877
2878 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
2879 if (err) {
2880 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, err %d\n",
2881 err);
2882 return err;
2883 }
2884
2885 return 0;
2886 }
2887
2888 /**
2889 * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command
2890 * @hw: pointer to HW struct
2891 * @cmd: Command to be sent to the port
2892 *
2893 * Prepare the external PHYs connected to this device for a timer sync
2894 * command.
2895 */
2896 static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
2897 {
2898 u32 cmd_val, val;
2899 int err;
2900
2901 switch (cmd) {
2902 case INIT_TIME:
2903 cmd_val = GLTSYN_CMD_INIT_TIME;
2904 break;
2905 case INIT_INCVAL:
2906 cmd_val = GLTSYN_CMD_INIT_INCVAL;
2907 break;
2908 case ADJ_TIME:
2909 cmd_val = GLTSYN_CMD_ADJ_TIME;
2910 break;
2911 case READ_TIME:
2912 cmd_val = GLTSYN_CMD_READ_TIME;
2913 break;
2914 case ADJ_TIME_AT_TIME:
2915 cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
2916 break;
2917 }
2918
2919 /* Read, modify, write */
2920 err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
2921 if (err) {
2922 ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
2923 return err;
2924 }
2925
2926 /* Modify necessary bits only and perform write */
2927 val &= ~TS_CMD_MASK_E810;
2928 val |= cmd_val;
2929
2930 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
2931 if (err) {
2932 ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
2933 return err;
2934 }
2935
2936 return 0;
2937 }
2938
2939 /* Device agnostic functions
2940 *
2941 * The following functions implement shared behavior common to both E822 and
2942 * E810 devices, possibly calling a device specific implementation where
2943 * necessary.
2944 */
2945
2946 /**
2947 * ice_ptp_lock - Acquire PTP global semaphore register lock
2948 * @hw: pointer to the HW struct
2949 *
2950 * Acquire the global PTP hardware semaphore lock. Returns true if the lock
2951 * was acquired, false otherwise.
2952 *
2953 * The PFTSYN_SEM register sets the busy bit on read, returning the previous
2954 * value. If software sees the busy bit cleared, this means that this function
2955 * acquired the lock (and the busy bit is now set). If software sees the busy
2956 * bit set, it means that another function acquired the lock.
2957 *
2958 * Software must clear the busy bit with a write to release the lock for other
2959 * functions when done.
2960 */
2961 bool ice_ptp_lock(struct ice_hw *hw)
2962 {
2963 u32 hw_lock;
2964 int i;
2965
2966 #define MAX_TRIES 5
2967
2968 for (i = 0; i < MAX_TRIES; i++) {
2969 hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2970 hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
2971 if (!hw_lock)
2972 break;
2973
2974 /* Somebody is holding the lock */
2975 usleep_range(10000, 20000);
2976 }
2977
2978 return !hw_lock;
2979 }
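/* The expected usage pattern, as used elsewhere in this file:
 *
 *	if (!ice_ptp_lock(hw))
 *		return -EBUSY;
 *	... access the shared timer registers ...
 *	ice_ptp_unlock(hw);
 */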
2980
2981 /**
2982 * ice_ptp_unlock - Release PTP global semaphore register lock
2983 * @hw: pointer to the HW struct
2984 *
2985 * Release the global PTP hardware semaphore lock. This is done by writing to
2986 * the PFTSYN_SEM register.
2987 */
2988 void ice_ptp_unlock(struct ice_hw *hw)
2989 {
2990 wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0);
2991 }
2992
2993 /**
2994 * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
2995 * @hw: pointer to HW struct
2996 * @cmd: the command to issue
2997 *
2998 * Prepare the source timer and PHY timers and then trigger the requested
2999 * command. This causes the shadow registers previously written in preparation
3000 * for the command to be synchronously applied to both the source and PHY
3001 * timers.
3002 */
3003 static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
3004 {
3005 int err;
3006
3007 /* First, prepare the source timer */
3008 ice_ptp_src_cmd(hw, cmd);
3009
3010 /* Next, prepare the ports */
3011 if (ice_is_e810(hw))
3012 err = ice_ptp_port_cmd_e810(hw, cmd);
3013 else
3014 err = ice_ptp_port_cmd_e822(hw, cmd);
3015 if (err) {
3016 ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
3017 cmd, err);
3018 return err;
3019 }
3020
3021 /* Write the sync command register to drive both source and PHY timer
3022 * commands synchronously
3023 */
3024 ice_ptp_exec_tmr_cmd(hw);
3025
3026 return 0;
3027 }
3028
3029 /**
3030 * ice_ptp_init_time - Initialize device time to provided value
3031 * @hw: pointer to HW struct
3032 * @time: 64bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H)
3033 *
3034 * Initialize the device to the specified time provided. This requires a three
3035 * step process:
3036 *
3037 * 1) write the new init time to the source timer shadow registers
3038 * 2) write the new init time to the PHY timer shadow registers
3039 * 3) issue an init_time timer command to synchronously switch both the source
3040 * and port timers to the new init time value at the next clock cycle.
3041 */
3042 int ice_ptp_init_time(struct ice_hw *hw, u64 time)
3043 {
3044 u8 tmr_idx;
3045 int err;
3046
3047 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3048
3049 /* Source timers */
3050 wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
3051 wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
3052 wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
3053
3054 /* PHY timers */
3055 /* Fill Rx and Tx ports and send msg to PHY */
3056 if (ice_is_e810(hw))
3057 err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
3058 else
3059 err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
3060 if (err)
3061 return err;
3062
3063 return ice_ptp_tmr_cmd(hw, INIT_TIME);
3064 }
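/* For example (illustrative only): initializing to time = 0x123456789 writes
 * 0x23456789 to GLTSYN_SHTIME_L, 0x1 to GLTSYN_SHTIME_H, and zeroes
 * GLTSYN_SHTIME_0 so the sub-nanosecond portion of the timer starts at zero,
 * with the same lower 32 bits forwarded to the PHY timers.
 */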
3065
3066 /**
3067 * ice_ptp_write_incval - Program PHC with new increment value
3068 * @hw: pointer to HW struct
3069 * @incval: Source timer increment value per clock cycle
3070 *
3071 * Program the PHC with a new increment value. This requires a three-step
3072 * process:
3073 *
3074 * 1) Write the increment value to the source timer shadow registers
3075 * 2) Write the increment value to the PHY timer shadow registers
3076 * 3) Issue an INIT_INCVAL timer command to synchronously switch both the
3077 * source and port timers to the new increment value at the next clock
3078 * cycle.
3079 */
3080 int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
3081 {
3082 u8 tmr_idx;
3083 int err;
3084
3085 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3086
3087 /* Shadow Adjust */
3088 wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
3089 wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
3090
3091 if (ice_is_e810(hw))
3092 err = ice_ptp_prep_phy_incval_e810(hw, incval);
3093 else
3094 err = ice_ptp_prep_phy_incval_e822(hw, incval);
3095 if (err)
3096 return err;
3097
3098 return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
3099 }
3100
3101 /**
3102 * ice_ptp_write_incval_locked - Program new incval while holding semaphore
3103 * @hw: pointer to HW struct
3104 * @incval: Source timer increment value per clock cycle
3105 *
3106 * Program a new PHC incval while holding the PTP semaphore.
3107 */
3108 int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
3109 {
3110 int err;
3111
3112 if (!ice_ptp_lock(hw))
3113 return -EBUSY;
3114
3115 err = ice_ptp_write_incval(hw, incval);
3116
3117 ice_ptp_unlock(hw);
3118
3119 return err;
3120 }
3121
3122 /**
3123 * ice_ptp_adj_clock - Adjust PHC clock time atomically
3124 * @hw: pointer to HW struct
3125 * @adj: Adjustment in nanoseconds
3126 *
3127 * Perform an atomic adjustment of the PHC time by the specified number of
3128 * nanoseconds. This requires a three-step process:
3129 *
3130 * 1) Write the adjustment to the source timer shadow registers
3131 * 2) Write the adjustment to the PHY timer shadow registers
3132 * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to
3133 * both the source and port timers at the next clock cycle.
3134 */
3135 int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
3136 {
3137 u8 tmr_idx;
3138 int err;
3139
3140 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3141
3142 /* Write the desired clock adjustment into the GLTSYN_SHADJ register.
3143 * For an ADJ_TIME command, this set of registers represents the value
3144 * to add to the clock time. It supports subtraction by interpreting
3145 * the value as a 2's complement integer.
3146 */
3147 wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
3148 wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
3149
3150 if (ice_is_e810(hw))
3151 err = ice_ptp_prep_phy_adj_e810(hw, adj);
3152 else
3153 err = ice_ptp_prep_phy_adj_e822(hw, adj);
3154 if (err)
3155 return err;
3156
3157 return ice_ptp_tmr_cmd(hw, ADJ_TIME);
3158 }
3159
3160 /**
3161 * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block
3162 * @hw: pointer to the HW struct
3163 * @block: the block to read from
3164 * @idx: the timestamp index to read
3165 * @tstamp: on return, the 40bit timestamp value
3166 *
3167 * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
3168 * the block is the quad to read from. For E810 devices, the block is the
3169 * logical port to read from.
3170 */
3171 int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
3172 {
3173 if (ice_is_e810(hw))
3174 return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
3175 else
3176 return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
3177 }
3178
3179 /**
3180 * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block
3181 * @hw: pointer to the HW struct
3182 * @block: the block to read from
3183 * @idx: the timestamp index to reset
3184 *
3185 * Clear a timestamp, resetting its valid bit, from the timestamp block. For
3186 * E822 devices, the block is the quad to clear from. For E810 devices, the
3187 * block is the logical port to clear from.
3188 */
3189 int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
3190 {
3191 if (ice_is_e810(hw))
3192 return ice_clear_phy_tstamp_e810(hw, block, idx);
3193 else
3194 return ice_clear_phy_tstamp_e822(hw, block, idx);
3195 }
3196
3197 /* E810T SMA functions
3198 *
3199 * The following functions operate specifically on E810T hardware and are used
3200 * to access the extended GPIOs available.
3201 */
3202
3203 /**
3204 * ice_get_pca9575_handle
3205 * @hw: pointer to the hw struct
3206 * @pca9575_handle: GPIO controller's handle
3207 *
3208 * Find and return the GPIO controller's handle in the netlist.
3209 * Once found, the handle is cached in the HW structure and subsequent calls
3210 * return the cached value.
3211 */
3212 static int
3213 ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
3214 {
3215 struct ice_aqc_get_link_topo *cmd;
3216 struct ice_aq_desc desc;
3217 int status;
3218 u8 idx;
3219
3220 /* If the handle was read previously, return the cached value */
3221 if (hw->io_expander_handle) {
3222 *pca9575_handle = hw->io_expander_handle;
3223 return 0;
3224 }
3225
3226 /* If the handle has not been cached yet, read it from the netlist */
3227 cmd = &desc.params.get_link_topo;
3228 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
3229
3230 /* Set node type to GPIO controller */
3231 cmd->addr.topo_params.node_type_ctx =
3232 (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
3233 ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
3234
3235 #define SW_PCA9575_SFP_TOPO_IDX 2
3236 #define SW_PCA9575_QSFP_TOPO_IDX 1
3237
3238 /* Check if the SW IO expander controlling SMA exists in the netlist. */
3239 if (hw->device_id == ICE_DEV_ID_E810C_SFP)
3240 idx = SW_PCA9575_SFP_TOPO_IDX;
3241 else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
3242 idx = SW_PCA9575_QSFP_TOPO_IDX;
3243 else
3244 return -EOPNOTSUPP;
3245
3246 cmd->addr.topo_params.index = idx;
3247
3248 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3249 if (status)
3250 return -EOPNOTSUPP;
3251
3252 /* Verify if we found the right IO expander type */
3253 if (desc.params.get_link_topo.node_part_num !=
3254 ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
3255 return -EOPNOTSUPP;
3256
3257 /* If present save the handle and return it */
3258 hw->io_expander_handle =
3259 le16_to_cpu(desc.params.get_link_topo.addr.handle);
3260 *pca9575_handle = hw->io_expander_handle;
3261
3262 return 0;
3263 }
3264
3265 /**
3266 * ice_read_sma_ctrl_e810t
3267 * @hw: pointer to the hw struct
3268 * @data: pointer to data to be read from the GPIO controller
3269 *
3270 * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
3271 * PCA9575 expander, so only bits 3-7 in data are valid.
3272 */
3273 int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
3274 {
3275 int status;
3276 u16 handle;
3277 u8 i;
3278
3279 status = ice_get_pca9575_handle(hw, &handle);
3280 if (status)
3281 return status;
3282
3283 *data = 0;
3284
3285 for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3286 bool pin;
3287
3288 status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3289 &pin, NULL);
3290 if (status)
3291 break;
3292 *data |= (u8)(!pin) << i;
3293 }
3294
3295 return status;
3296 }
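/* Note on polarity (restating the loop above): ice_aq_get_gpio() reports the
 * raw pin level and the inverted value is stored, so a bit set in *data means
 * the corresponding SMA pin currently reads low. For example, if only pins 3
 * and 5 of Port 1 read low, *data ends up as 0x28.
 */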
3297
3298 /**
3299 * ice_write_sma_ctrl_e810t
3300 * @hw: pointer to the hw struct
3301 * @data: data to be written to the GPIO controller
3302 *
3303 * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
3304 * of the PCA9575 expander, so only bits 3-7 in data are valid.
3305 */
3306 int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
3307 {
3308 int status;
3309 u16 handle;
3310 u8 i;
3311
3312 status = ice_get_pca9575_handle(hw, &handle);
3313 if (status)
3314 return status;
3315
3316 for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3317 bool pin;
3318
3319 pin = !(data & (1 << i));
3320 status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3321 pin, NULL);
3322 if (status)
3323 break;
3324 }
3325
3326 return status;
3327 }
3328
3329 /**
3330 * ice_read_pca9575_reg_e810t
3331 * @hw: pointer to the hw struct
3332 * @offset: GPIO controller register offset
3333 * @data: pointer to data to be read from the GPIO controller
3334 *
3335 * Read the register from the GPIO controller
3336 */
3337 int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
3338 {
3339 struct ice_aqc_link_topo_addr link_topo;
3340 __le16 addr;
3341 u16 handle;
3342 int err;
3343
3344 memset(&link_topo, 0, sizeof(link_topo));
3345
3346 err = ice_get_pca9575_handle(hw, &handle);
3347 if (err)
3348 return err;
3349
3350 link_topo.handle = cpu_to_le16(handle);
3351 link_topo.topo_params.node_type_ctx =
3352 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
3353 ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
3354
3355 addr = cpu_to_le16((u16)offset);
3356
3357 return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
3358 }
3359
3360 /**
3361 * ice_is_pca9575_present
3362 * @hw: pointer to the hw struct
3363 *
3364 * Check if the SW IO expander is present in the netlist
3365 */
3366 bool ice_is_pca9575_present(struct ice_hw *hw)
3367 {
3368 u16 handle = 0;
3369 int status;
3370
3371 if (!ice_is_e810t(hw))
3372 return false;
3373
3374 status = ice_get_pca9575_handle(hw, &handle);
3375
3376 return !status && handle;
3377 }
3378
3379 /**
3380 * ice_ptp_init_phc - Initialize PTP hardware clock
3381 * @hw: pointer to the HW struct
3382 *
3383 * Perform the steps required to initialize the PTP hardware clock.
3384 */
3385 int ice_ptp_init_phc(struct ice_hw *hw)
3386 {
3387 u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3388
3389 /* Enable source clocks */
3390 wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
3391
3392 /* Clear event err indications for auxiliary pins */
3393 (void)rd32(hw, GLTSYN_STAT(src_idx));
3394
3395 if (ice_is_e810(hw))
3396 return ice_ptp_init_phc_e810(hw);
3397 else
3398 return ice_ptp_init_phc_e822(hw);
3399 }
3400