1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 #include <linux/pci.h>
5 #include <linux/delay.h>
6 #include <linux/iopoll.h>
7 #include <linux/sched.h>
8
9 #include "ixgbe.h"
10 #include "ixgbe_phy.h"
11
12 static void ixgbe_i2c_start(struct ixgbe_hw *hw);
13 static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
14 static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
15 static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
16 static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
17 static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
18 static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
19 static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
20 static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
21 static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
22 static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
23 static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
24 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
25 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
26 static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
27
28 /**
29 * ixgbe_out_i2c_byte_ack - Send I2C byte with ack
30 * @hw: pointer to the hardware structure
31 * @byte: byte to send
32 *
33 * Returns an error code on error.
34 **/
35 static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
36 {
37 s32 status;
38
39 status = ixgbe_clock_out_i2c_byte(hw, byte);
40 if (status)
41 return status;
42 return ixgbe_get_i2c_ack(hw);
43 }
44
45 /**
46 * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack
47 * @hw: pointer to the hardware structure
48 * @byte: pointer to a u8 to receive the byte
49 *
50 * Returns an error code on error.
51 **/
52 static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
53 {
54 s32 status;
55
56 status = ixgbe_clock_in_i2c_byte(hw, byte);
57 if (status)
58 return status;
59 /* ACK */
60 return ixgbe_clock_out_i2c_bit(hw, false);
61 }
62
63 /**
64 * ixgbe_ones_comp_byte_add - Perform one's complement addition
65 * @add1: addend 1
66 * @add2: addend 2
67 *
68 * Returns one's complement 8-bit sum.
69 **/
70 static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
71 {
72 u16 sum = add1 + add2;
73
74 sum = (sum & 0xFF) + (sum >> 8);
75 return sum & 0xFF;
76 }
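/* Worked example (illustrative, not from the driver): 0x85 + 0x9A = 0x11F;
 * folding the carry back in gives 0x1F + 0x01 = 0x20, the one's complement
 * sum that the combined I2C operations below use for their checksums.
 */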
77
78 /**
79 * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation
80 * @hw: pointer to the hardware structure
81 * @addr: I2C bus address to read from
82 * @reg: I2C device register to read from
83 * @val: pointer to location to receive read value
84 * @lock: true if the SW/FW semaphore should be taken and released
85 *
86 * Returns an error code on error.
87 */
88 s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
89 u16 reg, u16 *val, bool lock)
90 {
91 u32 swfw_mask = hw->phy.phy_semaphore_mask;
92 int max_retry = 3;
93 int retry = 0;
94 u8 csum_byte;
95 u8 high_bits;
96 u8 low_bits;
97 u8 reg_high;
98 u8 csum;
99
100 reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */
101 csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
102 csum = ~csum;
103 do {
104 if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
105 return IXGBE_ERR_SWFW_SYNC;
106 ixgbe_i2c_start(hw);
107 /* Device Address and write indication */
108 if (ixgbe_out_i2c_byte_ack(hw, addr))
109 goto fail;
110 /* Write bits 14:8 */
111 if (ixgbe_out_i2c_byte_ack(hw, reg_high))
112 goto fail;
113 /* Write bits 7:0 */
114 if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
115 goto fail;
116 /* Write csum */
117 if (ixgbe_out_i2c_byte_ack(hw, csum))
118 goto fail;
119 /* Re-start condition */
120 ixgbe_i2c_start(hw);
121 /* Device Address and read indication */
122 if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
123 goto fail;
124 /* Get upper bits */
125 if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
126 goto fail;
127 /* Get low bits */
128 if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
129 goto fail;
130 /* Get csum */
131 if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
132 goto fail;
133 /* NACK */
134 if (ixgbe_clock_out_i2c_bit(hw, false))
135 goto fail;
136 ixgbe_i2c_stop(hw);
137 if (lock)
138 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
139 *val = (high_bits << 8) | low_bits;
140 return 0;
141
142 fail:
143 ixgbe_i2c_bus_clear(hw);
144 if (lock)
145 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
146 retry++;
147 if (retry < max_retry)
148 hw_dbg(hw, "I2C byte read combined error - Retry.\n");
149 else
150 hw_dbg(hw, "I2C byte read combined error.\n");
151 } while (retry < max_retry);
152
153 return IXGBE_ERR_I2C;
154 }
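/* The transaction above, written out as a frame (derived from the code,
 * for reference only):
 *
 *   START <addr|W> <reg 14:8 + combined-read flag> <reg 7:0> <csum>
 *   RESTART <addr|R> <data 15:8> <data 7:0> <csum> NACK STOP
 *
 * Every byte the host sends must be ACKed by the device; the checksum byte
 * returned by the device is clocked in but not validated here.
 */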
155
156 /**
157 * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
158 * @hw: pointer to the hardware structure
159 * @addr: I2C bus address to write to
160 * @reg: I2C device register to write to
161 * @val: value to write
162 * @lock: true if the SW/FW semaphore should be taken and released
163 *
164 * Returns an error code on error.
165 */
166 s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
167 u16 reg, u16 val, bool lock)
168 {
169 u32 swfw_mask = hw->phy.phy_semaphore_mask;
170 int max_retry = 1;
171 int retry = 0;
172 u8 reg_high;
173 u8 csum;
174
175 reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */
176 csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
177 csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
178 csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
179 csum = ~csum;
180 do {
181 if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
182 return IXGBE_ERR_SWFW_SYNC;
183 ixgbe_i2c_start(hw);
184 /* Device Address and write indication */
185 if (ixgbe_out_i2c_byte_ack(hw, addr))
186 goto fail;
187 /* Write bits 14:8 */
188 if (ixgbe_out_i2c_byte_ack(hw, reg_high))
189 goto fail;
190 /* Write bits 7:0 */
191 if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
192 goto fail;
193 /* Write data 15:8 */
194 if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
195 goto fail;
196 /* Write data 7:0 */
197 if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
198 goto fail;
199 /* Write csum */
200 if (ixgbe_out_i2c_byte_ack(hw, csum))
201 goto fail;
202 ixgbe_i2c_stop(hw);
203 if (lock)
204 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
205 return 0;
206
207 fail:
208 ixgbe_i2c_bus_clear(hw);
209 if (lock)
210 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
211 retry++;
212 if (retry < max_retry)
213 hw_dbg(hw, "I2C byte write combined error - Retry.\n");
214 else
215 hw_dbg(hw, "I2C byte write combined error.\n");
216 } while (retry < max_retry);
217
218 return IXGBE_ERR_I2C;
219 }
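/* The corresponding write frame (derived from the code, for reference only):
 *
 *   START <addr|W> <reg 14:8> <reg 7:0> <data 15:8> <data 7:0> <csum> STOP
 *
 * The checksum sent is the inverted one's complement sum of the register
 * and data bytes computed before the retry loop.
 */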
220
221 /**
222 * ixgbe_probe_phy - Probe a single address for a PHY
223 * @hw: pointer to hardware structure
224 * @phy_addr: PHY address to probe
225 *
226 * Returns true if PHY found
227 **/
228 static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
229 {
230 u16 ext_ability = 0;
231
232 hw->phy.mdio.prtad = phy_addr;
233 if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0)
234 return false;
235
236 if (ixgbe_get_phy_id(hw))
237 return false;
238
239 hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
240
241 if (hw->phy.type == ixgbe_phy_unknown) {
242 hw->phy.ops.read_reg(hw,
243 MDIO_PMA_EXTABLE,
244 MDIO_MMD_PMAPMD,
245 &ext_ability);
246 if (ext_ability &
247 (MDIO_PMA_EXTABLE_10GBT |
248 MDIO_PMA_EXTABLE_1000BT))
249 hw->phy.type = ixgbe_phy_cu_unknown;
250 else
251 hw->phy.type = ixgbe_phy_generic;
252 }
253
254 return true;
255 }
256
257 /**
258 * ixgbe_identify_phy_generic - Get physical layer module
259 * @hw: pointer to hardware structure
260 *
261 * Determines the physical layer module found on the current adapter.
262 **/
263 s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
264 {
265 u32 phy_addr;
266 u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
267
268 if (!hw->phy.phy_semaphore_mask) {
269 if (hw->bus.lan_id)
270 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
271 else
272 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
273 }
274
275 if (hw->phy.type != ixgbe_phy_unknown)
276 return 0;
277
278 if (hw->phy.nw_mng_if_sel) {
279 phy_addr = (hw->phy.nw_mng_if_sel &
280 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
281 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
282 if (ixgbe_probe_phy(hw, phy_addr))
283 return 0;
284 else
285 return IXGBE_ERR_PHY_ADDR_INVALID;
286 }
287
288 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
289 if (ixgbe_probe_phy(hw, phy_addr)) {
290 status = 0;
291 break;
292 }
293 }
294
295 /* Certain media types do not have a PHY, so an address will not
296 * be found and the code will take this path. Caller has to
297 * decide if it is an error or not.
298 */
299 if (status)
300 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
301
302 return status;
303 }
304
305 /**
306 * ixgbe_check_reset_blocked - check status of MNG FW veto bit
307 * @hw: pointer to the hardware structure
308 *
309 * This function checks the MMNGC.MNG_VETO bit to see if there are
310 * any constraints on link from manageability. For MACs that don't
311 * have this bit, just return false since the link cannot be blocked
312 * via this method.
313 **/
314 bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
315 {
316 u32 mmngc;
317
318 /* If we don't have this bit, it can't be blocking */
319 if (hw->mac.type == ixgbe_mac_82598EB)
320 return false;
321
322 mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
323 if (mmngc & IXGBE_MMNGC_MNG_VETO) {
324 hw_dbg(hw, "MNG_VETO bit detected.\n");
325 return true;
326 }
327
328 return false;
329 }
330
331 /**
332 * ixgbe_get_phy_id - Get the PHY ID
333 * @hw: pointer to hardware structure
334 *
335 **/
336 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
337 {
338 s32 status;
339 u16 phy_id_high = 0;
340 u16 phy_id_low = 0;
341
342 status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
343 &phy_id_high);
344
345 if (!status) {
346 hw->phy.id = (u32)(phy_id_high << 16);
347 status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
348 &phy_id_low);
349 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
350 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
351 }
352 return status;
353 }
354
355 /**
356 * ixgbe_get_phy_type_from_id - Get the phy type
357 * @phy_id: hardware phy id
358 *
359 **/
360 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
361 {
362 enum ixgbe_phy_type phy_type;
363
364 switch (phy_id) {
365 case TN1010_PHY_ID:
366 phy_type = ixgbe_phy_tn;
367 break;
368 case X550_PHY_ID2:
369 case X550_PHY_ID3:
370 case X540_PHY_ID:
371 phy_type = ixgbe_phy_aq;
372 break;
373 case QT2022_PHY_ID:
374 phy_type = ixgbe_phy_qt;
375 break;
376 case ATH_PHY_ID:
377 phy_type = ixgbe_phy_nl;
378 break;
379 case X557_PHY_ID:
380 case X557_PHY_ID2:
381 phy_type = ixgbe_phy_x550em_ext_t;
382 break;
383 default:
384 phy_type = ixgbe_phy_unknown;
385 break;
386 }
387
388 return phy_type;
389 }
390
391 /**
392 * ixgbe_reset_phy_generic - Performs a PHY reset
393 * @hw: pointer to hardware structure
394 **/
395 s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
396 {
397 u32 i;
398 u16 ctrl = 0;
399 s32 status = 0;
400
401 if (hw->phy.type == ixgbe_phy_unknown)
402 status = ixgbe_identify_phy_generic(hw);
403
404 if (status != 0 || hw->phy.type == ixgbe_phy_none)
405 return status;
406
407 /* Don't reset PHY if it's shut down due to overtemp. */
408 if (!hw->phy.reset_if_overtemp &&
409 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
410 return 0;
411
412 /* Blocked by MNG FW so bail */
413 if (ixgbe_check_reset_blocked(hw))
414 return 0;
415
416 /*
417 * Perform soft PHY reset to the PHY_XS.
418 * This will cause a soft reset to the PHY
419 */
420 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
421 MDIO_MMD_PHYXS,
422 MDIO_CTRL1_RESET);
423
424 /*
425 * Poll for reset bit to self-clear indicating reset is complete.
426 * Some PHYs could take up to 3 seconds to complete and need about
427 * 1.7 usec delay after the reset is complete.
428 */
429 for (i = 0; i < 30; i++) {
430 msleep(100);
431 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
432 status = hw->phy.ops.read_reg(hw,
433 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
434 MDIO_MMD_PMAPMD, &ctrl);
435 if (status)
436 return status;
437
438 if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
439 udelay(2);
440 break;
441 }
442 } else {
443 status = hw->phy.ops.read_reg(hw, MDIO_CTRL1,
444 MDIO_MMD_PHYXS, &ctrl);
445 if (status)
446 return status;
447
448 if (!(ctrl & MDIO_CTRL1_RESET)) {
449 udelay(2);
450 break;
451 }
452 }
453 }
454
455 if (ctrl & MDIO_CTRL1_RESET) {
456 hw_dbg(hw, "PHY reset polling failed to complete.\n");
457 return IXGBE_ERR_RESET_FAILED;
458 }
459
460 return 0;
461 }
462
463 /**
464 * ixgbe_read_phy_reg_mdi - Reads a value from a specified PHY register without
465 * the SWFW lock
466 * @hw: pointer to hardware structure
467 * @reg_addr: 32 bit address of PHY register to read
468 * @device_type: 5 bit device type
469 * @phy_data: Pointer to read data from PHY register
470 **/
471 s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
472 u16 *phy_data)
473 {
474 u32 i, data, command;
475
476 /* Setup and write the address cycle command */
477 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
478 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
479 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
480 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
481
482 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
483
484 /* Check every 10 usec to see if the address cycle completed.
485 * The MDI Command bit will clear when the operation is
486 * complete
487 */
488 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
489 udelay(10);
490
491 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
492 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
493 break;
494 }
495
496
497 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
498 hw_dbg(hw, "PHY address command did not complete.\n");
499 return IXGBE_ERR_PHY;
500 }
501
502 /* Address cycle complete, setup and write the read
503 * command
504 */
505 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
506 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
507 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
508 (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
509
510 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
511
512 /* Check every 10 usec to see if the address cycle
513 * completed. The MDI Command bit will clear when the
514 * operation is complete
515 */
516 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
517 udelay(10);
518
519 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
520 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
521 break;
522 }
523
524 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
525 hw_dbg(hw, "PHY read command didn't complete\n");
526 return IXGBE_ERR_PHY;
527 }
528
529 /* Read operation is complete. Get the data
530 * from MSRWD
531 */
532 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
533 data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
534 *phy_data = (u16)(data);
535
536 return 0;
537 }
538
539 /**
540 * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
541 * using the SWFW lock - this function is needed in most cases
542 * @hw: pointer to hardware structure
543 * @reg_addr: 32 bit address of PHY register to read
544 * @device_type: 5 bit device type
545 * @phy_data: Pointer to read data from PHY register
546 **/
547 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
548 u32 device_type, u16 *phy_data)
549 {
550 s32 status;
551 u32 gssr = hw->phy.phy_semaphore_mask;
552
553 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
554 status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
555 phy_data);
556 hw->mac.ops.release_swfw_sync(hw, gssr);
557 } else {
558 return IXGBE_ERR_SWFW_SYNC;
559 }
560
561 return status;
562 }
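/* Typical usage, as a minimal sketch mirroring ixgbe_get_phy_id() above
 * (which typically reaches this helper through hw->phy.ops.read_reg):
 *
 *	u16 id_high;
 *	s32 err;
 *
 *	err = ixgbe_read_phy_reg_generic(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
 *					 &id_high);
 *	(on success, id_high holds the upper PHY ID word)
 */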
563
564 /**
565 * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register
566 * without SWFW lock
567 * @hw: pointer to hardware structure
568 * @reg_addr: 32 bit PHY register to write
569 * @device_type: 5 bit device type
570 * @phy_data: Data to write to the PHY register
571 **/
572 s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
573 u32 device_type, u16 phy_data)
574 {
575 u32 i, command;
576
577 /* Put the data in the MDI single read and write data register */
578 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
579
580 /* Setup and write the address cycle command */
581 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
582 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
583 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
584 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
585
586 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
587
588 /*
589 * Check every 10 usec to see if the address cycle completed.
590 * The MDI Command bit will clear when the operation is
591 * complete
592 */
593 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
594 udelay(10);
595
596 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
597 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
598 break;
599 }
600
601 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
602 hw_dbg(hw, "PHY address cmd didn't complete\n");
603 return IXGBE_ERR_PHY;
604 }
605
606 /*
607 * Address cycle complete, setup and write the write
608 * command
609 */
610 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
611 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
612 (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
613 (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
614
615 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
616
617 /* Check every 10 usec to see if the address cycle
618 * completed. The MDI Command bit will clear when the
619 * operation is complete
620 */
621 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
622 udelay(10);
623
624 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
625 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
626 break;
627 }
628
629 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
630 hw_dbg(hw, "PHY write cmd didn't complete\n");
631 return IXGBE_ERR_PHY;
632 }
633
634 return 0;
635 }
636
637 /**
638 * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
639 * using SWFW lock - this function is needed in most cases
640 * @hw: pointer to hardware structure
641 * @reg_addr: 32 bit PHY register to write
642 * @device_type: 5 bit device type
643 * @phy_data: Data to write to the PHY register
644 **/
645 s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
646 u32 device_type, u16 phy_data)
647 {
648 s32 status;
649 u32 gssr = hw->phy.phy_semaphore_mask;
650
651 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
652 status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
653 phy_data);
654 hw->mac.ops.release_swfw_sync(hw, gssr);
655 } else {
656 return IXGBE_ERR_SWFW_SYNC;
657 }
658
659 return status;
660 }
661
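/* IXGBE_READ_REG() takes (hw, reg); this wrapper lets readx_poll_timeout()
 * below invoke it with a single register argument.
 */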
662 #define IXGBE_HW_READ_REG(addr) IXGBE_READ_REG(hw, addr)
663
664 /**
665 * ixgbe_msca_cmd - Write the command register and poll for completion/timeout
666 * @hw: pointer to hardware structure
667 * @cmd: command register value to write
668 **/
669 static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
670 {
671 IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd);
672
673 return readx_poll_timeout(IXGBE_HW_READ_REG, IXGBE_MSCA, cmd,
674 !(cmd & IXGBE_MSCA_MDI_COMMAND), 10,
675 10 * IXGBE_MDIO_COMMAND_TIMEOUT);
676 }
677
678 /**
679 * ixgbe_mii_bus_read_generic - Read a clause 22/45 register with gssr flags
680 * @hw: pointer to hardware structure
681 * @addr: address
682 * @regnum: register number
683 * @gssr: semaphore flags to acquire
684 **/
685 static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr,
686 int regnum, u32 gssr)
687 {
688 u32 hwaddr, cmd;
689 s32 data;
690
691 if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
692 return -EBUSY;
693
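/* For a clause 45 access the MDIO core encodes regnum as
 * MII_ADDR_C45 | (devad << 16) | reg, which lines up with the MSCA
 * device-type and address fields below; for clause 22 the regnum is just
 * the 5-bit register number (noted here based on the common mdiobus
 * encoding, for readability).
 */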
694 hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
695 if (regnum & MII_ADDR_C45) {
696 hwaddr |= regnum & GENMASK(21, 0);
697 cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
698 } else {
699 hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
700 cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL |
701 IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND;
702 }
703
704 data = ixgbe_msca_cmd(hw, cmd);
705 if (data < 0)
706 goto mii_bus_read_done;
707
708 /* For a clause 45 access the address cycle just completed, we still
709 * need to do the read command, otherwise just get the data
710 */
711 if (!(regnum & MII_ADDR_C45))
712 goto do_mii_bus_read;
713
714 cmd = hwaddr | IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND;
715 data = ixgbe_msca_cmd(hw, cmd);
716 if (data < 0)
717 goto mii_bus_read_done;
718
719 do_mii_bus_read:
720 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
721 data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0);
722
723 mii_bus_read_done:
724 hw->mac.ops.release_swfw_sync(hw, gssr);
725 return data;
726 }
727
728 /**
729 * ixgbe_mii_bus_write_generic - Write a clause 22/45 register with gssr flags
730 * @hw: pointer to hardware structure
731 * @addr: address
732 * @regnum: register number
733 * @val: value to write
734 * @gssr: semaphore flags to acquire
735 **/
736 static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr,
737 int regnum, u16 val, u32 gssr)
738 {
739 u32 hwaddr, cmd;
740 s32 err;
741
742 if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
743 return -EBUSY;
744
745 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val);
746
747 hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
748 if (regnum & MII_ADDR_C45) {
749 hwaddr |= regnum & GENMASK(21, 0);
750 cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
751 } else {
752 hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
753 cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
754 IXGBE_MSCA_MDI_COMMAND;
755 }
756
757 /* For clause 45 this is an address cycle, for clause 22 this is the
758 * entire transaction
759 */
760 err = ixgbe_msca_cmd(hw, cmd);
761 if (err < 0 || !(regnum & MII_ADDR_C45))
762 goto mii_bus_write_done;
763
764 cmd = hwaddr | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND;
765 err = ixgbe_msca_cmd(hw, cmd);
766
767 mii_bus_write_done:
768 hw->mac.ops.release_swfw_sync(hw, gssr);
769 return err;
770 }
771
772 /**
773 * ixgbe_mii_bus_read - Read a clause 22/45 register
774 * @bus: pointer to mii_bus structure
775 * @addr: address
776 * @regnum: register number
777 **/
778 static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
779 {
780 struct ixgbe_adapter *adapter = bus->priv;
781 struct ixgbe_hw *hw = &adapter->hw;
782 u32 gssr = hw->phy.phy_semaphore_mask;
783
784 return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
785 }
786
787 /**
788 * ixgbe_mii_bus_write - Write a clause 22/45 register
789 * @bus: pointer to mii_bus structure
790 * @addr: address
791 * @regnum: register number
792 * @val: value to write
793 **/
794 static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum,
795 u16 val)
796 {
797 struct ixgbe_adapter *adapter = bus->priv;
798 struct ixgbe_hw *hw = &adapter->hw;
799 u32 gssr = hw->phy.phy_semaphore_mask;
800
801 return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
802 }
803
804 /**
805 * ixgbe_x550em_a_mii_bus_read - Read a clause 22/45 register on x550em_a
806 * @bus: pointer to mii_bus structure
807 * @addr: address
808 * @regnum: register number
809 **/
810 static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr,
811 int regnum)
812 {
813 struct ixgbe_adapter *adapter = bus->priv;
814 struct ixgbe_hw *hw = &adapter->hw;
815 u32 gssr = hw->phy.phy_semaphore_mask;
816
817 gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
818 return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
819 }
820
821 /**
822 * ixgbe_x550em_a_mii_bus_write - Write a clause 22/45 register on x550em_a
823 * @bus: pointer to mii_bus structure
824 * @addr: address
825 * @regnum: register number
826 * @val: value to write
827 **/
828 static s32 ixgbe_x550em_a_mii_bus_write(struct mii_bus *bus, int addr,
829 int regnum, u16 val)
830 {
831 struct ixgbe_adapter *adapter = bus->priv;
832 struct ixgbe_hw *hw = &adapter->hw;
833 u32 gssr = hw->phy.phy_semaphore_mask;
834
835 gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
836 return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
837 }
838
839 /**
840 * ixgbe_get_first_secondary_devfn - get first device downstream of root port
841 * @devfn: PCI_DEVFN of root port on domain 0, bus 0
842 *
843 * Returns pci_dev pointer to PCI_DEVFN(0, 0) on subordinate side of root
844 * on domain 0, bus 0, devfn = 'devfn'
845 **/
846 static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)
847 {
848 struct pci_dev *rp_pdev;
849 int bus;
850
851 rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
852 if (rp_pdev && rp_pdev->subordinate) {
853 bus = rp_pdev->subordinate->number;
854 return pci_get_domain_bus_and_slot(0, bus, 0);
855 }
856
857 return NULL;
858 }
859
860 /**
861 * ixgbe_x550em_a_has_mii - is this the first ixgbe x550em_a PCI function?
862 * @hw: pointer to hardware structure
863 *
864 * Returns true if hw points to lowest numbered PCI B:D.F x550_em_a device in
865 * the SoC. There are up to 4 MACs sharing a single MDIO bus on the x550em_a,
866 * but we only want to register one MDIO bus.
867 **/
868 static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
869 {
870 struct ixgbe_adapter *adapter = hw->back;
871 struct pci_dev *pdev = adapter->pdev;
872 struct pci_dev *func0_pdev;
873
874 /* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices
875 * are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0.
876 * It's not valid for function 0 to be disabled while function 1 is up,
877 * so the lowest numbered ixgbe dev will be device 0 function 0 on one
878 * of those two root ports
879 */
880 func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));
881 if (func0_pdev) {
882 if (func0_pdev == pdev)
883 return true;
884 else
885 return false;
886 }
887 func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));
888 if (func0_pdev == pdev)
889 return true;
890
891 return false;
892 }
893
894 /**
895 * ixgbe_mii_bus_init - mii_bus structure setup
896 * @hw: pointer to hardware structure
897 *
898 * Returns 0 on success, negative on failure
899 *
900 * ixgbe_mii_bus_init initializes a mii_bus structure in adapter
901 **/
902 s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
903 {
904 struct ixgbe_adapter *adapter = hw->back;
905 struct pci_dev *pdev = adapter->pdev;
906 struct device *dev = &adapter->netdev->dev;
907 struct mii_bus *bus;
908 int err = -ENODEV;
909
910 bus = devm_mdiobus_alloc(dev);
911 if (!bus)
912 return -ENOMEM;
913
914 switch (hw->device_id) {
915 /* C3000 SoCs */
916 case IXGBE_DEV_ID_X550EM_A_KR:
917 case IXGBE_DEV_ID_X550EM_A_KR_L:
918 case IXGBE_DEV_ID_X550EM_A_SFP_N:
919 case IXGBE_DEV_ID_X550EM_A_SGMII:
920 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
921 case IXGBE_DEV_ID_X550EM_A_10G_T:
922 case IXGBE_DEV_ID_X550EM_A_SFP:
923 case IXGBE_DEV_ID_X550EM_A_1G_T:
924 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
925 if (!ixgbe_x550em_a_has_mii(hw))
926 goto ixgbe_no_mii_bus;
927 bus->read = &ixgbe_x550em_a_mii_bus_read;
928 bus->write = &ixgbe_x550em_a_mii_bus_write;
929 break;
930 default:
931 bus->read = &ixgbe_mii_bus_read;
932 bus->write = &ixgbe_mii_bus_write;
933 break;
934 }
935
936 /* Use the position of the device in the PCI hierarchy as the id */
937 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name,
938 pci_name(pdev));
939
940 bus->name = "ixgbe-mdio";
941 bus->priv = adapter;
942 bus->parent = dev;
943 bus->phy_mask = GENMASK(31, 0);
944
945 /* Support clause 22/45 natively. ixgbe_probe() sets MDIO_EMULATE_C22;
946 * unfortunately, that causes some clause 22 frames to be sent with
947 * clause 45 addressing. We don't want that.
948 */
949 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
950
951 err = mdiobus_register(bus);
952 if (!err) {
953 adapter->mii_bus = bus;
954 return 0;
955 }
956
957 ixgbe_no_mii_bus:
958 devm_mdiobus_free(dev, bus);
959 return err;
960 }
961
962 /**
963 * ixgbe_setup_phy_link_generic - Set and restart autoneg
964 * @hw: pointer to hardware structure
965 *
966 * Restarts autonegotiation on the PHY and waits for completion.
967 **/
968 s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
969 {
970 s32 status = 0;
971 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
972 bool autoneg = false;
973 ixgbe_link_speed speed;
974
975 ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
976
977 /* Set or unset auto-negotiation 10G advertisement */
978 hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg);
979
980 autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
981 if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) &&
982 (speed & IXGBE_LINK_SPEED_10GB_FULL))
983 autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
984
985 hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg);
986
987 hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
988 MDIO_MMD_AN, &autoneg_reg);
989
990 if (hw->mac.type == ixgbe_mac_X550) {
991 /* Set or unset auto-negotiation 5G advertisement */
992 autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
993 if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
994 (speed & IXGBE_LINK_SPEED_5GB_FULL))
995 autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
996
997 /* Set or unset auto-negotiation 2.5G advertisement */
998 autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
999 if ((hw->phy.autoneg_advertised &
1000 IXGBE_LINK_SPEED_2_5GB_FULL) &&
1001 (speed & IXGBE_LINK_SPEED_2_5GB_FULL))
1002 autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
1003 }
1004
1005 /* Set or unset auto-negotiation 1G advertisement */
1006 autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
1007 if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) &&
1008 (speed & IXGBE_LINK_SPEED_1GB_FULL))
1009 autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
1010
1011 hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
1012 MDIO_MMD_AN, autoneg_reg);
1013
1014 /* Set or unset auto-negotiation 100M advertisement */
1015 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg);
1016
1017 autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF);
1018 if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) &&
1019 (speed & IXGBE_LINK_SPEED_100_FULL))
1020 autoneg_reg |= ADVERTISE_100FULL;
1021
1022 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg);
1023
1024 /* Blocked by MNG FW so don't reset PHY */
1025 if (ixgbe_check_reset_blocked(hw))
1026 return 0;
1027
1028 /* Restart PHY autonegotiation and wait for completion */
1029 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
1030 MDIO_MMD_AN, &autoneg_reg);
1031
1032 autoneg_reg |= MDIO_AN_CTRL1_RESTART;
1033
1034 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
1035 MDIO_MMD_AN, autoneg_reg);
1036
1037 return status;
1038 }
1039
1040 /**
1041 * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
1042 * @hw: pointer to hardware structure
1043 * @speed: new link speed
1044 * @autoneg_wait_to_complete: unused
1045 **/
1046 s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
1047 ixgbe_link_speed speed,
1048 bool autoneg_wait_to_complete)
1049 {
1050 /* Clear autoneg_advertised and set new values based on input link
1051 * speed.
1052 */
1053 hw->phy.autoneg_advertised = 0;
1054
1055 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
1056 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
1057
1058 if (speed & IXGBE_LINK_SPEED_5GB_FULL)
1059 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
1060
1061 if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
1062 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
1063
1064 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
1065 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
1066
1067 if (speed & IXGBE_LINK_SPEED_100_FULL)
1068 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
1069
1070 if (speed & IXGBE_LINK_SPEED_10_FULL)
1071 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
1072
1073 /* Setup link based on the new speed settings */
1074 if (hw->phy.ops.setup_link)
1075 hw->phy.ops.setup_link(hw);
1076
1077 return 0;
1078 }
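/* Illustrative call (a sketch, not taken from this driver): advertise only
 * 1G and 100M full duplex and let the PHY renegotiate:
 *
 *	ixgbe_setup_phy_link_speed_generic(hw, IXGBE_LINK_SPEED_1GB_FULL |
 *					   IXGBE_LINK_SPEED_100_FULL, false);
 */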
1079
1080 /**
1081 * ixgbe_get_copper_speeds_supported - Get copper link speed from phy
1082 * @hw: pointer to hardware structure
1083 *
1084 * Determines the supported link capabilities by reading the PHY auto
1085 * negotiation register.
1086 */
1087 static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
1088 {
1089 u16 speed_ability;
1090 s32 status;
1091
1092 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
1093 &speed_ability);
1094 if (status)
1095 return status;
1096
1097 if (speed_ability & MDIO_SPEED_10G)
1098 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
1099 if (speed_ability & MDIO_PMA_SPEED_1000)
1100 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
1101 if (speed_ability & MDIO_PMA_SPEED_100)
1102 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
1103
1104 switch (hw->mac.type) {
1105 case ixgbe_mac_X550:
1106 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
1107 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
1108 break;
1109 case ixgbe_mac_X550EM_x:
1110 case ixgbe_mac_x550em_a:
1111 hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
1112 break;
1113 default:
1114 break;
1115 }
1116
1117 return 0;
1118 }
1119
1120 /**
1121 * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
1122 * @hw: pointer to hardware structure
1123 * @speed: pointer to link speed
1124 * @autoneg: pointer to the autoneg flag; set true when autoneg is supported
1125 */
1126 s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
1127 ixgbe_link_speed *speed,
1128 bool *autoneg)
1129 {
1130 s32 status = 0;
1131
1132 *autoneg = true;
1133 if (!hw->phy.speeds_supported)
1134 status = ixgbe_get_copper_speeds_supported(hw);
1135
1136 *speed = hw->phy.speeds_supported;
1137 return status;
1138 }
1139
1140 /**
1141 * ixgbe_check_phy_link_tnx - Determine link and speed status
1142 * @hw: pointer to hardware structure
1143 * @speed: link speed
1144 * @link_up: status of link
1145 *
1146 * Reads the VS1 register to determine if link is up and the current speed for
1147 * the PHY.
1148 **/
1149 s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
1150 bool *link_up)
1151 {
1152 s32 status;
1153 u32 time_out;
1154 u32 max_time_out = 10;
1155 u16 phy_link = 0;
1156 u16 phy_speed = 0;
1157 u16 phy_data = 0;
1158
1159 /* Initialize speed and link to default case */
1160 *link_up = false;
1161 *speed = IXGBE_LINK_SPEED_10GB_FULL;
1162
1163 /*
1164 * Check current speed and link status of the PHY register.
1165 * This is a vendor specific register and may have to
1166 * be changed for other copper PHYs.
1167 */
1168 for (time_out = 0; time_out < max_time_out; time_out++) {
1169 udelay(10);
1170 status = hw->phy.ops.read_reg(hw,
1171 MDIO_STAT1,
1172 MDIO_MMD_VEND1,
1173 &phy_data);
1174 phy_link = phy_data &
1175 IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
1176 phy_speed = phy_data &
1177 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
1178 if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
1179 *link_up = true;
1180 if (phy_speed ==
1181 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
1182 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1183 break;
1184 }
1185 }
1186
1187 return status;
1188 }
1189
1190 /**
1191 * ixgbe_setup_phy_link_tnx - Set and restart autoneg
1192 * @hw: pointer to hardware structure
1193 *
1194 * Restarts autonegotiation on the PHY and waits for completion.
1195 * This function always returns success; this is necessary since
1196 * it is called via a function pointer that could call other
1197 * functions that could return an error.
1198 **/
1199 s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
1200 {
1201 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
1202 bool autoneg = false;
1203 ixgbe_link_speed speed;
1204
1205 ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
1206
1207 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
1208 /* Set or unset auto-negotiation 10G advertisement */
1209 hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
1210 MDIO_MMD_AN,
1211 &autoneg_reg);
1212
1213 autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
1214 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
1215 autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
1216
1217 hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
1218 MDIO_MMD_AN,
1219 autoneg_reg);
1220 }
1221
1222 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
1223 /* Set or unset auto-negotiation 1G advertisement */
1224 hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
1225 MDIO_MMD_AN,
1226 &autoneg_reg);
1227
1228 autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
1229 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
1230 autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
1231
1232 hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
1233 MDIO_MMD_AN,
1234 autoneg_reg);
1235 }
1236
1237 if (speed & IXGBE_LINK_SPEED_100_FULL) {
1238 /* Set or unset auto-negotiation 100M advertisement */
1239 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
1240 MDIO_MMD_AN,
1241 &autoneg_reg);
1242
1243 autoneg_reg &= ~(ADVERTISE_100FULL |
1244 ADVERTISE_100HALF);
1245 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
1246 autoneg_reg |= ADVERTISE_100FULL;
1247
1248 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
1249 MDIO_MMD_AN,
1250 autoneg_reg);
1251 }
1252
1253 /* Blocked by MNG FW so don't reset PHY */
1254 if (ixgbe_check_reset_blocked(hw))
1255 return 0;
1256
1257 /* Restart PHY autonegotiation and wait for completion */
1258 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
1259 MDIO_MMD_AN, &autoneg_reg);
1260
1261 autoneg_reg |= MDIO_AN_CTRL1_RESTART;
1262
1263 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
1264 MDIO_MMD_AN, autoneg_reg);
1265 return 0;
1266 }
1267
1268 /**
1269 * ixgbe_reset_phy_nl - Performs a PHY reset
1270 * @hw: pointer to hardware structure
1271 **/
1272 s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
1273 {
1274 u16 phy_offset, control, eword, edata, block_crc;
1275 bool end_data = false;
1276 u16 list_offset, data_offset;
1277 u16 phy_data = 0;
1278 s32 ret_val;
1279 u32 i;
1280
1281 /* Blocked by MNG FW so bail */
1282 if (ixgbe_check_reset_blocked(hw))
1283 return 0;
1284
1285 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
1286
1287 /* reset the PHY and poll for completion */
1288 hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
1289 (phy_data | MDIO_CTRL1_RESET));
1290
1291 for (i = 0; i < 100; i++) {
1292 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
1293 &phy_data);
1294 if ((phy_data & MDIO_CTRL1_RESET) == 0)
1295 break;
1296 usleep_range(10000, 20000);
1297 }
1298
1299 if ((phy_data & MDIO_CTRL1_RESET) != 0) {
1300 hw_dbg(hw, "PHY reset did not complete.\n");
1301 return IXGBE_ERR_PHY;
1302 }
1303
1304 /* Get init offsets */
1305 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
1306 &data_offset);
1307 if (ret_val)
1308 return ret_val;
1309
1310 ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
1311 data_offset++;
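/* Each word of the init sequence carries a control opcode in its upper bits
 * (IXGBE_CONTROL_MASK_NL/IXGBE_CONTROL_SHIFT_NL) and opcode data in the
 * lower bits (IXGBE_DATA_MASK_NL): DELAY sleeps, DATA writes a block of PHY
 * registers, CONTROL marks the start or end of the sequence.
 */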
1312 while (!end_data) {
1313 /*
1314 * Read control word from PHY init contents offset
1315 */
1316 ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
1317 if (ret_val)
1318 goto err_eeprom;
1319 control = (eword & IXGBE_CONTROL_MASK_NL) >>
1320 IXGBE_CONTROL_SHIFT_NL;
1321 edata = eword & IXGBE_DATA_MASK_NL;
1322 switch (control) {
1323 case IXGBE_DELAY_NL:
1324 data_offset++;
1325 hw_dbg(hw, "DELAY: %d MS\n", edata);
1326 usleep_range(edata * 1000, edata * 2000);
1327 break;
1328 case IXGBE_DATA_NL:
1329 hw_dbg(hw, "DATA:\n");
1330 data_offset++;
1331 ret_val = hw->eeprom.ops.read(hw, data_offset++,
1332 &phy_offset);
1333 if (ret_val)
1334 goto err_eeprom;
1335 for (i = 0; i < edata; i++) {
1336 ret_val = hw->eeprom.ops.read(hw, data_offset,
1337 &eword);
1338 if (ret_val)
1339 goto err_eeprom;
1340 hw->phy.ops.write_reg(hw, phy_offset,
1341 MDIO_MMD_PMAPMD, eword);
1342 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
1343 phy_offset);
1344 data_offset++;
1345 phy_offset++;
1346 }
1347 break;
1348 case IXGBE_CONTROL_NL:
1349 data_offset++;
1350 hw_dbg(hw, "CONTROL:\n");
1351 if (edata == IXGBE_CONTROL_EOL_NL) {
1352 hw_dbg(hw, "EOL\n");
1353 end_data = true;
1354 } else if (edata == IXGBE_CONTROL_SOL_NL) {
1355 hw_dbg(hw, "SOL\n");
1356 } else {
1357 hw_dbg(hw, "Bad control value\n");
1358 return IXGBE_ERR_PHY;
1359 }
1360 break;
1361 default:
1362 hw_dbg(hw, "Bad control type\n");
1363 return IXGBE_ERR_PHY;
1364 }
1365 }
1366
1367 return ret_val;
1368
1369 err_eeprom:
1370 hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
1371 return IXGBE_ERR_PHY;
1372 }
1373
1374 /**
1375 * ixgbe_identify_module_generic - Identifies module type
1376 * @hw: pointer to hardware structure
1377 *
1378 * Determines HW type and calls appropriate function.
1379 **/
1380 s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
1381 {
1382 switch (hw->mac.ops.get_media_type(hw)) {
1383 case ixgbe_media_type_fiber:
1384 return ixgbe_identify_sfp_module_generic(hw);
1385 case ixgbe_media_type_fiber_qsfp:
1386 return ixgbe_identify_qsfp_module_generic(hw);
1387 default:
1388 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1389 return IXGBE_ERR_SFP_NOT_PRESENT;
1390 }
1391
1392 return IXGBE_ERR_SFP_NOT_PRESENT;
1393 }
1394
1395 /**
1396 * ixgbe_identify_sfp_module_generic - Identifies SFP modules
1397 * @hw: pointer to hardware structure
1398 *
1399 * Searches for and identifies the SFP module and assigns appropriate PHY type.
1400 **/
1401 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1402 {
1403 struct ixgbe_adapter *adapter = hw->back;
1404 s32 status;
1405 u32 vendor_oui = 0;
1406 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
1407 u8 identifier = 0;
1408 u8 comp_codes_1g = 0;
1409 u8 comp_codes_10g = 0;
1410 u8 oui_bytes[3] = {0, 0, 0};
1411 u8 cable_tech = 0;
1412 u8 cable_spec = 0;
1413 u16 enforce_sfp = 0;
1414
1415 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
1416 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1417 return IXGBE_ERR_SFP_NOT_PRESENT;
1418 }
1419
1420 /* LAN ID is needed for sfp_type determination */
1421 hw->mac.ops.set_lan_id(hw);
1422
1423 status = hw->phy.ops.read_i2c_eeprom(hw,
1424 IXGBE_SFF_IDENTIFIER,
1425 &identifier);
1426
1427 if (status)
1428 goto err_read_i2c_eeprom;
1429
1430 if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
1431 hw->phy.type = ixgbe_phy_sfp_unsupported;
1432 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1433 }
1434 status = hw->phy.ops.read_i2c_eeprom(hw,
1435 IXGBE_SFF_1GBE_COMP_CODES,
1436 &comp_codes_1g);
1437
1438 if (status)
1439 goto err_read_i2c_eeprom;
1440
1441 status = hw->phy.ops.read_i2c_eeprom(hw,
1442 IXGBE_SFF_10GBE_COMP_CODES,
1443 &comp_codes_10g);
1444
1445 if (status)
1446 goto err_read_i2c_eeprom;
1447 status = hw->phy.ops.read_i2c_eeprom(hw,
1448 IXGBE_SFF_CABLE_TECHNOLOGY,
1449 &cable_tech);
1450
1451 if (status)
1452 goto err_read_i2c_eeprom;
1453
1454 /* ID Module
1455 * =========
1456 * 0 SFP_DA_CU
1457 * 1 SFP_SR
1458 * 2 SFP_LR
1459 * 3 SFP_DA_CORE0 - 82599-specific
1460 * 4 SFP_DA_CORE1 - 82599-specific
1461 * 5 SFP_SR/LR_CORE0 - 82599-specific
1462 * 6 SFP_SR/LR_CORE1 - 82599-specific
1463 * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
1464 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
1465 * 9 SFP_1g_cu_CORE0 - 82599-specific
1466 * 10 SFP_1g_cu_CORE1 - 82599-specific
1467 * 11 SFP_1g_sx_CORE0 - 82599-specific
1468 * 12 SFP_1g_sx_CORE1 - 82599-specific
1469 */
1470 if (hw->mac.type == ixgbe_mac_82598EB) {
1471 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1472 hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
1473 else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
1474 hw->phy.sfp_type = ixgbe_sfp_type_sr;
1475 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
1476 hw->phy.sfp_type = ixgbe_sfp_type_lr;
1477 else
1478 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
1479 } else {
1480 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
1481 if (hw->bus.lan_id == 0)
1482 hw->phy.sfp_type =
1483 ixgbe_sfp_type_da_cu_core0;
1484 else
1485 hw->phy.sfp_type =
1486 ixgbe_sfp_type_da_cu_core1;
1487 } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
1488 hw->phy.ops.read_i2c_eeprom(
1489 hw, IXGBE_SFF_CABLE_SPEC_COMP,
1490 &cable_spec);
1491 if (cable_spec &
1492 IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
1493 if (hw->bus.lan_id == 0)
1494 hw->phy.sfp_type =
1495 ixgbe_sfp_type_da_act_lmt_core0;
1496 else
1497 hw->phy.sfp_type =
1498 ixgbe_sfp_type_da_act_lmt_core1;
1499 } else {
1500 hw->phy.sfp_type =
1501 ixgbe_sfp_type_unknown;
1502 }
1503 } else if (comp_codes_10g &
1504 (IXGBE_SFF_10GBASESR_CAPABLE |
1505 IXGBE_SFF_10GBASELR_CAPABLE)) {
1506 if (hw->bus.lan_id == 0)
1507 hw->phy.sfp_type =
1508 ixgbe_sfp_type_srlr_core0;
1509 else
1510 hw->phy.sfp_type =
1511 ixgbe_sfp_type_srlr_core1;
1512 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
1513 if (hw->bus.lan_id == 0)
1514 hw->phy.sfp_type =
1515 ixgbe_sfp_type_1g_cu_core0;
1516 else
1517 hw->phy.sfp_type =
1518 ixgbe_sfp_type_1g_cu_core1;
1519 } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
1520 if (hw->bus.lan_id == 0)
1521 hw->phy.sfp_type =
1522 ixgbe_sfp_type_1g_sx_core0;
1523 else
1524 hw->phy.sfp_type =
1525 ixgbe_sfp_type_1g_sx_core1;
1526 } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
1527 if (hw->bus.lan_id == 0)
1528 hw->phy.sfp_type =
1529 ixgbe_sfp_type_1g_lx_core0;
1530 else
1531 hw->phy.sfp_type =
1532 ixgbe_sfp_type_1g_lx_core1;
1533 } else {
1534 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
1535 }
1536 }
1537
1538 if (hw->phy.sfp_type != stored_sfp_type)
1539 hw->phy.sfp_setup_needed = true;
1540
1541 /* Determine if the SFP+ PHY is dual speed or not. */
1542 hw->phy.multispeed_fiber = false;
1543 if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
1544 (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
1545 ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
1546 (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
1547 hw->phy.multispeed_fiber = true;
1548
1549 /* Determine PHY vendor */
1550 if (hw->phy.type != ixgbe_phy_nl) {
1551 hw->phy.id = identifier;
1552 status = hw->phy.ops.read_i2c_eeprom(hw,
1553 IXGBE_SFF_VENDOR_OUI_BYTE0,
1554 &oui_bytes[0]);
1555
1556 if (status != 0)
1557 goto err_read_i2c_eeprom;
1558
1559 status = hw->phy.ops.read_i2c_eeprom(hw,
1560 IXGBE_SFF_VENDOR_OUI_BYTE1,
1561 &oui_bytes[1]);
1562
1563 if (status != 0)
1564 goto err_read_i2c_eeprom;
1565
1566 status = hw->phy.ops.read_i2c_eeprom(hw,
1567 IXGBE_SFF_VENDOR_OUI_BYTE2,
1568 &oui_bytes[2]);
1569
1570 if (status != 0)
1571 goto err_read_i2c_eeprom;
1572
1573 vendor_oui =
1574 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
1575 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
1576 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
1577
1578 switch (vendor_oui) {
1579 case IXGBE_SFF_VENDOR_OUI_TYCO:
1580 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1581 hw->phy.type =
1582 ixgbe_phy_sfp_passive_tyco;
1583 break;
1584 case IXGBE_SFF_VENDOR_OUI_FTL:
1585 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
1586 hw->phy.type = ixgbe_phy_sfp_ftl_active;
1587 else
1588 hw->phy.type = ixgbe_phy_sfp_ftl;
1589 break;
1590 case IXGBE_SFF_VENDOR_OUI_AVAGO:
1591 hw->phy.type = ixgbe_phy_sfp_avago;
1592 break;
1593 case IXGBE_SFF_VENDOR_OUI_INTEL:
1594 hw->phy.type = ixgbe_phy_sfp_intel;
1595 break;
1596 default:
1597 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1598 hw->phy.type =
1599 ixgbe_phy_sfp_passive_unknown;
1600 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
1601 hw->phy.type =
1602 ixgbe_phy_sfp_active_unknown;
1603 else
1604 hw->phy.type = ixgbe_phy_sfp_unknown;
1605 break;
1606 }
1607 }
1608
1609 /* Allow any DA cable vendor */
1610 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
1611 IXGBE_SFF_DA_ACTIVE_CABLE))
1612 return 0;
1613
1614 /* Verify supported 1G SFP modules */
1615 if (comp_codes_10g == 0 &&
1616 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1617 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1618 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1619 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1620 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1621 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1622 hw->phy.type = ixgbe_phy_sfp_unsupported;
1623 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1624 }
1625
1626 /* Anything else 82598-based is supported */
1627 if (hw->mac.type == ixgbe_mac_82598EB)
1628 return 0;
1629
1630 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
1631 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
1632 !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1633 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1634 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1635 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1636 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1637 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1638 /* Make sure we're a supported PHY type */
1639 if (hw->phy.type == ixgbe_phy_sfp_intel)
1640 return 0;
1641 if (hw->allow_unsupported_sfp) {
1642 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
1643 return 0;
1644 }
1645 hw_dbg(hw, "SFP+ module not supported\n");
1646 hw->phy.type = ixgbe_phy_sfp_unsupported;
1647 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1648 }
1649 return 0;
1650
1651 err_read_i2c_eeprom:
1652 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1653 if (hw->phy.type != ixgbe_phy_nl) {
1654 hw->phy.id = 0;
1655 hw->phy.type = ixgbe_phy_unknown;
1656 }
1657 return IXGBE_ERR_SFP_NOT_PRESENT;
1658 }
1659
1660 /**
1661 * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
1662 * @hw: pointer to hardware structure
1663 *
1664 * Searches for and identifies the QSFP module and assigns appropriate PHY type
1665 **/
1666 static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
1667 {
1668 struct ixgbe_adapter *adapter = hw->back;
1669 s32 status;
1670 u32 vendor_oui = 0;
1671 enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
1672 u8 identifier = 0;
1673 u8 comp_codes_1g = 0;
1674 u8 comp_codes_10g = 0;
1675 u8 oui_bytes[3] = {0, 0, 0};
1676 u16 enforce_sfp = 0;
1677 u8 connector = 0;
1678 u8 cable_length = 0;
1679 u8 device_tech = 0;
1680 bool active_cable = false;
1681
1682 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
1683 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1684 return IXGBE_ERR_SFP_NOT_PRESENT;
1685 }
1686
1687 /* LAN ID is needed for sfp_type determination */
1688 hw->mac.ops.set_lan_id(hw);
1689
1690 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
1691 &identifier);
1692
1693 if (status != 0)
1694 goto err_read_i2c_eeprom;
1695
1696 if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
1697 hw->phy.type = ixgbe_phy_sfp_unsupported;
1698 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1699 }
1700
1701 hw->phy.id = identifier;
1702
1703 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
1704 &comp_codes_10g);
1705
1706 if (status != 0)
1707 goto err_read_i2c_eeprom;
1708
1709 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
1710 &comp_codes_1g);
1711
1712 if (status != 0)
1713 goto err_read_i2c_eeprom;
1714
1715 if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
1716 hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
1717 if (hw->bus.lan_id == 0)
1718 hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
1719 else
1720 hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
1721 } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
1722 IXGBE_SFF_10GBASELR_CAPABLE)) {
1723 if (hw->bus.lan_id == 0)
1724 hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
1725 else
1726 hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
1727 } else {
1728 if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
1729 active_cable = true;
1730
1731 if (!active_cable) {
1732 /* check for active DA cables that pre-date
1733 * SFF-8436 v3.6
1734 */
1735 hw->phy.ops.read_i2c_eeprom(hw,
1736 IXGBE_SFF_QSFP_CONNECTOR,
1737 &connector);
1738
1739 hw->phy.ops.read_i2c_eeprom(hw,
1740 IXGBE_SFF_QSFP_CABLE_LENGTH,
1741 &cable_length);
1742
1743 hw->phy.ops.read_i2c_eeprom(hw,
1744 IXGBE_SFF_QSFP_DEVICE_TECH,
1745 &device_tech);
1746
1747 if ((connector ==
1748 IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
1749 (cable_length > 0) &&
1750 ((device_tech >> 4) ==
1751 IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
1752 active_cable = true;
1753 }
1754
1755 if (active_cable) {
1756 hw->phy.type = ixgbe_phy_qsfp_active_unknown;
1757 if (hw->bus.lan_id == 0)
1758 hw->phy.sfp_type =
1759 ixgbe_sfp_type_da_act_lmt_core0;
1760 else
1761 hw->phy.sfp_type =
1762 ixgbe_sfp_type_da_act_lmt_core1;
1763 } else {
1764 /* unsupported module type */
1765 hw->phy.type = ixgbe_phy_sfp_unsupported;
1766 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1767 }
1768 }
1769
1770 if (hw->phy.sfp_type != stored_sfp_type)
1771 hw->phy.sfp_setup_needed = true;
1772
1773 /* Determine if the QSFP+ PHY is dual speed or not. */
1774 hw->phy.multispeed_fiber = false;
1775 if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
1776 (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
1777 ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
1778 (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
1779 hw->phy.multispeed_fiber = true;
1780
1781 /* Determine PHY vendor for optical modules */
1782 if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
1783 IXGBE_SFF_10GBASELR_CAPABLE)) {
1784 status = hw->phy.ops.read_i2c_eeprom(hw,
1785 IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
1786 &oui_bytes[0]);
1787
1788 if (status != 0)
1789 goto err_read_i2c_eeprom;
1790
1791 status = hw->phy.ops.read_i2c_eeprom(hw,
1792 IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
1793 &oui_bytes[1]);
1794
1795 if (status != 0)
1796 goto err_read_i2c_eeprom;
1797
1798 status = hw->phy.ops.read_i2c_eeprom(hw,
1799 IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
1800 &oui_bytes[2]);
1801
1802 if (status != 0)
1803 goto err_read_i2c_eeprom;
1804
1805 vendor_oui =
1806 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
1807 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
1808 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
1809
1810 if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
1811 hw->phy.type = ixgbe_phy_qsfp_intel;
1812 else
1813 hw->phy.type = ixgbe_phy_qsfp_unknown;
1814
1815 hw->mac.ops.get_device_caps(hw, &enforce_sfp);
1816 if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
1817 /* Make sure we're a supported PHY type */
1818 if (hw->phy.type == ixgbe_phy_qsfp_intel)
1819 return 0;
1820 if (hw->allow_unsupported_sfp) {
1821 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
1822 return 0;
1823 }
1824 hw_dbg(hw, "QSFP module not supported\n");
1825 hw->phy.type = ixgbe_phy_sfp_unsupported;
1826 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1827 }
1828 return 0;
1829 }
1830 return 0;
1831
1832 err_read_i2c_eeprom:
1833 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1834 hw->phy.id = 0;
1835 hw->phy.type = ixgbe_phy_unknown;
1836
1837 return IXGBE_ERR_SFP_NOT_PRESENT;
1838 }
1839
1840 /**
1841 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
1842 * @hw: pointer to hardware structure
1843 * @list_offset: offset to the SFP ID list
1844 * @data_offset: offset to the SFP data block
1845 *
1846 * Checks the MAC's EEPROM to see if it supports a given SFP+ module type;
1847 * if so, it returns the offsets to the PHY init sequence block.
1848 **/
1849 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1850 u16 *list_offset,
1851 u16 *data_offset)
1852 {
1853 u16 sfp_id;
1854 u16 sfp_type = hw->phy.sfp_type;
1855
1856 if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
1857 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1858
1859 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
1860 return IXGBE_ERR_SFP_NOT_PRESENT;
1861
1862 if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
1863 (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
1864 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1865
1866 /*
1867 * Active limiting DA cables and 1G PHYs must be initialized as
1868 * SR modules
1869 */
1870 if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
1871 sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1872 sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1873 sfp_type == ixgbe_sfp_type_1g_sx_core0)
1874 sfp_type = ixgbe_sfp_type_srlr_core0;
1875 else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
1876 sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
1877 sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1878 sfp_type == ixgbe_sfp_type_1g_sx_core1)
1879 sfp_type = ixgbe_sfp_type_srlr_core1;
1880
1881 /* Read offset to PHY init contents */
1882 if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
1883 hw_err(hw, "eeprom read at %d failed\n",
1884 IXGBE_PHY_INIT_OFFSET_NL);
1885 return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
1886 }
1887
1888 if ((!*list_offset) || (*list_offset == 0xFFFF))
1889 return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
1890
1891 /* Shift offset to first ID word */
1892 (*list_offset)++;
1893
1894 /*
1895 * Find the matching SFP ID in the EEPROM
1896 * and program the init sequence
1897 */
1898 if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
1899 goto err_phy;
1900
1901 while (sfp_id != IXGBE_PHY_INIT_END_NL) {
1902 if (sfp_id == sfp_type) {
1903 (*list_offset)++;
1904 if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
1905 goto err_phy;
1906 if ((!*data_offset) || (*data_offset == 0xFFFF)) {
1907 hw_dbg(hw, "SFP+ module not supported\n");
1908 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1909 } else {
1910 break;
1911 }
1912 } else {
1913 (*list_offset) += 2;
1914 if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
1915 goto err_phy;
1916 }
1917 }
1918
1919 if (sfp_id == IXGBE_PHY_INIT_END_NL) {
1920 hw_dbg(hw, "No matching SFP+ module found\n");
1921 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1922 }
1923
1924 return 0;
1925
1926 err_phy:
1927 hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
1928 return IXGBE_ERR_PHY;
1929 }
1930
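/* Illustrative caller sketch (hypothetical, not code from this file): a
 * MAC-specific SFP setup routine would typically fetch the offsets and then
 * walk the init block starting just past data_offset, roughly:
 *
 *	u16 list_offset, data_offset, data_value;
 *
 *	if (ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
 *						 &data_offset))
 *		return IXGBE_ERR_SFP_NOT_SUPPORTED;
 *	hw->eeprom.ops.read(hw, ++data_offset, &data_value);
 *	(apply each control word to the PHY until the block's end marker)
 */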
1931 /**
1932 * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface
1933 * @hw: pointer to hardware structure
1934 * @byte_offset: EEPROM byte offset to read
1935 * @eeprom_data: value read
1936 *
1937 * Performs byte read operation to SFP module's EEPROM over I2C interface.
1938 **/
1939 s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1940 u8 *eeprom_data)
1941 {
1942 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1943 IXGBE_I2C_EEPROM_DEV_ADDR,
1944 eeprom_data);
1945 }
1946
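/* Illustrative usage (hypothetical, not code from this file): module
 * identification usually begins with a read of the SFF identifier byte:
 *
 *	u8 identifier;
 *
 *	if (hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
 *					&identifier))
 *		return IXGBE_ERR_SFP_NOT_PRESENT;
 */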
1947 /**
1948 * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
1949 * @hw: pointer to hardware structure
1950 * @byte_offset: byte offset at address 0xA2
1951 * @sff8472_data: value read
1952 *
1953 * Performs byte read operation to SFP module's SFF-8472 data over I2C
1954 **/
1955 s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
1956 u8 *sff8472_data)
1957 {
1958 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1959 IXGBE_I2C_EEPROM_DEV_ADDR2,
1960 sff8472_data);
1961 }
1962
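/* Illustrative usage (hypothetical): before trusting SFF-8472 diagnostics, a
 * caller can check the module's declared compliance revision:
 *
 *	u8 sff8472_rev;
 *
 *	hw->phy.ops.read_i2c_sff8472(hw, IXGBE_SFF_SFF_8472_COMP,
 *				     &sff8472_rev);
 */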
1963 /**
1964 * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
1965 * @hw: pointer to hardware structure
1966 * @byte_offset: EEPROM byte offset to write
1967 * @eeprom_data: value to write
1968 *
1969 * Performs byte write operation to SFP module's EEPROM over I2C interface.
1970 **/
1971 s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1972 u8 eeprom_data)
1973 {
1974 return hw->phy.ops.write_i2c_byte(hw, byte_offset,
1975 IXGBE_I2C_EEPROM_DEV_ADDR,
1976 eeprom_data);
1977 }
1978
1979 /**
1980 * ixgbe_is_sfp_probe - Returns true if SFP is being detected
1981 * @hw: pointer to hardware structure
1982 * @offset: eeprom offset to be read
1983 * @addr: I2C address to be read
1984 */
1985 static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
1986 {
1987 if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
1988 offset == IXGBE_SFF_IDENTIFIER &&
1989 hw->phy.sfp_type == ixgbe_sfp_type_not_present)
1990 return true;
1991 return false;
1992 }
1993
1994 /**
1995 * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
1996 * @hw: pointer to hardware structure
1997 * @byte_offset: byte offset to read
1998 * @dev_addr: device address
1999 * @data: value read
2000 * @lock: true if to take and release semaphore
2001 *
2002 * Performs byte read operation to SFP module's EEPROM over I2C interface at
2003 * a specified device address.
2004 */
2005 static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
2006 u8 dev_addr, u8 *data, bool lock)
2007 {
2008 s32 status;
2009 u32 max_retry = 10;
2010 u32 retry = 0;
2011 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2012 bool nack = true;
2013
2014 if (hw->mac.type >= ixgbe_mac_X550)
2015 max_retry = 3;
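	/* A read of the identifier byte while no module is recorded as
	 * present is treated as an SFP probe and gets its own retry count
	 * (IXGBE_SFP_DETECT_RETRIES), so an empty cage can be reported
	 * promptly.
	 */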
2016 if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
2017 max_retry = IXGBE_SFP_DETECT_RETRIES;
2018
2019 *data = 0;
2020
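	/* Transaction layout clocked out below (descriptive only):
	 *   START | dev_addr+W | ACK | byte_offset | ACK |
	 *   repeated START | dev_addr+R | ACK | data byte | NACK | STOP
	 * Any failed step clears the bus and restarts the whole transfer,
	 * bounded by max_retry.
	 */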
2021 do {
2022 if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
2023 return IXGBE_ERR_SWFW_SYNC;
2024
2025 ixgbe_i2c_start(hw);
2026
2027 /* Device Address and write indication */
2028 status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
2029 if (status != 0)
2030 goto fail;
2031
2032 status = ixgbe_get_i2c_ack(hw);
2033 if (status != 0)
2034 goto fail;
2035
2036 status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
2037 if (status != 0)
2038 goto fail;
2039
2040 status = ixgbe_get_i2c_ack(hw);
2041 if (status != 0)
2042 goto fail;
2043
2044 ixgbe_i2c_start(hw);
2045
2046 /* Device Address and read indication */
2047 status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
2048 if (status != 0)
2049 goto fail;
2050
2051 status = ixgbe_get_i2c_ack(hw);
2052 if (status != 0)
2053 goto fail;
2054
2055 status = ixgbe_clock_in_i2c_byte(hw, data);
2056 if (status != 0)
2057 goto fail;
2058
2059 status = ixgbe_clock_out_i2c_bit(hw, nack);
2060 if (status != 0)
2061 goto fail;
2062
2063 ixgbe_i2c_stop(hw);
2064 if (lock)
2065 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2066 return 0;
2067
2068 fail:
2069 ixgbe_i2c_bus_clear(hw);
2070 if (lock) {
2071 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2072 msleep(100);
2073 }
2074 retry++;
2075 if (retry < max_retry)
2076 hw_dbg(hw, "I2C byte read error - Retrying.\n");
2077 else
2078 hw_dbg(hw, "I2C byte read error.\n");
2079
2080 } while (retry < max_retry);
2081
2082 return status;
2083 }
2084
2085 /**
2086 * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
2087 * @hw: pointer to hardware structure
2088 * @byte_offset: byte offset to read
2089 * @dev_addr: device address
2090 * @data: value read
2091 *
2092 * Performs byte read operation to SFP module's EEPROM over I2C interface at
2093 * a specified device address.
2094 */
2095 s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
2096 u8 dev_addr, u8 *data)
2097 {
2098 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2099 data, true);
2100 }
2101
2102 /**
2103 * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C
2104 * @hw: pointer to hardware structure
2105 * @byte_offset: byte offset to read
2106 * @dev_addr: device address
2107 * @data: value read
2108 *
2109 * Performs byte read operation to SFP module's EEPROM over I2C interface at
2110 * a specified device address.
2111 */
2112 s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
2113 u8 dev_addr, u8 *data)
2114 {
2115 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2116 data, false);
2117 }
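/* The _unlocked read/write variants skip the SWFW semaphore and are meant
 * for callers that already hold it across a longer sequence of byte
 * accesses; otherwise the locking wrappers should be used.
 */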
2118
2119 /**
2120 * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
2121 * @hw: pointer to hardware structure
2122 * @byte_offset: byte offset to write
2123 * @dev_addr: device address
2124 * @data: value to write
2125 * @lock: true if to take and release semaphore
2126 *
2127 * Performs byte write operation to SFP module's EEPROM over I2C interface at
2128 * a specified device address.
2129 */
2130 static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
2131 u8 dev_addr, u8 data, bool lock)
2132 {
2133 s32 status;
2134 u32 max_retry = 1;
2135 u32 retry = 0;
2136 u32 swfw_mask = hw->phy.phy_semaphore_mask;
2137
2138 if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
2139 return IXGBE_ERR_SWFW_SYNC;
2140
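	/* Transaction layout clocked out below (descriptive only):
	 *   START | dev_addr+W | ACK | byte_offset | ACK | data | ACK | STOP
	 * On failure the bus is cleared and the attempt may repeat, bounded
	 * by max_retry.
	 */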
2141 do {
2142 ixgbe_i2c_start(hw);
2143
2144 status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
2145 if (status != 0)
2146 goto fail;
2147
2148 status = ixgbe_get_i2c_ack(hw);
2149 if (status != 0)
2150 goto fail;
2151
2152 status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
2153 if (status != 0)
2154 goto fail;
2155
2156 status = ixgbe_get_i2c_ack(hw);
2157 if (status != 0)
2158 goto fail;
2159
2160 status = ixgbe_clock_out_i2c_byte(hw, data);
2161 if (status != 0)
2162 goto fail;
2163
2164 status = ixgbe_get_i2c_ack(hw);
2165 if (status != 0)
2166 goto fail;
2167
2168 ixgbe_i2c_stop(hw);
2169 if (lock)
2170 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2171 return 0;
2172
2173 fail:
2174 ixgbe_i2c_bus_clear(hw);
2175 retry++;
2176 if (retry < max_retry)
2177 hw_dbg(hw, "I2C byte write error - Retrying.\n");
2178 else
2179 hw_dbg(hw, "I2C byte write error.\n");
2180 } while (retry < max_retry);
2181
2182 if (lock)
2183 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
2184
2185 return status;
2186 }
2187
2188 /**
2189 * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
2190 * @hw: pointer to hardware structure
2191 * @byte_offset: byte offset to write
2192 * @dev_addr: device address
2193 * @data: value to write
2194 *
2195 * Performs byte write operation to SFP module's EEPROM over I2C interface at
2196 * a specified device address.
2197 */
2198 s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
2199 u8 dev_addr, u8 data)
2200 {
2201 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2202 data, true);
2203 }
2204
2205 /**
2206 * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C
2207 * @hw: pointer to hardware structure
2208 * @byte_offset: byte offset to write
2209 * @dev_addr: device address
2210 * @data: value to write
2211 *
2212 * Performs byte write operation to SFP module's EEPROM over I2C interface at
2213 * a specified device address.
2214 */
2215 s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
2216 u8 dev_addr, u8 data)
2217 {
2218 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2219 data, false);
2220 }
2221
2222 /**
2223 * ixgbe_i2c_start - Sets I2C start condition
2224 * @hw: pointer to hardware structure
2225 *
2226 * Sets I2C start condition (High -> Low on SDA while SCL is High)
2227 * Set bit-bang mode on X550 hardware.
2228 **/
2229 static void ixgbe_i2c_start(struct ixgbe_hw *hw)
2230 {
2231 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2232
2233 i2cctl |= IXGBE_I2C_BB_EN(hw);
2234
2235 /* Start condition must begin with data and clock high */
2236 ixgbe_set_i2c_data(hw, &i2cctl, 1);
2237 ixgbe_raise_i2c_clk(hw, &i2cctl);
2238
2239 /* Setup time for start condition (4.7us) */
2240 udelay(IXGBE_I2C_T_SU_STA);
2241
2242 ixgbe_set_i2c_data(hw, &i2cctl, 0);
2243
2244 /* Hold time for start condition (4us) */
2245 udelay(IXGBE_I2C_T_HD_STA);
2246
2247 ixgbe_lower_i2c_clk(hw, &i2cctl);
2248
2249 /* Minimum low period of clock is 4.7 us */
2250 udelay(IXGBE_I2C_T_LOW);
2251
2252 }
2253
2254 /**
2255 * ixgbe_i2c_stop - Sets I2C stop condition
2256 * @hw: pointer to hardware structure
2257 *
2258 * Sets I2C stop condition (Low -> High on SDA while SCL is High)
2259 * Disables bit-bang mode and negates data output enable on X550
2260 * hardware.
2261 **/
2262 static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
2263 {
2264 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2265 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2266 u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
2267 u32 bb_en_bit = IXGBE_I2C_BB_EN(hw);
2268
2269 /* Stop condition must begin with data low and clock high */
2270 ixgbe_set_i2c_data(hw, &i2cctl, 0);
2271 ixgbe_raise_i2c_clk(hw, &i2cctl);
2272
2273 /* Setup time for stop condition (4us) */
2274 udelay(IXGBE_I2C_T_SU_STO);
2275
2276 ixgbe_set_i2c_data(hw, &i2cctl, 1);
2277
2278 /* bus free time between stop and start (4.7us)*/
2279 udelay(IXGBE_I2C_T_BUF);
2280
2281 if (bb_en_bit || data_oe_bit || clk_oe_bit) {
2282 i2cctl &= ~bb_en_bit;
2283 i2cctl |= data_oe_bit | clk_oe_bit;
2284 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
2285 IXGBE_WRITE_FLUSH(hw);
2286 }
2287 }
2288
2289 /**
2290 * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C
2291 * @hw: pointer to hardware structure
2292 * @data: pointer to the byte that receives the clocked-in data
2293 *
2294 * Clocks in one byte data via I2C data/clock
2295 **/
2296 static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
2297 {
2298 s32 i;
2299 bool bit = false;
2300
2301 *data = 0;
2302 for (i = 7; i >= 0; i--) {
2303 ixgbe_clock_in_i2c_bit(hw, &bit);
2304 *data |= bit << i;
2305 }
2306
2307 return 0;
2308 }
2309
2310 /**
2311 * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C
2312 * @hw: pointer to hardware structure
2313 * @data: data byte clocked out
2314 *
2315 * Clocks out one byte data via I2C data/clock
2316 **/
2317 static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
2318 {
2319 s32 status;
2320 s32 i;
2321 u32 i2cctl;
2322 bool bit = false;
2323
2324 for (i = 7; i >= 0; i--) {
2325 bit = (data >> i) & 0x1;
2326 status = ixgbe_clock_out_i2c_bit(hw, bit);
2327
2328 if (status != 0)
2329 break;
2330 }
2331
2332 /* Release SDA line (set high) */
2333 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2334 i2cctl |= IXGBE_I2C_DATA_OUT(hw);
2335 i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw);
2336 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
2337 IXGBE_WRITE_FLUSH(hw);
2338
2339 return status;
2340 }
2341
2342 /**
2343 * ixgbe_get_i2c_ack - Polls for I2C ACK
2344 * @hw: pointer to hardware structure
2345 *
2346 * Polls the SDA line for the slave's ACK after a byte has been clocked out
2347 **/
2348 static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
2349 {
2350 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2351 s32 status = 0;
2352 u32 i = 0;
2353 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2354 u32 timeout = 10;
2355 bool ack = true;
2356
2357 if (data_oe_bit) {
2358 i2cctl |= IXGBE_I2C_DATA_OUT(hw);
2359 i2cctl |= data_oe_bit;
2360 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
2361 IXGBE_WRITE_FLUSH(hw);
2362 }
2363 ixgbe_raise_i2c_clk(hw, &i2cctl);
2364
2365 /* Minimum high period of clock is 4us */
2366 udelay(IXGBE_I2C_T_HIGH);
2367
2368 /* Poll for ACK. Note that ACK in I2C spec is
2369 * transition from 1 to 0 */
2370 for (i = 0; i < timeout; i++) {
2371 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2372 ack = ixgbe_get_i2c_data(hw, &i2cctl);
2373
2374 udelay(1);
2375 if (ack == 0)
2376 break;
2377 }
2378
2379 if (ack == 1) {
2380 hw_dbg(hw, "I2C ack was not received.\n");
2381 status = IXGBE_ERR_I2C;
2382 }
2383
2384 ixgbe_lower_i2c_clk(hw, &i2cctl);
2385
2386 /* Minimum low period of clock is 4.7 us */
2387 udelay(IXGBE_I2C_T_LOW);
2388
2389 return status;
2390 }
2391
2392 /**
2393 * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
2394 * @hw: pointer to hardware structure
2395 * @data: read data value
2396 *
2397 * Clocks in one bit via I2C data/clock
2398 **/
2399 static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
2400 {
2401 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2402 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2403
2404 if (data_oe_bit) {
2405 i2cctl |= IXGBE_I2C_DATA_OUT(hw);
2406 i2cctl |= data_oe_bit;
2407 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
2408 IXGBE_WRITE_FLUSH(hw);
2409 }
2410 ixgbe_raise_i2c_clk(hw, &i2cctl);
2411
2412 /* Minimum high period of clock is 4us */
2413 udelay(IXGBE_I2C_T_HIGH);
2414
2415 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2416 *data = ixgbe_get_i2c_data(hw, &i2cctl);
2417
2418 ixgbe_lower_i2c_clk(hw, &i2cctl);
2419
2420 /* Minimum low period of clock is 4.7 us */
2421 udelay(IXGBE_I2C_T_LOW);
2422
2423 return 0;
2424 }
2425
2426 /**
2427 * ixgbe_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
2428 * @hw: pointer to hardware structure
2429 * @data: data value to write
2430 *
2431 * Clocks out one bit via I2C data/clock
2432 **/
2433 static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
2434 {
2435 s32 status;
2436 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2437
2438 status = ixgbe_set_i2c_data(hw, &i2cctl, data);
2439 if (status == 0) {
2440 ixgbe_raise_i2c_clk(hw, &i2cctl);
2441
2442 /* Minimum high period of clock is 4us */
2443 udelay(IXGBE_I2C_T_HIGH);
2444
2445 ixgbe_lower_i2c_clk(hw, &i2cctl);
2446
2447 /* Minimum low period of clock is 4.7 us.
2448 * This also takes care of the data hold time.
2449 */
2450 udelay(IXGBE_I2C_T_LOW);
2451 } else {
2452 hw_dbg(hw, "I2C data was not set to %X\n", data);
2453 return IXGBE_ERR_I2C;
2454 }
2455
2456 return 0;
2457 }
2458 /**
2459 * ixgbe_raise_i2c_clk - Raises the I2C SCL clock
2460 * @hw: pointer to hardware structure
2461 * @i2cctl: Current value of I2CCTL register
2462 *
2463 * Raises the I2C clock line '0'->'1'
2464 * Negates the I2C clock output enable on X550 hardware.
2465 **/
2466 static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
2467 {
2468 u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
2469 u32 i = 0;
2470 u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
2471 u32 i2cctl_r = 0;
2472
2473 if (clk_oe_bit) {
2474 *i2cctl |= clk_oe_bit;
2475 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2476 }
2477
2478 for (i = 0; i < timeout; i++) {
2479 *i2cctl |= IXGBE_I2C_CLK_OUT(hw);
2480 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2481 IXGBE_WRITE_FLUSH(hw);
2482 /* SCL rise time (1000ns) */
2483 udelay(IXGBE_I2C_T_RISE);
2484
2485 i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2486 if (i2cctl_r & IXGBE_I2C_CLK_IN(hw))
2487 break;
2488 }
2489 }
2490
2491 /**
2492 * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
2493 * @hw: pointer to hardware structure
2494 * @i2cctl: Current value of I2CCTL register
2495 *
2496 * Lowers the I2C clock line '1'->'0'
2497 * Asserts the I2C clock output enable on X550 hardware.
2498 **/
2499 static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
2500 {
2501
2502 *i2cctl &= ~IXGBE_I2C_CLK_OUT(hw);
2503 *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw);
2504
2505 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2506 IXGBE_WRITE_FLUSH(hw);
2507
2508 /* SCL fall time (300ns) */
2509 udelay(IXGBE_I2C_T_FALL);
2510 }
2511
2512 /**
2513 * ixgbe_set_i2c_data - Sets the I2C data bit
2514 * @hw: pointer to hardware structure
2515 * @i2cctl: Current value of I2CCTL register
2516 * @data: I2C data value (0 or 1) to set
2517 *
2518 * Sets the I2C data bit
2519 * Asserts the I2C data output enable on X550 hardware.
2520 **/
2521 static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
2522 {
2523 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2524
2525 if (data)
2526 *i2cctl |= IXGBE_I2C_DATA_OUT(hw);
2527 else
2528 *i2cctl &= ~IXGBE_I2C_DATA_OUT(hw);
2529 *i2cctl &= ~data_oe_bit;
2530
2531 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2532 IXGBE_WRITE_FLUSH(hw);
2533
2534 /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
2535 udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
2536
2537 if (!data) /* Can't verify data in this case */
2538 return 0;
2539 if (data_oe_bit) {
2540 *i2cctl |= data_oe_bit;
2541 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2542 IXGBE_WRITE_FLUSH(hw);
2543 }
2544
2545 /* Verify data was set correctly */
2546 *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2547 if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
2548 hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
2549 return IXGBE_ERR_I2C;
2550 }
2551
2552 return 0;
2553 }
2554
2555 /**
2556 * ixgbe_get_i2c_data - Reads the I2C SDA data bit
2557 * @hw: pointer to hardware structure
2558 * @i2cctl: Current value of I2CCTL register
2559 *
2560 * Returns the I2C data bit value
2561 * Negates the I2C data output enable on X550 hardware.
2562 **/
2563 static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
2564 {
2565 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2566
2567 if (data_oe_bit) {
2568 *i2cctl |= data_oe_bit;
2569 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2570 IXGBE_WRITE_FLUSH(hw);
2571 udelay(IXGBE_I2C_T_FALL);
2572 }
2573
2574 if (*i2cctl & IXGBE_I2C_DATA_IN(hw))
2575 return true;
2576 return false;
2577 }
2578
2579 /**
2580 * ixgbe_i2c_bus_clear - Clears the I2C bus
2581 * @hw: pointer to hardware structure
2582 *
2583 * Clears the I2C bus by sending nine clock pulses.
2584 * Used when data line is stuck low.
2585 **/
2586 static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
2587 {
2588 u32 i2cctl;
2589 u32 i;
2590
2591 ixgbe_i2c_start(hw);
2592 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2593
2594 ixgbe_set_i2c_data(hw, &i2cctl, 1);
2595
2596 for (i = 0; i < 9; i++) {
2597 ixgbe_raise_i2c_clk(hw, &i2cctl);
2598
2599 /* Min high period of clock is 4us */
2600 udelay(IXGBE_I2C_T_HIGH);
2601
2602 ixgbe_lower_i2c_clk(hw, &i2cctl);
2603
2604 /* Min low period of clock is 4.7us*/
2605 udelay(IXGBE_I2C_T_LOW);
2606 }
2607
2608 ixgbe_i2c_start(hw);
2609
2610 /* Put the i2c bus back to default state */
2611 ixgbe_i2c_stop(hw);
2612 }
2613
2614 /**
2615 * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
2616 * @hw: pointer to hardware structure
2617 *
2618 * Checks if the LASI temp alarm status was triggered due to overtemp
2619 **/
2620 s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
2621 {
2622 u16 phy_data = 0;
2623
2624 if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
2625 return 0;
2626
2627 /* Check that the LASI temp alarm status was triggered */
2628 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
2629 MDIO_MMD_PMAPMD, &phy_data);
2630
2631 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
2632 return 0;
2633
2634 return IXGBE_ERR_OVERTEMP;
2635 }
2636
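/* Illustrative usage (hypothetical): a service task could poll the alarm and
 * react before powering the port down:
 *
 *	if (ixgbe_tn_check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
 *		hw_dbg(hw, "overtemp alarm asserted\n");
 */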
2637 /** ixgbe_set_copper_phy_power - Control power for copper phy
2638 * @hw: pointer to hardware structure
2639 * @on: true for on, false for off
2640 **/
2641 s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
2642 {
2643 u32 status;
2644 u16 reg;
2645
2646 /* Bail if we don't have copper phy */
2647 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
2648 return 0;
2649
2650 if (!on && ixgbe_mng_present(hw))
2651 return 0;
2652
2653 status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, &reg);
2654 if (status)
2655 return status;
2656
2657 if (on) {
2658 reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2659 } else {
2660 if (ixgbe_check_reset_blocked(hw))
2661 return 0;
2662 reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2663 }
2664
2665 status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg);
2666 return status;
2667 }
2668
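/* Illustrative usage (hypothetical): a suspend path could park the copper PHY
 * in low-power mode and the matching resume path could wake it again:
 *
 *	ixgbe_set_copper_phy_power(hw, false);	(suspend)
 *	ixgbe_set_copper_phy_power(hw, true);	(resume)
 */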