1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited.
9 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10 * refers to Broadcom Inc. and/or its subsidiaries.
11 *
12 * Firmware is:
13 * Derived from proprietary unpublished source code,
14 * Copyright (C) 2000-2016 Broadcom Corporation.
15 * Copyright (C) 2016-2017 Broadcom Ltd.
16 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17 * refers to Broadcom Inc. and/or its subsidiaries.
18 *
19 * Permission is hereby granted for the distribution of this firmware
20 * data in hexadecimal or equivalent format, provided this copyright
21 * notice is accompanying it.
22 */
23
24
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
58
59 #include <net/checksum.h>
60 #include <net/ip.h>
61
62 #include <linux/io.h>
63 #include <asm/byteorder.h>
64 #include <linux/uaccess.h>
65
66 #include <uapi/linux/net_tstamp.h>
67 #include <linux/ptp_clock_kernel.h>
68
69 #define BAR_0 0
70 #define BAR_2 2
71
72 #include "tg3.h"
73
74 /* Functions & macros to verify TG3_FLAGS types */
75
76 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
77 {
78 return test_bit(flag, bits);
79 }
80
81 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
82 {
83 set_bit(flag, bits);
84 }
85
86 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
87 {
88 clear_bit(flag, bits);
89 }
90
91 #define tg3_flag(tp, flag) \
92 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
93 #define tg3_flag_set(tp, flag) \
94 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define tg3_flag_clear(tp, flag) \
96 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
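/* Illustrative expansion (editor's note, not in the original source):
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, ENABLE_ASF);
 *
 * token-pastes into test_bit(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags)
 * followed by set_bit(TG3_FLAG_ENABLE_ASF, (tp)->tg3_flags).
 */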
97
98 #define DRV_MODULE_NAME "tg3"
99 /* DO NOT UPDATE TG3_*_NUM defines */
100 #define TG3_MAJ_NUM 3
101 #define TG3_MIN_NUM 137
102
103 #define RESET_KIND_SHUTDOWN 0
104 #define RESET_KIND_INIT 1
105 #define RESET_KIND_SUSPEND 2
106
107 #define TG3_DEF_RX_MODE 0
108 #define TG3_DEF_TX_MODE 0
109 #define TG3_DEF_MSG_ENABLE \
110 (NETIF_MSG_DRV | \
111 NETIF_MSG_PROBE | \
112 NETIF_MSG_LINK | \
113 NETIF_MSG_TIMER | \
114 NETIF_MSG_IFDOWN | \
115 NETIF_MSG_IFUP | \
116 NETIF_MSG_RX_ERR | \
117 NETIF_MSG_TX_ERR)
118
119 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
120
121 /* length of time before we decide the hardware is borked,
122 * and dev->tx_timeout() should be called to fix the problem
123 */
124
125 #define TG3_TX_TIMEOUT (5 * HZ)
126
127 /* hardware minimum and maximum for a single frame's data payload */
128 #define TG3_MIN_MTU ETH_ZLEN
129 #define TG3_MAX_MTU(tp) \
130 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
131
132 /* These numbers seem to be hard coded in the NIC firmware somehow.
133 * You can't change the ring sizes, but you can change where you place
134 * them in the NIC onboard memory.
135 */
136 #define TG3_RX_STD_RING_SIZE(tp) \
137 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
138 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
139 #define TG3_DEF_RX_RING_PENDING 200
140 #define TG3_RX_JMB_RING_SIZE(tp) \
141 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
142 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
143 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
144
145 /* Do not place this n-ring entries value into the tp struct itself;
146 * we really want to expose these constants to GCC so that modulo et
147 * al. operations are done with shifts and masks instead of with
148 * hw multiply/modulo instructions. Another solution would be to
149 * replace things like '% foo' with '& (foo - 1)'.
150 */
151
152 #define TG3_TX_RING_SIZE 512
153 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
154
155 #define TG3_RX_STD_RING_BYTES(tp) \
156 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
157 #define TG3_RX_JMB_RING_BYTES(tp) \
158 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
159 #define TG3_RX_RCB_RING_BYTES(tp) \
160 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
161 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
162 TG3_TX_RING_SIZE)
163 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
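/* Worked example (editor's note): with TG3_TX_RING_SIZE == 512,
 * NEXT_TX(511) == (512 & 511) == 0, so the index wraps around with a
 * single AND -- exactly the '& (foo - 1)' trick described above.
 */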
164
165 #define TG3_DMA_BYTE_ENAB 64
166
167 #define TG3_RX_STD_DMA_SZ 1536
168 #define TG3_RX_JMB_DMA_SZ 9046
169
170 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
171
172 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
173 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
174
175 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
176 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
177
178 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
179 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
180
181 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
182 * that are at least dword aligned when used in PCIX mode. The driver
183 * works around this bug by double copying the packet. This workaround
184 * is built into the normal double copy length check for efficiency.
185 *
186 * However, the double copy is only necessary on those architectures
187 * where unaligned memory accesses are inefficient. For those architectures
188 * where unaligned memory accesses incur little penalty, we can reintegrate
189 * the 5701 in the normal rx path. Doing so saves a device structure
190 * dereference by hardcoding the double copy threshold in place.
191 */
192 #define TG3_RX_COPY_THRESHOLD 256
193 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
194 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
195 #else
196 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
197 #endif
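/* Editor's note: x86 and other arches that select
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS get the compile-time constant
 * here; the remaining arches read tp->rx_copy_thresh so the 5701
 * workaround can raise the threshold at runtime.
 */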
198
199 #if (NET_IP_ALIGN != 0)
200 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
201 #else
202 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
203 #endif
204
205 /* minimum number of free TX descriptors required to wake up TX process */
206 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
207 #define TG3_TX_BD_DMA_MAX_2K 2048
208 #define TG3_TX_BD_DMA_MAX_4K 4096
209
210 #define TG3_RAW_IP_ALIGN 2
211
212 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
213 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
214
215 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
216 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
217
218 #define FIRMWARE_TG3 "tigon/tg3.bin"
219 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
220 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
221 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
222
223 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
224 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
225 MODULE_LICENSE("GPL");
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229
230 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
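/* Usage sketch (editor's note): the bitmap can be set at module load
 * time, e.g.
 *
 *	modprobe tg3 tg3_debug=0x3
 *
 * to enable only NETIF_MSG_DRV | NETIF_MSG_PROBE; the default of -1
 * falls back to TG3_DEF_MSG_ENABLE.
 */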
233
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
236
237 static const struct pci_device_id tg3_pci_tbl[] = {
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 TG3_DRV_DATA_FLAG_5705_10_100},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 TG3_DRV_DATA_FLAG_5705_10_100},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 PCI_VENDOR_ID_LENOVO,
288 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
341 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
342 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
343 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
344 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
345 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
346 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
347 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
348 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
349 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
350 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
351 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
352 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
353 {}
354 };
355
356 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
357
358 static const struct {
359 const char string[ETH_GSTRING_LEN];
360 } ethtool_stats_keys[] = {
361 { "rx_octets" },
362 { "rx_fragments" },
363 { "rx_ucast_packets" },
364 { "rx_mcast_packets" },
365 { "rx_bcast_packets" },
366 { "rx_fcs_errors" },
367 { "rx_align_errors" },
368 { "rx_xon_pause_rcvd" },
369 { "rx_xoff_pause_rcvd" },
370 { "rx_mac_ctrl_rcvd" },
371 { "rx_xoff_entered" },
372 { "rx_frame_too_long_errors" },
373 { "rx_jabbers" },
374 { "rx_undersize_packets" },
375 { "rx_in_length_errors" },
376 { "rx_out_length_errors" },
377 { "rx_64_or_less_octet_packets" },
378 { "rx_65_to_127_octet_packets" },
379 { "rx_128_to_255_octet_packets" },
380 { "rx_256_to_511_octet_packets" },
381 { "rx_512_to_1023_octet_packets" },
382 { "rx_1024_to_1522_octet_packets" },
383 { "rx_1523_to_2047_octet_packets" },
384 { "rx_2048_to_4095_octet_packets" },
385 { "rx_4096_to_8191_octet_packets" },
386 { "rx_8192_to_9022_octet_packets" },
387
388 { "tx_octets" },
389 { "tx_collisions" },
390
391 { "tx_xon_sent" },
392 { "tx_xoff_sent" },
393 { "tx_flow_control" },
394 { "tx_mac_errors" },
395 { "tx_single_collisions" },
396 { "tx_mult_collisions" },
397 { "tx_deferred" },
398 { "tx_excessive_collisions" },
399 { "tx_late_collisions" },
400 { "tx_collide_2times" },
401 { "tx_collide_3times" },
402 { "tx_collide_4times" },
403 { "tx_collide_5times" },
404 { "tx_collide_6times" },
405 { "tx_collide_7times" },
406 { "tx_collide_8times" },
407 { "tx_collide_9times" },
408 { "tx_collide_10times" },
409 { "tx_collide_11times" },
410 { "tx_collide_12times" },
411 { "tx_collide_13times" },
412 { "tx_collide_14times" },
413 { "tx_collide_15times" },
414 { "tx_ucast_packets" },
415 { "tx_mcast_packets" },
416 { "tx_bcast_packets" },
417 { "tx_carrier_sense_errors" },
418 { "tx_discards" },
419 { "tx_errors" },
420
421 { "dma_writeq_full" },
422 { "dma_write_prioq_full" },
423 { "rxbds_empty" },
424 { "rx_discards" },
425 { "rx_errors" },
426 { "rx_threshold_hit" },
427
428 { "dma_readq_full" },
429 { "dma_read_prioq_full" },
430 { "tx_comp_queue_full" },
431
432 { "ring_set_send_prod_index" },
433 { "ring_status_update" },
434 { "nic_irqs" },
435 { "nic_avoided_irqs" },
436 { "nic_tx_threshold_hit" },
437
438 { "mbuf_lwm_thresh_hit" },
439 };
440
441 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
442 #define TG3_NVRAM_TEST 0
443 #define TG3_LINK_TEST 1
444 #define TG3_REGISTER_TEST 2
445 #define TG3_MEMORY_TEST 3
446 #define TG3_MAC_LOOPB_TEST 4
447 #define TG3_PHY_LOOPB_TEST 5
448 #define TG3_EXT_LOOPB_TEST 6
449 #define TG3_INTERRUPT_TEST 7
450
451
452 static const struct {
453 const char string[ETH_GSTRING_LEN];
454 } ethtool_test_keys[] = {
455 [TG3_NVRAM_TEST] = { "nvram test (online) " },
456 [TG3_LINK_TEST] = { "link test (online) " },
457 [TG3_REGISTER_TEST] = { "register test (offline)" },
458 [TG3_MEMORY_TEST] = { "memory test (offline)" },
459 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
460 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
461 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
462 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
463 };
464
465 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
466
467
468 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
469 {
470 writel(val, tp->regs + off);
471 }
472
473 static u32 tg3_read32(struct tg3 *tp, u32 off)
474 {
475 return readl(tp->regs + off);
476 }
477
478 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
479 {
480 writel(val, tp->aperegs + off);
481 }
482
483 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
484 {
485 return readl(tp->aperegs + off);
486 }
487
488 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
489 {
490 unsigned long flags;
491
492 spin_lock_irqsave(&tp->indirect_lock, flags);
493 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
494 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
495 spin_unlock_irqrestore(&tp->indirect_lock, flags);
496 }
497
498 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
499 {
500 writel(val, tp->regs + off);
501 readl(tp->regs + off);
502 }
503
504 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
505 {
506 unsigned long flags;
507 u32 val;
508
509 spin_lock_irqsave(&tp->indirect_lock, flags);
510 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
511 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
512 spin_unlock_irqrestore(&tp->indirect_lock, flags);
513 return val;
514 }
515
516 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
517 {
518 unsigned long flags;
519
520 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
521 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
522 TG3_64BIT_REG_LOW, val);
523 return;
524 }
525 if (off == TG3_RX_STD_PROD_IDX_REG) {
526 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
527 TG3_64BIT_REG_LOW, val);
528 return;
529 }
530
531 spin_lock_irqsave(&tp->indirect_lock, flags);
532 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
533 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
534 spin_unlock_irqrestore(&tp->indirect_lock, flags);
535
536 /* In indirect mode when disabling interrupts, we also need
537 * to clear the interrupt bit in the GRC local ctrl register.
538 */
539 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
540 (val == 0x1)) {
541 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
542 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
543 }
544 }
545
546 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
547 {
548 unsigned long flags;
549 u32 val;
550
551 spin_lock_irqsave(&tp->indirect_lock, flags);
552 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
553 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
554 spin_unlock_irqrestore(&tp->indirect_lock, flags);
555 return val;
556 }
557
558 /* usec_wait specifies the wait time in usec when writing to certain registers
559 * where it is unsafe to read back the register without some delay.
560 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
561 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
562 */
563 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
564 {
565 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
566 /* Non-posted methods */
567 tp->write32(tp, off, val);
568 else {
569 /* Posted method */
570 tg3_write32(tp, off, val);
571 if (usec_wait)
572 udelay(usec_wait);
573 tp->read32(tp, off);
574 }
575 /* Wait again after the read for the posted method to guarantee that
576 * the wait time is met.
577 */
578 if (usec_wait)
579 udelay(usec_wait);
580 }
581
582 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
583 {
584 tp->write32_mbox(tp, off, val);
585 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
586 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
587 !tg3_flag(tp, ICH_WORKAROUND)))
588 tp->read32_mbox(tp, off);
589 }
590
591 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
592 {
593 void __iomem *mbox = tp->regs + off;
594 writel(val, mbox);
595 if (tg3_flag(tp, TXD_MBOX_HWBUG))
596 writel(val, mbox);
597 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
598 tg3_flag(tp, FLUSH_POSTED_WRITES))
599 readl(mbox);
600 }
601
602 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
603 {
604 return readl(tp->regs + off + GRCMBOX_BASE);
605 }
606
607 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
608 {
609 writel(val, tp->regs + off + GRCMBOX_BASE);
610 }
611
612 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
613 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
614 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
615 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
616 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
617
618 #define tw32(reg, val) tp->write32(tp, reg, val)
619 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
620 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
621 #define tr32(reg) tp->read32(tp, reg)
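/* Editor's note: a typical flushed write with a mandatory settling time,
 * as used by tg3_switch_clocks() below:
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * which reaches _tw32_flush() with usec_wait == 40.
 */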
622
623 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
624 {
625 unsigned long flags;
626
627 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
628 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
629 return;
630
631 spin_lock_irqsave(&tp->indirect_lock, flags);
632 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
633 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
634 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
635
636 /* Always leave this as zero. */
637 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
638 } else {
639 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
640 tw32_f(TG3PCI_MEM_WIN_DATA, val);
641
642 /* Always leave this as zero. */
643 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
644 }
645 spin_unlock_irqrestore(&tp->indirect_lock, flags);
646 }
647
648 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
649 {
650 unsigned long flags;
651
652 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
653 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
654 *val = 0;
655 return;
656 }
657
658 spin_lock_irqsave(&tp->indirect_lock, flags);
659 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
660 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
661 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
662
663 /* Always leave this as zero. */
664 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
665 } else {
666 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
667 *val = tr32(TG3PCI_MEM_WIN_DATA);
668
669 /* Always leave this as zero. */
670 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
671 }
672 spin_unlock_irqrestore(&tp->indirect_lock, flags);
673 }
674
675 static void tg3_ape_lock_init(struct tg3 *tp)
676 {
677 int i;
678 u32 regbase, bit;
679
680 if (tg3_asic_rev(tp) == ASIC_REV_5761)
681 regbase = TG3_APE_LOCK_GRANT;
682 else
683 regbase = TG3_APE_PER_LOCK_GRANT;
684
685 /* Make sure the driver doesn't hold any stale locks. */
686 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
687 switch (i) {
688 case TG3_APE_LOCK_PHY0:
689 case TG3_APE_LOCK_PHY1:
690 case TG3_APE_LOCK_PHY2:
691 case TG3_APE_LOCK_PHY3:
692 bit = APE_LOCK_GRANT_DRIVER;
693 break;
694 default:
695 if (!tp->pci_fn)
696 bit = APE_LOCK_GRANT_DRIVER;
697 else
698 bit = 1 << tp->pci_fn;
699 }
700 tg3_ape_write32(tp, regbase + 4 * i, bit);
701 }
702
703 }
704
705 static int tg3_ape_lock(struct tg3 *tp, int locknum)
706 {
707 int i, off;
708 int ret = 0;
709 u32 status, req, gnt, bit;
710
711 if (!tg3_flag(tp, ENABLE_APE))
712 return 0;
713
714 switch (locknum) {
715 case TG3_APE_LOCK_GPIO:
716 if (tg3_asic_rev(tp) == ASIC_REV_5761)
717 return 0;
718 fallthrough;
719 case TG3_APE_LOCK_GRC:
720 case TG3_APE_LOCK_MEM:
721 if (!tp->pci_fn)
722 bit = APE_LOCK_REQ_DRIVER;
723 else
724 bit = 1 << tp->pci_fn;
725 break;
726 case TG3_APE_LOCK_PHY0:
727 case TG3_APE_LOCK_PHY1:
728 case TG3_APE_LOCK_PHY2:
729 case TG3_APE_LOCK_PHY3:
730 bit = APE_LOCK_REQ_DRIVER;
731 break;
732 default:
733 return -EINVAL;
734 }
735
736 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
737 req = TG3_APE_LOCK_REQ;
738 gnt = TG3_APE_LOCK_GRANT;
739 } else {
740 req = TG3_APE_PER_LOCK_REQ;
741 gnt = TG3_APE_PER_LOCK_GRANT;
742 }
743
744 off = 4 * locknum;
745
746 tg3_ape_write32(tp, req + off, bit);
747
748 /* Wait for up to 1 millisecond to acquire lock. */
749 for (i = 0; i < 100; i++) {
750 status = tg3_ape_read32(tp, gnt + off);
751 if (status == bit)
752 break;
753 if (pci_channel_offline(tp->pdev))
754 break;
755
756 udelay(10);
757 }
758
759 if (status != bit) {
760 /* Revoke the lock request. */
761 tg3_ape_write32(tp, gnt + off, bit);
762 ret = -EBUSY;
763 }
764
765 return ret;
766 }
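/* Editor's note: callers pair tg3_ape_lock() with tg3_ape_unlock(), as
 * tg3_ape_event_lock() below does:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */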
767
768 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
769 {
770 u32 gnt, bit;
771
772 if (!tg3_flag(tp, ENABLE_APE))
773 return;
774
775 switch (locknum) {
776 case TG3_APE_LOCK_GPIO:
777 if (tg3_asic_rev(tp) == ASIC_REV_5761)
778 return;
779 fallthrough;
780 case TG3_APE_LOCK_GRC:
781 case TG3_APE_LOCK_MEM:
782 if (!tp->pci_fn)
783 bit = APE_LOCK_GRANT_DRIVER;
784 else
785 bit = 1 << tp->pci_fn;
786 break;
787 case TG3_APE_LOCK_PHY0:
788 case TG3_APE_LOCK_PHY1:
789 case TG3_APE_LOCK_PHY2:
790 case TG3_APE_LOCK_PHY3:
791 bit = APE_LOCK_GRANT_DRIVER;
792 break;
793 default:
794 return;
795 }
796
797 if (tg3_asic_rev(tp) == ASIC_REV_5761)
798 gnt = TG3_APE_LOCK_GRANT;
799 else
800 gnt = TG3_APE_PER_LOCK_GRANT;
801
802 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
803 }
804
805 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
806 {
807 u32 apedata;
808
809 while (timeout_us) {
810 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
811 return -EBUSY;
812
813 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
814 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
815 break;
816
817 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
818
819 udelay(10);
820 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
821 }
822
823 return timeout_us ? 0 : -EBUSY;
824 }
825
826 #ifdef CONFIG_TIGON3_HWMON
827 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
828 {
829 u32 i, apedata;
830
831 for (i = 0; i < timeout_us / 10; i++) {
832 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
833
834 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
835 break;
836
837 udelay(10);
838 }
839
840 return i == timeout_us / 10;
841 }
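/* Editor's note: the helper above returns nonzero when the loop ran to
 * completion, i.e. on timeout, which is why tg3_ape_scratchpad_read()
 * below maps a "true" result to -EAGAIN.
 */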
842
843 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
844 u32 len)
845 {
846 int err;
847 u32 i, bufoff, msgoff, maxlen, apedata;
848
849 if (!tg3_flag(tp, APE_HAS_NCSI))
850 return 0;
851
852 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
853 if (apedata != APE_SEG_SIG_MAGIC)
854 return -ENODEV;
855
856 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
857 if (!(apedata & APE_FW_STATUS_READY))
858 return -EAGAIN;
859
860 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
861 TG3_APE_SHMEM_BASE;
862 msgoff = bufoff + 2 * sizeof(u32);
863 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
864
865 while (len) {
866 u32 length;
867
868 /* Cap xfer sizes to scratchpad limits. */
869 length = (len > maxlen) ? maxlen : len;
870 len -= length;
871
872 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
873 if (!(apedata & APE_FW_STATUS_READY))
874 return -EAGAIN;
875
876 /* Wait for up to 1 msec for APE to service previous event. */
877 err = tg3_ape_event_lock(tp, 1000);
878 if (err)
879 return err;
880
881 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
882 APE_EVENT_STATUS_SCRTCHPD_READ |
883 APE_EVENT_STATUS_EVENT_PENDING;
884 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
885
886 tg3_ape_write32(tp, bufoff, base_off);
887 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
888
889 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
890 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
891
892 base_off += length;
893
894 if (tg3_ape_wait_for_event(tp, 30000))
895 return -EAGAIN;
896
897 for (i = 0; length; i += 4, length -= 4) {
898 u32 val = tg3_ape_read32(tp, msgoff + i);
899 memcpy(data, &val, sizeof(u32));
900 data++;
901 }
902 }
903
904 return 0;
905 }
906 #endif
907
908 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
909 {
910 int err;
911 u32 apedata;
912
913 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
914 if (apedata != APE_SEG_SIG_MAGIC)
915 return -EAGAIN;
916
917 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
918 if (!(apedata & APE_FW_STATUS_READY))
919 return -EAGAIN;
920
921 /* Wait for up to 20 millisecond for APE to service previous event. */
922 err = tg3_ape_event_lock(tp, 20000);
923 if (err)
924 return err;
925
926 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
927 event | APE_EVENT_STATUS_EVENT_PENDING);
928
929 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
930 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
931
932 return 0;
933 }
934
935 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
936 {
937 u32 event;
938 u32 apedata;
939
940 if (!tg3_flag(tp, ENABLE_APE))
941 return;
942
943 switch (kind) {
944 case RESET_KIND_INIT:
945 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
946 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
947 APE_HOST_SEG_SIG_MAGIC);
948 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
949 APE_HOST_SEG_LEN_MAGIC);
950 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
951 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
952 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
953 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
954 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
955 APE_HOST_BEHAV_NO_PHYLOCK);
956 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
957 TG3_APE_HOST_DRVR_STATE_START);
958
959 event = APE_EVENT_STATUS_STATE_START;
960 break;
961 case RESET_KIND_SHUTDOWN:
962 if (device_may_wakeup(&tp->pdev->dev) &&
963 tg3_flag(tp, WOL_ENABLE)) {
964 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
965 TG3_APE_HOST_WOL_SPEED_AUTO);
966 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
967 } else
968 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
969
970 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
971
972 event = APE_EVENT_STATUS_STATE_UNLOAD;
973 break;
974 default:
975 return;
976 }
977
978 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
979
980 tg3_ape_send_event(tp, event);
981 }
982
983 static void tg3_send_ape_heartbeat(struct tg3 *tp,
984 unsigned long interval)
985 {
986 /* Bail out early if the heartbeat interval has not yet elapsed */
987 if (!tg3_flag(tp, ENABLE_APE) ||
988 time_before(jiffies, tp->ape_hb_jiffies + interval))
989 return;
990
991 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
992 tp->ape_hb_jiffies = jiffies;
993 }
994
995 static void tg3_disable_ints(struct tg3 *tp)
996 {
997 int i;
998
999 tw32(TG3PCI_MISC_HOST_CTRL,
1000 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1001 for (i = 0; i < tp->irq_max; i++)
1002 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1003 }
1004
1005 static void tg3_enable_ints(struct tg3 *tp)
1006 {
1007 int i;
1008
1009 tp->irq_sync = 0;
1010 wmb();
1011
1012 tw32(TG3PCI_MISC_HOST_CTRL,
1013 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1014
1015 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1016 for (i = 0; i < tp->irq_cnt; i++) {
1017 struct tg3_napi *tnapi = &tp->napi[i];
1018
1019 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1020 if (tg3_flag(tp, 1SHOT_MSI))
1021 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1022
1023 tp->coal_now |= tnapi->coal_now;
1024 }
1025
1026 /* Force an initial interrupt */
1027 if (!tg3_flag(tp, TAGGED_STATUS) &&
1028 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1029 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1030 else
1031 tw32(HOSTCC_MODE, tp->coal_now);
1032
1033 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1034 }
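/* Editor's note (driver convention, inferred from the code above and
 * tg3_disable_ints()): writing 0x1 to an interrupt mailbox masks the
 * vector, while writing last_tag << 24 with bit 0 clear unmasks it and
 * acknowledges all status-block work up to that tag.
 */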
1035
1036 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1037 {
1038 struct tg3 *tp = tnapi->tp;
1039 struct tg3_hw_status *sblk = tnapi->hw_status;
1040 unsigned int work_exists = 0;
1041
1042 /* check for phy events */
1043 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1044 if (sblk->status & SD_STATUS_LINK_CHG)
1045 work_exists = 1;
1046 }
1047
1048 /* check for TX work to do */
1049 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1050 work_exists = 1;
1051
1052 /* check for RX work to do */
1053 if (tnapi->rx_rcb_prod_idx &&
1054 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1055 work_exists = 1;
1056
1057 return work_exists;
1058 }
1059
1060 /* tg3_int_reenable
1061 * similar to tg3_enable_ints, but it accurately determines whether there
1062 * is new work pending and can return without flushing the PIO write
1063 * which reenables interrupts
1064 */
1065 static void tg3_int_reenable(struct tg3_napi *tnapi)
1066 {
1067 struct tg3 *tp = tnapi->tp;
1068
1069 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1070
1071 /* When doing tagged status, this work check is unnecessary.
1072 * The last_tag we write above tells the chip which piece of
1073 * work we've completed.
1074 */
1075 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1076 tw32(HOSTCC_MODE, tp->coalesce_mode |
1077 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1078 }
1079
1080 static void tg3_switch_clocks(struct tg3 *tp)
1081 {
1082 u32 clock_ctrl;
1083 u32 orig_clock_ctrl;
1084
1085 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1086 return;
1087
1088 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1089
1090 orig_clock_ctrl = clock_ctrl;
1091 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1092 CLOCK_CTRL_CLKRUN_OENABLE |
1093 0x1f);
1094 tp->pci_clock_ctrl = clock_ctrl;
1095
1096 if (tg3_flag(tp, 5705_PLUS)) {
1097 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1098 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1099 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1100 }
1101 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1102 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1103 clock_ctrl |
1104 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1105 40);
1106 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1107 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1108 40);
1109 }
1110 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1111 }
1112
1113 #define PHY_BUSY_LOOPS 5000
1114
1115 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1116 u32 *val)
1117 {
1118 u32 frame_val;
1119 unsigned int loops;
1120 int ret;
1121
1122 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1123 tw32_f(MAC_MI_MODE,
1124 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1125 udelay(80);
1126 }
1127
1128 tg3_ape_lock(tp, tp->phy_ape_lock);
1129
1130 *val = 0x0;
1131
1132 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1133 MI_COM_PHY_ADDR_MASK);
1134 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1135 MI_COM_REG_ADDR_MASK);
1136 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1137
1138 tw32_f(MAC_MI_COM, frame_val);
1139
1140 loops = PHY_BUSY_LOOPS;
1141 while (loops != 0) {
1142 udelay(10);
1143 frame_val = tr32(MAC_MI_COM);
1144
1145 if ((frame_val & MI_COM_BUSY) == 0) {
1146 udelay(5);
1147 frame_val = tr32(MAC_MI_COM);
1148 break;
1149 }
1150 loops -= 1;
1151 }
1152
1153 ret = -EBUSY;
1154 if (loops != 0) {
1155 *val = frame_val & MI_COM_DATA_MASK;
1156 ret = 0;
1157 }
1158
1159 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1160 tw32_f(MAC_MI_MODE, tp->mi_mode);
1161 udelay(80);
1162 }
1163
1164 tg3_ape_unlock(tp, tp->phy_ape_lock);
1165
1166 return ret;
1167 }
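/* Editor's note: with PHY_BUSY_LOOPS == 5000 and a 10 usec poll, the
 * read above gives the PHY roughly 50 msec to deassert MI_COM_BUSY
 * before giving up with -EBUSY.
 */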
1168
1169 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1170 {
1171 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1172 }
1173
1174 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1175 u32 val)
1176 {
1177 u32 frame_val;
1178 unsigned int loops;
1179 int ret;
1180
1181 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1182 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1183 return 0;
1184
1185 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1186 tw32_f(MAC_MI_MODE,
1187 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1188 udelay(80);
1189 }
1190
1191 tg3_ape_lock(tp, tp->phy_ape_lock);
1192
1193 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1194 MI_COM_PHY_ADDR_MASK);
1195 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1196 MI_COM_REG_ADDR_MASK);
1197 frame_val |= (val & MI_COM_DATA_MASK);
1198 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1199
1200 tw32_f(MAC_MI_COM, frame_val);
1201
1202 loops = PHY_BUSY_LOOPS;
1203 while (loops != 0) {
1204 udelay(10);
1205 frame_val = tr32(MAC_MI_COM);
1206 if ((frame_val & MI_COM_BUSY) == 0) {
1207 udelay(5);
1208 frame_val = tr32(MAC_MI_COM);
1209 break;
1210 }
1211 loops -= 1;
1212 }
1213
1214 ret = -EBUSY;
1215 if (loops != 0)
1216 ret = 0;
1217
1218 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1219 tw32_f(MAC_MI_MODE, tp->mi_mode);
1220 udelay(80);
1221 }
1222
1223 tg3_ape_unlock(tp, tp->phy_ape_lock);
1224
1225 return ret;
1226 }
1227
1228 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1229 {
1230 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1231 }
1232
1233 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1234 {
1235 int err;
1236
1237 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1238 if (err)
1239 goto done;
1240
1241 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1242 if (err)
1243 goto done;
1244
1245 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1246 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1247 if (err)
1248 goto done;
1249
1250 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1251
1252 done:
1253 return err;
1254 }
1255
1256 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1257 {
1258 int err;
1259
1260 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1261 if (err)
1262 goto done;
1263
1264 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1265 if (err)
1266 goto done;
1267
1268 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1269 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1270 if (err)
1271 goto done;
1272
1273 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1274
1275 done:
1276 return err;
1277 }
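/* Editor's note: the two helpers above implement IEEE 802.3 Clause 45
 * access tunnelled through Clause 22 registers: MMD_CTRL selects the
 * device address (devad), MMD_ADDRESS latches the register offset, and
 * the DATA_NOINC mode turns the next MMD_ADDRESS access into a data
 * cycle instead of an address write.
 */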
1278
1279 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1280 {
1281 int err;
1282
1283 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1284 if (!err)
1285 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1286
1287 return err;
1288 }
1289
1290 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1291 {
1292 int err;
1293
1294 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1295 if (!err)
1296 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1297
1298 return err;
1299 }
1300
1301 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1302 {
1303 int err;
1304
1305 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1306 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1307 MII_TG3_AUXCTL_SHDWSEL_MISC);
1308 if (!err)
1309 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1310
1311 return err;
1312 }
1313
1314 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1315 {
1316 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1317 set |= MII_TG3_AUXCTL_MISC_WREN;
1318
1319 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1320 }
1321
1322 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1323 {
1324 u32 val;
1325 int err;
1326
1327 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1328
1329 if (err)
1330 return err;
1331
1332 if (enable)
1333 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1334 else
1335 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1336
1337 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1338 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1339
1340 return err;
1341 }
1342
1343 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1344 {
1345 return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1346 reg | val | MII_TG3_MISC_SHDW_WREN);
1347 }
1348
1349 static int tg3_bmcr_reset(struct tg3 *tp)
1350 {
1351 u32 phy_control;
1352 int limit, err;
1353
1354 /* OK, reset it, and poll the BMCR_RESET bit until it
1355 * clears or we time out.
1356 */
1357 phy_control = BMCR_RESET;
1358 err = tg3_writephy(tp, MII_BMCR, phy_control);
1359 if (err != 0)
1360 return -EBUSY;
1361
1362 limit = 5000;
1363 while (limit--) {
1364 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1365 if (err != 0)
1366 return -EBUSY;
1367
1368 if ((phy_control & BMCR_RESET) == 0) {
1369 udelay(40);
1370 break;
1371 }
1372 udelay(10);
1373 }
1374 if (limit < 0)
1375 return -EBUSY;
1376
1377 return 0;
1378 }
1379
1380 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1381 {
1382 struct tg3 *tp = bp->priv;
1383 u32 val;
1384
1385 spin_lock_bh(&tp->lock);
1386
1387 if (__tg3_readphy(tp, mii_id, reg, &val))
1388 val = -EIO;
1389
1390 spin_unlock_bh(&tp->lock);
1391
1392 return val;
1393 }
1394
1395 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1396 {
1397 struct tg3 *tp = bp->priv;
1398 u32 ret = 0;
1399
1400 spin_lock_bh(&tp->lock);
1401
1402 if (__tg3_writephy(tp, mii_id, reg, val))
1403 ret = -EIO;
1404
1405 spin_unlock_bh(&tp->lock);
1406
1407 return ret;
1408 }
1409
1410 static void tg3_mdio_config_5785(struct tg3 *tp)
1411 {
1412 u32 val;
1413 struct phy_device *phydev;
1414
1415 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1416 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1417 case PHY_ID_BCM50610:
1418 case PHY_ID_BCM50610M:
1419 val = MAC_PHYCFG2_50610_LED_MODES;
1420 break;
1421 case PHY_ID_BCMAC131:
1422 val = MAC_PHYCFG2_AC131_LED_MODES;
1423 break;
1424 case PHY_ID_RTL8211C:
1425 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1426 break;
1427 case PHY_ID_RTL8201E:
1428 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1429 break;
1430 default:
1431 return;
1432 }
1433
1434 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1435 tw32(MAC_PHYCFG2, val);
1436
1437 val = tr32(MAC_PHYCFG1);
1438 val &= ~(MAC_PHYCFG1_RGMII_INT |
1439 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1440 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1441 tw32(MAC_PHYCFG1, val);
1442
1443 return;
1444 }
1445
1446 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1447 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1448 MAC_PHYCFG2_FMODE_MASK_MASK |
1449 MAC_PHYCFG2_GMODE_MASK_MASK |
1450 MAC_PHYCFG2_ACT_MASK_MASK |
1451 MAC_PHYCFG2_QUAL_MASK_MASK |
1452 MAC_PHYCFG2_INBAND_ENABLE;
1453
1454 tw32(MAC_PHYCFG2, val);
1455
1456 val = tr32(MAC_PHYCFG1);
1457 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1458 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1459 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1460 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1461 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1462 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1463 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1464 }
1465 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1466 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1467 tw32(MAC_PHYCFG1, val);
1468
1469 val = tr32(MAC_EXT_RGMII_MODE);
1470 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1471 MAC_RGMII_MODE_RX_QUALITY |
1472 MAC_RGMII_MODE_RX_ACTIVITY |
1473 MAC_RGMII_MODE_RX_ENG_DET |
1474 MAC_RGMII_MODE_TX_ENABLE |
1475 MAC_RGMII_MODE_TX_LOWPWR |
1476 MAC_RGMII_MODE_TX_RESET);
1477 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1478 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1479 val |= MAC_RGMII_MODE_RX_INT_B |
1480 MAC_RGMII_MODE_RX_QUALITY |
1481 MAC_RGMII_MODE_RX_ACTIVITY |
1482 MAC_RGMII_MODE_RX_ENG_DET;
1483 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1484 val |= MAC_RGMII_MODE_TX_ENABLE |
1485 MAC_RGMII_MODE_TX_LOWPWR |
1486 MAC_RGMII_MODE_TX_RESET;
1487 }
1488 tw32(MAC_EXT_RGMII_MODE, val);
1489 }
1490
1491 static void tg3_mdio_start(struct tg3 *tp)
1492 {
1493 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1494 tw32_f(MAC_MI_MODE, tp->mi_mode);
1495 udelay(80);
1496
1497 if (tg3_flag(tp, MDIOBUS_INITED) &&
1498 tg3_asic_rev(tp) == ASIC_REV_5785)
1499 tg3_mdio_config_5785(tp);
1500 }
1501
1502 static int tg3_mdio_init(struct tg3 *tp)
1503 {
1504 int i;
1505 u32 reg;
1506 struct phy_device *phydev;
1507
1508 if (tg3_flag(tp, 5717_PLUS)) {
1509 u32 is_serdes;
1510
1511 tp->phy_addr = tp->pci_fn + 1;
1512
1513 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1514 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1515 else
1516 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1517 TG3_CPMU_PHY_STRAP_IS_SERDES;
1518 if (is_serdes)
1519 tp->phy_addr += 7;
1520 } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1521 int addr;
1522
1523 addr = ssb_gige_get_phyaddr(tp->pdev);
1524 if (addr < 0)
1525 return addr;
1526 tp->phy_addr = addr;
1527 } else
1528 tp->phy_addr = TG3_PHY_MII_ADDR;
1529
1530 tg3_mdio_start(tp);
1531
1532 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1533 return 0;
1534
1535 tp->mdio_bus = mdiobus_alloc();
1536 if (tp->mdio_bus == NULL)
1537 return -ENOMEM;
1538
1539 tp->mdio_bus->name = "tg3 mdio bus";
1540 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1541 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1542 tp->mdio_bus->priv = tp;
1543 tp->mdio_bus->parent = &tp->pdev->dev;
1544 tp->mdio_bus->read = &tg3_mdio_read;
1545 tp->mdio_bus->write = &tg3_mdio_write;
1546 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
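/* Editor's note: setting phy_mask to all-ones except tp->phy_addr keeps
 * mdiobus_register() from probing the other 31 MDIO addresses.
 */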
1547
1548 /* The bus registration will look for all the PHYs on the mdio bus.
1549 * Unfortunately, it does not ensure the PHY is powered up before
1550 * accessing the PHY ID registers. A chip reset is the
1551 * quickest way to bring the device back to an operational state.
1552 */
1553 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1554 tg3_bmcr_reset(tp);
1555
1556 i = mdiobus_register(tp->mdio_bus);
1557 if (i) {
1558 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1559 mdiobus_free(tp->mdio_bus);
1560 return i;
1561 }
1562
1563 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1564
1565 if (!phydev || !phydev->drv) {
1566 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1567 mdiobus_unregister(tp->mdio_bus);
1568 mdiobus_free(tp->mdio_bus);
1569 return -ENODEV;
1570 }
1571
1572 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1573 case PHY_ID_BCM57780:
1574 phydev->interface = PHY_INTERFACE_MODE_GMII;
1575 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1576 break;
1577 case PHY_ID_BCM50610:
1578 case PHY_ID_BCM50610M:
1579 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1580 PHY_BRCM_RX_REFCLK_UNUSED |
1581 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1582 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1583 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1584 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1585 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1586 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1587 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1588 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1589 fallthrough;
1590 case PHY_ID_RTL8211C:
1591 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1592 break;
1593 case PHY_ID_RTL8201E:
1594 case PHY_ID_BCMAC131:
1595 phydev->interface = PHY_INTERFACE_MODE_MII;
1596 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1597 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1598 break;
1599 }
1600
1601 tg3_flag_set(tp, MDIOBUS_INITED);
1602
1603 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1604 tg3_mdio_config_5785(tp);
1605
1606 return 0;
1607 }
1608
1609 static void tg3_mdio_fini(struct tg3 *tp)
1610 {
1611 if (tg3_flag(tp, MDIOBUS_INITED)) {
1612 tg3_flag_clear(tp, MDIOBUS_INITED);
1613 mdiobus_unregister(tp->mdio_bus);
1614 mdiobus_free(tp->mdio_bus);
1615 }
1616 }
1617
1618 /* tp->lock is held. */
1619 static inline void tg3_generate_fw_event(struct tg3 *tp)
1620 {
1621 u32 val;
1622
1623 val = tr32(GRC_RX_CPU_EVENT);
1624 val |= GRC_RX_CPU_DRIVER_EVENT;
1625 tw32_f(GRC_RX_CPU_EVENT, val);
1626
1627 tp->last_event_jiffies = jiffies;
1628 }
1629
1630 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1631
1632 /* tp->lock is held. */
1633 static void tg3_wait_for_event_ack(struct tg3 *tp)
1634 {
1635 int i;
1636 unsigned int delay_cnt;
1637 long time_remain;
1638
1639 /* If enough time has passed, no wait is necessary. */
1640 time_remain = (long)(tp->last_event_jiffies + 1 +
1641 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1642 (long)jiffies;
1643 if (time_remain < 0)
1644 return;
1645
1646 /* Check if we can shorten the wait time. */
1647 delay_cnt = jiffies_to_usecs(time_remain);
1648 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1649 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1650 delay_cnt = (delay_cnt >> 3) + 1;
1651
1652 for (i = 0; i < delay_cnt; i++) {
1653 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1654 break;
1655 if (pci_channel_offline(tp->pdev))
1656 break;
1657
1658 udelay(8);
1659 }
1660 }
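/* Editor's note: the (delay_cnt >> 3) + 1 iterations of udelay(8) above
 * approximate the remaining delay_cnt microseconds while polling the
 * event bit every 8 usec instead of sleeping the full interval at once.
 */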
1661
1662 /* tp->lock is held. */
1663 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1664 {
1665 u32 reg, val;
1666
1667 val = 0;
1668 if (!tg3_readphy(tp, MII_BMCR, &reg))
1669 val = reg << 16;
1670 if (!tg3_readphy(tp, MII_BMSR, &reg))
1671 val |= (reg & 0xffff);
1672 *data++ = val;
1673
1674 val = 0;
1675 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1676 val = reg << 16;
1677 if (!tg3_readphy(tp, MII_LPA, &reg))
1678 val |= (reg & 0xffff);
1679 *data++ = val;
1680
1681 val = 0;
1682 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1683 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1684 val = reg << 16;
1685 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1686 val |= (reg & 0xffff);
1687 }
1688 *data++ = val;
1689
1690 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1691 val = reg << 16;
1692 else
1693 val = 0;
1694 *data++ = val;
1695 }
1696
1697 /* tp->lock is held. */
1698 static void tg3_ump_link_report(struct tg3 *tp)
1699 {
1700 u32 data[4];
1701
1702 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1703 return;
1704
1705 tg3_phy_gather_ump_data(tp, data);
1706
1707 tg3_wait_for_event_ack(tp);
1708
1709 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1710 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1711 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1712 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1713 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1714 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1715
1716 tg3_generate_fw_event(tp);
1717 }
1718
1719 /* tp->lock is held. */
1720 static void tg3_stop_fw(struct tg3 *tp)
1721 {
1722 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1723 /* Wait for RX cpu to ACK the previous event. */
1724 tg3_wait_for_event_ack(tp);
1725
1726 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1727
1728 tg3_generate_fw_event(tp);
1729
1730 /* Wait for RX cpu to ACK this event. */
1731 tg3_wait_for_event_ack(tp);
1732 }
1733 }
1734
1735 /* tp->lock is held. */
1736 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1737 {
1738 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1739 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1740
1741 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1742 switch (kind) {
1743 case RESET_KIND_INIT:
1744 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745 DRV_STATE_START);
1746 break;
1747
1748 case RESET_KIND_SHUTDOWN:
1749 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750 DRV_STATE_UNLOAD);
1751 break;
1752
1753 case RESET_KIND_SUSPEND:
1754 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755 DRV_STATE_SUSPEND);
1756 break;
1757
1758 default:
1759 break;
1760 }
1761 }
1762 }
1763
1764 /* tp->lock is held. */
1765 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1766 {
1767 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1768 switch (kind) {
1769 case RESET_KIND_INIT:
1770 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1771 DRV_STATE_START_DONE);
1772 break;
1773
1774 case RESET_KIND_SHUTDOWN:
1775 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1776 DRV_STATE_UNLOAD_DONE);
1777 break;
1778
1779 default:
1780 break;
1781 }
1782 }
1783 }
1784
1785 /* tp->lock is held. */
1786 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1787 {
1788 if (tg3_flag(tp, ENABLE_ASF)) {
1789 switch (kind) {
1790 case RESET_KIND_INIT:
1791 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1792 DRV_STATE_START);
1793 break;
1794
1795 case RESET_KIND_SHUTDOWN:
1796 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1797 DRV_STATE_UNLOAD);
1798 break;
1799
1800 case RESET_KIND_SUSPEND:
1801 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1802 DRV_STATE_SUSPEND);
1803 break;
1804
1805 default:
1806 break;
1807 }
1808 }
1809 }
1810
1811 static int tg3_poll_fw(struct tg3 *tp)
1812 {
1813 int i;
1814 u32 val;
1815
1816 if (tg3_flag(tp, NO_FWARE_REPORTED))
1817 return 0;
1818
1819 if (tg3_flag(tp, IS_SSB_CORE)) {
1820 /* We don't use firmware. */
1821 return 0;
1822 }
1823
1824 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1825 /* Wait up to 20ms for init done. */
1826 for (i = 0; i < 200; i++) {
1827 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1828 return 0;
1829 if (pci_channel_offline(tp->pdev))
1830 return -ENODEV;
1831
1832 udelay(100);
1833 }
1834 return -ENODEV;
1835 }
1836
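/* Handshake note: tg3_write_sig_pre_reset() deposits
 * NIC_SRAM_FIRMWARE_MBOX_MAGIC1 in this mailbox before a reset, and
 * bootcode writes back its one's complement once initialization
 * completes, which is what the loop below polls for.
 */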
1837 /* Wait for firmware initialization to complete. */
1838 for (i = 0; i < 100000; i++) {
1839 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1840 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1841 break;
1842 if (pci_channel_offline(tp->pdev)) {
1843 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1844 tg3_flag_set(tp, NO_FWARE_REPORTED);
1845 netdev_info(tp->dev, "No firmware running\n");
1846 }
1847
1848 break;
1849 }
1850
1851 udelay(10);
1852 }
1853
1854 /* Chip might not be fitted with firmware. Some Sun onboard
1855 * parts are configured like that. So don't signal the timeout
1856 * of the above loop as an error, but do report the lack of
1857 * running firmware once.
1858 */
1859 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1860 tg3_flag_set(tp, NO_FWARE_REPORTED);
1861
1862 netdev_info(tp->dev, "No firmware running\n");
1863 }
1864
1865 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1866 /* The 57765 A0 needs a little more
1867 * time to do some important work.
1868 */
1869 mdelay(10);
1870 }
1871
1872 return 0;
1873 }
1874
1875 static void tg3_link_report(struct tg3 *tp)
1876 {
1877 if (!netif_carrier_ok(tp->dev)) {
1878 netif_info(tp, link, tp->dev, "Link is down\n");
1879 tg3_ump_link_report(tp);
1880 } else if (netif_msg_link(tp)) {
1881 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1882 (tp->link_config.active_speed == SPEED_1000 ?
1883 1000 :
1884 (tp->link_config.active_speed == SPEED_100 ?
1885 100 : 10)),
1886 (tp->link_config.active_duplex == DUPLEX_FULL ?
1887 "full" : "half"));
1888
1889 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1890 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1891 "on" : "off",
1892 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1893 "on" : "off");
1894
1895 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1896 netdev_info(tp->dev, "EEE is %s\n",
1897 tp->setlpicnt ? "enabled" : "disabled");
1898
1899 tg3_ump_link_report(tp);
1900 }
1901
1902 tp->link_up = netif_carrier_ok(tp->dev);
1903 }
1904
1905 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1906 {
1907 u32 flowctrl = 0;
1908
1909 if (adv & ADVERTISE_PAUSE_CAP) {
1910 flowctrl |= FLOW_CTRL_RX;
1911 if (!(adv & ADVERTISE_PAUSE_ASYM))
1912 flowctrl |= FLOW_CTRL_TX;
1913 } else if (adv & ADVERTISE_PAUSE_ASYM)
1914 flowctrl |= FLOW_CTRL_TX;
1915
1916 return flowctrl;
1917 }
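/* Resolution of the two 802.3 pause bits decoded above (the 1000X
 * variant below follows the same pattern):
 *
 *   PAUSE_CAP  PAUSE_ASYM   decoded capability
 *       0          0        none
 *       0          1        FLOW_CTRL_TX only
 *       1          0        FLOW_CTRL_RX | FLOW_CTRL_TX
 *       1          1        FLOW_CTRL_RX only
 */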
1918
1919 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1920 {
1921 u16 miireg;
1922
1923 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1924 miireg = ADVERTISE_1000XPAUSE;
1925 else if (flow_ctrl & FLOW_CTRL_TX)
1926 miireg = ADVERTISE_1000XPSE_ASYM;
1927 else if (flow_ctrl & FLOW_CTRL_RX)
1928 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1929 else
1930 miireg = 0;
1931
1932 return miireg;
1933 }
1934
1935 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1936 {
1937 u32 flowctrl = 0;
1938
1939 if (adv & ADVERTISE_1000XPAUSE) {
1940 flowctrl |= FLOW_CTRL_RX;
1941 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1942 flowctrl |= FLOW_CTRL_TX;
1943 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1944 flowctrl |= FLOW_CTRL_TX;
1945
1946 return flowctrl;
1947 }
1948
1949 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1950 {
1951 u8 cap = 0;
1952
1953 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1954 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1955 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1956 if (lcladv & ADVERTISE_1000XPAUSE)
1957 cap = FLOW_CTRL_RX;
1958 if (rmtadv & ADVERTISE_1000XPAUSE)
1959 cap = FLOW_CTRL_TX;
1960 }
1961
1962 return cap;
1963 }
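/* Example of the asymmetric branch above: if we advertise
 * 1000XPAUSE | 1000XPSE_ASYM while the link partner advertises only
 * 1000XPSE_ASYM, both sides agree on asymmetric pause and our PAUSE
 * bit selects cap = FLOW_CTRL_RX, i.e. the partner may pause us but
 * we will not send pause frames ourselves.
 */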
1964
1965 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1966 {
1967 u8 autoneg;
1968 u8 flowctrl = 0;
1969 u32 old_rx_mode = tp->rx_mode;
1970 u32 old_tx_mode = tp->tx_mode;
1971
1972 if (tg3_flag(tp, USE_PHYLIB))
1973 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1974 else
1975 autoneg = tp->link_config.autoneg;
1976
1977 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1978 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1979 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1980 else
1981 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1982 } else
1983 flowctrl = tp->link_config.flowctrl;
1984
1985 tp->link_config.active_flowctrl = flowctrl;
1986
1987 if (flowctrl & FLOW_CTRL_RX)
1988 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1989 else
1990 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1991
1992 if (old_rx_mode != tp->rx_mode)
1993 tw32_f(MAC_RX_MODE, tp->rx_mode);
1994
1995 if (flowctrl & FLOW_CTRL_TX)
1996 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1997 else
1998 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1999
2000 if (old_tx_mode != tp->tx_mode)
2001 tw32_f(MAC_TX_MODE, tp->tx_mode);
2002 }
2003
2004 static void tg3_adjust_link(struct net_device *dev)
2005 {
2006 u8 oldflowctrl, linkmesg = 0;
2007 u32 mac_mode, lcl_adv, rmt_adv;
2008 struct tg3 *tp = netdev_priv(dev);
2009 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2010
2011 spin_lock_bh(&tp->lock);
2012
2013 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2014 MAC_MODE_HALF_DUPLEX);
2015
2016 oldflowctrl = tp->link_config.active_flowctrl;
2017
2018 if (phydev->link) {
2019 lcl_adv = 0;
2020 rmt_adv = 0;
2021
2022 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2023 mac_mode |= MAC_MODE_PORT_MODE_MII;
2024 else if (phydev->speed == SPEED_1000 ||
2025 tg3_asic_rev(tp) != ASIC_REV_5785)
2026 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2027 else
2028 mac_mode |= MAC_MODE_PORT_MODE_MII;
2029
2030 if (phydev->duplex == DUPLEX_HALF)
2031 mac_mode |= MAC_MODE_HALF_DUPLEX;
2032 else {
2033 lcl_adv = mii_advertise_flowctrl(
2034 tp->link_config.flowctrl);
2035
2036 if (phydev->pause)
2037 rmt_adv = LPA_PAUSE_CAP;
2038 if (phydev->asym_pause)
2039 rmt_adv |= LPA_PAUSE_ASYM;
2040 }
2041
2042 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2043 } else
2044 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2045
2046 if (mac_mode != tp->mac_mode) {
2047 tp->mac_mode = mac_mode;
2048 tw32_f(MAC_MODE, tp->mac_mode);
2049 udelay(40);
2050 }
2051
2052 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2053 if (phydev->speed == SPEED_10)
2054 tw32(MAC_MI_STAT,
2055 MAC_MI_STAT_10MBPS_MODE |
2056 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2057 else
2058 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2059 }
2060
2061 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2062 tw32(MAC_TX_LENGTHS,
2063 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2064 (6 << TX_LENGTHS_IPG_SHIFT) |
2065 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2066 else
2067 tw32(MAC_TX_LENGTHS,
2068 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2069 (6 << TX_LENGTHS_IPG_SHIFT) |
2070 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2071
2072 if (phydev->link != tp->old_link ||
2073 phydev->speed != tp->link_config.active_speed ||
2074 phydev->duplex != tp->link_config.active_duplex ||
2075 oldflowctrl != tp->link_config.active_flowctrl)
2076 linkmesg = 1;
2077
2078 tp->old_link = phydev->link;
2079 tp->link_config.active_speed = phydev->speed;
2080 tp->link_config.active_duplex = phydev->duplex;
2081
2082 spin_unlock_bh(&tp->lock);
2083
2084 if (linkmesg)
2085 tg3_link_report(tp);
2086 }
2087
2088 static int tg3_phy_init(struct tg3 *tp)
2089 {
2090 struct phy_device *phydev;
2091
2092 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2093 return 0;
2094
2095 /* Bring the PHY back to a known state. */
2096 tg3_bmcr_reset(tp);
2097
2098 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2099
2100 /* Attach the MAC to the PHY. */
2101 phydev = phy_connect(tp->dev, phydev_name(phydev),
2102 tg3_adjust_link, phydev->interface);
2103 if (IS_ERR(phydev)) {
2104 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2105 return PTR_ERR(phydev);
2106 }
2107
2108 /* Mask with MAC supported features. */
2109 switch (phydev->interface) {
2110 case PHY_INTERFACE_MODE_GMII:
2111 case PHY_INTERFACE_MODE_RGMII:
2112 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2113 phy_set_max_speed(phydev, SPEED_1000);
2114 phy_support_asym_pause(phydev);
2115 break;
2116 }
2117 fallthrough;
2118 case PHY_INTERFACE_MODE_MII:
2119 phy_set_max_speed(phydev, SPEED_100);
2120 phy_support_asym_pause(phydev);
2121 break;
2122 default:
2123 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2124 return -EINVAL;
2125 }
2126
2127 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2128
2129 phy_attached_info(phydev);
2130
2131 return 0;
2132 }
2133
2134 static void tg3_phy_start(struct tg3 *tp)
2135 {
2136 struct phy_device *phydev;
2137
2138 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2139 return;
2140
2141 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2142
2143 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2144 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2145 phydev->speed = tp->link_config.speed;
2146 phydev->duplex = tp->link_config.duplex;
2147 phydev->autoneg = tp->link_config.autoneg;
2148 ethtool_convert_legacy_u32_to_link_mode(
2149 phydev->advertising, tp->link_config.advertising);
2150 }
2151
2152 phy_start(phydev);
2153
2154 phy_start_aneg(phydev);
2155 }
2156
2157 static void tg3_phy_stop(struct tg3 *tp)
2158 {
2159 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2160 return;
2161
2162 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2163 }
2164
2165 static void tg3_phy_fini(struct tg3 *tp)
2166 {
2167 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2168 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2169 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2170 }
2171 }
2172
2173 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2174 {
2175 int err;
2176 u32 val;
2177
2178 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2179 return 0;
2180
2181 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2182 /* Cannot do read-modify-write on 5401 */
2183 err = tg3_phy_auxctl_write(tp,
2184 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2185 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2186 0x4c20);
2187 goto done;
2188 }
2189
2190 err = tg3_phy_auxctl_read(tp,
2191 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2192 if (err)
2193 return err;
2194
2195 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2196 err = tg3_phy_auxctl_write(tp,
2197 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2198
2199 done:
2200 return err;
2201 }
2202
2203 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2204 {
2205 u32 phytest;
2206
2207 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2208 u32 phy;
2209
2210 tg3_writephy(tp, MII_TG3_FET_TEST,
2211 phytest | MII_TG3_FET_SHADOW_EN);
2212 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2213 if (enable)
2214 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2215 else
2216 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2217 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2218 }
2219 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2220 }
2221 }
2222
2223 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2224 {
2225 u32 reg;
2226
2227 if (!tg3_flag(tp, 5705_PLUS) ||
2228 (tg3_flag(tp, 5717_PLUS) &&
2229 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2230 return;
2231
2232 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2233 tg3_phy_fet_toggle_apd(tp, enable);
2234 return;
2235 }
2236
2237 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2238 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2239 MII_TG3_MISC_SHDW_SCR5_SDTL |
2240 MII_TG3_MISC_SHDW_SCR5_C125OE;
2241 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2242 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2243
2244 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2245
2246
2247 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2248 if (enable)
2249 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2250
2251 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2252 }
2253
2254 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2255 {
2256 u32 phy;
2257
2258 if (!tg3_flag(tp, 5705_PLUS) ||
2259 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2260 return;
2261
2262 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2263 u32 ephy;
2264
2265 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2266 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2267
2268 tg3_writephy(tp, MII_TG3_FET_TEST,
2269 ephy | MII_TG3_FET_SHADOW_EN);
2270 if (!tg3_readphy(tp, reg, &phy)) {
2271 if (enable)
2272 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2273 else
2274 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2275 tg3_writephy(tp, reg, phy);
2276 }
2277 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2278 }
2279 } else {
2280 int ret;
2281
2282 ret = tg3_phy_auxctl_read(tp,
2283 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2284 if (!ret) {
2285 if (enable)
2286 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2287 else
2288 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2289 tg3_phy_auxctl_write(tp,
2290 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2291 }
2292 }
2293 }
2294
2295 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2296 {
2297 int ret;
2298 u32 val;
2299
2300 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2301 return;
2302
2303 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2304 if (!ret)
2305 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2306 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2307 }
2308
2309 static void tg3_phy_apply_otp(struct tg3 *tp)
2310 {
2311 u32 otp, phy;
2312
2313 if (!tp->phy_otp)
2314 return;
2315
2316 otp = tp->phy_otp;
2317
2318 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2319 return;
2320
2321 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2322 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2323 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2324
2325 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2326 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2327 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2328
2329 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2330 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2331 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2332
2333 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2334 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2335
2336 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2337 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2338
2339 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2340 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2341 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2342
2343 tg3_phy_toggle_auxctl_smdsp(tp, false);
2344 }
2345
2346 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2347 {
2348 u32 val;
2349 struct ethtool_eee *dest = &tp->eee;
2350
2351 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2352 return;
2353
2354 if (eee)
2355 dest = eee;
2356
2357 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2358 return;
2359
2360 /* Pull eee_active */
2361 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2362 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2363 dest->eee_active = 1;
2364 } else
2365 dest->eee_active = 0;
2366
2367 /* Pull lp advertised settings */
2368 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2369 return;
2370 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2371
2372 /* Pull advertised and eee_enabled settings */
2373 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2374 return;
2375 dest->eee_enabled = !!val;
2376 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2377
2378 /* Pull tx_lpi_enabled */
2379 val = tr32(TG3_CPMU_EEE_MODE);
2380 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2381
2382 /* Pull lpi timer value */
2383 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2384 }
2385
2386 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2387 {
2388 u32 val;
2389
2390 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2391 return;
2392
2393 tp->setlpicnt = 0;
2394
2395 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2396 current_link_up &&
2397 tp->link_config.active_duplex == DUPLEX_FULL &&
2398 (tp->link_config.active_speed == SPEED_100 ||
2399 tp->link_config.active_speed == SPEED_1000)) {
2400 u32 eeectl;
2401
2402 if (tp->link_config.active_speed == SPEED_1000)
2403 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2404 else
2405 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2406
2407 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2408
2409 tg3_eee_pull_config(tp, NULL);
2410 if (tp->eee.eee_active)
2411 tp->setlpicnt = 2;
2412 }
2413
2414 if (!tp->setlpicnt) {
2415 if (current_link_up &&
2416 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2417 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2418 tg3_phy_toggle_auxctl_smdsp(tp, false);
2419 }
2420
2421 val = tr32(TG3_CPMU_EEE_MODE);
2422 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2423 }
2424 }
2425
2426 static void tg3_phy_eee_enable(struct tg3 *tp)
2427 {
2428 u32 val;
2429
2430 if (tp->link_config.active_speed == SPEED_1000 &&
2431 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2432 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2433 tg3_flag(tp, 57765_CLASS)) &&
2434 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2435 val = MII_TG3_DSP_TAP26_ALNOKO |
2436 MII_TG3_DSP_TAP26_RMRXSTO;
2437 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2438 tg3_phy_toggle_auxctl_smdsp(tp, false);
2439 }
2440
2441 val = tr32(TG3_CPMU_EEE_MODE);
2442 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2443 }
2444
2445 static int tg3_wait_macro_done(struct tg3 *tp)
2446 {
2447 int limit = 100;
2448
2449 while (limit--) {
2450 u32 tmp32;
2451
2452 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2453 if ((tmp32 & 0x1000) == 0)
2454 break;
2455 }
2456 }
2457 if (limit < 0)
2458 return -EBUSY;
2459
2460 return 0;
2461 }
2462
2463 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2464 {
2465 static const u32 test_pat[4][6] = {
2466 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2467 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2468 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2469 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2470 };
2471 int chan;
2472
2473 for (chan = 0; chan < 4; chan++) {
2474 int i;
2475
2476 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2477 (chan * 0x2000) | 0x0200);
2478 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2479
2480 for (i = 0; i < 6; i++)
2481 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2482 test_pat[chan][i]);
2483
2484 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2485 if (tg3_wait_macro_done(tp)) {
2486 *resetp = 1;
2487 return -EBUSY;
2488 }
2489
2490 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2491 (chan * 0x2000) | 0x0200);
2492 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2493 if (tg3_wait_macro_done(tp)) {
2494 *resetp = 1;
2495 return -EBUSY;
2496 }
2497
2498 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2499 if (tg3_wait_macro_done(tp)) {
2500 *resetp = 1;
2501 return -EBUSY;
2502 }
2503
2504 for (i = 0; i < 6; i += 2) {
2505 u32 low, high;
2506
2507 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2508 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2509 tg3_wait_macro_done(tp)) {
2510 *resetp = 1;
2511 return -EBUSY;
2512 }
2513 low &= 0x7fff;
2514 high &= 0x000f;
2515 if (low != test_pat[chan][i] ||
2516 high != test_pat[chan][i+1]) {
2517 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2518 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2519 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2520
2521 return -EBUSY;
2522 }
2523 }
2524 }
2525
2526 return 0;
2527 }
2528
2529 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2530 {
2531 int chan;
2532
2533 for (chan = 0; chan < 4; chan++) {
2534 int i;
2535
2536 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2537 (chan * 0x2000) | 0x0200);
2538 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2539 for (i = 0; i < 6; i++)
2540 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2541 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2542 if (tg3_wait_macro_done(tp))
2543 return -EBUSY;
2544 }
2545
2546 return 0;
2547 }
2548
2549 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2550 {
2551 u32 reg32, phy9_orig;
2552 int retries, do_phy_reset, err;
2553
2554 retries = 10;
2555 do_phy_reset = 1;
2556 do {
2557 if (do_phy_reset) {
2558 err = tg3_bmcr_reset(tp);
2559 if (err)
2560 return err;
2561 do_phy_reset = 0;
2562 }
2563
2564 /* Disable transmitter and interrupt. */
2565 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2566 continue;
2567
2568 reg32 |= 0x3000;
2569 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2570
2571 /* Set full-duplex, 1000 Mbps. */
2572 tg3_writephy(tp, MII_BMCR,
2573 BMCR_FULLDPLX | BMCR_SPEED1000);
2574
2575 /* Set to master mode. */
2576 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2577 continue;
2578
2579 tg3_writephy(tp, MII_CTRL1000,
2580 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2581
2582 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2583 if (err)
2584 return err;
2585
2586 /* Block the PHY control access. */
2587 tg3_phydsp_write(tp, 0x8005, 0x0800);
2588
2589 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2590 if (!err)
2591 break;
2592 } while (--retries);
2593
2594 err = tg3_phy_reset_chanpat(tp);
2595 if (err)
2596 return err;
2597
2598 tg3_phydsp_write(tp, 0x8005, 0x0000);
2599
2600 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2601 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2602
2603 tg3_phy_toggle_auxctl_smdsp(tp, false);
2604
2605 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2606
2607 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2608 if (err)
2609 return err;
2610
2611 reg32 &= ~0x3000;
2612 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2613
2614 return 0;
2615 }
2616
2617 static void tg3_carrier_off(struct tg3 *tp)
2618 {
2619 netif_carrier_off(tp->dev);
2620 tp->link_up = false;
2621 }
2622
2623 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2624 {
2625 if (tg3_flag(tp, ENABLE_ASF))
2626 netdev_warn(tp->dev,
2627 "Management side-band traffic will be interrupted during phy settings change\n");
2628 }
2629
2630 /* This will reset the tigon3 PHY. If the device is running with
2631  * a valid link, the link drop is reported before the reset.
2632  */
2633 static int tg3_phy_reset(struct tg3 *tp)
2634 {
2635 u32 val, cpmuctrl;
2636 int err;
2637
2638 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2639 val = tr32(GRC_MISC_CFG);
2640 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2641 udelay(40);
2642 }
2643 err = tg3_readphy(tp, MII_BMSR, &val);
2644 err |= tg3_readphy(tp, MII_BMSR, &val);
2645 if (err != 0)
2646 return -EBUSY;
2647
2648 if (netif_running(tp->dev) && tp->link_up) {
2649 netif_carrier_off(tp->dev);
2650 tg3_link_report(tp);
2651 }
2652
2653 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2654 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2655 tg3_asic_rev(tp) == ASIC_REV_5705) {
2656 err = tg3_phy_reset_5703_4_5(tp);
2657 if (err)
2658 return err;
2659 goto out;
2660 }
2661
2662 cpmuctrl = 0;
2663 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2664 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2665 cpmuctrl = tr32(TG3_CPMU_CTRL);
2666 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2667 tw32(TG3_CPMU_CTRL,
2668 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2669 }
2670
2671 err = tg3_bmcr_reset(tp);
2672 if (err)
2673 return err;
2674
2675 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2676 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2677 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2678
2679 tw32(TG3_CPMU_CTRL, cpmuctrl);
2680 }
2681
2682 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2683 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2684 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2685 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2686 CPMU_LSPD_1000MB_MACCLK_12_5) {
2687 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2688 udelay(40);
2689 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2690 }
2691 }
2692
2693 if (tg3_flag(tp, 5717_PLUS) &&
2694 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2695 return 0;
2696
2697 tg3_phy_apply_otp(tp);
2698
2699 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2700 tg3_phy_toggle_apd(tp, true);
2701 else
2702 tg3_phy_toggle_apd(tp, false);
2703
2704 out:
2705 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2706 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2707 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2708 tg3_phydsp_write(tp, 0x000a, 0x0323);
2709 tg3_phy_toggle_auxctl_smdsp(tp, false);
2710 }
2711
2712 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2713 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2714 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2715 }
2716
2717 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2718 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2719 tg3_phydsp_write(tp, 0x000a, 0x310b);
2720 tg3_phydsp_write(tp, 0x201f, 0x9506);
2721 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2722 tg3_phy_toggle_auxctl_smdsp(tp, false);
2723 }
2724 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2725 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2726 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2727 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2728 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2729 tg3_writephy(tp, MII_TG3_TEST1,
2730 MII_TG3_TEST1_TRIM_EN | 0x4);
2731 } else
2732 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2733
2734 tg3_phy_toggle_auxctl_smdsp(tp, false);
2735 }
2736 }
2737
2738 /* Set the extended packet length bit (bit 14) on all chips
2739  * that support jumbo frames. */
2740 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2741 /* Cannot do read-modify-write on 5401 */
2742 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2743 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2744 /* Set bit 14 with read-modify-write to preserve other bits */
2745 err = tg3_phy_auxctl_read(tp,
2746 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2747 if (!err)
2748 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2749 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2750 }
2751
2752 /* Set PHY register 0x10, bit 0, to high FIFO elasticity to support
2753  * transmission of jumbo frames.
2754  */
2755 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2756 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2757 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2758 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2759 }
2760
2761 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2762 /* adjust output voltage */
2763 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2764 }
2765
2766 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2767 tg3_phydsp_write(tp, 0xffb, 0x4000);
2768
2769 tg3_phy_toggle_automdix(tp, true);
2770 tg3_phy_set_wirespeed(tp);
2771 return 0;
2772 }
2773
2774 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2775 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2776 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2777 TG3_GPIO_MSG_NEED_VAUX)
2778 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2779 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2780 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2781 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2782 (TG3_GPIO_MSG_DRVR_PRES << 12))
2783
2784 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2785 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2786 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2787 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2788 (TG3_GPIO_MSG_NEED_VAUX << 12))
2789
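/* Each PCI function owns one 4-bit nibble of the GPIO message word,
 * hence the "<< 0 / << 4 / << 8 / << 12" replication in the masks
 * above. For example, function 2 reporting DRVR_PRES | NEED_VAUX
 * writes 0x3 into its nibble, at shift TG3_APE_GPIO_MSG_SHIFT + 4 * 2
 * in tg3_set_function_status() below.
 */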
2790 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2791 {
2792 u32 status, shift;
2793
2794 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2795 tg3_asic_rev(tp) == ASIC_REV_5719)
2796 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2797 else
2798 status = tr32(TG3_CPMU_DRV_STATUS);
2799
2800 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2801 status &= ~(TG3_GPIO_MSG_MASK << shift);
2802 status |= (newstat << shift);
2803
2804 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2805 tg3_asic_rev(tp) == ASIC_REV_5719)
2806 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2807 else
2808 tw32(TG3_CPMU_DRV_STATUS, status);
2809
2810 return status >> TG3_APE_GPIO_MSG_SHIFT;
2811 }
2812
2813 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2814 {
2815 if (!tg3_flag(tp, IS_NIC))
2816 return 0;
2817
2818 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2819 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2820 tg3_asic_rev(tp) == ASIC_REV_5720) {
2821 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2822 return -EIO;
2823
2824 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2825
2826 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2827 TG3_GRC_LCLCTL_PWRSW_DELAY);
2828
2829 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2830 } else {
2831 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2832 TG3_GRC_LCLCTL_PWRSW_DELAY);
2833 }
2834
2835 return 0;
2836 }
2837
2838 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2839 {
2840 u32 grc_local_ctrl;
2841
2842 if (!tg3_flag(tp, IS_NIC) ||
2843 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2844 tg3_asic_rev(tp) == ASIC_REV_5701)
2845 return;
2846
2847 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2848
2849 tw32_wait_f(GRC_LOCAL_CTRL,
2850 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2851 TG3_GRC_LCLCTL_PWRSW_DELAY);
2852
2853 tw32_wait_f(GRC_LOCAL_CTRL,
2854 grc_local_ctrl,
2855 TG3_GRC_LCLCTL_PWRSW_DELAY);
2856
2857 tw32_wait_f(GRC_LOCAL_CTRL,
2858 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2859 TG3_GRC_LCLCTL_PWRSW_DELAY);
2860 }
2861
2862 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2863 {
2864 if (!tg3_flag(tp, IS_NIC))
2865 return;
2866
2867 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2868 tg3_asic_rev(tp) == ASIC_REV_5701) {
2869 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2870 (GRC_LCLCTRL_GPIO_OE0 |
2871 GRC_LCLCTRL_GPIO_OE1 |
2872 GRC_LCLCTRL_GPIO_OE2 |
2873 GRC_LCLCTRL_GPIO_OUTPUT0 |
2874 GRC_LCLCTRL_GPIO_OUTPUT1),
2875 TG3_GRC_LCLCTL_PWRSW_DELAY);
2876 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2877 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2878 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2879 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2880 GRC_LCLCTRL_GPIO_OE1 |
2881 GRC_LCLCTRL_GPIO_OE2 |
2882 GRC_LCLCTRL_GPIO_OUTPUT0 |
2883 GRC_LCLCTRL_GPIO_OUTPUT1 |
2884 tp->grc_local_ctrl;
2885 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2886 TG3_GRC_LCLCTL_PWRSW_DELAY);
2887
2888 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2889 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2890 TG3_GRC_LCLCTL_PWRSW_DELAY);
2891
2892 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2893 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2894 TG3_GRC_LCLCTL_PWRSW_DELAY);
2895 } else {
2896 u32 no_gpio2;
2897 u32 grc_local_ctrl = 0;
2898
2899 /* Workaround to prevent overdrawing Amps. */
2900 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2901 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2902 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2903 grc_local_ctrl,
2904 TG3_GRC_LCLCTL_PWRSW_DELAY);
2905 }
2906
2907 /* On 5753 and variants, GPIO2 cannot be used. */
2908 no_gpio2 = tp->nic_sram_data_cfg &
2909 NIC_SRAM_DATA_CFG_NO_GPIO2;
2910
2911 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2912 GRC_LCLCTRL_GPIO_OE1 |
2913 GRC_LCLCTRL_GPIO_OE2 |
2914 GRC_LCLCTRL_GPIO_OUTPUT1 |
2915 GRC_LCLCTRL_GPIO_OUTPUT2;
2916 if (no_gpio2) {
2917 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2918 GRC_LCLCTRL_GPIO_OUTPUT2);
2919 }
2920 tw32_wait_f(GRC_LOCAL_CTRL,
2921 tp->grc_local_ctrl | grc_local_ctrl,
2922 TG3_GRC_LCLCTL_PWRSW_DELAY);
2923
2924 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2925
2926 tw32_wait_f(GRC_LOCAL_CTRL,
2927 tp->grc_local_ctrl | grc_local_ctrl,
2928 TG3_GRC_LCLCTL_PWRSW_DELAY);
2929
2930 if (!no_gpio2) {
2931 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2932 tw32_wait_f(GRC_LOCAL_CTRL,
2933 tp->grc_local_ctrl | grc_local_ctrl,
2934 TG3_GRC_LCLCTL_PWRSW_DELAY);
2935 }
2936 }
2937 }
2938
2939 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2940 {
2941 u32 msg = 0;
2942
2943 /* Serialize power state transitions */
2944 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2945 return;
2946
2947 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2948 msg = TG3_GPIO_MSG_NEED_VAUX;
2949
2950 msg = tg3_set_function_status(tp, msg);
2951
2952 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2953 goto done;
2954
2955 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2956 tg3_pwrsrc_switch_to_vaux(tp);
2957 else
2958 tg3_pwrsrc_die_with_vmain(tp);
2959
2960 done:
2961 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2962 }
2963
2964 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2965 {
2966 bool need_vaux = false;
2967
2968 /* The GPIOs do something completely different on 57765. */
2969 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2970 return;
2971
2972 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2973 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2974 tg3_asic_rev(tp) == ASIC_REV_5720) {
2975 tg3_frob_aux_power_5717(tp, include_wol ?
2976 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2977 return;
2978 }
2979
2980 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2981 struct net_device *dev_peer;
2982
2983 dev_peer = pci_get_drvdata(tp->pdev_peer);
2984
2985 /* remove_one() may have been run on the peer. */
2986 if (dev_peer) {
2987 struct tg3 *tp_peer = netdev_priv(dev_peer);
2988
2989 if (tg3_flag(tp_peer, INIT_COMPLETE))
2990 return;
2991
2992 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2993 tg3_flag(tp_peer, ENABLE_ASF))
2994 need_vaux = true;
2995 }
2996 }
2997
2998 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2999 tg3_flag(tp, ENABLE_ASF))
3000 need_vaux = true;
3001
3002 if (need_vaux)
3003 tg3_pwrsrc_switch_to_vaux(tp);
3004 else
3005 tg3_pwrsrc_die_with_vmain(tp);
3006 }
3007
3008 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3009 {
3010 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3011 return 1;
3012 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3013 if (speed != SPEED_10)
3014 return 1;
3015 } else if (speed == SPEED_10)
3016 return 1;
3017
3018 return 0;
3019 }
3020
3021 static bool tg3_phy_power_bug(struct tg3 *tp)
3022 {
3023 switch (tg3_asic_rev(tp)) {
3024 case ASIC_REV_5700:
3025 case ASIC_REV_5704:
3026 return true;
3027 case ASIC_REV_5780:
3028 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3029 return true;
3030 return false;
3031 case ASIC_REV_5717:
3032 if (!tp->pci_fn)
3033 return true;
3034 return false;
3035 case ASIC_REV_5719:
3036 case ASIC_REV_5720:
3037 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3038 !tp->pci_fn)
3039 return true;
3040 return false;
3041 }
3042
3043 return false;
3044 }
3045
3046 static bool tg3_phy_led_bug(struct tg3 *tp)
3047 {
3048 switch (tg3_asic_rev(tp)) {
3049 case ASIC_REV_5719:
3050 case ASIC_REV_5720:
3051 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3052 !tp->pci_fn)
3053 return true;
3054 return false;
3055 }
3056
3057 return false;
3058 }
3059
3060 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3061 {
3062 u32 val;
3063
3064 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3065 return;
3066
3067 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3068 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3069 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3070 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3071
3072 sg_dig_ctrl |=
3073 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3074 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3075 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3076 }
3077 return;
3078 }
3079
3080 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3081 tg3_bmcr_reset(tp);
3082 val = tr32(GRC_MISC_CFG);
3083 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3084 udelay(40);
3085 return;
3086 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3087 u32 phytest;
3088 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3089 u32 phy;
3090
3091 tg3_writephy(tp, MII_ADVERTISE, 0);
3092 tg3_writephy(tp, MII_BMCR,
3093 BMCR_ANENABLE | BMCR_ANRESTART);
3094
3095 tg3_writephy(tp, MII_TG3_FET_TEST,
3096 phytest | MII_TG3_FET_SHADOW_EN);
3097 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3098 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3099 tg3_writephy(tp,
3100 MII_TG3_FET_SHDW_AUXMODE4,
3101 phy);
3102 }
3103 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3104 }
3105 return;
3106 } else if (do_low_power) {
3107 if (!tg3_phy_led_bug(tp))
3108 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3109 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3110
3111 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3112 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3113 MII_TG3_AUXCTL_PCTL_VREG_11V;
3114 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3115 }
3116
3117 /* The PHY should not be powered down on some chips because
3118 * of bugs.
3119 */
3120 if (tg3_phy_power_bug(tp))
3121 return;
3122
3123 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3124 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3125 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3126 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3127 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3128 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3129 }
3130
3131 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3132 }
3133
3134 /* tp->lock is held. */
3135 static int tg3_nvram_lock(struct tg3 *tp)
3136 {
3137 if (tg3_flag(tp, NVRAM)) {
3138 int i;
3139
3140 if (tp->nvram_lock_cnt == 0) {
3141 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3142 for (i = 0; i < 8000; i++) {
3143 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3144 break;
3145 udelay(20);
3146 }
3147 if (i == 8000) {
3148 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3149 return -ENODEV;
3150 }
3151 }
3152 tp->nvram_lock_cnt++;
3153 }
3154 return 0;
3155 }
3156
3157 /* tp->lock is held. */
3158 static void tg3_nvram_unlock(struct tg3 *tp)
3159 {
3160 if (tg3_flag(tp, NVRAM)) {
3161 if (tp->nvram_lock_cnt > 0)
3162 tp->nvram_lock_cnt--;
3163 if (tp->nvram_lock_cnt == 0)
3164 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3165 }
3166 }
3167
3168 /* tp->lock is held. */
3169 static void tg3_enable_nvram_access(struct tg3 *tp)
3170 {
3171 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3172 u32 nvaccess = tr32(NVRAM_ACCESS);
3173
3174 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3175 }
3176 }
3177
3178 /* tp->lock is held. */
3179 static void tg3_disable_nvram_access(struct tg3 *tp)
3180 {
3181 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3182 u32 nvaccess = tr32(NVRAM_ACCESS);
3183
3184 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3185 }
3186 }
3187
3188 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3189 u32 offset, u32 *val)
3190 {
3191 u32 tmp;
3192 int i;
3193
3194 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3195 return -EINVAL;
3196
3197 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3198 EEPROM_ADDR_DEVID_MASK |
3199 EEPROM_ADDR_READ);
3200 tw32(GRC_EEPROM_ADDR,
3201 tmp |
3202 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3203 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3204 EEPROM_ADDR_ADDR_MASK) |
3205 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3206
3207 for (i = 0; i < 1000; i++) {
3208 tmp = tr32(GRC_EEPROM_ADDR);
3209
3210 if (tmp & EEPROM_ADDR_COMPLETE)
3211 break;
3212 msleep(1);
3213 }
3214 if (!(tmp & EEPROM_ADDR_COMPLETE))
3215 return -EBUSY;
3216
3217 tmp = tr32(GRC_EEPROM_DATA);
3218
3219 /*
3220 * The data will always be opposite the native endian
3221 * format. Perform a blind byteswap to compensate.
3222 */
3223 *val = swab32(tmp);
3224
3225 return 0;
3226 }
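/* Illustration of the blind byteswap above: the value read from
 * GRC_EEPROM_DATA is byte-reversed relative to host order on either
 * endianness, so the unconditional swab32() (e.g. 0x78563412 becomes
 * 0x12345678) always recovers the value the rest of the driver
 * expects.
 */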
3227
3228 #define NVRAM_CMD_TIMEOUT 10000
3229
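/* Worst case, tg3_nvram_exec_cmd() below polls NVRAM_CMD_TIMEOUT
 * times at 10-40 usec per iteration, i.e. roughly 0.1 to 0.4 seconds,
 * before giving up with -EBUSY.
 */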
3230 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3231 {
3232 int i;
3233
3234 tw32(NVRAM_CMD, nvram_cmd);
3235 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3236 usleep_range(10, 40);
3237 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3238 udelay(10);
3239 break;
3240 }
3241 }
3242
3243 if (i == NVRAM_CMD_TIMEOUT)
3244 return -EBUSY;
3245
3246 return 0;
3247 }
3248
3249 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3250 {
3251 if (tg3_flag(tp, NVRAM) &&
3252 tg3_flag(tp, NVRAM_BUFFERED) &&
3253 tg3_flag(tp, FLASH) &&
3254 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3255 (tp->nvram_jedecnum == JEDEC_ATMEL))
3256
3257 addr = ((addr / tp->nvram_pagesize) <<
3258 ATMEL_AT45DB0X1B_PAGE_POS) +
3259 (addr % tp->nvram_pagesize);
3260
3261 return addr;
3262 }
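/* Worked example of the Atmel AT45DB0x1B translation above, assuming
 * the part's usual 264-byte page size: linear address 1000 falls in
 * page 3 (1000 / 264) at offset 208 (1000 % 264), so the physical
 * address becomes (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208.
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */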
3263
3264 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3265 {
3266 if (tg3_flag(tp, NVRAM) &&
3267 tg3_flag(tp, NVRAM_BUFFERED) &&
3268 tg3_flag(tp, FLASH) &&
3269 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3270 (tp->nvram_jedecnum == JEDEC_ATMEL))
3271
3272 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3273 tp->nvram_pagesize) +
3274 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3275
3276 return addr;
3277 }
3278
3279 /* NOTE: Data read in from NVRAM is byteswapped according to
3280 * the byteswapping settings for all other register accesses.
3281 * tg3 devices are BE devices, so on a BE machine, the data
3282 * returned will be exactly as it is seen in NVRAM. On a LE
3283 * machine, the 32-bit value will be byteswapped.
3284 */
3285 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3286 {
3287 int ret;
3288
3289 if (!tg3_flag(tp, NVRAM))
3290 return tg3_nvram_read_using_eeprom(tp, offset, val);
3291
3292 offset = tg3_nvram_phys_addr(tp, offset);
3293
3294 if (offset > NVRAM_ADDR_MSK)
3295 return -EINVAL;
3296
3297 ret = tg3_nvram_lock(tp);
3298 if (ret)
3299 return ret;
3300
3301 tg3_enable_nvram_access(tp);
3302
3303 tw32(NVRAM_ADDR, offset);
3304 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3305 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3306
3307 if (ret == 0)
3308 *val = tr32(NVRAM_RDDATA);
3309
3310 tg3_disable_nvram_access(tp);
3311
3312 tg3_nvram_unlock(tp);
3313
3314 return ret;
3315 }
3316
3317 /* Ensures NVRAM data is in bytestream format. */
3318 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3319 {
3320 u32 v;
3321 int res = tg3_nvram_read(tp, offset, &v);
3322 if (!res)
3323 *val = cpu_to_be32(v);
3324 return res;
3325 }
3326
3327 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3328 u32 offset, u32 len, u8 *buf)
3329 {
3330 int i, j, rc = 0;
3331 u32 val;
3332
3333 for (i = 0; i < len; i += 4) {
3334 u32 addr;
3335 __be32 data;
3336
3337 addr = offset + i;
3338
3339 memcpy(&data, buf + i, 4);
3340
3341 /*
3342 * The SEEPROM interface expects the data to always be opposite
3343 * the native endian format. We accomplish this by reversing
3344 * all the operations that would have been performed on the
3345 * data from a call to tg3_nvram_read_be32().
3346 */
3347 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3348
3349 val = tr32(GRC_EEPROM_ADDR);
3350 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3351
3352 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3353 EEPROM_ADDR_READ);
3354 tw32(GRC_EEPROM_ADDR, val |
3355 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3356 (addr & EEPROM_ADDR_ADDR_MASK) |
3357 EEPROM_ADDR_START |
3358 EEPROM_ADDR_WRITE);
3359
3360 for (j = 0; j < 1000; j++) {
3361 val = tr32(GRC_EEPROM_ADDR);
3362
3363 if (val & EEPROM_ADDR_COMPLETE)
3364 break;
3365 msleep(1);
3366 }
3367 if (!(val & EEPROM_ADDR_COMPLETE)) {
3368 rc = -EBUSY;
3369 break;
3370 }
3371 }
3372
3373 return rc;
3374 }
3375
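/* Unbuffered flash parts can only be programmed one full page at a
 * time, so the routine below reads back each affected page, merges in
 * the new bytes, erases the page, and then rewrites it word by word.
 */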
3376 /* offset and length are dword aligned */
3377 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3378 u8 *buf)
3379 {
3380 int ret = 0;
3381 u32 pagesize = tp->nvram_pagesize;
3382 u32 pagemask = pagesize - 1;
3383 u32 nvram_cmd;
3384 u8 *tmp;
3385
3386 tmp = kmalloc(pagesize, GFP_KERNEL);
3387 if (tmp == NULL)
3388 return -ENOMEM;
3389
3390 while (len) {
3391 int j;
3392 u32 phy_addr, page_off, size;
3393
3394 phy_addr = offset & ~pagemask;
3395
3396 for (j = 0; j < pagesize; j += 4) {
3397 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3398 (__be32 *) (tmp + j));
3399 if (ret)
3400 break;
3401 }
3402 if (ret)
3403 break;
3404
3405 page_off = offset & pagemask;
3406 size = pagesize;
3407 if (len < size)
3408 size = len;
3409
3410 len -= size;
3411
3412 memcpy(tmp + page_off, buf, size);
3413
3414 offset = offset + (pagesize - page_off);
3415
3416 tg3_enable_nvram_access(tp);
3417
3418 /*
3419 * Before we can erase the flash page, we need
3420 * to issue a special "write enable" command.
3421 */
3422 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3423
3424 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3425 break;
3426
3427 /* Erase the target page */
3428 tw32(NVRAM_ADDR, phy_addr);
3429
3430 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3431 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3432
3433 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3434 break;
3435
3436 /* Issue another write enable to start the write. */
3437 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3438
3439 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3440 break;
3441
3442 for (j = 0; j < pagesize; j += 4) {
3443 __be32 data;
3444
3445 data = *((__be32 *) (tmp + j));
3446
3447 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3448
3449 tw32(NVRAM_ADDR, phy_addr + j);
3450
3451 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3452 NVRAM_CMD_WR;
3453
3454 if (j == 0)
3455 nvram_cmd |= NVRAM_CMD_FIRST;
3456 else if (j == (pagesize - 4))
3457 nvram_cmd |= NVRAM_CMD_LAST;
3458
3459 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3460 if (ret)
3461 break;
3462 }
3463 if (ret)
3464 break;
3465 }
3466
3467 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3468 tg3_nvram_exec_cmd(tp, nvram_cmd);
3469
3470 kfree(tmp);
3471
3472 return ret;
3473 }
3474
3475 /* offset and length are dword aligned */
3476 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3477 u8 *buf)
3478 {
3479 int i, ret = 0;
3480
3481 for (i = 0; i < len; i += 4, offset += 4) {
3482 u32 page_off, phy_addr, nvram_cmd;
3483 __be32 data;
3484
3485 memcpy(&data, buf + i, 4);
3486 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3487
3488 page_off = offset % tp->nvram_pagesize;
3489
3490 phy_addr = tg3_nvram_phys_addr(tp, offset);
3491
3492 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3493
3494 if (page_off == 0 || i == 0)
3495 nvram_cmd |= NVRAM_CMD_FIRST;
3496 if (page_off == (tp->nvram_pagesize - 4))
3497 nvram_cmd |= NVRAM_CMD_LAST;
3498
3499 if (i == (len - 4))
3500 nvram_cmd |= NVRAM_CMD_LAST;
3501
3502 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3503 !tg3_flag(tp, FLASH) ||
3504 !tg3_flag(tp, 57765_PLUS))
3505 tw32(NVRAM_ADDR, phy_addr);
3506
3507 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3508 !tg3_flag(tp, 5755_PLUS) &&
3509 (tp->nvram_jedecnum == JEDEC_ST) &&
3510 (nvram_cmd & NVRAM_CMD_FIRST)) {
3511 u32 cmd;
3512
3513 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3514 ret = tg3_nvram_exec_cmd(tp, cmd);
3515 if (ret)
3516 break;
3517 }
3518 if (!tg3_flag(tp, FLASH)) {
3519 /* We always do complete word writes to eeprom. */
3520 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3521 }
3522
3523 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3524 if (ret)
3525 break;
3526 }
3527 return ret;
3528 }
3529
3530 /* offset and length are dword aligned */
3531 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3532 {
3533 int ret;
3534
3535 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3536 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3537 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3538 udelay(40);
3539 }
3540
3541 if (!tg3_flag(tp, NVRAM)) {
3542 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3543 } else {
3544 u32 grc_mode;
3545
3546 ret = tg3_nvram_lock(tp);
3547 if (ret)
3548 return ret;
3549
3550 tg3_enable_nvram_access(tp);
3551 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3552 tw32(NVRAM_WRITE1, 0x406);
3553
3554 grc_mode = tr32(GRC_MODE);
3555 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3556
3557 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3558 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3559 buf);
3560 } else {
3561 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3562 buf);
3563 }
3564
3565 grc_mode = tr32(GRC_MODE);
3566 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3567
3568 tg3_disable_nvram_access(tp);
3569 tg3_nvram_unlock(tp);
3570 }
3571
3572 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3573 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3574 udelay(40);
3575 }
3576
3577 return ret;
3578 }
3579
3580 #define RX_CPU_SCRATCH_BASE 0x30000
3581 #define RX_CPU_SCRATCH_SIZE 0x04000
3582 #define TX_CPU_SCRATCH_BASE 0x34000
3583 #define TX_CPU_SCRATCH_SIZE 0x04000
3584
3585 /* tp->lock is held. */
3586 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3587 {
3588 int i;
3589 const int iters = 10000;
3590
3591 for (i = 0; i < iters; i++) {
3592 tw32(cpu_base + CPU_STATE, 0xffffffff);
3593 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3594 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3595 break;
3596 if (pci_channel_offline(tp->pdev))
3597 return -EBUSY;
3598 }
3599
3600 return (i == iters) ? -EBUSY : 0;
3601 }
3602
3603 /* tp->lock is held. */
3604 static int tg3_rxcpu_pause(struct tg3 *tp)
3605 {
3606 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3607
3608 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3609 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3610 udelay(10);
3611
3612 return rc;
3613 }
3614
3615 /* tp->lock is held. */
3616 static int tg3_txcpu_pause(struct tg3 *tp)
3617 {
3618 return tg3_pause_cpu(tp, TX_CPU_BASE);
3619 }
3620
3621 /* tp->lock is held. */
3622 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3623 {
3624 tw32(cpu_base + CPU_STATE, 0xffffffff);
3625 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3626 }
3627
3628 /* tp->lock is held. */
3629 static void tg3_rxcpu_resume(struct tg3 *tp)
3630 {
3631 tg3_resume_cpu(tp, RX_CPU_BASE);
3632 }
3633
3634 /* tp->lock is held. */
3635 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3636 {
3637 int rc;
3638
3639 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3640
3641 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3642 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3643
3644 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3645 return 0;
3646 }
3647 if (cpu_base == RX_CPU_BASE) {
3648 rc = tg3_rxcpu_pause(tp);
3649 } else {
3650 /*
3651 * There is only an Rx CPU for the 5750 derivative in the
3652 * BCM4785.
3653 */
3654 if (tg3_flag(tp, IS_SSB_CORE))
3655 return 0;
3656
3657 rc = tg3_txcpu_pause(tp);
3658 }
3659
3660 if (rc) {
3661 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3662 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3663 return -ENODEV;
3664 }
3665
3666 /* Clear firmware's nvram arbitration. */
3667 if (tg3_flag(tp, NVRAM))
3668 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3669 return 0;
3670 }
3671
3672 static int tg3_fw_data_len(struct tg3 *tp,
3673 const struct tg3_firmware_hdr *fw_hdr)
3674 {
3675 int fw_len;
3676
3677 /* Non-fragmented firmware has one firmware header followed by a
3678 * contiguous chunk of data to be written. The length field in that
3679 * header is not the length of the data to be written but the
3680 * complete length of the bss. The data length is determined from
3681 * tp->fw->size minus the headers.
3682 *
3683 * Fragmented firmware has a main header followed by multiple
3684 * fragments. Each fragment is identical to non-fragmented firmware:
3685 * a firmware header followed by a contiguous chunk of data. In
3686 * the main header, the length field is unused and set to 0xffffffff.
3687 * In each fragment header the length is the entire size of that
3688 * fragment, i.e. fragment data plus header length. The data length
3689 * is therefore the length field in the header minus TG3_FW_HDR_LEN.
3690 */
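/* Example: a non-fragmented image with tp->fw->size = 0x1000 carries
 * (0x1000 - TG3_FW_HDR_LEN) / sizeof(u32) data words, while a
 * fragment whose header reports len = 0x800 carries
 * (0x800 - TG3_FW_HDR_LEN) / sizeof(u32) words.
 */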
3691 if (tp->fw_len == 0xffffffff)
3692 fw_len = be32_to_cpu(fw_hdr->len);
3693 else
3694 fw_len = tp->fw->size;
3695
3696 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3697 }
3698
3699 /* tp->lock is held. */
3700 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3701 u32 cpu_scratch_base, int cpu_scratch_size,
3702 const struct tg3_firmware_hdr *fw_hdr)
3703 {
3704 int err, i;
3705 void (*write_op)(struct tg3 *, u32, u32);
3706 int total_len = tp->fw->size;
3707
3708 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3709 netdev_err(tp->dev,
3710 "%s: Trying to load TX cpu firmware on a 5705-class chip\n",
3711 __func__);
3712 return -EINVAL;
3713 }
3714
3715 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3716 write_op = tg3_write_mem;
3717 else
3718 write_op = tg3_write_indirect_reg32;
3719
3720 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3721 /* It is possible that bootcode is still loading at this point.
3722 * Get the nvram lock before halting the cpu.
3723 */
3724 int lock_err = tg3_nvram_lock(tp);
3725 err = tg3_halt_cpu(tp, cpu_base);
3726 if (!lock_err)
3727 tg3_nvram_unlock(tp);
3728 if (err)
3729 goto out;
3730
3731 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3732 write_op(tp, cpu_scratch_base + i, 0);
3733 tw32(cpu_base + CPU_STATE, 0xffffffff);
3734 tw32(cpu_base + CPU_MODE,
3735 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3736 } else {
3737 /* Subtract additional main header for fragmented firmware and
3738 * advance to the first fragment
3739 */
3740 total_len -= TG3_FW_HDR_LEN;
3741 fw_hdr++;
3742 }
3743
3744 do {
3745 u32 *fw_data = (u32 *)(fw_hdr + 1);
3746 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3747 write_op(tp, cpu_scratch_base +
3748 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3749 (i * sizeof(u32)),
3750 be32_to_cpu(fw_data[i]));
3751
3752 total_len -= be32_to_cpu(fw_hdr->len);
3753
3754 /* Advance to next fragment */
3755 fw_hdr = (struct tg3_firmware_hdr *)
3756 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3757 } while (total_len > 0);
3758
3759 err = 0;
3760
3761 out:
3762 return err;
3763 }
3764
3765 /* tp->lock is held. */
3766 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3767 {
3768 int i;
3769 const int iters = 5;
3770
3771 tw32(cpu_base + CPU_STATE, 0xffffffff);
3772 tw32_f(cpu_base + CPU_PC, pc);
3773
3774 for (i = 0; i < iters; i++) {
3775 if (tr32(cpu_base + CPU_PC) == pc)
3776 break;
3777 tw32(cpu_base + CPU_STATE, 0xffffffff);
3778 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3779 tw32_f(cpu_base + CPU_PC, pc);
3780 udelay(1000);
3781 }
3782
3783 return (i == iters) ? -EBUSY : 0;
3784 }
3785
3786 /* tp->lock is held. */
3787 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3788 {
3789 const struct tg3_firmware_hdr *fw_hdr;
3790 int err;
3791
3792 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3793
3794 /* Firmware blob starts with version numbers, followed by
3795 start address and length. We are setting complete length.
3796 length = end_address_of_bss - start_address_of_text.
3797 Remainder is the blob to be loaded contiguously
3798 from start address. */
3799
3800 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3801 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3802 fw_hdr);
3803 if (err)
3804 return err;
3805
3806 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3807 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3808 fw_hdr);
3809 if (err)
3810 return err;
3811
3812 /* Now startup only the RX cpu. */
3813 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3814 be32_to_cpu(fw_hdr->base_addr));
3815 if (err) {
3816 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3817 "should be %08x\n", __func__,
3818 tr32(RX_CPU_BASE + CPU_PC),
3819 be32_to_cpu(fw_hdr->base_addr));
3820 return -ENODEV;
3821 }
3822
3823 tg3_rxcpu_resume(tp);
3824
3825 return 0;
3826 }
3827
3828 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3829 {
3830 const int iters = 1000;
3831 int i;
3832 u32 val;
3833
3834 /* Wait for boot code to complete initialization and enter service
3835 * loop. It is then safe to download service patches
3836 */
3837 for (i = 0; i < iters; i++) {
3838 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3839 break;
3840
3841 udelay(10);
3842 }
3843
3844 if (i == iters) {
3845 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3846 return -EBUSY;
3847 }
3848
3849 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3850 if (val & 0xff) {
3851 netdev_warn(tp->dev,
3852 "Other patches exist. Not downloading EEE patch\n");
3853 return -EEXIST;
3854 }
3855
3856 return 0;
3857 }
3858
3859 /* tp->lock is held. */
3860 static void tg3_load_57766_firmware(struct tg3 *tp)
3861 {
3862 struct tg3_firmware_hdr *fw_hdr;
3863
3864 if (!tg3_flag(tp, NO_NVRAM))
3865 return;
3866
3867 if (tg3_validate_rxcpu_state(tp))
3868 return;
3869
3870 if (!tp->fw)
3871 return;
3872
3873 /* This firmware blob has a different format than older firmware
3874 * releases as given below. The main difference is we have fragmented
3875 * data to be written to non-contiguous locations.
3876 *
3877 * In the beginning we have a firmware header identical to other
3878 * firmware which consists of version, base addr and length. The length
3879 * here is unused and set to 0xffffffff.
3880 *
3881 * This is followed by a series of firmware fragments, each of which
3882 * is individually identical to previous firmware, i.e. a firmware
3883 * header followed by the data for that fragment. The version field
3884 * of the individual fragment header is unused.
3885 */
3886
3887 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3888 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3889 return;
3890
3891 if (tg3_rxcpu_pause(tp))
3892 return;
3893
3894 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3895 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3896
3897 tg3_rxcpu_resume(tp);
3898 }
3899
3900 /* tp->lock is held. */
3901 static int tg3_load_tso_firmware(struct tg3 *tp)
3902 {
3903 const struct tg3_firmware_hdr *fw_hdr;
3904 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3905 int err;
3906
3907 if (!tg3_flag(tp, FW_TSO))
3908 return 0;
3909
3910 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3911
3912 /* Firmware blob starts with version numbers, followed by
3913 start address and length. We are setting complete length.
3914 length = end_address_of_bss - start_address_of_text.
3915 Remainder is the blob to be loaded contiguously
3916 from start address. */
3917
3918 cpu_scratch_size = tp->fw_len;
3919
3920 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3921 cpu_base = RX_CPU_BASE;
3922 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3923 } else {
3924 cpu_base = TX_CPU_BASE;
3925 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3926 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3927 }
3928
3929 err = tg3_load_firmware_cpu(tp, cpu_base,
3930 cpu_scratch_base, cpu_scratch_size,
3931 fw_hdr);
3932 if (err)
3933 return err;
3934
3935 /* Now startup the cpu. */
3936 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3937 be32_to_cpu(fw_hdr->base_addr));
3938 if (err) {
3939 netdev_err(tp->dev,
3940 "%s fails to set CPU PC, is %08x should be %08x\n",
3941 __func__, tr32(cpu_base + CPU_PC),
3942 be32_to_cpu(fw_hdr->base_addr));
3943 return -ENODEV;
3944 }
3945
3946 tg3_resume_cpu(tp, cpu_base);
3947 return 0;
3948 }
3949
3950 /* tp->lock is held. */
3951 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3952 {
3953 u32 addr_high, addr_low;
3954
3955 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3956 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3957 (mac_addr[4] << 8) | mac_addr[5]);
3958
3959 if (index < 4) {
3960 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3961 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3962 } else {
3963 index -= 4;
3964 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3965 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3966 }
3967 }
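
/*
 * Worked example (illustrative only, values not taken from the source):
 * for MAC address 00:11:22:33:44:55 the helper above packs
 * addr_high = 0x00000011 (the two high-order octets) and
 * addr_low = 0x22334455 (the remaining four octets), then writes them
 * into the MAC_ADDR_n or MAC_EXTADDR_n register pair selected by index.
 */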
3968
3969 /* tp->lock is held. */
3970 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3971 {
3972 u32 addr_high;
3973 int i;
3974
3975 for (i = 0; i < 4; i++) {
3976 if (i == 1 && skip_mac_1)
3977 continue;
3978 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3979 }
3980
3981 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3982 tg3_asic_rev(tp) == ASIC_REV_5704) {
3983 for (i = 4; i < 16; i++)
3984 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3985 }
3986
3987 addr_high = (tp->dev->dev_addr[0] +
3988 tp->dev->dev_addr[1] +
3989 tp->dev->dev_addr[2] +
3990 tp->dev->dev_addr[3] +
3991 tp->dev->dev_addr[4] +
3992 tp->dev->dev_addr[5]) &
3993 TX_BACKOFF_SEED_MASK;
3994 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3995 }
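
/*
 * Note (a sketch of the arithmetic above): the TX backoff seed is just
 * the byte-sum of the station address masked by TX_BACKOFF_SEED_MASK.
 * For 00:11:22:33:44:55 the sum is 0x00 + 0x11 + ... + 0x55 = 0xff.
 */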
3996
3997 static void tg3_enable_register_access(struct tg3 *tp)
3998 {
3999 /*
4000 * Make sure register accesses (indirect or otherwise) will function
4001 * correctly.
4002 */
4003 pci_write_config_dword(tp->pdev,
4004 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4005 }
4006
4007 static int tg3_power_up(struct tg3 *tp)
4008 {
4009 int err;
4010
4011 tg3_enable_register_access(tp);
4012
4013 err = pci_set_power_state(tp->pdev, PCI_D0);
4014 if (!err) {
4015 /* Switch out of Vaux if it is a NIC */
4016 tg3_pwrsrc_switch_to_vmain(tp);
4017 } else {
4018 netdev_err(tp->dev, "Transition to D0 failed\n");
4019 }
4020
4021 return err;
4022 }
4023
4024 static int tg3_setup_phy(struct tg3 *, bool);
4025
4026 static int tg3_power_down_prepare(struct tg3 *tp)
4027 {
4028 u32 misc_host_ctrl;
4029 bool device_should_wake, do_low_power;
4030
4031 tg3_enable_register_access(tp);
4032
4033 /* Restore the CLKREQ setting. */
4034 if (tg3_flag(tp, CLKREQ_BUG))
4035 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4036 PCI_EXP_LNKCTL_CLKREQ_EN);
4037
4038 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4039 tw32(TG3PCI_MISC_HOST_CTRL,
4040 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4041
4042 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4043 tg3_flag(tp, WOL_ENABLE);
4044
4045 if (tg3_flag(tp, USE_PHYLIB)) {
4046 do_low_power = false;
4047 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4048 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4049 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4050 struct phy_device *phydev;
4051 u32 phyid;
4052
4053 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4054
4055 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4056
4057 tp->link_config.speed = phydev->speed;
4058 tp->link_config.duplex = phydev->duplex;
4059 tp->link_config.autoneg = phydev->autoneg;
4060 ethtool_convert_link_mode_to_legacy_u32(
4061 &tp->link_config.advertising,
4062 phydev->advertising);
4063
4064 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4065 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4066 advertising);
4067 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4068 advertising);
4069 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4070 advertising);
4071
4072 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4073 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4074 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4075 advertising);
4076 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4077 advertising);
4078 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4079 advertising);
4080 } else {
4081 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4082 advertising);
4083 }
4084 }
4085
4086 linkmode_copy(phydev->advertising, advertising);
4087 phy_start_aneg(phydev);
4088
4089 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4090 if (phyid != PHY_ID_BCMAC131) {
4091 phyid &= PHY_BCM_OUI_MASK;
4092 if (phyid == PHY_BCM_OUI_1 ||
4093 phyid == PHY_BCM_OUI_2 ||
4094 phyid == PHY_BCM_OUI_3)
4095 do_low_power = true;
4096 }
4097 }
4098 } else {
4099 do_low_power = true;
4100
4101 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4102 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4103
4104 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4105 tg3_setup_phy(tp, false);
4106 }
4107
4108 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4109 u32 val;
4110
4111 val = tr32(GRC_VCPU_EXT_CTRL);
4112 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4113 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4114 int i;
4115 u32 val;
4116
4117 for (i = 0; i < 200; i++) {
4118 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4119 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4120 break;
4121 msleep(1);
4122 }
4123 }
4124 if (tg3_flag(tp, WOL_CAP))
4125 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4126 WOL_DRV_STATE_SHUTDOWN |
4127 WOL_DRV_WOL |
4128 WOL_SET_MAGIC_PKT);
4129
4130 if (device_should_wake) {
4131 u32 mac_mode;
4132
4133 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4134 if (do_low_power &&
4135 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4136 tg3_phy_auxctl_write(tp,
4137 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4138 MII_TG3_AUXCTL_PCTL_WOL_EN |
4139 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4140 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4141 udelay(40);
4142 }
4143
4144 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4145 mac_mode = MAC_MODE_PORT_MODE_GMII;
4146 else if (tp->phy_flags &
4147 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4148 if (tp->link_config.active_speed == SPEED_1000)
4149 mac_mode = MAC_MODE_PORT_MODE_GMII;
4150 else
4151 mac_mode = MAC_MODE_PORT_MODE_MII;
4152 } else
4153 mac_mode = MAC_MODE_PORT_MODE_MII;
4154
4155 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4156 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4157 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4158 SPEED_100 : SPEED_10;
4159 if (tg3_5700_link_polarity(tp, speed))
4160 mac_mode |= MAC_MODE_LINK_POLARITY;
4161 else
4162 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4163 }
4164 } else {
4165 mac_mode = MAC_MODE_PORT_MODE_TBI;
4166 }
4167
4168 if (!tg3_flag(tp, 5750_PLUS))
4169 tw32(MAC_LED_CTRL, tp->led_ctrl);
4170
4171 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4172 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4173 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4174 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4175
4176 if (tg3_flag(tp, ENABLE_APE))
4177 mac_mode |= MAC_MODE_APE_TX_EN |
4178 MAC_MODE_APE_RX_EN |
4179 MAC_MODE_TDE_ENABLE;
4180
4181 tw32_f(MAC_MODE, mac_mode);
4182 udelay(100);
4183
4184 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4185 udelay(10);
4186 }
4187
4188 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4189 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4190 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4191 u32 base_val;
4192
4193 base_val = tp->pci_clock_ctrl;
4194 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4195 CLOCK_CTRL_TXCLK_DISABLE);
4196
4197 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4198 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4199 } else if (tg3_flag(tp, 5780_CLASS) ||
4200 tg3_flag(tp, CPMU_PRESENT) ||
4201 tg3_asic_rev(tp) == ASIC_REV_5906) {
4202 /* do nothing */
4203 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4204 u32 newbits1, newbits2;
4205
4206 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4207 tg3_asic_rev(tp) == ASIC_REV_5701) {
4208 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4209 CLOCK_CTRL_TXCLK_DISABLE |
4210 CLOCK_CTRL_ALTCLK);
4211 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4212 } else if (tg3_flag(tp, 5705_PLUS)) {
4213 newbits1 = CLOCK_CTRL_625_CORE;
4214 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4215 } else {
4216 newbits1 = CLOCK_CTRL_ALTCLK;
4217 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4218 }
4219
4220 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4221 40);
4222
4223 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4224 40);
4225
4226 if (!tg3_flag(tp, 5705_PLUS)) {
4227 u32 newbits3;
4228
4229 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4230 tg3_asic_rev(tp) == ASIC_REV_5701) {
4231 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4232 CLOCK_CTRL_TXCLK_DISABLE |
4233 CLOCK_CTRL_44MHZ_CORE);
4234 } else {
4235 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4236 }
4237
4238 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4239 tp->pci_clock_ctrl | newbits3, 40);
4240 }
4241 }
4242
4243 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4244 tg3_power_down_phy(tp, do_low_power);
4245
4246 tg3_frob_aux_power(tp, true);
4247
4248 /* Workaround for unstable PLL clock */
4249 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4250 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4251 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4252 u32 val = tr32(0x7d00);
4253
4254 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4255 tw32(0x7d00, val);
4256 if (!tg3_flag(tp, ENABLE_ASF)) {
4257 int err;
4258
4259 err = tg3_nvram_lock(tp);
4260 tg3_halt_cpu(tp, RX_CPU_BASE);
4261 if (!err)
4262 tg3_nvram_unlock(tp);
4263 }
4264 }
4265
4266 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4267
4268 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4269
4270 return 0;
4271 }
4272
4273 static void tg3_power_down(struct tg3 *tp)
4274 {
4275 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4276 pci_set_power_state(tp->pdev, PCI_D3hot);
4277 }
4278
4279 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4280 {
4281 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4282 case MII_TG3_AUX_STAT_10HALF:
4283 *speed = SPEED_10;
4284 *duplex = DUPLEX_HALF;
4285 break;
4286
4287 case MII_TG3_AUX_STAT_10FULL:
4288 *speed = SPEED_10;
4289 *duplex = DUPLEX_FULL;
4290 break;
4291
4292 case MII_TG3_AUX_STAT_100HALF:
4293 *speed = SPEED_100;
4294 *duplex = DUPLEX_HALF;
4295 break;
4296
4297 case MII_TG3_AUX_STAT_100FULL:
4298 *speed = SPEED_100;
4299 *duplex = DUPLEX_FULL;
4300 break;
4301
4302 case MII_TG3_AUX_STAT_1000HALF:
4303 *speed = SPEED_1000;
4304 *duplex = DUPLEX_HALF;
4305 break;
4306
4307 case MII_TG3_AUX_STAT_1000FULL:
4308 *speed = SPEED_1000;
4309 *duplex = DUPLEX_FULL;
4310 break;
4311
4312 default:
4313 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4314 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4315 SPEED_10;
4316 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4317 DUPLEX_HALF;
4318 break;
4319 }
4320 *speed = SPEED_UNKNOWN;
4321 *duplex = DUPLEX_UNKNOWN;
4322 break;
4323 }
4324 }
4325
4326 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4327 {
4328 int err = 0;
4329 u32 val, new_adv;
4330
4331 new_adv = ADVERTISE_CSMA;
4332 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4333 new_adv |= mii_advertise_flowctrl(flowctrl);
4334
4335 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4336 if (err)
4337 goto done;
4338
4339 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4340 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4341
4342 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4343 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4344 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4345
4346 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4347 if (err)
4348 goto done;
4349 }
4350
4351 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4352 goto done;
4353
4354 tw32(TG3_CPMU_EEE_MODE,
4355 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4356
4357 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4358 if (!err) {
4359 u32 err2;
4360
4361 val = 0;
4362 /* Advertise 100-BaseTX EEE ability */
4363 if (advertise & ADVERTISED_100baseT_Full)
4364 val |= MDIO_AN_EEE_ADV_100TX;
4365 /* Advertise 1000-BaseT EEE ability */
4366 if (advertise & ADVERTISED_1000baseT_Full)
4367 val |= MDIO_AN_EEE_ADV_1000T;
4368
4369 if (!tp->eee.eee_enabled) {
4370 val = 0;
4371 tp->eee.advertised = 0;
4372 } else {
4373 tp->eee.advertised = advertise &
4374 (ADVERTISED_100baseT_Full |
4375 ADVERTISED_1000baseT_Full);
4376 }
4377
4378 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4379 if (err)
4380 val = 0;
4381
4382 switch (tg3_asic_rev(tp)) {
4383 case ASIC_REV_5717:
4384 case ASIC_REV_57765:
4385 case ASIC_REV_57766:
4386 case ASIC_REV_5719:
4387 /* If we advertised any eee advertisements above... */
4388 if (val)
4389 val = MII_TG3_DSP_TAP26_ALNOKO |
4390 MII_TG3_DSP_TAP26_RMRXSTO |
4391 MII_TG3_DSP_TAP26_OPCSINPT;
4392 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4393 fallthrough;
4394 case ASIC_REV_5720:
4395 case ASIC_REV_5762:
4396 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4397 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4398 MII_TG3_DSP_CH34TP2_HIBW01);
4399 }
4400
4401 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4402 if (!err)
4403 err = err2;
4404 }
4405
4406 done:
4407 return err;
4408 }
4409
4410 static void tg3_phy_copper_begin(struct tg3 *tp)
4411 {
4412 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4413 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4414 u32 adv, fc;
4415
4416 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4417 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4418 adv = ADVERTISED_10baseT_Half |
4419 ADVERTISED_10baseT_Full;
4420 if (tg3_flag(tp, WOL_SPEED_100MB))
4421 adv |= ADVERTISED_100baseT_Half |
4422 ADVERTISED_100baseT_Full;
4423 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4424 if (!(tp->phy_flags &
4425 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4426 adv |= ADVERTISED_1000baseT_Half;
4427 adv |= ADVERTISED_1000baseT_Full;
4428 }
4429
4430 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4431 } else {
4432 adv = tp->link_config.advertising;
4433 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4434 adv &= ~(ADVERTISED_1000baseT_Half |
4435 ADVERTISED_1000baseT_Full);
4436
4437 fc = tp->link_config.flowctrl;
4438 }
4439
4440 tg3_phy_autoneg_cfg(tp, adv, fc);
4441
4442 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4443 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4444 /* Normally during power down we want to autonegotiate
4445 * the lowest possible speed for WOL. However, to avoid
4446 * link flap, we leave it untouched.
4447 */
4448 return;
4449 }
4450
4451 tg3_writephy(tp, MII_BMCR,
4452 BMCR_ANENABLE | BMCR_ANRESTART);
4453 } else {
4454 int i;
4455 u32 bmcr, orig_bmcr;
4456
4457 tp->link_config.active_speed = tp->link_config.speed;
4458 tp->link_config.active_duplex = tp->link_config.duplex;
4459
4460 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4461 /* With autoneg disabled, 5715 only links up when the
4462 * advertisement register has the configured speed
4463 * enabled.
4464 */
4465 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4466 }
4467
4468 bmcr = 0;
4469 switch (tp->link_config.speed) {
4470 default:
4471 case SPEED_10:
4472 break;
4473
4474 case SPEED_100:
4475 bmcr |= BMCR_SPEED100;
4476 break;
4477
4478 case SPEED_1000:
4479 bmcr |= BMCR_SPEED1000;
4480 break;
4481 }
4482
4483 if (tp->link_config.duplex == DUPLEX_FULL)
4484 bmcr |= BMCR_FULLDPLX;
4485
4486 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4487 (bmcr != orig_bmcr)) {
4488 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4489 for (i = 0; i < 1500; i++) {
4490 u32 tmp;
4491
4492 udelay(10);
4493 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4494 tg3_readphy(tp, MII_BMSR, &tmp))
4495 continue;
4496 if (!(tmp & BMSR_LSTATUS)) {
4497 udelay(40);
4498 break;
4499 }
4500 }
4501 tg3_writephy(tp, MII_BMCR, bmcr);
4502 udelay(40);
4503 }
4504 }
4505 }
4506
4507 static int tg3_phy_pull_config(struct tg3 *tp)
4508 {
4509 int err;
4510 u32 val;
4511
4512 err = tg3_readphy(tp, MII_BMCR, &val);
4513 if (err)
4514 goto done;
4515
4516 if (!(val & BMCR_ANENABLE)) {
4517 tp->link_config.autoneg = AUTONEG_DISABLE;
4518 tp->link_config.advertising = 0;
4519 tg3_flag_clear(tp, PAUSE_AUTONEG);
4520
4521 err = -EIO;
4522
4523 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4524 case 0:
4525 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4526 goto done;
4527
4528 tp->link_config.speed = SPEED_10;
4529 break;
4530 case BMCR_SPEED100:
4531 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4532 goto done;
4533
4534 tp->link_config.speed = SPEED_100;
4535 break;
4536 case BMCR_SPEED1000:
4537 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4538 tp->link_config.speed = SPEED_1000;
4539 break;
4540 }
4541 fallthrough;
4542 default:
4543 goto done;
4544 }
4545
4546 if (val & BMCR_FULLDPLX)
4547 tp->link_config.duplex = DUPLEX_FULL;
4548 else
4549 tp->link_config.duplex = DUPLEX_HALF;
4550
4551 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4552
4553 err = 0;
4554 goto done;
4555 }
4556
4557 tp->link_config.autoneg = AUTONEG_ENABLE;
4558 tp->link_config.advertising = ADVERTISED_Autoneg;
4559 tg3_flag_set(tp, PAUSE_AUTONEG);
4560
4561 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4562 u32 adv;
4563
4564 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4565 if (err)
4566 goto done;
4567
4568 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4569 tp->link_config.advertising |= adv | ADVERTISED_TP;
4570
4571 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4572 } else {
4573 tp->link_config.advertising |= ADVERTISED_FIBRE;
4574 }
4575
4576 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4577 u32 adv;
4578
4579 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4580 err = tg3_readphy(tp, MII_CTRL1000, &val);
4581 if (err)
4582 goto done;
4583
4584 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4585 } else {
4586 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4587 if (err)
4588 goto done;
4589
4590 adv = tg3_decode_flowctrl_1000X(val);
4591 tp->link_config.flowctrl = adv;
4592
4593 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4594 adv = mii_adv_to_ethtool_adv_x(val);
4595 }
4596
4597 tp->link_config.advertising |= adv;
4598 }
4599
4600 done:
4601 return err;
4602 }
4603
4604 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4605 {
4606 int err;
4607
4608 /* Turn off tap power management. */
4609 /* Set Extended packet length bit */
4610 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4611
4612 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4613 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4614 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4615 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4616 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4617
4618 udelay(40);
4619
4620 return err;
4621 }
4622
4623 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4624 {
4625 struct ethtool_eee eee;
4626
4627 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4628 return true;
4629
4630 tg3_eee_pull_config(tp, &eee);
4631
4632 if (tp->eee.eee_enabled) {
4633 if (tp->eee.advertised != eee.advertised ||
4634 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4635 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4636 return false;
4637 } else {
4638 /* EEE is disabled but we're advertising */
4639 if (eee.advertised)
4640 return false;
4641 }
4642
4643 return true;
4644 }
4645
4646 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4647 {
4648 u32 advmsk, tgtadv, advertising;
4649
4650 advertising = tp->link_config.advertising;
4651 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4652
4653 advmsk = ADVERTISE_ALL;
4654 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4655 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4656 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4657 }
4658
4659 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4660 return false;
4661
4662 if ((*lcladv & advmsk) != tgtadv)
4663 return false;
4664
4665 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4666 u32 tg3_ctrl;
4667
4668 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4669
4670 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4671 return false;
4672
4673 if (tgtadv &&
4674 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4675 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4676 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4677 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4678 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4679 } else {
4680 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4681 }
4682
4683 if (tg3_ctrl != tgtadv)
4684 return false;
4685 }
4686
4687 return true;
4688 }
4689
4690 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4691 {
4692 u32 lpeth = 0;
4693
4694 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4695 u32 val;
4696
4697 if (tg3_readphy(tp, MII_STAT1000, &val))
4698 return false;
4699
4700 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4701 }
4702
4703 if (tg3_readphy(tp, MII_LPA, rmtadv))
4704 return false;
4705
4706 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4707 tp->link_config.rmt_adv = lpeth;
4708
4709 return true;
4710 }
4711
4712 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4713 {
4714 if (curr_link_up != tp->link_up) {
4715 if (curr_link_up) {
4716 netif_carrier_on(tp->dev);
4717 } else {
4718 netif_carrier_off(tp->dev);
4719 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4720 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4721 }
4722
4723 tg3_link_report(tp);
4724 return true;
4725 }
4726
4727 return false;
4728 }
4729
4730 static void tg3_clear_mac_status(struct tg3 *tp)
4731 {
4732 tw32(MAC_EVENT, 0);
4733
4734 tw32_f(MAC_STATUS,
4735 MAC_STATUS_SYNC_CHANGED |
4736 MAC_STATUS_CFG_CHANGED |
4737 MAC_STATUS_MI_COMPLETION |
4738 MAC_STATUS_LNKSTATE_CHANGED);
4739 udelay(40);
4740 }
4741
4742 static void tg3_setup_eee(struct tg3 *tp)
4743 {
4744 u32 val;
4745
4746 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4747 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4748 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4749 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4750
4751 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4752
4753 tw32_f(TG3_CPMU_EEE_CTRL,
4754 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4755
4756 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4757 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4758 TG3_CPMU_EEEMD_LPI_IN_RX |
4759 TG3_CPMU_EEEMD_EEE_ENABLE;
4760
4761 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4762 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4763
4764 if (tg3_flag(tp, ENABLE_APE))
4765 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4766
4767 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4768
4769 tw32_f(TG3_CPMU_EEE_DBTMR1,
4770 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4771 (tp->eee.tx_lpi_timer & 0xffff));
4772
4773 tw32_f(TG3_CPMU_EEE_DBTMR2,
4774 TG3_CPMU_DBTMR2_APE_TX_2047US |
4775 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4776 }
4777
4778 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4779 {
4780 bool current_link_up;
4781 u32 bmsr, val;
4782 u32 lcl_adv, rmt_adv;
4783 u32 current_speed;
4784 u8 current_duplex;
4785 int i, err;
4786
4787 tg3_clear_mac_status(tp);
4788
4789 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4790 tw32_f(MAC_MI_MODE,
4791 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4792 udelay(80);
4793 }
4794
4795 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4796
4797 /* Some third-party PHYs need to be reset on link going
4798 * down.
4799 */
4800 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4801 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4802 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4803 tp->link_up) {
4804 tg3_readphy(tp, MII_BMSR, &bmsr);
4805 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4806 !(bmsr & BMSR_LSTATUS))
4807 force_reset = true;
4808 }
4809 if (force_reset)
4810 tg3_phy_reset(tp);
4811
4812 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4813 tg3_readphy(tp, MII_BMSR, &bmsr);
4814 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4815 !tg3_flag(tp, INIT_COMPLETE))
4816 bmsr = 0;
4817
4818 if (!(bmsr & BMSR_LSTATUS)) {
4819 err = tg3_init_5401phy_dsp(tp);
4820 if (err)
4821 return err;
4822
4823 tg3_readphy(tp, MII_BMSR, &bmsr);
4824 for (i = 0; i < 1000; i++) {
4825 udelay(10);
4826 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4827 (bmsr & BMSR_LSTATUS)) {
4828 udelay(40);
4829 break;
4830 }
4831 }
4832
4833 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4834 TG3_PHY_REV_BCM5401_B0 &&
4835 !(bmsr & BMSR_LSTATUS) &&
4836 tp->link_config.active_speed == SPEED_1000) {
4837 err = tg3_phy_reset(tp);
4838 if (!err)
4839 err = tg3_init_5401phy_dsp(tp);
4840 if (err)
4841 return err;
4842 }
4843 }
4844 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4845 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4846 /* 5701 {A0,B0} CRC bug workaround */
4847 tg3_writephy(tp, 0x15, 0x0a75);
4848 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4849 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4850 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4851 }
4852
4853 /* Clear pending interrupts... */
4854 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4855 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4856
4857 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4858 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4859 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4860 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4861
4862 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4863 tg3_asic_rev(tp) == ASIC_REV_5701) {
4864 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4865 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4866 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4867 else
4868 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4869 }
4870
4871 current_link_up = false;
4872 current_speed = SPEED_UNKNOWN;
4873 current_duplex = DUPLEX_UNKNOWN;
4874 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4875 tp->link_config.rmt_adv = 0;
4876
4877 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4878 err = tg3_phy_auxctl_read(tp,
4879 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4880 &val);
4881 if (!err && !(val & (1 << 10))) {
4882 tg3_phy_auxctl_write(tp,
4883 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4884 val | (1 << 10));
4885 goto relink;
4886 }
4887 }
4888
4889 bmsr = 0;
4890 for (i = 0; i < 100; i++) {
4891 tg3_readphy(tp, MII_BMSR, &bmsr);
4892 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4893 (bmsr & BMSR_LSTATUS))
4894 break;
4895 udelay(40);
4896 }
4897
4898 if (bmsr & BMSR_LSTATUS) {
4899 u32 aux_stat, bmcr;
4900
4901 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4902 for (i = 0; i < 2000; i++) {
4903 udelay(10);
4904 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4905 aux_stat)
4906 break;
4907 }
4908
4909 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4910 &current_speed,
4911 &current_duplex);
4912
4913 bmcr = 0;
4914 for (i = 0; i < 200; i++) {
4915 tg3_readphy(tp, MII_BMCR, &bmcr);
4916 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4917 continue;
4918 if (bmcr && bmcr != 0x7fff)
4919 break;
4920 udelay(10);
4921 }
4922
4923 lcl_adv = 0;
4924 rmt_adv = 0;
4925
4926 tp->link_config.active_speed = current_speed;
4927 tp->link_config.active_duplex = current_duplex;
4928
4929 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4930 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4931
4932 if ((bmcr & BMCR_ANENABLE) &&
4933 eee_config_ok &&
4934 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4935 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4936 current_link_up = true;
4937
4938 /* EEE settings changes take effect only after a phy
4939 * reset. If we have skipped a reset due to Link Flap
4940 * Avoidance being enabled, do it now.
4941 */
4942 if (!eee_config_ok &&
4943 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4944 !force_reset) {
4945 tg3_setup_eee(tp);
4946 tg3_phy_reset(tp);
4947 }
4948 } else {
4949 if (!(bmcr & BMCR_ANENABLE) &&
4950 tp->link_config.speed == current_speed &&
4951 tp->link_config.duplex == current_duplex) {
4952 current_link_up = true;
4953 }
4954 }
4955
4956 if (current_link_up &&
4957 tp->link_config.active_duplex == DUPLEX_FULL) {
4958 u32 reg, bit;
4959
4960 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4961 reg = MII_TG3_FET_GEN_STAT;
4962 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4963 } else {
4964 reg = MII_TG3_EXT_STAT;
4965 bit = MII_TG3_EXT_STAT_MDIX;
4966 }
4967
4968 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4969 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4970
4971 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4972 }
4973 }
4974
4975 relink:
4976 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4977 tg3_phy_copper_begin(tp);
4978
4979 if (tg3_flag(tp, ROBOSWITCH)) {
4980 current_link_up = true;
4981 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4982 current_speed = SPEED_1000;
4983 current_duplex = DUPLEX_FULL;
4984 tp->link_config.active_speed = current_speed;
4985 tp->link_config.active_duplex = current_duplex;
4986 }
4987
4988 tg3_readphy(tp, MII_BMSR, &bmsr);
4989 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4990 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4991 current_link_up = true;
4992 }
4993
4994 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4995 if (current_link_up) {
4996 if (tp->link_config.active_speed == SPEED_100 ||
4997 tp->link_config.active_speed == SPEED_10)
4998 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4999 else
5000 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5001 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5002 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5003 else
5004 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5005
5006 /* In order for the 5750 core in the BCM4785 chip to work properly
5007 * in RGMII mode, the LED Control Register must be set up.
5008 */
5009 if (tg3_flag(tp, RGMII_MODE)) {
5010 u32 led_ctrl = tr32(MAC_LED_CTRL);
5011 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5012
5013 if (tp->link_config.active_speed == SPEED_10)
5014 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5015 else if (tp->link_config.active_speed == SPEED_100)
5016 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5017 LED_CTRL_100MBPS_ON);
5018 else if (tp->link_config.active_speed == SPEED_1000)
5019 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5020 LED_CTRL_1000MBPS_ON);
5021
5022 tw32(MAC_LED_CTRL, led_ctrl);
5023 udelay(40);
5024 }
5025
5026 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5027 if (tp->link_config.active_duplex == DUPLEX_HALF)
5028 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5029
5030 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5031 if (current_link_up &&
5032 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5033 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5034 else
5035 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5036 }
5037
5038 /* ??? Without this setting Netgear GA302T PHY does not
5039 * ??? send/receive packets...
5040 */
5041 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5042 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5043 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5044 tw32_f(MAC_MI_MODE, tp->mi_mode);
5045 udelay(80);
5046 }
5047
5048 tw32_f(MAC_MODE, tp->mac_mode);
5049 udelay(40);
5050
5051 tg3_phy_eee_adjust(tp, current_link_up);
5052
5053 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5054 /* Polled via timer. */
5055 tw32_f(MAC_EVENT, 0);
5056 } else {
5057 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5058 }
5059 udelay(40);
5060
5061 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5062 current_link_up &&
5063 tp->link_config.active_speed == SPEED_1000 &&
5064 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5065 udelay(120);
5066 tw32_f(MAC_STATUS,
5067 (MAC_STATUS_SYNC_CHANGED |
5068 MAC_STATUS_CFG_CHANGED));
5069 udelay(40);
5070 tg3_write_mem(tp,
5071 NIC_SRAM_FIRMWARE_MBOX,
5072 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5073 }
5074
5075 /* Prevent send BD corruption. */
5076 if (tg3_flag(tp, CLKREQ_BUG)) {
5077 if (tp->link_config.active_speed == SPEED_100 ||
5078 tp->link_config.active_speed == SPEED_10)
5079 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5080 PCI_EXP_LNKCTL_CLKREQ_EN);
5081 else
5082 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5083 PCI_EXP_LNKCTL_CLKREQ_EN);
5084 }
5085
5086 tg3_test_and_report_link_chg(tp, current_link_up);
5087
5088 return 0;
5089 }
5090
5091 struct tg3_fiber_aneginfo {
5092 int state;
5093 #define ANEG_STATE_UNKNOWN 0
5094 #define ANEG_STATE_AN_ENABLE 1
5095 #define ANEG_STATE_RESTART_INIT 2
5096 #define ANEG_STATE_RESTART 3
5097 #define ANEG_STATE_DISABLE_LINK_OK 4
5098 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5099 #define ANEG_STATE_ABILITY_DETECT 6
5100 #define ANEG_STATE_ACK_DETECT_INIT 7
5101 #define ANEG_STATE_ACK_DETECT 8
5102 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5103 #define ANEG_STATE_COMPLETE_ACK 10
5104 #define ANEG_STATE_IDLE_DETECT_INIT 11
5105 #define ANEG_STATE_IDLE_DETECT 12
5106 #define ANEG_STATE_LINK_OK 13
5107 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5108 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5109
5110 u32 flags;
5111 #define MR_AN_ENABLE 0x00000001
5112 #define MR_RESTART_AN 0x00000002
5113 #define MR_AN_COMPLETE 0x00000004
5114 #define MR_PAGE_RX 0x00000008
5115 #define MR_NP_LOADED 0x00000010
5116 #define MR_TOGGLE_TX 0x00000020
5117 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5118 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5119 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5120 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5121 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5122 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5123 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5124 #define MR_TOGGLE_RX 0x00002000
5125 #define MR_NP_RX 0x00004000
5126
5127 #define MR_LINK_OK 0x80000000
5128
5129 unsigned long link_time, cur_time;
5130
5131 u32 ability_match_cfg;
5132 int ability_match_count;
5133
5134 char ability_match, idle_match, ack_match;
5135
5136 u32 txconfig, rxconfig;
5137 #define ANEG_CFG_NP 0x00000080
5138 #define ANEG_CFG_ACK 0x00000040
5139 #define ANEG_CFG_RF2 0x00000020
5140 #define ANEG_CFG_RF1 0x00000010
5141 #define ANEG_CFG_PS2 0x00000001
5142 #define ANEG_CFG_PS1 0x00008000
5143 #define ANEG_CFG_HD 0x00004000
5144 #define ANEG_CFG_FD 0x00002000
5145 #define ANEG_CFG_INVAL 0x00001f06
5146
5147 };
5148 #define ANEG_OK 0
5149 #define ANEG_DONE 1
5150 #define ANEG_TIMER_ENAB 2
5151 #define ANEG_FAILED -1
5152
5153 #define ANEG_STATE_SETTLE_TIME 10000
5154
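/*
 * Happy-path walk through the state machine below (a sketch inferred
 * from the code, not from the 802.3z specification text):
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * ANEG_TIMER_ENAB tells the caller to keep advancing cur_time and call
 * again; ANEG_DONE and ANEG_FAILED end one autoneg attempt.
 */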
5155 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5156 struct tg3_fiber_aneginfo *ap)
5157 {
5158 u16 flowctrl;
5159 unsigned long delta;
5160 u32 rx_cfg_reg;
5161 int ret;
5162
5163 if (ap->state == ANEG_STATE_UNKNOWN) {
5164 ap->rxconfig = 0;
5165 ap->link_time = 0;
5166 ap->cur_time = 0;
5167 ap->ability_match_cfg = 0;
5168 ap->ability_match_count = 0;
5169 ap->ability_match = 0;
5170 ap->idle_match = 0;
5171 ap->ack_match = 0;
5172 }
5173 ap->cur_time++;
5174
5175 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5176 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5177
5178 if (rx_cfg_reg != ap->ability_match_cfg) {
5179 ap->ability_match_cfg = rx_cfg_reg;
5180 ap->ability_match = 0;
5181 ap->ability_match_count = 0;
5182 } else {
5183 if (++ap->ability_match_count > 1) {
5184 ap->ability_match = 1;
5185 ap->ability_match_cfg = rx_cfg_reg;
5186 }
5187 }
5188 if (rx_cfg_reg & ANEG_CFG_ACK)
5189 ap->ack_match = 1;
5190 else
5191 ap->ack_match = 0;
5192
5193 ap->idle_match = 0;
5194 } else {
5195 ap->idle_match = 1;
5196 ap->ability_match_cfg = 0;
5197 ap->ability_match_count = 0;
5198 ap->ability_match = 0;
5199 ap->ack_match = 0;
5200
5201 rx_cfg_reg = 0;
5202 }
5203
5204 ap->rxconfig = rx_cfg_reg;
5205 ret = ANEG_OK;
5206
5207 switch (ap->state) {
5208 case ANEG_STATE_UNKNOWN:
5209 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5210 ap->state = ANEG_STATE_AN_ENABLE;
5211
5212 fallthrough;
5213 case ANEG_STATE_AN_ENABLE:
5214 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5215 if (ap->flags & MR_AN_ENABLE) {
5216 ap->link_time = 0;
5217 ap->cur_time = 0;
5218 ap->ability_match_cfg = 0;
5219 ap->ability_match_count = 0;
5220 ap->ability_match = 0;
5221 ap->idle_match = 0;
5222 ap->ack_match = 0;
5223
5224 ap->state = ANEG_STATE_RESTART_INIT;
5225 } else {
5226 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5227 }
5228 break;
5229
5230 case ANEG_STATE_RESTART_INIT:
5231 ap->link_time = ap->cur_time;
5232 ap->flags &= ~(MR_NP_LOADED);
5233 ap->txconfig = 0;
5234 tw32(MAC_TX_AUTO_NEG, 0);
5235 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5236 tw32_f(MAC_MODE, tp->mac_mode);
5237 udelay(40);
5238
5239 ret = ANEG_TIMER_ENAB;
5240 ap->state = ANEG_STATE_RESTART;
5241
5242 fallthrough;
5243 case ANEG_STATE_RESTART:
5244 delta = ap->cur_time - ap->link_time;
5245 if (delta > ANEG_STATE_SETTLE_TIME)
5246 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5247 else
5248 ret = ANEG_TIMER_ENAB;
5249 break;
5250
5251 case ANEG_STATE_DISABLE_LINK_OK:
5252 ret = ANEG_DONE;
5253 break;
5254
5255 case ANEG_STATE_ABILITY_DETECT_INIT:
5256 ap->flags &= ~(MR_TOGGLE_TX);
5257 ap->txconfig = ANEG_CFG_FD;
5258 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5259 if (flowctrl & ADVERTISE_1000XPAUSE)
5260 ap->txconfig |= ANEG_CFG_PS1;
5261 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5262 ap->txconfig |= ANEG_CFG_PS2;
5263 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5264 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5265 tw32_f(MAC_MODE, tp->mac_mode);
5266 udelay(40);
5267
5268 ap->state = ANEG_STATE_ABILITY_DETECT;
5269 break;
5270
5271 case ANEG_STATE_ABILITY_DETECT:
5272 if (ap->ability_match != 0 && ap->rxconfig != 0)
5273 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5274 break;
5275
5276 case ANEG_STATE_ACK_DETECT_INIT:
5277 ap->txconfig |= ANEG_CFG_ACK;
5278 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5279 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5280 tw32_f(MAC_MODE, tp->mac_mode);
5281 udelay(40);
5282
5283 ap->state = ANEG_STATE_ACK_DETECT;
5284
5285 fallthrough;
5286 case ANEG_STATE_ACK_DETECT:
5287 if (ap->ack_match != 0) {
5288 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5289 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5290 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5291 } else {
5292 ap->state = ANEG_STATE_AN_ENABLE;
5293 }
5294 } else if (ap->ability_match != 0 &&
5295 ap->rxconfig == 0) {
5296 ap->state = ANEG_STATE_AN_ENABLE;
5297 }
5298 break;
5299
5300 case ANEG_STATE_COMPLETE_ACK_INIT:
5301 if (ap->rxconfig & ANEG_CFG_INVAL) {
5302 ret = ANEG_FAILED;
5303 break;
5304 }
5305 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5306 MR_LP_ADV_HALF_DUPLEX |
5307 MR_LP_ADV_SYM_PAUSE |
5308 MR_LP_ADV_ASYM_PAUSE |
5309 MR_LP_ADV_REMOTE_FAULT1 |
5310 MR_LP_ADV_REMOTE_FAULT2 |
5311 MR_LP_ADV_NEXT_PAGE |
5312 MR_TOGGLE_RX |
5313 MR_NP_RX);
5314 if (ap->rxconfig & ANEG_CFG_FD)
5315 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5316 if (ap->rxconfig & ANEG_CFG_HD)
5317 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5318 if (ap->rxconfig & ANEG_CFG_PS1)
5319 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5320 if (ap->rxconfig & ANEG_CFG_PS2)
5321 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5322 if (ap->rxconfig & ANEG_CFG_RF1)
5323 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5324 if (ap->rxconfig & ANEG_CFG_RF2)
5325 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5326 if (ap->rxconfig & ANEG_CFG_NP)
5327 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5328
5329 ap->link_time = ap->cur_time;
5330
5331 ap->flags ^= (MR_TOGGLE_TX);
5332 if (ap->rxconfig & 0x0008)
5333 ap->flags |= MR_TOGGLE_RX;
5334 if (ap->rxconfig & ANEG_CFG_NP)
5335 ap->flags |= MR_NP_RX;
5336 ap->flags |= MR_PAGE_RX;
5337
5338 ap->state = ANEG_STATE_COMPLETE_ACK;
5339 ret = ANEG_TIMER_ENAB;
5340 break;
5341
5342 case ANEG_STATE_COMPLETE_ACK:
5343 if (ap->ability_match != 0 &&
5344 ap->rxconfig == 0) {
5345 ap->state = ANEG_STATE_AN_ENABLE;
5346 break;
5347 }
5348 delta = ap->cur_time - ap->link_time;
5349 if (delta > ANEG_STATE_SETTLE_TIME) {
5350 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5351 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5352 } else {
5353 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5354 !(ap->flags & MR_NP_RX)) {
5355 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5356 } else {
5357 ret = ANEG_FAILED;
5358 }
5359 }
5360 }
5361 break;
5362
5363 case ANEG_STATE_IDLE_DETECT_INIT:
5364 ap->link_time = ap->cur_time;
5365 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5366 tw32_f(MAC_MODE, tp->mac_mode);
5367 udelay(40);
5368
5369 ap->state = ANEG_STATE_IDLE_DETECT;
5370 ret = ANEG_TIMER_ENAB;
5371 break;
5372
5373 case ANEG_STATE_IDLE_DETECT:
5374 if (ap->ability_match != 0 &&
5375 ap->rxconfig == 0) {
5376 ap->state = ANEG_STATE_AN_ENABLE;
5377 break;
5378 }
5379 delta = ap->cur_time - ap->link_time;
5380 if (delta > ANEG_STATE_SETTLE_TIME) {
5381 /* XXX another gem from the Broadcom driver :( */
5382 ap->state = ANEG_STATE_LINK_OK;
5383 }
5384 break;
5385
5386 case ANEG_STATE_LINK_OK:
5387 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5388 ret = ANEG_DONE;
5389 break;
5390
5391 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5392 /* ??? unimplemented */
5393 break;
5394
5395 case ANEG_STATE_NEXT_PAGE_WAIT:
5396 /* ??? unimplemented */
5397 break;
5398
5399 default:
5400 ret = ANEG_FAILED;
5401 break;
5402 }
5403
5404 return ret;
5405 }
5406
5407 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5408 {
5409 int res = 0;
5410 struct tg3_fiber_aneginfo aninfo;
5411 int status = ANEG_FAILED;
5412 unsigned int tick;
5413 u32 tmp;
5414
5415 tw32_f(MAC_TX_AUTO_NEG, 0);
5416
5417 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5418 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5419 udelay(40);
5420
5421 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5422 udelay(40);
5423
5424 memset(&aninfo, 0, sizeof(aninfo));
5425 aninfo.flags |= MR_AN_ENABLE;
5426 aninfo.state = ANEG_STATE_UNKNOWN;
5427 aninfo.cur_time = 0;
5428 tick = 0;
5429 while (++tick < 195000) {
5430 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5431 if (status == ANEG_DONE || status == ANEG_FAILED)
5432 break;
5433
5434 udelay(1);
5435 }
5436
5437 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5438 tw32_f(MAC_MODE, tp->mac_mode);
5439 udelay(40);
5440
5441 *txflags = aninfo.txconfig;
5442 *rxflags = aninfo.flags;
5443
5444 if (status == ANEG_DONE &&
5445 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5446 MR_LP_ADV_FULL_DUPLEX)))
5447 res = 1;
5448
5449 return res;
5450 }
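
/*
 * Note (inferred from the loop above): the state machine is cranked at
 * roughly 1 us intervals, so the 195000-tick bound gives one autoneg
 * attempt on the order of 195 ms before it is abandoned.
 */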
5451
5452 static void tg3_init_bcm8002(struct tg3 *tp)
5453 {
5454 u32 mac_status = tr32(MAC_STATUS);
5455 int i;
5456
5457 /* Reset when initting first time or we have a link. */
5458 if (tg3_flag(tp, INIT_COMPLETE) &&
5459 !(mac_status & MAC_STATUS_PCS_SYNCED))
5460 return;
5461
5462 /* Set PLL lock range. */
5463 tg3_writephy(tp, 0x16, 0x8007);
5464
5465 /* SW reset */
5466 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5467
5468 /* Wait for reset to complete. */
5469 /* XXX schedule_timeout() ... */
5470 for (i = 0; i < 500; i++)
5471 udelay(10);
5472
5473 /* Config mode; select PMA/Ch 1 regs. */
5474 tg3_writephy(tp, 0x10, 0x8411);
5475
5476 /* Enable auto-lock and comdet, select txclk for tx. */
5477 tg3_writephy(tp, 0x11, 0x0a10);
5478
5479 tg3_writephy(tp, 0x18, 0x00a0);
5480 tg3_writephy(tp, 0x16, 0x41ff);
5481
5482 /* Assert and deassert POR. */
5483 tg3_writephy(tp, 0x13, 0x0400);
5484 udelay(40);
5485 tg3_writephy(tp, 0x13, 0x0000);
5486
5487 tg3_writephy(tp, 0x11, 0x0a50);
5488 udelay(40);
5489 tg3_writephy(tp, 0x11, 0x0a10);
5490
5491 /* Wait for signal to stabilize */
5492 /* XXX schedule_timeout() ... */
5493 for (i = 0; i < 15000; i++)
5494 udelay(10);
5495
5496 /* Deselect the channel register so we can read the PHYID
5497 * later.
5498 */
5499 tg3_writephy(tp, 0x10, 0x8011);
5500 }
5501
5502 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5503 {
5504 u16 flowctrl;
5505 bool current_link_up;
5506 u32 sg_dig_ctrl, sg_dig_status;
5507 u32 serdes_cfg, expected_sg_dig_ctrl;
5508 int workaround, port_a;
5509
5510 serdes_cfg = 0;
5511 expected_sg_dig_ctrl = 0;
5512 workaround = 0;
5513 port_a = 1;
5514 current_link_up = false;
5515
5516 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5517 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5518 workaround = 1;
5519 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5520 port_a = 0;
5521
5522 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5523 /* preserve bits 20-23 for voltage regulator */
5524 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5525 }
5526
5527 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5528
5529 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5530 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5531 if (workaround) {
5532 u32 val = serdes_cfg;
5533
5534 if (port_a)
5535 val |= 0xc010000;
5536 else
5537 val |= 0x4010000;
5538 tw32_f(MAC_SERDES_CFG, val);
5539 }
5540
5541 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5542 }
5543 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5544 tg3_setup_flow_control(tp, 0, 0);
5545 current_link_up = true;
5546 }
5547 goto out;
5548 }
5549
5550 /* Want auto-negotiation. */
5551 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5552
5553 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5554 if (flowctrl & ADVERTISE_1000XPAUSE)
5555 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5556 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5557 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5558
5559 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5560 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5561 tp->serdes_counter &&
5562 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5563 MAC_STATUS_RCVD_CFG)) ==
5564 MAC_STATUS_PCS_SYNCED)) {
5565 tp->serdes_counter--;
5566 current_link_up = true;
5567 goto out;
5568 }
5569 restart_autoneg:
5570 if (workaround)
5571 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5572 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5573 udelay(5);
5574 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5575
5576 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5577 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5578 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5579 MAC_STATUS_SIGNAL_DET)) {
5580 sg_dig_status = tr32(SG_DIG_STATUS);
5581 mac_status = tr32(MAC_STATUS);
5582
5583 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5584 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5585 u32 local_adv = 0, remote_adv = 0;
5586
5587 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5588 local_adv |= ADVERTISE_1000XPAUSE;
5589 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5590 local_adv |= ADVERTISE_1000XPSE_ASYM;
5591
5592 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5593 remote_adv |= LPA_1000XPAUSE;
5594 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5595 remote_adv |= LPA_1000XPAUSE_ASYM;
5596
5597 tp->link_config.rmt_adv =
5598 mii_adv_to_ethtool_adv_x(remote_adv);
5599
5600 tg3_setup_flow_control(tp, local_adv, remote_adv);
5601 current_link_up = true;
5602 tp->serdes_counter = 0;
5603 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5604 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5605 if (tp->serdes_counter)
5606 tp->serdes_counter--;
5607 else {
5608 if (workaround) {
5609 u32 val = serdes_cfg;
5610
5611 if (port_a)
5612 val |= 0xc010000;
5613 else
5614 val |= 0x4010000;
5615
5616 tw32_f(MAC_SERDES_CFG, val);
5617 }
5618
5619 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5620 udelay(40);
5621
5622 /* Link parallel detection - link is up */
5623 /* only if we have PCS_SYNC and not */
5624 /* receiving config code words */
5625 mac_status = tr32(MAC_STATUS);
5626 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5627 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5628 tg3_setup_flow_control(tp, 0, 0);
5629 current_link_up = true;
5630 tp->phy_flags |=
5631 TG3_PHYFLG_PARALLEL_DETECT;
5632 tp->serdes_counter =
5633 SERDES_PARALLEL_DET_TIMEOUT;
5634 } else
5635 goto restart_autoneg;
5636 }
5637 }
5638 } else {
5639 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5640 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5641 }
5642
5643 out:
5644 return current_link_up;
5645 }
5646
5647 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5648 {
5649 bool current_link_up = false;
5650
5651 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5652 goto out;
5653
5654 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5655 u32 txflags, rxflags;
5656 int i;
5657
5658 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5659 u32 local_adv = 0, remote_adv = 0;
5660
5661 if (txflags & ANEG_CFG_PS1)
5662 local_adv |= ADVERTISE_1000XPAUSE;
5663 if (txflags & ANEG_CFG_PS2)
5664 local_adv |= ADVERTISE_1000XPSE_ASYM;
5665
5666 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5667 remote_adv |= LPA_1000XPAUSE;
5668 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5669 remote_adv |= LPA_1000XPAUSE_ASYM;
5670
5671 tp->link_config.rmt_adv =
5672 mii_adv_to_ethtool_adv_x(remote_adv);
5673
5674 tg3_setup_flow_control(tp, local_adv, remote_adv);
5675
5676 current_link_up = true;
5677 }
5678 for (i = 0; i < 30; i++) {
5679 udelay(20);
5680 tw32_f(MAC_STATUS,
5681 (MAC_STATUS_SYNC_CHANGED |
5682 MAC_STATUS_CFG_CHANGED));
5683 udelay(40);
5684 if ((tr32(MAC_STATUS) &
5685 (MAC_STATUS_SYNC_CHANGED |
5686 MAC_STATUS_CFG_CHANGED)) == 0)
5687 break;
5688 }
5689
5690 mac_status = tr32(MAC_STATUS);
5691 if (!current_link_up &&
5692 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5693 !(mac_status & MAC_STATUS_RCVD_CFG))
5694 current_link_up = true;
5695 } else {
5696 tg3_setup_flow_control(tp, 0, 0);
5697
5698 /* Forcing 1000FD link up. */
5699 current_link_up = true;
5700
5701 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5702 udelay(40);
5703
5704 tw32_f(MAC_MODE, tp->mac_mode);
5705 udelay(40);
5706 }
5707
5708 out:
5709 return current_link_up;
5710 }
5711
5712 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5713 {
5714 u32 orig_pause_cfg;
5715 u32 orig_active_speed;
5716 u8 orig_active_duplex;
5717 u32 mac_status;
5718 bool current_link_up;
5719 int i;
5720
5721 orig_pause_cfg = tp->link_config.active_flowctrl;
5722 orig_active_speed = tp->link_config.active_speed;
5723 orig_active_duplex = tp->link_config.active_duplex;
5724
5725 if (!tg3_flag(tp, HW_AUTONEG) &&
5726 tp->link_up &&
5727 tg3_flag(tp, INIT_COMPLETE)) {
5728 mac_status = tr32(MAC_STATUS);
5729 mac_status &= (MAC_STATUS_PCS_SYNCED |
5730 MAC_STATUS_SIGNAL_DET |
5731 MAC_STATUS_CFG_CHANGED |
5732 MAC_STATUS_RCVD_CFG);
5733 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5734 MAC_STATUS_SIGNAL_DET)) {
5735 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5736 MAC_STATUS_CFG_CHANGED));
5737 return 0;
5738 }
5739 }
5740
5741 tw32_f(MAC_TX_AUTO_NEG, 0);
5742
5743 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5744 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5745 tw32_f(MAC_MODE, tp->mac_mode);
5746 udelay(40);
5747
5748 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5749 tg3_init_bcm8002(tp);
5750
5751 /* Enable link change event even when serdes polling. */
5752 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5753 udelay(40);
5754
5755 current_link_up = false;
5756 tp->link_config.rmt_adv = 0;
5757 mac_status = tr32(MAC_STATUS);
5758
5759 if (tg3_flag(tp, HW_AUTONEG))
5760 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5761 else
5762 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5763
5764 tp->napi[0].hw_status->status =
5765 (SD_STATUS_UPDATED |
5766 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5767
5768 for (i = 0; i < 100; i++) {
5769 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5770 MAC_STATUS_CFG_CHANGED));
5771 udelay(5);
5772 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5773 MAC_STATUS_CFG_CHANGED |
5774 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5775 break;
5776 }
5777
5778 mac_status = tr32(MAC_STATUS);
5779 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5780 current_link_up = false;
5781 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5782 tp->serdes_counter == 0) {
5783 tw32_f(MAC_MODE, (tp->mac_mode |
5784 MAC_MODE_SEND_CONFIGS));
5785 udelay(1);
5786 tw32_f(MAC_MODE, tp->mac_mode);
5787 }
5788 }
5789
5790 if (current_link_up) {
5791 tp->link_config.active_speed = SPEED_1000;
5792 tp->link_config.active_duplex = DUPLEX_FULL;
5793 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5794 LED_CTRL_LNKLED_OVERRIDE |
5795 LED_CTRL_1000MBPS_ON));
5796 } else {
5797 tp->link_config.active_speed = SPEED_UNKNOWN;
5798 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5799 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5800 LED_CTRL_LNKLED_OVERRIDE |
5801 LED_CTRL_TRAFFIC_OVERRIDE));
5802 }
5803
5804 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5805 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5806 if (orig_pause_cfg != now_pause_cfg ||
5807 orig_active_speed != tp->link_config.active_speed ||
5808 orig_active_duplex != tp->link_config.active_duplex)
5809 tg3_link_report(tp);
5810 }
5811
5812 return 0;
5813 }
5814
5815 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5816 {
5817 int err = 0;
5818 u32 bmsr, bmcr;
5819 u32 current_speed = SPEED_UNKNOWN;
5820 u8 current_duplex = DUPLEX_UNKNOWN;
5821 bool current_link_up = false;
5822 u32 local_adv, remote_adv, sgsr;
5823
5824 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5825 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5826 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5827 (sgsr & SERDES_TG3_SGMII_MODE)) {
5828
5829 if (force_reset)
5830 tg3_phy_reset(tp);
5831
5832 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5833
5834 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5835 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5836 } else {
5837 current_link_up = true;
5838 if (sgsr & SERDES_TG3_SPEED_1000) {
5839 current_speed = SPEED_1000;
5840 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5841 } else if (sgsr & SERDES_TG3_SPEED_100) {
5842 current_speed = SPEED_100;
5843 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5844 } else {
5845 current_speed = SPEED_10;
5846 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5847 }
5848
5849 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5850 current_duplex = DUPLEX_FULL;
5851 else
5852 current_duplex = DUPLEX_HALF;
5853 }
5854
5855 tw32_f(MAC_MODE, tp->mac_mode);
5856 udelay(40);
5857
5858 tg3_clear_mac_status(tp);
5859
5860 goto fiber_setup_done;
5861 }
5862
5863 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5864 tw32_f(MAC_MODE, tp->mac_mode);
5865 udelay(40);
5866
5867 tg3_clear_mac_status(tp);
5868
5869 if (force_reset)
5870 tg3_phy_reset(tp);
5871
5872 tp->link_config.rmt_adv = 0;
5873
5874 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5875 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5876 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5877 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5878 bmsr |= BMSR_LSTATUS;
5879 else
5880 bmsr &= ~BMSR_LSTATUS;
5881 }
5882
5883 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5884
5885 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5886 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5887 /* do nothing, just check for link up at the end */
5888 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5889 u32 adv, newadv;
5890
5891 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5892 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5893 ADVERTISE_1000XPAUSE |
5894 ADVERTISE_1000XPSE_ASYM |
5895 ADVERTISE_SLCT);
5896
5897 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5898 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5899
5900 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5901 tg3_writephy(tp, MII_ADVERTISE, newadv);
5902 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5903 tg3_writephy(tp, MII_BMCR, bmcr);
5904
5905 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5906 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5907 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5908
5909 return err;
5910 }
5911 } else {
5912 u32 new_bmcr;
5913
5914 bmcr &= ~BMCR_SPEED1000;
5915 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5916
5917 if (tp->link_config.duplex == DUPLEX_FULL)
5918 new_bmcr |= BMCR_FULLDPLX;
5919
5920 if (new_bmcr != bmcr) {
5921 /* BMCR_SPEED1000 is a reserved bit that needs
5922 * to be set on write.
5923 */
5924 new_bmcr |= BMCR_SPEED1000;
5925
5926 /* Force a linkdown */
5927 if (tp->link_up) {
5928 u32 adv;
5929
5930 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5931 adv &= ~(ADVERTISE_1000XFULL |
5932 ADVERTISE_1000XHALF |
5933 ADVERTISE_SLCT);
5934 tg3_writephy(tp, MII_ADVERTISE, adv);
5935 tg3_writephy(tp, MII_BMCR, bmcr |
5936 BMCR_ANRESTART |
5937 BMCR_ANENABLE);
5938 udelay(10);
5939 tg3_carrier_off(tp);
5940 }
5941 tg3_writephy(tp, MII_BMCR, new_bmcr);
5942 bmcr = new_bmcr;
5943 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5944 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5945 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5946 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5947 bmsr |= BMSR_LSTATUS;
5948 else
5949 bmsr &= ~BMSR_LSTATUS;
5950 }
5951 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5952 }
5953 }
5954
5955 if (bmsr & BMSR_LSTATUS) {
5956 current_speed = SPEED_1000;
5957 current_link_up = true;
5958 if (bmcr & BMCR_FULLDPLX)
5959 current_duplex = DUPLEX_FULL;
5960 else
5961 current_duplex = DUPLEX_HALF;
5962
5963 local_adv = 0;
5964 remote_adv = 0;
5965
5966 if (bmcr & BMCR_ANENABLE) {
5967 u32 common;
5968
5969 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5970 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5971 common = local_adv & remote_adv;
5972 if (common & (ADVERTISE_1000XHALF |
5973 ADVERTISE_1000XFULL)) {
5974 if (common & ADVERTISE_1000XFULL)
5975 current_duplex = DUPLEX_FULL;
5976 else
5977 current_duplex = DUPLEX_HALF;
5978
5979 tp->link_config.rmt_adv =
5980 mii_adv_to_ethtool_adv_x(remote_adv);
5981 } else if (!tg3_flag(tp, 5780_CLASS)) {
5982 /* Link is up via parallel detect */
5983 } else {
5984 current_link_up = false;
5985 }
5986 }
5987 }
5988
5989 fiber_setup_done:
5990 if (current_link_up && current_duplex == DUPLEX_FULL)
5991 tg3_setup_flow_control(tp, local_adv, remote_adv);
5992
5993 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5994 if (tp->link_config.active_duplex == DUPLEX_HALF)
5995 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5996
5997 tw32_f(MAC_MODE, tp->mac_mode);
5998 udelay(40);
5999
6000 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6001
6002 tp->link_config.active_speed = current_speed;
6003 tp->link_config.active_duplex = current_duplex;
6004
6005 tg3_test_and_report_link_chg(tp, current_link_up);
6006 return err;
6007 }
6008
6009 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6010 {
6011 if (tp->serdes_counter) {
6012 /* Give autoneg time to complete. */
6013 tp->serdes_counter--;
6014 return;
6015 }
6016
6017 if (!tp->link_up &&
6018 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6019 u32 bmcr;
6020
6021 tg3_readphy(tp, MII_BMCR, &bmcr);
6022 if (bmcr & BMCR_ANENABLE) {
6023 u32 phy1, phy2;
6024
6025 /* Select shadow register 0x1f */
6026 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6027 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6028
6029 /* Select expansion interrupt status register */
6030 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6031 MII_TG3_DSP_EXP1_INT_STAT);
6032 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6033 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6034
6035 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6036 /* We have signal detect and are not receiving
6037 * config code words, so the link is up by parallel
6038 * detection.
6039 */
6040
6041 bmcr &= ~BMCR_ANENABLE;
6042 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6043 tg3_writephy(tp, MII_BMCR, bmcr);
6044 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6045 }
6046 }
6047 } else if (tp->link_up &&
6048 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6049 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6050 u32 phy2;
6051
6052 /* Select expansion interrupt status register */
6053 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6054 MII_TG3_DSP_EXP1_INT_STAT);
6055 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6056 if (phy2 & 0x20) {
6057 u32 bmcr;
6058
6059 /* Config code words received, turn on autoneg. */
6060 tg3_readphy(tp, MII_BMCR, &bmcr);
6061 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6062
6063 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6064
6065 }
6066 }
6067 }
6068
6069 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6070 {
6071 u32 val;
6072 int err;
6073
6074 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6075 err = tg3_setup_fiber_phy(tp, force_reset);
6076 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6077 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6078 else
6079 err = tg3_setup_copper_phy(tp, force_reset);
6080
6081 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6082 u32 scale;
6083
6084 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6085 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6086 scale = 65;
6087 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6088 scale = 6;
6089 else
6090 scale = 12;
6091
6092 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6093 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6094 tw32(GRC_MISC_CFG, val);
6095 }
6096
6097 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6098 (6 << TX_LENGTHS_IPG_SHIFT);
6099 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6100 tg3_asic_rev(tp) == ASIC_REV_5762)
6101 val |= tr32(MAC_TX_LENGTHS) &
6102 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6103 TX_LENGTHS_CNT_DWN_VAL_MSK);
6104
6105 if (tp->link_config.active_speed == SPEED_1000 &&
6106 tp->link_config.active_duplex == DUPLEX_HALF)
6107 tw32(MAC_TX_LENGTHS, val |
6108 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6109 else
6110 tw32(MAC_TX_LENGTHS, val |
6111 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6112
6113 if (!tg3_flag(tp, 5705_PLUS)) {
6114 if (tp->link_up) {
6115 tw32(HOSTCC_STAT_COAL_TICKS,
6116 tp->coal.stats_block_coalesce_usecs);
6117 } else {
6118 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6119 }
6120 }
6121
6122 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6123 val = tr32(PCIE_PWR_MGMT_THRESH);
6124 if (!tp->link_up)
6125 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6126 tp->pwrmgmt_thresh;
6127 else
6128 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6129 tw32(PCIE_PWR_MGMT_THRESH, val);
6130 }
6131
6132 return err;
6133 }
6134
6135 /* tp->lock must be held */
6136 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6137 {
6138 u64 stamp;
6139
6140 ptp_read_system_prets(sts);
6141 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6142 ptp_read_system_postts(sts);
6143 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6144
6145 return stamp;
6146 }
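/* The prets/postts calls above snapshot the system clock immediately
 * before and after the device register read. This is what backs the
 * gettimex64 interface: userspace (e.g. the PTP_SYS_OFFSET_EXTENDED
 * ioctl used by phc2sys) receives a pair of system timestamps that
 * bound the instant the hardware counter was sampled, so the
 * PHC-to-system offset can be computed with the MMIO read latency
 * accounted for.
 */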
6147
6148 /* tp->lock must be held */
6149 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6150 {
6151 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6152
6153 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6154 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6155 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6156 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6157 }
6158
6159 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6160 static inline void tg3_full_unlock(struct tg3 *tp);
6161 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6162 {
6163 struct tg3 *tp = netdev_priv(dev);
6164
6165 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6166 SOF_TIMESTAMPING_RX_SOFTWARE |
6167 SOF_TIMESTAMPING_SOFTWARE;
6168
6169 if (tg3_flag(tp, PTP_CAPABLE)) {
6170 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6171 SOF_TIMESTAMPING_RX_HARDWARE |
6172 SOF_TIMESTAMPING_RAW_HARDWARE;
6173 }
6174
6175 if (tp->ptp_clock)
6176 info->phc_index = ptp_clock_index(tp->ptp_clock);
6177 else
6178 info->phc_index = -1;
6179
6180 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6181
6182 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6183 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6184 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6185 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6186 return 0;
6187 }
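/* Usage note: this callback is what userspace queries with
 * "ethtool -T <dev>" (ETHTOOL_GET_TS_INFO). On a PTP_CAPABLE part the
 * report includes hardware TX/RX timestamping, the RX filters listed
 * above, and a PHC index that tools such as ptp4l and phc2sys can
 * open via the corresponding /dev/ptp<N> device.
 */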
6188
6189 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6190 {
6191 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6192 bool neg_adj = false;
6193 u32 correction = 0;
6194
6195 if (ppb < 0) {
6196 neg_adj = true;
6197 ppb = -ppb;
6198 }
6199
6200 /* Frequency adjustment is performed using hardware with a 24-bit
6201 * accumulator and a programmable correction value. On each clk, the
6202 * correction value gets added to the accumulator and when it
6203 * overflows, the time counter is incremented/decremented.
6204 *
6205 * So conversion from ppb to correction value is
6206 * ppb * (1 << 24) / 1000000000
6207 */
6208 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6209 TG3_EAV_REF_CLK_CORRECT_MASK;
6210
6211 tg3_full_lock(tp, 0);
6212
6213 if (correction)
6214 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6215 TG3_EAV_REF_CLK_CORRECT_EN |
6216 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6217 else
6218 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6219
6220 tg3_full_unlock(tp);
6221
6222 return 0;
6223 }
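/* Worked example of the conversion above: for ppb = 1000 (1 ppm),
 *
 *   correction = 1000 * (1 << 24) / 1000000000
 *              = 16777216000 / 1000000000
 *              = 16 (truncated by div_u64)
 *
 * so the accumulator gains 16 counts per clock and the effective
 * adjustment is 16 / 2^24 ~= 0.954 ppm. The quantization step of this
 * scheme is therefore 1 / 2^24 ~= 60 ppb.
 */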
6224
6225 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6226 {
6227 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228
6229 tg3_full_lock(tp, 0);
6230 tp->ptp_adjust += delta;
6231 tg3_full_unlock(tp);
6232
6233 return 0;
6234 }
6235
6236 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6237 struct ptp_system_timestamp *sts)
6238 {
6239 u64 ns;
6240 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6241
6242 tg3_full_lock(tp, 0);
6243 ns = tg3_refclk_read(tp, sts);
6244 ns += tp->ptp_adjust;
6245 tg3_full_unlock(tp);
6246
6247 *ts = ns_to_timespec64(ns);
6248
6249 return 0;
6250 }
6251
6252 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6253 const struct timespec64 *ts)
6254 {
6255 u64 ns;
6256 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6257
6258 ns = timespec64_to_ns(ts);
6259
6260 tg3_full_lock(tp, 0);
6261 tg3_refclk_write(tp, ns);
6262 tp->ptp_adjust = 0;
6263 tg3_full_unlock(tp);
6264
6265 return 0;
6266 }
6267
6268 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6269 struct ptp_clock_request *rq, int on)
6270 {
6271 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6272 u32 clock_ctl;
6273 int rval = 0;
6274
6275 switch (rq->type) {
6276 case PTP_CLK_REQ_PEROUT:
6277 /* Reject requests with unsupported flags */
6278 if (rq->perout.flags)
6279 return -EOPNOTSUPP;
6280
6281 if (rq->perout.index != 0)
6282 return -EINVAL;
6283
6284 tg3_full_lock(tp, 0);
6285 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6286 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6287
6288 if (on) {
6289 u64 nsec;
6290
6291 nsec = rq->perout.start.sec * 1000000000ULL +
6292 rq->perout.start.nsec;
6293
6294 if (rq->perout.period.sec || rq->perout.period.nsec) {
6295 netdev_warn(tp->dev,
6296 "Device supports only a one-shot timesync output, period must be 0\n");
6297 rval = -EINVAL;
6298 goto err_out;
6299 }
6300
6301 if (nsec & (1ULL << 63)) {
6302 netdev_warn(tp->dev,
6303 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6304 rval = -EINVAL;
6305 goto err_out;
6306 }
6307
6308 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6309 tw32(TG3_EAV_WATCHDOG0_MSB,
6310 TG3_EAV_WATCHDOG0_EN |
6311 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6312
6313 tw32(TG3_EAV_REF_CLCK_CTL,
6314 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6315 } else {
6316 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6317 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6318 }
6319
6320 err_out:
6321 tg3_full_unlock(tp);
6322 return rval;
6323
6324 default:
6325 break;
6326 }
6327
6328 return -EOPNOTSUPP;
6329 }
6330
6331 static const struct ptp_clock_info tg3_ptp_caps = {
6332 .owner = THIS_MODULE,
6333 .name = "tg3 clock",
6334 .max_adj = 250000000,
6335 .n_alarm = 0,
6336 .n_ext_ts = 0,
6337 .n_per_out = 1,
6338 .n_pins = 0,
6339 .pps = 0,
6340 .adjfreq = tg3_ptp_adjfreq,
6341 .adjtime = tg3_ptp_adjtime,
6342 .gettimex64 = tg3_ptp_gettimex,
6343 .settime64 = tg3_ptp_settime,
6344 .enable = tg3_ptp_enable,
6345 };
6346
6347 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6348 struct skb_shared_hwtstamps *timestamp)
6349 {
6350 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6351 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6352 tp->ptp_adjust);
6353 }
6354
6355 /* tp->lock must be held */
6356 static void tg3_ptp_init(struct tg3 *tp)
6357 {
6358 if (!tg3_flag(tp, PTP_CAPABLE))
6359 return;
6360
6361 /* Initialize the hardware clock to the system time. */
6362 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6363 tp->ptp_adjust = 0;
6364 tp->ptp_info = tg3_ptp_caps;
6365 }
6366
6367 /* tp->lock must be held */
6368 static void tg3_ptp_resume(struct tg3 *tp)
6369 {
6370 if (!tg3_flag(tp, PTP_CAPABLE))
6371 return;
6372
6373 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6374 tp->ptp_adjust = 0;
6375 }
6376
6377 static void tg3_ptp_fini(struct tg3 *tp)
6378 {
6379 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6380 return;
6381
6382 ptp_clock_unregister(tp->ptp_clock);
6383 tp->ptp_clock = NULL;
6384 tp->ptp_adjust = 0;
6385 }
6386
6387 static inline int tg3_irq_sync(struct tg3 *tp)
6388 {
6389 return tp->irq_sync;
6390 }
6391
6392 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6393 {
6394 int i;
6395
6396 dst = (u32 *)((u8 *)dst + off);
6397 for (i = 0; i < len; i += sizeof(u32))
6398 *dst++ = tr32(off + i);
6399 }
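/* Note the aliasing trick above: dst is advanced by the same offset
 * that is read, so the word read from register "off + i" lands at
 * byte offset "off + i" inside the caller's buffer. For example,
 * tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0) places tr32(MAC_MODE) at
 * ((u8 *)regs) + MAC_MODE, which is what lets tg3_dump_state() print
 * the dump indexed directly by register address.
 */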
6400
6401 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6402 {
6403 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6404 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6405 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6406 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6407 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6408 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6409 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6410 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6411 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6412 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6413 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6414 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6415 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6416 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6417 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6418 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6419 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6420 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6421 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6422
6423 if (tg3_flag(tp, SUPPORT_MSIX))
6424 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6425
6426 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6427 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6428 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6429 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6430 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6431 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6432 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6433 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6434
6435 if (!tg3_flag(tp, 5705_PLUS)) {
6436 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6437 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6438 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6439 }
6440
6441 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6442 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6443 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6444 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6445 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6446
6447 if (tg3_flag(tp, NVRAM))
6448 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6449 }
6450
6451 static void tg3_dump_state(struct tg3 *tp)
6452 {
6453 int i;
6454 u32 *regs;
6455
6456 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6457 if (!regs)
6458 return;
6459
6460 if (tg3_flag(tp, PCI_EXPRESS)) {
6461 /* Read up to but not including private PCI registers */
6462 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6463 regs[i / sizeof(u32)] = tr32(i);
6464 } else
6465 tg3_dump_legacy_regs(tp, regs);
6466
6467 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6468 if (!regs[i + 0] && !regs[i + 1] &&
6469 !regs[i + 2] && !regs[i + 3])
6470 continue;
6471
6472 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6473 i * 4,
6474 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6475 }
6476
6477 kfree(regs);
6478
6479 for (i = 0; i < tp->irq_cnt; i++) {
6480 struct tg3_napi *tnapi = &tp->napi[i];
6481
6482 /* SW status block */
6483 netdev_err(tp->dev,
6484 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6485 i,
6486 tnapi->hw_status->status,
6487 tnapi->hw_status->status_tag,
6488 tnapi->hw_status->rx_jumbo_consumer,
6489 tnapi->hw_status->rx_consumer,
6490 tnapi->hw_status->rx_mini_consumer,
6491 tnapi->hw_status->idx[0].rx_producer,
6492 tnapi->hw_status->idx[0].tx_consumer);
6493
6494 netdev_err(tp->dev,
6495 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6496 i,
6497 tnapi->last_tag, tnapi->last_irq_tag,
6498 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6499 tnapi->rx_rcb_ptr,
6500 tnapi->prodring.rx_std_prod_idx,
6501 tnapi->prodring.rx_std_cons_idx,
6502 tnapi->prodring.rx_jmb_prod_idx,
6503 tnapi->prodring.rx_jmb_cons_idx);
6504 }
6505 }
6506
6507 /* This is called whenever we suspect that the system chipset is re-
6508 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6509 * is bogus tx completions. We try to recover by setting the
6510 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6511 * in the workqueue.
6512 */
6513 static void tg3_tx_recover(struct tg3 *tp)
6514 {
6515 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6516 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6517
6518 netdev_warn(tp->dev,
6519 "The system may be re-ordering memory-mapped I/O "
6520 "cycles to the network device, attempting to recover. "
6521 "Please report the problem to the driver maintainer "
6522 "and include system chipset information.\n");
6523
6524 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6525 }
6526
6527 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6528 {
6529 /* Tell compiler to fetch tx indices from memory. */
6530 barrier();
6531 return tnapi->tx_pending -
6532 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6533 }
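/* Worked example (illustrative indices; TG3_TX_RING_SIZE is a power
 * of two, e.g. 512, making the mask 511): with tx_prod = 10 and
 * tx_cons = 500, (10 - 500) & 511 = 22 descriptors are in flight, so
 * a tx_pending of 511 leaves 489 available. The masked subtraction
 * keeps the count correct when the producer has wrapped past the end
 * of the ring while the consumer has not.
 */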
6534
6535 /* Tigon3 never reports partial packet sends. So we do not
6536 * need special logic to handle SKBs that have not had all
6537 * of their frags sent yet, like SunGEM does.
6538 */
6539 static void tg3_tx(struct tg3_napi *tnapi)
6540 {
6541 struct tg3 *tp = tnapi->tp;
6542 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6543 u32 sw_idx = tnapi->tx_cons;
6544 struct netdev_queue *txq;
6545 int index = tnapi - tp->napi;
6546 unsigned int pkts_compl = 0, bytes_compl = 0;
6547
6548 if (tg3_flag(tp, ENABLE_TSS))
6549 index--;
6550
6551 txq = netdev_get_tx_queue(tp->dev, index);
6552
6553 while (sw_idx != hw_idx) {
6554 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6555 struct sk_buff *skb = ri->skb;
6556 int i, tx_bug = 0;
6557
6558 if (unlikely(skb == NULL)) {
6559 tg3_tx_recover(tp);
6560 return;
6561 }
6562
6563 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6564 struct skb_shared_hwtstamps timestamp;
6565 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6566 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6567
6568 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6569
6570 skb_tstamp_tx(skb, &timestamp);
6571 }
6572
6573 pci_unmap_single(tp->pdev,
6574 dma_unmap_addr(ri, mapping),
6575 skb_headlen(skb),
6576 PCI_DMA_TODEVICE);
6577
6578 ri->skb = NULL;
6579
6580 while (ri->fragmented) {
6581 ri->fragmented = false;
6582 sw_idx = NEXT_TX(sw_idx);
6583 ri = &tnapi->tx_buffers[sw_idx];
6584 }
6585
6586 sw_idx = NEXT_TX(sw_idx);
6587
6588 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6589 ri = &tnapi->tx_buffers[sw_idx];
6590 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6591 tx_bug = 1;
6592
6593 pci_unmap_page(tp->pdev,
6594 dma_unmap_addr(ri, mapping),
6595 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6596 PCI_DMA_TODEVICE);
6597
6598 while (ri->fragmented) {
6599 ri->fragmented = false;
6600 sw_idx = NEXT_TX(sw_idx);
6601 ri = &tnapi->tx_buffers[sw_idx];
6602 }
6603
6604 sw_idx = NEXT_TX(sw_idx);
6605 }
6606
6607 pkts_compl++;
6608 bytes_compl += skb->len;
6609
6610 dev_consume_skb_any(skb);
6611
6612 if (unlikely(tx_bug)) {
6613 tg3_tx_recover(tp);
6614 return;
6615 }
6616 }
6617
6618 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6619
6620 tnapi->tx_cons = sw_idx;
6621
6622 /* Need to make the tx_cons update visible to tg3_start_xmit()
6623 * before checking for netif_queue_stopped(). Without the
6624 * memory barrier, there is a small possibility that tg3_start_xmit()
6625 * will miss it and cause the queue to be stopped forever.
6626 */
6627 smp_mb();
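/* The barrier above pairs with one on the xmit side. A distilled
 * sketch of the two paths (not the literal driver code):
 *
 *   tg3_start_xmit()                   tg3_tx()
 *   ----------------                   --------
 *   stop queue (ring looks full)       tnapi->tx_cons = sw_idx;
 *   smp_mb();                          smp_mb();
 *   if (tg3_tx_avail() > thresh)       if (queue stopped &&
 *           wake queue;                    tg3_tx_avail() > thresh)
 *                                              wake queue;
 *
 * Whichever side executes second is guaranteed to observe the other
 * side's store, so the wakeup cannot be lost between the two checks.
 */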
6628
6629 if (unlikely(netif_tx_queue_stopped(txq) &&
6630 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6631 __netif_tx_lock(txq, smp_processor_id());
6632 if (netif_tx_queue_stopped(txq) &&
6633 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6634 netif_tx_wake_queue(txq);
6635 __netif_tx_unlock(txq);
6636 }
6637 }
6638
6639 static void tg3_frag_free(bool is_frag, void *data)
6640 {
6641 if (is_frag)
6642 skb_free_frag(data);
6643 else
6644 kfree(data);
6645 }
6646
6647 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6648 {
6649 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6650 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6651
6652 if (!ri->data)
6653 return;
6654
6655 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6656 map_sz, PCI_DMA_FROMDEVICE);
6657 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6658 ri->data = NULL;
6659 }
6660
6661
6662 /* Returns size of skb allocated or < 0 on error.
6663 *
6664 * We only need to fill in the address because the other members
6665 * of the RX descriptor are invariant, see tg3_init_rings.
6666 *
6667 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6668 * posting buffers we only dirty the first cache line of the RX
6669 * descriptor (containing the address). Whereas for the RX status
6670 * buffers the cpu only reads the last cacheline of the RX descriptor
6671 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6672 */
6673 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6674 u32 opaque_key, u32 dest_idx_unmasked,
6675 unsigned int *frag_size)
6676 {
6677 struct tg3_rx_buffer_desc *desc;
6678 struct ring_info *map;
6679 u8 *data;
6680 dma_addr_t mapping;
6681 int skb_size, data_size, dest_idx;
6682
6683 switch (opaque_key) {
6684 case RXD_OPAQUE_RING_STD:
6685 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6686 desc = &tpr->rx_std[dest_idx];
6687 map = &tpr->rx_std_buffers[dest_idx];
6688 data_size = tp->rx_pkt_map_sz;
6689 break;
6690
6691 case RXD_OPAQUE_RING_JUMBO:
6692 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6693 desc = &tpr->rx_jmb[dest_idx].std;
6694 map = &tpr->rx_jmb_buffers[dest_idx];
6695 data_size = TG3_RX_JMB_MAP_SZ;
6696 break;
6697
6698 default:
6699 return -EINVAL;
6700 }
6701
6702 /* Do not overwrite any of the map or rp information
6703 * until we are sure we can commit to a new buffer.
6704 *
6705 * Callers depend upon this behavior and assume that
6706 * we leave everything unchanged if we fail.
6707 */
6708 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6709 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6710 if (skb_size <= PAGE_SIZE) {
6711 data = napi_alloc_frag(skb_size);
6712 *frag_size = skb_size;
6713 } else {
6714 data = kmalloc(skb_size, GFP_ATOMIC);
6715 *frag_size = 0;
6716 }
6717 if (!data)
6718 return -ENOMEM;
6719
6720 mapping = pci_map_single(tp->pdev,
6721 data + TG3_RX_OFFSET(tp),
6722 data_size,
6723 PCI_DMA_FROMDEVICE);
6724 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6725 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6726 return -EIO;
6727 }
6728
6729 map->data = data;
6730 dma_unmap_addr_set(map, mapping, mapping);
6731
6732 desc->addr_hi = ((u64)mapping >> 32);
6733 desc->addr_lo = ((u64)mapping & 0xffffffff);
6734
6735 return data_size;
6736 }
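/* Sizing example for the allocation above (illustrative values,
 * assuming a 64-byte cache line and a skb_shared_info of a few
 * hundred bytes): a standard-MTU ring has data_size well under 2K,
 * so skb_size stays below PAGE_SIZE on 4K-page systems and the cheap
 * napi_alloc_frag() path is taken, with *frag_size set so tg3_rx()
 * can hand the buffer straight to build_skb(). Jumbo buffers
 * (TG3_RX_JMB_MAP_SZ) typically exceed a page and fall back to
 * kmalloc() with *frag_size = 0.
 */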
6737
6738 /* We only need to move over in the address because the other
6739 * members of the RX descriptor are invariant. See notes above
6740 * tg3_alloc_rx_data for full details.
6741 */
6742 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6743 struct tg3_rx_prodring_set *dpr,
6744 u32 opaque_key, int src_idx,
6745 u32 dest_idx_unmasked)
6746 {
6747 struct tg3 *tp = tnapi->tp;
6748 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6749 struct ring_info *src_map, *dest_map;
6750 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6751 int dest_idx;
6752
6753 switch (opaque_key) {
6754 case RXD_OPAQUE_RING_STD:
6755 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6756 dest_desc = &dpr->rx_std[dest_idx];
6757 dest_map = &dpr->rx_std_buffers[dest_idx];
6758 src_desc = &spr->rx_std[src_idx];
6759 src_map = &spr->rx_std_buffers[src_idx];
6760 break;
6761
6762 case RXD_OPAQUE_RING_JUMBO:
6763 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6764 dest_desc = &dpr->rx_jmb[dest_idx].std;
6765 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6766 src_desc = &spr->rx_jmb[src_idx].std;
6767 src_map = &spr->rx_jmb_buffers[src_idx];
6768 break;
6769
6770 default:
6771 return;
6772 }
6773
6774 dest_map->data = src_map->data;
6775 dma_unmap_addr_set(dest_map, mapping,
6776 dma_unmap_addr(src_map, mapping));
6777 dest_desc->addr_hi = src_desc->addr_hi;
6778 dest_desc->addr_lo = src_desc->addr_lo;
6779
6780 /* Ensure that the update to the skb happens after the physical
6781 * addresses have been transferred to the new BD location.
6782 */
6783 smp_wmb();
6784
6785 src_map->data = NULL;
6786 }
6787
6788 /* The RX ring scheme is composed of multiple rings which post fresh
6789 * buffers to the chip, and one special ring the chip uses to report
6790 * status back to the host.
6791 *
6792 * The special ring reports the status of received packets to the
6793 * host. The chip does not write into the original descriptor the
6794 * RX buffer was obtained from. The chip simply takes the original
6795 * descriptor as provided by the host, updates the status and length
6796 * field, then writes this into the next status ring entry.
6797 *
6798 * Each ring the host uses to post buffers to the chip is described
6799 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6800 * it is first placed into the on-chip ram. When the packet's length
6801 * is known, it walks down the TG3_BDINFO entries to select the ring.
6802 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6803 * which is within the range of the new packet's length is chosen.
6804 *
6805 * The "separate ring for rx status" scheme may sound queer, but it makes
6806 * sense from a cache coherency perspective. If only the host writes
6807 * to the buffer post rings, and only the chip writes to the rx status
6808 * rings, then cache lines never move beyond shared-modified state.
6809 * If both the host and chip were to write into the same ring, cache line
6810 * eviction could occur since both entities want it in an exclusive state.
6811 */
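/* A distilled picture of the scheme described above (sketch only):
 *
 *   host                                chip
 *   ----                                ----
 *   post fresh buffers on the           DMA packet to on-chip RAM,
 *   std/jumbo producer rings  ------->  pick a ring via TG3_BDINFO
 *                                       MAXLEN, DMA into the posted
 *   consume return-ring entries         buffer, then write status +
 *   in tg3_rx(), refill, and   <------  length into the next return
 *   ack via the consumer mbox           (status) ring entry
 *
 * Every ring has exactly one writer, which is the cache-coherency
 * property the last paragraph argues for.
 */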
6812 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6813 {
6814 struct tg3 *tp = tnapi->tp;
6815 u32 work_mask, rx_std_posted = 0;
6816 u32 std_prod_idx, jmb_prod_idx;
6817 u32 sw_idx = tnapi->rx_rcb_ptr;
6818 u16 hw_idx;
6819 int received;
6820 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6821
6822 hw_idx = *(tnapi->rx_rcb_prod_idx);
6823 /*
6824 * We need to order the read of hw_idx and the read of
6825 * the opaque cookie.
6826 */
6827 rmb();
6828 work_mask = 0;
6829 received = 0;
6830 std_prod_idx = tpr->rx_std_prod_idx;
6831 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6832 while (sw_idx != hw_idx && budget > 0) {
6833 struct ring_info *ri;
6834 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6835 unsigned int len;
6836 struct sk_buff *skb;
6837 dma_addr_t dma_addr;
6838 u32 opaque_key, desc_idx, *post_ptr;
6839 u8 *data;
6840 u64 tstamp = 0;
6841
6842 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6843 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6844 if (opaque_key == RXD_OPAQUE_RING_STD) {
6845 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6846 dma_addr = dma_unmap_addr(ri, mapping);
6847 data = ri->data;
6848 post_ptr = &std_prod_idx;
6849 rx_std_posted++;
6850 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6851 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6852 dma_addr = dma_unmap_addr(ri, mapping);
6853 data = ri->data;
6854 post_ptr = &jmb_prod_idx;
6855 } else
6856 goto next_pkt_nopost;
6857
6858 work_mask |= opaque_key;
6859
6860 if (desc->err_vlan & RXD_ERR_MASK) {
6861 drop_it:
6862 tg3_recycle_rx(tnapi, tpr, opaque_key,
6863 desc_idx, *post_ptr);
6864 drop_it_no_recycle:
6865 /* Other statistics kept track of by card. */
6866 tp->rx_dropped++;
6867 goto next_pkt;
6868 }
6869
6870 prefetch(data + TG3_RX_OFFSET(tp));
6871 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6872 ETH_FCS_LEN;
6873
6874 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6875 RXD_FLAG_PTPSTAT_PTPV1 ||
6876 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6877 RXD_FLAG_PTPSTAT_PTPV2) {
6878 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6879 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6880 }
6881
6882 if (len > TG3_RX_COPY_THRESH(tp)) {
6883 int skb_size;
6884 unsigned int frag_size;
6885
6886 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6887 *post_ptr, &frag_size);
6888 if (skb_size < 0)
6889 goto drop_it;
6890
6891 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6892 PCI_DMA_FROMDEVICE);
6893
6894 /* Ensure that the update to the data happens
6895 * after the usage of the old DMA mapping.
6896 */
6897 smp_wmb();
6898
6899 ri->data = NULL;
6900
6901 skb = build_skb(data, frag_size);
6902 if (!skb) {
6903 tg3_frag_free(frag_size != 0, data);
6904 goto drop_it_no_recycle;
6905 }
6906 skb_reserve(skb, TG3_RX_OFFSET(tp));
6907 } else {
6908 tg3_recycle_rx(tnapi, tpr, opaque_key,
6909 desc_idx, *post_ptr);
6910
6911 skb = netdev_alloc_skb(tp->dev,
6912 len + TG3_RAW_IP_ALIGN);
6913 if (skb == NULL)
6914 goto drop_it_no_recycle;
6915
6916 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6917 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6918 memcpy(skb->data,
6919 data + TG3_RX_OFFSET(tp),
6920 len);
6921 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6922 }
6923
6924 skb_put(skb, len);
6925 if (tstamp)
6926 tg3_hwclock_to_timestamp(tp, tstamp,
6927 skb_hwtstamps(skb));
6928
6929 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6930 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6931 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6932 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6933 skb->ip_summed = CHECKSUM_UNNECESSARY;
6934 else
6935 skb_checksum_none_assert(skb);
6936
6937 skb->protocol = eth_type_trans(skb, tp->dev);
6938
6939 if (len > (tp->dev->mtu + ETH_HLEN) &&
6940 skb->protocol != htons(ETH_P_8021Q) &&
6941 skb->protocol != htons(ETH_P_8021AD)) {
6942 dev_kfree_skb_any(skb);
6943 goto drop_it_no_recycle;
6944 }
6945
6946 if (desc->type_flags & RXD_FLAG_VLAN &&
6947 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6948 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6949 desc->err_vlan & RXD_VLAN_MASK);
6950
6951 napi_gro_receive(&tnapi->napi, skb);
6952
6953 received++;
6954 budget--;
6955
6956 next_pkt:
6957 (*post_ptr)++;
6958
6959 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6960 tpr->rx_std_prod_idx = std_prod_idx &
6961 tp->rx_std_ring_mask;
6962 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6963 tpr->rx_std_prod_idx);
6964 work_mask &= ~RXD_OPAQUE_RING_STD;
6965 rx_std_posted = 0;
6966 }
6967 next_pkt_nopost:
6968 sw_idx++;
6969 sw_idx &= tp->rx_ret_ring_mask;
6970
6971 /* Refresh hw_idx to see if there is new work */
6972 if (sw_idx == hw_idx) {
6973 hw_idx = *(tnapi->rx_rcb_prod_idx);
6974 rmb();
6975 }
6976 }
6977
6978 /* ACK the status ring. */
6979 tnapi->rx_rcb_ptr = sw_idx;
6980 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6981
6982 /* Refill RX ring(s). */
6983 if (!tg3_flag(tp, ENABLE_RSS)) {
6984 /* Sync BD data before updating mailbox */
6985 wmb();
6986
6987 if (work_mask & RXD_OPAQUE_RING_STD) {
6988 tpr->rx_std_prod_idx = std_prod_idx &
6989 tp->rx_std_ring_mask;
6990 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6991 tpr->rx_std_prod_idx);
6992 }
6993 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6994 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6995 tp->rx_jmb_ring_mask;
6996 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6997 tpr->rx_jmb_prod_idx);
6998 }
6999 } else if (work_mask) {
7000 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7001 * updated before the producer indices can be updated.
7002 */
7003 smp_wmb();
7004
7005 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7006 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7007
7008 if (tnapi != &tp->napi[1]) {
7009 tp->rx_refill = true;
7010 napi_schedule(&tp->napi[1].napi);
7011 }
7012 }
7013
7014 return received;
7015 }
7016
7017 static void tg3_poll_link(struct tg3 *tp)
7018 {
7019 /* handle link change and other phy events */
7020 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7021 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7022
7023 if (sblk->status & SD_STATUS_LINK_CHG) {
7024 sblk->status = SD_STATUS_UPDATED |
7025 (sblk->status & ~SD_STATUS_LINK_CHG);
7026 spin_lock(&tp->lock);
7027 if (tg3_flag(tp, USE_PHYLIB)) {
7028 tw32_f(MAC_STATUS,
7029 (MAC_STATUS_SYNC_CHANGED |
7030 MAC_STATUS_CFG_CHANGED |
7031 MAC_STATUS_MI_COMPLETION |
7032 MAC_STATUS_LNKSTATE_CHANGED));
7033 udelay(40);
7034 } else
7035 tg3_setup_phy(tp, false);
7036 spin_unlock(&tp->lock);
7037 }
7038 }
7039 }
7040
7041 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7042 struct tg3_rx_prodring_set *dpr,
7043 struct tg3_rx_prodring_set *spr)
7044 {
7045 u32 si, di, cpycnt, src_prod_idx;
7046 int i, err = 0;
7047
7048 while (1) {
7049 src_prod_idx = spr->rx_std_prod_idx;
7050
7051 /* Make sure updates to the rx_std_buffers[] entries and the
7052 * standard producer index are seen in the correct order.
7053 */
7054 smp_rmb();
7055
7056 if (spr->rx_std_cons_idx == src_prod_idx)
7057 break;
7058
7059 if (spr->rx_std_cons_idx < src_prod_idx)
7060 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7061 else
7062 cpycnt = tp->rx_std_ring_mask + 1 -
7063 spr->rx_std_cons_idx;
7064
7065 cpycnt = min(cpycnt,
7066 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7067
7068 si = spr->rx_std_cons_idx;
7069 di = dpr->rx_std_prod_idx;
7070
7071 for (i = di; i < di + cpycnt; i++) {
7072 if (dpr->rx_std_buffers[i].data) {
7073 cpycnt = i - di;
7074 err = -ENOSPC;
7075 break;
7076 }
7077 }
7078
7079 if (!cpycnt)
7080 break;
7081
7082 /* Ensure that updates to the rx_std_buffers ring and the
7083 * shadowed hardware producer ring from tg3_recycle_skb() are
7084 * ordered correctly WRT the skb check above.
7085 */
7086 smp_rmb();
7087
7088 memcpy(&dpr->rx_std_buffers[di],
7089 &spr->rx_std_buffers[si],
7090 cpycnt * sizeof(struct ring_info));
7091
7092 for (i = 0; i < cpycnt; i++, di++, si++) {
7093 struct tg3_rx_buffer_desc *sbd, *dbd;
7094 sbd = &spr->rx_std[si];
7095 dbd = &dpr->rx_std[di];
7096 dbd->addr_hi = sbd->addr_hi;
7097 dbd->addr_lo = sbd->addr_lo;
7098 }
7099
7100 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7101 tp->rx_std_ring_mask;
7102 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7103 tp->rx_std_ring_mask;
7104 }
7105
7106 while (1) {
7107 src_prod_idx = spr->rx_jmb_prod_idx;
7108
7109 /* Make sure updates to the rx_jmb_buffers[] entries and
7110 * the jumbo producer index are seen in the correct order.
7111 */
7112 smp_rmb();
7113
7114 if (spr->rx_jmb_cons_idx == src_prod_idx)
7115 break;
7116
7117 if (spr->rx_jmb_cons_idx < src_prod_idx)
7118 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7119 else
7120 cpycnt = tp->rx_jmb_ring_mask + 1 -
7121 spr->rx_jmb_cons_idx;
7122
7123 cpycnt = min(cpycnt,
7124 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7125
7126 si = spr->rx_jmb_cons_idx;
7127 di = dpr->rx_jmb_prod_idx;
7128
7129 for (i = di; i < di + cpycnt; i++) {
7130 if (dpr->rx_jmb_buffers[i].data) {
7131 cpycnt = i - di;
7132 err = -ENOSPC;
7133 break;
7134 }
7135 }
7136
7137 if (!cpycnt)
7138 break;
7139
7140 /* Ensure that updates to the rx_jmb_buffers ring and the
7141 * shadowed hardware producer ring from tg3_recycle_skb() are
7142 * ordered correctly WRT the skb check above.
7143 */
7144 smp_rmb();
7145
7146 memcpy(&dpr->rx_jmb_buffers[di],
7147 &spr->rx_jmb_buffers[si],
7148 cpycnt * sizeof(struct ring_info));
7149
7150 for (i = 0; i < cpycnt; i++, di++, si++) {
7151 struct tg3_rx_buffer_desc *sbd, *dbd;
7152 sbd = &spr->rx_jmb[si].std;
7153 dbd = &dpr->rx_jmb[di].std;
7154 dbd->addr_hi = sbd->addr_hi;
7155 dbd->addr_lo = sbd->addr_lo;
7156 }
7157
7158 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7159 tp->rx_jmb_ring_mask;
7160 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7161 tp->rx_jmb_ring_mask;
7162 }
7163
7164 return err;
7165 }
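/* Worked example of the cpycnt clamping above (illustrative indices,
 * 512-entry ring so the mask is 511): with source cons = 508 and
 * prod = 4, cons > prod means the pending entries wrap, so this pass
 * copies mask + 1 - cons = 4 entries up to the end of the ring; the
 * next iteration of the while loop starts at cons = 0 and copies the
 * remaining 4. The min() against the destination producer applies the
 * same end-of-ring clamp on the write side, and the -ENOSPC check
 * shrinks cpycnt further if a destination slot is still occupied.
 */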
7166
7167 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7168 {
7169 struct tg3 *tp = tnapi->tp;
7170
7171 /* run TX completion thread */
7172 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7173 tg3_tx(tnapi);
7174 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7175 return work_done;
7176 }
7177
7178 if (!tnapi->rx_rcb_prod_idx)
7179 return work_done;
7180
7181 /* run RX thread, within the bounds set by NAPI.
7182 * All RX "locking" is done by ensuring outside
7183 * code synchronizes with tg3->napi.poll()
7184 */
7185 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7186 work_done += tg3_rx(tnapi, budget - work_done);
7187
7188 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7189 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7190 int i, err = 0;
7191 u32 std_prod_idx = dpr->rx_std_prod_idx;
7192 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7193
7194 tp->rx_refill = false;
7195 for (i = 1; i <= tp->rxq_cnt; i++)
7196 err |= tg3_rx_prodring_xfer(tp, dpr,
7197 &tp->napi[i].prodring);
7198
7199 wmb();
7200
7201 if (std_prod_idx != dpr->rx_std_prod_idx)
7202 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7203 dpr->rx_std_prod_idx);
7204
7205 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7206 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7207 dpr->rx_jmb_prod_idx);
7208
7209 if (err)
7210 tw32_f(HOSTCC_MODE, tp->coal_now);
7211 }
7212
7213 return work_done;
7214 }
7215
7216 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7217 {
7218 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7219 schedule_work(&tp->reset_task);
7220 }
7221
7222 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7223 {
7224 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7225 cancel_work_sync(&tp->reset_task);
7226 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7227 }
7228
7229 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7230 {
7231 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7232 struct tg3 *tp = tnapi->tp;
7233 int work_done = 0;
7234 struct tg3_hw_status *sblk = tnapi->hw_status;
7235
7236 while (1) {
7237 work_done = tg3_poll_work(tnapi, work_done, budget);
7238
7239 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7240 goto tx_recovery;
7241
7242 if (unlikely(work_done >= budget))
7243 break;
7244
7245 /* tp->last_tag is used in tg3_int_reenable() below
7246 * to tell the hw how much work has been processed,
7247 * so we must read it before checking for more work.
7248 */
7249 tnapi->last_tag = sblk->status_tag;
7250 tnapi->last_irq_tag = tnapi->last_tag;
7251 rmb();
7252
7253 /* check for RX/TX work to do */
7254 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7255 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7256
7257 /* This test here is not race free, but will reduce
7258 * the number of interrupts by looping again.
7259 */
7260 if (tnapi == &tp->napi[1] && tp->rx_refill)
7261 continue;
7262
7263 napi_complete_done(napi, work_done);
7264 /* Reenable interrupts. */
7265 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7266
7267 /* This test here is synchronized by napi_schedule()
7268 * and napi_complete() to close the race condition.
7269 */
7270 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7271 tw32(HOSTCC_MODE, tp->coalesce_mode |
7272 HOSTCC_MODE_ENABLE |
7273 tnapi->coal_now);
7274 }
7275 break;
7276 }
7277 }
7278
7279 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7280 return work_done;
7281
7282 tx_recovery:
7283 /* work_done is guaranteed to be less than budget. */
7284 napi_complete(napi);
7285 tg3_reset_task_schedule(tp);
7286 return work_done;
7287 }
7288
7289 static void tg3_process_error(struct tg3 *tp)
7290 {
7291 u32 val;
7292 bool real_error = false;
7293
7294 if (tg3_flag(tp, ERROR_PROCESSED))
7295 return;
7296
7297 /* Check Flow Attention register */
7298 val = tr32(HOSTCC_FLOW_ATTN);
7299 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7300 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7301 real_error = true;
7302 }
7303
7304 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7305 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7306 real_error = true;
7307 }
7308
7309 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7310 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7311 real_error = true;
7312 }
7313
7314 if (!real_error)
7315 return;
7316
7317 tg3_dump_state(tp);
7318
7319 tg3_flag_set(tp, ERROR_PROCESSED);
7320 tg3_reset_task_schedule(tp);
7321 }
7322
7323 static int tg3_poll(struct napi_struct *napi, int budget)
7324 {
7325 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7326 struct tg3 *tp = tnapi->tp;
7327 int work_done = 0;
7328 struct tg3_hw_status *sblk = tnapi->hw_status;
7329
7330 while (1) {
7331 if (sblk->status & SD_STATUS_ERROR)
7332 tg3_process_error(tp);
7333
7334 tg3_poll_link(tp);
7335
7336 work_done = tg3_poll_work(tnapi, work_done, budget);
7337
7338 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7339 goto tx_recovery;
7340
7341 if (unlikely(work_done >= budget))
7342 break;
7343
7344 if (tg3_flag(tp, TAGGED_STATUS)) {
7345 /* tp->last_tag is used in tg3_int_reenable() below
7346 * to tell the hw how much work has been processed,
7347 * so we must read it before checking for more work.
7348 */
7349 tnapi->last_tag = sblk->status_tag;
7350 tnapi->last_irq_tag = tnapi->last_tag;
7351 rmb();
7352 } else
7353 sblk->status &= ~SD_STATUS_UPDATED;
7354
7355 if (likely(!tg3_has_work(tnapi))) {
7356 napi_complete_done(napi, work_done);
7357 tg3_int_reenable(tnapi);
7358 break;
7359 }
7360 }
7361
7362 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7363 return work_done;
7364
7365 tx_recovery:
7366 /* work_done is guaranteed to be less than budget. */
7367 napi_complete(napi);
7368 tg3_reset_task_schedule(tp);
7369 return work_done;
7370 }
7371
7372 static void tg3_napi_disable(struct tg3 *tp)
7373 {
7374 int i;
7375
7376 for (i = tp->irq_cnt - 1; i >= 0; i--)
7377 napi_disable(&tp->napi[i].napi);
7378 }
7379
7380 static void tg3_napi_enable(struct tg3 *tp)
7381 {
7382 int i;
7383
7384 for (i = 0; i < tp->irq_cnt; i++)
7385 napi_enable(&tp->napi[i].napi);
7386 }
7387
7388 static void tg3_napi_init(struct tg3 *tp)
7389 {
7390 int i;
7391
7392 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7393 for (i = 1; i < tp->irq_cnt; i++)
7394 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7395 }
7396
7397 static void tg3_napi_fini(struct tg3 *tp)
7398 {
7399 int i;
7400
7401 for (i = 0; i < tp->irq_cnt; i++)
7402 netif_napi_del(&tp->napi[i].napi);
7403 }
7404
7405 static inline void tg3_netif_stop(struct tg3 *tp)
7406 {
7407 netif_trans_update(tp->dev); /* prevent tx timeout */
7408 tg3_napi_disable(tp);
7409 netif_carrier_off(tp->dev);
7410 netif_tx_disable(tp->dev);
7411 }
7412
7413 /* tp->lock must be held */
7414 static inline void tg3_netif_start(struct tg3 *tp)
7415 {
7416 tg3_ptp_resume(tp);
7417
7418 /* NOTE: unconditional netif_tx_wake_all_queues is only
7419 * appropriate so long as all callers are assured to
7420 * have free tx slots (such as after tg3_init_hw)
7421 */
7422 netif_tx_wake_all_queues(tp->dev);
7423
7424 if (tp->link_up)
7425 netif_carrier_on(tp->dev);
7426
7427 tg3_napi_enable(tp);
7428 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7429 tg3_enable_ints(tp);
7430 }
7431
7432 static void tg3_irq_quiesce(struct tg3 *tp)
7433 __releases(tp->lock)
7434 __acquires(tp->lock)
7435 {
7436 int i;
7437
7438 BUG_ON(tp->irq_sync);
7439
7440 tp->irq_sync = 1;
7441 smp_mb();
7442
7443 spin_unlock_bh(&tp->lock);
7444
7445 for (i = 0; i < tp->irq_cnt; i++)
7446 synchronize_irq(tp->napi[i].irq_vec);
7447
7448 spin_lock_bh(&tp->lock);
7449 }
7450
7451 /* Fully shut down all tg3 driver activity elsewhere in the system.
7452 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7453 * with as well. Most of the time, this is not necessary except when
7454 * shutting down the device.
7455 */
7456 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7457 {
7458 spin_lock_bh(&tp->lock);
7459 if (irq_sync)
7460 tg3_irq_quiesce(tp);
7461 }
7462
7463 static inline void tg3_full_unlock(struct tg3 *tp)
7464 {
7465 spin_unlock_bh(&tp->lock);
7466 }
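/* Typical usage of the pair above (sketch, mirroring how the rest of
 * the driver uses it): callers that are about to reconfigure or reset
 * the hardware pass irq_sync = 1 so no ISR can be running
 * concurrently:
 *
 *	tg3_full_lock(tp, 1);
 *	... stop the chip, rewrite rings/registers ...
 *	tg3_full_unlock(tp);
 *
 * while short paths that only need mutual exclusion against other
 * lock holders (such as the PTP callbacks above) pass irq_sync = 0.
 */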
7467
7468 /* One-shot MSI handler - Chip automatically disables interrupt
7469 * after sending MSI so driver doesn't have to do it.
7470 */
7471 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7472 {
7473 struct tg3_napi *tnapi = dev_id;
7474 struct tg3 *tp = tnapi->tp;
7475
7476 prefetch(tnapi->hw_status);
7477 if (tnapi->rx_rcb)
7478 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7479
7480 if (likely(!tg3_irq_sync(tp)))
7481 napi_schedule(&tnapi->napi);
7482
7483 return IRQ_HANDLED;
7484 }
7485
7486 /* MSI ISR - No need to check for interrupt sharing and no need to
7487 * flush status block and interrupt mailbox. PCI ordering rules
7488 * guarantee that MSI will arrive after the status block.
7489 */
7490 static irqreturn_t tg3_msi(int irq, void *dev_id)
7491 {
7492 struct tg3_napi *tnapi = dev_id;
7493 struct tg3 *tp = tnapi->tp;
7494
7495 prefetch(tnapi->hw_status);
7496 if (tnapi->rx_rcb)
7497 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7498 /*
7499 * Writing any value to intr-mbox-0 clears PCI INTA# and
7500 * chip-internal interrupt pending events.
7501 * Writing non-zero to intr-mbox-0 additionally tells the
7502 * NIC to stop sending us irqs, engaging "in-intr-handler"
7503 * event coalescing.
7504 */
7505 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7506 if (likely(!tg3_irq_sync(tp)))
7507 napi_schedule(&tnapi->napi);
7508
7509 return IRQ_RETVAL(1);
7510 }
7511
7512 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7513 {
7514 struct tg3_napi *tnapi = dev_id;
7515 struct tg3 *tp = tnapi->tp;
7516 struct tg3_hw_status *sblk = tnapi->hw_status;
7517 unsigned int handled = 1;
7518
7519 /* In INTx mode, it is possible for the interrupt to arrive at
7520 * the CPU before the status block write posted prior to the interrupt.
7521 * Reading the PCI State register will confirm whether the
7522 * interrupt is ours and will flush the status block.
7523 */
7524 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7525 if (tg3_flag(tp, CHIP_RESETTING) ||
7526 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7527 handled = 0;
7528 goto out;
7529 }
7530 }
7531
7532 /*
7533 * Writing any value to intr-mbox-0 clears PCI INTA# and
7534 * chip-internal interrupt pending events.
7535 * Writing non-zero to intr-mbox-0 additionally tells the
7536 * NIC to stop sending us irqs, engaging "in-intr-handler"
7537 * event coalescing.
7538 *
7539 * Flush the mailbox to de-assert the IRQ immediately to prevent
7540 * spurious interrupts. The flush impacts performance but
7541 * excessive spurious interrupts can be worse in some cases.
7542 */
7543 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7544 if (tg3_irq_sync(tp))
7545 goto out;
7546 sblk->status &= ~SD_STATUS_UPDATED;
7547 if (likely(tg3_has_work(tnapi))) {
7548 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7549 napi_schedule(&tnapi->napi);
7550 } else {
7551 /* No work, shared interrupt perhaps? re-enable
7552 * interrupts, and flush that PCI write
7553 */
7554 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7555 0x00000000);
7556 }
7557 out:
7558 return IRQ_RETVAL(handled);
7559 }
7560
7561 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7562 {
7563 struct tg3_napi *tnapi = dev_id;
7564 struct tg3 *tp = tnapi->tp;
7565 struct tg3_hw_status *sblk = tnapi->hw_status;
7566 unsigned int handled = 1;
7567
7568 /* In INTx mode, it is possible for the interrupt to arrive at
7569 * the CPU before the status block write posted prior to the interrupt.
7570 * Reading the PCI State register will confirm whether the
7571 * interrupt is ours and will flush the status block.
7572 */
7573 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7574 if (tg3_flag(tp, CHIP_RESETTING) ||
7575 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7576 handled = 0;
7577 goto out;
7578 }
7579 }
7580
7581 /*
7582 * writing any value to intr-mbox-0 clears PCI INTA# and
7583 * chip-internal interrupt pending events.
7584 * writing non-zero to intr-mbox-0 additionally tells the
7585 * NIC to stop sending us irqs, engaging "in-intr-handler"
7586 * event coalescing.
7587 *
7588 * Flush the mailbox to de-assert the IRQ immediately to prevent
7589 * spurious interrupts. The flush impacts performance but
7590 * excessive spurious interrupts can be worse in some cases.
7591 */
7592 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7593
7594 /*
7595 * In a shared interrupt configuration, sometimes other devices'
7596 * interrupts will scream. We record the current status tag here
7597 * so that the above check can report that the screaming interrupts
7598 * are unhandled. Eventually they will be silenced.
7599 */
7600 tnapi->last_irq_tag = sblk->status_tag;
7601
7602 if (tg3_irq_sync(tp))
7603 goto out;
7604
7605 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7606
7607 napi_schedule(&tnapi->napi);
7608
7609 out:
7610 return IRQ_RETVAL(handled);
7611 }
7612
7613 /* ISR for interrupt test */
7614 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7615 {
7616 struct tg3_napi *tnapi = dev_id;
7617 struct tg3 *tp = tnapi->tp;
7618 struct tg3_hw_status *sblk = tnapi->hw_status;
7619
7620 if ((sblk->status & SD_STATUS_UPDATED) ||
7621 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7622 tg3_disable_ints(tp);
7623 return IRQ_RETVAL(1);
7624 }
7625 return IRQ_RETVAL(0);
7626 }
7627
7628 #ifdef CONFIG_NET_POLL_CONTROLLER
7629 static void tg3_poll_controller(struct net_device *dev)
7630 {
7631 int i;
7632 struct tg3 *tp = netdev_priv(dev);
7633
7634 if (tg3_irq_sync(tp))
7635 return;
7636
7637 for (i = 0; i < tp->irq_cnt; i++)
7638 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7639 }
7640 #endif
7641
7642 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7643 {
7644 struct tg3 *tp = netdev_priv(dev);
7645
7646 if (netif_msg_tx_err(tp)) {
7647 netdev_err(dev, "transmit timed out, resetting\n");
7648 tg3_dump_state(tp);
7649 }
7650
7651 tg3_reset_task_schedule(tp);
7652 }
7653
7654 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
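/* Worked example (illustration): mapping = 0xfffff000 with len = 0x2000
 * gives base + len + 8 = 0x00001008 after 32-bit truncation, which is
 * below base, so the buffer crosses a 4GB boundary. The extra 8 bytes
 * also flag buffers that merely end within 8 bytes of a boundary.
 */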
7655 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7656 {
7657 u32 base = (u32) mapping & 0xffffffff;
7658
7659 return base + len + 8 < base;
7660 }
7661
7662 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7663 * of any 4GB boundaries: 4G, 8G, etc
7664 */
7665 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7666 u32 len, u32 mss)
7667 {
7668 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7669 u32 base = (u32) mapping & 0xffffffff;
7670
7671 return ((base + len + (mss & 0x3fff)) < base);
7672 }
7673 return 0;
7674 }
7675
7676 /* Test for DMA addresses > 40-bit */
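/* Note: the preprocessor guard below means this check is only compiled
 * in where a mapping can plausibly exceed 40 bits (64-bit kernels with
 * CONFIG_HIGHMEM); in all other configurations the mapping is presumed
 * to fit and the test compiles to 0.
 */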
7677 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7678 int len)
7679 {
7680 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7681 if (tg3_flag(tp, 40BIT_DMA_BUG))
7682 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7683 return 0;
7684 #else
7685 return 0;
7686 #endif
7687 }
7688
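/* Field packing (illustration): a mapping of 0x0000001234567890 is
 * stored as addr_hi = 0x00000012 and addr_lo = 0x34567890; len shares
 * len_flags with the low 16 flag bits, and mss shares vlan_tag with
 * the VLAN id.
 */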
7689 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7690 dma_addr_t mapping, u32 len, u32 flags,
7691 u32 mss, u32 vlan)
7692 {
7693 txbd->addr_hi = ((u64) mapping >> 32);
7694 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7695 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7696 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7697 }
7698
7699 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7700 dma_addr_t map, u32 len, u32 flags,
7701 u32 mss, u32 vlan)
7702 {
7703 struct tg3 *tp = tnapi->tp;
7704 bool hwbug = false;
7705
7706 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7707 hwbug = true;
7708
7709 if (tg3_4g_overflow_test(map, len))
7710 hwbug = true;
7711
7712 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7713 hwbug = true;
7714
7715 if (tg3_40bit_overflow_test(tp, map, len))
7716 hwbug = true;
7717
7718 if (tp->dma_limit) {
7719 u32 prvidx = *entry;
7720 u32 tmp_flag = flags & ~TXD_FLAG_END;
7721 while (len > tp->dma_limit && *budget) {
7722 u32 frag_len = tp->dma_limit;
7723 len -= tp->dma_limit;
7724
7725 /* Avoid the 8-byte DMA problem */
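/* Illustration: with dma_limit = 4096 and len = 4100, a full-size
 * chunk would leave a 4-byte tail, so the split is rebalanced to
 * 2048 + 2052, keeping both pieces above 8 bytes.
 */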
7726 if (len <= 8) {
7727 len += tp->dma_limit / 2;
7728 frag_len = tp->dma_limit / 2;
7729 }
7730
7731 tnapi->tx_buffers[*entry].fragmented = true;
7732
7733 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7734 frag_len, tmp_flag, mss, vlan);
7735 *budget -= 1;
7736 prvidx = *entry;
7737 *entry = NEXT_TX(*entry);
7738
7739 map += frag_len;
7740 }
7741
7742 if (len) {
7743 if (*budget) {
7744 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7745 len, flags, mss, vlan);
7746 *budget -= 1;
7747 *entry = NEXT_TX(*entry);
7748 } else {
7749 hwbug = true;
7750 tnapi->tx_buffers[prvidx].fragmented = false;
7751 }
7752 }
7753 } else {
7754 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7755 len, flags, mss, vlan);
7756 *entry = NEXT_TX(*entry);
7757 }
7758
7759 return hwbug;
7760 }
7761
7762 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7763 {
7764 int i;
7765 struct sk_buff *skb;
7766 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7767
7768 skb = txb->skb;
7769 txb->skb = NULL;
7770
7771 pci_unmap_single(tnapi->tp->pdev,
7772 dma_unmap_addr(txb, mapping),
7773 skb_headlen(skb),
7774 PCI_DMA_TODEVICE);
7775
7776 while (txb->fragmented) {
7777 txb->fragmented = false;
7778 entry = NEXT_TX(entry);
7779 txb = &tnapi->tx_buffers[entry];
7780 }
7781
7782 for (i = 0; i <= last; i++) {
7783 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7784
7785 entry = NEXT_TX(entry);
7786 txb = &tnapi->tx_buffers[entry];
7787
7788 pci_unmap_page(tnapi->tp->pdev,
7789 dma_unmap_addr(txb, mapping),
7790 skb_frag_size(frag), PCI_DMA_TODEVICE);
7791
7792 while (txb->fragmented) {
7793 txb->fragmented = false;
7794 entry = NEXT_TX(entry);
7795 txb = &tnapi->tx_buffers[entry];
7796 }
7797 }
7798 }
7799
7800 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7801 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7802 struct sk_buff **pskb,
7803 u32 *entry, u32 *budget,
7804 u32 base_flags, u32 mss, u32 vlan)
7805 {
7806 struct tg3 *tp = tnapi->tp;
7807 struct sk_buff *new_skb, *skb = *pskb;
7808 dma_addr_t new_addr = 0;
7809 int ret = 0;
7810
7811 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7812 new_skb = skb_copy(skb, GFP_ATOMIC);
7813 else {
7814 int more_headroom = 4 - ((unsigned long)skb->data & 3);
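/* more_headroom pads the copy so the new data area can start on a
 * 4-byte boundary, e.g. skb->data ending in ...2 yields
 * more_headroom = 2. Presumably this sidesteps the 5701's problem
 * with unaligned TX buffers.
 */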
7815
7816 new_skb = skb_copy_expand(skb,
7817 skb_headroom(skb) + more_headroom,
7818 skb_tailroom(skb), GFP_ATOMIC);
7819 }
7820
7821 if (!new_skb) {
7822 ret = -1;
7823 } else {
7824 /* New SKB is guaranteed to be linear. */
7825 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7826 PCI_DMA_TODEVICE);
7827 /* Make sure the mapping succeeded */
7828 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7829 dev_kfree_skb_any(new_skb);
7830 ret = -1;
7831 } else {
7832 u32 save_entry = *entry;
7833
7834 base_flags |= TXD_FLAG_END;
7835
7836 tnapi->tx_buffers[*entry].skb = new_skb;
7837 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7838 mapping, new_addr);
7839
7840 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7841 new_skb->len, base_flags,
7842 mss, vlan)) {
7843 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7844 dev_kfree_skb_any(new_skb);
7845 ret = -1;
7846 }
7847 }
7848 }
7849
7850 dev_consume_skb_any(skb);
7851 *pskb = new_skb;
7852 return ret;
7853 }
7854
7855 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7856 {
7857 /* Check if we will never have enough descriptors,
7858 * as gso_segs can be more than current ring size
7859 */
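/* The divisor of 3 mirrors the worst-case budget in tg3_tso_bug(),
 * which reserves roughly three descriptors per resulting segment.
 */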
7860 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7861 }
7862
7863 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7864
7865 /* Use GSO to work around all TSO packets that meet HW bug conditions
7866 * indicated in tg3_tx_frag_set()
7867 */
7868 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7869 struct netdev_queue *txq, struct sk_buff *skb)
7870 {
7871 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7872 struct sk_buff *segs, *seg, *next;
7873
7874 /* Estimate the number of fragments in the worst case */
7875 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7876 netif_tx_stop_queue(txq);
7877
7878 /* netif_tx_stop_queue() must be done before checking
7879 * tx index in tg3_tx_avail() below, because in
7880 * tg3_tx(), we update tx index before checking for
7881 * netif_tx_queue_stopped().
7882 */
7883 smp_mb();
7884 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7885 return NETDEV_TX_BUSY;
7886
7887 netif_tx_wake_queue(txq);
7888 }
7889
7890 segs = skb_gso_segment(skb, tp->dev->features &
7891 ~(NETIF_F_TSO | NETIF_F_TSO6));
7892 if (IS_ERR(segs) || !segs)
7893 goto tg3_tso_bug_end;
7894
7895 skb_list_walk_safe(segs, seg, next) {
7896 skb_mark_not_on_list(seg);
7897 tg3_start_xmit(seg, tp->dev);
7898 }
7899
7900 tg3_tso_bug_end:
7901 dev_consume_skb_any(skb);
7902
7903 return NETDEV_TX_OK;
7904 }
7905
7906 /* hard_start_xmit for all devices */
7907 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7908 {
7909 struct tg3 *tp = netdev_priv(dev);
7910 u32 len, entry, base_flags, mss, vlan = 0;
7911 u32 budget;
7912 int i = -1, would_hit_hwbug;
7913 dma_addr_t mapping;
7914 struct tg3_napi *tnapi;
7915 struct netdev_queue *txq;
7916 unsigned int last;
7917 struct iphdr *iph = NULL;
7918 struct tcphdr *tcph = NULL;
7919 __sum16 tcp_csum = 0, ip_csum = 0;
7920 __be16 ip_tot_len = 0;
7921
7922 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7923 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7924 if (tg3_flag(tp, ENABLE_TSS))
7925 tnapi++;
7926
7927 budget = tg3_tx_avail(tnapi);
7928
7929 /* We are running in BH disabled context with netif_tx_lock
7930 * and TX reclaim runs via tp->napi.poll inside of a software
7931 * interrupt. Furthermore, IRQ processing runs lockless so we have
7932 * no IRQ context deadlocks to worry about either. Rejoice!
7933 */
7934 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7935 if (!netif_tx_queue_stopped(txq)) {
7936 netif_tx_stop_queue(txq);
7937
7938 /* This is a hard error, log it. */
7939 netdev_err(dev,
7940 "BUG! Tx Ring full when queue awake!\n");
7941 }
7942 return NETDEV_TX_BUSY;
7943 }
7944
7945 entry = tnapi->tx_prod;
7946 base_flags = 0;
7947
7948 mss = skb_shinfo(skb)->gso_size;
7949 if (mss) {
7950 u32 tcp_opt_len, hdr_len;
7951
7952 if (skb_cow_head(skb, 0))
7953 goto drop;
7954
7955 iph = ip_hdr(skb);
7956 tcp_opt_len = tcp_optlen(skb);
7957
7958 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7959
7960 /* HW/FW cannot correctly segment packets that have been
7961 * vlan encapsulated.
7962 */
7963 if (skb->protocol == htons(ETH_P_8021Q) ||
7964 skb->protocol == htons(ETH_P_8021AD)) {
7965 if (tg3_tso_bug_gso_check(tnapi, skb))
7966 return tg3_tso_bug(tp, tnapi, txq, skb);
7967 goto drop;
7968 }
7969
7970 if (!skb_is_gso_v6(skb)) {
7971 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7972 tg3_flag(tp, TSO_BUG)) {
7973 if (tg3_tso_bug_gso_check(tnapi, skb))
7974 return tg3_tso_bug(tp, tnapi, txq, skb);
7975 goto drop;
7976 }
7977 ip_csum = iph->check;
7978 ip_tot_len = iph->tot_len;
7979 iph->check = 0;
7980 iph->tot_len = htons(mss + hdr_len);
7981 }
7982
7983 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7984 TXD_FLAG_CPU_POST_DMA);
7985
7986 tcph = tcp_hdr(skb);
7987 tcp_csum = tcph->check;
7988
7989 if (tg3_flag(tp, HW_TSO_1) ||
7990 tg3_flag(tp, HW_TSO_2) ||
7991 tg3_flag(tp, HW_TSO_3)) {
7992 tcph->check = 0;
7993 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7994 } else {
7995 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7996 0, IPPROTO_TCP, 0);
7997 }
7998
7999 if (tg3_flag(tp, HW_TSO_3)) {
8000 mss |= (hdr_len & 0xc) << 12;
8001 if (hdr_len & 0x10)
8002 base_flags |= 0x00000010;
8003 base_flags |= (hdr_len & 0x3e0) << 5;
8004 } else if (tg3_flag(tp, HW_TSO_2))
8005 mss |= hdr_len << 9;
8006 else if (tg3_flag(tp, HW_TSO_1) ||
8007 tg3_asic_rev(tp) == ASIC_REV_5705) {
8008 if (tcp_opt_len || iph->ihl > 5) {
8009 int tsflags;
8010
8011 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8012 mss |= (tsflags << 11);
8013 }
8014 } else {
8015 if (tcp_opt_len || iph->ihl > 5) {
8016 int tsflags;
8017
8018 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8019 base_flags |= tsflags << 12;
8020 }
8021 }
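/* Recap of the encoding above: with HW_TSO_3, hdr_len bits 2-3 land
 * in mss bits 14-15, bit 4 in base_flags bit 4, and bits 5-9 in
 * base_flags bits 10-14; with HW_TSO_2 the whole hdr_len is stored
 * from mss bit 9 upward.
 */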
8022 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8023 /* HW/FW cannot correctly checksum packets that have been
8024 * vlan encapsulated.
8025 */
8026 if (skb->protocol == htons(ETH_P_8021Q) ||
8027 skb->protocol == htons(ETH_P_8021AD)) {
8028 if (skb_checksum_help(skb))
8029 goto drop;
8030 } else {
8031 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8032 }
8033 }
8034
8035 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8036 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8037 base_flags |= TXD_FLAG_JMB_PKT;
8038
8039 if (skb_vlan_tag_present(skb)) {
8040 base_flags |= TXD_FLAG_VLAN;
8041 vlan = skb_vlan_tag_get(skb);
8042 }
8043
8044 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8045 tg3_flag(tp, TX_TSTAMP_EN)) {
8046 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8047 base_flags |= TXD_FLAG_HWTSTAMP;
8048 }
8049
8050 len = skb_headlen(skb);
8051
8052 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8053 if (pci_dma_mapping_error(tp->pdev, mapping))
8054 goto drop;
8055
8056
8057 tnapi->tx_buffers[entry].skb = skb;
8058 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8059
8060 would_hit_hwbug = 0;
8061
8062 if (tg3_flag(tp, 5701_DMA_BUG))
8063 would_hit_hwbug = 1;
8064
8065 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8066 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8067 mss, vlan)) {
8068 would_hit_hwbug = 1;
8069 } else if (skb_shinfo(skb)->nr_frags > 0) {
8070 u32 tmp_mss = mss;
8071
8072 if (!tg3_flag(tp, HW_TSO_1) &&
8073 !tg3_flag(tp, HW_TSO_2) &&
8074 !tg3_flag(tp, HW_TSO_3))
8075 tmp_mss = 0;
8076
8077 /* Now loop through additional data
8078 * fragments, and queue them.
8079 */
8080 last = skb_shinfo(skb)->nr_frags - 1;
8081 for (i = 0; i <= last; i++) {
8082 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8083
8084 len = skb_frag_size(frag);
8085 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8086 len, DMA_TO_DEVICE);
8087
8088 tnapi->tx_buffers[entry].skb = NULL;
8089 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8090 mapping);
8091 if (dma_mapping_error(&tp->pdev->dev, mapping))
8092 goto dma_error;
8093
8094 if (!budget ||
8095 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8096 len, base_flags |
8097 ((i == last) ? TXD_FLAG_END : 0),
8098 tmp_mss, vlan)) {
8099 would_hit_hwbug = 1;
8100 break;
8101 }
8102 }
8103 }
8104
8105 if (would_hit_hwbug) {
8106 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8107
8108 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8109 /* If it's a TSO packet, do GSO instead of
8110 * allocating and copying to a large linear SKB
8111 */
8112 if (ip_tot_len) {
8113 iph->check = ip_csum;
8114 iph->tot_len = ip_tot_len;
8115 }
8116 tcph->check = tcp_csum;
8117 return tg3_tso_bug(tp, tnapi, txq, skb);
8118 }
8119
8120 /* If the workaround fails due to memory/mapping
8121 * failure, silently drop this packet.
8122 */
8123 entry = tnapi->tx_prod;
8124 budget = tg3_tx_avail(tnapi);
8125 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8126 base_flags, mss, vlan))
8127 goto drop_nofree;
8128 }
8129
8130 skb_tx_timestamp(skb);
8131 netdev_tx_sent_queue(txq, skb->len);
8132
8133 /* Sync BD data before updating mailbox */
8134 wmb();
8135
8136 tnapi->tx_prod = entry;
8137 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8138 netif_tx_stop_queue(txq);
8139
8140 /* netif_tx_stop_queue() must be done before checking
8141 * tx index in tg3_tx_avail() below, because in
8142 * tg3_tx(), we update tx index before checking for
8143 * netif_tx_queue_stopped().
8144 */
8145 smp_mb();
8146 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8147 netif_tx_wake_queue(txq);
8148 }
8149
8150 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8151 /* Packets are ready, update Tx producer idx on card. */
8152 tw32_tx_mbox(tnapi->prodmbox, entry);
8153 }
8154
8155 return NETDEV_TX_OK;
8156
8157 dma_error:
8158 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8159 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8160 drop:
8161 dev_kfree_skb_any(skb);
8162 drop_nofree:
8163 tp->tx_dropped++;
8164 return NETDEV_TX_OK;
8165 }
8166
8167 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8168 {
8169 if (enable) {
8170 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8171 MAC_MODE_PORT_MODE_MASK);
8172
8173 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8174
8175 if (!tg3_flag(tp, 5705_PLUS))
8176 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8177
8178 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8179 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8180 else
8181 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8182 } else {
8183 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8184
8185 if (tg3_flag(tp, 5705_PLUS) ||
8186 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8187 tg3_asic_rev(tp) == ASIC_REV_5700)
8188 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8189 }
8190
8191 tw32(MAC_MODE, tp->mac_mode);
8192 udelay(40);
8193 }
8194
8195 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8196 {
8197 u32 val, bmcr, mac_mode, ptest = 0;
8198
8199 tg3_phy_toggle_apd(tp, false);
8200 tg3_phy_toggle_automdix(tp, false);
8201
8202 if (extlpbk && tg3_phy_set_extloopbk(tp))
8203 return -EIO;
8204
8205 bmcr = BMCR_FULLDPLX;
8206 switch (speed) {
8207 case SPEED_10:
8208 break;
8209 case SPEED_100:
8210 bmcr |= BMCR_SPEED100;
8211 break;
8212 case SPEED_1000:
8213 default:
8214 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8215 speed = SPEED_100;
8216 bmcr |= BMCR_SPEED100;
8217 } else {
8218 speed = SPEED_1000;
8219 bmcr |= BMCR_SPEED1000;
8220 }
8221 }
8222
8223 if (extlpbk) {
8224 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8225 tg3_readphy(tp, MII_CTRL1000, &val);
8226 val |= CTL1000_AS_MASTER |
8227 CTL1000_ENABLE_MASTER;
8228 tg3_writephy(tp, MII_CTRL1000, val);
8229 } else {
8230 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8231 MII_TG3_FET_PTEST_TRIM_2;
8232 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8233 }
8234 } else
8235 bmcr |= BMCR_LOOPBACK;
8236
8237 tg3_writephy(tp, MII_BMCR, bmcr);
8238
8239 /* The write needs to be flushed for the FETs */
8240 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8241 tg3_readphy(tp, MII_BMCR, &bmcr);
8242
8243 udelay(40);
8244
8245 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8246 tg3_asic_rev(tp) == ASIC_REV_5785) {
8247 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8248 MII_TG3_FET_PTEST_FRC_TX_LINK |
8249 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8250
8251 /* The write needs to be flushed for the AC131 */
8252 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8253 }
8254
8255 /* Reset to prevent losing 1st rx packet intermittently */
8256 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8257 tg3_flag(tp, 5780_CLASS)) {
8258 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8259 udelay(10);
8260 tw32_f(MAC_RX_MODE, tp->rx_mode);
8261 }
8262
8263 mac_mode = tp->mac_mode &
8264 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8265 if (speed == SPEED_1000)
8266 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8267 else
8268 mac_mode |= MAC_MODE_PORT_MODE_MII;
8269
8270 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8271 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8272
8273 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8274 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8275 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8276 mac_mode |= MAC_MODE_LINK_POLARITY;
8277
8278 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8279 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8280 }
8281
8282 tw32(MAC_MODE, mac_mode);
8283 udelay(40);
8284
8285 return 0;
8286 }
8287
8288 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8289 {
8290 struct tg3 *tp = netdev_priv(dev);
8291
8292 if (features & NETIF_F_LOOPBACK) {
8293 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8294 return;
8295
8296 spin_lock_bh(&tp->lock);
8297 tg3_mac_loopback(tp, true);
8298 netif_carrier_on(tp->dev);
8299 spin_unlock_bh(&tp->lock);
8300 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8301 } else {
8302 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8303 return;
8304
8305 spin_lock_bh(&tp->lock);
8306 tg3_mac_loopback(tp, false);
8307 /* Force link status check */
8308 tg3_setup_phy(tp, true);
8309 spin_unlock_bh(&tp->lock);
8310 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8311 }
8312 }
8313
8314 static netdev_features_t tg3_fix_features(struct net_device *dev,
8315 netdev_features_t features)
8316 {
8317 struct tg3 *tp = netdev_priv(dev);
8318
8319 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8320 features &= ~NETIF_F_ALL_TSO;
8321
8322 return features;
8323 }
8324
8325 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8326 {
8327 netdev_features_t changed = dev->features ^ features;
8328
8329 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8330 tg3_set_loopback(dev, features);
8331
8332 return 0;
8333 }
8334
8335 static void tg3_rx_prodring_free(struct tg3 *tp,
8336 struct tg3_rx_prodring_set *tpr)
8337 {
8338 int i;
8339
8340 if (tpr != &tp->napi[0].prodring) {
8341 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8342 i = (i + 1) & tp->rx_std_ring_mask)
8343 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8344 tp->rx_pkt_map_sz);
8345
8346 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8347 for (i = tpr->rx_jmb_cons_idx;
8348 i != tpr->rx_jmb_prod_idx;
8349 i = (i + 1) & tp->rx_jmb_ring_mask) {
8350 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8351 TG3_RX_JMB_MAP_SZ);
8352 }
8353 }
8354
8355 return;
8356 }
8357
8358 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8359 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8360 tp->rx_pkt_map_sz);
8361
8362 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8363 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8364 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8365 TG3_RX_JMB_MAP_SZ);
8366 }
8367 }
8368
8369 /* Initialize rx rings for packet processing.
8370 *
8371 * The chip has been shut down and the driver detached from
8372 * the networking stack, so no interrupts or new tx packets will
8373 * end up in the driver. tp->{tx,}lock are held and thus
8374 * we may not sleep.
8375 */
8376 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8377 struct tg3_rx_prodring_set *tpr)
8378 {
8379 u32 i, rx_pkt_dma_sz;
8380
8381 tpr->rx_std_cons_idx = 0;
8382 tpr->rx_std_prod_idx = 0;
8383 tpr->rx_jmb_cons_idx = 0;
8384 tpr->rx_jmb_prod_idx = 0;
8385
8386 if (tpr != &tp->napi[0].prodring) {
8387 memset(&tpr->rx_std_buffers[0], 0,
8388 TG3_RX_STD_BUFF_RING_SIZE(tp));
8389 if (tpr->rx_jmb_buffers)
8390 memset(&tpr->rx_jmb_buffers[0], 0,
8391 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8392 goto done;
8393 }
8394
8395 /* Zero out all descriptors. */
8396 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8397
8398 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8399 if (tg3_flag(tp, 5780_CLASS) &&
8400 tp->dev->mtu > ETH_DATA_LEN)
8401 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8402 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8403
8404 /* Initialize invariants of the rings; we only set this
8405 * stuff once. This works because the card does not
8406 * write into the rx buffer posting rings.
8407 */
8408 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8409 struct tg3_rx_buffer_desc *rxd;
8410
8411 rxd = &tpr->rx_std[i];
8412 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8413 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8414 rxd->opaque = (RXD_OPAQUE_RING_STD |
8415 (i << RXD_OPAQUE_INDEX_SHIFT));
8416 }
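/* The opaque cookie is echoed back in rx completions; e.g. standard
 * ring entry 5 is RXD_OPAQUE_RING_STD | (5 << RXD_OPAQUE_INDEX_SHIFT),
 * so the rx path can recover both the ring type and the index.
 */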
8417
8418 /* Now allocate fresh SKBs for each rx ring. */
8419 for (i = 0; i < tp->rx_pending; i++) {
8420 unsigned int frag_size;
8421
8422 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8423 &frag_size) < 0) {
8424 netdev_warn(tp->dev,
8425 "Using a smaller RX standard ring. Only "
8426 "%d out of %d buffers were allocated "
8427 "successfully\n", i, tp->rx_pending);
8428 if (i == 0)
8429 goto initfail;
8430 tp->rx_pending = i;
8431 break;
8432 }
8433 }
8434
8435 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8436 goto done;
8437
8438 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8439
8440 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8441 goto done;
8442
8443 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8444 struct tg3_rx_buffer_desc *rxd;
8445
8446 rxd = &tpr->rx_jmb[i].std;
8447 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8448 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8449 RXD_FLAG_JUMBO;
8450 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8451 (i << RXD_OPAQUE_INDEX_SHIFT));
8452 }
8453
8454 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8455 unsigned int frag_size;
8456
8457 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8458 &frag_size) < 0) {
8459 netdev_warn(tp->dev,
8460 "Using a smaller RX jumbo ring. Only %d "
8461 "out of %d buffers were allocated "
8462 "successfully\n", i, tp->rx_jumbo_pending);
8463 if (i == 0)
8464 goto initfail;
8465 tp->rx_jumbo_pending = i;
8466 break;
8467 }
8468 }
8469
8470 done:
8471 return 0;
8472
8473 initfail:
8474 tg3_rx_prodring_free(tp, tpr);
8475 return -ENOMEM;
8476 }
8477
8478 static void tg3_rx_prodring_fini(struct tg3 *tp,
8479 struct tg3_rx_prodring_set *tpr)
8480 {
8481 kfree(tpr->rx_std_buffers);
8482 tpr->rx_std_buffers = NULL;
8483 kfree(tpr->rx_jmb_buffers);
8484 tpr->rx_jmb_buffers = NULL;
8485 if (tpr->rx_std) {
8486 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8487 tpr->rx_std, tpr->rx_std_mapping);
8488 tpr->rx_std = NULL;
8489 }
8490 if (tpr->rx_jmb) {
8491 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8492 tpr->rx_jmb, tpr->rx_jmb_mapping);
8493 tpr->rx_jmb = NULL;
8494 }
8495 }
8496
8497 static int tg3_rx_prodring_init(struct tg3 *tp,
8498 struct tg3_rx_prodring_set *tpr)
8499 {
8500 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8501 GFP_KERNEL);
8502 if (!tpr->rx_std_buffers)
8503 return -ENOMEM;
8504
8505 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8506 TG3_RX_STD_RING_BYTES(tp),
8507 &tpr->rx_std_mapping,
8508 GFP_KERNEL);
8509 if (!tpr->rx_std)
8510 goto err_out;
8511
8512 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8513 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8514 GFP_KERNEL);
8515 if (!tpr->rx_jmb_buffers)
8516 goto err_out;
8517
8518 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8519 TG3_RX_JMB_RING_BYTES(tp),
8520 &tpr->rx_jmb_mapping,
8521 GFP_KERNEL);
8522 if (!tpr->rx_jmb)
8523 goto err_out;
8524 }
8525
8526 return 0;
8527
8528 err_out:
8529 tg3_rx_prodring_fini(tp, tpr);
8530 return -ENOMEM;
8531 }
8532
8533 /* Free up pending packets in all rx/tx rings.
8534 *
8535 * The chip has been shut down and the driver detached from
8536 * the networking stack, so no interrupts or new tx packets will
8537 * end up in the driver. tp->{tx,}lock is not held and we are not
8538 * in an interrupt context and thus may sleep.
8539 */
8540 static void tg3_free_rings(struct tg3 *tp)
8541 {
8542 int i, j;
8543
8544 for (j = 0; j < tp->irq_cnt; j++) {
8545 struct tg3_napi *tnapi = &tp->napi[j];
8546
8547 tg3_rx_prodring_free(tp, &tnapi->prodring);
8548
8549 if (!tnapi->tx_buffers)
8550 continue;
8551
8552 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8553 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8554
8555 if (!skb)
8556 continue;
8557
8558 tg3_tx_skb_unmap(tnapi, i,
8559 skb_shinfo(skb)->nr_frags - 1);
8560
8561 dev_consume_skb_any(skb);
8562 }
8563 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8564 }
8565 }
8566
8567 /* Initialize tx/rx rings for packet processing.
8568 *
8569 * The chip has been shut down and the driver detached from
8570 * the networking stack, so no interrupts or new tx packets will
8571 * end up in the driver. tp->{tx,}lock are held and thus
8572 * we may not sleep.
8573 */
8574 static int tg3_init_rings(struct tg3 *tp)
8575 {
8576 int i;
8577
8578 /* Free up all the SKBs. */
8579 tg3_free_rings(tp);
8580
8581 for (i = 0; i < tp->irq_cnt; i++) {
8582 struct tg3_napi *tnapi = &tp->napi[i];
8583
8584 tnapi->last_tag = 0;
8585 tnapi->last_irq_tag = 0;
8586 tnapi->hw_status->status = 0;
8587 tnapi->hw_status->status_tag = 0;
8588 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8589
8590 tnapi->tx_prod = 0;
8591 tnapi->tx_cons = 0;
8592 if (tnapi->tx_ring)
8593 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8594
8595 tnapi->rx_rcb_ptr = 0;
8596 if (tnapi->rx_rcb)
8597 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8598
8599 if (tnapi->prodring.rx_std &&
8600 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8601 tg3_free_rings(tp);
8602 return -ENOMEM;
8603 }
8604 }
8605
8606 return 0;
8607 }
8608
8609 static void tg3_mem_tx_release(struct tg3 *tp)
8610 {
8611 int i;
8612
8613 for (i = 0; i < tp->irq_max; i++) {
8614 struct tg3_napi *tnapi = &tp->napi[i];
8615
8616 if (tnapi->tx_ring) {
8617 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8618 tnapi->tx_ring, tnapi->tx_desc_mapping);
8619 tnapi->tx_ring = NULL;
8620 }
8621
8622 kfree(tnapi->tx_buffers);
8623 tnapi->tx_buffers = NULL;
8624 }
8625 }
8626
8627 static int tg3_mem_tx_acquire(struct tg3 *tp)
8628 {
8629 int i;
8630 struct tg3_napi *tnapi = &tp->napi[0];
8631
8632 /* If multivector TSS is enabled, vector 0 does not handle
8633 * tx interrupts. Don't allocate any resources for it.
8634 */
8635 if (tg3_flag(tp, ENABLE_TSS))
8636 tnapi++;
8637
8638 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8639 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8640 sizeof(struct tg3_tx_ring_info),
8641 GFP_KERNEL);
8642 if (!tnapi->tx_buffers)
8643 goto err_out;
8644
8645 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8646 TG3_TX_RING_BYTES,
8647 &tnapi->tx_desc_mapping,
8648 GFP_KERNEL);
8649 if (!tnapi->tx_ring)
8650 goto err_out;
8651 }
8652
8653 return 0;
8654
8655 err_out:
8656 tg3_mem_tx_release(tp);
8657 return -ENOMEM;
8658 }
8659
8660 static void tg3_mem_rx_release(struct tg3 *tp)
8661 {
8662 int i;
8663
8664 for (i = 0; i < tp->irq_max; i++) {
8665 struct tg3_napi *tnapi = &tp->napi[i];
8666
8667 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8668
8669 if (!tnapi->rx_rcb)
8670 continue;
8671
8672 dma_free_coherent(&tp->pdev->dev,
8673 TG3_RX_RCB_RING_BYTES(tp),
8674 tnapi->rx_rcb,
8675 tnapi->rx_rcb_mapping);
8676 tnapi->rx_rcb = NULL;
8677 }
8678 }
8679
8680 static int tg3_mem_rx_acquire(struct tg3 *tp)
8681 {
8682 unsigned int i, limit;
8683
8684 limit = tp->rxq_cnt;
8685
8686 /* If RSS is enabled, we need a (dummy) producer ring
8687 * set on vector zero. This is the true hw prodring.
8688 */
8689 if (tg3_flag(tp, ENABLE_RSS))
8690 limit++;
8691
8692 for (i = 0; i < limit; i++) {
8693 struct tg3_napi *tnapi = &tp->napi[i];
8694
8695 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8696 goto err_out;
8697
8698 /* If multivector RSS is enabled, vector 0
8699 * does not handle rx or tx interrupts.
8700 * Don't allocate any resources for it.
8701 */
8702 if (!i && tg3_flag(tp, ENABLE_RSS))
8703 continue;
8704
8705 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8706 TG3_RX_RCB_RING_BYTES(tp),
8707 &tnapi->rx_rcb_mapping,
8708 GFP_KERNEL);
8709 if (!tnapi->rx_rcb)
8710 goto err_out;
8711 }
8712
8713 return 0;
8714
8715 err_out:
8716 tg3_mem_rx_release(tp);
8717 return -ENOMEM;
8718 }
8719
8720 /*
8721 * Must not be invoked with interrupt sources disabled and
8722 * the hardware shut down.
8723 */
8724 static void tg3_free_consistent(struct tg3 *tp)
8725 {
8726 int i;
8727
8728 for (i = 0; i < tp->irq_cnt; i++) {
8729 struct tg3_napi *tnapi = &tp->napi[i];
8730
8731 if (tnapi->hw_status) {
8732 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8733 tnapi->hw_status,
8734 tnapi->status_mapping);
8735 tnapi->hw_status = NULL;
8736 }
8737 }
8738
8739 tg3_mem_rx_release(tp);
8740 tg3_mem_tx_release(tp);
8741
8742 /* tp->hw_stats can be referenced safely:
8743 * 1. under rtnl_lock
8744 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8745 */
8746 if (tp->hw_stats) {
8747 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8748 tp->hw_stats, tp->stats_mapping);
8749 tp->hw_stats = NULL;
8750 }
8751 }
8752
8753 /*
8754 * Must not be invoked with interrupt sources disabled and
8755 * the hardware shut down. Can sleep.
8756 */
8757 static int tg3_alloc_consistent(struct tg3 *tp)
8758 {
8759 int i;
8760
8761 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8762 sizeof(struct tg3_hw_stats),
8763 &tp->stats_mapping, GFP_KERNEL);
8764 if (!tp->hw_stats)
8765 goto err_out;
8766
8767 for (i = 0; i < tp->irq_cnt; i++) {
8768 struct tg3_napi *tnapi = &tp->napi[i];
8769 struct tg3_hw_status *sblk;
8770
8771 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8772 TG3_HW_STATUS_SIZE,
8773 &tnapi->status_mapping,
8774 GFP_KERNEL);
8775 if (!tnapi->hw_status)
8776 goto err_out;
8777
8778 sblk = tnapi->hw_status;
8779
8780 if (tg3_flag(tp, ENABLE_RSS)) {
8781 u16 *prodptr = NULL;
8782
8783 /*
8784 * When RSS is enabled, the status block format changes
8785 * slightly. The "rx_jumbo_consumer", "reserved",
8786 * and "rx_mini_consumer" members get mapped to the
8787 * other three rx return ring producer indexes.
8788 */
8789 switch (i) {
8790 case 1:
8791 prodptr = &sblk->idx[0].rx_producer;
8792 break;
8793 case 2:
8794 prodptr = &sblk->rx_jumbo_consumer;
8795 break;
8796 case 3:
8797 prodptr = &sblk->reserved;
8798 break;
8799 case 4:
8800 prodptr = &sblk->rx_mini_consumer;
8801 break;
8802 }
8803 tnapi->rx_rcb_prod_idx = prodptr;
8804 } else {
8805 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8806 }
8807 }
8808
8809 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8810 goto err_out;
8811
8812 return 0;
8813
8814 err_out:
8815 tg3_free_consistent(tp);
8816 return -ENOMEM;
8817 }
8818
8819 #define MAX_WAIT_CNT 1000
8820
8821 /* To stop a block, clear the enable bit and poll till it
8822 * clears. tp->lock is held.
8823 */
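/* The poll below budgets MAX_WAIT_CNT (1000) iterations of
 * udelay(100), i.e. roughly 100 ms, before declaring a timeout.
 */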
8824 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8825 {
8826 unsigned int i;
8827 u32 val;
8828
8829 if (tg3_flag(tp, 5705_PLUS)) {
8830 switch (ofs) {
8831 case RCVLSC_MODE:
8832 case DMAC_MODE:
8833 case MBFREE_MODE:
8834 case BUFMGR_MODE:
8835 case MEMARB_MODE:
8836 /* We can't enable/disable these bits on the
8837 * 5705/5750, so just say success.
8838 */
8839 return 0;
8840
8841 default:
8842 break;
8843 }
8844 }
8845
8846 val = tr32(ofs);
8847 val &= ~enable_bit;
8848 tw32_f(ofs, val);
8849
8850 for (i = 0; i < MAX_WAIT_CNT; i++) {
8851 if (pci_channel_offline(tp->pdev)) {
8852 dev_err(&tp->pdev->dev,
8853 "tg3_stop_block device offline, "
8854 "ofs=%lx enable_bit=%x\n",
8855 ofs, enable_bit);
8856 return -ENODEV;
8857 }
8858
8859 udelay(100);
8860 val = tr32(ofs);
8861 if ((val & enable_bit) == 0)
8862 break;
8863 }
8864
8865 if (i == MAX_WAIT_CNT && !silent) {
8866 dev_err(&tp->pdev->dev,
8867 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8868 ofs, enable_bit);
8869 return -ENODEV;
8870 }
8871
8872 return 0;
8873 }
8874
8875 /* tp->lock is held. */
8876 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8877 {
8878 int i, err;
8879
8880 tg3_disable_ints(tp);
8881
8882 if (pci_channel_offline(tp->pdev)) {
8883 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8884 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8885 err = -ENODEV;
8886 goto err_no_dev;
8887 }
8888
8889 tp->rx_mode &= ~RX_MODE_ENABLE;
8890 tw32_f(MAC_RX_MODE, tp->rx_mode);
8891 udelay(10);
8892
8893 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8894 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8895 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8896 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8897 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8898 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8899
8900 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8901 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8902 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8903 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8904 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8905 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8906 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8907
8908 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8909 tw32_f(MAC_MODE, tp->mac_mode);
8910 udelay(40);
8911
8912 tp->tx_mode &= ~TX_MODE_ENABLE;
8913 tw32_f(MAC_TX_MODE, tp->tx_mode);
8914
8915 for (i = 0; i < MAX_WAIT_CNT; i++) {
8916 udelay(100);
8917 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8918 break;
8919 }
8920 if (i >= MAX_WAIT_CNT) {
8921 dev_err(&tp->pdev->dev,
8922 "%s timed out, TX_MODE_ENABLE will not clear "
8923 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8924 err |= -ENODEV;
8925 }
8926
8927 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8928 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8929 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8930
8931 tw32(FTQ_RESET, 0xffffffff);
8932 tw32(FTQ_RESET, 0x00000000);
8933
8934 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8935 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8936
8937 err_no_dev:
8938 for (i = 0; i < tp->irq_cnt; i++) {
8939 struct tg3_napi *tnapi = &tp->napi[i];
8940 if (tnapi->hw_status)
8941 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8942 }
8943
8944 return err;
8945 }
8946
8947 /* Save PCI command register before chip reset */
8948 static void tg3_save_pci_state(struct tg3 *tp)
8949 {
8950 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8951 }
8952
8953 /* Restore PCI state after chip reset */
8954 static void tg3_restore_pci_state(struct tg3 *tp)
8955 {
8956 u32 val;
8957
8958 /* Re-enable indirect register accesses. */
8959 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8960 tp->misc_host_ctrl);
8961
8962 /* Set MAX PCI retry to zero. */
8963 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8964 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8965 tg3_flag(tp, PCIX_MODE))
8966 val |= PCISTATE_RETRY_SAME_DMA;
8967 /* Allow reads and writes to the APE register and memory space. */
8968 if (tg3_flag(tp, ENABLE_APE))
8969 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8970 PCISTATE_ALLOW_APE_SHMEM_WR |
8971 PCISTATE_ALLOW_APE_PSPACE_WR;
8972 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8973
8974 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8975
8976 if (!tg3_flag(tp, PCI_EXPRESS)) {
8977 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8978 tp->pci_cacheline_sz);
8979 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8980 tp->pci_lat_timer);
8981 }
8982
8983 /* Make sure PCI-X relaxed ordering bit is clear. */
8984 if (tg3_flag(tp, PCIX_MODE)) {
8985 u16 pcix_cmd;
8986
8987 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8988 &pcix_cmd);
8989 pcix_cmd &= ~PCI_X_CMD_ERO;
8990 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8991 pcix_cmd);
8992 }
8993
8994 if (tg3_flag(tp, 5780_CLASS)) {
8995
8996 /* Chip reset on 5780 will reset the MSI enable bit,
8997 * so we need to restore it.
8998 */
8999 if (tg3_flag(tp, USING_MSI)) {
9000 u16 ctrl;
9001
9002 pci_read_config_word(tp->pdev,
9003 tp->msi_cap + PCI_MSI_FLAGS,
9004 &ctrl);
9005 pci_write_config_word(tp->pdev,
9006 tp->msi_cap + PCI_MSI_FLAGS,
9007 ctrl | PCI_MSI_FLAGS_ENABLE);
9008 val = tr32(MSGINT_MODE);
9009 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9010 }
9011 }
9012 }
9013
9014 static void tg3_override_clk(struct tg3 *tp)
9015 {
9016 u32 val;
9017
9018 switch (tg3_asic_rev(tp)) {
9019 case ASIC_REV_5717:
9020 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9021 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9022 TG3_CPMU_MAC_ORIDE_ENABLE);
9023 break;
9024
9025 case ASIC_REV_5719:
9026 case ASIC_REV_5720:
9027 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9028 break;
9029
9030 default:
9031 return;
9032 }
9033 }
9034
9035 static void tg3_restore_clk(struct tg3 *tp)
9036 {
9037 u32 val;
9038
9039 switch (tg3_asic_rev(tp)) {
9040 case ASIC_REV_5717:
9041 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9042 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9043 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9044 break;
9045
9046 case ASIC_REV_5719:
9047 case ASIC_REV_5720:
9048 val = tr32(TG3_CPMU_CLCK_ORIDE);
9049 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9050 break;
9051
9052 default:
9053 return;
9054 }
9055 }
9056
9057 /* tp->lock is held. */
9058 static int tg3_chip_reset(struct tg3 *tp)
9059 __releases(tp->lock)
9060 __acquires(tp->lock)
9061 {
9062 u32 val;
9063 void (*write_op)(struct tg3 *, u32, u32);
9064 int i, err;
9065
9066 if (!pci_device_is_present(tp->pdev))
9067 return -ENODEV;
9068
9069 tg3_nvram_lock(tp);
9070
9071 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9072
9073 /* No matching tg3_nvram_unlock() after this because
9074 * chip reset below will undo the nvram lock.
9075 */
9076 tp->nvram_lock_cnt = 0;
9077
9078 /* GRC_MISC_CFG core clock reset will clear the memory
9079 * enable bit in PCI register 4 and the MSI enable bit
9080 * on some chips, so we save relevant registers here.
9081 */
9082 tg3_save_pci_state(tp);
9083
9084 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9085 tg3_flag(tp, 5755_PLUS))
9086 tw32(GRC_FASTBOOT_PC, 0);
9087
9088 /*
9089 * We must avoid the readl() that normally takes place.
9090 * It locks machines, causes machine checks, and other
9091 * fun things. So, temporarily disable the 5701
9092 * hardware workaround, while we do the reset.
9093 */
9094 write_op = tp->write32;
9095 if (write_op == tg3_write_flush_reg32)
9096 tp->write32 = tg3_write32;
9097
9098 /* Prevent the irq handler from reading or writing PCI registers
9099 * during chip reset when the memory enable bit in the PCI command
9100 * register may be cleared. The chip does not generate interrupts
9101 * at this time, but the irq handler may still be called due to irq
9102 * sharing or irqpoll.
9103 */
9104 tg3_flag_set(tp, CHIP_RESETTING);
9105 for (i = 0; i < tp->irq_cnt; i++) {
9106 struct tg3_napi *tnapi = &tp->napi[i];
9107 if (tnapi->hw_status) {
9108 tnapi->hw_status->status = 0;
9109 tnapi->hw_status->status_tag = 0;
9110 }
9111 tnapi->last_tag = 0;
9112 tnapi->last_irq_tag = 0;
9113 }
9114 smp_mb();
9115
9116 tg3_full_unlock(tp);
9117
9118 for (i = 0; i < tp->irq_cnt; i++)
9119 synchronize_irq(tp->napi[i].irq_vec);
9120
9121 tg3_full_lock(tp, 0);
9122
9123 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9124 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9125 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9126 }
9127
9128 /* do the reset */
9129 val = GRC_MISC_CFG_CORECLK_RESET;
9130
9131 if (tg3_flag(tp, PCI_EXPRESS)) {
9132 /* Force PCIe 1.0a mode */
9133 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9134 !tg3_flag(tp, 57765_PLUS) &&
9135 tr32(TG3_PCIE_PHY_TSTCTL) ==
9136 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9137 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9138
9139 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9140 tw32(GRC_MISC_CFG, (1 << 29));
9141 val |= (1 << 29);
9142 }
9143 }
9144
9145 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9146 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9147 tw32(GRC_VCPU_EXT_CTRL,
9148 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9149 }
9150
9151 /* Set the clock to the highest frequency to avoid timeouts. With link
9152 * aware mode, the clock speed could be slow and bootcode does not
9153 * complete within the expected time. Override the clock to allow the
9154 * bootcode to finish sooner and then restore it.
9155 */
9156 tg3_override_clk(tp);
9157
9158 /* Manage gphy power for all CPMU-absent PCIe devices. */
9159 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9160 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9161
9162 tw32(GRC_MISC_CFG, val);
9163
9164 /* restore 5701 hardware bug workaround write method */
9165 tp->write32 = write_op;
9166
9167 /* Unfortunately, we have to delay before the PCI read back.
9168 * Some 575X chips even will not respond to a PCI cfg access
9169 * when the reset command is given to the chip.
9170 *
9171 * How do these hardware designers expect things to work
9172 * properly if the PCI write is posted for a long period
9173 * of time? It is always necessary to have some method by
9174 * which a register read back can occur to push the write
9175 * out which does the reset.
9176 *
9177 * For most tg3 variants the trick below was working.
9178 * Ho hum...
9179 */
9180 udelay(120);
9181
9182 /* Flush PCI posted writes. The normal MMIO registers
9183 * are inaccessible at this time so this is the only
9184 * way to do this reliably (actually, this is no longer
9185 * the case, see above). I tried to use indirect
9186 * register read/write but this upset some 5701 variants.
9187 */
9188 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9189
9190 udelay(120);
9191
9192 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9193 u16 val16;
9194
9195 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9196 int j;
9197 u32 cfg_val;
9198
9199 /* Wait for link training to complete. */
9200 for (j = 0; j < 5000; j++)
9201 udelay(100);
9202
9203 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9204 pci_write_config_dword(tp->pdev, 0xc4,
9205 cfg_val | (1 << 15));
9206 }
9207
9208 /* Clear the "no snoop" and "relaxed ordering" bits. */
9209 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9210 /*
9211 * Older PCIe devices only support the 128-byte
9212 * MPS setting. Enforce the restriction.
9213 */
9214 if (!tg3_flag(tp, CPMU_PRESENT))
9215 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9216 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9217
9218 /* Clear error status */
9219 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9220 PCI_EXP_DEVSTA_CED |
9221 PCI_EXP_DEVSTA_NFED |
9222 PCI_EXP_DEVSTA_FED |
9223 PCI_EXP_DEVSTA_URD);
9224 }
9225
9226 tg3_restore_pci_state(tp);
9227
9228 tg3_flag_clear(tp, CHIP_RESETTING);
9229 tg3_flag_clear(tp, ERROR_PROCESSED);
9230
9231 val = 0;
9232 if (tg3_flag(tp, 5780_CLASS))
9233 val = tr32(MEMARB_MODE);
9234 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9235
9236 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9237 tg3_stop_fw(tp);
9238 tw32(0x5000, 0x400);
9239 }
9240
9241 if (tg3_flag(tp, IS_SSB_CORE)) {
9242 /*
9243 * BCM4785: In order to avoid repercussions from using
9244 * potentially defective internal ROM, stop the Rx RISC CPU,
9245 * since it is not needed for normal operation.
9246 */
9247 tg3_stop_fw(tp);
9248 tg3_halt_cpu(tp, RX_CPU_BASE);
9249 }
9250
9251 err = tg3_poll_fw(tp);
9252 if (err)
9253 return err;
9254
9255 tw32(GRC_MODE, tp->grc_mode);
9256
9257 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9258 val = tr32(0xc4);
9259
9260 tw32(0xc4, val | (1 << 15));
9261 }
9262
9263 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9264 tg3_asic_rev(tp) == ASIC_REV_5705) {
9265 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9266 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9267 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9268 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9269 }
9270
9271 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9272 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9273 val = tp->mac_mode;
9274 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9275 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9276 val = tp->mac_mode;
9277 } else
9278 val = 0;
9279
9280 tw32_f(MAC_MODE, val);
9281 udelay(40);
9282
9283 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9284
9285 tg3_mdio_start(tp);
9286
9287 if (tg3_flag(tp, PCI_EXPRESS) &&
9288 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9289 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9290 !tg3_flag(tp, 57765_PLUS)) {
9291 val = tr32(0x7c00);
9292
9293 tw32(0x7c00, val | (1 << 25));
9294 }
9295
9296 tg3_restore_clk(tp);
9297
9298 /* Increase the core clock speed to fix tx timeout issue for 5762
9299 * with 100Mbps link speed.
9300 */
9301 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9302 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9303 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9304 TG3_CPMU_MAC_ORIDE_ENABLE);
9305 }
9306
9307 /* Reprobe ASF enable state. */
9308 tg3_flag_clear(tp, ENABLE_ASF);
9309 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9310 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9311
9312 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9313 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9314 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9315 u32 nic_cfg;
9316
9317 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9318 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9319 tg3_flag_set(tp, ENABLE_ASF);
9320 tp->last_event_jiffies = jiffies;
9321 if (tg3_flag(tp, 5750_PLUS))
9322 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9323
9324 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9325 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9326 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9327 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9328 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9329 }
9330 }
9331
9332 return 0;
9333 }
9334
9335 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9336 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9337 static void __tg3_set_rx_mode(struct net_device *);
9338
9339 /* tp->lock is held. */
9340 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9341 {
9342 int err;
9343
9344 tg3_stop_fw(tp);
9345
9346 tg3_write_sig_pre_reset(tp, kind);
9347
9348 tg3_abort_hw(tp, silent);
9349 err = tg3_chip_reset(tp);
9350
9351 __tg3_set_mac_addr(tp, false);
9352
9353 tg3_write_sig_legacy(tp, kind);
9354 tg3_write_sig_post_reset(tp, kind);
9355
9356 if (tp->hw_stats) {
9357 /* Save the stats across chip resets... */
9358 tg3_get_nstats(tp, &tp->net_stats_prev);
9359 tg3_get_estats(tp, &tp->estats_prev);
9360
9361 /* And make sure the next sample is new data */
9362 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9363 }
9364
9365 return err;
9366 }
9367
9368 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9369 {
9370 struct tg3 *tp = netdev_priv(dev);
9371 struct sockaddr *addr = p;
9372 int err = 0;
9373 bool skip_mac_1 = false;
9374
9375 if (!is_valid_ether_addr(addr->sa_data))
9376 return -EADDRNOTAVAIL;
9377
9378 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9379
9380 if (!netif_running(dev))
9381 return 0;
9382
9383 if (tg3_flag(tp, ENABLE_ASF)) {
9384 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9385
9386 addr0_high = tr32(MAC_ADDR_0_HIGH);
9387 addr0_low = tr32(MAC_ADDR_0_LOW);
9388 addr1_high = tr32(MAC_ADDR_1_HIGH);
9389 addr1_low = tr32(MAC_ADDR_1_LOW);
9390
9391 /* Skip MAC addr 1 if ASF is using it. */
9392 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9393 !(addr1_high == 0 && addr1_low == 0))
9394 skip_mac_1 = true;
9395 }
9396 spin_lock_bh(&tp->lock);
9397 __tg3_set_mac_addr(tp, skip_mac_1);
9398 __tg3_set_rx_mode(dev);
9399 spin_unlock_bh(&tp->lock);
9400
9401 return err;
9402 }
9403
9404 /* tp->lock is held. */
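/* Each BDINFO block in NIC SRAM holds the 64-bit host ring address
 * (high then low word), a maxlen/flags word and, on pre-5705 parts,
 * a NIC-local ring address, written in that order below.
 */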
9405 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9406 dma_addr_t mapping, u32 maxlen_flags,
9407 u32 nic_addr)
9408 {
9409 tg3_write_mem(tp,
9410 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9411 ((u64) mapping >> 32));
9412 tg3_write_mem(tp,
9413 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9414 ((u64) mapping & 0xffffffff));
9415 tg3_write_mem(tp,
9416 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9417 maxlen_flags);
9418
9419 if (!tg3_flag(tp, 5705_PLUS))
9420 tg3_write_mem(tp,
9421 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9422 nic_addr);
9423 }
9424
9425
9426 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9427 {
9428 int i = 0;
9429
9430 if (!tg3_flag(tp, ENABLE_TSS)) {
9431 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9432 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9433 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9434 } else {
9435 tw32(HOSTCC_TXCOL_TICKS, 0);
9436 tw32(HOSTCC_TXMAX_FRAMES, 0);
9437 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9438
9439 for (; i < tp->txq_cnt; i++) {
9440 u32 reg;
9441
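/* Per-vector coalescing registers are laid out in 0x18-byte
 * strides starting at the *_VEC1 addresses.
 */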
9442 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9443 tw32(reg, ec->tx_coalesce_usecs);
9444 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9445 tw32(reg, ec->tx_max_coalesced_frames);
9446 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9447 tw32(reg, ec->tx_max_coalesced_frames_irq);
9448 }
9449 }
9450
9451 for (; i < tp->irq_max - 1; i++) {
9452 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9453 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9454 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9455 }
9456 }
9457
9458 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9459 {
9460 int i = 0;
9461 u32 limit = tp->rxq_cnt;
9462
9463 if (!tg3_flag(tp, ENABLE_RSS)) {
9464 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9465 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9466 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9467 limit--;
9468 } else {
9469 tw32(HOSTCC_RXCOL_TICKS, 0);
9470 tw32(HOSTCC_RXMAX_FRAMES, 0);
9471 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9472 }
9473
9474 for (; i < limit; i++) {
9475 u32 reg;
9476
9477 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9478 tw32(reg, ec->rx_coalesce_usecs);
9479 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9480 tw32(reg, ec->rx_max_coalesced_frames);
9481 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9482 tw32(reg, ec->rx_max_coalesced_frames_irq);
9483 }
9484
9485 for (; i < tp->irq_max - 1; i++) {
9486 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9487 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9488 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9489 }
9490 }
9491
9492 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9493 {
9494 tg3_coal_tx_init(tp, ec);
9495 tg3_coal_rx_init(tp, ec);
9496
9497 if (!tg3_flag(tp, 5705_PLUS)) {
9498 u32 val = ec->stats_block_coalesce_usecs;
9499
9500 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9501 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9502
9503 if (!tp->link_up)
9504 val = 0;
9505
9506 tw32(HOSTCC_STAT_COAL_TICKS, val);
9507 }
9508 }
9509
9510 /* tp->lock is held. */
9511 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9512 {
9513 u32 txrcb, limit;
9514
9515 /* Disable all transmit rings but the first. */
9516 if (!tg3_flag(tp, 5705_PLUS))
9517 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9518 else if (tg3_flag(tp, 5717_PLUS))
9519 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9520 else if (tg3_flag(tp, 57765_CLASS) ||
9521 tg3_asic_rev(tp) == ASIC_REV_5762)
9522 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9523 else
9524 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9525
9526 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9527 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9528 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9529 BDINFO_FLAGS_DISABLED);
9530 }
9531
9532 /* tp->lock is held. */
9533 static void tg3_tx_rcbs_init(struct tg3 *tp)
9534 {
9535 int i = 0;
9536 u32 txrcb = NIC_SRAM_SEND_RCB;
9537
9538 if (tg3_flag(tp, ENABLE_TSS))
9539 i++;
9540
9541 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9542 struct tg3_napi *tnapi = &tp->napi[i];
9543
9544 if (!tnapi->tx_ring)
9545 continue;
9546
9547 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9548 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9549 NIC_SRAM_TX_BUFFER_DESC);
9550 }
9551 }
9552
9553 /* tp->lock is held. */
9554 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9555 {
9556 u32 rxrcb, limit;
9557
9558 /* Disable all receive return rings but the first. */
9559 if (tg3_flag(tp, 5717_PLUS))
9560 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9561 else if (!tg3_flag(tp, 5705_PLUS))
9562 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9563 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9564 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9565 tg3_flag(tp, 57765_CLASS))
9566 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9567 else
9568 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9569
9570 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9571 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9572 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9573 BDINFO_FLAGS_DISABLED);
9574 }
9575
9576 /* tp->lock is held. */
9577 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9578 {
9579 int i = 0;
9580 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9581
9582 if (tg3_flag(tp, ENABLE_RSS))
9583 i++;
9584
9585 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9586 struct tg3_napi *tnapi = &tp->napi[i];
9587
9588 if (!tnapi->rx_rcb)
9589 continue;
9590
9591 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9592 (tp->rx_ret_ring_mask + 1) <<
9593 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9594 }
9595 }
9596
9597 /* tp->lock is held. */
9598 static void tg3_rings_reset(struct tg3 *tp)
9599 {
9600 int i;
9601 u32 stblk;
9602 struct tg3_napi *tnapi = &tp->napi[0];
9603
9604 tg3_tx_rcbs_disable(tp);
9605
9606 tg3_rx_ret_rcbs_disable(tp);
9607
9608 /* Disable interrupts */
9609 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9610 tp->napi[0].chk_msi_cnt = 0;
9611 tp->napi[0].last_rx_cons = 0;
9612 tp->napi[0].last_tx_cons = 0;
9613
9614 /* Zero mailbox registers. */
9615 if (tg3_flag(tp, SUPPORT_MSIX)) {
9616 for (i = 1; i < tp->irq_max; i++) {
9617 tp->napi[i].tx_prod = 0;
9618 tp->napi[i].tx_cons = 0;
9619 if (tg3_flag(tp, ENABLE_TSS))
9620 tw32_mailbox(tp->napi[i].prodmbox, 0);
9621 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9622 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9623 tp->napi[i].chk_msi_cnt = 0;
9624 tp->napi[i].last_rx_cons = 0;
9625 tp->napi[i].last_tx_cons = 0;
9626 }
9627 if (!tg3_flag(tp, ENABLE_TSS))
9628 tw32_mailbox(tp->napi[0].prodmbox, 0);
9629 } else {
9630 tp->napi[0].tx_prod = 0;
9631 tp->napi[0].tx_cons = 0;
9632 tw32_mailbox(tp->napi[0].prodmbox, 0);
9633 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9634 }
9635
9636 /* Make sure the NIC-based send BD rings are disabled. */
9637 if (!tg3_flag(tp, 5705_PLUS)) {
9638 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9639 for (i = 0; i < 16; i++)
9640 tw32_tx_mbox(mbox + i * 8, 0);
9641 }
9642
9643 /* Clear status block in ram. */
9644 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9645
9646 /* Set status block DMA address */
9647 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9648 ((u64) tnapi->status_mapping >> 32));
9649 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9650 ((u64) tnapi->status_mapping & 0xffffffff));
9651
9652 stblk = HOSTCC_STATBLCK_RING1;
9653
9654 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9655 u64 mapping = (u64)tnapi->status_mapping;
9656 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9657 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9658 stblk += 8;
9659
9660 /* Clear status block in ram. */
9661 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9662 }
9663
9664 tg3_tx_rcbs_init(tp);
9665 tg3_rx_ret_rcbs_init(tp);
9666 }
9667
9668 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9669 {
9670 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9671
9672 if (!tg3_flag(tp, 5750_PLUS) ||
9673 tg3_flag(tp, 5780_CLASS) ||
9674 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9675 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9676 tg3_flag(tp, 57765_PLUS))
9677 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9678 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9679 tg3_asic_rev(tp) == ASIC_REV_5787)
9680 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9681 else
9682 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9683
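	/* The thresholds computed below appear to request replenishment
	 * once roughly an eighth of the posted ring is consumed, or once
	 * the NIC's on-chip BD cache is half drained, whichever is lower.
	 * E.g. rx_pending == 200 gives host_rep_thresh = max(200 / 8, 1) = 25.
	 */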
9684 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9685 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9686
9687 val = min(nic_rep_thresh, host_rep_thresh);
9688 tw32(RCVBDI_STD_THRESH, val);
9689
9690 if (tg3_flag(tp, 57765_PLUS))
9691 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9692
9693 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9694 return;
9695
9696 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9697
9698 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9699
9700 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9701 tw32(RCVBDI_JUMBO_THRESH, val);
9702
9703 if (tg3_flag(tp, 57765_PLUS))
9704 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9705 }
9706
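/* For reference: calc_crc() below open-codes a bit-reflected CRC-32 with
 * seed 0xffffffff, right shifts against the little-endian polynomial, and
 * a final inversion.  Assuming the generic helper from <linux/crc32.h>,
 * the same value could be computed as:
 *
 *	u32 crc = ~crc32_le(~0, buf, len);
 */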
9707 static inline u32 calc_crc(unsigned char *buf, int len)
9708 {
9709 u32 reg;
9710 u32 tmp;
9711 int j, k;
9712
9713 reg = 0xffffffff;
9714
9715 for (j = 0; j < len; j++) {
9716 reg ^= buf[j];
9717
9718 for (k = 0; k < 8; k++) {
9719 tmp = reg & 0x01;
9720
9721 reg >>= 1;
9722
9723 if (tmp)
9724 reg ^= CRC32_POLY_LE;
9725 }
9726 }
9727
9728 return ~reg;
9729 }
9730
9731 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9732 {
9733 /* accept or reject all multicast frames */
9734 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9735 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9736 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9737 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9738 }
9739
9740 static void __tg3_set_rx_mode(struct net_device *dev)
9741 {
9742 struct tg3 *tp = netdev_priv(dev);
9743 u32 rx_mode;
9744
9745 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9746 RX_MODE_KEEP_VLAN_TAG);
9747
9748 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9749 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9750 * flag clear.
9751 */
9752 if (!tg3_flag(tp, ENABLE_ASF))
9753 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9754 #endif
9755
9756 if (dev->flags & IFF_PROMISC) {
9757 /* Promiscuous mode. */
9758 rx_mode |= RX_MODE_PROMISC;
9759 } else if (dev->flags & IFF_ALLMULTI) {
9760 /* Accept all multicast. */
9761 tg3_set_multi(tp, 1);
9762 } else if (netdev_mc_empty(dev)) {
9763 /* Reject all multicast. */
9764 tg3_set_multi(tp, 0);
9765 } else {
9766 		/* Accept one or more multicast addresses. */
9767 struct netdev_hw_addr *ha;
9768 u32 mc_filter[4] = { 0, };
9769 u32 regidx;
9770 u32 bit;
9771 u32 crc;
9772
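		/* Hash each address into one of 128 filter bits: the low
		 * seven bits of the inverted CRC select the bit, bits 6:5
		 * choosing one of the four 32-bit hash registers and bits
		 * 4:0 the position within it.  E.g. a value of 0x4a sets
		 * bit 10 of mc_filter[2].
		 */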
9773 netdev_for_each_mc_addr(ha, dev) {
9774 crc = calc_crc(ha->addr, ETH_ALEN);
9775 bit = ~crc & 0x7f;
9776 regidx = (bit & 0x60) >> 5;
9777 bit &= 0x1f;
9778 mc_filter[regidx] |= (1 << bit);
9779 }
9780
9781 tw32(MAC_HASH_REG_0, mc_filter[0]);
9782 tw32(MAC_HASH_REG_1, mc_filter[1]);
9783 tw32(MAC_HASH_REG_2, mc_filter[2]);
9784 tw32(MAC_HASH_REG_3, mc_filter[3]);
9785 }
9786
9787 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9788 rx_mode |= RX_MODE_PROMISC;
9789 } else if (!(dev->flags & IFF_PROMISC)) {
9790 		/* Add all entries to the MAC addr filter list */
9791 int i = 0;
9792 struct netdev_hw_addr *ha;
9793
9794 netdev_for_each_uc_addr(ha, dev) {
9795 __tg3_set_one_mac_addr(tp, ha->addr,
9796 i + TG3_UCAST_ADDR_IDX(tp));
9797 i++;
9798 }
9799 }
9800
9801 if (rx_mode != tp->rx_mode) {
9802 tp->rx_mode = rx_mode;
9803 tw32_f(MAC_RX_MODE, rx_mode);
9804 udelay(10);
9805 }
9806 }
9807
9808 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9809 {
9810 int i;
9811
9812 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9813 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9814 }
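/* The default table above spreads rings round-robin:
 * ethtool_rxfh_indir_default(i, qcnt) is effectively i % qcnt, so with
 * qcnt == 4 the 128-entry table holds the repeating pattern 0, 1, 2, 3.
 */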
9815
9816 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9817 {
9818 int i;
9819
9820 if (!tg3_flag(tp, SUPPORT_MSIX))
9821 return;
9822
9823 if (tp->rxq_cnt == 1) {
9824 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9825 return;
9826 }
9827
9828 /* Validate table against current IRQ count */
9829 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9830 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9831 break;
9832 }
9833
9834 if (i != TG3_RSS_INDIR_TBL_SIZE)
9835 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9836 }
9837
9838 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9839 {
9840 int i = 0;
9841 u32 reg = MAC_RSS_INDIR_TBL_0;
9842
9843 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9844 u32 val = tp->rss_ind_tbl[i];
9845 i++;
9846 for (; i % 8; i++) {
9847 val <<= 4;
9848 val |= tp->rss_ind_tbl[i];
9849 }
9850 tw32(reg, val);
9851 reg += 4;
9852 }
9853 }
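/* Layout note for the writer above: entries are packed eight per 32-bit
 * register, four bits each, most significant nibble first, so entry 0 of
 * each group lands in bits 31:28 and entry 7 in bits 3:0.  With a
 * 128-entry table that is 16 registers starting at MAC_RSS_INDIR_TBL_0,
 * four bytes apart.
 */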
9854
9855 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9856 {
9857 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9858 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9859 else
9860 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9861 }
9862
9863 /* tp->lock is held. */
9864 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9865 {
9866 u32 val, rdmac_mode;
9867 int i, err, limit;
9868 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9869
9870 tg3_disable_ints(tp);
9871
9872 tg3_stop_fw(tp);
9873
9874 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9875
9876 if (tg3_flag(tp, INIT_COMPLETE))
9877 tg3_abort_hw(tp, 1);
9878
9879 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9880 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9881 tg3_phy_pull_config(tp);
9882 tg3_eee_pull_config(tp, NULL);
9883 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9884 }
9885
9886 /* Enable MAC control of LPI */
9887 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9888 tg3_setup_eee(tp);
9889
9890 if (reset_phy)
9891 tg3_phy_reset(tp);
9892
9893 err = tg3_chip_reset(tp);
9894 if (err)
9895 return err;
9896
9897 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9898
9899 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9900 val = tr32(TG3_CPMU_CTRL);
9901 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9902 tw32(TG3_CPMU_CTRL, val);
9903
9904 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9905 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9906 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9907 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9908
9909 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9910 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9911 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9912 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9913
9914 val = tr32(TG3_CPMU_HST_ACC);
9915 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9916 val |= CPMU_HST_ACC_MACCLK_6_25;
9917 tw32(TG3_CPMU_HST_ACC, val);
9918 }
9919
9920 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9921 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9922 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9923 PCIE_PWR_MGMT_L1_THRESH_4MS;
9924 tw32(PCIE_PWR_MGMT_THRESH, val);
9925
9926 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9927 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9928
9929 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9930
9931 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9932 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9933 }
9934
9935 if (tg3_flag(tp, L1PLLPD_EN)) {
9936 u32 grc_mode = tr32(GRC_MODE);
9937
9938 /* Access the lower 1K of PL PCIE block registers. */
9939 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9940 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9941
9942 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9943 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9944 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9945
9946 tw32(GRC_MODE, grc_mode);
9947 }
9948
9949 if (tg3_flag(tp, 57765_CLASS)) {
9950 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9951 u32 grc_mode = tr32(GRC_MODE);
9952
9953 /* Access the lower 1K of PL PCIE block registers. */
9954 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9955 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9956
9957 val = tr32(TG3_PCIE_TLDLPL_PORT +
9958 TG3_PCIE_PL_LO_PHYCTL5);
9959 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9960 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9961
9962 tw32(GRC_MODE, grc_mode);
9963 }
9964
9965 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9966 u32 grc_mode;
9967
9968 /* Fix transmit hangs */
9969 val = tr32(TG3_CPMU_PADRNG_CTL);
9970 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9971 tw32(TG3_CPMU_PADRNG_CTL, val);
9972
9973 grc_mode = tr32(GRC_MODE);
9974
9975 /* Access the lower 1K of DL PCIE block registers. */
9976 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9977 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9978
9979 val = tr32(TG3_PCIE_TLDLPL_PORT +
9980 TG3_PCIE_DL_LO_FTSMAX);
9981 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9982 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9983 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9984
9985 tw32(GRC_MODE, grc_mode);
9986 }
9987
9988 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9989 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9990 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9991 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9992 }
9993
9994 /* This works around an issue with Athlon chipsets on
9995 * B3 tigon3 silicon. This bit has no effect on any
9996 * other revision. But do not set this on PCI Express
9997 * chips and don't even touch the clocks if the CPMU is present.
9998 */
9999 if (!tg3_flag(tp, CPMU_PRESENT)) {
10000 if (!tg3_flag(tp, PCI_EXPRESS))
10001 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10002 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10003 }
10004
10005 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10006 tg3_flag(tp, PCIX_MODE)) {
10007 val = tr32(TG3PCI_PCISTATE);
10008 val |= PCISTATE_RETRY_SAME_DMA;
10009 tw32(TG3PCI_PCISTATE, val);
10010 }
10011
10012 if (tg3_flag(tp, ENABLE_APE)) {
10013 /* Allow reads and writes to the
10014 * APE register and memory space.
10015 */
10016 val = tr32(TG3PCI_PCISTATE);
10017 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10018 PCISTATE_ALLOW_APE_SHMEM_WR |
10019 PCISTATE_ALLOW_APE_PSPACE_WR;
10020 tw32(TG3PCI_PCISTATE, val);
10021 }
10022
10023 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10024 /* Enable some hw fixes. */
10025 val = tr32(TG3PCI_MSI_DATA);
10026 val |= (1 << 26) | (1 << 28) | (1 << 29);
10027 tw32(TG3PCI_MSI_DATA, val);
10028 }
10029
10030 /* Descriptor ring init may make accesses to the
10031 	 * NIC SRAM area to set up the TX descriptors, so we
10032 * can only do this after the hardware has been
10033 * successfully reset.
10034 */
10035 err = tg3_init_rings(tp);
10036 if (err)
10037 return err;
10038
10039 if (tg3_flag(tp, 57765_PLUS)) {
10040 val = tr32(TG3PCI_DMA_RW_CTRL) &
10041 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10042 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10043 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10044 if (!tg3_flag(tp, 57765_CLASS) &&
10045 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10046 tg3_asic_rev(tp) != ASIC_REV_5762)
10047 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10048 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10049 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10050 tg3_asic_rev(tp) != ASIC_REV_5761) {
10051 		/* This value is determined during the probe-time DMA
10052 * engine test, tg3_test_dma.
10053 */
10054 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10055 }
10056
10057 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10058 GRC_MODE_4X_NIC_SEND_RINGS |
10059 GRC_MODE_NO_TX_PHDR_CSUM |
10060 GRC_MODE_NO_RX_PHDR_CSUM);
10061 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10062
10063 /* Pseudo-header checksum is done by hardware logic and not
10064 	 * the offload processors, so make the chip do the pseudo-
10065 * header checksums on receive. For transmit it is more
10066 * convenient to do the pseudo-header checksum in software
10067 * as Linux does that on transmit for us in all cases.
10068 */
10069 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10070
10071 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10072 if (tp->rxptpctl)
10073 tw32(TG3_RX_PTP_CTL,
10074 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10075
10076 if (tg3_flag(tp, PTP_CAPABLE))
10077 val |= GRC_MODE_TIME_SYNC_ENABLE;
10078
10079 tw32(GRC_MODE, tp->grc_mode | val);
10080
10081 	/* On some AMD platforms, MRRS is restricted to 4000 because of a
10082 	 * south bridge limitation. As a workaround, the driver sets MRRS
10083 	 * to 2048 instead of the default 4096.
10084 	 */
10085 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10086 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10087 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10088 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10089 }
10090
10091 	/* Set up the timer prescaler register. The clock is always 66 MHz. */
10092 val = tr32(GRC_MISC_CFG);
10093 val &= ~0xff;
10094 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10095 tw32(GRC_MISC_CFG, val);
10096
10097 /* Initialize MBUF/DESC pool. */
10098 if (tg3_flag(tp, 5750_PLUS)) {
10099 /* Do nothing. */
10100 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10101 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10102 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10103 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10104 else
10105 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10106 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10107 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10108 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10109 int fw_len;
10110
10111 fw_len = tp->fw_len;
10112 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10113 tw32(BUFMGR_MB_POOL_ADDR,
10114 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10115 tw32(BUFMGR_MB_POOL_SIZE,
10116 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10117 }
10118
10119 if (tp->dev->mtu <= ETH_DATA_LEN) {
10120 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10121 tp->bufmgr_config.mbuf_read_dma_low_water);
10122 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10123 tp->bufmgr_config.mbuf_mac_rx_low_water);
10124 tw32(BUFMGR_MB_HIGH_WATER,
10125 tp->bufmgr_config.mbuf_high_water);
10126 } else {
10127 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10128 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10129 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10130 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10131 tw32(BUFMGR_MB_HIGH_WATER,
10132 tp->bufmgr_config.mbuf_high_water_jumbo);
10133 }
10134 tw32(BUFMGR_DMA_LOW_WATER,
10135 tp->bufmgr_config.dma_low_water);
10136 tw32(BUFMGR_DMA_HIGH_WATER,
10137 tp->bufmgr_config.dma_high_water);
10138
10139 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10140 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10141 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10142 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10143 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10144 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10145 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10146 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10147 tw32(BUFMGR_MODE, val);
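	/* Poll for the buffer manager to come up: 2000 iterations of
	 * 10 us each gives the hardware up to ~20 ms before we give up.
	 */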
10148 for (i = 0; i < 2000; i++) {
10149 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10150 break;
10151 udelay(10);
10152 }
10153 if (i >= 2000) {
10154 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10155 return -ENODEV;
10156 }
10157
10158 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10159 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10160
10161 tg3_setup_rxbd_thresholds(tp);
10162
10163 /* Initialize TG3_BDINFO's at:
10164 * RCVDBDI_STD_BD: standard eth size rx ring
10165 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10166 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10167 *
10168 * like so:
10169 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10170 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10171 * ring attribute flags
10172 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10173 *
10174 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10175 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10176 *
10177 * The size of each ring is fixed in the firmware, but the location is
10178 * configurable.
10179 */
10180 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10181 ((u64) tpr->rx_std_mapping >> 32));
10182 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10183 ((u64) tpr->rx_std_mapping & 0xffffffff));
10184 if (!tg3_flag(tp, 5717_PLUS))
10185 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10186 NIC_SRAM_RX_BUFFER_DESC);
10187
10188 /* Disable the mini ring */
10189 if (!tg3_flag(tp, 5705_PLUS))
10190 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10191 BDINFO_FLAGS_DISABLED);
10192
10193 /* Program the jumbo buffer descriptor ring control
10194 * blocks on those devices that have them.
10195 */
10196 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10197 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10198
10199 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10200 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10201 ((u64) tpr->rx_jmb_mapping >> 32));
10202 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10203 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10204 val = TG3_RX_JMB_RING_SIZE(tp) <<
10205 BDINFO_FLAGS_MAXLEN_SHIFT;
10206 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10207 val | BDINFO_FLAGS_USE_EXT_RECV);
10208 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10209 tg3_flag(tp, 57765_CLASS) ||
10210 tg3_asic_rev(tp) == ASIC_REV_5762)
10211 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10212 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10213 } else {
10214 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10215 BDINFO_FLAGS_DISABLED);
10216 }
10217
10218 if (tg3_flag(tp, 57765_PLUS)) {
10219 val = TG3_RX_STD_RING_SIZE(tp);
10220 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10221 val |= (TG3_RX_STD_DMA_SZ << 2);
10222 } else
10223 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10224 } else
10225 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10226
10227 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10228
10229 tpr->rx_std_prod_idx = tp->rx_pending;
10230 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10231
10232 tpr->rx_jmb_prod_idx =
10233 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10234 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10235
10236 tg3_rings_reset(tp);
10237
10238 /* Initialize MAC address and backoff seed. */
10239 __tg3_set_mac_addr(tp, false);
10240
10241 /* MTU + ethernet header + FCS + optional VLAN tag */
10242 tw32(MAC_RX_MTU_SIZE,
10243 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10244
10245 /* The slot time is changed by tg3_setup_phy if we
10246 * run at gigabit with half duplex.
10247 */
10248 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10249 (6 << TX_LENGTHS_IPG_SHIFT) |
10250 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10251
10252 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10253 tg3_asic_rev(tp) == ASIC_REV_5762)
10254 val |= tr32(MAC_TX_LENGTHS) &
10255 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10256 TX_LENGTHS_CNT_DWN_VAL_MSK);
10257
10258 tw32(MAC_TX_LENGTHS, val);
10259
10260 /* Receive rules. */
10261 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10262 tw32(RCVLPC_CONFIG, 0x0181);
10263
10264 	/* Calculate the RDMAC_MODE setting early, as we need it to determine
10265 * the RCVLPC_STATE_ENABLE mask.
10266 */
10267 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10268 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10269 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10270 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10271 RDMAC_MODE_LNGREAD_ENAB);
10272
10273 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10274 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10275
10276 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10277 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10278 tg3_asic_rev(tp) == ASIC_REV_57780)
10279 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10280 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10281 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10282
10283 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10284 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10285 if (tg3_flag(tp, TSO_CAPABLE) &&
10286 tg3_asic_rev(tp) == ASIC_REV_5705) {
10287 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10288 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10289 !tg3_flag(tp, IS_5788)) {
10290 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10291 }
10292 }
10293
10294 if (tg3_flag(tp, PCI_EXPRESS))
10295 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10296
10297 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10298 tp->dma_limit = 0;
10299 if (tp->dev->mtu <= ETH_DATA_LEN) {
10300 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10301 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10302 }
10303 }
10304
10305 if (tg3_flag(tp, HW_TSO_1) ||
10306 tg3_flag(tp, HW_TSO_2) ||
10307 tg3_flag(tp, HW_TSO_3))
10308 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10309
10310 if (tg3_flag(tp, 57765_PLUS) ||
10311 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10312 tg3_asic_rev(tp) == ASIC_REV_57780)
10313 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10314
10315 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10316 tg3_asic_rev(tp) == ASIC_REV_5762)
10317 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10318
10319 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10320 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10321 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10322 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10323 tg3_flag(tp, 57765_PLUS)) {
10324 u32 tgtreg;
10325
10326 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10327 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10328 else
10329 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10330
10331 val = tr32(tgtreg);
10332 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10333 tg3_asic_rev(tp) == ASIC_REV_5762) {
10334 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10335 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10336 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10337 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10338 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10339 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10340 }
10341 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10342 }
10343
10344 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10345 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10346 tg3_asic_rev(tp) == ASIC_REV_5762) {
10347 u32 tgtreg;
10348
10349 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10350 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10351 else
10352 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10353
10354 val = tr32(tgtreg);
10355 tw32(tgtreg, val |
10356 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10357 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10358 }
10359
10360 /* Receive/send statistics. */
10361 if (tg3_flag(tp, 5750_PLUS)) {
10362 val = tr32(RCVLPC_STATS_ENABLE);
10363 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10364 tw32(RCVLPC_STATS_ENABLE, val);
10365 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10366 tg3_flag(tp, TSO_CAPABLE)) {
10367 val = tr32(RCVLPC_STATS_ENABLE);
10368 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10369 tw32(RCVLPC_STATS_ENABLE, val);
10370 } else {
10371 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10372 }
10373 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10374 tw32(SNDDATAI_STATSENAB, 0xffffff);
10375 tw32(SNDDATAI_STATSCTRL,
10376 (SNDDATAI_SCTRL_ENABLE |
10377 SNDDATAI_SCTRL_FASTUPD));
10378
10379 /* Setup host coalescing engine. */
10380 tw32(HOSTCC_MODE, 0);
10381 for (i = 0; i < 2000; i++) {
10382 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10383 break;
10384 udelay(10);
10385 }
10386
10387 __tg3_set_coalesce(tp, &tp->coal);
10388
10389 if (!tg3_flag(tp, 5705_PLUS)) {
10390 /* Status/statistics block address. See tg3_timer,
10391 * the tg3_periodic_fetch_stats call there, and
10392 * tg3_get_stats to see how this works for 5705/5750 chips.
10393 */
10394 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10395 ((u64) tp->stats_mapping >> 32));
10396 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10397 ((u64) tp->stats_mapping & 0xffffffff));
10398 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10399
10400 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10401
10402 /* Clear statistics and status block memory areas */
10403 for (i = NIC_SRAM_STATS_BLK;
10404 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10405 i += sizeof(u32)) {
10406 tg3_write_mem(tp, i, 0);
10407 udelay(40);
10408 }
10409 }
10410
10411 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10412
10413 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10414 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10415 if (!tg3_flag(tp, 5705_PLUS))
10416 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10417
10418 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10419 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10420 /* reset to prevent losing 1st rx packet intermittently */
10421 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10422 udelay(10);
10423 }
10424
10425 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10426 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10427 MAC_MODE_FHDE_ENABLE;
10428 if (tg3_flag(tp, ENABLE_APE))
10429 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10430 if (!tg3_flag(tp, 5705_PLUS) &&
10431 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10432 tg3_asic_rev(tp) != ASIC_REV_5700)
10433 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10434 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10435 udelay(40);
10436
10437 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10438 * If TG3_FLAG_IS_NIC is zero, we should read the
10439 * register to preserve the GPIO settings for LOMs. The GPIOs,
10440 * whether used as inputs or outputs, are set by boot code after
10441 * reset.
10442 */
10443 if (!tg3_flag(tp, IS_NIC)) {
10444 u32 gpio_mask;
10445
10446 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10447 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10448 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10449
10450 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10451 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10452 GRC_LCLCTRL_GPIO_OUTPUT3;
10453
10454 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10455 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10456
10457 tp->grc_local_ctrl &= ~gpio_mask;
10458 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10459
10460 /* GPIO1 must be driven high for eeprom write protect */
10461 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10462 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10463 GRC_LCLCTRL_GPIO_OUTPUT1);
10464 }
10465 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10466 udelay(100);
10467
10468 if (tg3_flag(tp, USING_MSIX)) {
10469 val = tr32(MSGINT_MODE);
10470 val |= MSGINT_MODE_ENABLE;
10471 if (tp->irq_cnt > 1)
10472 val |= MSGINT_MODE_MULTIVEC_EN;
10473 if (!tg3_flag(tp, 1SHOT_MSI))
10474 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10475 tw32(MSGINT_MODE, val);
10476 }
10477
10478 if (!tg3_flag(tp, 5705_PLUS)) {
10479 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10480 udelay(40);
10481 }
10482
10483 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10484 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10485 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10486 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10487 WDMAC_MODE_LNGREAD_ENAB);
10488
10489 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10490 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10491 if (tg3_flag(tp, TSO_CAPABLE) &&
10492 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10493 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10494 /* nothing */
10495 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10496 !tg3_flag(tp, IS_5788)) {
10497 val |= WDMAC_MODE_RX_ACCEL;
10498 }
10499 }
10500
10501 /* Enable host coalescing bug fix */
10502 if (tg3_flag(tp, 5755_PLUS))
10503 val |= WDMAC_MODE_STATUS_TAG_FIX;
10504
10505 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10506 val |= WDMAC_MODE_BURST_ALL_DATA;
10507
10508 tw32_f(WDMAC_MODE, val);
10509 udelay(40);
10510
10511 if (tg3_flag(tp, PCIX_MODE)) {
10512 u16 pcix_cmd;
10513
10514 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10515 &pcix_cmd);
10516 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10517 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10518 pcix_cmd |= PCI_X_CMD_READ_2K;
10519 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10520 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10521 pcix_cmd |= PCI_X_CMD_READ_2K;
10522 }
10523 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10524 pcix_cmd);
10525 }
10526
10527 tw32_f(RDMAC_MODE, rdmac_mode);
10528 udelay(40);
10529
10530 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10531 tg3_asic_rev(tp) == ASIC_REV_5720) {
10532 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10533 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10534 break;
10535 }
10536 if (i < TG3_NUM_RDMA_CHANNELS) {
10537 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10538 val |= tg3_lso_rd_dma_workaround_bit(tp);
10539 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10540 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10541 }
10542 }
10543
10544 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10545 if (!tg3_flag(tp, 5705_PLUS))
10546 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10547
10548 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10549 tw32(SNDDATAC_MODE,
10550 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10551 else
10552 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10553
10554 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10555 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10556 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10557 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10558 val |= RCVDBDI_MODE_LRG_RING_SZ;
10559 tw32(RCVDBDI_MODE, val);
10560 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10561 if (tg3_flag(tp, HW_TSO_1) ||
10562 tg3_flag(tp, HW_TSO_2) ||
10563 tg3_flag(tp, HW_TSO_3))
10564 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10565 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10566 if (tg3_flag(tp, ENABLE_TSS))
10567 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10568 tw32(SNDBDI_MODE, val);
10569 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10570
10571 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10572 err = tg3_load_5701_a0_firmware_fix(tp);
10573 if (err)
10574 return err;
10575 }
10576
10577 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10578 		/* Ignore any errors from the firmware download. If the
10579 		 * download fails, the device will operate with EEE disabled.
10580 */
10581 tg3_load_57766_firmware(tp);
10582 }
10583
10584 if (tg3_flag(tp, TSO_CAPABLE)) {
10585 err = tg3_load_tso_firmware(tp);
10586 if (err)
10587 return err;
10588 }
10589
10590 tp->tx_mode = TX_MODE_ENABLE;
10591
10592 if (tg3_flag(tp, 5755_PLUS) ||
10593 tg3_asic_rev(tp) == ASIC_REV_5906)
10594 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10595
10596 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10597 tg3_asic_rev(tp) == ASIC_REV_5762) {
10598 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10599 tp->tx_mode &= ~val;
10600 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10601 }
10602
10603 tw32_f(MAC_TX_MODE, tp->tx_mode);
10604 udelay(100);
10605
10606 if (tg3_flag(tp, ENABLE_RSS)) {
10607 u32 rss_key[10];
10608
10609 tg3_rss_write_indir_tbl(tp);
10610
10611 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10612
10613 for (i = 0; i < 10 ; i++)
10614 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10615 }
10616
10617 tp->rx_mode = RX_MODE_ENABLE;
10618 if (tg3_flag(tp, 5755_PLUS))
10619 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10620
10621 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10622 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10623
10624 if (tg3_flag(tp, ENABLE_RSS))
10625 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10626 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10627 RX_MODE_RSS_IPV6_HASH_EN |
10628 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10629 RX_MODE_RSS_IPV4_HASH_EN |
10630 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10631
10632 tw32_f(MAC_RX_MODE, tp->rx_mode);
10633 udelay(10);
10634
10635 tw32(MAC_LED_CTRL, tp->led_ctrl);
10636
10637 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10638 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10639 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10640 udelay(10);
10641 }
10642 tw32_f(MAC_RX_MODE, tp->rx_mode);
10643 udelay(10);
10644
10645 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10646 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10647 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10648 /* Set drive transmission level to 1.2V */
10649 /* only if the signal pre-emphasis bit is not set */
10650 val = tr32(MAC_SERDES_CFG);
10651 val &= 0xfffff000;
10652 val |= 0x880;
10653 tw32(MAC_SERDES_CFG, val);
10654 }
10655 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10656 tw32(MAC_SERDES_CFG, 0x616000);
10657 }
10658
10659 /* Prevent chip from dropping frames when flow control
10660 * is enabled.
10661 */
10662 if (tg3_flag(tp, 57765_CLASS))
10663 val = 1;
10664 else
10665 val = 2;
10666 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10667
10668 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10669 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10670 /* Use hardware link auto-negotiation */
10671 tg3_flag_set(tp, HW_AUTONEG);
10672 }
10673
10674 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10675 tg3_asic_rev(tp) == ASIC_REV_5714) {
10676 u32 tmp;
10677
10678 tmp = tr32(SERDES_RX_CTRL);
10679 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10680 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10681 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10682 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10683 }
10684
10685 if (!tg3_flag(tp, USE_PHYLIB)) {
10686 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10687 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10688
10689 err = tg3_setup_phy(tp, false);
10690 if (err)
10691 return err;
10692
10693 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10694 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10695 u32 tmp;
10696
10697 /* Clear CRC stats. */
10698 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10699 tg3_writephy(tp, MII_TG3_TEST1,
10700 tmp | MII_TG3_TEST1_CRC_EN);
10701 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10702 }
10703 }
10704 }
10705
10706 __tg3_set_rx_mode(tp->dev);
10707
10708 /* Initialize receive rules. */
10709 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10710 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10711 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10712 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10713
10714 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10715 limit = 8;
10716 else
10717 limit = 16;
10718 if (tg3_flag(tp, ENABLE_ASF))
10719 limit -= 4;
10720 switch (limit) {
10721 case 16:
10722 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10723 fallthrough;
10724 case 15:
10725 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10726 fallthrough;
10727 case 14:
10728 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10729 fallthrough;
10730 case 13:
10731 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10732 fallthrough;
10733 case 12:
10734 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10735 fallthrough;
10736 case 11:
10737 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10738 fallthrough;
10739 case 10:
10740 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10741 fallthrough;
10742 case 9:
10743 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10744 fallthrough;
10745 case 8:
10746 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10747 fallthrough;
10748 case 7:
10749 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10750 fallthrough;
10751 case 6:
10752 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10753 fallthrough;
10754 case 5:
10755 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10756 fallthrough;
10757 case 4:
10758 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10759 case 3:
10760 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10761 case 2:
10762 case 1:
10763
10764 default:
10765 break;
10766 }
10767
10768 if (tg3_flag(tp, ENABLE_APE))
10769 /* Write our heartbeat update interval to APE. */
10770 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10771 APE_HOST_HEARTBEAT_INT_5SEC);
10772
10773 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10774
10775 return 0;
10776 }
10777
10778 /* Called at device open time to get the chip ready for
10779 * packet processing. Invoked with tp->lock held.
10780 */
10781 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10782 {
10783 /* Chip may have been just powered on. If so, the boot code may still
10784 * be running initialization. Wait for it to finish to avoid races in
10785 * accessing the hardware.
10786 */
10787 tg3_enable_register_access(tp);
10788 tg3_poll_fw(tp);
10789
10790 tg3_switch_clocks(tp);
10791
10792 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10793
10794 return tg3_reset_hw(tp, reset_phy);
10795 }
10796
10797 #ifdef CONFIG_TIGON3_HWMON
10798 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10799 {
10800 u32 off, len = TG3_OCIR_LEN;
10801 int i;
10802
10803 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10804 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10805
10806 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10807 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10808 memset(ocir, 0, len);
10809 }
10810 }
10811
10812 /* sysfs attributes for hwmon */
10813 static ssize_t tg3_show_temp(struct device *dev,
10814 struct device_attribute *devattr, char *buf)
10815 {
10816 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10817 struct tg3 *tp = dev_get_drvdata(dev);
10818 u32 temperature;
10819
10820 spin_lock_bh(&tp->lock);
10821 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10822 sizeof(temperature));
10823 spin_unlock_bh(&tp->lock);
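	/* hwmon sysfs expects millidegrees Celsius, hence the scaling;
	 * the raw APE scratchpad value is evidently in whole degrees.
	 */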
10824 return sprintf(buf, "%u\n", temperature * 1000);
10825 }
10826
10827
10828 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10829 TG3_TEMP_SENSOR_OFFSET);
10830 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10831 TG3_TEMP_CAUTION_OFFSET);
10832 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10833 TG3_TEMP_MAX_OFFSET);
10834
10835 static struct attribute *tg3_attrs[] = {
10836 &sensor_dev_attr_temp1_input.dev_attr.attr,
10837 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10838 &sensor_dev_attr_temp1_max.dev_attr.attr,
10839 NULL
10840 };
10841 ATTRIBUTE_GROUPS(tg3);
10842
10843 static void tg3_hwmon_close(struct tg3 *tp)
10844 {
10845 if (tp->hwmon_dev) {
10846 hwmon_device_unregister(tp->hwmon_dev);
10847 tp->hwmon_dev = NULL;
10848 }
10849 }
10850
10851 static void tg3_hwmon_open(struct tg3 *tp)
10852 {
10853 int i;
10854 u32 size = 0;
10855 struct pci_dev *pdev = tp->pdev;
10856 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10857
10858 tg3_sd_scan_scratchpad(tp, ocirs);
10859
10860 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10861 if (!ocirs[i].src_data_length)
10862 continue;
10863
10864 size += ocirs[i].src_hdr_length;
10865 size += ocirs[i].src_data_length;
10866 }
10867
10868 if (!size)
10869 return;
10870
10871 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10872 tp, tg3_groups);
10873 if (IS_ERR(tp->hwmon_dev)) {
10874 tp->hwmon_dev = NULL;
10875 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10876 }
10877 }
10878 #else
10879 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10880 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10881 #endif /* CONFIG_TIGON3_HWMON */
10882
10883
10884 #define TG3_STAT_ADD32(PSTAT, REG) \
10885 do { u32 __val = tr32(REG); \
10886 (PSTAT)->low += __val; \
10887 if ((PSTAT)->low < __val) \
10888 (PSTAT)->high += 1; \
10889 } while (0)
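/* TG3_STAT_ADD32() widens a 32-bit hardware counter into the 64-bit
 * {high,low} pair, detecting wraparound through unsigned overflow: the
 * sum is smaller than the addend exactly when a carry occurred.  For
 * example, low = 0xffffff00 plus __val = 0x200 gives low = 0x100, which
 * is < 0x200, so high is incremented.
 */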
10890
10891 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10892 {
10893 struct tg3_hw_stats *sp = tp->hw_stats;
10894
10895 if (!tp->link_up)
10896 return;
10897
10898 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10899 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10900 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10901 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10902 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10903 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10904 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10905 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10906 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10907 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10908 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10909 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10910 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
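	/* The RDMA length workaround set in tg3_reset_hw() is apparently
	 * only needed until each channel has carried traffic; once more
	 * packets than TG3_NUM_RDMA_CHANNELS have gone out, undo it.
	 */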
10911 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10912 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10913 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10914 u32 val;
10915
10916 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10917 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10918 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10919 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10920 }
10921
10922 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10923 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10924 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10925 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10926 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10927 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10928 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10929 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10930 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10931 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10932 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10933 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10934 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10935 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10936
10937 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10938 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10939 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10940 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10941 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10942 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10943 } else {
10944 u32 val = tr32(HOSTCC_FLOW_ATTN);
10945 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10946 if (val) {
10947 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10948 sp->rx_discards.low += val;
10949 if (sp->rx_discards.low < val)
10950 sp->rx_discards.high += 1;
10951 }
10952 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10953 }
10954 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10955 }
10956
10957 static void tg3_chk_missed_msi(struct tg3 *tp)
10958 {
10959 u32 i;
10960
10961 for (i = 0; i < tp->irq_cnt; i++) {
10962 struct tg3_napi *tnapi = &tp->napi[i];
10963
10964 if (tg3_has_work(tnapi)) {
10965 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10966 tnapi->last_tx_cons == tnapi->tx_cons) {
10967 if (tnapi->chk_msi_cnt < 1) {
10968 tnapi->chk_msi_cnt++;
10969 return;
10970 }
10971 tg3_msi(0, tnapi);
10972 }
10973 }
10974 tnapi->chk_msi_cnt = 0;
10975 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10976 tnapi->last_tx_cons = tnapi->tx_cons;
10977 }
10978 }
10979
10980 static void tg3_timer(struct timer_list *t)
10981 {
10982 struct tg3 *tp = from_timer(tp, t, timer);
10983
10984 spin_lock(&tp->lock);
10985
10986 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10987 spin_unlock(&tp->lock);
10988 goto restart_timer;
10989 }
10990
10991 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10992 tg3_flag(tp, 57765_CLASS))
10993 tg3_chk_missed_msi(tp);
10994
10995 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10996 /* BCM4785: Flush posted writes from GbE to host memory. */
10997 tr32(HOSTCC_MODE);
10998 }
10999
11000 if (!tg3_flag(tp, TAGGED_STATUS)) {
11001 		/* All of this garbage is needed because, when using
11002 		 * non-tagged IRQ status, the mailbox/status_block protocol
11003 		 * the chip uses with the CPU is race prone.
11004 */
11005 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11006 tw32(GRC_LOCAL_CTRL,
11007 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11008 } else {
11009 tw32(HOSTCC_MODE, tp->coalesce_mode |
11010 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11011 }
11012
11013 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11014 spin_unlock(&tp->lock);
11015 tg3_reset_task_schedule(tp);
11016 goto restart_timer;
11017 }
11018 }
11019
11020 /* This part only runs once per second. */
11021 if (!--tp->timer_counter) {
11022 if (tg3_flag(tp, 5705_PLUS))
11023 tg3_periodic_fetch_stats(tp);
11024
11025 if (tp->setlpicnt && !--tp->setlpicnt)
11026 tg3_phy_eee_enable(tp);
11027
11028 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11029 u32 mac_stat;
11030 int phy_event;
11031
11032 mac_stat = tr32(MAC_STATUS);
11033
11034 phy_event = 0;
11035 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11036 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11037 phy_event = 1;
11038 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11039 phy_event = 1;
11040
11041 if (phy_event)
11042 tg3_setup_phy(tp, false);
11043 } else if (tg3_flag(tp, POLL_SERDES)) {
11044 u32 mac_stat = tr32(MAC_STATUS);
11045 int need_setup = 0;
11046
11047 if (tp->link_up &&
11048 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11049 need_setup = 1;
11050 }
11051 if (!tp->link_up &&
11052 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11053 MAC_STATUS_SIGNAL_DET))) {
11054 need_setup = 1;
11055 }
11056 if (need_setup) {
11057 if (!tp->serdes_counter) {
11058 tw32_f(MAC_MODE,
11059 (tp->mac_mode &
11060 ~MAC_MODE_PORT_MODE_MASK));
11061 udelay(40);
11062 tw32_f(MAC_MODE, tp->mac_mode);
11063 udelay(40);
11064 }
11065 tg3_setup_phy(tp, false);
11066 }
11067 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11068 tg3_flag(tp, 5780_CLASS)) {
11069 tg3_serdes_parallel_detect(tp);
11070 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11071 u32 cpmu = tr32(TG3_CPMU_STATUS);
11072 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11073 TG3_CPMU_STATUS_LINK_MASK);
11074
11075 if (link_up != tp->link_up)
11076 tg3_setup_phy(tp, false);
11077 }
11078
11079 tp->timer_counter = tp->timer_multiplier;
11080 }
11081
11082 /* Heartbeat is only sent once every 2 seconds.
11083 *
11084 * The heartbeat is to tell the ASF firmware that the host
11085 * driver is still alive. In the event that the OS crashes,
11086 * ASF needs to reset the hardware to free up the FIFO space
11087 * that may be filled with rx packets destined for the host.
11088 * If the FIFO is full, ASF will no longer function properly.
11089 *
11090 	 * Unintended resets have been reported on real-time kernels
11091 	 * where the timer doesn't run on time. Netpoll will have the
11092 	 * same problem.
11093 *
11094 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11095 * to check the ring condition when the heartbeat is expiring
11096 * before doing the reset. This will prevent most unintended
11097 * resets.
11098 */
11099 if (!--tp->asf_counter) {
11100 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11101 tg3_wait_for_event_ack(tp);
11102
11103 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11104 FWCMD_NICDRV_ALIVE3);
11105 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11106 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11107 TG3_FW_UPDATE_TIMEOUT_SEC);
11108
11109 tg3_generate_fw_event(tp);
11110 }
11111 tp->asf_counter = tp->asf_multiplier;
11112 }
11113
11114 	/* Update the APE heartbeat every 5 seconds. */
11115 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11116
11117 spin_unlock(&tp->lock);
11118
11119 restart_timer:
11120 tp->timer.expires = jiffies + tp->timer_offset;
11121 add_timer(&tp->timer);
11122 }
11123
11124 static void tg3_timer_init(struct tg3 *tp)
11125 {
11126 if (tg3_flag(tp, TAGGED_STATUS) &&
11127 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11128 !tg3_flag(tp, 57765_CLASS))
11129 tp->timer_offset = HZ;
11130 else
11131 tp->timer_offset = HZ / 10;
11132
11133 BUG_ON(tp->timer_offset > HZ);
11134
11135 tp->timer_multiplier = (HZ / tp->timer_offset);
11136 tp->asf_multiplier = (HZ / tp->timer_offset) *
11137 TG3_FW_UPDATE_FREQ_SEC;
11138
11139 timer_setup(&tp->timer, tg3_timer, 0);
11140 }
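/* With the values above the driver tick is 1 s (tagged status) or 100 ms.
 * In the 100 ms case timer_multiplier = HZ / (HZ / 10) = 10, so the
 * once-per-second work in tg3_timer() runs every tenth tick, and
 * asf_multiplier stretches that to TG3_FW_UPDATE_FREQ_SEC seconds between
 * ASF heartbeats.
 */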
11141
11142 static void tg3_timer_start(struct tg3 *tp)
11143 {
11144 tp->asf_counter = tp->asf_multiplier;
11145 tp->timer_counter = tp->timer_multiplier;
11146
11147 tp->timer.expires = jiffies + tp->timer_offset;
11148 add_timer(&tp->timer);
11149 }
11150
11151 static void tg3_timer_stop(struct tg3 *tp)
11152 {
11153 del_timer_sync(&tp->timer);
11154 }
11155
11156 /* Restart hardware after configuration changes, self-test, etc.
11157 * Invoked with tp->lock held.
11158 */
11159 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11160 __releases(tp->lock)
11161 __acquires(tp->lock)
11162 {
11163 int err;
11164
11165 err = tg3_init_hw(tp, reset_phy);
11166 if (err) {
11167 netdev_err(tp->dev,
11168 "Failed to re-initialize device, aborting\n");
11169 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11170 tg3_full_unlock(tp);
11171 tg3_timer_stop(tp);
11172 tp->irq_sync = 0;
11173 tg3_napi_enable(tp);
11174 dev_close(tp->dev);
11175 tg3_full_lock(tp, 0);
11176 }
11177 return err;
11178 }
11179
11180 static void tg3_reset_task(struct work_struct *work)
11181 {
11182 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11183 int err;
11184
11185 rtnl_lock();
11186 tg3_full_lock(tp, 0);
11187
11188 if (!netif_running(tp->dev)) {
11189 tg3_flag_clear(tp, RESET_TASK_PENDING);
11190 tg3_full_unlock(tp);
11191 rtnl_unlock();
11192 return;
11193 }
11194
11195 tg3_full_unlock(tp);
11196
11197 tg3_phy_stop(tp);
11198
11199 tg3_netif_stop(tp);
11200
11201 tg3_full_lock(tp, 1);
11202
11203 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11204 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11205 tp->write32_rx_mbox = tg3_write_flush_reg32;
11206 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11207 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11208 }
11209
11210 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11211 err = tg3_init_hw(tp, true);
11212 if (err) {
11213 tg3_full_unlock(tp);
11214 tp->irq_sync = 0;
11215 tg3_napi_enable(tp);
11216 /* Clear this flag so that tg3_reset_task_cancel() will not
11217 * call cancel_work_sync() and wait forever.
11218 */
11219 tg3_flag_clear(tp, RESET_TASK_PENDING);
11220 dev_close(tp->dev);
11221 goto out;
11222 }
11223
11224 tg3_netif_start(tp);
11225
11226 tg3_full_unlock(tp);
11227
11228 if (!err)
11229 tg3_phy_start(tp);
11230
11231 tg3_flag_clear(tp, RESET_TASK_PENDING);
11232 out:
11233 rtnl_unlock();
11234 }
11235
11236 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11237 {
11238 irq_handler_t fn;
11239 unsigned long flags;
11240 char *name;
11241 struct tg3_napi *tnapi = &tp->napi[irq_num];
11242
11243 if (tp->irq_cnt == 1)
11244 name = tp->dev->name;
11245 else {
11246 name = &tnapi->irq_lbl[0];
11247 if (tnapi->tx_buffers && tnapi->rx_rcb)
11248 snprintf(name, IFNAMSIZ,
11249 "%s-txrx-%d", tp->dev->name, irq_num);
11250 else if (tnapi->tx_buffers)
11251 snprintf(name, IFNAMSIZ,
11252 "%s-tx-%d", tp->dev->name, irq_num);
11253 else if (tnapi->rx_rcb)
11254 snprintf(name, IFNAMSIZ,
11255 "%s-rx-%d", tp->dev->name, irq_num);
11256 else
11257 snprintf(name, IFNAMSIZ,
11258 "%s-%d", tp->dev->name, irq_num);
11259 name[IFNAMSIZ-1] = 0;
11260 }
11261
11262 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11263 fn = tg3_msi;
11264 if (tg3_flag(tp, 1SHOT_MSI))
11265 fn = tg3_msi_1shot;
11266 flags = 0;
11267 } else {
11268 fn = tg3_interrupt;
11269 if (tg3_flag(tp, TAGGED_STATUS))
11270 fn = tg3_interrupt_tagged;
11271 flags = IRQF_SHARED;
11272 }
11273
11274 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11275 }
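/* Summary of the handler selection above (derived from the code itself,
 * not a separate contract): MSI and MSI-X vectors are exclusive to this
 * device, so they are requested without IRQF_SHARED and serviced by
 * tg3_msi (or tg3_msi_1shot when one-shot mode is active); a legacy INTx
 * line may be shared with other devices, so tg3_interrupt (or the
 * tagged-status variant) is registered with IRQF_SHARED.
 */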
11276
11277 static int tg3_test_interrupt(struct tg3 *tp)
11278 {
11279 struct tg3_napi *tnapi = &tp->napi[0];
11280 struct net_device *dev = tp->dev;
11281 int err, i, intr_ok = 0;
11282 u32 val;
11283
11284 if (!netif_running(dev))
11285 return -ENODEV;
11286
11287 tg3_disable_ints(tp);
11288
11289 free_irq(tnapi->irq_vec, tnapi);
11290
11291 /*
11292 * Turn off MSI one shot mode. Otherwise this test has no
11293 * observable way to know whether the interrupt was delivered.
11294 */
11295 if (tg3_flag(tp, 57765_PLUS)) {
11296 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11297 tw32(MSGINT_MODE, val);
11298 }
11299
11300 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11301 IRQF_SHARED, dev->name, tnapi);
11302 if (err)
11303 return err;
11304
11305 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11306 tg3_enable_ints(tp);
11307
11308 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11309 tnapi->coal_now);
11310
11311 for (i = 0; i < 5; i++) {
11312 u32 int_mbox, misc_host_ctrl;
11313
11314 int_mbox = tr32_mailbox(tnapi->int_mbox);
11315 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11316
11317 if ((int_mbox != 0) ||
11318 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11319 intr_ok = 1;
11320 break;
11321 }
11322
11323 if (tg3_flag(tp, 57765_PLUS) &&
11324 tnapi->hw_status->status_tag != tnapi->last_tag)
11325 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11326
11327 msleep(10);
11328 }
11329
11330 tg3_disable_ints(tp);
11331
11332 free_irq(tnapi->irq_vec, tnapi);
11333
11334 err = tg3_request_irq(tp, 0);
11335
11336 if (err)
11337 return err;
11338
11339 if (intr_ok) {
11340 /* Reenable MSI one shot mode. */
11341 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11342 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11343 tw32(MSGINT_MODE, val);
11344 }
11345 return 0;
11346 }
11347
11348 return -EIO;
11349 }
11350
11351 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11352 * INTx mode is successfully restored.
11353 */
11354 static int tg3_test_msi(struct tg3 *tp)
11355 {
11356 int err;
11357 u16 pci_cmd;
11358
11359 if (!tg3_flag(tp, USING_MSI))
11360 return 0;
11361
11362 /* Turn off SERR reporting in case MSI terminates with Master
11363 * Abort.
11364 */
11365 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11366 pci_write_config_word(tp->pdev, PCI_COMMAND,
11367 pci_cmd & ~PCI_COMMAND_SERR);
11368
11369 err = tg3_test_interrupt(tp);
11370
11371 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11372
11373 if (!err)
11374 return 0;
11375
11376 /* other failures */
11377 if (err != -EIO)
11378 return err;
11379
11380 /* MSI test failed, go back to INTx mode */
11381 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11382 "to INTx mode. Please report this failure to the PCI "
11383 "maintainer and include system chipset information\n");
11384
11385 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11386
11387 pci_disable_msi(tp->pdev);
11388
11389 tg3_flag_clear(tp, USING_MSI);
11390 tp->napi[0].irq_vec = tp->pdev->irq;
11391
11392 err = tg3_request_irq(tp, 0);
11393 if (err)
11394 return err;
11395
11396 /* Need to reset the chip because the MSI cycle may have terminated
11397 * with Master Abort.
11398 */
11399 tg3_full_lock(tp, 1);
11400
11401 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11402 err = tg3_init_hw(tp, true);
11403
11404 tg3_full_unlock(tp);
11405
11406 if (err)
11407 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11408
11409 return err;
11410 }
11411
11412 static int tg3_request_firmware(struct tg3 *tp)
11413 {
11414 const struct tg3_firmware_hdr *fw_hdr;
11415
11416 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11417 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11418 tp->fw_needed);
11419 return -ENOENT;
11420 }
11421
11422 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11423
11424 /* Firmware blob starts with version numbers, followed by
11425 * start address and _full_ length including BSS sections
11426 * (which must be longer than the actual data, of course).
11427 */
11428
11429 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11430 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11431 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11432 tp->fw_len, tp->fw_needed);
11433 release_firmware(tp->fw);
11434 tp->fw = NULL;
11435 return -EINVAL;
11436 }
11437
11438 /* We no longer need firmware; we have it. */
11439 tp->fw_needed = NULL;
11440 return 0;
11441 }
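/* For reference, a sketch of the header parsed above, assuming the
 * three big-endian words declared as struct tg3_firmware_hdr in tg3.h:
 *
 *	__be32 version;		-- blob version numbers
 *	__be32 base_addr;	-- load address on the NIC
 *	__be32 len;		-- full image length, including BSS
 *
 * Only 'len' is validated here; the remaining fields are consumed by
 * the firmware loading paths elsewhere in this file.
 */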
11442
11443 static u32 tg3_irq_count(struct tg3 *tp)
11444 {
11445 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11446
11447 if (irq_cnt > 1) {
11448 /* We want as many rx rings enabled as there are cpus.
11449 * In multiqueue MSI-X mode, the first MSI-X vector
11450 * only deals with link interrupts, etc, so we add
11451 * one to the number of vectors we are requesting.
11452 */
11453 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11454 }
11455
11456 return irq_cnt;
11457 }
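/* Worked example (illustrative): on a 4-CPU box with rxq_cnt = 4 and
 * txq_cnt = 1, max() yields 4; since that is greater than 1, one extra
 * vector is added for the link/misc interrupt, so up to 5 MSI-X vectors
 * are requested, clamped to tp->irq_max.
 */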
11458
11459 static bool tg3_enable_msix(struct tg3 *tp)
11460 {
11461 int i, rc;
11462 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11463
11464 tp->txq_cnt = tp->txq_req;
11465 tp->rxq_cnt = tp->rxq_req;
11466 if (!tp->rxq_cnt)
11467 tp->rxq_cnt = netif_get_num_default_rss_queues();
11468 if (tp->rxq_cnt > tp->rxq_max)
11469 tp->rxq_cnt = tp->rxq_max;
11470
11471 /* Disable multiple TX rings by default. Simple round-robin hardware
11472 * scheduling of the TX rings can cause starvation of rings with
11473 * small packets when other rings have TSO or jumbo packets.
11474 */
11475 if (!tp->txq_req)
11476 tp->txq_cnt = 1;
11477
11478 tp->irq_cnt = tg3_irq_count(tp);
11479
11480 for (i = 0; i < tp->irq_max; i++) {
11481 msix_ent[i].entry = i;
11482 msix_ent[i].vector = 0;
11483 }
11484
11485 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11486 if (rc < 0) {
11487 return false;
11488 } else if (rc < tp->irq_cnt) {
11489 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11490 tp->irq_cnt, rc);
11491 tp->irq_cnt = rc;
11492 tp->rxq_cnt = max(rc - 1, 1);
11493 if (tp->txq_cnt)
11494 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11495 }
11496
11497 for (i = 0; i < tp->irq_max; i++)
11498 tp->napi[i].irq_vec = msix_ent[i].vector;
11499
11500 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11501 pci_disable_msix(tp->pdev);
11502 return false;
11503 }
11504
11505 if (tp->irq_cnt == 1)
11506 return true;
11507
11508 tg3_flag_set(tp, ENABLE_RSS);
11509
11510 if (tp->txq_cnt > 1)
11511 tg3_flag_set(tp, ENABLE_TSS);
11512
11513 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11514
11515 return true;
11516 }
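/* Worked example of the partial-grant path above (illustrative): if 5
 * vectors are requested but pci_enable_msix_range() returns 3, irq_cnt
 * becomes 3; with one vector reserved for link/misc events, rxq_cnt
 * drops to max(3 - 1, 1) = 2, and a nonzero txq_cnt is clamped to
 * min(rxq_cnt, txq_max).
 */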
11517
11518 static void tg3_ints_init(struct tg3 *tp)
11519 {
11520 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11521 !tg3_flag(tp, TAGGED_STATUS)) {
11522 /* All MSI supporting chips should support tagged
11523 * status. Assert that this is the case.
11524 */
11525 netdev_warn(tp->dev,
11526 "MSI without TAGGED_STATUS? Not using MSI\n");
11527 goto defcfg;
11528 }
11529
11530 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11531 tg3_flag_set(tp, USING_MSIX);
11532 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11533 tg3_flag_set(tp, USING_MSI);
11534
11535 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11536 u32 msi_mode = tr32(MSGINT_MODE);
11537 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11538 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11539 if (!tg3_flag(tp, 1SHOT_MSI))
11540 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11541 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11542 }
11543 defcfg:
11544 if (!tg3_flag(tp, USING_MSIX)) {
11545 tp->irq_cnt = 1;
11546 tp->napi[0].irq_vec = tp->pdev->irq;
11547 }
11548
11549 if (tp->irq_cnt == 1) {
11550 tp->txq_cnt = 1;
11551 tp->rxq_cnt = 1;
11552 netif_set_real_num_tx_queues(tp->dev, 1);
11553 netif_set_real_num_rx_queues(tp->dev, 1);
11554 }
11555 }
11556
11557 static void tg3_ints_fini(struct tg3 *tp)
11558 {
11559 if (tg3_flag(tp, USING_MSIX))
11560 pci_disable_msix(tp->pdev);
11561 else if (tg3_flag(tp, USING_MSI))
11562 pci_disable_msi(tp->pdev);
11563 tg3_flag_clear(tp, USING_MSI);
11564 tg3_flag_clear(tp, USING_MSIX);
11565 tg3_flag_clear(tp, ENABLE_RSS);
11566 tg3_flag_clear(tp, ENABLE_TSS);
11567 }
11568
11569 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11570 bool init)
11571 {
11572 struct net_device *dev = tp->dev;
11573 int i, err;
11574
11575 /*
11576 * Setup interrupts first so we know how
11577 * many NAPI resources to allocate
11578 */
11579 tg3_ints_init(tp);
11580
11581 tg3_rss_check_indir_tbl(tp);
11582
11583 /* The placement of this call is tied
11584 * to the setup and use of Host TX descriptors.
11585 */
11586 err = tg3_alloc_consistent(tp);
11587 if (err)
11588 goto out_ints_fini;
11589
11590 tg3_napi_init(tp);
11591
11592 tg3_napi_enable(tp);
11593
11594 for (i = 0; i < tp->irq_cnt; i++) {
11595 err = tg3_request_irq(tp, i);
11596 if (err) {
11597 for (i--; i >= 0; i--) {
11598 struct tg3_napi *tnapi = &tp->napi[i];
11599
11600 free_irq(tnapi->irq_vec, tnapi);
11601 }
11602 goto out_napi_fini;
11603 }
11604 }
11605
11606 tg3_full_lock(tp, 0);
11607
11608 if (init)
11609 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11610
11611 err = tg3_init_hw(tp, reset_phy);
11612 if (err) {
11613 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11614 tg3_free_rings(tp);
11615 }
11616
11617 tg3_full_unlock(tp);
11618
11619 if (err)
11620 goto out_free_irq;
11621
11622 if (test_irq && tg3_flag(tp, USING_MSI)) {
11623 err = tg3_test_msi(tp);
11624
11625 if (err) {
11626 tg3_full_lock(tp, 0);
11627 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11628 tg3_free_rings(tp);
11629 tg3_full_unlock(tp);
11630
11631 goto out_napi_fini;
11632 }
11633
11634 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11635 u32 val = tr32(PCIE_TRANSACTION_CFG);
11636
11637 tw32(PCIE_TRANSACTION_CFG,
11638 val | PCIE_TRANS_CFG_1SHOT_MSI);
11639 }
11640 }
11641
11642 tg3_phy_start(tp);
11643
11644 tg3_hwmon_open(tp);
11645
11646 tg3_full_lock(tp, 0);
11647
11648 tg3_timer_start(tp);
11649 tg3_flag_set(tp, INIT_COMPLETE);
11650 tg3_enable_ints(tp);
11651
11652 tg3_ptp_resume(tp);
11653
11654 tg3_full_unlock(tp);
11655
11656 netif_tx_start_all_queues(dev);
11657
11658 /*
11659 * Reset the loopback feature if it was turned on while the device was
11660 * down, to make sure it is re-applied properly now.
11661 */
11662 if (dev->features & NETIF_F_LOOPBACK)
11663 tg3_set_loopback(dev, dev->features);
11664
11665 return 0;
11666
11667 out_free_irq:
11668 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11669 struct tg3_napi *tnapi = &tp->napi[i];
11670 free_irq(tnapi->irq_vec, tnapi);
11671 }
11672
11673 out_napi_fini:
11674 tg3_napi_disable(tp);
11675 tg3_napi_fini(tp);
11676 tg3_free_consistent(tp);
11677
11678 out_ints_fini:
11679 tg3_ints_fini(tp);
11680
11681 return err;
11682 }
11683
11684 static void tg3_stop(struct tg3 *tp)
11685 {
11686 int i;
11687
11688 tg3_reset_task_cancel(tp);
11689 tg3_netif_stop(tp);
11690
11691 tg3_timer_stop(tp);
11692
11693 tg3_hwmon_close(tp);
11694
11695 tg3_phy_stop(tp);
11696
11697 tg3_full_lock(tp, 1);
11698
11699 tg3_disable_ints(tp);
11700
11701 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11702 tg3_free_rings(tp);
11703 tg3_flag_clear(tp, INIT_COMPLETE);
11704
11705 tg3_full_unlock(tp);
11706
11707 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11708 struct tg3_napi *tnapi = &tp->napi[i];
11709 free_irq(tnapi->irq_vec, tnapi);
11710 }
11711
11712 tg3_ints_fini(tp);
11713
11714 tg3_napi_fini(tp);
11715
11716 tg3_free_consistent(tp);
11717 }
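/* Note: the teardown above broadly mirrors tg3_start() in reverse --
 * timer, hwmon and PHY are quiesced first, the chip is halted under the
 * full lock, and only afterwards are the IRQ vectors, NAPI contexts and
 * DMA-consistent memory released.
 */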
11718
11719 static int tg3_open(struct net_device *dev)
11720 {
11721 struct tg3 *tp = netdev_priv(dev);
11722 int err;
11723
11724 if (tp->pcierr_recovery) {
11725 netdev_err(dev, "Failed to open device. PCI error recovery "
11726 "in progress\n");
11727 return -EAGAIN;
11728 }
11729
11730 if (tp->fw_needed) {
11731 err = tg3_request_firmware(tp);
11732 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11733 if (err) {
11734 netdev_warn(tp->dev, "EEE capability disabled\n");
11735 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11736 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11737 netdev_warn(tp->dev, "EEE capability restored\n");
11738 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11739 }
11740 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11741 if (err)
11742 return err;
11743 } else if (err) {
11744 netdev_warn(tp->dev, "TSO capability disabled\n");
11745 tg3_flag_clear(tp, TSO_CAPABLE);
11746 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11747 netdev_notice(tp->dev, "TSO capability restored\n");
11748 tg3_flag_set(tp, TSO_CAPABLE);
11749 }
11750 }
11751
11752 tg3_carrier_off(tp);
11753
11754 err = tg3_power_up(tp);
11755 if (err)
11756 return err;
11757
11758 tg3_full_lock(tp, 0);
11759
11760 tg3_disable_ints(tp);
11761 tg3_flag_clear(tp, INIT_COMPLETE);
11762
11763 tg3_full_unlock(tp);
11764
11765 err = tg3_start(tp,
11766 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11767 true, true);
11768 if (err) {
11769 tg3_frob_aux_power(tp, false);
11770 pci_set_power_state(tp->pdev, PCI_D3hot);
11771 }
11772
11773 return err;
11774 }
11775
11776 static int tg3_close(struct net_device *dev)
11777 {
11778 struct tg3 *tp = netdev_priv(dev);
11779
11780 if (tp->pcierr_recovery) {
11781 netdev_err(dev, "Failed to close device. PCI error recovery "
11782 "in progress\n");
11783 return -EAGAIN;
11784 }
11785
11786 tg3_stop(tp);
11787
11788 if (pci_device_is_present(tp->pdev)) {
11789 tg3_power_down_prepare(tp);
11790
11791 tg3_carrier_off(tp);
11792 }
11793 return 0;
11794 }
11795
11796 static inline u64 get_stat64(tg3_stat64_t *val)
11797 {
11798 return ((u64)val->high << 32) | ((u64)val->low);
11799 }
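/* Example (illustrative): a hardware counter stored as high = 0x1,
 * low = 0x2 is returned as the single 64-bit value 0x0000000100000002.
 */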
11800
11801 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11802 {
11803 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11804
11805 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11806 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11807 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11808 u32 val;
11809
11810 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11811 tg3_writephy(tp, MII_TG3_TEST1,
11812 val | MII_TG3_TEST1_CRC_EN);
11813 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11814 } else
11815 val = 0;
11816
11817 tp->phy_crc_errors += val;
11818
11819 return tp->phy_crc_errors;
11820 }
11821
11822 return get_stat64(&hw_stats->rx_fcs_errors);
11823 }
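/* As the code above shows, on 5700/5701 parts with a copper PHY the CRC
 * error count comes from the PHY rather than the MAC statistics block:
 * MII_TG3_TEST1_CRC_EN exposes the counter, MII_TG3_RXR_COUNTERS supplies
 * the latest delta, and tp->phy_crc_errors accumulates it across reads.
 * All other chips simply report the MAC's rx_fcs_errors statistic.
 */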
11824
11825 #define ESTAT_ADD(member) \
11826 estats->member = old_estats->member + \
11827 get_stat64(&hw_stats->member)
11828
11829 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11830 {
11831 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11832 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11833
11834 ESTAT_ADD(rx_octets);
11835 ESTAT_ADD(rx_fragments);
11836 ESTAT_ADD(rx_ucast_packets);
11837 ESTAT_ADD(rx_mcast_packets);
11838 ESTAT_ADD(rx_bcast_packets);
11839 ESTAT_ADD(rx_fcs_errors);
11840 ESTAT_ADD(rx_align_errors);
11841 ESTAT_ADD(rx_xon_pause_rcvd);
11842 ESTAT_ADD(rx_xoff_pause_rcvd);
11843 ESTAT_ADD(rx_mac_ctrl_rcvd);
11844 ESTAT_ADD(rx_xoff_entered);
11845 ESTAT_ADD(rx_frame_too_long_errors);
11846 ESTAT_ADD(rx_jabbers);
11847 ESTAT_ADD(rx_undersize_packets);
11848 ESTAT_ADD(rx_in_length_errors);
11849 ESTAT_ADD(rx_out_length_errors);
11850 ESTAT_ADD(rx_64_or_less_octet_packets);
11851 ESTAT_ADD(rx_65_to_127_octet_packets);
11852 ESTAT_ADD(rx_128_to_255_octet_packets);
11853 ESTAT_ADD(rx_256_to_511_octet_packets);
11854 ESTAT_ADD(rx_512_to_1023_octet_packets);
11855 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11856 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11857 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11858 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11859 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11860
11861 ESTAT_ADD(tx_octets);
11862 ESTAT_ADD(tx_collisions);
11863 ESTAT_ADD(tx_xon_sent);
11864 ESTAT_ADD(tx_xoff_sent);
11865 ESTAT_ADD(tx_flow_control);
11866 ESTAT_ADD(tx_mac_errors);
11867 ESTAT_ADD(tx_single_collisions);
11868 ESTAT_ADD(tx_mult_collisions);
11869 ESTAT_ADD(tx_deferred);
11870 ESTAT_ADD(tx_excessive_collisions);
11871 ESTAT_ADD(tx_late_collisions);
11872 ESTAT_ADD(tx_collide_2times);
11873 ESTAT_ADD(tx_collide_3times);
11874 ESTAT_ADD(tx_collide_4times);
11875 ESTAT_ADD(tx_collide_5times);
11876 ESTAT_ADD(tx_collide_6times);
11877 ESTAT_ADD(tx_collide_7times);
11878 ESTAT_ADD(tx_collide_8times);
11879 ESTAT_ADD(tx_collide_9times);
11880 ESTAT_ADD(tx_collide_10times);
11881 ESTAT_ADD(tx_collide_11times);
11882 ESTAT_ADD(tx_collide_12times);
11883 ESTAT_ADD(tx_collide_13times);
11884 ESTAT_ADD(tx_collide_14times);
11885 ESTAT_ADD(tx_collide_15times);
11886 ESTAT_ADD(tx_ucast_packets);
11887 ESTAT_ADD(tx_mcast_packets);
11888 ESTAT_ADD(tx_bcast_packets);
11889 ESTAT_ADD(tx_carrier_sense_errors);
11890 ESTAT_ADD(tx_discards);
11891 ESTAT_ADD(tx_errors);
11892
11893 ESTAT_ADD(dma_writeq_full);
11894 ESTAT_ADD(dma_write_prioq_full);
11895 ESTAT_ADD(rxbds_empty);
11896 ESTAT_ADD(rx_discards);
11897 ESTAT_ADD(rx_errors);
11898 ESTAT_ADD(rx_threshold_hit);
11899
11900 ESTAT_ADD(dma_readq_full);
11901 ESTAT_ADD(dma_read_prioq_full);
11902 ESTAT_ADD(tx_comp_queue_full);
11903
11904 ESTAT_ADD(ring_set_send_prod_index);
11905 ESTAT_ADD(ring_status_update);
11906 ESTAT_ADD(nic_irqs);
11907 ESTAT_ADD(nic_avoided_irqs);
11908 ESTAT_ADD(nic_tx_threshold_hit);
11909
11910 ESTAT_ADD(mbuf_lwm_thresh_hit);
11911 }
11912
11913 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11914 {
11915 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11916 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11917
11918 stats->rx_packets = old_stats->rx_packets +
11919 get_stat64(&hw_stats->rx_ucast_packets) +
11920 get_stat64(&hw_stats->rx_mcast_packets) +
11921 get_stat64(&hw_stats->rx_bcast_packets);
11922
11923 stats->tx_packets = old_stats->tx_packets +
11924 get_stat64(&hw_stats->tx_ucast_packets) +
11925 get_stat64(&hw_stats->tx_mcast_packets) +
11926 get_stat64(&hw_stats->tx_bcast_packets);
11927
11928 stats->rx_bytes = old_stats->rx_bytes +
11929 get_stat64(&hw_stats->rx_octets);
11930 stats->tx_bytes = old_stats->tx_bytes +
11931 get_stat64(&hw_stats->tx_octets);
11932
11933 stats->rx_errors = old_stats->rx_errors +
11934 get_stat64(&hw_stats->rx_errors);
11935 stats->tx_errors = old_stats->tx_errors +
11936 get_stat64(&hw_stats->tx_errors) +
11937 get_stat64(&hw_stats->tx_mac_errors) +
11938 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11939 get_stat64(&hw_stats->tx_discards);
11940
11941 stats->multicast = old_stats->multicast +
11942 get_stat64(&hw_stats->rx_mcast_packets);
11943 stats->collisions = old_stats->collisions +
11944 get_stat64(&hw_stats->tx_collisions);
11945
11946 stats->rx_length_errors = old_stats->rx_length_errors +
11947 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11948 get_stat64(&hw_stats->rx_undersize_packets);
11949
11950 stats->rx_frame_errors = old_stats->rx_frame_errors +
11951 get_stat64(&hw_stats->rx_align_errors);
11952 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11953 get_stat64(&hw_stats->tx_discards);
11954 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11955 get_stat64(&hw_stats->tx_carrier_sense_errors);
11956
11957 stats->rx_crc_errors = old_stats->rx_crc_errors +
11958 tg3_calc_crc_errors(tp);
11959
11960 stats->rx_missed_errors = old_stats->rx_missed_errors +
11961 get_stat64(&hw_stats->rx_discards);
11962
11963 stats->rx_dropped = tp->rx_dropped;
11964 stats->tx_dropped = tp->tx_dropped;
11965 }
11966
11967 static int tg3_get_regs_len(struct net_device *dev)
11968 {
11969 return TG3_REG_BLK_SIZE;
11970 }
11971
11972 static void tg3_get_regs(struct net_device *dev,
11973 struct ethtool_regs *regs, void *_p)
11974 {
11975 struct tg3 *tp = netdev_priv(dev);
11976
11977 regs->version = 0;
11978
11979 memset(_p, 0, TG3_REG_BLK_SIZE);
11980
11981 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11982 return;
11983
11984 tg3_full_lock(tp, 0);
11985
11986 tg3_dump_legacy_regs(tp, (u32 *)_p);
11987
11988 tg3_full_unlock(tp);
11989 }
11990
11991 static int tg3_get_eeprom_len(struct net_device *dev)
11992 {
11993 struct tg3 *tp = netdev_priv(dev);
11994
11995 return tp->nvram_size;
11996 }
11997
11998 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11999 {
12000 struct tg3 *tp = netdev_priv(dev);
12001 int ret, cpmu_restore = 0;
12002 u8 *pd;
12003 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12004 __be32 val;
12005
12006 if (tg3_flag(tp, NO_NVRAM))
12007 return -EINVAL;
12008
12009 offset = eeprom->offset;
12010 len = eeprom->len;
12011 eeprom->len = 0;
12012
12013 eeprom->magic = TG3_EEPROM_MAGIC;
12014
12015 /* Override clock, link aware and link idle modes */
12016 if (tg3_flag(tp, CPMU_PRESENT)) {
12017 cpmu_val = tr32(TG3_CPMU_CTRL);
12018 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12019 CPMU_CTRL_LINK_IDLE_MODE)) {
12020 tw32(TG3_CPMU_CTRL, cpmu_val &
12021 ~(CPMU_CTRL_LINK_AWARE_MODE |
12022 CPMU_CTRL_LINK_IDLE_MODE));
12023 cpmu_restore = 1;
12024 }
12025 }
12026 tg3_override_clk(tp);
12027
12028 if (offset & 3) {
12029 /* adjustments to start on required 4 byte boundary */
12030 b_offset = offset & 3;
12031 b_count = 4 - b_offset;
12032 if (b_count > len) {
12033 /* i.e. offset=1 len=2 */
12034 b_count = len;
12035 }
12036 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12037 if (ret)
12038 goto eeprom_done;
12039 memcpy(data, ((char *)&val) + b_offset, b_count);
12040 len -= b_count;
12041 offset += b_count;
12042 eeprom->len += b_count;
12043 }
12044
12045 /* read bytes up to the last 4 byte boundary */
12046 pd = &data[eeprom->len];
12047 for (i = 0; i < (len - (len & 3)); i += 4) {
12048 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12049 if (ret) {
12050 if (i)
12051 i -= 4;
12052 eeprom->len += i;
12053 goto eeprom_done;
12054 }
12055 memcpy(pd + i, &val, 4);
12056 if (need_resched()) {
12057 if (signal_pending(current)) {
12058 eeprom->len += i;
12059 ret = -EINTR;
12060 goto eeprom_done;
12061 }
12062 cond_resched();
12063 }
12064 }
12065 eeprom->len += i;
12066
12067 if (len & 3) {
12068 /* read last bytes not ending on 4 byte boundary */
12069 pd = &data[eeprom->len];
12070 b_count = len & 3;
12071 b_offset = offset + len - b_count;
12072 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12073 if (ret)
12074 goto eeprom_done;
12075 memcpy(pd, &val, b_count);
12076 eeprom->len += b_count;
12077 }
12078 ret = 0;
12079
12080 eeprom_done:
12081 /* Restore clock, link aware and link idle modes */
12082 tg3_restore_clk(tp);
12083 if (cpmu_restore)
12084 tw32(TG3_CPMU_CTRL, cpmu_val);
12085
12086 return ret;
12087 }
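/* Worked example of the alignment handling above (illustrative):
 * offset = 5, len = 10.  Head: b_offset = 1, b_count = 3, so the word
 * at offset 4 is read and bytes 5..7 are copied out.  Middle: one
 * aligned word is read at offset 8.  Tail: len & 3 = 3, so the word at
 * offset 12 is read and its first 3 bytes are copied.
 * 3 + 4 + 3 = 10 bytes total, matching the caller's request.
 */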
12088
12089 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12090 {
12091 struct tg3 *tp = netdev_priv(dev);
12092 int ret;
12093 u32 offset, len, b_offset, odd_len;
12094 u8 *buf;
12095 __be32 start = 0, end;
12096
12097 if (tg3_flag(tp, NO_NVRAM) ||
12098 eeprom->magic != TG3_EEPROM_MAGIC)
12099 return -EINVAL;
12100
12101 offset = eeprom->offset;
12102 len = eeprom->len;
12103
12104 if ((b_offset = (offset & 3))) {
12105 /* adjustments to start on required 4 byte boundary */
12106 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12107 if (ret)
12108 return ret;
12109 len += b_offset;
12110 offset &= ~3;
12111 if (len < 4)
12112 len = 4;
12113 }
12114
12115 odd_len = 0;
12116 if (len & 3) {
12117 /* adjustments to end on required 4 byte boundary */
12118 odd_len = 1;
12119 len = (len + 3) & ~3;
12120 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12121 if (ret)
12122 return ret;
12123 }
12124
12125 buf = data;
12126 if (b_offset || odd_len) {
12127 buf = kmalloc(len, GFP_KERNEL);
12128 if (!buf)
12129 return -ENOMEM;
12130 if (b_offset)
12131 memcpy(buf, &start, 4);
12132 if (odd_len)
12133 memcpy(buf+len-4, &end, 4);
12134 memcpy(buf + b_offset, data, eeprom->len);
12135 }
12136
12137 ret = tg3_nvram_write_block(tp, offset, len, buf);
12138
12139 if (buf != data)
12140 kfree(buf);
12141
12142 return ret;
12143 }
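/* Worked example of the read-modify-write above (illustrative):
 * offset = 6, len = 5.  b_offset = 2, so the word at offset 4 is read
 * into 'start' and len/offset become 7/4; since 7 & 3 != 0, len is
 * rounded up to 8 and the word at 4 + 8 - 4 = 8 is read into 'end'.
 * The bounce buffer then holds start | caller data | end, and a single
 * aligned 8-byte block is written back at offset 4.
 */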
12144
12145 static int tg3_get_link_ksettings(struct net_device *dev,
12146 struct ethtool_link_ksettings *cmd)
12147 {
12148 struct tg3 *tp = netdev_priv(dev);
12149 u32 supported, advertising;
12150
12151 if (tg3_flag(tp, USE_PHYLIB)) {
12152 struct phy_device *phydev;
12153 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12154 return -EAGAIN;
12155 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12156 phy_ethtool_ksettings_get(phydev, cmd);
12157
12158 return 0;
12159 }
12160
12161 supported = (SUPPORTED_Autoneg);
12162
12163 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12164 supported |= (SUPPORTED_1000baseT_Half |
12165 SUPPORTED_1000baseT_Full);
12166
12167 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12168 supported |= (SUPPORTED_100baseT_Half |
12169 SUPPORTED_100baseT_Full |
12170 SUPPORTED_10baseT_Half |
12171 SUPPORTED_10baseT_Full |
12172 SUPPORTED_TP);
12173 cmd->base.port = PORT_TP;
12174 } else {
12175 supported |= SUPPORTED_FIBRE;
12176 cmd->base.port = PORT_FIBRE;
12177 }
12178 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12179 supported);
12180
12181 advertising = tp->link_config.advertising;
12182 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12183 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12184 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12185 advertising |= ADVERTISED_Pause;
12186 } else {
12187 advertising |= ADVERTISED_Pause |
12188 ADVERTISED_Asym_Pause;
12189 }
12190 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12191 advertising |= ADVERTISED_Asym_Pause;
12192 }
12193 }
12194 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12195 advertising);
12196
12197 if (netif_running(dev) && tp->link_up) {
12198 cmd->base.speed = tp->link_config.active_speed;
12199 cmd->base.duplex = tp->link_config.active_duplex;
12200 ethtool_convert_legacy_u32_to_link_mode(
12201 cmd->link_modes.lp_advertising,
12202 tp->link_config.rmt_adv);
12203
12204 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12205 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12206 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12207 else
12208 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12209 }
12210 } else {
12211 cmd->base.speed = SPEED_UNKNOWN;
12212 cmd->base.duplex = DUPLEX_UNKNOWN;
12213 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12214 }
12215 cmd->base.phy_address = tp->phy_addr;
12216 cmd->base.autoneg = tp->link_config.autoneg;
12217 return 0;
12218 }
12219
12220 static int tg3_set_link_ksettings(struct net_device *dev,
12221 const struct ethtool_link_ksettings *cmd)
12222 {
12223 struct tg3 *tp = netdev_priv(dev);
12224 u32 speed = cmd->base.speed;
12225 u32 advertising;
12226
12227 if (tg3_flag(tp, USE_PHYLIB)) {
12228 struct phy_device *phydev;
12229 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12230 return -EAGAIN;
12231 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12232 return phy_ethtool_ksettings_set(phydev, cmd);
12233 }
12234
12235 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12236 cmd->base.autoneg != AUTONEG_DISABLE)
12237 return -EINVAL;
12238
12239 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12240 cmd->base.duplex != DUPLEX_FULL &&
12241 cmd->base.duplex != DUPLEX_HALF)
12242 return -EINVAL;
12243
12244 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12245 cmd->link_modes.advertising);
12246
12247 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12248 u32 mask = ADVERTISED_Autoneg |
12249 ADVERTISED_Pause |
12250 ADVERTISED_Asym_Pause;
12251
12252 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12253 mask |= ADVERTISED_1000baseT_Half |
12254 ADVERTISED_1000baseT_Full;
12255
12256 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12257 mask |= ADVERTISED_100baseT_Half |
12258 ADVERTISED_100baseT_Full |
12259 ADVERTISED_10baseT_Half |
12260 ADVERTISED_10baseT_Full |
12261 ADVERTISED_TP;
12262 else
12263 mask |= ADVERTISED_FIBRE;
12264
12265 if (advertising & ~mask)
12266 return -EINVAL;
12267
12268 mask &= (ADVERTISED_1000baseT_Half |
12269 ADVERTISED_1000baseT_Full |
12270 ADVERTISED_100baseT_Half |
12271 ADVERTISED_100baseT_Full |
12272 ADVERTISED_10baseT_Half |
12273 ADVERTISED_10baseT_Full);
12274
12275 advertising &= mask;
12276 } else {
12277 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12278 if (speed != SPEED_1000)
12279 return -EINVAL;
12280
12281 if (cmd->base.duplex != DUPLEX_FULL)
12282 return -EINVAL;
12283 } else {
12284 if (speed != SPEED_100 &&
12285 speed != SPEED_10)
12286 return -EINVAL;
12287 }
12288 }
12289
12290 tg3_full_lock(tp, 0);
12291
12292 tp->link_config.autoneg = cmd->base.autoneg;
12293 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12294 tp->link_config.advertising = (advertising |
12295 ADVERTISED_Autoneg);
12296 tp->link_config.speed = SPEED_UNKNOWN;
12297 tp->link_config.duplex = DUPLEX_UNKNOWN;
12298 } else {
12299 tp->link_config.advertising = 0;
12300 tp->link_config.speed = speed;
12301 tp->link_config.duplex = cmd->base.duplex;
12302 }
12303
12304 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12305
12306 tg3_warn_mgmt_link_flap(tp);
12307
12308 if (netif_running(dev))
12309 tg3_setup_phy(tp, true);
12310
12311 tg3_full_unlock(tp);
12312
12313 return 0;
12314 }
12315
12316 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12317 {
12318 struct tg3 *tp = netdev_priv(dev);
12319
12320 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12321 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12322 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12323 }
12324
12325 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12326 {
12327 struct tg3 *tp = netdev_priv(dev);
12328
12329 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12330 wol->supported = WAKE_MAGIC;
12331 else
12332 wol->supported = 0;
12333 wol->wolopts = 0;
12334 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12335 wol->wolopts = WAKE_MAGIC;
12336 memset(&wol->sopass, 0, sizeof(wol->sopass));
12337 }
12338
12339 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12340 {
12341 struct tg3 *tp = netdev_priv(dev);
12342 struct device *dp = &tp->pdev->dev;
12343
12344 if (wol->wolopts & ~WAKE_MAGIC)
12345 return -EINVAL;
12346 if ((wol->wolopts & WAKE_MAGIC) &&
12347 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12348 return -EINVAL;
12349
12350 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12351
12352 if (device_may_wakeup(dp))
12353 tg3_flag_set(tp, WOL_ENABLE);
12354 else
12355 tg3_flag_clear(tp, WOL_ENABLE);
12356
12357 return 0;
12358 }
12359
12360 static u32 tg3_get_msglevel(struct net_device *dev)
12361 {
12362 struct tg3 *tp = netdev_priv(dev);
12363 return tp->msg_enable;
12364 }
12365
12366 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12367 {
12368 struct tg3 *tp = netdev_priv(dev);
12369 tp->msg_enable = value;
12370 }
12371
12372 static int tg3_nway_reset(struct net_device *dev)
12373 {
12374 struct tg3 *tp = netdev_priv(dev);
12375 int r;
12376
12377 if (!netif_running(dev))
12378 return -EAGAIN;
12379
12380 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12381 return -EINVAL;
12382
12383 tg3_warn_mgmt_link_flap(tp);
12384
12385 if (tg3_flag(tp, USE_PHYLIB)) {
12386 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12387 return -EAGAIN;
12388 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12389 } else {
12390 u32 bmcr;
12391
12392 spin_lock_bh(&tp->lock);
12393 r = -EINVAL;
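		/* BMCR is read twice below and the first result is
		 * discarded; the back-to-back reads appear intentional,
		 * presumably so the second, trusted read returns fresh
		 * PHY state (an inference -- the original code does not
		 * document it).
		 */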
12394 tg3_readphy(tp, MII_BMCR, &bmcr);
12395 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12396 ((bmcr & BMCR_ANENABLE) ||
12397 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12398 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12399 BMCR_ANENABLE);
12400 r = 0;
12401 }
12402 spin_unlock_bh(&tp->lock);
12403 }
12404
12405 return r;
12406 }
12407
12408 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12409 {
12410 struct tg3 *tp = netdev_priv(dev);
12411
12412 ering->rx_max_pending = tp->rx_std_ring_mask;
12413 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12414 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12415 else
12416 ering->rx_jumbo_max_pending = 0;
12417
12418 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12419
12420 ering->rx_pending = tp->rx_pending;
12421 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12422 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12423 else
12424 ering->rx_jumbo_pending = 0;
12425
12426 ering->tx_pending = tp->napi[0].tx_pending;
12427 }
12428
12429 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12430 {
12431 struct tg3 *tp = netdev_priv(dev);
12432 int i, irq_sync = 0, err = 0;
12433 bool reset_phy = false;
12434
12435 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12436 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12437 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12438 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12439 (tg3_flag(tp, TSO_BUG) &&
12440 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12441 return -EINVAL;
12442
12443 if (netif_running(dev)) {
12444 tg3_phy_stop(tp);
12445 tg3_netif_stop(tp);
12446 irq_sync = 1;
12447 }
12448
12449 tg3_full_lock(tp, irq_sync);
12450
12451 tp->rx_pending = ering->rx_pending;
12452
12453 if (tg3_flag(tp, MAX_RXPEND_64) &&
12454 tp->rx_pending > 63)
12455 tp->rx_pending = 63;
12456
12457 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12458 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12459
12460 for (i = 0; i < tp->irq_max; i++)
12461 tp->napi[i].tx_pending = ering->tx_pending;
12462
12463 if (netif_running(dev)) {
12464 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12465 /* Reset PHY to avoid PHY lock up */
12466 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12467 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12468 tg3_asic_rev(tp) == ASIC_REV_5720)
12469 reset_phy = true;
12470
12471 err = tg3_restart_hw(tp, reset_phy);
12472 if (!err)
12473 tg3_netif_start(tp);
12474 }
12475
12476 tg3_full_unlock(tp);
12477
12478 if (irq_sync && !err)
12479 tg3_phy_start(tp);
12480
12481 return err;
12482 }
12483
12484 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12485 {
12486 struct tg3 *tp = netdev_priv(dev);
12487
12488 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12489
12490 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12491 epause->rx_pause = 1;
12492 else
12493 epause->rx_pause = 0;
12494
12495 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12496 epause->tx_pause = 1;
12497 else
12498 epause->tx_pause = 0;
12499 }
12500
12501 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12502 {
12503 struct tg3 *tp = netdev_priv(dev);
12504 int err = 0;
12505 bool reset_phy = false;
12506
12507 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12508 tg3_warn_mgmt_link_flap(tp);
12509
12510 if (tg3_flag(tp, USE_PHYLIB)) {
12511 struct phy_device *phydev;
12512
12513 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12514
12515 if (!phy_validate_pause(phydev, epause))
12516 return -EINVAL;
12517
12518 tp->link_config.flowctrl = 0;
12519 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12520 if (epause->rx_pause) {
12521 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12522
12523 if (epause->tx_pause) {
12524 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12525 }
12526 } else if (epause->tx_pause) {
12527 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12528 }
12529
12530 if (epause->autoneg)
12531 tg3_flag_set(tp, PAUSE_AUTONEG);
12532 else
12533 tg3_flag_clear(tp, PAUSE_AUTONEG);
12534
12535 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12536 if (phydev->autoneg) {
12537 /* phy_set_asym_pause() will
12538 * renegotiate the link to inform our
12539 * link partner of our flow control
12540 * settings, even if the flow control
12541 * is forced. Let tg3_adjust_link()
12542 * do the final flow control setup.
12543 */
12544 return 0;
12545 }
12546
12547 if (!epause->autoneg)
12548 tg3_setup_flow_control(tp, 0, 0);
12549 }
12550 } else {
12551 int irq_sync = 0;
12552
12553 if (netif_running(dev)) {
12554 tg3_netif_stop(tp);
12555 irq_sync = 1;
12556 }
12557
12558 tg3_full_lock(tp, irq_sync);
12559
12560 if (epause->autoneg)
12561 tg3_flag_set(tp, PAUSE_AUTONEG);
12562 else
12563 tg3_flag_clear(tp, PAUSE_AUTONEG);
12564 if (epause->rx_pause)
12565 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12566 else
12567 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12568 if (epause->tx_pause)
12569 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12570 else
12571 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12572
12573 if (netif_running(dev)) {
12574 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12575 /* Reset PHY to avoid PHY lock up */
12576 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12577 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12578 tg3_asic_rev(tp) == ASIC_REV_5720)
12579 reset_phy = true;
12580
12581 err = tg3_restart_hw(tp, reset_phy);
12582 if (!err)
12583 tg3_netif_start(tp);
12584 }
12585
12586 tg3_full_unlock(tp);
12587 }
12588
12589 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12590
12591 return err;
12592 }
12593
12594 static int tg3_get_sset_count(struct net_device *dev, int sset)
12595 {
12596 switch (sset) {
12597 case ETH_SS_TEST:
12598 return TG3_NUM_TEST;
12599 case ETH_SS_STATS:
12600 return TG3_NUM_STATS;
12601 default:
12602 return -EOPNOTSUPP;
12603 }
12604 }
12605
12606 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12607 u32 *rules __always_unused)
12608 {
12609 struct tg3 *tp = netdev_priv(dev);
12610
12611 if (!tg3_flag(tp, SUPPORT_MSIX))
12612 return -EOPNOTSUPP;
12613
12614 switch (info->cmd) {
12615 case ETHTOOL_GRXRINGS:
12616 if (netif_running(tp->dev))
12617 info->data = tp->rxq_cnt;
12618 else {
12619 info->data = num_online_cpus();
12620 if (info->data > TG3_RSS_MAX_NUM_QS)
12621 info->data = TG3_RSS_MAX_NUM_QS;
12622 }
12623
12624 return 0;
12625
12626 default:
12627 return -EOPNOTSUPP;
12628 }
12629 }
12630
12631 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12632 {
12633 u32 size = 0;
12634 struct tg3 *tp = netdev_priv(dev);
12635
12636 if (tg3_flag(tp, SUPPORT_MSIX))
12637 size = TG3_RSS_INDIR_TBL_SIZE;
12638
12639 return size;
12640 }
12641
12642 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12643 {
12644 struct tg3 *tp = netdev_priv(dev);
12645 int i;
12646
12647 if (hfunc)
12648 *hfunc = ETH_RSS_HASH_TOP;
12649 if (!indir)
12650 return 0;
12651
12652 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12653 indir[i] = tp->rss_ind_tbl[i];
12654
12655 return 0;
12656 }
12657
12658 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12659 const u8 hfunc)
12660 {
12661 struct tg3 *tp = netdev_priv(dev);
12662 size_t i;
12663
12664 /* We require at least one supported parameter to be changed and no
12665 * change in any of the unsupported parameters
12666 */
12667 if (key ||
12668 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12669 return -EOPNOTSUPP;
12670
12671 if (!indir)
12672 return 0;
12673
12674 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12675 tp->rss_ind_tbl[i] = indir[i];
12676
12677 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12678 return 0;
12679
12680 /* It is legal to write the indirection
12681 * table while the device is running.
12682 */
12683 tg3_full_lock(tp, 0);
12684 tg3_rss_write_indir_tbl(tp);
12685 tg3_full_unlock(tp);
12686
12687 return 0;
12688 }
12689
12690 static void tg3_get_channels(struct net_device *dev,
12691 struct ethtool_channels *channel)
12692 {
12693 struct tg3 *tp = netdev_priv(dev);
12694 u32 deflt_qs = netif_get_num_default_rss_queues();
12695
12696 channel->max_rx = tp->rxq_max;
12697 channel->max_tx = tp->txq_max;
12698
12699 if (netif_running(dev)) {
12700 channel->rx_count = tp->rxq_cnt;
12701 channel->tx_count = tp->txq_cnt;
12702 } else {
12703 if (tp->rxq_req)
12704 channel->rx_count = tp->rxq_req;
12705 else
12706 channel->rx_count = min(deflt_qs, tp->rxq_max);
12707
12708 if (tp->txq_req)
12709 channel->tx_count = tp->txq_req;
12710 else
12711 channel->tx_count = min(deflt_qs, tp->txq_max);
12712 }
12713 }
12714
12715 static int tg3_set_channels(struct net_device *dev,
12716 struct ethtool_channels *channel)
12717 {
12718 struct tg3 *tp = netdev_priv(dev);
12719
12720 if (!tg3_flag(tp, SUPPORT_MSIX))
12721 return -EOPNOTSUPP;
12722
12723 if (channel->rx_count > tp->rxq_max ||
12724 channel->tx_count > tp->txq_max)
12725 return -EINVAL;
12726
12727 tp->rxq_req = channel->rx_count;
12728 tp->txq_req = channel->tx_count;
12729
12730 if (!netif_running(dev))
12731 return 0;
12732
12733 tg3_stop(tp);
12734
12735 tg3_carrier_off(tp);
12736
12737 tg3_start(tp, true, false, false);
12738
12739 return 0;
12740 }
12741
12742 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12743 {
12744 switch (stringset) {
12745 case ETH_SS_STATS:
12746 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12747 break;
12748 case ETH_SS_TEST:
12749 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12750 break;
12751 default:
12752 WARN_ON(1); /* we need a WARN() */
12753 break;
12754 }
12755 }
12756
12757 static int tg3_set_phys_id(struct net_device *dev,
12758 enum ethtool_phys_id_state state)
12759 {
12760 struct tg3 *tp = netdev_priv(dev);
12761
12762 switch (state) {
12763 case ETHTOOL_ID_ACTIVE:
12764 return 1; /* cycle on/off once per second */
12765
12766 case ETHTOOL_ID_ON:
12767 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12768 LED_CTRL_1000MBPS_ON |
12769 LED_CTRL_100MBPS_ON |
12770 LED_CTRL_10MBPS_ON |
12771 LED_CTRL_TRAFFIC_OVERRIDE |
12772 LED_CTRL_TRAFFIC_BLINK |
12773 LED_CTRL_TRAFFIC_LED);
12774 break;
12775
12776 case ETHTOOL_ID_OFF:
12777 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12778 LED_CTRL_TRAFFIC_OVERRIDE);
12779 break;
12780
12781 case ETHTOOL_ID_INACTIVE:
12782 tw32(MAC_LED_CTRL, tp->led_ctrl);
12783 break;
12784 }
12785
12786 return 0;
12787 }
12788
12789 static void tg3_get_ethtool_stats(struct net_device *dev,
12790 struct ethtool_stats *estats, u64 *tmp_stats)
12791 {
12792 struct tg3 *tp = netdev_priv(dev);
12793
12794 if (tp->hw_stats)
12795 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12796 else
12797 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12798 }
12799
12800 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12801 {
12802 int i;
12803 __be32 *buf;
12804 u32 offset = 0, len = 0;
12805 u32 magic, val;
12806
12807 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12808 return NULL;
12809
12810 if (magic == TG3_EEPROM_MAGIC) {
12811 for (offset = TG3_NVM_DIR_START;
12812 offset < TG3_NVM_DIR_END;
12813 offset += TG3_NVM_DIRENT_SIZE) {
12814 if (tg3_nvram_read(tp, offset, &val))
12815 return NULL;
12816
12817 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12818 TG3_NVM_DIRTYPE_EXTVPD)
12819 break;
12820 }
12821
12822 if (offset != TG3_NVM_DIR_END) {
12823 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12824 if (tg3_nvram_read(tp, offset + 4, &offset))
12825 return NULL;
12826
12827 offset = tg3_nvram_logical_addr(tp, offset);
12828 }
12829 }
12830
12831 if (!offset || !len) {
12832 offset = TG3_NVM_VPD_OFF;
12833 len = TG3_NVM_VPD_LEN;
12834 }
12835
12836 buf = kmalloc(len, GFP_KERNEL);
12837 if (buf == NULL)
12838 return NULL;
12839
12840 if (magic == TG3_EEPROM_MAGIC) {
12841 for (i = 0; i < len; i += 4) {
12842 /* The data is in little-endian format in NVRAM.
12843 * Use the big-endian read routines to preserve
12844 * the byte order as it exists in NVRAM.
12845 */
12846 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12847 goto error;
12848 }
12849 } else {
12850 u8 *ptr;
12851 ssize_t cnt;
12852 unsigned int pos = 0;
12853
12854 ptr = (u8 *)&buf[0];
12855 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12856 cnt = pci_read_vpd(tp->pdev, pos,
12857 len - pos, ptr);
12858 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12859 cnt = 0;
12860 else if (cnt < 0)
12861 goto error;
12862 }
12863 if (pos != len)
12864 goto error;
12865 }
12866
12867 *vpdlen = len;
12868
12869 return buf;
12870
12871 error:
12872 kfree(buf);
12873 return NULL;
12874 }
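/* Summary of the two VPD sources handled above: for devices whose NVRAM
 * carries the TG3_EEPROM_MAGIC signature, the VPD block is located via
 * the NVRAM directory (TG3_NVM_DIRTYPE_EXTVPD) or the fixed
 * TG3_NVM_VPD_OFF/TG3_NVM_VPD_LEN window and read word by word;
 * otherwise it is fetched through the PCI VPD capability with
 * pci_read_vpd(), retrying a bounded number of times on
 * -ETIMEDOUT/-EINTR.
 */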
12875
12876 #define NVRAM_TEST_SIZE 0x100
12877 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12878 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12879 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12880 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12881 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12882 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12883 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12884 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12885
12886 static int tg3_test_nvram(struct tg3 *tp)
12887 {
12888 u32 csum, magic, len;
12889 __be32 *buf;
12890 int i, j, k, err = 0, size;
12891
12892 if (tg3_flag(tp, NO_NVRAM))
12893 return 0;
12894
12895 if (tg3_nvram_read(tp, 0, &magic) != 0)
12896 return -EIO;
12897
12898 if (magic == TG3_EEPROM_MAGIC)
12899 size = NVRAM_TEST_SIZE;
12900 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12901 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12902 TG3_EEPROM_SB_FORMAT_1) {
12903 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12904 case TG3_EEPROM_SB_REVISION_0:
12905 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12906 break;
12907 case TG3_EEPROM_SB_REVISION_2:
12908 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12909 break;
12910 case TG3_EEPROM_SB_REVISION_3:
12911 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12912 break;
12913 case TG3_EEPROM_SB_REVISION_4:
12914 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12915 break;
12916 case TG3_EEPROM_SB_REVISION_5:
12917 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12918 break;
12919 case TG3_EEPROM_SB_REVISION_6:
12920 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12921 break;
12922 default:
12923 return -EIO;
12924 }
12925 } else
12926 return 0;
12927 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12928 size = NVRAM_SELFBOOT_HW_SIZE;
12929 else
12930 return -EIO;
12931
12932 buf = kmalloc(size, GFP_KERNEL);
12933 if (buf == NULL)
12934 return -ENOMEM;
12935
12936 err = -EIO;
12937 for (i = 0, j = 0; i < size; i += 4, j++) {
12938 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12939 if (err)
12940 break;
12941 }
12942 if (i < size)
12943 goto out;
12944
12945 /* Selfboot format */
12946 magic = be32_to_cpu(buf[0]);
12947 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12948 TG3_EEPROM_MAGIC_FW) {
12949 u8 *buf8 = (u8 *) buf, csum8 = 0;
12950
12951 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12952 TG3_EEPROM_SB_REVISION_2) {
12953 /* For rev 2, the csum doesn't include the MBA. */
12954 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12955 csum8 += buf8[i];
12956 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12957 csum8 += buf8[i];
12958 } else {
12959 for (i = 0; i < size; i++)
12960 csum8 += buf8[i];
12961 }
12962
12963 if (csum8 == 0) {
12964 err = 0;
12965 goto out;
12966 }
12967
12968 err = -EIO;
12969 goto out;
12970 }
12971
12972 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12973 TG3_EEPROM_MAGIC_HW) {
12974 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12975 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12976 u8 *buf8 = (u8 *) buf;
12977
12978 /* Separate the parity bits and the data bytes. */
12979 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12980 if ((i == 0) || (i == 8)) {
12981 int l;
12982 u8 msk;
12983
12984 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12985 parity[k++] = buf8[i] & msk;
12986 i++;
12987 } else if (i == 16) {
12988 int l;
12989 u8 msk;
12990
12991 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12992 parity[k++] = buf8[i] & msk;
12993 i++;
12994
12995 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12996 parity[k++] = buf8[i] & msk;
12997 i++;
12998 }
12999 data[j++] = buf8[i];
13000 }
13001
13002 err = -EIO;
13003 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13004 u8 hw8 = hweight8(data[i]);
13005
13006 if ((hw8 & 0x1) && parity[i])
13007 goto out;
13008 else if (!(hw8 & 0x1) && !parity[i])
13009 goto out;
13010 }
13011 err = 0;
13012 goto out;
13013 }
13014
13015 err = -EIO;
13016
13017 /* Bootstrap checksum at offset 0x10 */
13018 csum = calc_crc((unsigned char *) buf, 0x10);
13019 if (csum != le32_to_cpu(buf[0x10/4]))
13020 goto out;
13021
13022 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13023 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13024 if (csum != le32_to_cpu(buf[0xfc/4]))
13025 goto out;
13026
13027 kfree(buf);
13028
13029 buf = tg3_vpd_readblock(tp, &len);
13030 if (!buf)
13031 return -ENOMEM;
13032
13033 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13034 if (i > 0) {
13035 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13036 if (j < 0)
13037 goto out;
13038
13039 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13040 goto out;
13041
13042 i += PCI_VPD_LRDT_TAG_SIZE;
13043 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13044 PCI_VPD_RO_KEYWORD_CHKSUM);
13045 if (j > 0) {
13046 u8 csum8 = 0;
13047
13048 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13049
13050 for (i = 0; i <= j; i++)
13051 csum8 += ((u8 *)buf)[i];
13052
13053 if (csum8)
13054 goto out;
13055 }
13056 }
13057
13058 err = 0;
13059
13060 out:
13061 kfree(buf);
13062 return err;
13063 }
13064
13065 #define TG3_SERDES_TIMEOUT_SEC 2
13066 #define TG3_COPPER_TIMEOUT_SEC 6
13067
13068 static int tg3_test_link(struct tg3 *tp)
13069 {
13070 int i, max;
13071
13072 if (!netif_running(tp->dev))
13073 return -ENODEV;
13074
13075 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13076 max = TG3_SERDES_TIMEOUT_SEC;
13077 else
13078 max = TG3_COPPER_TIMEOUT_SEC;
13079
13080 for (i = 0; i < max; i++) {
13081 if (tp->link_up)
13082 return 0;
13083
13084 if (msleep_interruptible(1000))
13085 break;
13086 }
13087
13088 return -EIO;
13089 }
13090
13091 /* Only test the commonly used registers */
13092 static int tg3_test_registers(struct tg3 *tp)
13093 {
13094 int i, is_5705, is_5750;
13095 u32 offset, read_mask, write_mask, val, save_val, read_val;
13096 static struct {
13097 u16 offset;
13098 u16 flags;
13099 #define TG3_FL_5705 0x1
13100 #define TG3_FL_NOT_5705 0x2
13101 #define TG3_FL_NOT_5788 0x4
13102 #define TG3_FL_NOT_5750 0x8
13103 u32 read_mask;
13104 u32 write_mask;
13105 } reg_tbl[] = {
13106 /* MAC Control Registers */
13107 { MAC_MODE, TG3_FL_NOT_5705,
13108 0x00000000, 0x00ef6f8c },
13109 { MAC_MODE, TG3_FL_5705,
13110 0x00000000, 0x01ef6b8c },
13111 { MAC_STATUS, TG3_FL_NOT_5705,
13112 0x03800107, 0x00000000 },
13113 { MAC_STATUS, TG3_FL_5705,
13114 0x03800100, 0x00000000 },
13115 { MAC_ADDR_0_HIGH, 0x0000,
13116 0x00000000, 0x0000ffff },
13117 { MAC_ADDR_0_LOW, 0x0000,
13118 0x00000000, 0xffffffff },
13119 { MAC_RX_MTU_SIZE, 0x0000,
13120 0x00000000, 0x0000ffff },
13121 { MAC_TX_MODE, 0x0000,
13122 0x00000000, 0x00000070 },
13123 { MAC_TX_LENGTHS, 0x0000,
13124 0x00000000, 0x00003fff },
13125 { MAC_RX_MODE, TG3_FL_NOT_5705,
13126 0x00000000, 0x000007fc },
13127 { MAC_RX_MODE, TG3_FL_5705,
13128 0x00000000, 0x000007dc },
13129 { MAC_HASH_REG_0, 0x0000,
13130 0x00000000, 0xffffffff },
13131 { MAC_HASH_REG_1, 0x0000,
13132 0x00000000, 0xffffffff },
13133 { MAC_HASH_REG_2, 0x0000,
13134 0x00000000, 0xffffffff },
13135 { MAC_HASH_REG_3, 0x0000,
13136 0x00000000, 0xffffffff },
13137
13138 /* Receive Data and Receive BD Initiator Control Registers. */
13139 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13140 0x00000000, 0xffffffff },
13141 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13142 0x00000000, 0xffffffff },
13143 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13144 0x00000000, 0x00000003 },
13145 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13146 0x00000000, 0xffffffff },
13147 { RCVDBDI_STD_BD+0, 0x0000,
13148 0x00000000, 0xffffffff },
13149 { RCVDBDI_STD_BD+4, 0x0000,
13150 0x00000000, 0xffffffff },
13151 { RCVDBDI_STD_BD+8, 0x0000,
13152 0x00000000, 0xffff0002 },
13153 { RCVDBDI_STD_BD+0xc, 0x0000,
13154 0x00000000, 0xffffffff },
13155
13156 /* Receive BD Initiator Control Registers. */
13157 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13158 0x00000000, 0xffffffff },
13159 { RCVBDI_STD_THRESH, TG3_FL_5705,
13160 0x00000000, 0x000003ff },
13161 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13162 0x00000000, 0xffffffff },
13163
13164 /* Host Coalescing Control Registers. */
13165 { HOSTCC_MODE, TG3_FL_NOT_5705,
13166 0x00000000, 0x00000004 },
13167 { HOSTCC_MODE, TG3_FL_5705,
13168 0x00000000, 0x000000f6 },
13169 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13170 0x00000000, 0xffffffff },
13171 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13172 0x00000000, 0x000003ff },
13173 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13174 0x00000000, 0xffffffff },
13175 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13176 0x00000000, 0x000003ff },
13177 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13178 0x00000000, 0xffffffff },
13179 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13180 0x00000000, 0x000000ff },
13181 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13182 0x00000000, 0xffffffff },
13183 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13184 0x00000000, 0x000000ff },
13185 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13186 0x00000000, 0xffffffff },
13187 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13188 0x00000000, 0xffffffff },
13189 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13190 0x00000000, 0xffffffff },
13191 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13192 0x00000000, 0x000000ff },
13193 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13194 0x00000000, 0xffffffff },
13195 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13196 0x00000000, 0x000000ff },
13197 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13198 0x00000000, 0xffffffff },
13199 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13200 0x00000000, 0xffffffff },
13201 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13202 0x00000000, 0xffffffff },
13203 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13204 0x00000000, 0xffffffff },
13205 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13206 0x00000000, 0xffffffff },
13207 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13208 0xffffffff, 0x00000000 },
13209 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13210 0xffffffff, 0x00000000 },
13211
13212 /* Buffer Manager Control Registers. */
13213 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13214 0x00000000, 0x007fff80 },
13215 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13216 0x00000000, 0x007fffff },
13217 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13218 0x00000000, 0x0000003f },
13219 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13220 0x00000000, 0x000001ff },
13221 { BUFMGR_MB_HIGH_WATER, 0x0000,
13222 0x00000000, 0x000001ff },
13223 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13224 0xffffffff, 0x00000000 },
13225 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13226 0xffffffff, 0x00000000 },
13227
13228 /* Mailbox Registers */
13229 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13230 0x00000000, 0x000001ff },
13231 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13232 0x00000000, 0x000001ff },
13233 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13234 0x00000000, 0x000007ff },
13235 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13236 0x00000000, 0x000001ff },
13237
13238 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13239 };
13240
13241 is_5705 = is_5750 = 0;
13242 if (tg3_flag(tp, 5705_PLUS)) {
13243 is_5705 = 1;
13244 if (tg3_flag(tp, 5750_PLUS))
13245 is_5750 = 1;
13246 }
13247
13248 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13249 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13250 continue;
13251
13252 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13253 continue;
13254
13255 if (tg3_flag(tp, IS_5788) &&
13256 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13257 continue;
13258
13259 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13260 continue;
13261
13262 offset = (u32) reg_tbl[i].offset;
13263 read_mask = reg_tbl[i].read_mask;
13264 write_mask = reg_tbl[i].write_mask;
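/* Illustrative mask semantics: read_mask selects read-only bits that
 * must survive any write unchanged, while write_mask selects the
 * read/write bits that must accept both all-zeros and all-ones.
 * E.g. a hypothetical register with read_mask 0x0000ffff and
 * write_mask 0xffff0000 must keep its low half across a write of 0
 * and read back all ones in the high half after a write of ~0.
 */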
13265
13266 /* Save the original register content */
13267 save_val = tr32(offset);
13268
13269 /* Determine the read-only value. */
13270 read_val = save_val & read_mask;
13271
13272 /* Write zero to the register, then make sure the read-only bits
13273 * are not changed and the read/write bits are all zeros.
13274 */
13275 tw32(offset, 0);
13276
13277 val = tr32(offset);
13278
13279 /* Test the read-only and read/write bits. */
13280 if (((val & read_mask) != read_val) || (val & write_mask))
13281 goto out;
13282
13283 /* Write ones to all the bits defined by RdMask and WrMask, then
13284 * make sure the read-only bits are not changed and the
13285 * read/write bits are all ones.
13286 */
13287 tw32(offset, read_mask | write_mask);
13288
13289 val = tr32(offset);
13290
13291 /* Test the read-only bits. */
13292 if ((val & read_mask) != read_val)
13293 goto out;
13294
13295 /* Test the read/write bits. */
13296 if ((val & write_mask) != write_mask)
13297 goto out;
13298
13299 tw32(offset, save_val);
13300 }
13301
13302 return 0;
13303
13304 out:
13305 if (netif_msg_hw(tp))
13306 netdev_err(tp->dev,
13307 "Register test failed at offset %x\n", offset);
13308 tw32(offset, save_val);
13309 return -EIO;
13310 }
13311
13312 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13313 {
13314 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13315 int i;
13316 u32 j;
13317
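/* Walk the region once per pattern. All-zeros and all-ones catch
 * stuck-at bits, and the mixed 0xaa55a55a pattern additionally helps
 * expose shorted or swapped data lines (a common memory-test
 * heuristic).
 */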
13318 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13319 for (j = 0; j < len; j += 4) {
13320 u32 val;
13321
13322 tg3_write_mem(tp, offset + j, test_pattern[i]);
13323 tg3_read_mem(tp, offset + j, &val);
13324 if (val != test_pattern[i])
13325 return -EIO;
13326 }
13327 }
13328 return 0;
13329 }
13330
13331 static int tg3_test_memory(struct tg3 *tp)
13332 {
13333 static struct mem_entry {
13334 u32 offset;
13335 u32 len;
13336 } mem_tbl_570x[] = {
13337 { 0x00000000, 0x00b50},
13338 { 0x00002000, 0x1c000},
13339 { 0xffffffff, 0x00000}
13340 }, mem_tbl_5705[] = {
13341 { 0x00000100, 0x0000c},
13342 { 0x00000200, 0x00008},
13343 { 0x00004000, 0x00800},
13344 { 0x00006000, 0x01000},
13345 { 0x00008000, 0x02000},
13346 { 0x00010000, 0x0e000},
13347 { 0xffffffff, 0x00000}
13348 }, mem_tbl_5755[] = {
13349 { 0x00000200, 0x00008},
13350 { 0x00004000, 0x00800},
13351 { 0x00006000, 0x00800},
13352 { 0x00008000, 0x02000},
13353 { 0x00010000, 0x0c000},
13354 { 0xffffffff, 0x00000}
13355 }, mem_tbl_5906[] = {
13356 { 0x00000200, 0x00008},
13357 { 0x00004000, 0x00400},
13358 { 0x00006000, 0x00400},
13359 { 0x00008000, 0x01000},
13360 { 0x00010000, 0x01000},
13361 { 0xffffffff, 0x00000}
13362 }, mem_tbl_5717[] = {
13363 { 0x00000200, 0x00008},
13364 { 0x00010000, 0x0a000},
13365 { 0x00020000, 0x13c00},
13366 { 0xffffffff, 0x00000}
13367 }, mem_tbl_57765[] = {
13368 { 0x00000200, 0x00008},
13369 { 0x00004000, 0x00800},
13370 { 0x00006000, 0x09800},
13371 { 0x00010000, 0x0a000},
13372 { 0xffffffff, 0x00000}
13373 };
13374 struct mem_entry *mem_tbl;
13375 int err = 0;
13376 int i;
13377
13378 if (tg3_flag(tp, 5717_PLUS))
13379 mem_tbl = mem_tbl_5717;
13380 else if (tg3_flag(tp, 57765_CLASS) ||
13381 tg3_asic_rev(tp) == ASIC_REV_5762)
13382 mem_tbl = mem_tbl_57765;
13383 else if (tg3_flag(tp, 5755_PLUS))
13384 mem_tbl = mem_tbl_5755;
13385 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13386 mem_tbl = mem_tbl_5906;
13387 else if (tg3_flag(tp, 5705_PLUS))
13388 mem_tbl = mem_tbl_5705;
13389 else
13390 mem_tbl = mem_tbl_570x;
13391
13392 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13393 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13394 if (err)
13395 break;
13396 }
13397
13398 return err;
13399 }
13400
13401 #define TG3_TSO_MSS 500
13402
13403 #define TG3_TSO_IP_HDR_LEN 20
13404 #define TG3_TSO_TCP_HDR_LEN 20
13405 #define TG3_TSO_TCP_OPT_LEN 12
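/* Canned frame used by the TSO loopback test. Rough layout, starting
 * after the two MAC addresses that the caller fills in:
 *   bytes  0- 1: Ethertype 0x0800 (IPv4)
 *   bytes  2-21: 20-byte IPv4 header, proto TCP, 10.0.0.1 -> 10.0.0.2
 *   bytes 22-41: 20-byte TCP header (ACK set, data offset 8)
 *   bytes 42-53: 12 bytes of TCP timestamp option (NOP, NOP, TS)
 * The IP total length and the checksums are left zero here and are
 * patched up at run time.
 */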
13406
13407 static const u8 tg3_tso_header[] = {
13408 0x08, 0x00,
13409 0x45, 0x00, 0x00, 0x00,
13410 0x00, 0x00, 0x40, 0x00,
13411 0x40, 0x06, 0x00, 0x00,
13412 0x0a, 0x00, 0x00, 0x01,
13413 0x0a, 0x00, 0x00, 0x02,
13414 0x0d, 0x00, 0xe0, 0x00,
13415 0x00, 0x00, 0x01, 0x00,
13416 0x00, 0x00, 0x02, 0x00,
13417 0x80, 0x10, 0x10, 0x00,
13418 0x14, 0x09, 0x00, 0x00,
13419 0x01, 0x01, 0x08, 0x0a,
13420 0x11, 0x11, 0x11, 0x11,
13421 0x11, 0x11, 0x11, 0x11,
13422 };
13423
13424 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13425 {
13426 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13427 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13428 u32 budget;
13429 struct sk_buff *skb;
13430 u8 *tx_data, *rx_data;
13431 dma_addr_t map;
13432 int num_pkts, tx_len, rx_len, i, err;
13433 struct tg3_rx_buffer_desc *desc;
13434 struct tg3_napi *tnapi, *rnapi;
13435 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13436
13437 tnapi = &tp->napi[0];
13438 rnapi = &tp->napi[0];
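/* With RSS the first receive return ring is serviced by napi[1], and
 * with TSS the transmit ring is driven from napi[1], so point the
 * test at the rings that will actually carry the looped-back packet.
 */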
13439 if (tp->irq_cnt > 1) {
13440 if (tg3_flag(tp, ENABLE_RSS))
13441 rnapi = &tp->napi[1];
13442 if (tg3_flag(tp, ENABLE_TSS))
13443 tnapi = &tp->napi[1];
13444 }
13445 coal_now = tnapi->coal_now | rnapi->coal_now;
13446
13447 err = -EIO;
13448
13449 tx_len = pktsz;
13450 skb = netdev_alloc_skb(tp->dev, tx_len);
13451 if (!skb)
13452 return -ENOMEM;
13453
13454 tx_data = skb_put(skb, tx_len);
13455 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13456 memset(tx_data + ETH_ALEN, 0x0, 8);
13457
13458 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13459
13460 if (tso_loopback) {
13461 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13462
13463 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13464 TG3_TSO_TCP_OPT_LEN;
13465
13466 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13467 sizeof(tg3_tso_header));
13468 mss = TG3_TSO_MSS;
13469
13470 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13471 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13472
13473 /* Set the total length field in the IP header */
13474 iph->tot_len = htons((u16)(mss + hdr_len));
13475
13476 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13477 TXD_FLAG_CPU_POST_DMA);
13478
13479 if (tg3_flag(tp, HW_TSO_1) ||
13480 tg3_flag(tp, HW_TSO_2) ||
13481 tg3_flag(tp, HW_TSO_3)) {
13482 struct tcphdr *th;
13483 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13484 th = (struct tcphdr *)&tx_data[val];
13485 th->check = 0;
13486 } else
13487 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13488
13489 if (tg3_flag(tp, HW_TSO_3)) {
13490 mss |= (hdr_len & 0xc) << 12;
13491 if (hdr_len & 0x10)
13492 base_flags |= 0x00000010;
13493 base_flags |= (hdr_len & 0x3e0) << 5;
13494 } else if (tg3_flag(tp, HW_TSO_2))
13495 mss |= hdr_len << 9;
13496 else if (tg3_flag(tp, HW_TSO_1) ||
13497 tg3_asic_rev(tp) == ASIC_REV_5705) {
13498 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13499 } else {
13500 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13501 }
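/* Worked example for the fixed header above, hdr_len = 20 + 20 + 12
 * = 52 = 0x34: HW_TSO_3 scatters it as (0x34 & 0xc) << 12 into mss,
 * bit 4 of hdr_len into base_flags bit 4, and (0x34 & 0x3e0) << 5
 * into base_flags; HW_TSO_2 stores it whole as 0x34 << 9 in mss; the
 * older parts only encode the 12 option bytes.
 */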
13502
13503 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13504 } else {
13505 num_pkts = 1;
13506 data_off = ETH_HLEN;
13507
13508 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13509 tx_len > VLAN_ETH_FRAME_LEN)
13510 base_flags |= TXD_FLAG_JMB_PKT;
13511 }
13512
13513 for (i = data_off; i < tx_len; i++)
13514 tx_data[i] = (u8) (i & 0xff);
13515
13516 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13517 if (pci_dma_mapping_error(tp->pdev, map)) {
13518 dev_kfree_skb(skb);
13519 return -EIO;
13520 }
13521
13522 val = tnapi->tx_prod;
13523 tnapi->tx_buffers[val].skb = skb;
13524 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13525
13526 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13527 rnapi->coal_now);
13528
13529 udelay(10);
13530
13531 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13532
13533 budget = tg3_tx_avail(tnapi);
13534 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13535 base_flags | TXD_FLAG_END, mss, 0)) {
13536 tnapi->tx_buffers[val].skb = NULL;
13537 dev_kfree_skb(skb);
13538 return -EIO;
13539 }
13540
13541 tnapi->tx_prod++;
13542
13543 /* Sync BD data before updating mailbox */
13544 wmb();
13545
13546 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13547 tr32_mailbox(tnapi->prodmbox);
13548
13549 udelay(10);
13550
13551 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13552 for (i = 0; i < 35; i++) {
13553 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13554 coal_now);
13555
13556 udelay(10);
13557
13558 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13559 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13560 if ((tx_idx == tnapi->tx_prod) &&
13561 (rx_idx == (rx_start_idx + num_pkts)))
13562 break;
13563 }
13564
13565 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13566 dev_kfree_skb(skb);
13567
13568 if (tx_idx != tnapi->tx_prod)
13569 goto out;
13570
13571 if (rx_idx != rx_start_idx + num_pkts)
13572 goto out;
13573
13574 val = data_off;
13575 while (rx_idx != rx_start_idx) {
13576 desc = &rnapi->rx_rcb[rx_start_idx++];
13577 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13578 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13579
13580 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13581 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13582 goto out;
13583
13584 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13585 - ETH_FCS_LEN;
13586
13587 if (!tso_loopback) {
13588 if (rx_len != tx_len)
13589 goto out;
13590
13591 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13592 if (opaque_key != RXD_OPAQUE_RING_STD)
13593 goto out;
13594 } else {
13595 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13596 goto out;
13597 }
13598 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13599 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13600 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13601 goto out;
13602 }
13603
13604 if (opaque_key == RXD_OPAQUE_RING_STD) {
13605 rx_data = tpr->rx_std_buffers[desc_idx].data;
13606 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13607 mapping);
13608 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13609 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13610 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13611 mapping);
13612 } else
13613 goto out;
13614
13615 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13616 PCI_DMA_FROMDEVICE);
13617
13618 rx_data += TG3_RX_OFFSET(tp);
13619 for (i = data_off; i < rx_len; i++, val++) {
13620 if (*(rx_data + i) != (u8) (val & 0xff))
13621 goto out;
13622 }
13623 }
13624
13625 err = 0;
13626
13627 /* tg3_free_rings will unmap and free the rx_data */
13628 out:
13629 return err;
13630 }
13631
13632 #define TG3_STD_LOOPBACK_FAILED 1
13633 #define TG3_JMB_LOOPBACK_FAILED 2
13634 #define TG3_TSO_LOOPBACK_FAILED 4
13635 #define TG3_LOOPBACK_FAILED \
13636 (TG3_STD_LOOPBACK_FAILED | \
13637 TG3_JMB_LOOPBACK_FAILED | \
13638 TG3_TSO_LOOPBACK_FAILED)
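/* Each data[TG3_*_LOOPB_TEST] slot below is a bitmask of the
 * sub-tests (standard, jumbo, TSO) that failed in that loopback
 * mode.
 */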
13639
13640 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13641 {
13642 int err = -EIO;
13643 u32 eee_cap;
13644 u32 jmb_pkt_sz = 9000;
13645
13646 if (tp->dma_limit)
13647 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13648
13649 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13650 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13651
13652 if (!netif_running(tp->dev)) {
13653 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13654 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13655 if (do_extlpbk)
13656 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13657 goto done;
13658 }
13659
13660 err = tg3_reset_hw(tp, true);
13661 if (err) {
13662 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13663 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13664 if (do_extlpbk)
13665 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13666 goto done;
13667 }
13668
13669 if (tg3_flag(tp, ENABLE_RSS)) {
13670 int i;
13671
13672 /* Reroute all rx packets to the 1st queue */
13673 for (i = MAC_RSS_INDIR_TBL_0;
13674 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13675 tw32(i, 0x0);
13676 }
13677
13678 /* HW errata - mac loopback fails in some cases on 5780.
13679 * Normal traffic and PHY loopback are not affected by
13680 * the errata. Also, the MAC loopback test is deprecated for
13681 * all newer ASIC revisions.
13682 */
13683 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13684 !tg3_flag(tp, CPMU_PRESENT)) {
13685 tg3_mac_loopback(tp, true);
13686
13687 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13688 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13689
13690 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13691 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13692 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13693
13694 tg3_mac_loopback(tp, false);
13695 }
13696
13697 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13698 !tg3_flag(tp, USE_PHYLIB)) {
13699 int i;
13700
13701 tg3_phy_lpbk_set(tp, 0, false);
13702
13703 /* Wait for link */
13704 for (i = 0; i < 100; i++) {
13705 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13706 break;
13707 mdelay(1);
13708 }
13709
13710 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13711 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13712 if (tg3_flag(tp, TSO_CAPABLE) &&
13713 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13714 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13715 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13716 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13717 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13718
13719 if (do_extlpbk) {
13720 tg3_phy_lpbk_set(tp, 0, true);
13721
13722 /* All link indications report up, but the hardware
13723 * isn't really ready for about 20 msec. Double it
13724 * to be sure.
13725 */
13726 mdelay(40);
13727
13728 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13729 data[TG3_EXT_LOOPB_TEST] |=
13730 TG3_STD_LOOPBACK_FAILED;
13731 if (tg3_flag(tp, TSO_CAPABLE) &&
13732 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13733 data[TG3_EXT_LOOPB_TEST] |=
13734 TG3_TSO_LOOPBACK_FAILED;
13735 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13736 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13737 data[TG3_EXT_LOOPB_TEST] |=
13738 TG3_JMB_LOOPBACK_FAILED;
13739 }
13740
13741 /* Re-enable gphy autopowerdown. */
13742 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13743 tg3_phy_toggle_apd(tp, true);
13744 }
13745
13746 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13747 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13748
13749 done:
13750 tp->phy_flags |= eee_cap;
13751
13752 return err;
13753 }
13754
13755 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13756 u64 *data)
13757 {
13758 struct tg3 *tp = netdev_priv(dev);
13759 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13760
13761 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13762 if (tg3_power_up(tp)) {
13763 etest->flags |= ETH_TEST_FL_FAILED;
13764 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13765 return;
13766 }
13767 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13768 }
13769
13770 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13771
13772 if (tg3_test_nvram(tp) != 0) {
13773 etest->flags |= ETH_TEST_FL_FAILED;
13774 data[TG3_NVRAM_TEST] = 1;
13775 }
13776 if (!doextlpbk && tg3_test_link(tp)) {
13777 etest->flags |= ETH_TEST_FL_FAILED;
13778 data[TG3_LINK_TEST] = 1;
13779 }
13780 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13781 int err, err2 = 0, irq_sync = 0;
13782
13783 if (netif_running(dev)) {
13784 tg3_phy_stop(tp);
13785 tg3_netif_stop(tp);
13786 irq_sync = 1;
13787 }
13788
13789 tg3_full_lock(tp, irq_sync);
13790 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13791 err = tg3_nvram_lock(tp);
13792 tg3_halt_cpu(tp, RX_CPU_BASE);
13793 if (!tg3_flag(tp, 5705_PLUS))
13794 tg3_halt_cpu(tp, TX_CPU_BASE);
13795 if (!err)
13796 tg3_nvram_unlock(tp);
13797
13798 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13799 tg3_phy_reset(tp);
13800
13801 if (tg3_test_registers(tp) != 0) {
13802 etest->flags |= ETH_TEST_FL_FAILED;
13803 data[TG3_REGISTER_TEST] = 1;
13804 }
13805
13806 if (tg3_test_memory(tp) != 0) {
13807 etest->flags |= ETH_TEST_FL_FAILED;
13808 data[TG3_MEMORY_TEST] = 1;
13809 }
13810
13811 if (doextlpbk)
13812 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13813
13814 if (tg3_test_loopback(tp, data, doextlpbk))
13815 etest->flags |= ETH_TEST_FL_FAILED;
13816
13817 tg3_full_unlock(tp);
13818
13819 if (tg3_test_interrupt(tp) != 0) {
13820 etest->flags |= ETH_TEST_FL_FAILED;
13821 data[TG3_INTERRUPT_TEST] = 1;
13822 }
13823
13824 tg3_full_lock(tp, 0);
13825
13826 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13827 if (netif_running(dev)) {
13828 tg3_flag_set(tp, INIT_COMPLETE);
13829 err2 = tg3_restart_hw(tp, true);
13830 if (!err2)
13831 tg3_netif_start(tp);
13832 }
13833
13834 tg3_full_unlock(tp);
13835
13836 if (irq_sync && !err2)
13837 tg3_phy_start(tp);
13838 }
13839 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13840 tg3_power_down_prepare(tp);
13841
13842 }
13843
13844 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13845 {
13846 struct tg3 *tp = netdev_priv(dev);
13847 struct hwtstamp_config stmpconf;
13848
13849 if (!tg3_flag(tp, PTP_CAPABLE))
13850 return -EOPNOTSUPP;
13851
13852 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13853 return -EFAULT;
13854
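/* No hwtstamp_config flags are supported */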
13855 if (stmpconf.flags)
13856 return -EINVAL;
13857
13858 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13859 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13860 return -ERANGE;
13861
13862 switch (stmpconf.rx_filter) {
13863 case HWTSTAMP_FILTER_NONE:
13864 tp->rxptpctl = 0;
13865 break;
13866 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13867 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13868 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13869 break;
13870 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13871 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13872 TG3_RX_PTP_CTL_SYNC_EVNT;
13873 break;
13874 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13875 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13876 TG3_RX_PTP_CTL_DELAY_REQ;
13877 break;
13878 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13879 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13880 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13881 break;
13882 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13883 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13884 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13885 break;
13886 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13887 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13888 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13889 break;
13890 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13891 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13892 TG3_RX_PTP_CTL_SYNC_EVNT;
13893 break;
13894 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13895 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13896 TG3_RX_PTP_CTL_SYNC_EVNT;
13897 break;
13898 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13899 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13900 TG3_RX_PTP_CTL_SYNC_EVNT;
13901 break;
13902 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13903 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13904 TG3_RX_PTP_CTL_DELAY_REQ;
13905 break;
13906 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13907 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13908 TG3_RX_PTP_CTL_DELAY_REQ;
13909 break;
13910 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13911 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13912 TG3_RX_PTP_CTL_DELAY_REQ;
13913 break;
13914 default:
13915 return -ERANGE;
13916 }
13917
13918 if (netif_running(dev) && tp->rxptpctl)
13919 tw32(TG3_RX_PTP_CTL,
13920 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13921
13922 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13923 tg3_flag_set(tp, TX_TSTAMP_EN);
13924 else
13925 tg3_flag_clear(tp, TX_TSTAMP_EN);
13926
13927 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13928 -EFAULT : 0;
13929 }
13930
13931 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13932 {
13933 struct tg3 *tp = netdev_priv(dev);
13934 struct hwtstamp_config stmpconf;
13935
13936 if (!tg3_flag(tp, PTP_CAPABLE))
13937 return -EOPNOTSUPP;
13938
13939 stmpconf.flags = 0;
13940 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13941 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13942
13943 switch (tp->rxptpctl) {
13944 case 0:
13945 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13946 break;
13947 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13948 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13949 break;
13950 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13951 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13952 break;
13953 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13954 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13955 break;
13956 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13957 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13958 break;
13959 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13960 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13961 break;
13962 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13963 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13964 break;
13965 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13966 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13967 break;
13968 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13969 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13970 break;
13971 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13972 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13973 break;
13974 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13975 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13976 break;
13977 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13978 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13979 break;
13980 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13981 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13982 break;
13983 default:
13984 WARN_ON_ONCE(1);
13985 return -ERANGE;
13986 }
13987
13988 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13989 -EFAULT : 0;
13990 }
13991
13992 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13993 {
13994 struct mii_ioctl_data *data = if_mii(ifr);
13995 struct tg3 *tp = netdev_priv(dev);
13996 int err;
13997
13998 if (tg3_flag(tp, USE_PHYLIB)) {
13999 struct phy_device *phydev;
14000 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14001 return -EAGAIN;
14002 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14003 return phy_mii_ioctl(phydev, ifr, cmd);
14004 }
14005
14006 switch (cmd) {
14007 case SIOCGMIIPHY:
14008 data->phy_id = tp->phy_addr;
14009
14010 fallthrough;
14011 case SIOCGMIIREG: {
14012 u32 mii_regval;
14013
14014 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14015 break; /* We have no PHY */
14016
14017 if (!netif_running(dev))
14018 return -EAGAIN;
14019
14020 spin_lock_bh(&tp->lock);
14021 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14022 data->reg_num & 0x1f, &mii_regval);
14023 spin_unlock_bh(&tp->lock);
14024
14025 data->val_out = mii_regval;
14026
14027 return err;
14028 }
14029
14030 case SIOCSMIIREG:
14031 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14032 break; /* We have no PHY */
14033
14034 if (!netif_running(dev))
14035 return -EAGAIN;
14036
14037 spin_lock_bh(&tp->lock);
14038 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14039 data->reg_num & 0x1f, data->val_in);
14040 spin_unlock_bh(&tp->lock);
14041
14042 return err;
14043
14044 case SIOCSHWTSTAMP:
14045 return tg3_hwtstamp_set(dev, ifr);
14046
14047 case SIOCGHWTSTAMP:
14048 return tg3_hwtstamp_get(dev, ifr);
14049
14050 default:
14051 /* do nothing */
14052 break;
14053 }
14054 return -EOPNOTSUPP;
14055 }
14056
14057 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14058 {
14059 struct tg3 *tp = netdev_priv(dev);
14060
14061 memcpy(ec, &tp->coal, sizeof(*ec));
14062 return 0;
14063 }
14064
14065 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14066 {
14067 struct tg3 *tp = netdev_priv(dev);
14068 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14069 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14070
14071 if (!tg3_flag(tp, 5705_PLUS)) {
14072 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14073 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14074 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14075 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14076 }
14077
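/* Bounds-check everything up front. On 5705+ parts the IRQ-tick and
 * statistics-block limits above stay zero, so any nonzero request
 * for those knobs is rejected here.
 */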
14078 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14079 (!ec->rx_coalesce_usecs) ||
14080 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14081 (!ec->tx_coalesce_usecs) ||
14082 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14083 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14084 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14085 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14086 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14087 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14088 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14089 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14090 return -EINVAL;
14091
14092 /* Only copy relevant parameters, ignore all others. */
14093 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14094 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14095 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14096 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14097 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14098 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14099 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14100 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14101 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14102
14103 if (netif_running(dev)) {
14104 tg3_full_lock(tp, 0);
14105 __tg3_set_coalesce(tp, &tp->coal);
14106 tg3_full_unlock(tp);
14107 }
14108 return 0;
14109 }
14110
14111 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14112 {
14113 struct tg3 *tp = netdev_priv(dev);
14114
14115 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14116 netdev_warn(tp->dev, "Board does not support EEE!\n");
14117 return -EOPNOTSUPP;
14118 }
14119
14120 if (edata->advertised != tp->eee.advertised) {
14121 netdev_warn(tp->dev,
14122 "Direct manipulation of EEE advertisement is not supported\n");
14123 return -EINVAL;
14124 }
14125
14126 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14127 netdev_warn(tp->dev,
14128 "Maximal Tx Lpi timer supported is %#x(u)\n",
14129 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14130 return -EINVAL;
14131 }
14132
14133 tp->eee = *edata;
14134
14135 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14136 tg3_warn_mgmt_link_flap(tp);
14137
14138 if (netif_running(tp->dev)) {
14139 tg3_full_lock(tp, 0);
14140 tg3_setup_eee(tp);
14141 tg3_phy_reset(tp);
14142 tg3_full_unlock(tp);
14143 }
14144
14145 return 0;
14146 }
14147
14148 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14149 {
14150 struct tg3 *tp = netdev_priv(dev);
14151
14152 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14153 netdev_warn(tp->dev,
14154 "Board does not support EEE!\n");
14155 return -EOPNOTSUPP;
14156 }
14157
14158 *edata = tp->eee;
14159 return 0;
14160 }
14161
14162 static const struct ethtool_ops tg3_ethtool_ops = {
14163 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14164 ETHTOOL_COALESCE_MAX_FRAMES |
14165 ETHTOOL_COALESCE_USECS_IRQ |
14166 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14167 ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14168 .get_drvinfo = tg3_get_drvinfo,
14169 .get_regs_len = tg3_get_regs_len,
14170 .get_regs = tg3_get_regs,
14171 .get_wol = tg3_get_wol,
14172 .set_wol = tg3_set_wol,
14173 .get_msglevel = tg3_get_msglevel,
14174 .set_msglevel = tg3_set_msglevel,
14175 .nway_reset = tg3_nway_reset,
14176 .get_link = ethtool_op_get_link,
14177 .get_eeprom_len = tg3_get_eeprom_len,
14178 .get_eeprom = tg3_get_eeprom,
14179 .set_eeprom = tg3_set_eeprom,
14180 .get_ringparam = tg3_get_ringparam,
14181 .set_ringparam = tg3_set_ringparam,
14182 .get_pauseparam = tg3_get_pauseparam,
14183 .set_pauseparam = tg3_set_pauseparam,
14184 .self_test = tg3_self_test,
14185 .get_strings = tg3_get_strings,
14186 .set_phys_id = tg3_set_phys_id,
14187 .get_ethtool_stats = tg3_get_ethtool_stats,
14188 .get_coalesce = tg3_get_coalesce,
14189 .set_coalesce = tg3_set_coalesce,
14190 .get_sset_count = tg3_get_sset_count,
14191 .get_rxnfc = tg3_get_rxnfc,
14192 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14193 .get_rxfh = tg3_get_rxfh,
14194 .set_rxfh = tg3_set_rxfh,
14195 .get_channels = tg3_get_channels,
14196 .set_channels = tg3_set_channels,
14197 .get_ts_info = tg3_get_ts_info,
14198 .get_eee = tg3_get_eee,
14199 .set_eee = tg3_set_eee,
14200 .get_link_ksettings = tg3_get_link_ksettings,
14201 .set_link_ksettings = tg3_set_link_ksettings,
14202 };
14203
14204 static void tg3_get_stats64(struct net_device *dev,
14205 struct rtnl_link_stats64 *stats)
14206 {
14207 struct tg3 *tp = netdev_priv(dev);
14208
14209 spin_lock_bh(&tp->lock);
14210 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14211 *stats = tp->net_stats_prev;
14212 spin_unlock_bh(&tp->lock);
14213 return;
14214 }
14215
14216 tg3_get_nstats(tp, stats);
14217 spin_unlock_bh(&tp->lock);
14218 }
14219
14220 static void tg3_set_rx_mode(struct net_device *dev)
14221 {
14222 struct tg3 *tp = netdev_priv(dev);
14223
14224 if (!netif_running(dev))
14225 return;
14226
14227 tg3_full_lock(tp, 0);
14228 __tg3_set_rx_mode(dev);
14229 tg3_full_unlock(tp);
14230 }
14231
14232 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14233 int new_mtu)
14234 {
14235 dev->mtu = new_mtu;
14236
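/* 5780-class parts cannot use the TSO engine on jumbo frames, so
 * TSO capability is traded against the jumbo ring whenever the MTU
 * crosses ETH_DATA_LEN.
 */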
14237 if (new_mtu > ETH_DATA_LEN) {
14238 if (tg3_flag(tp, 5780_CLASS)) {
14239 netdev_update_features(dev);
14240 tg3_flag_clear(tp, TSO_CAPABLE);
14241 } else {
14242 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14243 }
14244 } else {
14245 if (tg3_flag(tp, 5780_CLASS)) {
14246 tg3_flag_set(tp, TSO_CAPABLE);
14247 netdev_update_features(dev);
14248 }
14249 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14250 }
14251 }
14252
14253 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14254 {
14255 struct tg3 *tp = netdev_priv(dev);
14256 int err;
14257 bool reset_phy = false;
14258
14259 if (!netif_running(dev)) {
14260 /* We'll just catch it later when the
14261 * device is brought up.
14262 */
14263 tg3_set_mtu(dev, tp, new_mtu);
14264 return 0;
14265 }
14266
14267 tg3_phy_stop(tp);
14268
14269 tg3_netif_stop(tp);
14270
14271 tg3_set_mtu(dev, tp, new_mtu);
14272
14273 tg3_full_lock(tp, 1);
14274
14275 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14276
14277 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14278 * breaks all requests to 256 bytes.
14279 */
14280 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14281 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14282 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14283 tg3_asic_rev(tp) == ASIC_REV_5720)
14284 reset_phy = true;
14285
14286 err = tg3_restart_hw(tp, reset_phy);
14287
14288 if (!err)
14289 tg3_netif_start(tp);
14290
14291 tg3_full_unlock(tp);
14292
14293 if (!err)
14294 tg3_phy_start(tp);
14295
14296 return err;
14297 }
14298
14299 static const struct net_device_ops tg3_netdev_ops = {
14300 .ndo_open = tg3_open,
14301 .ndo_stop = tg3_close,
14302 .ndo_start_xmit = tg3_start_xmit,
14303 .ndo_get_stats64 = tg3_get_stats64,
14304 .ndo_validate_addr = eth_validate_addr,
14305 .ndo_set_rx_mode = tg3_set_rx_mode,
14306 .ndo_set_mac_address = tg3_set_mac_addr,
14307 .ndo_do_ioctl = tg3_ioctl,
14308 .ndo_tx_timeout = tg3_tx_timeout,
14309 .ndo_change_mtu = tg3_change_mtu,
14310 .ndo_fix_features = tg3_fix_features,
14311 .ndo_set_features = tg3_set_features,
14312 #ifdef CONFIG_NET_POLL_CONTROLLER
14313 .ndo_poll_controller = tg3_poll_controller,
14314 #endif
14315 };
14316
14317 static void tg3_get_eeprom_size(struct tg3 *tp)
14318 {
14319 u32 cursize, val, magic;
14320
14321 tp->nvram_size = EEPROM_CHIP_SIZE;
14322
14323 if (tg3_nvram_read(tp, 0, &magic) != 0)
14324 return;
14325
14326 if ((magic != TG3_EEPROM_MAGIC) &&
14327 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14328 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14329 return;
14330
14331 /*
14332 * Size the chip by reading offsets at increasing powers of two.
14333 * When we encounter our validation signature, we know the addressing
14334 * has wrapped around, and thus have our chip size.
14335 */
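/* E.g. on a 512-byte part (illustrative) the probes at 0x10, 0x20,
 * ..., 0x100 return ordinary data, while the probe at 0x200 aliases
 * offset 0 and returns the magic value, ending the search.
 */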
14336 cursize = 0x10;
14337
14338 while (cursize < tp->nvram_size) {
14339 if (tg3_nvram_read(tp, cursize, &val) != 0)
14340 return;
14341
14342 if (val == magic)
14343 break;
14344
14345 cursize <<= 1;
14346 }
14347
14348 tp->nvram_size = cursize;
14349 }
14350
14351 static void tg3_get_nvram_size(struct tg3 *tp)
14352 {
14353 u32 val;
14354
14355 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14356 return;
14357
14358 /* Selfboot format */
14359 if (val != TG3_EEPROM_MAGIC) {
14360 tg3_get_eeprom_size(tp);
14361 return;
14362 }
14363
14364 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14365 if (val != 0) {
14366 /* This is confusing. We want to operate on the
14367 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14368 * call will read from NVRAM and byteswap the data
14369 * according to the byteswapping settings for all
14370 * other register accesses. This ensures the data we
14371 * want will always reside in the lower 16-bits.
14372 * However, the data in NVRAM is in LE format, which
14373 * means the data from the NVRAM read will always be
14374 * opposite the endianness of the CPU. The 16-bit
14375 * byteswap then brings the data to CPU endianness.
14376 */
14377 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14378 return;
14379 }
14380 }
14381 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14382 }
14383
14384 static void tg3_get_nvram_info(struct tg3 *tp)
14385 {
14386 u32 nvcfg1;
14387
14388 nvcfg1 = tr32(NVRAM_CFG1);
14389 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14390 tg3_flag_set(tp, FLASH);
14391 } else {
14392 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14393 tw32(NVRAM_CFG1, nvcfg1);
14394 }
14395
14396 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14397 tg3_flag(tp, 5780_CLASS)) {
14398 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14399 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14400 tp->nvram_jedecnum = JEDEC_ATMEL;
14401 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14402 tg3_flag_set(tp, NVRAM_BUFFERED);
14403 break;
14404 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14405 tp->nvram_jedecnum = JEDEC_ATMEL;
14406 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14407 break;
14408 case FLASH_VENDOR_ATMEL_EEPROM:
14409 tp->nvram_jedecnum = JEDEC_ATMEL;
14410 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14411 tg3_flag_set(tp, NVRAM_BUFFERED);
14412 break;
14413 case FLASH_VENDOR_ST:
14414 tp->nvram_jedecnum = JEDEC_ST;
14415 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14416 tg3_flag_set(tp, NVRAM_BUFFERED);
14417 break;
14418 case FLASH_VENDOR_SAIFUN:
14419 tp->nvram_jedecnum = JEDEC_SAIFUN;
14420 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14421 break;
14422 case FLASH_VENDOR_SST_SMALL:
14423 case FLASH_VENDOR_SST_LARGE:
14424 tp->nvram_jedecnum = JEDEC_SST;
14425 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14426 break;
14427 }
14428 } else {
14429 tp->nvram_jedecnum = JEDEC_ATMEL;
14430 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14431 tg3_flag_set(tp, NVRAM_BUFFERED);
14432 }
14433 }
14434
14435 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14436 {
14437 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14438 case FLASH_5752PAGE_SIZE_256:
14439 tp->nvram_pagesize = 256;
14440 break;
14441 case FLASH_5752PAGE_SIZE_512:
14442 tp->nvram_pagesize = 512;
14443 break;
14444 case FLASH_5752PAGE_SIZE_1K:
14445 tp->nvram_pagesize = 1024;
14446 break;
14447 case FLASH_5752PAGE_SIZE_2K:
14448 tp->nvram_pagesize = 2048;
14449 break;
14450 case FLASH_5752PAGE_SIZE_4K:
14451 tp->nvram_pagesize = 4096;
14452 break;
14453 case FLASH_5752PAGE_SIZE_264:
14454 tp->nvram_pagesize = 264;
14455 break;
14456 case FLASH_5752PAGE_SIZE_528:
14457 tp->nvram_pagesize = 528;
14458 break;
14459 }
14460 }
14461
14462 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14463 {
14464 u32 nvcfg1;
14465
14466 nvcfg1 = tr32(NVRAM_CFG1);
14467
14468 /* NVRAM protection for TPM */
14469 if (nvcfg1 & (1 << 27))
14470 tg3_flag_set(tp, PROTECTED_NVRAM);
14471
14472 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14473 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14474 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14475 tp->nvram_jedecnum = JEDEC_ATMEL;
14476 tg3_flag_set(tp, NVRAM_BUFFERED);
14477 break;
14478 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14479 tp->nvram_jedecnum = JEDEC_ATMEL;
14480 tg3_flag_set(tp, NVRAM_BUFFERED);
14481 tg3_flag_set(tp, FLASH);
14482 break;
14483 case FLASH_5752VENDOR_ST_M45PE10:
14484 case FLASH_5752VENDOR_ST_M45PE20:
14485 case FLASH_5752VENDOR_ST_M45PE40:
14486 tp->nvram_jedecnum = JEDEC_ST;
14487 tg3_flag_set(tp, NVRAM_BUFFERED);
14488 tg3_flag_set(tp, FLASH);
14489 break;
14490 }
14491
14492 if (tg3_flag(tp, FLASH)) {
14493 tg3_nvram_get_pagesize(tp, nvcfg1);
14494 } else {
14495 /* For EEPROMs, set the page size to the maximum EEPROM size */
14496 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14497
14498 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14499 tw32(NVRAM_CFG1, nvcfg1);
14500 }
14501 }
14502
14503 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14504 {
14505 u32 nvcfg1, protect = 0;
14506
14507 nvcfg1 = tr32(NVRAM_CFG1);
14508
14509 /* NVRAM protection for TPM */
14510 if (nvcfg1 & (1 << 27)) {
14511 tg3_flag_set(tp, PROTECTED_NVRAM);
14512 protect = 1;
14513 }
14514
14515 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14516 switch (nvcfg1) {
14517 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14518 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14519 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14520 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14521 tp->nvram_jedecnum = JEDEC_ATMEL;
14522 tg3_flag_set(tp, NVRAM_BUFFERED);
14523 tg3_flag_set(tp, FLASH);
14524 tp->nvram_pagesize = 264;
14525 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14526 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14527 tp->nvram_size = (protect ? 0x3e200 :
14528 TG3_NVRAM_SIZE_512KB);
14529 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14530 tp->nvram_size = (protect ? 0x1f200 :
14531 TG3_NVRAM_SIZE_256KB);
14532 else
14533 tp->nvram_size = (protect ? 0x1f200 :
14534 TG3_NVRAM_SIZE_128KB);
14535 break;
14536 case FLASH_5752VENDOR_ST_M45PE10:
14537 case FLASH_5752VENDOR_ST_M45PE20:
14538 case FLASH_5752VENDOR_ST_M45PE40:
14539 tp->nvram_jedecnum = JEDEC_ST;
14540 tg3_flag_set(tp, NVRAM_BUFFERED);
14541 tg3_flag_set(tp, FLASH);
14542 tp->nvram_pagesize = 256;
14543 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14544 tp->nvram_size = (protect ?
14545 TG3_NVRAM_SIZE_64KB :
14546 TG3_NVRAM_SIZE_128KB);
14547 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14548 tp->nvram_size = (protect ?
14549 TG3_NVRAM_SIZE_64KB :
14550 TG3_NVRAM_SIZE_256KB);
14551 else
14552 tp->nvram_size = (protect ?
14553 TG3_NVRAM_SIZE_128KB :
14554 TG3_NVRAM_SIZE_512KB);
14555 break;
14556 }
14557 }
14558
14559 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14560 {
14561 u32 nvcfg1;
14562
14563 nvcfg1 = tr32(NVRAM_CFG1);
14564
14565 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14566 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14567 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14568 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14569 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14570 tp->nvram_jedecnum = JEDEC_ATMEL;
14571 tg3_flag_set(tp, NVRAM_BUFFERED);
14572 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14573
14574 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14575 tw32(NVRAM_CFG1, nvcfg1);
14576 break;
14577 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14578 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14579 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14580 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14581 tp->nvram_jedecnum = JEDEC_ATMEL;
14582 tg3_flag_set(tp, NVRAM_BUFFERED);
14583 tg3_flag_set(tp, FLASH);
14584 tp->nvram_pagesize = 264;
14585 break;
14586 case FLASH_5752VENDOR_ST_M45PE10:
14587 case FLASH_5752VENDOR_ST_M45PE20:
14588 case FLASH_5752VENDOR_ST_M45PE40:
14589 tp->nvram_jedecnum = JEDEC_ST;
14590 tg3_flag_set(tp, NVRAM_BUFFERED);
14591 tg3_flag_set(tp, FLASH);
14592 tp->nvram_pagesize = 256;
14593 break;
14594 }
14595 }
14596
14597 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14598 {
14599 u32 nvcfg1, protect = 0;
14600
14601 nvcfg1 = tr32(NVRAM_CFG1);
14602
14603 /* NVRAM protection for TPM */
14604 if (nvcfg1 & (1 << 27)) {
14605 tg3_flag_set(tp, PROTECTED_NVRAM);
14606 protect = 1;
14607 }
14608
14609 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14610 switch (nvcfg1) {
14611 case FLASH_5761VENDOR_ATMEL_ADB021D:
14612 case FLASH_5761VENDOR_ATMEL_ADB041D:
14613 case FLASH_5761VENDOR_ATMEL_ADB081D:
14614 case FLASH_5761VENDOR_ATMEL_ADB161D:
14615 case FLASH_5761VENDOR_ATMEL_MDB021D:
14616 case FLASH_5761VENDOR_ATMEL_MDB041D:
14617 case FLASH_5761VENDOR_ATMEL_MDB081D:
14618 case FLASH_5761VENDOR_ATMEL_MDB161D:
14619 tp->nvram_jedecnum = JEDEC_ATMEL;
14620 tg3_flag_set(tp, NVRAM_BUFFERED);
14621 tg3_flag_set(tp, FLASH);
14622 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14623 tp->nvram_pagesize = 256;
14624 break;
14625 case FLASH_5761VENDOR_ST_A_M45PE20:
14626 case FLASH_5761VENDOR_ST_A_M45PE40:
14627 case FLASH_5761VENDOR_ST_A_M45PE80:
14628 case FLASH_5761VENDOR_ST_A_M45PE16:
14629 case FLASH_5761VENDOR_ST_M_M45PE20:
14630 case FLASH_5761VENDOR_ST_M_M45PE40:
14631 case FLASH_5761VENDOR_ST_M_M45PE80:
14632 case FLASH_5761VENDOR_ST_M_M45PE16:
14633 tp->nvram_jedecnum = JEDEC_ST;
14634 tg3_flag_set(tp, NVRAM_BUFFERED);
14635 tg3_flag_set(tp, FLASH);
14636 tp->nvram_pagesize = 256;
14637 break;
14638 }
14639
14640 if (protect) {
14641 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14642 } else {
14643 switch (nvcfg1) {
14644 case FLASH_5761VENDOR_ATMEL_ADB161D:
14645 case FLASH_5761VENDOR_ATMEL_MDB161D:
14646 case FLASH_5761VENDOR_ST_A_M45PE16:
14647 case FLASH_5761VENDOR_ST_M_M45PE16:
14648 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14649 break;
14650 case FLASH_5761VENDOR_ATMEL_ADB081D:
14651 case FLASH_5761VENDOR_ATMEL_MDB081D:
14652 case FLASH_5761VENDOR_ST_A_M45PE80:
14653 case FLASH_5761VENDOR_ST_M_M45PE80:
14654 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14655 break;
14656 case FLASH_5761VENDOR_ATMEL_ADB041D:
14657 case FLASH_5761VENDOR_ATMEL_MDB041D:
14658 case FLASH_5761VENDOR_ST_A_M45PE40:
14659 case FLASH_5761VENDOR_ST_M_M45PE40:
14660 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14661 break;
14662 case FLASH_5761VENDOR_ATMEL_ADB021D:
14663 case FLASH_5761VENDOR_ATMEL_MDB021D:
14664 case FLASH_5761VENDOR_ST_A_M45PE20:
14665 case FLASH_5761VENDOR_ST_M_M45PE20:
14666 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14667 break;
14668 }
14669 }
14670 }
14671
14672 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14673 {
14674 tp->nvram_jedecnum = JEDEC_ATMEL;
14675 tg3_flag_set(tp, NVRAM_BUFFERED);
14676 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14677 }
14678
14679 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14680 {
14681 u32 nvcfg1;
14682
14683 nvcfg1 = tr32(NVRAM_CFG1);
14684
14685 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14686 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14687 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14688 tp->nvram_jedecnum = JEDEC_ATMEL;
14689 tg3_flag_set(tp, NVRAM_BUFFERED);
14690 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14691
14692 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14693 tw32(NVRAM_CFG1, nvcfg1);
14694 return;
14695 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14696 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14697 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14698 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14699 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14700 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14701 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14702 tp->nvram_jedecnum = JEDEC_ATMEL;
14703 tg3_flag_set(tp, NVRAM_BUFFERED);
14704 tg3_flag_set(tp, FLASH);
14705
14706 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14707 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14708 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14709 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14710 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14711 break;
14712 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14713 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14714 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14715 break;
14716 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14717 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14718 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14719 break;
14720 }
14721 break;
14722 case FLASH_5752VENDOR_ST_M45PE10:
14723 case FLASH_5752VENDOR_ST_M45PE20:
14724 case FLASH_5752VENDOR_ST_M45PE40:
14725 tp->nvram_jedecnum = JEDEC_ST;
14726 tg3_flag_set(tp, NVRAM_BUFFERED);
14727 tg3_flag_set(tp, FLASH);
14728
14729 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14730 case FLASH_5752VENDOR_ST_M45PE10:
14731 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14732 break;
14733 case FLASH_5752VENDOR_ST_M45PE20:
14734 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14735 break;
14736 case FLASH_5752VENDOR_ST_M45PE40:
14737 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14738 break;
14739 }
14740 break;
14741 default:
14742 tg3_flag_set(tp, NO_NVRAM);
14743 return;
14744 }
14745
14746 tg3_nvram_get_pagesize(tp, nvcfg1);
14747 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14748 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14749 }
14750
14751
14752 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14753 {
14754 u32 nvcfg1;
14755
14756 nvcfg1 = tr32(NVRAM_CFG1);
14757
14758 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14759 case FLASH_5717VENDOR_ATMEL_EEPROM:
14760 case FLASH_5717VENDOR_MICRO_EEPROM:
14761 tp->nvram_jedecnum = JEDEC_ATMEL;
14762 tg3_flag_set(tp, NVRAM_BUFFERED);
14763 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14764
14765 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14766 tw32(NVRAM_CFG1, nvcfg1);
14767 return;
14768 case FLASH_5717VENDOR_ATMEL_MDB011D:
14769 case FLASH_5717VENDOR_ATMEL_ADB011B:
14770 case FLASH_5717VENDOR_ATMEL_ADB011D:
14771 case FLASH_5717VENDOR_ATMEL_MDB021D:
14772 case FLASH_5717VENDOR_ATMEL_ADB021B:
14773 case FLASH_5717VENDOR_ATMEL_ADB021D:
14774 case FLASH_5717VENDOR_ATMEL_45USPT:
14775 tp->nvram_jedecnum = JEDEC_ATMEL;
14776 tg3_flag_set(tp, NVRAM_BUFFERED);
14777 tg3_flag_set(tp, FLASH);
14778
14779 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14780 case FLASH_5717VENDOR_ATMEL_MDB021D:
14781 /* Detect size with tg3_get_nvram_size() */
14782 break;
14783 case FLASH_5717VENDOR_ATMEL_ADB021B:
14784 case FLASH_5717VENDOR_ATMEL_ADB021D:
14785 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14786 break;
14787 default:
14788 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14789 break;
14790 }
14791 break;
14792 case FLASH_5717VENDOR_ST_M_M25PE10:
14793 case FLASH_5717VENDOR_ST_A_M25PE10:
14794 case FLASH_5717VENDOR_ST_M_M45PE10:
14795 case FLASH_5717VENDOR_ST_A_M45PE10:
14796 case FLASH_5717VENDOR_ST_M_M25PE20:
14797 case FLASH_5717VENDOR_ST_A_M25PE20:
14798 case FLASH_5717VENDOR_ST_M_M45PE20:
14799 case FLASH_5717VENDOR_ST_A_M45PE20:
14800 case FLASH_5717VENDOR_ST_25USPT:
14801 case FLASH_5717VENDOR_ST_45USPT:
14802 tp->nvram_jedecnum = JEDEC_ST;
14803 tg3_flag_set(tp, NVRAM_BUFFERED);
14804 tg3_flag_set(tp, FLASH);
14805
14806 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14807 case FLASH_5717VENDOR_ST_M_M25PE20:
14808 case FLASH_5717VENDOR_ST_M_M45PE20:
14809 /* Detect size with tg3_get_nvram_size() */
14810 break;
14811 case FLASH_5717VENDOR_ST_A_M25PE20:
14812 case FLASH_5717VENDOR_ST_A_M45PE20:
14813 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14814 break;
14815 default:
14816 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14817 break;
14818 }
14819 break;
14820 default:
14821 tg3_flag_set(tp, NO_NVRAM);
14822 return;
14823 }
14824
14825 tg3_nvram_get_pagesize(tp, nvcfg1);
14826 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14827 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14828 }
14829
14830 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14831 {
14832 u32 nvcfg1, nvmpinstrp, nv_status;
14833
14834 nvcfg1 = tr32(NVRAM_CFG1);
14835 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14836
14837 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14838 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14839 tg3_flag_set(tp, NO_NVRAM);
14840 return;
14841 }
14842
14843 switch (nvmpinstrp) {
14844 case FLASH_5762_MX25L_100:
14845 case FLASH_5762_MX25L_200:
14846 case FLASH_5762_MX25L_400:
14847 case FLASH_5762_MX25L_800:
14848 case FLASH_5762_MX25L_160_320:
14849 tp->nvram_pagesize = 4096;
14850 tp->nvram_jedecnum = JEDEC_MACRONIX;
14851 tg3_flag_set(tp, NVRAM_BUFFERED);
14852 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14853 tg3_flag_set(tp, FLASH);
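/* The autosense status encodes the flash size as a power-of-two
 * device ID: 1 << devid is shifted up by AUTOSENSE_SIZE_IN_MB,
 * which, as the name suggests, scales megabyte units to bytes.
 */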
14854 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14855 tp->nvram_size =
14856 (1 << (nv_status >> AUTOSENSE_DEVID &
14857 AUTOSENSE_DEVID_MASK)
14858 << AUTOSENSE_SIZE_IN_MB);
14859 return;
14860
14861 case FLASH_5762_EEPROM_HD:
14862 nvmpinstrp = FLASH_5720_EEPROM_HD;
14863 break;
14864 case FLASH_5762_EEPROM_LD:
14865 nvmpinstrp = FLASH_5720_EEPROM_LD;
14866 break;
14867 case FLASH_5720VENDOR_M_ST_M45PE20:
14868 /* This pinstrap supports multiple sizes, so force it
14869 * to read the actual size from location 0xf0.
14870 */
14871 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14872 break;
14873 }
14874 }
14875
14876 switch (nvmpinstrp) {
14877 case FLASH_5720_EEPROM_HD:
14878 case FLASH_5720_EEPROM_LD:
14879 tp->nvram_jedecnum = JEDEC_ATMEL;
14880 tg3_flag_set(tp, NVRAM_BUFFERED);
14881
14882 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14883 tw32(NVRAM_CFG1, nvcfg1);
14884 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14885 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14886 else
14887 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14888 return;
14889 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14890 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14891 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14892 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14893 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14894 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14895 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14896 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14897 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14898 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14899 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14900 case FLASH_5720VENDOR_ATMEL_45USPT:
14901 tp->nvram_jedecnum = JEDEC_ATMEL;
14902 tg3_flag_set(tp, NVRAM_BUFFERED);
14903 tg3_flag_set(tp, FLASH);
14904
14905 switch (nvmpinstrp) {
14906 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14907 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14908 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14909 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14910 break;
14911 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14912 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14913 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14914 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14915 break;
14916 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14917 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14918 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14919 break;
14920 default:
14921 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14922 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14923 break;
14924 }
14925 break;
14926 case FLASH_5720VENDOR_M_ST_M25PE10:
14927 case FLASH_5720VENDOR_M_ST_M45PE10:
14928 case FLASH_5720VENDOR_A_ST_M25PE10:
14929 case FLASH_5720VENDOR_A_ST_M45PE10:
14930 case FLASH_5720VENDOR_M_ST_M25PE20:
14931 case FLASH_5720VENDOR_M_ST_M45PE20:
14932 case FLASH_5720VENDOR_A_ST_M25PE20:
14933 case FLASH_5720VENDOR_A_ST_M45PE20:
14934 case FLASH_5720VENDOR_M_ST_M25PE40:
14935 case FLASH_5720VENDOR_M_ST_M45PE40:
14936 case FLASH_5720VENDOR_A_ST_M25PE40:
14937 case FLASH_5720VENDOR_A_ST_M45PE40:
14938 case FLASH_5720VENDOR_M_ST_M25PE80:
14939 case FLASH_5720VENDOR_M_ST_M45PE80:
14940 case FLASH_5720VENDOR_A_ST_M25PE80:
14941 case FLASH_5720VENDOR_A_ST_M45PE80:
14942 case FLASH_5720VENDOR_ST_25USPT:
14943 case FLASH_5720VENDOR_ST_45USPT:
14944 tp->nvram_jedecnum = JEDEC_ST;
14945 tg3_flag_set(tp, NVRAM_BUFFERED);
14946 tg3_flag_set(tp, FLASH);
14947
14948 switch (nvmpinstrp) {
14949 case FLASH_5720VENDOR_M_ST_M25PE20:
14950 case FLASH_5720VENDOR_M_ST_M45PE20:
14951 case FLASH_5720VENDOR_A_ST_M25PE20:
14952 case FLASH_5720VENDOR_A_ST_M45PE20:
14953 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14954 break;
14955 case FLASH_5720VENDOR_M_ST_M25PE40:
14956 case FLASH_5720VENDOR_M_ST_M45PE40:
14957 case FLASH_5720VENDOR_A_ST_M25PE40:
14958 case FLASH_5720VENDOR_A_ST_M45PE40:
14959 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14960 break;
14961 case FLASH_5720VENDOR_M_ST_M25PE80:
14962 case FLASH_5720VENDOR_M_ST_M45PE80:
14963 case FLASH_5720VENDOR_A_ST_M25PE80:
14964 case FLASH_5720VENDOR_A_ST_M45PE80:
14965 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14966 break;
14967 default:
14968 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14969 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14970 break;
14971 }
14972 break;
14973 default:
14974 tg3_flag_set(tp, NO_NVRAM);
14975 return;
14976 }
14977
14978 tg3_nvram_get_pagesize(tp, nvcfg1);
14979 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14980 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14981
14982 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14983 u32 val;
14984
14985 if (tg3_nvram_read(tp, 0, &val))
14986 return;
14987
14988 if (val != TG3_EEPROM_MAGIC &&
14989 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14990 tg3_flag_set(tp, NO_NVRAM);
14991 }
14992 }
14993
14994 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14995 static void tg3_nvram_init(struct tg3 *tp)
14996 {
14997 if (tg3_flag(tp, IS_SSB_CORE)) {
14998 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14999 tg3_flag_clear(tp, NVRAM);
15000 tg3_flag_clear(tp, NVRAM_BUFFERED);
15001 tg3_flag_set(tp, NO_NVRAM);
15002 return;
15003 }
15004
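/* Reset the serial-EEPROM access state machine and program the
 * default clock period before probing the part.
 */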
15005 tw32_f(GRC_EEPROM_ADDR,
15006 (EEPROM_ADDR_FSM_RESET |
15007 (EEPROM_DEFAULT_CLOCK_PERIOD <<
15008 EEPROM_ADDR_CLKPERD_SHIFT)));
15009
15010 msleep(1);
15011
15012 /* Enable serial-EEPROM accesses. */
15013 tw32_f(GRC_LOCAL_CTRL,
15014 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15015 udelay(100);
15016
15017 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15018 tg3_asic_rev(tp) != ASIC_REV_5701) {
15019 tg3_flag_set(tp, NVRAM);
15020
15021 if (tg3_nvram_lock(tp)) {
15022 netdev_warn(tp->dev,
15023 "Cannot get nvram lock, %s failed\n",
15024 __func__);
15025 return;
15026 }
15027 tg3_enable_nvram_access(tp);
15028
15029 tp->nvram_size = 0;
15030
15031 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15032 tg3_get_5752_nvram_info(tp);
15033 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15034 tg3_get_5755_nvram_info(tp);
15035 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15036 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15037 tg3_asic_rev(tp) == ASIC_REV_5785)
15038 tg3_get_5787_nvram_info(tp);
15039 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15040 tg3_get_5761_nvram_info(tp);
15041 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15042 tg3_get_5906_nvram_info(tp);
15043 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15044 tg3_flag(tp, 57765_CLASS))
15045 tg3_get_57780_nvram_info(tp);
15046 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15047 tg3_asic_rev(tp) == ASIC_REV_5719)
15048 tg3_get_5717_nvram_info(tp);
15049 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15050 tg3_asic_rev(tp) == ASIC_REV_5762)
15051 tg3_get_5720_nvram_info(tp);
15052 else
15053 tg3_get_nvram_info(tp);
15054
15055 if (tp->nvram_size == 0)
15056 tg3_get_nvram_size(tp);
15057
15058 tg3_disable_nvram_access(tp);
15059 tg3_nvram_unlock(tp);
15060
15061 } else {
15062 tg3_flag_clear(tp, NVRAM);
15063 tg3_flag_clear(tp, NVRAM_BUFFERED);
15064
15065 tg3_get_eeprom_size(tp);
15066 }
15067 }
15068
15069 struct subsys_tbl_ent {
15070 u16 subsys_vendor, subsys_devid;
15071 u32 phy_id;
15072 };
15073
15074 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15075 /* Broadcom boards. */
15076 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15077 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15078 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15079 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15080 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15081 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15082 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15083 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15084 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15085 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15086 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15087 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15088 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15089 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15090 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15091 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15092 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15093 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15094 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15095 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15096 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15097 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15098
15099 /* 3com boards. */
15100 { TG3PCI_SUBVENDOR_ID_3COM,
15101 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15102 { TG3PCI_SUBVENDOR_ID_3COM,
15103 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15104 { TG3PCI_SUBVENDOR_ID_3COM,
15105 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15106 { TG3PCI_SUBVENDOR_ID_3COM,
15107 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15108 { TG3PCI_SUBVENDOR_ID_3COM,
15109 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15110
15111 /* DELL boards. */
15112 { TG3PCI_SUBVENDOR_ID_DELL,
15113 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15114 { TG3PCI_SUBVENDOR_ID_DELL,
15115 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15116 { TG3PCI_SUBVENDOR_ID_DELL,
15117 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15118 { TG3PCI_SUBVENDOR_ID_DELL,
15119 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15120
15121 /* Compaq boards. */
15122 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15123 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15124 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15125 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15126 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15127 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15128 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15129 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15130 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15131 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15132
15133 /* IBM boards. */
15134 { TG3PCI_SUBVENDOR_ID_IBM,
15135 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15136 };
15137
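/* Match the device's PCI subsystem vendor/device IDs against the
 * hardcoded board table above, to recover a PHY ID on boards whose
 * EEPROM carries no usable signature.
 */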
15138 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15139 {
15140 int i;
15141
15142 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15143 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15144 tp->pdev->subsystem_vendor) &&
15145 (subsys_id_to_phy_id[i].subsys_devid ==
15146 tp->pdev->subsystem_device))
15147 return &subsys_id_to_phy_id[i];
15148 }
15149 return NULL;
15150 }
15151
15152 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15153 {
15154 u32 val;
15155
15156 tp->phy_id = TG3_PHY_ID_INVALID;
15157 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15158
15159 /* Assume an onboard device and WOL capable by default. */
15160 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15161 tg3_flag_set(tp, WOL_CAP);
15162
15163 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15164 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15165 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15166 tg3_flag_set(tp, IS_NIC);
15167 }
15168 val = tr32(VCPU_CFGSHDW);
15169 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15170 tg3_flag_set(tp, ASPM_WORKAROUND);
15171 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15172 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15173 tg3_flag_set(tp, WOL_ENABLE);
15174 device_set_wakeup_enable(&tp->pdev->dev, true);
15175 }
15176 goto done;
15177 }
15178
15179 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15180 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15181 u32 nic_cfg, led_cfg;
15182 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15183 u32 nic_phy_id, ver, eeprom_phy_id;
15184 int eeprom_phy_serdes = 0;
15185
15186 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15187 tp->nic_sram_data_cfg = nic_cfg;
15188
15189 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15190 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15191 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15192 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15193 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15194 (ver > 0) && (ver < 0x100))
15195 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15196
15197 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15198 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15199
15200 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15201 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15202 tg3_asic_rev(tp) == ASIC_REV_5720)
15203 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15204
15205 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15206 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15207 eeprom_phy_serdes = 1;
15208
15209 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15210 if (nic_phy_id != 0) {
15211 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15212 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15213
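			/* Reassemble the PHY ID into the same layout that
			 * tg3_phy_probe() builds from the MII_PHYSID1/2
			 * registers.
			 */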
15214 eeprom_phy_id = (id1 >> 16) << 10;
15215 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15216 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15217 } else
15218 eeprom_phy_id = 0;
15219
15220 tp->phy_id = eeprom_phy_id;
15221 if (eeprom_phy_serdes) {
15222 if (!tg3_flag(tp, 5705_PLUS))
15223 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15224 else
15225 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15226 }
15227
15228 if (tg3_flag(tp, 5750_PLUS))
15229 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15230 SHASTA_EXT_LED_MODE_MASK);
15231 else
15232 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15233
15234 switch (led_cfg) {
15235 default:
15236 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15237 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15238 break;
15239
15240 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15241 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15242 break;
15243
15244 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15245 tp->led_ctrl = LED_CTRL_MODE_MAC;
15246
15247 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15248 * read on some older 5700/5701 bootcode.
15249 */
15250 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15251 tg3_asic_rev(tp) == ASIC_REV_5701)
15252 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15253
15254 break;
15255
15256 case SHASTA_EXT_LED_SHARED:
15257 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15258 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15259 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15260 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15261 LED_CTRL_MODE_PHY_2);
15262
15263 if (tg3_flag(tp, 5717_PLUS) ||
15264 tg3_asic_rev(tp) == ASIC_REV_5762)
15265 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15266 LED_CTRL_BLINK_RATE_MASK;
15267
15268 break;
15269
15270 case SHASTA_EXT_LED_MAC:
15271 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15272 break;
15273
15274 case SHASTA_EXT_LED_COMBO:
15275 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15276 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15277 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15278 LED_CTRL_MODE_PHY_2);
15279 break;
15280
15281 }
15282
15283 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15284 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15285 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15286 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15287
15288 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15289 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15290
15291 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15292 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15293 if ((tp->pdev->subsystem_vendor ==
15294 PCI_VENDOR_ID_ARIMA) &&
15295 (tp->pdev->subsystem_device == 0x205a ||
15296 tp->pdev->subsystem_device == 0x2063))
15297 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15298 } else {
15299 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15300 tg3_flag_set(tp, IS_NIC);
15301 }
15302
15303 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15304 tg3_flag_set(tp, ENABLE_ASF);
15305 if (tg3_flag(tp, 5750_PLUS))
15306 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15307 }
15308
15309 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15310 tg3_flag(tp, 5750_PLUS))
15311 tg3_flag_set(tp, ENABLE_APE);
15312
15313 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15314 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15315 tg3_flag_clear(tp, WOL_CAP);
15316
15317 if (tg3_flag(tp, WOL_CAP) &&
15318 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15319 tg3_flag_set(tp, WOL_ENABLE);
15320 device_set_wakeup_enable(&tp->pdev->dev, true);
15321 }
15322
15323 if (cfg2 & (1 << 17))
15324 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15325
15326 		/* serdes signal pre-emphasis in register 0x590 is set by
15327 		 * the bootcode if bit 18 is set */
15328 if (cfg2 & (1 << 18))
15329 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15330
15331 if ((tg3_flag(tp, 57765_PLUS) ||
15332 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15333 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15334 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15335 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15336
15337 if (tg3_flag(tp, PCI_EXPRESS)) {
15338 u32 cfg3;
15339
15340 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15341 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15342 !tg3_flag(tp, 57765_PLUS) &&
15343 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15344 tg3_flag_set(tp, ASPM_WORKAROUND);
15345 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15346 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15347 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15348 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15349 }
15350
15351 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15352 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15353 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15354 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15355 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15356 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15357
15358 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15359 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15360 }
15361 done:
15362 if (tg3_flag(tp, WOL_CAP))
15363 device_set_wakeup_enable(&tp->pdev->dev,
15364 tg3_flag(tp, WOL_ENABLE));
15365 else
15366 device_set_wakeup_capable(&tp->pdev->dev, false);
15367 }
15368
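/* Read one 32-bit word from the APE OTP region.  Access is serialized
 * with the NVRAM lock; the read command is issued through
 * TG3_APE_OTP_CTRL and completion is polled for roughly 1 ms via
 * TG3_APE_OTP_STATUS.
 */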
15369 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15370 {
15371 int i, err;
15372 u32 val2, off = offset * 8;
15373
15374 err = tg3_nvram_lock(tp);
15375 if (err)
15376 return err;
15377
15378 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15379 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15380 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15381 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15382 udelay(10);
15383
15384 for (i = 0; i < 100; i++) {
15385 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15386 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15387 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15388 break;
15389 }
15390 udelay(10);
15391 }
15392
15393 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15394
15395 tg3_nvram_unlock(tp);
15396 if (val2 & APE_OTP_STATUS_CMD_DONE)
15397 return 0;
15398
15399 return -EBUSY;
15400 }
15401
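/* Kick off a command in the on-chip OTP controller and wait for it
 * to complete.
 */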
15402 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15403 {
15404 int i;
15405 u32 val;
15406
15407 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15408 tw32(OTP_CTRL, cmd);
15409
15410 /* Wait for up to 1 ms for command to execute. */
15411 for (i = 0; i < 100; i++) {
15412 val = tr32(OTP_STATUS);
15413 if (val & OTP_STATUS_CMD_DONE)
15414 break;
15415 udelay(10);
15416 }
15417
15418 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15419 }
15420
15421 /* Read the gphy configuration from the OTP region of the chip. The gphy
15422 * configuration is a 32-bit value that straddles the alignment boundary.
15423 * We do two 32-bit reads and then shift and merge the results.
15424 */
15425 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15426 {
15427 u32 bhalf_otp, thalf_otp;
15428
15429 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15430
15431 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15432 return 0;
15433
15434 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15435
15436 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15437 return 0;
15438
15439 thalf_otp = tr32(OTP_READ_DATA);
15440
15441 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15442
15443 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15444 return 0;
15445
15446 bhalf_otp = tr32(OTP_READ_DATA);
15447
15448 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15449 }
15450
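/* Seed the link configuration with everything the PHY can advertise:
 * the gigabit modes unless the device is restricted to 10/100, and
 * the 10/100 TP modes only for copper PHYs.  Autoneg is the default.
 */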
15451 static void tg3_phy_init_link_config(struct tg3 *tp)
15452 {
15453 u32 adv = ADVERTISED_Autoneg;
15454
15455 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15456 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15457 adv |= ADVERTISED_1000baseT_Half;
15458 adv |= ADVERTISED_1000baseT_Full;
15459 }
15460
15461 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15462 adv |= ADVERTISED_100baseT_Half |
15463 ADVERTISED_100baseT_Full |
15464 ADVERTISED_10baseT_Half |
15465 ADVERTISED_10baseT_Full |
15466 ADVERTISED_TP;
15467 else
15468 adv |= ADVERTISED_FIBRE;
15469
15470 tp->link_config.advertising = adv;
15471 tp->link_config.speed = SPEED_UNKNOWN;
15472 tp->link_config.duplex = DUPLEX_UNKNOWN;
15473 tp->link_config.autoneg = AUTONEG_ENABLE;
15474 tp->link_config.active_speed = SPEED_UNKNOWN;
15475 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15476
15477 tp->old_link = -1;
15478 }
15479
15480 static int tg3_phy_probe(struct tg3 *tp)
15481 {
15482 u32 hw_phy_id_1, hw_phy_id_2;
15483 u32 hw_phy_id, hw_phy_id_masked;
15484 int err;
15485
15486 	/* flow control autonegotiation is the default behavior */
15487 tg3_flag_set(tp, PAUSE_AUTONEG);
15488 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15489
15490 if (tg3_flag(tp, ENABLE_APE)) {
15491 switch (tp->pci_fn) {
15492 case 0:
15493 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15494 break;
15495 case 1:
15496 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15497 break;
15498 case 2:
15499 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15500 break;
15501 case 3:
15502 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15503 break;
15504 }
15505 }
15506
15507 if (!tg3_flag(tp, ENABLE_ASF) &&
15508 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15509 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15510 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15511 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15512
15513 if (tg3_flag(tp, USE_PHYLIB))
15514 return tg3_phy_init(tp);
15515
15516 /* Reading the PHY ID register can conflict with ASF
15517 * firmware access to the PHY hardware.
15518 */
15519 err = 0;
15520 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15521 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15522 } else {
15523 		/* Now read the physical PHY_ID from the chip and verify
15524 		 * that it is sane.  If it doesn't look good, we fall back
15525 		 * to the value found in the eeprom area and, failing that,
15526 		 * to the hard-coded subsystem ID table.
15527 		 */
15528 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15529 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15530
15531 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15532 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15533 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15534
15535 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15536 }
15537
15538 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15539 tp->phy_id = hw_phy_id;
15540 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15541 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15542 else
15543 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15544 } else {
15545 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15546 /* Do nothing, phy ID already set up in
15547 * tg3_get_eeprom_hw_cfg().
15548 */
15549 } else {
15550 struct subsys_tbl_ent *p;
15551
15552 /* No eeprom signature? Try the hardcoded
15553 * subsys device table.
15554 */
15555 p = tg3_lookup_by_subsys(tp);
15556 if (p) {
15557 tp->phy_id = p->phy_id;
15558 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15559 				/* We have seen the IDs 0xbc050cd0,
15560 				 * 0xbc050f80 and 0xbc050c30 on devices
15561 				 * connected to a BCM4785, and there are
15562 				 * probably more.  For now, just assume
15563 				 * that the PHY is supported when it is
15564 				 * connected to an SSB core.
15565 				 */
15566 return -ENODEV;
15567 }
15568
15569 if (!tp->phy_id ||
15570 tp->phy_id == TG3_PHY_ID_BCM8002)
15571 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15572 }
15573 }
15574
15575 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15576 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15577 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15578 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15579 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15580 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15581 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15582 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15583 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15584 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15585
15586 tp->eee.supported = SUPPORTED_100baseT_Full |
15587 SUPPORTED_1000baseT_Full;
15588 tp->eee.advertised = ADVERTISED_100baseT_Full |
15589 ADVERTISED_1000baseT_Full;
15590 tp->eee.eee_enabled = 1;
15591 tp->eee.tx_lpi_enabled = 1;
15592 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15593 }
15594
15595 tg3_phy_init_link_config(tp);
15596
15597 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15598 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15599 !tg3_flag(tp, ENABLE_APE) &&
15600 !tg3_flag(tp, ENABLE_ASF)) {
15601 u32 bmsr, dummy;
15602
15603 tg3_readphy(tp, MII_BMSR, &bmsr);
15604 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15605 (bmsr & BMSR_LSTATUS))
15606 goto skip_phy_reset;
15607
15608 err = tg3_phy_reset(tp);
15609 if (err)
15610 return err;
15611
15612 tg3_phy_set_wirespeed(tp);
15613
15614 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15615 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15616 tp->link_config.flowctrl);
15617
15618 tg3_writephy(tp, MII_BMCR,
15619 BMCR_ANENABLE | BMCR_ANRESTART);
15620 }
15621 }
15622
15623 skip_phy_reset:
15624 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15625 err = tg3_init_5401phy_dsp(tp);
15626 if (err)
15627 return err;
15628
15629 err = tg3_init_5401phy_dsp(tp);
15630 }
15631
15632 return err;
15633 }
15634
15635 static void tg3_read_vpd(struct tg3 *tp)
15636 {
15637 u8 *vpd_data;
15638 unsigned int block_end, rosize, len;
15639 u32 vpdlen;
15640 int j, i = 0;
15641
15642 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15643 if (!vpd_data)
15644 goto out_no_vpd;
15645
15646 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15647 if (i < 0)
15648 goto out_not_found;
15649
15650 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15651 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15652 i += PCI_VPD_LRDT_TAG_SIZE;
15653
15654 if (block_end > vpdlen)
15655 goto out_not_found;
15656
15657 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15658 PCI_VPD_RO_KEYWORD_MFR_ID);
15659 if (j > 0) {
15660 len = pci_vpd_info_field_size(&vpd_data[j]);
15661
15662 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15663 if (j + len > block_end || len != 4 ||
15664 memcmp(&vpd_data[j], "1028", 4))
15665 goto partno;
15666
15667 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15668 PCI_VPD_RO_KEYWORD_VENDOR0);
15669 if (j < 0)
15670 goto partno;
15671
15672 len = pci_vpd_info_field_size(&vpd_data[j]);
15673
15674 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15675 if (j + len > block_end)
15676 goto partno;
15677
15678 if (len >= sizeof(tp->fw_ver))
15679 len = sizeof(tp->fw_ver) - 1;
15680 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15681 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15682 &vpd_data[j]);
15683 }
15684
15685 partno:
15686 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15687 PCI_VPD_RO_KEYWORD_PARTNO);
15688 if (i < 0)
15689 goto out_not_found;
15690
15691 len = pci_vpd_info_field_size(&vpd_data[i]);
15692
15693 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15694 if (len > TG3_BPN_SIZE ||
15695 (len + i) > vpdlen)
15696 goto out_not_found;
15697
15698 memcpy(tp->board_part_number, &vpd_data[i], len);
15699
15700 out_not_found:
15701 kfree(vpd_data);
15702 if (tp->board_part_number[0])
15703 return;
15704
15705 out_no_vpd:
15706 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15707 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15708 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15709 strcpy(tp->board_part_number, "BCM5717");
15710 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15711 strcpy(tp->board_part_number, "BCM5718");
15712 else
15713 goto nomatch;
15714 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15715 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15716 strcpy(tp->board_part_number, "BCM57780");
15717 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15718 strcpy(tp->board_part_number, "BCM57760");
15719 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15720 strcpy(tp->board_part_number, "BCM57790");
15721 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15722 strcpy(tp->board_part_number, "BCM57788");
15723 else
15724 goto nomatch;
15725 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15726 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15727 strcpy(tp->board_part_number, "BCM57761");
15728 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15729 strcpy(tp->board_part_number, "BCM57765");
15730 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15731 strcpy(tp->board_part_number, "BCM57781");
15732 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15733 strcpy(tp->board_part_number, "BCM57785");
15734 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15735 strcpy(tp->board_part_number, "BCM57791");
15736 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15737 strcpy(tp->board_part_number, "BCM57795");
15738 else
15739 goto nomatch;
15740 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15741 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15742 strcpy(tp->board_part_number, "BCM57762");
15743 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15744 strcpy(tp->board_part_number, "BCM57766");
15745 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15746 strcpy(tp->board_part_number, "BCM57782");
15747 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15748 strcpy(tp->board_part_number, "BCM57786");
15749 else
15750 goto nomatch;
15751 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15752 strcpy(tp->board_part_number, "BCM95906");
15753 } else {
15754 nomatch:
15755 strcpy(tp->board_part_number, "none");
15756 }
15757 }
15758
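/* Sanity check a firmware image header in NVRAM: the first word must
 * have 0x0c000000 in its upper bits and the second word must be zero.
 */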
15759 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15760 {
15761 u32 val;
15762
15763 if (tg3_nvram_read(tp, offset, &val) ||
15764 (val & 0xfc000000) != 0x0c000000 ||
15765 tg3_nvram_read(tp, offset + 4, &val) ||
15766 val != 0)
15767 return 0;
15768
15769 return 1;
15770 }
15771
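/* Extract the bootcode version from NVRAM and append it to
 * tp->fw_ver, handling both the newer string format and the older
 * major/minor encoding.
 */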
15772 static void tg3_read_bc_ver(struct tg3 *tp)
15773 {
15774 u32 val, offset, start, ver_offset;
15775 int i, dst_off;
15776 bool newver = false;
15777
15778 if (tg3_nvram_read(tp, 0xc, &offset) ||
15779 tg3_nvram_read(tp, 0x4, &start))
15780 return;
15781
15782 offset = tg3_nvram_logical_addr(tp, offset);
15783
15784 if (tg3_nvram_read(tp, offset, &val))
15785 return;
15786
15787 if ((val & 0xfc000000) == 0x0c000000) {
15788 if (tg3_nvram_read(tp, offset + 4, &val))
15789 return;
15790
15791 if (val == 0)
15792 newver = true;
15793 }
15794
15795 dst_off = strlen(tp->fw_ver);
15796
15797 if (newver) {
15798 if (TG3_VER_SIZE - dst_off < 16 ||
15799 tg3_nvram_read(tp, offset + 8, &ver_offset))
15800 return;
15801
15802 offset = offset + ver_offset - start;
15803 for (i = 0; i < 16; i += 4) {
15804 __be32 v;
15805 if (tg3_nvram_read_be32(tp, offset + i, &v))
15806 return;
15807
15808 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15809 }
15810 } else {
15811 u32 major, minor;
15812
15813 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15814 return;
15815
15816 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15817 TG3_NVM_BCVER_MAJSFT;
15818 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15819 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15820 "v%d.%02d", major, minor);
15821 }
15822 }
15823
15824 static void tg3_read_hwsb_ver(struct tg3 *tp)
15825 {
15826 u32 val, major, minor;
15827
15828 /* Use native endian representation */
15829 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15830 return;
15831
15832 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15833 TG3_NVM_HWSB_CFG1_MAJSFT;
15834 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15835 TG3_NVM_HWSB_CFG1_MINSFT;
15836
15837 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15838 }
15839
15840 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15841 {
15842 u32 offset, major, minor, build;
15843
15844 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15845
15846 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15847 return;
15848
15849 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15850 case TG3_EEPROM_SB_REVISION_0:
15851 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15852 break;
15853 case TG3_EEPROM_SB_REVISION_2:
15854 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15855 break;
15856 case TG3_EEPROM_SB_REVISION_3:
15857 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15858 break;
15859 case TG3_EEPROM_SB_REVISION_4:
15860 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15861 break;
15862 case TG3_EEPROM_SB_REVISION_5:
15863 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15864 break;
15865 case TG3_EEPROM_SB_REVISION_6:
15866 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15867 break;
15868 default:
15869 return;
15870 }
15871
15872 if (tg3_nvram_read(tp, offset, &val))
15873 return;
15874
15875 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15876 TG3_EEPROM_SB_EDH_BLD_SHFT;
15877 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15878 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15879 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15880
15881 if (minor > 99 || build > 26)
15882 return;
15883
15884 offset = strlen(tp->fw_ver);
15885 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15886 " v%d.%02d", major, minor);
15887
15888 if (build > 0) {
15889 offset = strlen(tp->fw_ver);
15890 if (offset < TG3_VER_SIZE - 1)
15891 tp->fw_ver[offset] = 'a' + build - 1;
15892 }
15893 }
15894
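/* Find the ASF initialization directory entry in NVRAM and append
 * the management firmware version string to tp->fw_ver.
 */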
15895 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15896 {
15897 u32 val, offset, start;
15898 int i, vlen;
15899
15900 for (offset = TG3_NVM_DIR_START;
15901 offset < TG3_NVM_DIR_END;
15902 offset += TG3_NVM_DIRENT_SIZE) {
15903 if (tg3_nvram_read(tp, offset, &val))
15904 return;
15905
15906 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15907 break;
15908 }
15909
15910 if (offset == TG3_NVM_DIR_END)
15911 return;
15912
15913 if (!tg3_flag(tp, 5705_PLUS))
15914 start = 0x08000000;
15915 else if (tg3_nvram_read(tp, offset - 4, &start))
15916 return;
15917
15918 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15919 !tg3_fw_img_is_valid(tp, offset) ||
15920 tg3_nvram_read(tp, offset + 8, &val))
15921 return;
15922
15923 offset += val - start;
15924
15925 vlen = strlen(tp->fw_ver);
15926
15927 tp->fw_ver[vlen++] = ',';
15928 tp->fw_ver[vlen++] = ' ';
15929
15930 for (i = 0; i < 4; i++) {
15931 __be32 v;
15932 if (tg3_nvram_read_be32(tp, offset, &v))
15933 return;
15934
15935 offset += sizeof(v);
15936
15937 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15938 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15939 break;
15940 }
15941
15942 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15943 vlen += sizeof(v);
15944 }
15945 }
15946
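/* Detect NCSI firmware: the APE segment must carry the signature
 * magic and report ready status before the feature bits are valid.
 */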
15947 static void tg3_probe_ncsi(struct tg3 *tp)
15948 {
15949 u32 apedata;
15950
15951 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15952 if (apedata != APE_SEG_SIG_MAGIC)
15953 return;
15954
15955 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15956 if (!(apedata & APE_FW_STATUS_READY))
15957 return;
15958
15959 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15960 tg3_flag_set(tp, APE_HAS_NCSI);
15961 }
15962
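/* Append the APE (NCSI/SMASH/DASH) firmware version to tp->fw_ver. */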
15963 static void tg3_read_dash_ver(struct tg3 *tp)
15964 {
15965 int vlen;
15966 u32 apedata;
15967 char *fwtype;
15968
15969 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15970
15971 if (tg3_flag(tp, APE_HAS_NCSI))
15972 fwtype = "NCSI";
15973 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15974 fwtype = "SMASH";
15975 else
15976 fwtype = "DASH";
15977
15978 vlen = strlen(tp->fw_ver);
15979
15980 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15981 fwtype,
15982 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15983 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15984 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15985 (apedata & APE_FW_VERSION_BLDMSK));
15986 }
15987
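/* On 5762 devices the firmware version lives in OTP.  Scan the
 * byte-packed value and report the last nonzero byte before the zero
 * terminator as a two-digit version suffix.
 */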
15988 static void tg3_read_otp_ver(struct tg3 *tp)
15989 {
15990 u32 val, val2;
15991
15992 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15993 return;
15994
15995 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15996 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15997 TG3_OTP_MAGIC0_VALID(val)) {
15998 u64 val64 = (u64) val << 32 | val2;
15999 u32 ver = 0;
16000 int i, vlen;
16001
16002 for (i = 0; i < 7; i++) {
16003 if ((val64 & 0xff) == 0)
16004 break;
16005 ver = val64 & 0xff;
16006 val64 >>= 8;
16007 }
16008 vlen = strlen(tp->fw_ver);
16009 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16010 }
16011 }
16012
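/* Build the full firmware version string from whichever firmware
 * images (bootcode, self-boot, hardware self-boot, management) are
 * present in NVRAM or OTP.
 */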
16013 static void tg3_read_fw_ver(struct tg3 *tp)
16014 {
16015 u32 val;
16016 bool vpd_vers = false;
16017
16018 if (tp->fw_ver[0] != 0)
16019 vpd_vers = true;
16020
16021 if (tg3_flag(tp, NO_NVRAM)) {
16022 strcat(tp->fw_ver, "sb");
16023 tg3_read_otp_ver(tp);
16024 return;
16025 }
16026
16027 if (tg3_nvram_read(tp, 0, &val))
16028 return;
16029
16030 if (val == TG3_EEPROM_MAGIC)
16031 tg3_read_bc_ver(tp);
16032 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16033 tg3_read_sb_ver(tp, val);
16034 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16035 tg3_read_hwsb_ver(tp);
16036
16037 if (tg3_flag(tp, ENABLE_ASF)) {
16038 if (tg3_flag(tp, ENABLE_APE)) {
16039 tg3_probe_ncsi(tp);
16040 if (!vpd_vers)
16041 tg3_read_dash_ver(tp);
16042 } else if (!vpd_vers) {
16043 tg3_read_mgmtfw_ver(tp);
16044 }
16045 }
16046
16047 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16048 }
16049
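/* Pick the RX return ring size for the chip family: large-ring
 * capable devices, jumbo-capable (non-5780-class) chips, and
 * everything else each have their own maximum.
 */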
16050 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16051 {
16052 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16053 return TG3_RX_RET_MAX_SIZE_5717;
16054 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16055 return TG3_RX_RET_MAX_SIZE_5700;
16056 else
16057 return TG3_RX_RET_MAX_SIZE_5705;
16058 }
16059
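/* Host bridges known to reorder posted writes; see the
 * MBOX_WRITE_REORDER handling in tg3_get_invariants().
 */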
16060 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16061 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16062 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16063 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16064 { },
16065 };
16066
16067 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16068 {
16069 struct pci_dev *peer;
16070 unsigned int func, devnr = tp->pdev->devfn & ~7;
16071
16072 for (func = 0; func < 8; func++) {
16073 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16074 if (peer && peer != tp->pdev)
16075 break;
16076 pci_dev_put(peer);
16077 }
16078 	/* 5704 can be configured in single-port mode; set peer to
16079 * tp->pdev in that case.
16080 */
16081 if (!peer) {
16082 peer = tp->pdev;
16083 return peer;
16084 }
16085
16086 /*
16087 * We don't need to keep the refcount elevated; there's no way
16088 	 * to remove one half of this device without removing the other.
16089 */
16090 pci_dev_put(peer);
16091
16092 return peer;
16093 }
16094
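/* Derive the chip revision ID.  Newer devices report a magic value in
 * the misc host control register and expose the real ASIC revision
 * through a product-ID config register instead.  Also set the
 * chip-family feature flags derived from the revision.
 */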
16095 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16096 {
16097 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16098 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16099 u32 reg;
16100
16101 /* All devices that use the alternate
16102 * ASIC REV location have a CPMU.
16103 */
16104 tg3_flag_set(tp, CPMU_PRESENT);
16105
16106 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16107 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16108 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16109 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16110 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16111 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16112 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16113 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16114 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16115 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16116 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16117 reg = TG3PCI_GEN2_PRODID_ASICREV;
16118 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16119 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16120 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16121 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16122 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16123 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16124 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16125 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16126 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16127 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16128 reg = TG3PCI_GEN15_PRODID_ASICREV;
16129 else
16130 reg = TG3PCI_PRODID_ASICREV;
16131
16132 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16133 }
16134
16135 /* Wrong chip ID in 5752 A0. This code can be removed later
16136 * as A0 is not in production.
16137 */
16138 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16139 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16140
16141 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16142 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16143
16144 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16145 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16146 tg3_asic_rev(tp) == ASIC_REV_5720)
16147 tg3_flag_set(tp, 5717_PLUS);
16148
16149 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16150 tg3_asic_rev(tp) == ASIC_REV_57766)
16151 tg3_flag_set(tp, 57765_CLASS);
16152
16153 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16154 tg3_asic_rev(tp) == ASIC_REV_5762)
16155 tg3_flag_set(tp, 57765_PLUS);
16156
16157 /* Intentionally exclude ASIC_REV_5906 */
16158 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16159 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16160 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16161 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16162 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16163 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16164 tg3_flag(tp, 57765_PLUS))
16165 tg3_flag_set(tp, 5755_PLUS);
16166
16167 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16168 tg3_asic_rev(tp) == ASIC_REV_5714)
16169 tg3_flag_set(tp, 5780_CLASS);
16170
16171 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16172 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16173 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16174 tg3_flag(tp, 5755_PLUS) ||
16175 tg3_flag(tp, 5780_CLASS))
16176 tg3_flag_set(tp, 5750_PLUS);
16177
16178 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16179 tg3_flag(tp, 5750_PLUS))
16180 tg3_flag_set(tp, 5705_PLUS);
16181 }
16182
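/* Decide whether this device supports only 10/100 speeds, based on
 * the board ID straps, a FET PHY, or the PCI device table flags.
 */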
16183 static bool tg3_10_100_only_device(struct tg3 *tp,
16184 const struct pci_device_id *ent)
16185 {
16186 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16187
16188 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16189 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16190 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16191 return true;
16192
16193 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16194 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16195 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16196 return true;
16197 } else {
16198 return true;
16199 }
16200 }
16201
16202 return false;
16203 }
16204
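/* One-time probe of chip invariants: bus type, DMA quirks, TSO
 * capabilities, register access methods, and assorted hardware bug
 * workarounds.
 */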
16205 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16206 {
16207 u32 misc_ctrl_reg;
16208 u32 pci_state_reg, grc_misc_cfg;
16209 u32 val;
16210 u16 pci_cmd;
16211 int err;
16212
16213 	/* Force memory write invalidate off.  If we leave it on,
16214 	 * then on 5700_BX chips we have to enable a workaround.
16215 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16216 	 * to match the cacheline size.  The Broadcom driver has this
16217 	 * workaround but turns MWI off at all times and so never uses
16218 	 * it.  This seems to suggest that the workaround is insufficient.
16219 	 */
16220 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16221 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16222 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16223
16224 /* Important! -- Make sure register accesses are byteswapped
16225 * correctly. Also, for those chips that require it, make
16226 * sure that indirect register accesses are enabled before
16227 * the first operation.
16228 */
16229 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16230 &misc_ctrl_reg);
16231 tp->misc_host_ctrl |= (misc_ctrl_reg &
16232 MISC_HOST_CTRL_CHIPREV);
16233 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16234 tp->misc_host_ctrl);
16235
16236 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16237
16238 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16239 * we need to disable memory and use config. cycles
16240 * only to access all registers. The 5702/03 chips
16241 * can mistakenly decode the special cycles from the
16242 * ICH chipsets as memory write cycles, causing corruption
16243 * of register and memory space. Only certain ICH bridges
16244 * will drive special cycles with non-zero data during the
16245 * address phase which can fall within the 5703's address
16246 * range. This is not an ICH bug as the PCI spec allows
16247 * non-zero address during special cycles. However, only
16248 * these ICH bridges are known to drive non-zero addresses
16249 * during special cycles.
16250 *
16251 * Since special cycles do not cross PCI bridges, we only
16252 * enable this workaround if the 5703 is on the secondary
16253 * bus of these ICH bridges.
16254 */
16255 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16256 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16257 static struct tg3_dev_id {
16258 u32 vendor;
16259 u32 device;
16260 u32 rev;
16261 } ich_chipsets[] = {
16262 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16263 PCI_ANY_ID },
16264 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16265 PCI_ANY_ID },
16266 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16267 0xa },
16268 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16269 PCI_ANY_ID },
16270 { },
16271 };
16272 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16273 struct pci_dev *bridge = NULL;
16274
16275 while (pci_id->vendor != 0) {
16276 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16277 bridge);
16278 if (!bridge) {
16279 pci_id++;
16280 continue;
16281 }
16282 if (pci_id->rev != PCI_ANY_ID) {
16283 if (bridge->revision > pci_id->rev)
16284 continue;
16285 }
16286 if (bridge->subordinate &&
16287 (bridge->subordinate->number ==
16288 tp->pdev->bus->number)) {
16289 tg3_flag_set(tp, ICH_WORKAROUND);
16290 pci_dev_put(bridge);
16291 break;
16292 }
16293 }
16294 }
16295
16296 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16297 static struct tg3_dev_id {
16298 u32 vendor;
16299 u32 device;
16300 } bridge_chipsets[] = {
16301 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16302 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16303 { },
16304 };
16305 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16306 struct pci_dev *bridge = NULL;
16307
16308 while (pci_id->vendor != 0) {
16309 bridge = pci_get_device(pci_id->vendor,
16310 pci_id->device,
16311 bridge);
16312 if (!bridge) {
16313 pci_id++;
16314 continue;
16315 }
16316 if (bridge->subordinate &&
16317 (bridge->subordinate->number <=
16318 tp->pdev->bus->number) &&
16319 (bridge->subordinate->busn_res.end >=
16320 tp->pdev->bus->number)) {
16321 tg3_flag_set(tp, 5701_DMA_BUG);
16322 pci_dev_put(bridge);
16323 break;
16324 }
16325 }
16326 }
16327
16328 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16329 	 * DMA addresses > 40-bit.  This bridge may have additional
16330 	 * 57xx devices behind it in some 4-port NIC designs, for example.
16331 * Any tg3 device found behind the bridge will also need the 40-bit
16332 * DMA workaround.
16333 */
16334 if (tg3_flag(tp, 5780_CLASS)) {
16335 tg3_flag_set(tp, 40BIT_DMA_BUG);
16336 tp->msi_cap = tp->pdev->msi_cap;
16337 } else {
16338 struct pci_dev *bridge = NULL;
16339
16340 do {
16341 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16342 PCI_DEVICE_ID_SERVERWORKS_EPB,
16343 bridge);
16344 if (bridge && bridge->subordinate &&
16345 (bridge->subordinate->number <=
16346 tp->pdev->bus->number) &&
16347 (bridge->subordinate->busn_res.end >=
16348 tp->pdev->bus->number)) {
16349 tg3_flag_set(tp, 40BIT_DMA_BUG);
16350 pci_dev_put(bridge);
16351 break;
16352 }
16353 } while (bridge);
16354 }
16355
16356 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16357 tg3_asic_rev(tp) == ASIC_REV_5714)
16358 tp->pdev_peer = tg3_find_peer(tp);
16359
16360 /* Determine TSO capabilities */
16361 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16362 ; /* Do nothing. HW bug. */
16363 else if (tg3_flag(tp, 57765_PLUS))
16364 tg3_flag_set(tp, HW_TSO_3);
16365 else if (tg3_flag(tp, 5755_PLUS) ||
16366 tg3_asic_rev(tp) == ASIC_REV_5906)
16367 tg3_flag_set(tp, HW_TSO_2);
16368 else if (tg3_flag(tp, 5750_PLUS)) {
16369 tg3_flag_set(tp, HW_TSO_1);
16370 tg3_flag_set(tp, TSO_BUG);
16371 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16372 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16373 tg3_flag_clear(tp, TSO_BUG);
16374 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16375 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16376 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16377 tg3_flag_set(tp, FW_TSO);
16378 tg3_flag_set(tp, TSO_BUG);
16379 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16380 tp->fw_needed = FIRMWARE_TG3TSO5;
16381 else
16382 tp->fw_needed = FIRMWARE_TG3TSO;
16383 }
16384
16385 /* Selectively allow TSO based on operating conditions */
16386 if (tg3_flag(tp, HW_TSO_1) ||
16387 tg3_flag(tp, HW_TSO_2) ||
16388 tg3_flag(tp, HW_TSO_3) ||
16389 tg3_flag(tp, FW_TSO)) {
16390 /* For firmware TSO, assume ASF is disabled.
16391 * We'll disable TSO later if we discover ASF
16392 * is enabled in tg3_get_eeprom_hw_cfg().
16393 */
16394 tg3_flag_set(tp, TSO_CAPABLE);
16395 } else {
16396 tg3_flag_clear(tp, TSO_CAPABLE);
16397 tg3_flag_clear(tp, TSO_BUG);
16398 tp->fw_needed = NULL;
16399 }
16400
16401 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16402 tp->fw_needed = FIRMWARE_TG3;
16403
16404 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16405 tp->fw_needed = FIRMWARE_TG357766;
16406
16407 tp->irq_max = 1;
16408
16409 if (tg3_flag(tp, 5750_PLUS)) {
16410 tg3_flag_set(tp, SUPPORT_MSI);
16411 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16412 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16413 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16414 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16415 tp->pdev_peer == tp->pdev))
16416 tg3_flag_clear(tp, SUPPORT_MSI);
16417
16418 if (tg3_flag(tp, 5755_PLUS) ||
16419 tg3_asic_rev(tp) == ASIC_REV_5906) {
16420 tg3_flag_set(tp, 1SHOT_MSI);
16421 }
16422
16423 if (tg3_flag(tp, 57765_PLUS)) {
16424 tg3_flag_set(tp, SUPPORT_MSIX);
16425 tp->irq_max = TG3_IRQ_MAX_VECS;
16426 }
16427 }
16428
16429 tp->txq_max = 1;
16430 tp->rxq_max = 1;
16431 if (tp->irq_max > 1) {
16432 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16433 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16434
16435 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16436 tg3_asic_rev(tp) == ASIC_REV_5720)
16437 tp->txq_max = tp->irq_max - 1;
16438 }
16439
16440 if (tg3_flag(tp, 5755_PLUS) ||
16441 tg3_asic_rev(tp) == ASIC_REV_5906)
16442 tg3_flag_set(tp, SHORT_DMA_BUG);
16443
16444 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16445 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16446
16447 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16448 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16449 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16450 tg3_asic_rev(tp) == ASIC_REV_5762)
16451 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16452
16453 if (tg3_flag(tp, 57765_PLUS) &&
16454 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16455 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16456
16457 if (!tg3_flag(tp, 5705_PLUS) ||
16458 tg3_flag(tp, 5780_CLASS) ||
16459 tg3_flag(tp, USE_JUMBO_BDFLAG))
16460 tg3_flag_set(tp, JUMBO_CAPABLE);
16461
16462 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16463 &pci_state_reg);
16464
16465 if (pci_is_pcie(tp->pdev)) {
16466 u16 lnkctl;
16467
16468 tg3_flag_set(tp, PCI_EXPRESS);
16469
16470 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16471 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16472 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16473 tg3_flag_clear(tp, HW_TSO_2);
16474 tg3_flag_clear(tp, TSO_CAPABLE);
16475 }
16476 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16477 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16478 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16479 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16480 tg3_flag_set(tp, CLKREQ_BUG);
16481 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16482 tg3_flag_set(tp, L1PLLPD_EN);
16483 }
16484 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16485 /* BCM5785 devices are effectively PCIe devices, and should
16486 * follow PCIe codepaths, but do not have a PCIe capabilities
16487 * section.
16488 */
16489 tg3_flag_set(tp, PCI_EXPRESS);
16490 } else if (!tg3_flag(tp, 5705_PLUS) ||
16491 tg3_flag(tp, 5780_CLASS)) {
16492 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16493 if (!tp->pcix_cap) {
16494 dev_err(&tp->pdev->dev,
16495 "Cannot find PCI-X capability, aborting\n");
16496 return -EIO;
16497 }
16498
16499 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16500 tg3_flag_set(tp, PCIX_MODE);
16501 }
16502
16503 /* If we have an AMD 762 or VIA K8T800 chipset, write
16504 * reordering to the mailbox registers done by the host
16505 * controller can cause major troubles. We read back from
16506 * every mailbox register write to force the writes to be
16507 * posted to the chip in order.
16508 */
16509 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16510 !tg3_flag(tp, PCI_EXPRESS))
16511 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16512
16513 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16514 &tp->pci_cacheline_sz);
16515 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16516 &tp->pci_lat_timer);
16517 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16518 tp->pci_lat_timer < 64) {
16519 tp->pci_lat_timer = 64;
16520 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16521 tp->pci_lat_timer);
16522 }
16523
16524 /* Important! -- It is critical that the PCI-X hw workaround
16525 * situation is decided before the first MMIO register access.
16526 */
16527 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16528 /* 5700 BX chips need to have their TX producer index
16529 * mailboxes written twice to workaround a bug.
16530 */
16531 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16532
16533 /* If we are in PCI-X mode, enable register write workaround.
16534 *
16535 * The workaround is to use indirect register accesses
16536 * for all chip writes not to mailbox registers.
16537 */
16538 if (tg3_flag(tp, PCIX_MODE)) {
16539 u32 pm_reg;
16540
16541 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16542
16543 			/* The chip can have its power management PCI config
16544 * space registers clobbered due to this bug.
16545 * So explicitly force the chip into D0 here.
16546 */
16547 pci_read_config_dword(tp->pdev,
16548 tp->pdev->pm_cap + PCI_PM_CTRL,
16549 &pm_reg);
16550 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16551 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16552 pci_write_config_dword(tp->pdev,
16553 tp->pdev->pm_cap + PCI_PM_CTRL,
16554 pm_reg);
16555
16556 /* Also, force SERR#/PERR# in PCI command. */
16557 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16558 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16559 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16560 }
16561 }
16562
16563 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16564 tg3_flag_set(tp, PCI_HIGH_SPEED);
16565 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16566 tg3_flag_set(tp, PCI_32BIT);
16567
16568 /* Chip-specific fixup from Broadcom driver */
16569 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16570 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16571 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16572 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16573 }
16574
16575 /* Default fast path register access methods */
16576 tp->read32 = tg3_read32;
16577 tp->write32 = tg3_write32;
16578 tp->read32_mbox = tg3_read32;
16579 tp->write32_mbox = tg3_write32;
16580 tp->write32_tx_mbox = tg3_write32;
16581 tp->write32_rx_mbox = tg3_write32;
16582
16583 /* Various workaround register access methods */
16584 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16585 tp->write32 = tg3_write_indirect_reg32;
16586 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16587 (tg3_flag(tp, PCI_EXPRESS) &&
16588 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16589 /*
16590 		 * Back-to-back register writes can cause problems on these
16591 		 * chips; the workaround is to read back all register writes
16592 		 * except those to mailbox registers.
16593 *
16594 * See tg3_write_indirect_reg32().
16595 */
16596 tp->write32 = tg3_write_flush_reg32;
16597 }
16598
16599 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16600 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16601 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16602 tp->write32_rx_mbox = tg3_write_flush_reg32;
16603 }
16604
16605 if (tg3_flag(tp, ICH_WORKAROUND)) {
16606 tp->read32 = tg3_read_indirect_reg32;
16607 tp->write32 = tg3_write_indirect_reg32;
16608 tp->read32_mbox = tg3_read_indirect_mbox;
16609 tp->write32_mbox = tg3_write_indirect_mbox;
16610 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16611 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16612
16613 iounmap(tp->regs);
16614 tp->regs = NULL;
16615
16616 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16617 pci_cmd &= ~PCI_COMMAND_MEMORY;
16618 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16619 }
16620 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16621 tp->read32_mbox = tg3_read32_mbox_5906;
16622 tp->write32_mbox = tg3_write32_mbox_5906;
16623 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16624 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16625 }
16626
16627 if (tp->write32 == tg3_write_indirect_reg32 ||
16628 (tg3_flag(tp, PCIX_MODE) &&
16629 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16630 tg3_asic_rev(tp) == ASIC_REV_5701)))
16631 tg3_flag_set(tp, SRAM_USE_CONFIG);
16632
16633 /* The memory arbiter has to be enabled in order for SRAM accesses
16634 * to succeed. Normally on powerup the tg3 chip firmware will make
16635 * sure it is enabled, but other entities such as system netboot
16636 * code might disable it.
16637 */
16638 val = tr32(MEMARB_MODE);
16639 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16640
16641 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16642 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16643 tg3_flag(tp, 5780_CLASS)) {
16644 if (tg3_flag(tp, PCIX_MODE)) {
16645 pci_read_config_dword(tp->pdev,
16646 tp->pcix_cap + PCI_X_STATUS,
16647 &val);
16648 tp->pci_fn = val & 0x7;
16649 }
16650 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16651 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16652 tg3_asic_rev(tp) == ASIC_REV_5720) {
16653 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16654 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16655 val = tr32(TG3_CPMU_STATUS);
16656
16657 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16658 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16659 else
16660 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16661 TG3_CPMU_STATUS_FSHFT_5719;
16662 }
16663
16664 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16665 tp->write32_tx_mbox = tg3_write_flush_reg32;
16666 tp->write32_rx_mbox = tg3_write_flush_reg32;
16667 }
16668
16669 /* Get eeprom hw config before calling tg3_set_power_state().
16670 * In particular, the TG3_FLAG_IS_NIC flag must be
16671 * determined before calling tg3_set_power_state() so that
16672 * we know whether or not to switch out of Vaux power.
16673 * When the flag is set, it means that GPIO1 is used for eeprom
16674 * write protect and also implies that it is a LOM where GPIOs
16675 * are not used to switch power.
16676 */
16677 tg3_get_eeprom_hw_cfg(tp);
16678
16679 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16680 tg3_flag_clear(tp, TSO_CAPABLE);
16681 tg3_flag_clear(tp, TSO_BUG);
16682 tp->fw_needed = NULL;
16683 }
16684
16685 if (tg3_flag(tp, ENABLE_APE)) {
16686 /* Allow reads and writes to the
16687 * APE register and memory space.
16688 */
16689 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16690 PCISTATE_ALLOW_APE_SHMEM_WR |
16691 PCISTATE_ALLOW_APE_PSPACE_WR;
16692 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16693 pci_state_reg);
16694
16695 tg3_ape_lock_init(tp);
16696 tp->ape_hb_interval =
16697 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16698 }
16699
16700 /* Set up tp->grc_local_ctrl before calling
16701 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16702 * will bring 5700's external PHY out of reset.
16703 * It is also used as eeprom write protect on LOMs.
16704 */
16705 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16706 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16707 tg3_flag(tp, EEPROM_WRITE_PROT))
16708 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16709 GRC_LCLCTRL_GPIO_OUTPUT1);
16710 /* Unused GPIO3 must be driven as output on 5752 because there
16711 * are no pull-up resistors on unused GPIO pins.
16712 */
16713 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16714 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16715
16716 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16717 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16718 tg3_flag(tp, 57765_CLASS))
16719 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16720
16721 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16722 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16723 /* Turn off the debug UART. */
16724 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16725 if (tg3_flag(tp, IS_NIC))
16726 /* Keep VMain power. */
16727 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16728 GRC_LCLCTRL_GPIO_OUTPUT0;
16729 }
16730
16731 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16732 tp->grc_local_ctrl |=
16733 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16734
16735 /* Switch out of Vaux if it is a NIC */
16736 tg3_pwrsrc_switch_to_vmain(tp);
16737
16738 /* Derive initial jumbo mode from MTU assigned in
16739 * ether_setup() via the alloc_etherdev() call
16740 */
16741 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16742 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16743
16744 /* Determine WakeOnLan speed to use. */
16745 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16746 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16747 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16748 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16749 tg3_flag_clear(tp, WOL_SPEED_100MB);
16750 } else {
16751 tg3_flag_set(tp, WOL_SPEED_100MB);
16752 }
16753
16754 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16755 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16756
16757 /* A few boards don't want Ethernet@WireSpeed phy feature */
16758 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16759 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16760 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16761 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16762 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16763 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16764 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16765
16766 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16767 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16768 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16769 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16770 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16771
16772 if (tg3_flag(tp, 5705_PLUS) &&
16773 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16774 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16775 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16776 !tg3_flag(tp, 57765_PLUS)) {
16777 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16778 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16779 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16780 tg3_asic_rev(tp) == ASIC_REV_5761) {
16781 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16782 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16783 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16784 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16785 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16786 } else
16787 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16788 }
16789
16790 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16791 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16792 tp->phy_otp = tg3_read_otp_phycfg(tp);
16793 if (tp->phy_otp == 0)
16794 tp->phy_otp = TG3_OTP_DEFAULT;
16795 }
16796
16797 if (tg3_flag(tp, CPMU_PRESENT))
16798 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16799 else
16800 tp->mi_mode = MAC_MI_MODE_BASE;
16801
16802 tp->coalesce_mode = 0;
16803 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16804 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16805 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16806
16807 /* Set these bits to enable statistics workaround. */
16808 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16809 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16810 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16811 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16812 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16813 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16814 }
16815
16816 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16817 tg3_asic_rev(tp) == ASIC_REV_57780)
16818 tg3_flag_set(tp, USE_PHYLIB);
16819
16820 err = tg3_mdio_init(tp);
16821 if (err)
16822 return err;
16823
16824 /* Initialize data/descriptor byte/word swapping. */
16825 val = tr32(GRC_MODE);
16826 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16827 tg3_asic_rev(tp) == ASIC_REV_5762)
16828 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16829 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16830 GRC_MODE_B2HRX_ENABLE |
16831 GRC_MODE_HTX2B_ENABLE |
16832 GRC_MODE_HOST_STACKUP);
16833 else
16834 val &= GRC_MODE_HOST_STACKUP;
16835
16836 tw32(GRC_MODE, val | tp->grc_mode);
16837
16838 tg3_switch_clocks(tp);
16839
16840 /* Clear this out for sanity. */
16841 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16842
16843 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16844 tw32(TG3PCI_REG_BASE_ADDR, 0);
16845
16846 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16847 &pci_state_reg);
16848 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16849 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16850 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16851 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16852 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16853 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16854 void __iomem *sram_base;
16855
16856 /* Write some dummy words into the SRAM status block
16857 * area, see if it reads back correctly. If the return
16858 * value is bad, force enable the PCIX workaround.
16859 */
16860 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16861
16862 writel(0x00000000, sram_base);
16863 writel(0x00000000, sram_base + 4);
16864 writel(0xffffffff, sram_base + 4);
16865 if (readl(sram_base) != 0x00000000)
16866 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16867 }
16868 }
16869
16870 udelay(50);
16871 tg3_nvram_init(tp);
16872
16873 /* If the device has an NVRAM, no need to load patch firmware */
16874 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16875 !tg3_flag(tp, NO_NVRAM))
16876 tp->fw_needed = NULL;
16877
16878 grc_misc_cfg = tr32(GRC_MISC_CFG);
16879 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16880
16881 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16882 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16883 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16884 tg3_flag_set(tp, IS_5788);
16885
16886 if (!tg3_flag(tp, IS_5788) &&
16887 tg3_asic_rev(tp) != ASIC_REV_5700)
16888 tg3_flag_set(tp, TAGGED_STATUS);
16889 if (tg3_flag(tp, TAGGED_STATUS)) {
16890 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16891 HOSTCC_MODE_CLRTICK_TXBD);
16892
16893 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16894 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16895 tp->misc_host_ctrl);
16896 }
16897
16898 /* Preserve the APE MAC_MODE bits */
16899 if (tg3_flag(tp, ENABLE_APE))
16900 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16901 else
16902 tp->mac_mode = 0;
16903
16904 if (tg3_10_100_only_device(tp, ent))
16905 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16906
16907 err = tg3_phy_probe(tp);
16908 if (err) {
16909 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16910 /* ... but do not return immediately ... */
16911 tg3_mdio_fini(tp);
16912 }
16913
16914 tg3_read_vpd(tp);
16915 tg3_read_fw_ver(tp);
16916
16917 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16918 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16919 } else {
16920 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16921 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16922 else
16923 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16924 }
16925
16926 /* 5700 {AX,BX} chips have a broken status block link
16927 * change bit implementation, so we must use the
16928 * status register in those cases.
16929 */
16930 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16931 tg3_flag_set(tp, USE_LINKCHG_REG);
16932 else
16933 tg3_flag_clear(tp, USE_LINKCHG_REG);
16934
16935 /* The led_ctrl is set during tg3_phy_probe; here we may
16936 * have to force the link status polling mechanism based
16937 * upon subsystem IDs.
16938 */
16939 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16940 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16941 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16942 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16943 tg3_flag_set(tp, USE_LINKCHG_REG);
16944 }
16945
16946 /* For all SERDES we poll the MAC status register. */
16947 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16948 tg3_flag_set(tp, POLL_SERDES);
16949 else
16950 tg3_flag_clear(tp, POLL_SERDES);
16951
16952 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16953 tg3_flag_set(tp, POLL_CPMU_LINK);
16954
16955 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16956 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
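/* The 5701 in PCI-X mode reportedly cannot DMA to 2-byte-aligned
 * receive buffers, so the NET_IP_ALIGN offset is dropped there; on
 * architectures without efficient unaligned access, force-copy every
 * packet (threshold ~0) so the IP header ends up aligned again.
 */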
16957 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16958 tg3_flag(tp, PCIX_MODE)) {
16959 tp->rx_offset = NET_SKB_PAD;
16960 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16961 tp->rx_copy_thresh = ~(u16)0;
16962 #endif
16963 }
16964
16965 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16966 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16967 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16968
16969 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16970
16971 /* Increment the rx prod index on the rx std ring by at most
16972 * 8 for these chips to work around hw errata.
16973 */
16974 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16975 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16976 tg3_asic_rev(tp) == ASIC_REV_5755)
16977 tp->rx_std_max_post = 8;
16978
16979 if (tg3_flag(tp, ASPM_WORKAROUND))
16980 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16981 PCIE_PWR_MGMT_L1_THRESH_MSK;
16982
16983 return err;
16984 }
16985
16986 static int tg3_get_device_address(struct tg3 *tp)
16987 {
16988 struct net_device *dev = tp->dev;
16989 u32 hi, lo, mac_offset;
16990 int addr_ok = 0;
16991 int err;
16992
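/* Address sources are tried in order: platform firmware, the SSB
 * host (for embedded cores), the SRAM mailbox written by bootcode,
 * NVRAM, and finally the live MAC address registers.
 */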
16993 if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
16994 return 0;
16995
16996 if (tg3_flag(tp, IS_SSB_CORE)) {
16997 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16998 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16999 return 0;
17000 }
17001
17002 mac_offset = 0x7c;
17003 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17004 tg3_flag(tp, 5780_CLASS)) {
17005 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17006 mac_offset = 0xcc;
17007 if (tg3_nvram_lock(tp))
17008 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17009 else
17010 tg3_nvram_unlock(tp);
17011 } else if (tg3_flag(tp, 5717_PLUS)) {
17012 if (tp->pci_fn & 1)
17013 mac_offset = 0xcc;
17014 if (tp->pci_fn > 1)
17015 mac_offset += 0x18c;
17016 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17017 mac_offset = 0x10;
17018
17019 /* First try to get it from MAC address mailbox. */
17020 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
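/* 0x484b is ASCII "HK"; the driver treats it as the bootcode's
 * valid-address signature in the high mailbox word.
 */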
17021 if ((hi >> 16) == 0x484b) {
17022 dev->dev_addr[0] = (hi >> 8) & 0xff;
17023 dev->dev_addr[1] = (hi >> 0) & 0xff;
17024
17025 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17026 dev->dev_addr[2] = (lo >> 24) & 0xff;
17027 dev->dev_addr[3] = (lo >> 16) & 0xff;
17028 dev->dev_addr[4] = (lo >> 8) & 0xff;
17029 dev->dev_addr[5] = (lo >> 0) & 0xff;
17030
17031 /* Some old bootcode may report a 0 MAC address in SRAM */
17032 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17033 }
17034 if (!addr_ok) {
17035 /* Next, try NVRAM. */
17036 if (!tg3_flag(tp, NO_NVRAM) &&
17037 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17038 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17039 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17040 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17041 }
17042 /* Finally just fetch it out of the MAC control regs. */
17043 else {
17044 hi = tr32(MAC_ADDR_0_HIGH);
17045 lo = tr32(MAC_ADDR_0_LOW);
17046
17047 dev->dev_addr[5] = lo & 0xff;
17048 dev->dev_addr[4] = (lo >> 8) & 0xff;
17049 dev->dev_addr[3] = (lo >> 16) & 0xff;
17050 dev->dev_addr[2] = (lo >> 24) & 0xff;
17051 dev->dev_addr[1] = hi & 0xff;
17052 dev->dev_addr[0] = (hi >> 8) & 0xff;
17053 }
17054 }
17055
17056 if (!is_valid_ether_addr(&dev->dev_addr[0]))
17057 return -EINVAL;
17058 return 0;
17059 }
17060
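/* Burst-boundary goals for tg3_calc_dma_bndry(): stop DMA bursts at
 * every cache line, or only at multiples of the cache line size,
 * depending on how the host's PCI controller behaves.
 */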
17061 #define BOUNDARY_SINGLE_CACHELINE 1
17062 #define BOUNDARY_MULTI_CACHELINE 2
17063
17064 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17065 {
17066 int cacheline_size;
17067 u8 byte;
17068 int goal;
17069
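/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the
 * multiply by 4 below; a value of 0 means the size is unknown, in
 * which case we fall back to 1024 bytes.
 */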
17070 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17071 if (byte == 0)
17072 cacheline_size = 1024;
17073 else
17074 cacheline_size = (int) byte * 4;
17075
17076 /* On 5703 and later chips, the boundary bits have no
17077 * effect.
17078 */
17079 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17080 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17081 !tg3_flag(tp, PCI_EXPRESS))
17082 goto out;
17083
17084 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17085 goal = BOUNDARY_MULTI_CACHELINE;
17086 #else
17087 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17088 goal = BOUNDARY_SINGLE_CACHELINE;
17089 #else
17090 goal = 0;
17091 #endif
17092 #endif
17093
17094 if (tg3_flag(tp, 57765_PLUS)) {
17095 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17096 goto out;
17097 }
17098
17099 if (!goal)
17100 goto out;
17101
17102 /* PCI controllers on most RISC systems tend to disconnect
17103 * when a device tries to burst across a cache-line boundary.
17104 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17105 *
17106 * Unfortunately, for PCI-E there are only limited
17107 * write-side controls for this, and thus for reads
17108 * we will still get the disconnects. We'll also waste
17109 * these PCI cycles for both read and write for chips
17110 * other than 5700 and 5701 which do not implement the
17111 * boundary bits.
17112 */
17113 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17114 switch (cacheline_size) {
17115 case 16:
17116 case 32:
17117 case 64:
17118 case 128:
17119 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17120 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17121 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17122 } else {
17123 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17124 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17125 }
17126 break;
17127
17128 case 256:
17129 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17130 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17131 break;
17132
17133 default:
17134 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17135 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17136 break;
17137 }
17138 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17139 switch (cacheline_size) {
17140 case 16:
17141 case 32:
17142 case 64:
17143 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17144 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17145 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17146 break;
17147 }
17148 fallthrough;
17149 case 128:
17150 default:
17151 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17152 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17153 break;
17154 }
17155 } else {
17156 switch (cacheline_size) {
17157 case 16:
17158 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17159 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17160 DMA_RWCTRL_WRITE_BNDRY_16);
17161 break;
17162 }
17163 fallthrough;
17164 case 32:
17165 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17166 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17167 DMA_RWCTRL_WRITE_BNDRY_32);
17168 break;
17169 }
17170 fallthrough;
17171 case 64:
17172 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17173 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17174 DMA_RWCTRL_WRITE_BNDRY_64);
17175 break;
17176 }
17177 fallthrough;
17178 case 128:
17179 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17180 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17181 DMA_RWCTRL_WRITE_BNDRY_128);
17182 break;
17183 }
17184 fallthrough;
17185 case 256:
17186 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17187 DMA_RWCTRL_WRITE_BNDRY_256);
17188 break;
17189 case 512:
17190 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17191 DMA_RWCTRL_WRITE_BNDRY_512);
17192 break;
17193 case 1024:
17194 default:
17195 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17196 DMA_RWCTRL_WRITE_BNDRY_1024);
17197 break;
17198 }
17199 }
17200
17201 out:
17202 return val;
17203 }
17204
17205 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17206 int size, bool to_device)
17207 {
17208 struct tg3_internal_buffer_desc test_desc;
17209 u32 sram_dma_descs;
17210 int i, ret;
17211
17212 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17213
17214 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17215 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17216 tw32(RDMAC_STATUS, 0);
17217 tw32(WDMAC_STATUS, 0);
17218
17219 tw32(BUFMGR_MODE, 0);
17220 tw32(FTQ_RESET, 0);
17221
17222 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17223 test_desc.addr_lo = buf_dma & 0xffffffff;
17224 test_desc.nic_mbuf = 0x00002100;
17225 test_desc.len = size;
17226
17227 /*
17228 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17229 * the *second* time the tg3 driver was getting loaded after an
17230 * initial scan.
17231 *
17232 * Broadcom tells me:
17233 * ...the DMA engine is connected to the GRC block and a DMA
17234 * reset may affect the GRC block in some unpredictable way...
17235 * The behavior of resets to individual blocks has not been tested.
17236 *
17237 * Broadcom noted the GRC reset will also reset all sub-components.
17238 */
17239 if (to_device) {
17240 test_desc.cqid_sqid = (13 << 8) | 2;
17241
17242 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17243 udelay(40);
17244 } else {
17245 test_desc.cqid_sqid = (16 << 8) | 7;
17246
17247 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17248 udelay(40);
17249 }
17250 test_desc.flags = 0x00000005;
17251
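/* Copy the descriptor into NIC SRAM one word at a time through the
 * PCI memory-window registers, then hand its SRAM address to the
 * read or write DMA FIFO below.
 */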
17252 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17253 u32 val;
17254
17255 val = *(((u32 *)&test_desc) + i);
17256 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17257 sram_dma_descs + (i * sizeof(u32)));
17258 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17259 }
17260 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17261
17262 if (to_device)
17263 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17264 else
17265 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17266
17267 ret = -ENODEV;
17268 for (i = 0; i < 40; i++) {
17269 u32 val;
17270
17271 if (to_device)
17272 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17273 else
17274 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17275 if ((val & 0xffff) == sram_dma_descs) {
17276 ret = 0;
17277 break;
17278 }
17279
17280 udelay(100);
17281 }
17282
17283 return ret;
17284 }
17285
17286 #define TEST_BUFFER_SIZE 0x2000
17287
17288 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17289 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17290 { },
17291 };
17292
17293 static int tg3_test_dma(struct tg3 *tp)
17294 {
17295 dma_addr_t buf_dma;
17296 u32 *buf, saved_dma_rwctrl;
17297 int ret = 0;
17298
17299 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17300 &buf_dma, GFP_KERNEL);
17301 if (!buf) {
17302 ret = -ENOMEM;
17303 goto out_nofree;
17304 }
17305
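/* Program the PCI bus commands the chip uses for DMA; per the PCI
 * spec, command 0x7 is Memory Write and 0x6 is Memory Read.
 */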
17306 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17307 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17308
17309 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17310
17311 if (tg3_flag(tp, 57765_PLUS))
17312 goto out;
17313
17314 if (tg3_flag(tp, PCI_EXPRESS)) {
17315 /* DMA read watermark not used on PCIE */
17316 tp->dma_rwctrl |= 0x00180000;
17317 } else if (!tg3_flag(tp, PCIX_MODE)) {
17318 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17319 tg3_asic_rev(tp) == ASIC_REV_5750)
17320 tp->dma_rwctrl |= 0x003f0000;
17321 else
17322 tp->dma_rwctrl |= 0x003f000f;
17323 } else {
17324 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17325 tg3_asic_rev(tp) == ASIC_REV_5704) {
17326 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17327 u32 read_water = 0x7;
17328
17329 /* If the 5704 is behind the EPB bridge, we can
17330 * do the less restrictive ONE_DMA workaround for
17331 * better performance.
17332 */
17333 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17334 tg3_asic_rev(tp) == ASIC_REV_5704)
17335 tp->dma_rwctrl |= 0x8000;
17336 else if (ccval == 0x6 || ccval == 0x7)
17337 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17338
17339 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17340 read_water = 4;
17341 /* Set bit 23 to enable PCIX hw bug fix */
17342 tp->dma_rwctrl |=
17343 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17344 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17345 (1 << 23);
17346 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17347 /* 5780 always in PCIX mode */
17348 tp->dma_rwctrl |= 0x00144000;
17349 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17350 /* 5714 always in PCIX mode */
17351 tp->dma_rwctrl |= 0x00148000;
17352 } else {
17353 tp->dma_rwctrl |= 0x001b000f;
17354 }
17355 }
17356 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17357 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17358
17359 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17360 tg3_asic_rev(tp) == ASIC_REV_5704)
17361 tp->dma_rwctrl &= 0xfffffff0;
17362
17363 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17364 tg3_asic_rev(tp) == ASIC_REV_5701) {
17365 /* Remove this if it causes problems for some boards. */
17366 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17367
17368 /* On 5700/5701 chips, we need to set this bit.
17369 * Otherwise the chip will issue cacheline transactions
17370 * to streamable DMA memory with not all the byte
17371 * enables turned on. This is an error on several
17372 * RISC PCI controllers, in particular sparc64.
17373 *
17374 * On 5703/5704 chips, this bit has been reassigned
17375 * a different meaning. In particular, it is used
17376 * on those chips to enable a PCI-X workaround.
17377 */
17378 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17379 }
17380
17381 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17382
17383
17384 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17385 tg3_asic_rev(tp) != ASIC_REV_5701)
17386 goto out;
17387
17388 /* It is best to perform DMA test with maximum write burst size
17389 * to expose the 5700/5701 write DMA bug.
17390 */
17391 saved_dma_rwctrl = tp->dma_rwctrl;
17392 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17393 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17394
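/* Fill the buffer with a known pattern, DMA it to the chip and back,
 * then verify it. On corruption, tighten the write boundary to 16
 * bytes once and retry; corruption at the 16-byte boundary is fatal.
 */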
17395 while (1) {
17396 u32 *p = buf, i;
17397
17398 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17399 p[i] = i;
17400
17401 /* Send the buffer to the chip. */
17402 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17403 if (ret) {
17404 dev_err(&tp->pdev->dev,
17405 "%s: Buffer write failed. err = %d\n",
17406 __func__, ret);
17407 break;
17408 }
17409
17410 /* Now read it back. */
17411 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17412 if (ret) {
17413 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17414 "err = %d\n", __func__, ret);
17415 break;
17416 }
17417
17418 /* Verify it. */
17419 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17420 if (p[i] == i)
17421 continue;
17422
17423 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17424 DMA_RWCTRL_WRITE_BNDRY_16) {
17425 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17426 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17427 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17428 break;
17429 } else {
17430 dev_err(&tp->pdev->dev,
17431 "%s: Buffer corrupted on read back! "
17432 "(%d != %d)\n", __func__, p[i], i);
17433 ret = -ENODEV;
17434 goto out;
17435 }
17436 }
17437
17438 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17439 /* Success. */
17440 ret = 0;
17441 break;
17442 }
17443 }
17444 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17445 DMA_RWCTRL_WRITE_BNDRY_16) {
17446 /* DMA test passed without adjusting DMA boundary,
17447 * now look for chipsets that are known to expose the
17448 * DMA bug without failing the test.
17449 */
17450 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17451 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17452 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17453 } else {
17454 /* Safe to use the calculated DMA boundary. */
17455 tp->dma_rwctrl = saved_dma_rwctrl;
17456 }
17457
17458 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17459 }
17460
17461 out:
17462 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17463 out_nofree:
17464 return ret;
17465 }
17466
17467 static void tg3_init_bufmgr_config(struct tg3 *tp)
17468 {
17469 if (tg3_flag(tp, 57765_PLUS)) {
17470 tp->bufmgr_config.mbuf_read_dma_low_water =
17471 DEFAULT_MB_RDMA_LOW_WATER_5705;
17472 tp->bufmgr_config.mbuf_mac_rx_low_water =
17473 DEFAULT_MB_MACRX_LOW_WATER_57765;
17474 tp->bufmgr_config.mbuf_high_water =
17475 DEFAULT_MB_HIGH_WATER_57765;
17476
17477 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17478 DEFAULT_MB_RDMA_LOW_WATER_5705;
17479 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17480 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17481 tp->bufmgr_config.mbuf_high_water_jumbo =
17482 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17483 } else if (tg3_flag(tp, 5705_PLUS)) {
17484 tp->bufmgr_config.mbuf_read_dma_low_water =
17485 DEFAULT_MB_RDMA_LOW_WATER_5705;
17486 tp->bufmgr_config.mbuf_mac_rx_low_water =
17487 DEFAULT_MB_MACRX_LOW_WATER_5705;
17488 tp->bufmgr_config.mbuf_high_water =
17489 DEFAULT_MB_HIGH_WATER_5705;
17490 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17491 tp->bufmgr_config.mbuf_mac_rx_low_water =
17492 DEFAULT_MB_MACRX_LOW_WATER_5906;
17493 tp->bufmgr_config.mbuf_high_water =
17494 DEFAULT_MB_HIGH_WATER_5906;
17495 }
17496
17497 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17498 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17499 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17500 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17501 tp->bufmgr_config.mbuf_high_water_jumbo =
17502 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17503 } else {
17504 tp->bufmgr_config.mbuf_read_dma_low_water =
17505 DEFAULT_MB_RDMA_LOW_WATER;
17506 tp->bufmgr_config.mbuf_mac_rx_low_water =
17507 DEFAULT_MB_MACRX_LOW_WATER;
17508 tp->bufmgr_config.mbuf_high_water =
17509 DEFAULT_MB_HIGH_WATER;
17510
17511 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17512 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17513 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17514 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17515 tp->bufmgr_config.mbuf_high_water_jumbo =
17516 DEFAULT_MB_HIGH_WATER_JUMBO;
17517 }
17518
17519 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17520 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17521 }
17522
17523 static char *tg3_phy_string(struct tg3 *tp)
17524 {
17525 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17526 case TG3_PHY_ID_BCM5400: return "5400";
17527 case TG3_PHY_ID_BCM5401: return "5401";
17528 case TG3_PHY_ID_BCM5411: return "5411";
17529 case TG3_PHY_ID_BCM5701: return "5701";
17530 case TG3_PHY_ID_BCM5703: return "5703";
17531 case TG3_PHY_ID_BCM5704: return "5704";
17532 case TG3_PHY_ID_BCM5705: return "5705";
17533 case TG3_PHY_ID_BCM5750: return "5750";
17534 case TG3_PHY_ID_BCM5752: return "5752";
17535 case TG3_PHY_ID_BCM5714: return "5714";
17536 case TG3_PHY_ID_BCM5780: return "5780";
17537 case TG3_PHY_ID_BCM5755: return "5755";
17538 case TG3_PHY_ID_BCM5787: return "5787";
17539 case TG3_PHY_ID_BCM5784: return "5784";
17540 case TG3_PHY_ID_BCM5756: return "5722/5756";
17541 case TG3_PHY_ID_BCM5906: return "5906";
17542 case TG3_PHY_ID_BCM5761: return "5761";
17543 case TG3_PHY_ID_BCM5718C: return "5718C";
17544 case TG3_PHY_ID_BCM5718S: return "5718S";
17545 case TG3_PHY_ID_BCM57765: return "57765";
17546 case TG3_PHY_ID_BCM5719C: return "5719C";
17547 case TG3_PHY_ID_BCM5720C: return "5720C";
17548 case TG3_PHY_ID_BCM5762: return "5762C";
17549 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17550 case 0: return "serdes";
17551 default: return "unknown";
17552 }
17553 }
17554
17555 static char *tg3_bus_string(struct tg3 *tp, char *str)
17556 {
17557 if (tg3_flag(tp, PCI_EXPRESS)) {
17558 strcpy(str, "PCI Express");
17559 return str;
17560 } else if (tg3_flag(tp, PCIX_MODE)) {
17561 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17562
17563 strcpy(str, "PCIX:");
17564
17565 if ((clock_ctrl == 7) ||
17566 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17567 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17568 strcat(str, "133MHz");
17569 else if (clock_ctrl == 0)
17570 strcat(str, "33MHz");
17571 else if (clock_ctrl == 2)
17572 strcat(str, "50MHz");
17573 else if (clock_ctrl == 4)
17574 strcat(str, "66MHz");
17575 else if (clock_ctrl == 6)
17576 strcat(str, "100MHz");
17577 } else {
17578 strcpy(str, "PCI:");
17579 if (tg3_flag(tp, PCI_HIGH_SPEED))
17580 strcat(str, "66MHz");
17581 else
17582 strcat(str, "33MHz");
17583 }
17584 if (tg3_flag(tp, PCI_32BIT))
17585 strcat(str, ":32-bit");
17586 else
17587 strcat(str, ":64-bit");
17588 return str;
17589 }
17590
17591 static void tg3_init_coal(struct tg3 *tp)
17592 {
17593 struct ethtool_coalesce *ec = &tp->coal;
17594
17595 memset(ec, 0, sizeof(*ec));
17596 ec->cmd = ETHTOOL_GCOALESCE;
17597 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17598 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17599 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17600 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17601 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17602 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17603 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17604 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17605 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17606
17607 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17608 HOSTCC_MODE_CLRTICK_TXBD)) {
17609 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17610 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17611 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17612 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17613 }
17614
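/* The per-interrupt coalescing ticks and the statistics interval are
 * zeroed below on 5705 and newer parts; those knobs appear to exist
 * only on the original 5700 family.
 */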
17615 if (tg3_flag(tp, 5705_PLUS)) {
17616 ec->rx_coalesce_usecs_irq = 0;
17617 ec->tx_coalesce_usecs_irq = 0;
17618 ec->stats_block_coalesce_usecs = 0;
17619 }
17620 }
17621
17622 static int tg3_init_one(struct pci_dev *pdev,
17623 const struct pci_device_id *ent)
17624 {
17625 struct net_device *dev;
17626 struct tg3 *tp;
17627 int i, err;
17628 u32 sndmbx, rcvmbx, intmbx;
17629 char str[40];
17630 u64 dma_mask, persist_dma_mask;
17631 netdev_features_t features = 0;
17632
17633 err = pci_enable_device(pdev);
17634 if (err) {
17635 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17636 return err;
17637 }
17638
17639 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17640 if (err) {
17641 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17642 goto err_out_disable_pdev;
17643 }
17644
17645 pci_set_master(pdev);
17646
17647 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17648 if (!dev) {
17649 err = -ENOMEM;
17650 goto err_out_free_res;
17651 }
17652
17653 SET_NETDEV_DEV(dev, &pdev->dev);
17654
17655 tp = netdev_priv(dev);
17656 tp->pdev = pdev;
17657 tp->dev = dev;
17658 tp->rx_mode = TG3_DEF_RX_MODE;
17659 tp->tx_mode = TG3_DEF_TX_MODE;
17660 tp->irq_sync = 1;
17661 tp->pcierr_recovery = false;
17662
17663 if (tg3_debug > 0)
17664 tp->msg_enable = tg3_debug;
17665 else
17666 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17667
17668 if (pdev_is_ssb_gige_core(pdev)) {
17669 tg3_flag_set(tp, IS_SSB_CORE);
17670 if (ssb_gige_must_flush_posted_writes(pdev))
17671 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17672 if (ssb_gige_one_dma_at_once(pdev))
17673 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17674 if (ssb_gige_have_roboswitch(pdev)) {
17675 tg3_flag_set(tp, USE_PHYLIB);
17676 tg3_flag_set(tp, ROBOSWITCH);
17677 }
17678 if (ssb_gige_is_rgmii(pdev))
17679 tg3_flag_set(tp, RGMII_MODE);
17680 }
17681
17682 /* The word/byte swap controls here control register access byte
17683 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17684 * setting below.
17685 */
17686 tp->misc_host_ctrl =
17687 MISC_HOST_CTRL_MASK_PCI_INT |
17688 MISC_HOST_CTRL_WORD_SWAP |
17689 MISC_HOST_CTRL_INDIR_ACCESS |
17690 MISC_HOST_CTRL_PCISTATE_RW;
17691
17692 /* The NONFRM (non-frame) byte/word swap controls take effect
17693 * on descriptor entries, anything which isn't packet data.
17694 *
17695 * The StrongARM chips on the board (one for tx, one for rx)
17696 * are running in big-endian mode.
17697 */
17698 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17699 GRC_MODE_WSWAP_NONFRM_DATA);
17700 #ifdef __BIG_ENDIAN
17701 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17702 #endif
17703 spin_lock_init(&tp->lock);
17704 spin_lock_init(&tp->indirect_lock);
17705 INIT_WORK(&tp->reset_task, tg3_reset_task);
17706
17707 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17708 if (!tp->regs) {
17709 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17710 err = -ENOMEM;
17711 goto err_out_free_dev;
17712 }
17713
17714 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17715 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17716 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17717 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17718 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17719 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17720 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17721 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17722 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17723 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17724 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17725 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17726 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17727 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17728 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17729 tg3_flag_set(tp, ENABLE_APE);
17730 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17731 if (!tp->aperegs) {
17732 dev_err(&pdev->dev,
17733 "Cannot map APE registers, aborting\n");
17734 err = -ENOMEM;
17735 goto err_out_iounmap;
17736 }
17737 }
17738
17739 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17740 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17741
17742 dev->ethtool_ops = &tg3_ethtool_ops;
17743 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17744 dev->netdev_ops = &tg3_netdev_ops;
17745 dev->irq = pdev->irq;
17746
17747 err = tg3_get_invariants(tp, ent);
17748 if (err) {
17749 dev_err(&pdev->dev,
17750 "Problem fetching invariants of chip, aborting\n");
17751 goto err_out_apeunmap;
17752 }
17753
17754 /* The EPB bridge inside 5714, 5715, and 5780 and any
17755 * device behind the EPB cannot support DMA addresses > 40-bit.
17756 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17757 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17758 * do DMA address check in tg3_start_xmit().
17759 */
17760 if (tg3_flag(tp, IS_5788))
17761 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17762 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17763 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17764 #ifdef CONFIG_HIGHMEM
17765 dma_mask = DMA_BIT_MASK(64);
17766 #endif
17767 } else
17768 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17769
17770 /* Configure DMA attributes. */
17771 if (dma_mask > DMA_BIT_MASK(32)) {
17772 err = pci_set_dma_mask(pdev, dma_mask);
17773 if (!err) {
17774 features |= NETIF_F_HIGHDMA;
17775 err = pci_set_consistent_dma_mask(pdev,
17776 persist_dma_mask);
17777 if (err < 0) {
17778 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17779 "DMA for consistent allocations\n");
17780 goto err_out_apeunmap;
17781 }
17782 }
17783 }
17784 if (err || dma_mask == DMA_BIT_MASK(32)) {
17785 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17786 if (err) {
17787 dev_err(&pdev->dev,
17788 "No usable DMA configuration, aborting\n");
17789 goto err_out_apeunmap;
17790 }
17791 }
17792
17793 tg3_init_bufmgr_config(tp);
17794
17795 /* 5700 B0 chips do not support checksumming correctly due
17796 * to hardware bugs.
17797 */
17798 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17799 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17800
17801 if (tg3_flag(tp, 5755_PLUS))
17802 features |= NETIF_F_IPV6_CSUM;
17803 }
17804
17805 /* TSO is on by default on chips that support hardware TSO.
17806 * Firmware TSO on older chips gives lower performance, so it
17807 * is off by default, but can be enabled using ethtool.
17808 */
17809 if ((tg3_flag(tp, HW_TSO_1) ||
17810 tg3_flag(tp, HW_TSO_2) ||
17811 tg3_flag(tp, HW_TSO_3)) &&
17812 (features & NETIF_F_IP_CSUM))
17813 features |= NETIF_F_TSO;
17814 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17815 if (features & NETIF_F_IPV6_CSUM)
17816 features |= NETIF_F_TSO6;
17817 if (tg3_flag(tp, HW_TSO_3) ||
17818 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17819 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17820 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17821 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17822 tg3_asic_rev(tp) == ASIC_REV_57780)
17823 features |= NETIF_F_TSO_ECN;
17824 }
17825
17826 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17827 NETIF_F_HW_VLAN_CTAG_RX;
17828 dev->vlan_features |= features;
17829
17830 /*
17831 * Add loopback capability only for a subset of devices that support
17832 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17833 * loopback for the remaining devices.
17834 */
17835 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17836 !tg3_flag(tp, CPMU_PRESENT))
17837 /* Add the loopback capability */
17838 features |= NETIF_F_LOOPBACK;
17839
17840 dev->hw_features |= features;
17841 dev->priv_flags |= IFF_UNICAST_FLT;
17842
17843 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17844 dev->min_mtu = TG3_MIN_MTU;
17845 dev->max_mtu = TG3_MAX_MTU(tp);
17846
17847 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17848 !tg3_flag(tp, TSO_CAPABLE) &&
17849 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17850 tg3_flag_set(tp, MAX_RXPEND_64);
17851 tp->rx_pending = 63;
17852 }
17853
17854 err = tg3_get_device_address(tp);
17855 if (err) {
17856 dev_err(&pdev->dev,
17857 "Could not obtain valid ethernet address, aborting\n");
17858 goto err_out_apeunmap;
17859 }
17860
17861 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17862 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17863 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17864 for (i = 0; i < tp->irq_max; i++) {
17865 struct tg3_napi *tnapi = &tp->napi[i];
17866
17867 tnapi->tp = tp;
17868 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17869
17870 tnapi->int_mbox = intmbx;
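/* Interrupt mailboxes for the first five vectors are laid out
 * 8 bytes apart; the remaining vectors use 4-byte strides.
 */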
17871 if (i <= 4)
17872 intmbx += 0x8;
17873 else
17874 intmbx += 0x4;
17875
17876 tnapi->consmbox = rcvmbx;
17877 tnapi->prodmbox = sndmbx;
17878
17879 if (i)
17880 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17881 else
17882 tnapi->coal_now = HOSTCC_MODE_NOW;
17883
17884 if (!tg3_flag(tp, SUPPORT_MSIX))
17885 break;
17886
17887 /*
17888 * If we support MSI-X, we'll be using RSS. With RSS, the
17889 * first vector handles only link interrupts and the
17890 * remaining vectors handle rx and tx interrupts. Reuse the
17891 * mailbox values for the next iteration. The values we set up
17892 * above are still useful for single-vector mode.
17893 */
17894 if (!i)
17895 continue;
17896
17897 rcvmbx += 0x8;
17898
17899 if (sndmbx & 0x4)
17900 sndmbx -= 0x4;
17901 else
17902 sndmbx += 0xc;
17903 }
17904
17905 /*
17906 * Reset the chip in case a UNDI or EFI driver did not shut it down.
17907 * The DMA self test will enable WDMAC, and we would otherwise see
17908 * (spurious) pending DMA on the PCI bus at that point.
17909 */
17910 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17911 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17912 tg3_full_lock(tp, 0);
17913 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17914 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17915 tg3_full_unlock(tp);
17916 }
17917
17918 err = tg3_test_dma(tp);
17919 if (err) {
17920 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17921 goto err_out_apeunmap;
17922 }
17923
17924 tg3_init_coal(tp);
17925
17926 pci_set_drvdata(pdev, dev);
17927
17928 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17929 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17930 tg3_asic_rev(tp) == ASIC_REV_5762)
17931 tg3_flag_set(tp, PTP_CAPABLE);
17932
17933 tg3_timer_init(tp);
17934
17935 tg3_carrier_off(tp);
17936
17937 err = register_netdev(dev);
17938 if (err) {
17939 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17940 goto err_out_apeunmap;
17941 }
17942
17943 if (tg3_flag(tp, PTP_CAPABLE)) {
17944 tg3_ptp_init(tp);
17945 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17946 &tp->pdev->dev);
17947 if (IS_ERR(tp->ptp_clock))
17948 tp->ptp_clock = NULL;
17949 }
17950
17951 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17952 tp->board_part_number,
17953 tg3_chip_rev_id(tp),
17954 tg3_bus_string(tp, str),
17955 dev->dev_addr);
17956
17957 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17958 char *ethtype;
17959
17960 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17961 ethtype = "10/100Base-TX";
17962 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17963 ethtype = "1000Base-SX";
17964 else
17965 ethtype = "10/100/1000Base-T";
17966
17967 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17968 "(WireSpeed[%d], EEE[%d])\n",
17969 tg3_phy_string(tp), ethtype,
17970 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17971 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17972 }
17973
17974 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17975 (dev->features & NETIF_F_RXCSUM) != 0,
17976 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17977 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17978 tg3_flag(tp, ENABLE_ASF) != 0,
17979 tg3_flag(tp, TSO_CAPABLE) != 0);
17980 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17981 tp->dma_rwctrl,
17982 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17983 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17984
17985 pci_save_state(pdev);
17986
17987 return 0;
17988
17989 err_out_apeunmap:
17990 if (tp->aperegs) {
17991 iounmap(tp->aperegs);
17992 tp->aperegs = NULL;
17993 }
17994
17995 err_out_iounmap:
17996 if (tp->regs) {
17997 iounmap(tp->regs);
17998 tp->regs = NULL;
17999 }
18000
18001 err_out_free_dev:
18002 free_netdev(dev);
18003
18004 err_out_free_res:
18005 pci_release_regions(pdev);
18006
18007 err_out_disable_pdev:
18008 if (pci_is_enabled(pdev))
18009 pci_disable_device(pdev);
18010 return err;
18011 }
18012
18013 static void tg3_remove_one(struct pci_dev *pdev)
18014 {
18015 struct net_device *dev = pci_get_drvdata(pdev);
18016
18017 if (dev) {
18018 struct tg3 *tp = netdev_priv(dev);
18019
18020 tg3_ptp_fini(tp);
18021
18022 release_firmware(tp->fw);
18023
18024 tg3_reset_task_cancel(tp);
18025
18026 if (tg3_flag(tp, USE_PHYLIB)) {
18027 tg3_phy_fini(tp);
18028 tg3_mdio_fini(tp);
18029 }
18030
18031 unregister_netdev(dev);
18032 if (tp->aperegs) {
18033 iounmap(tp->aperegs);
18034 tp->aperegs = NULL;
18035 }
18036 if (tp->regs) {
18037 iounmap(tp->regs);
18038 tp->regs = NULL;
18039 }
18040 free_netdev(dev);
18041 pci_release_regions(pdev);
18042 pci_disable_device(pdev);
18043 }
18044 }
18045
18046 #ifdef CONFIG_PM_SLEEP
18047 static int tg3_suspend(struct device *device)
18048 {
18049 struct net_device *dev = dev_get_drvdata(device);
18050 struct tg3 *tp = netdev_priv(dev);
18051 int err = 0;
18052
18053 rtnl_lock();
18054
18055 if (!netif_running(dev))
18056 goto unlock;
18057
18058 tg3_reset_task_cancel(tp);
18059 tg3_phy_stop(tp);
18060 tg3_netif_stop(tp);
18061
18062 tg3_timer_stop(tp);
18063
18064 tg3_full_lock(tp, 1);
18065 tg3_disable_ints(tp);
18066 tg3_full_unlock(tp);
18067
18068 netif_device_detach(dev);
18069
18070 tg3_full_lock(tp, 0);
18071 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18072 tg3_flag_clear(tp, INIT_COMPLETE);
18073 tg3_full_unlock(tp);
18074
18075 err = tg3_power_down_prepare(tp);
18076 if (err) {
18077 int err2;
18078
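/* Power-down preparation failed; restart the hardware so the
 * device is left usable rather than half-stopped.
 */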
18079 tg3_full_lock(tp, 0);
18080
18081 tg3_flag_set(tp, INIT_COMPLETE);
18082 err2 = tg3_restart_hw(tp, true);
18083 if (err2)
18084 goto out;
18085
18086 tg3_timer_start(tp);
18087
18088 netif_device_attach(dev);
18089 tg3_netif_start(tp);
18090
18091 out:
18092 tg3_full_unlock(tp);
18093
18094 if (!err2)
18095 tg3_phy_start(tp);
18096 }
18097
18098 unlock:
18099 rtnl_unlock();
18100 return err;
18101 }
18102
18103 static int tg3_resume(struct device *device)
18104 {
18105 struct net_device *dev = dev_get_drvdata(device);
18106 struct tg3 *tp = netdev_priv(dev);
18107 int err = 0;
18108
18109 rtnl_lock();
18110
18111 if (!netif_running(dev))
18112 goto unlock;
18113
18114 netif_device_attach(dev);
18115
18116 tg3_full_lock(tp, 0);
18117
18118 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18119
18120 tg3_flag_set(tp, INIT_COMPLETE);
18121 err = tg3_restart_hw(tp,
18122 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18123 if (err)
18124 goto out;
18125
18126 tg3_timer_start(tp);
18127
18128 tg3_netif_start(tp);
18129
18130 out:
18131 tg3_full_unlock(tp);
18132
18133 if (!err)
18134 tg3_phy_start(tp);
18135
18136 unlock:
18137 rtnl_unlock();
18138 return err;
18139 }
18140 #endif /* CONFIG_PM_SLEEP */
18141
18142 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18143
18144 static void tg3_shutdown(struct pci_dev *pdev)
18145 {
18146 struct net_device *dev = pci_get_drvdata(pdev);
18147 struct tg3 *tp = netdev_priv(dev);
18148
18149 rtnl_lock();
18150 netif_device_detach(dev);
18151
18152 if (netif_running(dev))
18153 dev_close(dev);
18154
18155 if (system_state == SYSTEM_POWER_OFF)
18156 tg3_power_down(tp);
18157
18158 rtnl_unlock();
18159 }
18160
18161 /**
18162 * tg3_io_error_detected - called when PCI error is detected
18163 * @pdev: Pointer to PCI device
18164 * @state: The current pci connection state
18165 *
18166 * This function is called after a PCI bus error affecting
18167 * this device has been detected.
18168 */
18169 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18170 pci_channel_state_t state)
18171 {
18172 struct net_device *netdev = pci_get_drvdata(pdev);
18173 struct tg3 *tp = netdev_priv(netdev);
18174 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18175
18176 netdev_info(netdev, "PCI I/O error detected\n");
18177
18178 rtnl_lock();
18179
18180 /* Could be second call or maybe we don't have netdev yet */
18181 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18182 goto done;
18183
18184 /* Recovery is only attempted for a frozen (recoverable) channel state */
18185 if (state == pci_channel_io_frozen)
18186 tp->pcierr_recovery = true;
18187
18188 tg3_phy_stop(tp);
18189
18190 tg3_netif_stop(tp);
18191
18192 tg3_timer_stop(tp);
18193
18194 /* Want to make sure that the reset task doesn't run */
18195 tg3_reset_task_cancel(tp);
18196
18197 netif_device_detach(netdev);
18198
18199 /* Clean up software state, even if MMIO is blocked */
18200 tg3_full_lock(tp, 0);
18201 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18202 tg3_full_unlock(tp);
18203
18204 done:
18205 if (state == pci_channel_io_perm_failure) {
18206 if (netdev) {
18207 tg3_napi_enable(tp);
18208 dev_close(netdev);
18209 }
18210 err = PCI_ERS_RESULT_DISCONNECT;
18211 } else {
18212 pci_disable_device(pdev);
18213 }
18214
18215 rtnl_unlock();
18216
18217 return err;
18218 }
18219
18220 /**
18221 * tg3_io_slot_reset - called after the PCI bus has been reset.
18222 * @pdev: Pointer to PCI device
18223 *
18224 * Restart the card from scratch, as if from a cold boot.
18225 * At this point, the card has experienced a hard reset,
18226 * followed by fixups by BIOS, and has its config space
18227 * set up identically to what it was at cold boot.
18228 */
18229 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18230 {
18231 struct net_device *netdev = pci_get_drvdata(pdev);
18232 struct tg3 *tp = netdev_priv(netdev);
18233 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18234 int err;
18235
18236 rtnl_lock();
18237
18238 if (pci_enable_device(pdev)) {
18239 dev_err(&pdev->dev,
18240 "Cannot re-enable PCI device after reset.\n");
18241 goto done;
18242 }
18243
18244 pci_set_master(pdev);
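/* Restore the config space saved at probe time, then save it again
 * so a later restore starts from this known-good state.
 */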
18245 pci_restore_state(pdev);
18246 pci_save_state(pdev);
18247
18248 if (!netdev || !netif_running(netdev)) {
18249 rc = PCI_ERS_RESULT_RECOVERED;
18250 goto done;
18251 }
18252
18253 err = tg3_power_up(tp);
18254 if (err)
18255 goto done;
18256
18257 rc = PCI_ERS_RESULT_RECOVERED;
18258
18259 done:
18260 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18261 tg3_napi_enable(tp);
18262 dev_close(netdev);
18263 }
18264 rtnl_unlock();
18265
18266 return rc;
18267 }
18268
18269 /**
18270 * tg3_io_resume - called when traffic can start flowing again.
18271 * @pdev: Pointer to PCI device
18272 *
18273 * This callback is called when the error recovery driver tells
18274 * us that it's OK to resume normal operation.
18275 */
18276 static void tg3_io_resume(struct pci_dev *pdev)
18277 {
18278 struct net_device *netdev = pci_get_drvdata(pdev);
18279 struct tg3 *tp = netdev_priv(netdev);
18280 int err;
18281
18282 rtnl_lock();
18283
18284 if (!netdev || !netif_running(netdev))
18285 goto done;
18286
18287 tg3_full_lock(tp, 0);
18288 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18289 tg3_flag_set(tp, INIT_COMPLETE);
18290 err = tg3_restart_hw(tp, true);
18291 if (err) {
18292 tg3_full_unlock(tp);
18293 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18294 goto done;
18295 }
18296
18297 netif_device_attach(netdev);
18298
18299 tg3_timer_start(tp);
18300
18301 tg3_netif_start(tp);
18302
18303 tg3_full_unlock(tp);
18304
18305 tg3_phy_start(tp);
18306
18307 done:
18308 tp->pcierr_recovery = false;
18309 rtnl_unlock();
18310 }
18311
18312 static const struct pci_error_handlers tg3_err_handler = {
18313 .error_detected = tg3_io_error_detected,
18314 .slot_reset = tg3_io_slot_reset,
18315 .resume = tg3_io_resume
18316 };
18317
18318 static struct pci_driver tg3_driver = {
18319 .name = DRV_MODULE_NAME,
18320 .id_table = tg3_pci_tbl,
18321 .probe = tg3_init_one,
18322 .remove = tg3_remove_one,
18323 .err_handler = &tg3_err_handler,
18324 .driver.pm = &tg3_pm_ops,
18325 .shutdown = tg3_shutdown,
18326 };
18327
18328 module_pci_driver(tg3_driver);
18329