1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited.
9 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10 * refers to Broadcom Inc. and/or its subsidiaries.
11 *
12 * Firmware is:
13 * Derived from proprietary unpublished source code,
14 * Copyright (C) 2000-2016 Broadcom Corporation.
15 * Copyright (C) 2016-2017 Broadcom Ltd.
16 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17 * refers to Broadcom Inc. and/or its subsidiaries.
18 *
19 * Permission is hereby granted for the distribution of this firmware
20 * data in hexadecimal or equivalent format, provided this copyright
21 * notice is accompanying it.
22 */
23
24
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
58
59 #include <net/checksum.h>
60 #include <net/gso.h>
61 #include <net/ip.h>
62
63 #include <linux/io.h>
64 #include <asm/byteorder.h>
65 #include <linux/uaccess.h>
66
67 #include <uapi/linux/net_tstamp.h>
68 #include <linux/ptp_clock_kernel.h>
69
70 #define BAR_0 0
71 #define BAR_2 2
72
73 #include "tg3.h"
74
75 /* Functions & macros to verify TG3_FLAGS types */
76
77 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
78 {
79 return test_bit(flag, bits);
80 }
81
82 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
83 {
84 set_bit(flag, bits);
85 }
86
87 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
88 {
89 clear_bit(flag, bits);
90 }
91
92 #define tg3_flag(tp, flag) \
93 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
94 #define tg3_flag_set(tp, flag) \
95 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
96 #define tg3_flag_clear(tp, flag) \
97 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
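/* Illustrative sketch (editor's note, not driver code): the macros above
 * paste the flag name onto the TG3_FLAG_ prefix, so for example
 *
 *	tg3_flag(tp, ENABLE_APE)
 *
 * expands to _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an
 * atomic test_bit() on the device's flag bitmap, and tg3_flag_set() /
 * tg3_flag_clear() are the matching set_bit() / clear_bit() calls.
 */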
98
99 #define DRV_MODULE_NAME "tg3"
100 /* DO NOT UPDATE TG3_*_NUM defines */
101 #define TG3_MAJ_NUM 3
102 #define TG3_MIN_NUM 137
103
104 #define RESET_KIND_SHUTDOWN 0
105 #define RESET_KIND_INIT 1
106 #define RESET_KIND_SUSPEND 2
107
108 #define TG3_DEF_RX_MODE 0
109 #define TG3_DEF_TX_MODE 0
110 #define TG3_DEF_MSG_ENABLE \
111 (NETIF_MSG_DRV | \
112 NETIF_MSG_PROBE | \
113 NETIF_MSG_LINK | \
114 NETIF_MSG_TIMER | \
115 NETIF_MSG_IFDOWN | \
116 NETIF_MSG_IFUP | \
117 NETIF_MSG_RX_ERR | \
118 NETIF_MSG_TX_ERR)
119
120 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
121
122 /* length of time before we decide the hardware is borked,
123 * and dev->tx_timeout() should be called to fix the problem
124 */
125
126 #define TG3_TX_TIMEOUT (5 * HZ)
127
128 /* hardware minimum and maximum for a single frame's data payload */
129 #define TG3_MIN_MTU ETH_ZLEN
130 #define TG3_MAX_MTU(tp) \
131 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
132
133 /* These numbers seem to be hard coded in the NIC firmware somehow.
134 * You can't change the ring sizes, but you can change where you place
135 * them in the NIC onboard memory.
136 */
137 #define TG3_RX_STD_RING_SIZE(tp) \
138 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
139 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
140 #define TG3_DEF_RX_RING_PENDING 200
141 #define TG3_RX_JMB_RING_SIZE(tp) \
142 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
143 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
144 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
145
146 /* Do not place this n-ring entries value into the tp struct itself,
147 * we really want to expose these constants to GCC so that modulo et
148 * al. operations are done with shifts and masks instead of with
149 * hw multiply/modulo instructions. Another solution would be to
150 * replace things like '% foo' with '& (foo - 1)'.
151 */
152
153 #define TG3_TX_RING_SIZE 512
154 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
155
156 #define TG3_RX_STD_RING_BYTES(tp) \
157 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
158 #define TG3_RX_JMB_RING_BYTES(tp) \
159 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
160 #define TG3_RX_RCB_RING_BYTES(tp) \
161 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
162 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
163 TG3_TX_RING_SIZE)
164 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
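/* Illustrative sketch: because TG3_TX_RING_SIZE is a power of two, the
 * mask in NEXT_TX() is exactly the '& (foo - 1)' replacement for '% foo'
 * described in the comment above, so
 *
 *	entry = NEXT_TX(entry);
 *
 * advances a producer index with the same result as
 * (entry + 1) % TG3_TX_RING_SIZE, without a hardware divide.
 */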
165
166 #define TG3_DMA_BYTE_ENAB 64
167
168 #define TG3_RX_STD_DMA_SZ 1536
169 #define TG3_RX_JMB_DMA_SZ 9046
170
171 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
172
173 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
174 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
175
176 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
177 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
178
179 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
180 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
181
182 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
183 * that are at least dword aligned when used in PCIX mode. The driver
184 * works around this bug by double copying the packet. This workaround
185 * is built into the normal double copy length check for efficiency.
186 *
187 * However, the double copy is only necessary on those architectures
188 * where unaligned memory accesses are inefficient. For those architectures
189 * where unaligned memory accesses incur little penalty, we can reintegrate
190 * the 5701 in the normal rx path. Doing so saves a device structure
191 * dereference by hardcoding the double copy threshold in place.
192 */
193 #define TG3_RX_COPY_THRESHOLD 256
194 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
195 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
196 #else
197 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
198 #endif
199
200 #if (NET_IP_ALIGN != 0)
201 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
202 #else
203 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
204 #endif
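/* Illustrative sketch (an editor's assumption about the rx path, not a
 * quote from this file): the macros above are consulted roughly as
 *
 *	if (len < TG3_RX_COPY_THRESH(tp))
 *		copy the small frame into a fresh skb and recycle the buffer;
 *	else
 *		unmap the buffer and hand it up the stack;
 *
 * which is how the 5701 double-copy workaround folds into the normal
 * copy-length check described in the comment block above.
 */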
205
206 /* minimum number of free TX descriptors required to wake up TX process */
207 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
208 #define TG3_TX_BD_DMA_MAX_2K 2048
209 #define TG3_TX_BD_DMA_MAX_4K 4096
210
211 #define TG3_RAW_IP_ALIGN 2
212
213 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
214 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
215
216 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
217 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
218
219 #define FIRMWARE_TG3 "tigon/tg3.bin"
220 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
221 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
222 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
223
224 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
225 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
226 MODULE_LICENSE("GPL");
227 MODULE_FIRMWARE(FIRMWARE_TG3);
228 MODULE_FIRMWARE(FIRMWARE_TG357766);
229 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
230 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
231
232 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
233 module_param(tg3_debug, int, 0);
234 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
235
236 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
237 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
238
239 static const struct pci_device_id tg3_pci_tbl[] = {
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
259 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
260 TG3_DRV_DATA_FLAG_5705_10_100},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
262 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
263 TG3_DRV_DATA_FLAG_5705_10_100},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
266 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
267 TG3_DRV_DATA_FLAG_5705_10_100},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
274 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
280 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
288 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
289 PCI_VENDOR_ID_LENOVO,
290 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
291 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
294 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
311 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
312 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
313 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
314 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
315 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
316 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
317 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
318 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
322 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
334 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
340 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
341 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
342 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
343 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
344 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
345 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
346 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
347 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
348 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
349 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
350 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
351 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
352 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
353 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
354 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
355 {}
356 };
357
358 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
359
360 static const struct {
361 const char string[ETH_GSTRING_LEN];
362 } ethtool_stats_keys[] = {
363 { "rx_octets" },
364 { "rx_fragments" },
365 { "rx_ucast_packets" },
366 { "rx_mcast_packets" },
367 { "rx_bcast_packets" },
368 { "rx_fcs_errors" },
369 { "rx_align_errors" },
370 { "rx_xon_pause_rcvd" },
371 { "rx_xoff_pause_rcvd" },
372 { "rx_mac_ctrl_rcvd" },
373 { "rx_xoff_entered" },
374 { "rx_frame_too_long_errors" },
375 { "rx_jabbers" },
376 { "rx_undersize_packets" },
377 { "rx_in_length_errors" },
378 { "rx_out_length_errors" },
379 { "rx_64_or_less_octet_packets" },
380 { "rx_65_to_127_octet_packets" },
381 { "rx_128_to_255_octet_packets" },
382 { "rx_256_to_511_octet_packets" },
383 { "rx_512_to_1023_octet_packets" },
384 { "rx_1024_to_1522_octet_packets" },
385 { "rx_1523_to_2047_octet_packets" },
386 { "rx_2048_to_4095_octet_packets" },
387 { "rx_4096_to_8191_octet_packets" },
388 { "rx_8192_to_9022_octet_packets" },
389
390 { "tx_octets" },
391 { "tx_collisions" },
392
393 { "tx_xon_sent" },
394 { "tx_xoff_sent" },
395 { "tx_flow_control" },
396 { "tx_mac_errors" },
397 { "tx_single_collisions" },
398 { "tx_mult_collisions" },
399 { "tx_deferred" },
400 { "tx_excessive_collisions" },
401 { "tx_late_collisions" },
402 { "tx_collide_2times" },
403 { "tx_collide_3times" },
404 { "tx_collide_4times" },
405 { "tx_collide_5times" },
406 { "tx_collide_6times" },
407 { "tx_collide_7times" },
408 { "tx_collide_8times" },
409 { "tx_collide_9times" },
410 { "tx_collide_10times" },
411 { "tx_collide_11times" },
412 { "tx_collide_12times" },
413 { "tx_collide_13times" },
414 { "tx_collide_14times" },
415 { "tx_collide_15times" },
416 { "tx_ucast_packets" },
417 { "tx_mcast_packets" },
418 { "tx_bcast_packets" },
419 { "tx_carrier_sense_errors" },
420 { "tx_discards" },
421 { "tx_errors" },
422
423 { "dma_writeq_full" },
424 { "dma_write_prioq_full" },
425 { "rxbds_empty" },
426 { "rx_discards" },
427 { "rx_errors" },
428 { "rx_threshold_hit" },
429
430 { "dma_readq_full" },
431 { "dma_read_prioq_full" },
432 { "tx_comp_queue_full" },
433
434 { "ring_set_send_prod_index" },
435 { "ring_status_update" },
436 { "nic_irqs" },
437 { "nic_avoided_irqs" },
438 { "nic_tx_threshold_hit" },
439
440 { "mbuf_lwm_thresh_hit" },
441 };
442
443 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
444 #define TG3_NVRAM_TEST 0
445 #define TG3_LINK_TEST 1
446 #define TG3_REGISTER_TEST 2
447 #define TG3_MEMORY_TEST 3
448 #define TG3_MAC_LOOPB_TEST 4
449 #define TG3_PHY_LOOPB_TEST 5
450 #define TG3_EXT_LOOPB_TEST 6
451 #define TG3_INTERRUPT_TEST 7
452
453
454 static const struct {
455 const char string[ETH_GSTRING_LEN];
456 } ethtool_test_keys[] = {
457 [TG3_NVRAM_TEST] = { "nvram test (online) " },
458 [TG3_LINK_TEST] = { "link test (online) " },
459 [TG3_REGISTER_TEST] = { "register test (offline)" },
460 [TG3_MEMORY_TEST] = { "memory test (offline)" },
461 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
462 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
463 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
464 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
465 };
466
467 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
468
469
470 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
471 {
472 writel(val, tp->regs + off);
473 }
474
475 static u32 tg3_read32(struct tg3 *tp, u32 off)
476 {
477 return readl(tp->regs + off);
478 }
479
480 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
481 {
482 writel(val, tp->aperegs + off);
483 }
484
485 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
486 {
487 return readl(tp->aperegs + off);
488 }
489
490 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
491 {
492 unsigned long flags;
493
494 spin_lock_irqsave(&tp->indirect_lock, flags);
495 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
496 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
497 spin_unlock_irqrestore(&tp->indirect_lock, flags);
498 }
499
500 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
501 {
502 writel(val, tp->regs + off);
503 readl(tp->regs + off);
504 }
505
506 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
507 {
508 unsigned long flags;
509 u32 val;
510
511 spin_lock_irqsave(&tp->indirect_lock, flags);
512 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
513 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
514 spin_unlock_irqrestore(&tp->indirect_lock, flags);
515 return val;
516 }
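/* Editor's note: the indirect helpers above tunnel MMIO accesses through
 * PCI config space -- the target register offset is written to
 * TG3PCI_REG_BASE_ADDR and the payload moves through TG3PCI_REG_DATA --
 * with indirect_lock serializing users of the shared window.
 */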
517
518 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
519 {
520 unsigned long flags;
521
522 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
523 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
524 TG3_64BIT_REG_LOW, val);
525 return;
526 }
527 if (off == TG3_RX_STD_PROD_IDX_REG) {
528 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
529 TG3_64BIT_REG_LOW, val);
530 return;
531 }
532
533 spin_lock_irqsave(&tp->indirect_lock, flags);
534 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
535 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
536 spin_unlock_irqrestore(&tp->indirect_lock, flags);
537
538 /* In indirect mode when disabling interrupts, we also need
539 * to clear the interrupt bit in the GRC local ctrl register.
540 */
541 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
542 (val == 0x1)) {
543 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
544 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
545 }
546 }
547
548 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
549 {
550 unsigned long flags;
551 u32 val;
552
553 spin_lock_irqsave(&tp->indirect_lock, flags);
554 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
555 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
556 spin_unlock_irqrestore(&tp->indirect_lock, flags);
557 return val;
558 }
559
560 /* usec_wait specifies the wait time in usec when writing to certain registers
561 * where it is unsafe to read back the register without some delay.
562 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
563 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
564 */
565 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
566 {
567 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
568 /* Non-posted methods */
569 tp->write32(tp, off, val);
570 else {
571 /* Posted method */
572 tg3_write32(tp, off, val);
573 if (usec_wait)
574 udelay(usec_wait);
575 tp->read32(tp, off);
576 }
577 /* Wait again after the read for the posted method to guarantee that
578 * the wait time is met.
579 */
580 if (usec_wait)
581 udelay(usec_wait);
582 }
583
584 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
585 {
586 tp->write32_mbox(tp, off, val);
587 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
588 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
589 !tg3_flag(tp, ICH_WORKAROUND)))
590 tp->read32_mbox(tp, off);
591 }
592
593 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
594 {
595 void __iomem *mbox = tp->regs + off;
596 writel(val, mbox);
597 if (tg3_flag(tp, TXD_MBOX_HWBUG))
598 writel(val, mbox);
599 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
600 tg3_flag(tp, FLUSH_POSTED_WRITES))
601 readl(mbox);
602 }
603
604 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
605 {
606 return readl(tp->regs + off + GRCMBOX_BASE);
607 }
608
609 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
610 {
611 writel(val, tp->regs + off + GRCMBOX_BASE);
612 }
613
614 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
615 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
616 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
617 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
618 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
619
620 #define tw32(reg, val) tp->write32(tp, reg, val)
621 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
622 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
623 #define tr32(reg) tp->read32(tp, reg)
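/* Illustrative usage of the accessors above (a sketch assuming a local
 * 'struct tg3 *tp' in scope, as every caller in this file has):
 *
 *	tw32(GRC_MODE, val);				plain write, posting allowed
 *	tw32_f(GRC_MODE, val);				write, then read back to flush
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40);	flush and wait 40 usec
 *	val = tr32(GRC_MODE);				read
 */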
624
625 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
626 {
627 unsigned long flags;
628
629 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
630 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
631 return;
632
633 spin_lock_irqsave(&tp->indirect_lock, flags);
634 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
635 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
636 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
637
638 /* Always leave this as zero. */
639 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
640 } else {
641 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
642 tw32_f(TG3PCI_MEM_WIN_DATA, val);
643
644 /* Always leave this as zero. */
645 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
646 }
647 spin_unlock_irqrestore(&tp->indirect_lock, flags);
648 }
649
650 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
651 {
652 unsigned long flags;
653
654 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
655 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
656 *val = 0;
657 return;
658 }
659
660 spin_lock_irqsave(&tp->indirect_lock, flags);
661 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
662 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
663 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
664
665 /* Always leave this as zero. */
666 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
667 } else {
668 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
669 *val = tr32(TG3PCI_MEM_WIN_DATA);
670
671 /* Always leave this as zero. */
672 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
673 }
674 spin_unlock_irqrestore(&tp->indirect_lock, flags);
675 }
676
677 static void tg3_ape_lock_init(struct tg3 *tp)
678 {
679 int i;
680 u32 regbase, bit;
681
682 if (tg3_asic_rev(tp) == ASIC_REV_5761)
683 regbase = TG3_APE_LOCK_GRANT;
684 else
685 regbase = TG3_APE_PER_LOCK_GRANT;
686
687 /* Make sure the driver doesn't have any stale locks. */
688 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
689 switch (i) {
690 case TG3_APE_LOCK_PHY0:
691 case TG3_APE_LOCK_PHY1:
692 case TG3_APE_LOCK_PHY2:
693 case TG3_APE_LOCK_PHY3:
694 bit = APE_LOCK_GRANT_DRIVER;
695 break;
696 default:
697 if (!tp->pci_fn)
698 bit = APE_LOCK_GRANT_DRIVER;
699 else
700 bit = 1 << tp->pci_fn;
701 }
702 tg3_ape_write32(tp, regbase + 4 * i, bit);
703 }
704
705 }
706
707 static int tg3_ape_lock(struct tg3 *tp, int locknum)
708 {
709 int i, off;
710 int ret = 0;
711 u32 status, req, gnt, bit;
712
713 if (!tg3_flag(tp, ENABLE_APE))
714 return 0;
715
716 switch (locknum) {
717 case TG3_APE_LOCK_GPIO:
718 if (tg3_asic_rev(tp) == ASIC_REV_5761)
719 return 0;
720 fallthrough;
721 case TG3_APE_LOCK_GRC:
722 case TG3_APE_LOCK_MEM:
723 if (!tp->pci_fn)
724 bit = APE_LOCK_REQ_DRIVER;
725 else
726 bit = 1 << tp->pci_fn;
727 break;
728 case TG3_APE_LOCK_PHY0:
729 case TG3_APE_LOCK_PHY1:
730 case TG3_APE_LOCK_PHY2:
731 case TG3_APE_LOCK_PHY3:
732 bit = APE_LOCK_REQ_DRIVER;
733 break;
734 default:
735 return -EINVAL;
736 }
737
738 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
739 req = TG3_APE_LOCK_REQ;
740 gnt = TG3_APE_LOCK_GRANT;
741 } else {
742 req = TG3_APE_PER_LOCK_REQ;
743 gnt = TG3_APE_PER_LOCK_GRANT;
744 }
745
746 off = 4 * locknum;
747
748 tg3_ape_write32(tp, req + off, bit);
749
750 /* Wait for up to 1 millisecond to acquire lock. */
751 for (i = 0; i < 100; i++) {
752 status = tg3_ape_read32(tp, gnt + off);
753 if (status == bit)
754 break;
755 if (pci_channel_offline(tp->pdev))
756 break;
757
758 udelay(10);
759 }
760
761 if (status != bit) {
762 /* Revoke the lock request. */
763 tg3_ape_write32(tp, gnt + off, bit);
764 ret = -EBUSY;
765 }
766
767 return ret;
768 }
769
770 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
771 {
772 u32 gnt, bit;
773
774 if (!tg3_flag(tp, ENABLE_APE))
775 return;
776
777 switch (locknum) {
778 case TG3_APE_LOCK_GPIO:
779 if (tg3_asic_rev(tp) == ASIC_REV_5761)
780 return;
781 fallthrough;
782 case TG3_APE_LOCK_GRC:
783 case TG3_APE_LOCK_MEM:
784 if (!tp->pci_fn)
785 bit = APE_LOCK_GRANT_DRIVER;
786 else
787 bit = 1 << tp->pci_fn;
788 break;
789 case TG3_APE_LOCK_PHY0:
790 case TG3_APE_LOCK_PHY1:
791 case TG3_APE_LOCK_PHY2:
792 case TG3_APE_LOCK_PHY3:
793 bit = APE_LOCK_GRANT_DRIVER;
794 break;
795 default:
796 return;
797 }
798
799 if (tg3_asic_rev(tp) == ASIC_REV_5761)
800 gnt = TG3_APE_LOCK_GRANT;
801 else
802 gnt = TG3_APE_PER_LOCK_GRANT;
803
804 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
805 }
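/* Illustrative usage (a sketch, not a quote from this file): callers
 * bracket accesses to a resource shared with the APE firmware as
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	... touch the APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * which is the pattern tg3_ape_event_lock() below follows.
 */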
806
807 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
808 {
809 u32 apedata;
810
811 while (timeout_us) {
812 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
813 return -EBUSY;
814
815 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
816 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
817 break;
818
819 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
820
821 udelay(10);
822 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
823 }
824
825 return timeout_us ? 0 : -EBUSY;
826 }
827
828 #ifdef CONFIG_TIGON3_HWMON
829 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
830 {
831 u32 i, apedata;
832
833 for (i = 0; i < timeout_us / 10; i++) {
834 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
835
836 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
837 break;
838
839 udelay(10);
840 }
841
842 return i == timeout_us / 10;
843 }
844
845 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
846 u32 len)
847 {
848 int err;
849 u32 i, bufoff, msgoff, maxlen, apedata;
850
851 if (!tg3_flag(tp, APE_HAS_NCSI))
852 return 0;
853
854 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
855 if (apedata != APE_SEG_SIG_MAGIC)
856 return -ENODEV;
857
858 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
859 if (!(apedata & APE_FW_STATUS_READY))
860 return -EAGAIN;
861
862 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
863 TG3_APE_SHMEM_BASE;
864 msgoff = bufoff + 2 * sizeof(u32);
865 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
866
867 while (len) {
868 u32 length;
869
870 /* Cap xfer sizes to scratchpad limits. */
871 length = (len > maxlen) ? maxlen : len;
872 len -= length;
873
874 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
875 if (!(apedata & APE_FW_STATUS_READY))
876 return -EAGAIN;
877
878 /* Wait for up to 1 msec for APE to service previous event. */
879 err = tg3_ape_event_lock(tp, 1000);
880 if (err)
881 return err;
882
883 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
884 APE_EVENT_STATUS_SCRTCHPD_READ |
885 APE_EVENT_STATUS_EVENT_PENDING;
886 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
887
888 tg3_ape_write32(tp, bufoff, base_off);
889 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
890
891 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
892 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
893
894 base_off += length;
895
896 if (tg3_ape_wait_for_event(tp, 30000))
897 return -EAGAIN;
898
899 for (i = 0; length; i += 4, length -= 4) {
900 u32 val = tg3_ape_read32(tp, msgoff + i);
901 memcpy(data, &val, sizeof(u32));
902 data++;
903 }
904 }
905
906 return 0;
907 }
908 #endif
909
910 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
911 {
912 int err;
913 u32 apedata;
914
915 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
916 if (apedata != APE_SEG_SIG_MAGIC)
917 return -EAGAIN;
918
919 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
920 if (!(apedata & APE_FW_STATUS_READY))
921 return -EAGAIN;
922
923 /* Wait for up to 20 milliseconds for APE to service previous event. */
924 err = tg3_ape_event_lock(tp, 20000);
925 if (err)
926 return err;
927
928 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
929 event | APE_EVENT_STATUS_EVENT_PENDING);
930
931 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
932 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
933
934 return 0;
935 }
936
937 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
938 {
939 u32 event;
940 u32 apedata;
941
942 if (!tg3_flag(tp, ENABLE_APE))
943 return;
944
945 switch (kind) {
946 case RESET_KIND_INIT:
947 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
948 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
949 APE_HOST_SEG_SIG_MAGIC);
950 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
951 APE_HOST_SEG_LEN_MAGIC);
952 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
953 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
954 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
955 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
956 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
957 APE_HOST_BEHAV_NO_PHYLOCK);
958 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
959 TG3_APE_HOST_DRVR_STATE_START);
960
961 event = APE_EVENT_STATUS_STATE_START;
962 break;
963 case RESET_KIND_SHUTDOWN:
964 if (device_may_wakeup(&tp->pdev->dev) &&
965 tg3_flag(tp, WOL_ENABLE)) {
966 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
967 TG3_APE_HOST_WOL_SPEED_AUTO);
968 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
969 } else
970 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
971
972 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
973
974 event = APE_EVENT_STATUS_STATE_UNLOAD;
975 break;
976 default:
977 return;
978 }
979
980 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
981
982 tg3_ape_send_event(tp, event);
983 }
984
985 static void tg3_send_ape_heartbeat(struct tg3 *tp,
986 unsigned long interval)
987 {
988 /* Check if the heartbeat interval has elapsed */
989 if (!tg3_flag(tp, ENABLE_APE) ||
990 time_before(jiffies, tp->ape_hb_jiffies + interval))
991 return;
992
993 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
994 tp->ape_hb_jiffies = jiffies;
995 }
996
997 static void tg3_disable_ints(struct tg3 *tp)
998 {
999 int i;
1000
1001 tw32(TG3PCI_MISC_HOST_CTRL,
1002 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1003 for (i = 0; i < tp->irq_max; i++)
1004 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1005 }
1006
1007 static void tg3_enable_ints(struct tg3 *tp)
1008 {
1009 int i;
1010
1011 tp->irq_sync = 0;
1012 wmb();
1013
1014 tw32(TG3PCI_MISC_HOST_CTRL,
1015 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1016
1017 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1018 for (i = 0; i < tp->irq_cnt; i++) {
1019 struct tg3_napi *tnapi = &tp->napi[i];
1020
1021 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1022 if (tg3_flag(tp, 1SHOT_MSI))
1023 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1024
1025 tp->coal_now |= tnapi->coal_now;
1026 }
1027
1028 /* Force an initial interrupt */
1029 if (!tg3_flag(tp, TAGGED_STATUS) &&
1030 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1031 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1032 else
1033 tw32(HOSTCC_MODE, tp->coal_now);
1034
1035 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1036 }
1037
1038 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1039 {
1040 struct tg3 *tp = tnapi->tp;
1041 struct tg3_hw_status *sblk = tnapi->hw_status;
1042 unsigned int work_exists = 0;
1043
1044 /* check for phy events */
1045 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1046 if (sblk->status & SD_STATUS_LINK_CHG)
1047 work_exists = 1;
1048 }
1049
1050 /* check for TX work to do */
1051 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1052 work_exists = 1;
1053
1054 /* check for RX work to do */
1055 if (tnapi->rx_rcb_prod_idx &&
1056 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1057 work_exists = 1;
1058
1059 return work_exists;
1060 }
1061
1062 /* tg3_int_reenable
1063 * similar to tg3_enable_ints, but it accurately determines whether there
1064 * is new work pending and can return without flushing the PIO write
1065 * which reenables interrupts
1066 */
1067 static void tg3_int_reenable(struct tg3_napi *tnapi)
1068 {
1069 struct tg3 *tp = tnapi->tp;
1070
1071 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1072
1073 /* When doing tagged status, this work check is unnecessary.
1074 * The last_tag we write above tells the chip which piece of
1075 * work we've completed.
1076 */
1077 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1078 tw32(HOSTCC_MODE, tp->coalesce_mode |
1079 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1080 }
1081
1082 static void tg3_switch_clocks(struct tg3 *tp)
1083 {
1084 u32 clock_ctrl;
1085 u32 orig_clock_ctrl;
1086
1087 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1088 return;
1089
1090 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1091
1092 orig_clock_ctrl = clock_ctrl;
1093 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1094 CLOCK_CTRL_CLKRUN_OENABLE |
1095 0x1f);
1096 tp->pci_clock_ctrl = clock_ctrl;
1097
1098 if (tg3_flag(tp, 5705_PLUS)) {
1099 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1100 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1101 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1102 }
1103 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1104 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1105 clock_ctrl |
1106 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1107 40);
1108 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1109 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1110 40);
1111 }
1112 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1113 }
1114
1115 #define PHY_BUSY_LOOPS 5000
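/* Editor's note: with the udelay(10) polling step used in the helpers
 * below, PHY_BUSY_LOOPS bounds a single MDIO read or write at roughly
 * 50 ms before the caller sees -EBUSY. */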
1116
1117 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1118 u32 *val)
1119 {
1120 u32 frame_val;
1121 unsigned int loops;
1122 int ret;
1123
1124 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1125 tw32_f(MAC_MI_MODE,
1126 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1127 udelay(80);
1128 }
1129
1130 tg3_ape_lock(tp, tp->phy_ape_lock);
1131
1132 *val = 0x0;
1133
1134 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1135 MI_COM_PHY_ADDR_MASK);
1136 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1137 MI_COM_REG_ADDR_MASK);
1138 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1139
1140 tw32_f(MAC_MI_COM, frame_val);
1141
1142 loops = PHY_BUSY_LOOPS;
1143 while (loops != 0) {
1144 udelay(10);
1145 frame_val = tr32(MAC_MI_COM);
1146
1147 if ((frame_val & MI_COM_BUSY) == 0) {
1148 udelay(5);
1149 frame_val = tr32(MAC_MI_COM);
1150 break;
1151 }
1152 loops -= 1;
1153 }
1154
1155 ret = -EBUSY;
1156 if (loops != 0) {
1157 *val = frame_val & MI_COM_DATA_MASK;
1158 ret = 0;
1159 }
1160
1161 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1162 tw32_f(MAC_MI_MODE, tp->mi_mode);
1163 udelay(80);
1164 }
1165
1166 tg3_ape_unlock(tp, tp->phy_ape_lock);
1167
1168 return ret;
1169 }
1170
1171 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1172 {
1173 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1174 }
1175
1176 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1177 u32 val)
1178 {
1179 u32 frame_val;
1180 unsigned int loops;
1181 int ret;
1182
1183 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1184 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1185 return 0;
1186
1187 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1188 tw32_f(MAC_MI_MODE,
1189 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1190 udelay(80);
1191 }
1192
1193 tg3_ape_lock(tp, tp->phy_ape_lock);
1194
1195 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1196 MI_COM_PHY_ADDR_MASK);
1197 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1198 MI_COM_REG_ADDR_MASK);
1199 frame_val |= (val & MI_COM_DATA_MASK);
1200 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1201
1202 tw32_f(MAC_MI_COM, frame_val);
1203
1204 loops = PHY_BUSY_LOOPS;
1205 while (loops != 0) {
1206 udelay(10);
1207 frame_val = tr32(MAC_MI_COM);
1208 if ((frame_val & MI_COM_BUSY) == 0) {
1209 udelay(5);
1210 frame_val = tr32(MAC_MI_COM);
1211 break;
1212 }
1213 loops -= 1;
1214 }
1215
1216 ret = -EBUSY;
1217 if (loops != 0)
1218 ret = 0;
1219
1220 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1221 tw32_f(MAC_MI_MODE, tp->mi_mode);
1222 udelay(80);
1223 }
1224
1225 tg3_ape_unlock(tp, tp->phy_ape_lock);
1226
1227 return ret;
1228 }
1229
1230 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1231 {
1232 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1233 }
1234
1235 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1236 {
1237 int err;
1238
1239 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1240 if (err)
1241 goto done;
1242
1243 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1244 if (err)
1245 goto done;
1246
1247 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1248 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1249 if (err)
1250 goto done;
1251
1252 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1253
1254 done:
1255 return err;
1256 }
1257
1258 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1259 {
1260 int err;
1261
1262 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1263 if (err)
1264 goto done;
1265
1266 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1267 if (err)
1268 goto done;
1269
1270 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1271 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1272 if (err)
1273 goto done;
1274
1275 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1276
1277 done:
1278 return err;
1279 }
1280
1281 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1282 {
1283 int err;
1284
1285 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1286 if (!err)
1287 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1288
1289 return err;
1290 }
1291
1292 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1293 {
1294 int err;
1295
1296 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1297 if (!err)
1298 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1299
1300 return err;
1301 }
1302
1303 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1304 {
1305 int err;
1306
1307 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1308 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1309 MII_TG3_AUXCTL_SHDWSEL_MISC);
1310 if (!err)
1311 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1312
1313 return err;
1314 }
1315
1316 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1317 {
1318 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1319 set |= MII_TG3_AUXCTL_MISC_WREN;
1320
1321 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1322 }
1323
1324 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1325 {
1326 u32 val;
1327 int err;
1328
1329 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1330
1331 if (err)
1332 return err;
1333
1334 if (enable)
1335 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1336 else
1337 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1338
1339 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1340 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1341
1342 return err;
1343 }
1344
1345 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1346 {
1347 return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1348 reg | val | MII_TG3_MISC_SHDW_WREN);
1349 }
1350
1351 static int tg3_bmcr_reset(struct tg3 *tp)
1352 {
1353 u32 phy_control;
1354 int limit, err;
1355
1356 /* OK, reset it, and poll the BMCR_RESET bit until it
1357 * clears or we time out.
1358 */
1359 phy_control = BMCR_RESET;
1360 err = tg3_writephy(tp, MII_BMCR, phy_control);
1361 if (err != 0)
1362 return -EBUSY;
1363
1364 limit = 5000;
1365 while (limit--) {
1366 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1367 if (err != 0)
1368 return -EBUSY;
1369
1370 if ((phy_control & BMCR_RESET) == 0) {
1371 udelay(40);
1372 break;
1373 }
1374 udelay(10);
1375 }
1376 if (limit < 0)
1377 return -EBUSY;
1378
1379 return 0;
1380 }
1381
1382 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1383 {
1384 struct tg3 *tp = bp->priv;
1385 u32 val;
1386
1387 spin_lock_bh(&tp->lock);
1388
1389 if (__tg3_readphy(tp, mii_id, reg, &val))
1390 val = -EIO;
1391
1392 spin_unlock_bh(&tp->lock);
1393
1394 return val;
1395 }
1396
1397 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1398 {
1399 struct tg3 *tp = bp->priv;
1400 u32 ret = 0;
1401
1402 spin_lock_bh(&tp->lock);
1403
1404 if (__tg3_writephy(tp, mii_id, reg, val))
1405 ret = -EIO;
1406
1407 spin_unlock_bh(&tp->lock);
1408
1409 return ret;
1410 }
1411
1412 static void tg3_mdio_config_5785(struct tg3 *tp)
1413 {
1414 u32 val;
1415 struct phy_device *phydev;
1416
1417 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1418 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1419 case PHY_ID_BCM50610:
1420 case PHY_ID_BCM50610M:
1421 val = MAC_PHYCFG2_50610_LED_MODES;
1422 break;
1423 case PHY_ID_BCMAC131:
1424 val = MAC_PHYCFG2_AC131_LED_MODES;
1425 break;
1426 case PHY_ID_RTL8211C:
1427 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1428 break;
1429 case PHY_ID_RTL8201E:
1430 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1431 break;
1432 default:
1433 return;
1434 }
1435
1436 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1437 tw32(MAC_PHYCFG2, val);
1438
1439 val = tr32(MAC_PHYCFG1);
1440 val &= ~(MAC_PHYCFG1_RGMII_INT |
1441 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1442 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1443 tw32(MAC_PHYCFG1, val);
1444
1445 return;
1446 }
1447
1448 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1449 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1450 MAC_PHYCFG2_FMODE_MASK_MASK |
1451 MAC_PHYCFG2_GMODE_MASK_MASK |
1452 MAC_PHYCFG2_ACT_MASK_MASK |
1453 MAC_PHYCFG2_QUAL_MASK_MASK |
1454 MAC_PHYCFG2_INBAND_ENABLE;
1455
1456 tw32(MAC_PHYCFG2, val);
1457
1458 val = tr32(MAC_PHYCFG1);
1459 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1460 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1461 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1462 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1463 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1464 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1465 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1466 }
1467 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1468 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1469 tw32(MAC_PHYCFG1, val);
1470
1471 val = tr32(MAC_EXT_RGMII_MODE);
1472 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1473 MAC_RGMII_MODE_RX_QUALITY |
1474 MAC_RGMII_MODE_RX_ACTIVITY |
1475 MAC_RGMII_MODE_RX_ENG_DET |
1476 MAC_RGMII_MODE_TX_ENABLE |
1477 MAC_RGMII_MODE_TX_LOWPWR |
1478 MAC_RGMII_MODE_TX_RESET);
1479 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1480 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1481 val |= MAC_RGMII_MODE_RX_INT_B |
1482 MAC_RGMII_MODE_RX_QUALITY |
1483 MAC_RGMII_MODE_RX_ACTIVITY |
1484 MAC_RGMII_MODE_RX_ENG_DET;
1485 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1486 val |= MAC_RGMII_MODE_TX_ENABLE |
1487 MAC_RGMII_MODE_TX_LOWPWR |
1488 MAC_RGMII_MODE_TX_RESET;
1489 }
1490 tw32(MAC_EXT_RGMII_MODE, val);
1491 }
1492
1493 static void tg3_mdio_start(struct tg3 *tp)
1494 {
1495 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1496 tw32_f(MAC_MI_MODE, tp->mi_mode);
1497 udelay(80);
1498
1499 if (tg3_flag(tp, MDIOBUS_INITED) &&
1500 tg3_asic_rev(tp) == ASIC_REV_5785)
1501 tg3_mdio_config_5785(tp);
1502 }
1503
1504 static int tg3_mdio_init(struct tg3 *tp)
1505 {
1506 int i;
1507 u32 reg;
1508 struct phy_device *phydev;
1509
1510 if (tg3_flag(tp, 5717_PLUS)) {
1511 u32 is_serdes;
1512
1513 tp->phy_addr = tp->pci_fn + 1;
1514
1515 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1516 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1517 else
1518 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1519 TG3_CPMU_PHY_STRAP_IS_SERDES;
1520 if (is_serdes)
1521 tp->phy_addr += 7;
1522 } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1523 int addr;
1524
1525 addr = ssb_gige_get_phyaddr(tp->pdev);
1526 if (addr < 0)
1527 return addr;
1528 tp->phy_addr = addr;
1529 } else
1530 tp->phy_addr = TG3_PHY_MII_ADDR;
1531
1532 tg3_mdio_start(tp);
1533
1534 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1535 return 0;
1536
1537 tp->mdio_bus = mdiobus_alloc();
1538 if (tp->mdio_bus == NULL)
1539 return -ENOMEM;
1540
1541 tp->mdio_bus->name = "tg3 mdio bus";
1542 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
1543 tp->mdio_bus->priv = tp;
1544 tp->mdio_bus->parent = &tp->pdev->dev;
1545 tp->mdio_bus->read = &tg3_mdio_read;
1546 tp->mdio_bus->write = &tg3_mdio_write;
1547 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1548
1549 /* The bus registration will look for all the PHYs on the mdio bus.
1550 * Unfortunately, it does not ensure the PHY is powered up before
1551 * accessing the PHY ID registers. A chip reset is the
1552 * quickest way to bring the device back to an operational state.
1553 */
1554 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1555 tg3_bmcr_reset(tp);
1556
1557 i = mdiobus_register(tp->mdio_bus);
1558 if (i) {
1559 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1560 mdiobus_free(tp->mdio_bus);
1561 return i;
1562 }
1563
1564 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1565
1566 if (!phydev || !phydev->drv) {
1567 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1568 mdiobus_unregister(tp->mdio_bus);
1569 mdiobus_free(tp->mdio_bus);
1570 return -ENODEV;
1571 }
1572
1573 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1574 case PHY_ID_BCM57780:
1575 phydev->interface = PHY_INTERFACE_MODE_GMII;
1576 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1577 break;
1578 case PHY_ID_BCM50610:
1579 case PHY_ID_BCM50610M:
1580 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1581 PHY_BRCM_RX_REFCLK_UNUSED |
1582 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1583 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1584 fallthrough;
1585 case PHY_ID_RTL8211C:
1586 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1587 break;
1588 case PHY_ID_RTL8201E:
1589 case PHY_ID_BCMAC131:
1590 phydev->interface = PHY_INTERFACE_MODE_MII;
1591 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1592 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1593 break;
1594 }
1595
1596 tg3_flag_set(tp, MDIOBUS_INITED);
1597
1598 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1599 tg3_mdio_config_5785(tp);
1600
1601 return 0;
1602 }
1603
1604 static void tg3_mdio_fini(struct tg3 *tp)
1605 {
1606 if (tg3_flag(tp, MDIOBUS_INITED)) {
1607 tg3_flag_clear(tp, MDIOBUS_INITED);
1608 mdiobus_unregister(tp->mdio_bus);
1609 mdiobus_free(tp->mdio_bus);
1610 }
1611 }
1612
1613 /* tp->lock is held. */
1614 static inline void tg3_generate_fw_event(struct tg3 *tp)
1615 {
1616 u32 val;
1617
1618 val = tr32(GRC_RX_CPU_EVENT);
1619 val |= GRC_RX_CPU_DRIVER_EVENT;
1620 tw32_f(GRC_RX_CPU_EVENT, val);
1621
1622 tp->last_event_jiffies = jiffies;
1623 }
1624
1625 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1626
1627 /* tp->lock is held. */
1628 static void tg3_wait_for_event_ack(struct tg3 *tp)
1629 {
1630 int i;
1631 unsigned int delay_cnt;
1632 long time_remain;
1633
1634 /* If enough time has passed, no wait is necessary. */
1635 time_remain = (long)(tp->last_event_jiffies + 1 +
1636 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1637 (long)jiffies;
1638 if (time_remain < 0)
1639 return;
1640
1641 /* Check if we can shorten the wait time. */
1642 delay_cnt = jiffies_to_usecs(time_remain);
1643 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1644 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1645 delay_cnt = (delay_cnt >> 3) + 1;
1646
1647 for (i = 0; i < delay_cnt; i++) {
1648 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1649 break;
1650 if (pci_channel_offline(tp->pdev))
1651 break;
1652
1653 udelay(8);
1654 }
1655 }
1656
1657 /* tp->lock is held. */
1658 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1659 {
1660 u32 reg, val;
1661
1662 val = 0;
1663 if (!tg3_readphy(tp, MII_BMCR, &reg))
1664 val = reg << 16;
1665 if (!tg3_readphy(tp, MII_BMSR, &reg))
1666 val |= (reg & 0xffff);
1667 *data++ = val;
1668
1669 val = 0;
1670 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1671 val = reg << 16;
1672 if (!tg3_readphy(tp, MII_LPA, &reg))
1673 val |= (reg & 0xffff);
1674 *data++ = val;
1675
1676 val = 0;
1677 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1678 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1679 val = reg << 16;
1680 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1681 val |= (reg & 0xffff);
1682 }
1683 *data++ = val;
1684
1685 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1686 val = reg << 16;
1687 else
1688 val = 0;
1689 *data++ = val;
1690 }
1691
1692 /* tp->lock is held. */
1693 static void tg3_ump_link_report(struct tg3 *tp)
1694 {
1695 u32 data[4];
1696
1697 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1698 return;
1699
1700 tg3_phy_gather_ump_data(tp, data);
1701
1702 tg3_wait_for_event_ack(tp);
1703
1704 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1705 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1706 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1707 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1708 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1709 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1710
1711 tg3_generate_fw_event(tp);
1712 }
1713
1714 /* tp->lock is held. */
1715 static void tg3_stop_fw(struct tg3 *tp)
1716 {
1717 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1718 /* Wait for RX cpu to ACK the previous event. */
1719 tg3_wait_for_event_ack(tp);
1720
1721 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1722
1723 tg3_generate_fw_event(tp);
1724
1725 /* Wait for RX cpu to ACK this event. */
1726 tg3_wait_for_event_ack(tp);
1727 }
1728 }
1729
1730 /* tp->lock is held. */
1731 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1732 {
1733 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1734 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1735
1736 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1737 switch (kind) {
1738 case RESET_KIND_INIT:
1739 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1740 DRV_STATE_START);
1741 break;
1742
1743 case RESET_KIND_SHUTDOWN:
1744 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745 DRV_STATE_UNLOAD);
1746 break;
1747
1748 case RESET_KIND_SUSPEND:
1749 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750 DRV_STATE_SUSPEND);
1751 break;
1752
1753 default:
1754 break;
1755 }
1756 }
1757 }
1758
1759 /* tp->lock is held. */
1760 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1761 {
1762 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1763 switch (kind) {
1764 case RESET_KIND_INIT:
1765 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1766 DRV_STATE_START_DONE);
1767 break;
1768
1769 case RESET_KIND_SHUTDOWN:
1770 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1771 DRV_STATE_UNLOAD_DONE);
1772 break;
1773
1774 default:
1775 break;
1776 }
1777 }
1778 }
1779
1780 /* tp->lock is held. */
tg3_write_sig_legacy(struct tg3 * tp,int kind)1781 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1782 {
1783 if (tg3_flag(tp, ENABLE_ASF)) {
1784 switch (kind) {
1785 case RESET_KIND_INIT:
1786 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787 DRV_STATE_START);
1788 break;
1789
1790 case RESET_KIND_SHUTDOWN:
1791 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1792 DRV_STATE_UNLOAD);
1793 break;
1794
1795 case RESET_KIND_SUSPEND:
1796 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1797 DRV_STATE_SUSPEND);
1798 break;
1799
1800 default:
1801 break;
1802 }
1803 }
1804 }
1805
tg3_poll_fw(struct tg3 * tp)1806 static int tg3_poll_fw(struct tg3 *tp)
1807 {
1808 int i;
1809 u32 val;
1810
1811 if (tg3_flag(tp, NO_FWARE_REPORTED))
1812 return 0;
1813
1814 if (tg3_flag(tp, IS_SSB_CORE)) {
1815 /* We don't use firmware. */
1816 return 0;
1817 }
1818
1819 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1820 /* Wait up to 20ms for init done. */
1821 for (i = 0; i < 200; i++) {
1822 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1823 return 0;
1824 if (pci_channel_offline(tp->pdev))
1825 return -ENODEV;
1826
1827 udelay(100);
1828 }
1829 return -ENODEV;
1830 }
1831
1832 /* Wait for firmware initialization to complete. */
1833 for (i = 0; i < 100000; i++) {
1834 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1835 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1836 break;
1837 if (pci_channel_offline(tp->pdev)) {
1838 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1839 tg3_flag_set(tp, NO_FWARE_REPORTED);
1840 netdev_info(tp->dev, "No firmware running\n");
1841 }
1842
1843 break;
1844 }
1845
1846 udelay(10);
1847 }
1848
1849 /* Chip might not be fitted with firmware. Some Sun onboard
1850 * parts are configured like that. So don't signal the timeout
1851 * of the above loop as an error, but do report the lack of
1852 * running firmware once.
1853 */
1854 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1855 tg3_flag_set(tp, NO_FWARE_REPORTED);
1856
1857 netdev_info(tp->dev, "No firmware running\n");
1858 }
1859
1860 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1861 /* The 57765 A0 needs a little more
1862 * time to do some important work.
1863 */
1864 mdelay(10);
1865 }
1866
1867 return 0;
1868 }
1869
tg3_link_report(struct tg3 * tp)1870 static void tg3_link_report(struct tg3 *tp)
1871 {
1872 if (!netif_carrier_ok(tp->dev)) {
1873 netif_info(tp, link, tp->dev, "Link is down\n");
1874 tg3_ump_link_report(tp);
1875 } else if (netif_msg_link(tp)) {
1876 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1877 (tp->link_config.active_speed == SPEED_1000 ?
1878 1000 :
1879 (tp->link_config.active_speed == SPEED_100 ?
1880 100 : 10)),
1881 (tp->link_config.active_duplex == DUPLEX_FULL ?
1882 "full" : "half"));
1883
1884 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1885 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1886 "on" : "off",
1887 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1888 "on" : "off");
1889
1890 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1891 netdev_info(tp->dev, "EEE is %s\n",
1892 tp->setlpicnt ? "enabled" : "disabled");
1893
1894 tg3_ump_link_report(tp);
1895 }
1896
1897 tp->link_up = netif_carrier_ok(tp->dev);
1898 }
1899
tg3_decode_flowctrl_1000T(u32 adv)1900 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1901 {
1902 u32 flowctrl = 0;
1903
1904 if (adv & ADVERTISE_PAUSE_CAP) {
1905 flowctrl |= FLOW_CTRL_RX;
1906 if (!(adv & ADVERTISE_PAUSE_ASYM))
1907 flowctrl |= FLOW_CTRL_TX;
1908 } else if (adv & ADVERTISE_PAUSE_ASYM)
1909 flowctrl |= FLOW_CTRL_TX;
1910
1911 return flowctrl;
1912 }
1913
tg3_advert_flowctrl_1000X(u8 flow_ctrl)1914 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1915 {
1916 u16 miireg;
1917
1918 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1919 miireg = ADVERTISE_1000XPAUSE;
1920 else if (flow_ctrl & FLOW_CTRL_TX)
1921 miireg = ADVERTISE_1000XPSE_ASYM;
1922 else if (flow_ctrl & FLOW_CTRL_RX)
1923 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1924 else
1925 miireg = 0;
1926
1927 return miireg;
1928 }
1929
tg3_decode_flowctrl_1000X(u32 adv)1930 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1931 {
1932 u32 flowctrl = 0;
1933
1934 if (adv & ADVERTISE_1000XPAUSE) {
1935 flowctrl |= FLOW_CTRL_RX;
1936 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1937 flowctrl |= FLOW_CTRL_TX;
1938 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1939 flowctrl |= FLOW_CTRL_TX;
1940
1941 return flowctrl;
1942 }
1943
tg3_resolve_flowctrl_1000X(u16 lcladv,u16 rmtadv)1944 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1945 {
1946 u8 cap = 0;
1947
1948 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1949 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1950 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1951 if (lcladv & ADVERTISE_1000XPAUSE)
1952 cap = FLOW_CTRL_RX;
1953 if (rmtadv & ADVERTISE_1000XPAUSE)
1954 cap = FLOW_CTRL_TX;
1955 }
1956
1957 return cap;
1958 }
1959
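/* The 1000BASE-X pause resolution above implements the standard
 * symmetric/asymmetric pause matrix (cf. IEEE 802.3 Annex 28B):
 *
 *	both ends advertise PAUSE                  -> TX and RX pause
 *	both advertise ASYM, only local has PAUSE  -> RX pause only
 *	both advertise ASYM, only remote has PAUSE -> TX pause only
 *	any other combination                      -> no pause
 */
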
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phy_set_max_speed(phydev, SPEED_1000);
			phy_support_asym_pause(phydev);
			break;
		}
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
		phy_set_max_speed(phydev, SPEED_100);
		phy_support_asym_pause(phydev);
		break;
	default:
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phy_attached_info(phydev);

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		ethtool_convert_legacy_u32_to_link_mode(
			phydev->advertising, tp->link_config.advertising);
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}

static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}

static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
	if (err)
		return err;

	reg32 &= ~0x3000;
	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

	return 0;
}

static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}

static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))
		netdev_warn(tp->dev,
			    "Management side-band traffic will be interrupted during phy settings change\n");
}

/* Reset the tigon3 PHY and bring it back to a known-good state,
 * reapplying the chip-specific PHY workarounds afterwards.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))

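/* Each PCI function owns a 4-bit slot in the shared status word at bit
 * offset 4 * pci_fn (offset by TG3_APE_GPIO_MSG_SHIFT in the APE
 * register).  Illustrative example: if function 0 reports DRVR_PRES
 * and function 2 reports DRVR_PRES | NEED_VAUX, the packed status is
 * 0x301 - bit 0 from function 0 plus bits 8 and 9 from function 2.
 */
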
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}

static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}

static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}

static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}

static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}

static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static bool tg3_phy_power_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5700:
	case ASIC_REV_5704:
		return true;
	case ASIC_REV_5780:
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			return true;
		return false;
	case ASIC_REV_5717:
		if (!tp->pci_fn)
			return true;
		return false;
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}

static bool tg3_phy_led_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}

static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}

static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

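/* Worked example for the translation above, assuming the 264-byte page
 * size of the Atmel AT45DB0x1B parts it targets (with
 * ATMEL_AT45DB0X1B_PAGE_POS == 9): logical address 1000 falls in page 3
 * at offset 208, so the physical address is (3 << 9) + 208 = 1744.
 * tg3_nvram_logical_addr() below performs the exact inverse mapping.
 */
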
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}

/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}

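/* A minimal usage sketch for the helper above: callers that fill a
 * caller-visible byte buffer (e.g. an eeprom dump) read through
 * tg3_nvram_read_be32() so that a plain memcpy() of the returned word
 * reproduces the NVRAM byte order on any host:
 *
 *	__be32 word;
 *
 *	if (!tg3_nvram_read_be32(tp, offset, &word))
 *		memcpy(buf + i, &word, sizeof(word));
 */
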
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			 EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
		     (0 << EEPROM_ADDR_DEVID_SHIFT) |
		     (addr & EEPROM_ADDR_ADDR_MASK) |
		     EEPROM_ADDR_START |
		     EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}

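/* To summarize the unbuffered path above for each page it touches:
 * read the whole page into a bounce buffer, merge in the caller's
 * data, issue a write-enable, erase the page, issue another
 * write-enable, then stream the page back out one dword at a time with
 * NVRAM_CMD_FIRST on the first word and NVRAM_CMD_LAST on the last.
 * A write straddling a page boundary simply repeats this cycle.
 */
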
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}

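/* Illustrative command framing for the buffered path above: writing
 * 8 bytes at a page-aligned offset issues two commands - the first
 * dword carries NVRAM_CMD_FIRST (page_off == 0) and the second carries
 * NVRAM_CMD_LAST (i == len - 4).  For non-flash (EEPROM) parts every
 * dword is tagged FIRST | LAST, i.e. each word is a self-contained
 * write.
 */
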
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}

#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
		if (pci_channel_offline(tp->pdev))
			return -EBUSY;
	}

	return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
	udelay(10);

	return rc;
}

/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}

/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
}

/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}

static int tg3_fw_data_len(struct tg3 *tp,
			   const struct tg3_firmware_hdr *fw_hdr)
{
	int fw_len;

	/* Non-fragmented firmware has one firmware header followed by a
	 * contiguous chunk of data to be written.  The length field in that
	 * header is not the length of data to be written but the complete
	 * length of the bss.  The data length is determined based on
	 * tp->fw->size minus headers.
	 *
	 * Fragmented firmware has a main header followed by multiple
	 * fragments.  Each fragment is identical to non-fragmented firmware
	 * with a firmware header followed by a contiguous chunk of data.  In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment i.e. fragment data + header length.  Data length is
	 * therefore length field in the header minus TG3_FW_HDR_LEN.
	 */
	if (tp->fw_len == 0xffffffff)
		fw_len = be32_to_cpu(fw_hdr->len);
	else
		fw_len = tp->fw->size;

	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}

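/* Illustrative layout of the two image formats handled above:
 *
 *	non-fragmented:	[ hdr: ver | base_addr | len = bss length ]
 *			[ tp->fw->size - TG3_FW_HDR_LEN data bytes ]
 *
 *	fragmented:	[ main hdr: len = 0xffffffff ]
 *			[ frag hdr: len = a ][ a - TG3_FW_HDR_LEN data ]
 *			[ frag hdr: len = b ][ b - TG3_FW_HDR_LEN data ]
 *			...
 */
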
3694 /* tp->lock is held. */
tg3_load_firmware_cpu(struct tg3 * tp,u32 cpu_base,u32 cpu_scratch_base,int cpu_scratch_size,const struct tg3_firmware_hdr * fw_hdr)3695 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3696 u32 cpu_scratch_base, int cpu_scratch_size,
3697 const struct tg3_firmware_hdr *fw_hdr)
3698 {
3699 int err, i;
3700 void (*write_op)(struct tg3 *, u32, u32);
3701 int total_len = tp->fw->size;
3702
3703 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3704 netdev_err(tp->dev,
3705 "%s: Trying to load TX cpu firmware which is 5705\n",
3706 __func__);
3707 return -EINVAL;
3708 }
3709
3710 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3711 write_op = tg3_write_mem;
3712 else
3713 write_op = tg3_write_indirect_reg32;
3714
3715 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3716 /* It is possible that bootcode is still loading at this point.
3717 * Get the nvram lock first before halting the cpu.
3718 */
3719 int lock_err = tg3_nvram_lock(tp);
3720 err = tg3_halt_cpu(tp, cpu_base);
3721 if (!lock_err)
3722 tg3_nvram_unlock(tp);
3723 if (err)
3724 goto out;
3725
3726 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3727 write_op(tp, cpu_scratch_base + i, 0);
3728 tw32(cpu_base + CPU_STATE, 0xffffffff);
3729 tw32(cpu_base + CPU_MODE,
3730 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3731 } else {
3732 /* Subtract additional main header for fragmented firmware and
3733 * advance to the first fragment
3734 */
3735 total_len -= TG3_FW_HDR_LEN;
3736 fw_hdr++;
3737 }
3738
3739 do {
3740 u32 *fw_data = (u32 *)(fw_hdr + 1);
3741 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3742 write_op(tp, cpu_scratch_base +
3743 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3744 (i * sizeof(u32)),
3745 be32_to_cpu(fw_data[i]));
3746
3747 total_len -= be32_to_cpu(fw_hdr->len);
3748
3749 /* Advance to next fragment */
3750 fw_hdr = (struct tg3_firmware_hdr *)
3751 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3752 } while (total_len > 0);
3753
3754 err = 0;
3755
3756 out:
3757 return err;
3758 }
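
/* Sketch of the fragment walk above with hypothetical sizes: given
 * total_len = 0x400 remaining after the main header and two fragments
 * with len = 0x300 and len = 0x100, total_len drops 0x400 -> 0x100 -> 0
 * over two iterations, fw_hdr advancing by each fragment's full len.
 * For non-fragmented firmware the loop body runs exactly once, because
 * the header's len (the complete bss length) is at least as large as
 * the remaining total_len.
 */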
3759
3760 /* tp->lock is held. */
3761 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3762 {
3763 int i;
3764 const int iters = 5;
3765
3766 tw32(cpu_base + CPU_STATE, 0xffffffff);
3767 tw32_f(cpu_base + CPU_PC, pc);
3768
3769 for (i = 0; i < iters; i++) {
3770 if (tr32(cpu_base + CPU_PC) == pc)
3771 break;
3772 tw32(cpu_base + CPU_STATE, 0xffffffff);
3773 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3774 tw32_f(cpu_base + CPU_PC, pc);
3775 udelay(1000);
3776 }
3777
3778 return (i == iters) ? -EBUSY : 0;
3779 }
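
/* Usage note, derived from the constants above: with iters = 5 and a
 * 1000 us delay per retry, the CPU gets roughly 5 ms to latch the new
 * PC before the helper gives up with -EBUSY.
 */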
3780
3781 /* tp->lock is held. */
3782 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3783 {
3784 const struct tg3_firmware_hdr *fw_hdr;
3785 int err;
3786
3787 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3788
3789 	/* The firmware blob starts with version numbers, followed by the
3790 	 * start address and length. The length field holds the complete
3791 	 * length (end_address_of_bss - start_address_of_text); the remainder
3792 	 * is the blob to be loaded contiguously from the start address.
3793 	 */
3794
3795 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3796 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3797 fw_hdr);
3798 if (err)
3799 return err;
3800
3801 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3802 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3803 fw_hdr);
3804 if (err)
3805 return err;
3806
3807 /* Now startup only the RX cpu. */
3808 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3809 be32_to_cpu(fw_hdr->base_addr));
3810 if (err) {
3811 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3812 "should be %08x\n", __func__,
3813 tr32(RX_CPU_BASE + CPU_PC),
3814 be32_to_cpu(fw_hdr->base_addr));
3815 return -ENODEV;
3816 }
3817
3818 tg3_rxcpu_resume(tp);
3819
3820 return 0;
3821 }
3822
3823 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3824 {
3825 const int iters = 1000;
3826 int i;
3827 u32 val;
3828
3829 /* Wait for boot code to complete initialization and enter service
3830 * loop. It is then safe to download service patches
3831 */
3832 for (i = 0; i < iters; i++) {
3833 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3834 break;
3835
3836 udelay(10);
3837 }
3838
3839 if (i == iters) {
3840 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3841 return -EBUSY;
3842 }
3843
3844 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3845 if (val & 0xff) {
3846 netdev_warn(tp->dev,
3847 "Other patches exist. Not downloading EEE patch\n");
3848 return -EEXIST;
3849 }
3850
3851 return 0;
3852 }
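
/* The polling budget above is 1000 iterations of 10 us, i.e. roughly
 * 10 ms for the boot code to reach its service loop before we give up
 * with -EBUSY.
 */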
3853
3854 /* tp->lock is held. */
3855 static void tg3_load_57766_firmware(struct tg3 *tp)
3856 {
3857 struct tg3_firmware_hdr *fw_hdr;
3858
3859 if (!tg3_flag(tp, NO_NVRAM))
3860 return;
3861
3862 if (tg3_validate_rxcpu_state(tp))
3863 return;
3864
3865 if (!tp->fw)
3866 return;
3867
3868 /* This firmware blob has a different format than older firmware
3869 * releases as given below. The main difference is we have fragmented
3870 * data to be written to non-contiguous locations.
3871 *
3872 * In the beginning we have a firmware header identical to other
3873 * firmware which consists of version, base addr and length. The length
3874 * here is unused and set to 0xffffffff.
3875 *
3876 * This is followed by a series of firmware fragments which are
3877 	 * individually identical to previous firmware, i.e. each has a
3878 	 * firmware header followed by the data for that fragment. The version
3879 	 * field of the individual fragment header is unused.
3880 */
3881
3882 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3883 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3884 return;
3885
3886 if (tg3_rxcpu_pause(tp))
3887 return;
3888
3889 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3890 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3891
3892 tg3_rxcpu_resume(tp);
3893 }
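
/* Rough layout of the fragmented 57766 image described above (the
 * fragment count and sizes are illustrative, not fixed):
 *
 *   +------------------------------------+
 *   | main hdr, len = 0xffffffff         |
 *   +------------------------------------+
 *   | frag hdr, len = hdr + data         |
 *   | fragment data ...                  |
 *   +------------------------------------+
 *   | frag hdr                           |
 *   | fragment data ...                  |
 *   +------------------------------------+
 */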
3894
3895 /* tp->lock is held. */
3896 static int tg3_load_tso_firmware(struct tg3 *tp)
3897 {
3898 const struct tg3_firmware_hdr *fw_hdr;
3899 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3900 int err;
3901
3902 if (!tg3_flag(tp, FW_TSO))
3903 return 0;
3904
3905 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3906
3907 	/* The firmware blob starts with version numbers, followed by the
3908 	 * start address and length. The length field holds the complete
3909 	 * length (end_address_of_bss - start_address_of_text); the remainder
3910 	 * is the blob to be loaded contiguously from the start address.
3911 	 */
3912
3913 cpu_scratch_size = tp->fw_len;
3914
3915 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3916 cpu_base = RX_CPU_BASE;
3917 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3918 } else {
3919 cpu_base = TX_CPU_BASE;
3920 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3921 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3922 }
3923
3924 err = tg3_load_firmware_cpu(tp, cpu_base,
3925 cpu_scratch_base, cpu_scratch_size,
3926 fw_hdr);
3927 if (err)
3928 return err;
3929
3930 /* Now startup the cpu. */
3931 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3932 be32_to_cpu(fw_hdr->base_addr));
3933 if (err) {
3934 netdev_err(tp->dev,
3935 "%s fails to set CPU PC, is %08x should be %08x\n",
3936 __func__, tr32(cpu_base + CPU_PC),
3937 be32_to_cpu(fw_hdr->base_addr));
3938 return -ENODEV;
3939 }
3940
3941 tg3_resume_cpu(tp, cpu_base);
3942 return 0;
3943 }
3944
3945 /* tp->lock is held. */
3946 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3947 int index)
3948 {
3949 u32 addr_high, addr_low;
3950
3951 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3952 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3953 (mac_addr[4] << 8) | mac_addr[5]);
3954
3955 if (index < 4) {
3956 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3957 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3958 } else {
3959 index -= 4;
3960 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3961 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3962 }
3963 }
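
/* Example of the packing above for the hypothetical address
 * 00:10:18:aa:bb:cc:
 *
 *   addr_high = 0x00000010;    (bytes 0-1)
 *   addr_low  = 0x18aabbcc;    (bytes 2-5)
 */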
3964
3965 /* tp->lock is held. */
3966 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3967 {
3968 u32 addr_high;
3969 int i;
3970
3971 for (i = 0; i < 4; i++) {
3972 if (i == 1 && skip_mac_1)
3973 continue;
3974 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3975 }
3976
3977 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3978 tg3_asic_rev(tp) == ASIC_REV_5704) {
3979 for (i = 4; i < 16; i++)
3980 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3981 }
3982
3983 addr_high = (tp->dev->dev_addr[0] +
3984 tp->dev->dev_addr[1] +
3985 tp->dev->dev_addr[2] +
3986 tp->dev->dev_addr[3] +
3987 tp->dev->dev_addr[4] +
3988 tp->dev->dev_addr[5]) &
3989 TX_BACKOFF_SEED_MASK;
3990 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3991 }
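
/* The TX backoff seed written above is just the byte-wise sum of the
 * MAC address masked by TX_BACKOFF_SEED_MASK. For the hypothetical
 * address 00:10:18:aa:bb:cc the sum is 0x259, so (assuming the mask
 * is at least 10 bits wide) MAC_TX_BACKOFF_SEED receives 0x259.
 */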
3992
3993 static void tg3_enable_register_access(struct tg3 *tp)
3994 {
3995 /*
3996 * Make sure register accesses (indirect or otherwise) will function
3997 * correctly.
3998 */
3999 pci_write_config_dword(tp->pdev,
4000 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4001 }
4002
4003 static int tg3_power_up(struct tg3 *tp)
4004 {
4005 int err;
4006
4007 tg3_enable_register_access(tp);
4008
4009 err = pci_set_power_state(tp->pdev, PCI_D0);
4010 if (!err) {
4011 /* Switch out of Vaux if it is a NIC */
4012 tg3_pwrsrc_switch_to_vmain(tp);
4013 } else {
4014 netdev_err(tp->dev, "Transition to D0 failed\n");
4015 }
4016
4017 return err;
4018 }
4019
4020 static int tg3_setup_phy(struct tg3 *, bool);
4021
4022 static int tg3_power_down_prepare(struct tg3 *tp)
4023 {
4024 u32 misc_host_ctrl;
4025 bool device_should_wake, do_low_power;
4026
4027 tg3_enable_register_access(tp);
4028
4029 /* Restore the CLKREQ setting. */
4030 if (tg3_flag(tp, CLKREQ_BUG))
4031 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4032 PCI_EXP_LNKCTL_CLKREQ_EN);
4033
4034 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4035 tw32(TG3PCI_MISC_HOST_CTRL,
4036 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4037
4038 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4039 tg3_flag(tp, WOL_ENABLE);
4040
4041 if (tg3_flag(tp, USE_PHYLIB)) {
4042 do_low_power = false;
4043 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4044 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4045 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4046 struct phy_device *phydev;
4047 u32 phyid;
4048
4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4050
4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4052
4053 tp->link_config.speed = phydev->speed;
4054 tp->link_config.duplex = phydev->duplex;
4055 tp->link_config.autoneg = phydev->autoneg;
4056 ethtool_convert_link_mode_to_legacy_u32(
4057 &tp->link_config.advertising,
4058 phydev->advertising);
4059
4060 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4061 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4062 advertising);
4063 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4064 advertising);
4065 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4066 advertising);
4067
4068 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4069 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4070 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4071 advertising);
4072 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4073 advertising);
4074 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4075 advertising);
4076 } else {
4077 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4078 advertising);
4079 }
4080 }
4081
4082 linkmode_copy(phydev->advertising, advertising);
4083 phy_start_aneg(phydev);
4084
4085 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4086 if (phyid != PHY_ID_BCMAC131) {
4087 phyid &= PHY_BCM_OUI_MASK;
4088 if (phyid == PHY_BCM_OUI_1 ||
4089 phyid == PHY_BCM_OUI_2 ||
4090 phyid == PHY_BCM_OUI_3)
4091 do_low_power = true;
4092 }
4093 }
4094 } else {
4095 do_low_power = true;
4096
4097 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4098 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4099
4100 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4101 tg3_setup_phy(tp, false);
4102 }
4103
4104 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4105 u32 val;
4106
4107 val = tr32(GRC_VCPU_EXT_CTRL);
4108 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4109 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4110 int i;
4111 u32 val;
4112
4113 for (i = 0; i < 200; i++) {
4114 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4115 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4116 break;
4117 msleep(1);
4118 }
4119 }
4120 if (tg3_flag(tp, WOL_CAP))
4121 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4122 WOL_DRV_STATE_SHUTDOWN |
4123 WOL_DRV_WOL |
4124 WOL_SET_MAGIC_PKT);
4125
4126 if (device_should_wake) {
4127 u32 mac_mode;
4128
4129 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4130 if (do_low_power &&
4131 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4132 tg3_phy_auxctl_write(tp,
4133 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4134 MII_TG3_AUXCTL_PCTL_WOL_EN |
4135 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4136 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4137 udelay(40);
4138 }
4139
4140 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4141 mac_mode = MAC_MODE_PORT_MODE_GMII;
4142 else if (tp->phy_flags &
4143 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4144 if (tp->link_config.active_speed == SPEED_1000)
4145 mac_mode = MAC_MODE_PORT_MODE_GMII;
4146 else
4147 mac_mode = MAC_MODE_PORT_MODE_MII;
4148 } else
4149 mac_mode = MAC_MODE_PORT_MODE_MII;
4150
4151 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4152 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4153 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4154 SPEED_100 : SPEED_10;
4155 if (tg3_5700_link_polarity(tp, speed))
4156 mac_mode |= MAC_MODE_LINK_POLARITY;
4157 else
4158 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4159 }
4160 } else {
4161 mac_mode = MAC_MODE_PORT_MODE_TBI;
4162 }
4163
4164 if (!tg3_flag(tp, 5750_PLUS))
4165 tw32(MAC_LED_CTRL, tp->led_ctrl);
4166
4167 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4168 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4169 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4170 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4171
4172 if (tg3_flag(tp, ENABLE_APE))
4173 mac_mode |= MAC_MODE_APE_TX_EN |
4174 MAC_MODE_APE_RX_EN |
4175 MAC_MODE_TDE_ENABLE;
4176
4177 tw32_f(MAC_MODE, mac_mode);
4178 udelay(100);
4179
4180 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4181 udelay(10);
4182 }
4183
4184 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4185 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4186 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4187 u32 base_val;
4188
4189 base_val = tp->pci_clock_ctrl;
4190 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4191 CLOCK_CTRL_TXCLK_DISABLE);
4192
4193 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4194 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4195 } else if (tg3_flag(tp, 5780_CLASS) ||
4196 tg3_flag(tp, CPMU_PRESENT) ||
4197 tg3_asic_rev(tp) == ASIC_REV_5906) {
4198 /* do nothing */
4199 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4200 u32 newbits1, newbits2;
4201
4202 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4203 tg3_asic_rev(tp) == ASIC_REV_5701) {
4204 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4205 CLOCK_CTRL_TXCLK_DISABLE |
4206 CLOCK_CTRL_ALTCLK);
4207 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208 } else if (tg3_flag(tp, 5705_PLUS)) {
4209 newbits1 = CLOCK_CTRL_625_CORE;
4210 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4211 } else {
4212 newbits1 = CLOCK_CTRL_ALTCLK;
4213 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4214 }
4215
4216 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4217 40);
4218
4219 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4220 40);
4221
4222 if (!tg3_flag(tp, 5705_PLUS)) {
4223 u32 newbits3;
4224
4225 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4226 tg3_asic_rev(tp) == ASIC_REV_5701) {
4227 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4228 CLOCK_CTRL_TXCLK_DISABLE |
4229 CLOCK_CTRL_44MHZ_CORE);
4230 } else {
4231 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4232 }
4233
4234 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4235 tp->pci_clock_ctrl | newbits3, 40);
4236 }
4237 }
4238
4239 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4240 tg3_power_down_phy(tp, do_low_power);
4241
4242 tg3_frob_aux_power(tp, true);
4243
4244 /* Workaround for unstable PLL clock */
4245 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4246 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4247 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4248 u32 val = tr32(0x7d00);
4249
4250 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4251 tw32(0x7d00, val);
4252 if (!tg3_flag(tp, ENABLE_ASF)) {
4253 int err;
4254
4255 err = tg3_nvram_lock(tp);
4256 tg3_halt_cpu(tp, RX_CPU_BASE);
4257 if (!err)
4258 tg3_nvram_unlock(tp);
4259 }
4260 }
4261
4262 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4263
4264 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4265
4266 return 0;
4267 }
4268
4269 static void tg3_power_down(struct tg3 *tp)
4270 {
4271 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4272 pci_set_power_state(tp->pdev, PCI_D3hot);
4273 }
4274
4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4276 {
4277 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4278 case MII_TG3_AUX_STAT_10HALF:
4279 *speed = SPEED_10;
4280 *duplex = DUPLEX_HALF;
4281 break;
4282
4283 case MII_TG3_AUX_STAT_10FULL:
4284 *speed = SPEED_10;
4285 *duplex = DUPLEX_FULL;
4286 break;
4287
4288 case MII_TG3_AUX_STAT_100HALF:
4289 *speed = SPEED_100;
4290 *duplex = DUPLEX_HALF;
4291 break;
4292
4293 case MII_TG3_AUX_STAT_100FULL:
4294 *speed = SPEED_100;
4295 *duplex = DUPLEX_FULL;
4296 break;
4297
4298 case MII_TG3_AUX_STAT_1000HALF:
4299 *speed = SPEED_1000;
4300 *duplex = DUPLEX_HALF;
4301 break;
4302
4303 case MII_TG3_AUX_STAT_1000FULL:
4304 *speed = SPEED_1000;
4305 *duplex = DUPLEX_FULL;
4306 break;
4307
4308 default:
4309 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4310 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4311 SPEED_10;
4312 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4313 DUPLEX_HALF;
4314 break;
4315 }
4316 *speed = SPEED_UNKNOWN;
4317 *duplex = DUPLEX_UNKNOWN;
4318 break;
4319 }
4320 }
4321
4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4323 {
4324 int err = 0;
4325 u32 val, new_adv;
4326
4327 new_adv = ADVERTISE_CSMA;
4328 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4329 new_adv |= mii_advertise_flowctrl(flowctrl);
4330
4331 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4332 if (err)
4333 goto done;
4334
4335 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4336 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4337
4338 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4339 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4340 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4341
4342 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4343 if (err)
4344 goto done;
4345 }
4346
4347 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4348 goto done;
4349
4350 tw32(TG3_CPMU_EEE_MODE,
4351 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4352
4353 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4354 if (!err) {
4355 u32 err2;
4356
4357 val = 0;
4358 /* Advertise 100-BaseTX EEE ability */
4359 if (advertise & ADVERTISED_100baseT_Full)
4360 val |= MDIO_AN_EEE_ADV_100TX;
4361 /* Advertise 1000-BaseT EEE ability */
4362 if (advertise & ADVERTISED_1000baseT_Full)
4363 val |= MDIO_AN_EEE_ADV_1000T;
4364
4365 if (!tp->eee.eee_enabled) {
4366 val = 0;
4367 tp->eee.advertised = 0;
4368 } else {
4369 tp->eee.advertised = advertise &
4370 (ADVERTISED_100baseT_Full |
4371 ADVERTISED_1000baseT_Full);
4372 }
4373
4374 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4375 if (err)
4376 val = 0;
4377
4378 switch (tg3_asic_rev(tp)) {
4379 case ASIC_REV_5717:
4380 case ASIC_REV_57765:
4381 case ASIC_REV_57766:
4382 case ASIC_REV_5719:
4383 			/* If we advertised any EEE modes above... */
4384 if (val)
4385 val = MII_TG3_DSP_TAP26_ALNOKO |
4386 MII_TG3_DSP_TAP26_RMRXSTO |
4387 MII_TG3_DSP_TAP26_OPCSINPT;
4388 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4389 fallthrough;
4390 case ASIC_REV_5720:
4391 case ASIC_REV_5762:
4392 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4393 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4394 MII_TG3_DSP_CH34TP2_HIBW01);
4395 }
4396
4397 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4398 if (!err)
4399 err = err2;
4400 }
4401
4402 done:
4403 return err;
4404 }
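
/* Worked example of the MII_ADVERTISE value built at the top of
 * tg3_phy_autoneg_cfg(), using the standard <linux/mii.h> constants:
 * with advertise covering all four 10/100 modes and
 * flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX,
 *
 *   new_adv = ADVERTISE_CSMA (0x0001)
 *           | ADVERTISE_ALL (0x01e0)
 *           | ADVERTISE_PAUSE_CAP (0x0400)
 *           = 0x05e1
 */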
4405
4406 static void tg3_phy_copper_begin(struct tg3 *tp)
4407 {
4408 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4409 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4410 u32 adv, fc;
4411
4412 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4413 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4414 adv = ADVERTISED_10baseT_Half |
4415 ADVERTISED_10baseT_Full;
4416 if (tg3_flag(tp, WOL_SPEED_100MB))
4417 adv |= ADVERTISED_100baseT_Half |
4418 ADVERTISED_100baseT_Full;
4419 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4420 if (!(tp->phy_flags &
4421 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4422 adv |= ADVERTISED_1000baseT_Half;
4423 adv |= ADVERTISED_1000baseT_Full;
4424 }
4425
4426 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4427 } else {
4428 adv = tp->link_config.advertising;
4429 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4430 adv &= ~(ADVERTISED_1000baseT_Half |
4431 ADVERTISED_1000baseT_Full);
4432
4433 fc = tp->link_config.flowctrl;
4434 }
4435
4436 tg3_phy_autoneg_cfg(tp, adv, fc);
4437
4438 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4439 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4440 /* Normally during power down we want to autonegotiate
4441 * the lowest possible speed for WOL. However, to avoid
4442 * link flap, we leave it untouched.
4443 */
4444 return;
4445 }
4446
4447 tg3_writephy(tp, MII_BMCR,
4448 BMCR_ANENABLE | BMCR_ANRESTART);
4449 } else {
4450 int i;
4451 u32 bmcr, orig_bmcr;
4452
4453 tp->link_config.active_speed = tp->link_config.speed;
4454 tp->link_config.active_duplex = tp->link_config.duplex;
4455
4456 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4457 /* With autoneg disabled, 5715 only links up when the
4458 * advertisement register has the configured speed
4459 * enabled.
4460 */
4461 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4462 }
4463
4464 bmcr = 0;
4465 switch (tp->link_config.speed) {
4466 default:
4467 case SPEED_10:
4468 break;
4469
4470 case SPEED_100:
4471 bmcr |= BMCR_SPEED100;
4472 break;
4473
4474 case SPEED_1000:
4475 bmcr |= BMCR_SPEED1000;
4476 break;
4477 }
4478
4479 if (tp->link_config.duplex == DUPLEX_FULL)
4480 bmcr |= BMCR_FULLDPLX;
4481
4482 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4483 (bmcr != orig_bmcr)) {
4484 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4485 for (i = 0; i < 1500; i++) {
4486 u32 tmp;
4487
4488 udelay(10);
4489 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4490 tg3_readphy(tp, MII_BMSR, &tmp))
4491 continue;
4492 if (!(tmp & BMSR_LSTATUS)) {
4493 udelay(40);
4494 break;
4495 }
4496 }
4497 tg3_writephy(tp, MII_BMCR, bmcr);
4498 udelay(40);
4499 }
4500 }
4501 }
4502
4503 static int tg3_phy_pull_config(struct tg3 *tp)
4504 {
4505 int err;
4506 u32 val;
4507
4508 err = tg3_readphy(tp, MII_BMCR, &val);
4509 if (err)
4510 goto done;
4511
4512 if (!(val & BMCR_ANENABLE)) {
4513 tp->link_config.autoneg = AUTONEG_DISABLE;
4514 tp->link_config.advertising = 0;
4515 tg3_flag_clear(tp, PAUSE_AUTONEG);
4516
4517 err = -EIO;
4518
4519 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4520 case 0:
4521 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4522 goto done;
4523
4524 tp->link_config.speed = SPEED_10;
4525 break;
4526 case BMCR_SPEED100:
4527 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4528 goto done;
4529
4530 tp->link_config.speed = SPEED_100;
4531 break;
4532 case BMCR_SPEED1000:
4533 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4534 tp->link_config.speed = SPEED_1000;
4535 break;
4536 }
4537 fallthrough;
4538 default:
4539 goto done;
4540 }
4541
4542 if (val & BMCR_FULLDPLX)
4543 tp->link_config.duplex = DUPLEX_FULL;
4544 else
4545 tp->link_config.duplex = DUPLEX_HALF;
4546
4547 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4548
4549 err = 0;
4550 goto done;
4551 }
4552
4553 tp->link_config.autoneg = AUTONEG_ENABLE;
4554 tp->link_config.advertising = ADVERTISED_Autoneg;
4555 tg3_flag_set(tp, PAUSE_AUTONEG);
4556
4557 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4558 u32 adv;
4559
4560 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4561 if (err)
4562 goto done;
4563
4564 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4565 tp->link_config.advertising |= adv | ADVERTISED_TP;
4566
4567 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4568 } else {
4569 tp->link_config.advertising |= ADVERTISED_FIBRE;
4570 }
4571
4572 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4573 u32 adv;
4574
4575 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4576 err = tg3_readphy(tp, MII_CTRL1000, &val);
4577 if (err)
4578 goto done;
4579
4580 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4581 } else {
4582 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4583 if (err)
4584 goto done;
4585
4586 adv = tg3_decode_flowctrl_1000X(val);
4587 tp->link_config.flowctrl = adv;
4588
4589 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4590 adv = mii_adv_to_ethtool_adv_x(val);
4591 }
4592
4593 tp->link_config.advertising |= adv;
4594 }
4595
4596 done:
4597 return err;
4598 }
4599
4600 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4601 {
4602 int err;
4603
4604 /* Turn off tap power management. */
4605 /* Set Extended packet length bit */
4606 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4607
4608 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4609 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4610 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4611 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4612 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4613
4614 udelay(40);
4615
4616 return err;
4617 }
4618
4619 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4620 {
4621 struct ethtool_eee eee;
4622
4623 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4624 return true;
4625
4626 tg3_eee_pull_config(tp, &eee);
4627
4628 if (tp->eee.eee_enabled) {
4629 if (tp->eee.advertised != eee.advertised ||
4630 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4631 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4632 return false;
4633 } else {
4634 /* EEE is disabled but we're advertising */
4635 if (eee.advertised)
4636 return false;
4637 }
4638
4639 return true;
4640 }
4641
4642 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4643 {
4644 u32 advmsk, tgtadv, advertising;
4645
4646 advertising = tp->link_config.advertising;
4647 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4648
4649 advmsk = ADVERTISE_ALL;
4650 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4651 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4652 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4653 }
4654
4655 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4656 return false;
4657
4658 if ((*lcladv & advmsk) != tgtadv)
4659 return false;
4660
4661 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4662 u32 tg3_ctrl;
4663
4664 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4665
4666 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4667 return false;
4668
4669 if (tgtadv &&
4670 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4671 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4672 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4673 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4674 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4675 } else {
4676 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4677 }
4678
4679 if (tg3_ctrl != tgtadv)
4680 return false;
4681 }
4682
4683 return true;
4684 }
4685
4686 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4687 {
4688 u32 lpeth = 0;
4689
4690 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4691 u32 val;
4692
4693 if (tg3_readphy(tp, MII_STAT1000, &val))
4694 return false;
4695
4696 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4697 }
4698
4699 if (tg3_readphy(tp, MII_LPA, rmtadv))
4700 return false;
4701
4702 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4703 tp->link_config.rmt_adv = lpeth;
4704
4705 return true;
4706 }
4707
4708 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4709 {
4710 if (curr_link_up != tp->link_up) {
4711 if (curr_link_up) {
4712 netif_carrier_on(tp->dev);
4713 } else {
4714 netif_carrier_off(tp->dev);
4715 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4716 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4717 }
4718
4719 tg3_link_report(tp);
4720 return true;
4721 }
4722
4723 return false;
4724 }
4725
4726 static void tg3_clear_mac_status(struct tg3 *tp)
4727 {
4728 tw32(MAC_EVENT, 0);
4729
4730 tw32_f(MAC_STATUS,
4731 MAC_STATUS_SYNC_CHANGED |
4732 MAC_STATUS_CFG_CHANGED |
4733 MAC_STATUS_MI_COMPLETION |
4734 MAC_STATUS_LNKSTATE_CHANGED);
4735 udelay(40);
4736 }
4737
4738 static void tg3_setup_eee(struct tg3 *tp)
4739 {
4740 u32 val;
4741
4742 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4743 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4744 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4745 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4746
4747 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4748
4749 tw32_f(TG3_CPMU_EEE_CTRL,
4750 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4751
4752 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4753 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4754 TG3_CPMU_EEEMD_LPI_IN_RX |
4755 TG3_CPMU_EEEMD_EEE_ENABLE;
4756
4757 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4758 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4759
4760 if (tg3_flag(tp, ENABLE_APE))
4761 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4762
4763 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4764
4765 tw32_f(TG3_CPMU_EEE_DBTMR1,
4766 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4767 (tp->eee.tx_lpi_timer & 0xffff));
4768
4769 tw32_f(TG3_CPMU_EEE_DBTMR2,
4770 TG3_CPMU_DBTMR2_APE_TX_2047US |
4771 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4772 }
4773
4774 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4775 {
4776 bool current_link_up;
4777 u32 bmsr, val;
4778 u32 lcl_adv, rmt_adv;
4779 u32 current_speed;
4780 u8 current_duplex;
4781 int i, err;
4782
4783 tg3_clear_mac_status(tp);
4784
4785 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4786 tw32_f(MAC_MI_MODE,
4787 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4788 udelay(80);
4789 }
4790
4791 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4792
4793 /* Some third-party PHYs need to be reset on link going
4794 * down.
4795 */
4796 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4797 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4798 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4799 tp->link_up) {
4800 tg3_readphy(tp, MII_BMSR, &bmsr);
4801 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4802 !(bmsr & BMSR_LSTATUS))
4803 force_reset = true;
4804 }
4805 if (force_reset)
4806 tg3_phy_reset(tp);
4807
4808 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4809 tg3_readphy(tp, MII_BMSR, &bmsr);
4810 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4811 !tg3_flag(tp, INIT_COMPLETE))
4812 bmsr = 0;
4813
4814 if (!(bmsr & BMSR_LSTATUS)) {
4815 err = tg3_init_5401phy_dsp(tp);
4816 if (err)
4817 return err;
4818
4819 tg3_readphy(tp, MII_BMSR, &bmsr);
4820 for (i = 0; i < 1000; i++) {
4821 udelay(10);
4822 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4823 (bmsr & BMSR_LSTATUS)) {
4824 udelay(40);
4825 break;
4826 }
4827 }
4828
4829 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4830 TG3_PHY_REV_BCM5401_B0 &&
4831 !(bmsr & BMSR_LSTATUS) &&
4832 tp->link_config.active_speed == SPEED_1000) {
4833 err = tg3_phy_reset(tp);
4834 if (!err)
4835 err = tg3_init_5401phy_dsp(tp);
4836 if (err)
4837 return err;
4838 }
4839 }
4840 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4841 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4842 /* 5701 {A0,B0} CRC bug workaround */
4843 tg3_writephy(tp, 0x15, 0x0a75);
4844 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4845 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4846 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4847 }
4848
4849 /* Clear pending interrupts... */
4850 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4851 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4852
4853 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4854 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4855 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4856 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4857
4858 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4859 tg3_asic_rev(tp) == ASIC_REV_5701) {
4860 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4861 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4862 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4863 else
4864 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4865 }
4866
4867 current_link_up = false;
4868 current_speed = SPEED_UNKNOWN;
4869 current_duplex = DUPLEX_UNKNOWN;
4870 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4871 tp->link_config.rmt_adv = 0;
4872
4873 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4874 err = tg3_phy_auxctl_read(tp,
4875 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4876 &val);
4877 if (!err && !(val & (1 << 10))) {
4878 tg3_phy_auxctl_write(tp,
4879 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4880 val | (1 << 10));
4881 goto relink;
4882 }
4883 }
4884
4885 bmsr = 0;
4886 for (i = 0; i < 100; i++) {
4887 tg3_readphy(tp, MII_BMSR, &bmsr);
4888 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4889 (bmsr & BMSR_LSTATUS))
4890 break;
4891 udelay(40);
4892 }
4893
4894 if (bmsr & BMSR_LSTATUS) {
4895 u32 aux_stat, bmcr;
4896
4897 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4898 for (i = 0; i < 2000; i++) {
4899 udelay(10);
4900 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4901 aux_stat)
4902 break;
4903 }
4904
4905 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4906 					    &current_speed,
4907 					    &current_duplex);
4908
4909 bmcr = 0;
4910 for (i = 0; i < 200; i++) {
4911 tg3_readphy(tp, MII_BMCR, &bmcr);
4912 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4913 continue;
4914 if (bmcr && bmcr != 0x7fff)
4915 break;
4916 udelay(10);
4917 }
4918
4919 lcl_adv = 0;
4920 rmt_adv = 0;
4921
4922 tp->link_config.active_speed = current_speed;
4923 tp->link_config.active_duplex = current_duplex;
4924
4925 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4926 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4927
4928 if ((bmcr & BMCR_ANENABLE) &&
4929 eee_config_ok &&
4930 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4931 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4932 current_link_up = true;
4933
4934 			/* Changes to EEE settings take effect only after a phy
4935 			 * reset. If we have skipped a reset due to Link Flap
4936 			 * Avoidance being enabled, do it now.
4937 			 */
4938 if (!eee_config_ok &&
4939 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4940 !force_reset) {
4941 tg3_setup_eee(tp);
4942 tg3_phy_reset(tp);
4943 }
4944 } else {
4945 if (!(bmcr & BMCR_ANENABLE) &&
4946 tp->link_config.speed == current_speed &&
4947 tp->link_config.duplex == current_duplex) {
4948 current_link_up = true;
4949 }
4950 }
4951
4952 if (current_link_up &&
4953 tp->link_config.active_duplex == DUPLEX_FULL) {
4954 u32 reg, bit;
4955
4956 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4957 reg = MII_TG3_FET_GEN_STAT;
4958 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4959 } else {
4960 reg = MII_TG3_EXT_STAT;
4961 bit = MII_TG3_EXT_STAT_MDIX;
4962 }
4963
4964 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4965 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4966
4967 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4968 }
4969 }
4970
4971 relink:
4972 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4973 tg3_phy_copper_begin(tp);
4974
4975 if (tg3_flag(tp, ROBOSWITCH)) {
4976 current_link_up = true;
4977 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4978 current_speed = SPEED_1000;
4979 current_duplex = DUPLEX_FULL;
4980 tp->link_config.active_speed = current_speed;
4981 tp->link_config.active_duplex = current_duplex;
4982 }
4983
4984 tg3_readphy(tp, MII_BMSR, &bmsr);
4985 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4986 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4987 current_link_up = true;
4988 }
4989
4990 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4991 if (current_link_up) {
4992 if (tp->link_config.active_speed == SPEED_100 ||
4993 tp->link_config.active_speed == SPEED_10)
4994 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4995 else
4996 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4997 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4998 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4999 else
5000 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5001
5002 	/* In order for the 5750 core in the BCM4785 chip to work properly
5003 	 * in RGMII mode, the LED Control Register must be set up.
5004 	 */
5005 if (tg3_flag(tp, RGMII_MODE)) {
5006 u32 led_ctrl = tr32(MAC_LED_CTRL);
5007 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5008
5009 if (tp->link_config.active_speed == SPEED_10)
5010 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5011 else if (tp->link_config.active_speed == SPEED_100)
5012 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5013 LED_CTRL_100MBPS_ON);
5014 else if (tp->link_config.active_speed == SPEED_1000)
5015 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5016 LED_CTRL_1000MBPS_ON);
5017
5018 tw32(MAC_LED_CTRL, led_ctrl);
5019 udelay(40);
5020 }
5021
5022 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5023 if (tp->link_config.active_duplex == DUPLEX_HALF)
5024 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5025
5026 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5027 if (current_link_up &&
5028 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5029 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5030 else
5031 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5032 }
5033
5034 /* ??? Without this setting Netgear GA302T PHY does not
5035 * ??? send/receive packets...
5036 */
5037 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5038 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5039 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5040 tw32_f(MAC_MI_MODE, tp->mi_mode);
5041 udelay(80);
5042 }
5043
5044 tw32_f(MAC_MODE, tp->mac_mode);
5045 udelay(40);
5046
5047 tg3_phy_eee_adjust(tp, current_link_up);
5048
5049 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5050 /* Polled via timer. */
5051 tw32_f(MAC_EVENT, 0);
5052 } else {
5053 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5054 }
5055 udelay(40);
5056
5057 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5058 current_link_up &&
5059 tp->link_config.active_speed == SPEED_1000 &&
5060 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5061 udelay(120);
5062 tw32_f(MAC_STATUS,
5063 (MAC_STATUS_SYNC_CHANGED |
5064 MAC_STATUS_CFG_CHANGED));
5065 udelay(40);
5066 tg3_write_mem(tp,
5067 NIC_SRAM_FIRMWARE_MBOX,
5068 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5069 }
5070
5071 /* Prevent send BD corruption. */
5072 if (tg3_flag(tp, CLKREQ_BUG)) {
5073 if (tp->link_config.active_speed == SPEED_100 ||
5074 tp->link_config.active_speed == SPEED_10)
5075 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5076 PCI_EXP_LNKCTL_CLKREQ_EN);
5077 else
5078 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5079 PCI_EXP_LNKCTL_CLKREQ_EN);
5080 }
5081
5082 tg3_test_and_report_link_chg(tp, current_link_up);
5083
5084 return 0;
5085 }
5086
5087 struct tg3_fiber_aneginfo {
5088 int state;
5089 #define ANEG_STATE_UNKNOWN 0
5090 #define ANEG_STATE_AN_ENABLE 1
5091 #define ANEG_STATE_RESTART_INIT 2
5092 #define ANEG_STATE_RESTART 3
5093 #define ANEG_STATE_DISABLE_LINK_OK 4
5094 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5095 #define ANEG_STATE_ABILITY_DETECT 6
5096 #define ANEG_STATE_ACK_DETECT_INIT 7
5097 #define ANEG_STATE_ACK_DETECT 8
5098 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5099 #define ANEG_STATE_COMPLETE_ACK 10
5100 #define ANEG_STATE_IDLE_DETECT_INIT 11
5101 #define ANEG_STATE_IDLE_DETECT 12
5102 #define ANEG_STATE_LINK_OK 13
5103 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5104 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5105
5106 u32 flags;
5107 #define MR_AN_ENABLE 0x00000001
5108 #define MR_RESTART_AN 0x00000002
5109 #define MR_AN_COMPLETE 0x00000004
5110 #define MR_PAGE_RX 0x00000008
5111 #define MR_NP_LOADED 0x00000010
5112 #define MR_TOGGLE_TX 0x00000020
5113 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5114 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5115 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5116 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5117 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5118 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5119 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5120 #define MR_TOGGLE_RX 0x00002000
5121 #define MR_NP_RX 0x00004000
5122
5123 #define MR_LINK_OK 0x80000000
5124
5125 unsigned long link_time, cur_time;
5126
5127 u32 ability_match_cfg;
5128 int ability_match_count;
5129
5130 char ability_match, idle_match, ack_match;
5131
5132 u32 txconfig, rxconfig;
5133 #define ANEG_CFG_NP 0x00000080
5134 #define ANEG_CFG_ACK 0x00000040
5135 #define ANEG_CFG_RF2 0x00000020
5136 #define ANEG_CFG_RF1 0x00000010
5137 #define ANEG_CFG_PS2 0x00000001
5138 #define ANEG_CFG_PS1 0x00008000
5139 #define ANEG_CFG_HD 0x00004000
5140 #define ANEG_CFG_FD 0x00002000
5141 #define ANEG_CFG_INVAL 0x00001f06
5142
5143 };
5144 #define ANEG_OK 0
5145 #define ANEG_DONE 1
5146 #define ANEG_TIMER_ENAB 2
5147 #define ANEG_FAILED -1
5148
5149 #define ANEG_STATE_SETTLE_TIME 10000
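
/* The normal progression through the states below, when both link
 * partners negotiate cleanly, is roughly:
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART
 *     -> ABILITY_DETECT_INIT -> ABILITY_DETECT
 *     -> ACK_DETECT_INIT -> ACK_DETECT
 *     -> COMPLETE_ACK_INIT -> COMPLETE_ACK
 *     -> IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK
 *
 * A mismatch between received and expected config words drops the
 * machine back to AN_ENABLE; the timed states compare cur_time ticks
 * against ANEG_STATE_SETTLE_TIME.
 */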
5150
5151 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5152 struct tg3_fiber_aneginfo *ap)
5153 {
5154 u16 flowctrl;
5155 unsigned long delta;
5156 u32 rx_cfg_reg;
5157 int ret;
5158
5159 if (ap->state == ANEG_STATE_UNKNOWN) {
5160 ap->rxconfig = 0;
5161 ap->link_time = 0;
5162 ap->cur_time = 0;
5163 ap->ability_match_cfg = 0;
5164 ap->ability_match_count = 0;
5165 ap->ability_match = 0;
5166 ap->idle_match = 0;
5167 ap->ack_match = 0;
5168 }
5169 ap->cur_time++;
5170
5171 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5172 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5173
5174 if (rx_cfg_reg != ap->ability_match_cfg) {
5175 ap->ability_match_cfg = rx_cfg_reg;
5176 ap->ability_match = 0;
5177 ap->ability_match_count = 0;
5178 } else {
5179 if (++ap->ability_match_count > 1) {
5180 ap->ability_match = 1;
5181 ap->ability_match_cfg = rx_cfg_reg;
5182 }
5183 }
5184 if (rx_cfg_reg & ANEG_CFG_ACK)
5185 ap->ack_match = 1;
5186 else
5187 ap->ack_match = 0;
5188
5189 ap->idle_match = 0;
5190 } else {
5191 ap->idle_match = 1;
5192 ap->ability_match_cfg = 0;
5193 ap->ability_match_count = 0;
5194 ap->ability_match = 0;
5195 ap->ack_match = 0;
5196
5197 rx_cfg_reg = 0;
5198 }
5199
5200 ap->rxconfig = rx_cfg_reg;
5201 ret = ANEG_OK;
5202
5203 switch (ap->state) {
5204 case ANEG_STATE_UNKNOWN:
5205 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5206 ap->state = ANEG_STATE_AN_ENABLE;
5207
5208 fallthrough;
5209 case ANEG_STATE_AN_ENABLE:
5210 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5211 if (ap->flags & MR_AN_ENABLE) {
5212 ap->link_time = 0;
5213 ap->cur_time = 0;
5214 ap->ability_match_cfg = 0;
5215 ap->ability_match_count = 0;
5216 ap->ability_match = 0;
5217 ap->idle_match = 0;
5218 ap->ack_match = 0;
5219
5220 ap->state = ANEG_STATE_RESTART_INIT;
5221 } else {
5222 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5223 }
5224 break;
5225
5226 case ANEG_STATE_RESTART_INIT:
5227 ap->link_time = ap->cur_time;
5228 ap->flags &= ~(MR_NP_LOADED);
5229 ap->txconfig = 0;
5230 tw32(MAC_TX_AUTO_NEG, 0);
5231 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5232 tw32_f(MAC_MODE, tp->mac_mode);
5233 udelay(40);
5234
5235 ret = ANEG_TIMER_ENAB;
5236 ap->state = ANEG_STATE_RESTART;
5237
5238 fallthrough;
5239 case ANEG_STATE_RESTART:
5240 delta = ap->cur_time - ap->link_time;
5241 if (delta > ANEG_STATE_SETTLE_TIME)
5242 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5243 else
5244 ret = ANEG_TIMER_ENAB;
5245 break;
5246
5247 case ANEG_STATE_DISABLE_LINK_OK:
5248 ret = ANEG_DONE;
5249 break;
5250
5251 case ANEG_STATE_ABILITY_DETECT_INIT:
5252 ap->flags &= ~(MR_TOGGLE_TX);
5253 ap->txconfig = ANEG_CFG_FD;
5254 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5255 if (flowctrl & ADVERTISE_1000XPAUSE)
5256 ap->txconfig |= ANEG_CFG_PS1;
5257 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5258 ap->txconfig |= ANEG_CFG_PS2;
5259 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5260 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5261 tw32_f(MAC_MODE, tp->mac_mode);
5262 udelay(40);
5263
5264 ap->state = ANEG_STATE_ABILITY_DETECT;
5265 break;
5266
5267 case ANEG_STATE_ABILITY_DETECT:
5268 if (ap->ability_match != 0 && ap->rxconfig != 0)
5269 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5270 break;
5271
5272 case ANEG_STATE_ACK_DETECT_INIT:
5273 ap->txconfig |= ANEG_CFG_ACK;
5274 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5275 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5276 tw32_f(MAC_MODE, tp->mac_mode);
5277 udelay(40);
5278
5279 ap->state = ANEG_STATE_ACK_DETECT;
5280
5281 fallthrough;
5282 case ANEG_STATE_ACK_DETECT:
5283 if (ap->ack_match != 0) {
5284 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5285 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5286 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5287 } else {
5288 ap->state = ANEG_STATE_AN_ENABLE;
5289 }
5290 } else if (ap->ability_match != 0 &&
5291 ap->rxconfig == 0) {
5292 ap->state = ANEG_STATE_AN_ENABLE;
5293 }
5294 break;
5295
5296 case ANEG_STATE_COMPLETE_ACK_INIT:
5297 if (ap->rxconfig & ANEG_CFG_INVAL) {
5298 ret = ANEG_FAILED;
5299 break;
5300 }
5301 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5302 MR_LP_ADV_HALF_DUPLEX |
5303 MR_LP_ADV_SYM_PAUSE |
5304 MR_LP_ADV_ASYM_PAUSE |
5305 MR_LP_ADV_REMOTE_FAULT1 |
5306 MR_LP_ADV_REMOTE_FAULT2 |
5307 MR_LP_ADV_NEXT_PAGE |
5308 MR_TOGGLE_RX |
5309 MR_NP_RX);
5310 if (ap->rxconfig & ANEG_CFG_FD)
5311 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5312 if (ap->rxconfig & ANEG_CFG_HD)
5313 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5314 if (ap->rxconfig & ANEG_CFG_PS1)
5315 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5316 if (ap->rxconfig & ANEG_CFG_PS2)
5317 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5318 if (ap->rxconfig & ANEG_CFG_RF1)
5319 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5320 if (ap->rxconfig & ANEG_CFG_RF2)
5321 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5322 if (ap->rxconfig & ANEG_CFG_NP)
5323 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5324
5325 ap->link_time = ap->cur_time;
5326
5327 ap->flags ^= (MR_TOGGLE_TX);
5328 if (ap->rxconfig & 0x0008)
5329 ap->flags |= MR_TOGGLE_RX;
5330 if (ap->rxconfig & ANEG_CFG_NP)
5331 ap->flags |= MR_NP_RX;
5332 ap->flags |= MR_PAGE_RX;
5333
5334 ap->state = ANEG_STATE_COMPLETE_ACK;
5335 ret = ANEG_TIMER_ENAB;
5336 break;
5337
5338 case ANEG_STATE_COMPLETE_ACK:
5339 if (ap->ability_match != 0 &&
5340 ap->rxconfig == 0) {
5341 ap->state = ANEG_STATE_AN_ENABLE;
5342 break;
5343 }
5344 delta = ap->cur_time - ap->link_time;
5345 if (delta > ANEG_STATE_SETTLE_TIME) {
5346 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5347 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5348 } else {
5349 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5350 !(ap->flags & MR_NP_RX)) {
5351 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5352 } else {
5353 ret = ANEG_FAILED;
5354 }
5355 }
5356 }
5357 break;
5358
5359 case ANEG_STATE_IDLE_DETECT_INIT:
5360 ap->link_time = ap->cur_time;
5361 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5362 tw32_f(MAC_MODE, tp->mac_mode);
5363 udelay(40);
5364
5365 ap->state = ANEG_STATE_IDLE_DETECT;
5366 ret = ANEG_TIMER_ENAB;
5367 break;
5368
5369 case ANEG_STATE_IDLE_DETECT:
5370 if (ap->ability_match != 0 &&
5371 ap->rxconfig == 0) {
5372 ap->state = ANEG_STATE_AN_ENABLE;
5373 break;
5374 }
5375 delta = ap->cur_time - ap->link_time;
5376 if (delta > ANEG_STATE_SETTLE_TIME) {
5377 /* XXX another gem from the Broadcom driver :( */
5378 ap->state = ANEG_STATE_LINK_OK;
5379 }
5380 break;
5381
5382 case ANEG_STATE_LINK_OK:
5383 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5384 ret = ANEG_DONE;
5385 break;
5386
5387 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5388 /* ??? unimplemented */
5389 break;
5390
5391 case ANEG_STATE_NEXT_PAGE_WAIT:
5392 /* ??? unimplemented */
5393 break;
5394
5395 default:
5396 ret = ANEG_FAILED;
5397 break;
5398 }
5399
5400 return ret;
5401 }
5402
5403 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5404 {
5405 int res = 0;
5406 struct tg3_fiber_aneginfo aninfo;
5407 int status = ANEG_FAILED;
5408 unsigned int tick;
5409 u32 tmp;
5410
5411 tw32_f(MAC_TX_AUTO_NEG, 0);
5412
5413 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5414 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5415 udelay(40);
5416
5417 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5418 udelay(40);
5419
5420 memset(&aninfo, 0, sizeof(aninfo));
5421 aninfo.flags |= MR_AN_ENABLE;
5422 aninfo.state = ANEG_STATE_UNKNOWN;
5423 aninfo.cur_time = 0;
5424 tick = 0;
5425 while (++tick < 195000) {
5426 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5427 if (status == ANEG_DONE || status == ANEG_FAILED)
5428 break;
5429
5430 udelay(1);
5431 }
5432
5433 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5434 tw32_f(MAC_MODE, tp->mac_mode);
5435 udelay(40);
5436
5437 *txflags = aninfo.txconfig;
5438 *rxflags = aninfo.flags;
5439
5440 if (status == ANEG_DONE &&
5441 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5442 MR_LP_ADV_FULL_DUPLEX)))
5443 res = 1;
5444
5445 return res;
5446 }
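
/* Note on the loop bound above: the state machine is ticked up to
 * 195000 times with a 1 us delay per tick, so software autoneg gets
 * roughly 195 ms to reach ANEG_DONE or ANEG_FAILED.
 */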
5447
5448 static void tg3_init_bcm8002(struct tg3 *tp)
5449 {
5450 u32 mac_status = tr32(MAC_STATUS);
5451 int i;
5452
5453 	/* Reset when initializing for the first time or when we have a link. */
5454 if (tg3_flag(tp, INIT_COMPLETE) &&
5455 !(mac_status & MAC_STATUS_PCS_SYNCED))
5456 return;
5457
5458 /* Set PLL lock range. */
5459 tg3_writephy(tp, 0x16, 0x8007);
5460
5461 /* SW reset */
5462 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5463
5464 /* Wait for reset to complete. */
5465 /* XXX schedule_timeout() ... */
5466 for (i = 0; i < 500; i++)
5467 udelay(10);
5468
5469 /* Config mode; select PMA/Ch 1 regs. */
5470 tg3_writephy(tp, 0x10, 0x8411);
5471
5472 /* Enable auto-lock and comdet, select txclk for tx. */
5473 tg3_writephy(tp, 0x11, 0x0a10);
5474
5475 tg3_writephy(tp, 0x18, 0x00a0);
5476 tg3_writephy(tp, 0x16, 0x41ff);
5477
5478 /* Assert and deassert POR. */
5479 tg3_writephy(tp, 0x13, 0x0400);
5480 udelay(40);
5481 tg3_writephy(tp, 0x13, 0x0000);
5482
5483 tg3_writephy(tp, 0x11, 0x0a50);
5484 udelay(40);
5485 tg3_writephy(tp, 0x11, 0x0a10);
5486
5487 /* Wait for signal to stabilize */
5488 /* XXX schedule_timeout() ... */
5489 for (i = 0; i < 15000; i++)
5490 udelay(10);
5491
5492 /* Deselect the channel register so we can read the PHYID
5493 * later.
5494 */
5495 tg3_writephy(tp, 0x10, 0x8011);
5496 }
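
/* Note on the busy-wait loops above: 500 x 10 us gives the SW reset
 * about 5 ms to complete, and 15000 x 10 us gives the signal roughly
 * 150 ms to stabilize, all without sleeping (the XXX comments mark
 * where schedule_timeout() would be preferable).
 */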
5497
5498 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5499 {
5500 u16 flowctrl;
5501 bool current_link_up;
5502 u32 sg_dig_ctrl, sg_dig_status;
5503 u32 serdes_cfg, expected_sg_dig_ctrl;
5504 int workaround, port_a;
5505
5506 serdes_cfg = 0;
5507 workaround = 0;
5508 port_a = 1;
5509 current_link_up = false;
5510
5511 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5512 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5513 workaround = 1;
5514 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5515 port_a = 0;
5516
5517 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5518 /* preserve bits 20-23 for voltage regulator */
5519 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5520 }
5521
5522 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5523
5524 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5525 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5526 if (workaround) {
5527 u32 val = serdes_cfg;
5528
5529 if (port_a)
5530 val |= 0xc010000;
5531 else
5532 val |= 0x4010000;
5533 tw32_f(MAC_SERDES_CFG, val);
5534 }
5535
5536 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5537 }
5538 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5539 tg3_setup_flow_control(tp, 0, 0);
5540 current_link_up = true;
5541 }
5542 goto out;
5543 }
5544
5545 /* Want auto-negotiation. */
5546 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5547
5548 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5549 if (flowctrl & ADVERTISE_1000XPAUSE)
5550 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5551 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5552 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5553
5554 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5555 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5556 tp->serdes_counter &&
5557 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5558 MAC_STATUS_RCVD_CFG)) ==
5559 MAC_STATUS_PCS_SYNCED)) {
5560 tp->serdes_counter--;
5561 current_link_up = true;
5562 goto out;
5563 }
5564 restart_autoneg:
5565 if (workaround)
5566 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5567 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5568 udelay(5);
5569 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5570
5571 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5572 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5573 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5574 MAC_STATUS_SIGNAL_DET)) {
5575 sg_dig_status = tr32(SG_DIG_STATUS);
5576 mac_status = tr32(MAC_STATUS);
5577
5578 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5579 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5580 u32 local_adv = 0, remote_adv = 0;
5581
5582 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5583 local_adv |= ADVERTISE_1000XPAUSE;
5584 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5585 local_adv |= ADVERTISE_1000XPSE_ASYM;
5586
5587 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5588 remote_adv |= LPA_1000XPAUSE;
5589 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5590 remote_adv |= LPA_1000XPAUSE_ASYM;
5591
5592 tp->link_config.rmt_adv =
5593 mii_adv_to_ethtool_adv_x(remote_adv);
5594
5595 tg3_setup_flow_control(tp, local_adv, remote_adv);
5596 current_link_up = true;
5597 tp->serdes_counter = 0;
5598 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5599 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5600 if (tp->serdes_counter)
5601 tp->serdes_counter--;
5602 else {
5603 if (workaround) {
5604 u32 val = serdes_cfg;
5605
5606 if (port_a)
5607 val |= 0xc010000;
5608 else
5609 val |= 0x4010000;
5610
5611 tw32_f(MAC_SERDES_CFG, val);
5612 }
5613
5614 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5615 udelay(40);
5616
5617 					/* Link parallel detection: the link is up only if we
5618 					 * have PCS_SYNC and are not receiving config code words.
5619 					 */
5620 mac_status = tr32(MAC_STATUS);
5621 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5622 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5623 tg3_setup_flow_control(tp, 0, 0);
5624 current_link_up = true;
5625 tp->phy_flags |=
5626 TG3_PHYFLG_PARALLEL_DETECT;
5627 tp->serdes_counter =
5628 SERDES_PARALLEL_DET_TIMEOUT;
5629 } else
5630 goto restart_autoneg;
5631 }
5632 }
5633 } else {
5634 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5635 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5636 }
5637
5638 out:
5639 return current_link_up;
5640 }
5641
5642 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5643 {
5644 bool current_link_up = false;
5645
5646 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5647 goto out;
5648
5649 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5650 u32 txflags, rxflags;
5651 int i;
5652
5653 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5654 u32 local_adv = 0, remote_adv = 0;
5655
5656 if (txflags & ANEG_CFG_PS1)
5657 local_adv |= ADVERTISE_1000XPAUSE;
5658 if (txflags & ANEG_CFG_PS2)
5659 local_adv |= ADVERTISE_1000XPSE_ASYM;
5660
5661 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5662 remote_adv |= LPA_1000XPAUSE;
5663 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5664 remote_adv |= LPA_1000XPAUSE_ASYM;
5665
5666 tp->link_config.rmt_adv =
5667 mii_adv_to_ethtool_adv_x(remote_adv);
5668
5669 tg3_setup_flow_control(tp, local_adv, remote_adv);
5670
5671 current_link_up = true;
5672 }
5673 for (i = 0; i < 30; i++) {
5674 udelay(20);
5675 tw32_f(MAC_STATUS,
5676 (MAC_STATUS_SYNC_CHANGED |
5677 MAC_STATUS_CFG_CHANGED));
5678 udelay(40);
5679 if ((tr32(MAC_STATUS) &
5680 (MAC_STATUS_SYNC_CHANGED |
5681 MAC_STATUS_CFG_CHANGED)) == 0)
5682 break;
5683 }
5684
5685 mac_status = tr32(MAC_STATUS);
5686 if (!current_link_up &&
5687 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5688 !(mac_status & MAC_STATUS_RCVD_CFG))
5689 current_link_up = true;
5690 } else {
5691 tg3_setup_flow_control(tp, 0, 0);
5692
5693 /* Forcing 1000FD link up. */
5694 current_link_up = true;
5695
5696 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5697 udelay(40);
5698
5699 tw32_f(MAC_MODE, tp->mac_mode);
5700 udelay(40);
5701 }
5702
5703 out:
5704 return current_link_up;
5705 }
5706
5707 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5708 {
5709 u32 orig_pause_cfg;
5710 u32 orig_active_speed;
5711 u8 orig_active_duplex;
5712 u32 mac_status;
5713 bool current_link_up;
5714 int i;
5715
5716 orig_pause_cfg = tp->link_config.active_flowctrl;
5717 orig_active_speed = tp->link_config.active_speed;
5718 orig_active_duplex = tp->link_config.active_duplex;
5719
5720 if (!tg3_flag(tp, HW_AUTONEG) &&
5721 tp->link_up &&
5722 tg3_flag(tp, INIT_COMPLETE)) {
5723 mac_status = tr32(MAC_STATUS);
5724 mac_status &= (MAC_STATUS_PCS_SYNCED |
5725 MAC_STATUS_SIGNAL_DET |
5726 MAC_STATUS_CFG_CHANGED |
5727 MAC_STATUS_RCVD_CFG);
5728 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5729 MAC_STATUS_SIGNAL_DET)) {
5730 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5731 MAC_STATUS_CFG_CHANGED));
5732 return 0;
5733 }
5734 }
5735
5736 tw32_f(MAC_TX_AUTO_NEG, 0);
5737
5738 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5739 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5740 tw32_f(MAC_MODE, tp->mac_mode);
5741 udelay(40);
5742
5743 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5744 tg3_init_bcm8002(tp);
5745
5746 /* Enable link change event even when serdes polling. */
5747 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5748 udelay(40);
5749
5750 tp->link_config.rmt_adv = 0;
5751 mac_status = tr32(MAC_STATUS);
5752
5753 if (tg3_flag(tp, HW_AUTONEG))
5754 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5755 else
5756 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5757
5758 tp->napi[0].hw_status->status =
5759 (SD_STATUS_UPDATED |
5760 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5761
5762 for (i = 0; i < 100; i++) {
5763 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5764 MAC_STATUS_CFG_CHANGED));
5765 udelay(5);
5766 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5767 MAC_STATUS_CFG_CHANGED |
5768 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5769 break;
5770 }
5771
5772 mac_status = tr32(MAC_STATUS);
5773 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5774 current_link_up = false;
5775 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5776 tp->serdes_counter == 0) {
5777 tw32_f(MAC_MODE, (tp->mac_mode |
5778 MAC_MODE_SEND_CONFIGS));
5779 udelay(1);
5780 tw32_f(MAC_MODE, tp->mac_mode);
5781 }
5782 }
5783
5784 if (current_link_up) {
5785 tp->link_config.active_speed = SPEED_1000;
5786 tp->link_config.active_duplex = DUPLEX_FULL;
5787 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5788 LED_CTRL_LNKLED_OVERRIDE |
5789 LED_CTRL_1000MBPS_ON));
5790 } else {
5791 tp->link_config.active_speed = SPEED_UNKNOWN;
5792 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5793 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5794 LED_CTRL_LNKLED_OVERRIDE |
5795 LED_CTRL_TRAFFIC_OVERRIDE));
5796 }
5797
5798 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5799 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5800 if (orig_pause_cfg != now_pause_cfg ||
5801 orig_active_speed != tp->link_config.active_speed ||
5802 orig_active_duplex != tp->link_config.active_duplex)
5803 tg3_link_report(tp);
5804 }
5805
5806 return 0;
5807 }
5808
5809 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5810 {
5811 int err = 0;
5812 u32 bmsr, bmcr;
5813 u32 current_speed = SPEED_UNKNOWN;
5814 u8 current_duplex = DUPLEX_UNKNOWN;
5815 bool current_link_up = false;
5816 u32 local_adv, remote_adv, sgsr;
5817
5818 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5819 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5820 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5821 (sgsr & SERDES_TG3_SGMII_MODE)) {
5822
5823 if (force_reset)
5824 tg3_phy_reset(tp);
5825
5826 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5827
5828 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5829 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5830 } else {
5831 current_link_up = true;
5832 if (sgsr & SERDES_TG3_SPEED_1000) {
5833 current_speed = SPEED_1000;
5834 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5835 } else if (sgsr & SERDES_TG3_SPEED_100) {
5836 current_speed = SPEED_100;
5837 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5838 } else {
5839 current_speed = SPEED_10;
5840 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5841 }
5842
5843 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5844 current_duplex = DUPLEX_FULL;
5845 else
5846 current_duplex = DUPLEX_HALF;
5847 }
5848
5849 tw32_f(MAC_MODE, tp->mac_mode);
5850 udelay(40);
5851
5852 tg3_clear_mac_status(tp);
5853
5854 goto fiber_setup_done;
5855 }
5856
5857 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5858 tw32_f(MAC_MODE, tp->mac_mode);
5859 udelay(40);
5860
5861 tg3_clear_mac_status(tp);
5862
5863 if (force_reset)
5864 tg3_phy_reset(tp);
5865
5866 tp->link_config.rmt_adv = 0;
5867
5868 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5870 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5871 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5872 bmsr |= BMSR_LSTATUS;
5873 else
5874 bmsr &= ~BMSR_LSTATUS;
5875 }
5876
5877 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5878
5879 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5880 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5881 /* do nothing, just check for link up at the end */
5882 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5883 u32 adv, newadv;
5884
5885 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5886 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5887 ADVERTISE_1000XPAUSE |
5888 ADVERTISE_1000XPSE_ASYM |
5889 ADVERTISE_SLCT);
5890
5891 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5892 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5893
5894 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5895 tg3_writephy(tp, MII_ADVERTISE, newadv);
5896 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5897 tg3_writephy(tp, MII_BMCR, bmcr);
5898
5899 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5900 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5901 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5902
5903 return err;
5904 }
5905 } else {
5906 u32 new_bmcr;
5907
5908 bmcr &= ~BMCR_SPEED1000;
5909 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5910
5911 if (tp->link_config.duplex == DUPLEX_FULL)
5912 new_bmcr |= BMCR_FULLDPLX;
5913
5914 if (new_bmcr != bmcr) {
5915 /* BMCR_SPEED1000 is a reserved bit that needs
5916 * to be set on write.
5917 */
5918 new_bmcr |= BMCR_SPEED1000;
5919
5920 /* Force a linkdown */
5921 if (tp->link_up) {
5922 u32 adv;
5923
5924 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5925 adv &= ~(ADVERTISE_1000XFULL |
5926 ADVERTISE_1000XHALF |
5927 ADVERTISE_SLCT);
5928 tg3_writephy(tp, MII_ADVERTISE, adv);
5929 tg3_writephy(tp, MII_BMCR, bmcr |
5930 BMCR_ANRESTART |
5931 BMCR_ANENABLE);
5932 udelay(10);
5933 tg3_carrier_off(tp);
5934 }
5935 tg3_writephy(tp, MII_BMCR, new_bmcr);
5936 bmcr = new_bmcr;
5937 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5939 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5940 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5941 bmsr |= BMSR_LSTATUS;
5942 else
5943 bmsr &= ~BMSR_LSTATUS;
5944 }
5945 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5946 }
5947 }
5948
5949 if (bmsr & BMSR_LSTATUS) {
5950 current_speed = SPEED_1000;
5951 current_link_up = true;
5952 if (bmcr & BMCR_FULLDPLX)
5953 current_duplex = DUPLEX_FULL;
5954 else
5955 current_duplex = DUPLEX_HALF;
5956
5957 local_adv = 0;
5958 remote_adv = 0;
5959
5960 if (bmcr & BMCR_ANENABLE) {
5961 u32 common;
5962
5963 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5964 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5965 common = local_adv & remote_adv;
5966 if (common & (ADVERTISE_1000XHALF |
5967 ADVERTISE_1000XFULL)) {
5968 if (common & ADVERTISE_1000XFULL)
5969 current_duplex = DUPLEX_FULL;
5970 else
5971 current_duplex = DUPLEX_HALF;
5972
5973 tp->link_config.rmt_adv =
5974 mii_adv_to_ethtool_adv_x(remote_adv);
5975 } else if (!tg3_flag(tp, 5780_CLASS)) {
5976 /* Link is up via parallel detect */
5977 } else {
5978 current_link_up = false;
5979 }
5980 }
5981 }
5982
5983 fiber_setup_done:
5984 if (current_link_up && current_duplex == DUPLEX_FULL)
5985 tg3_setup_flow_control(tp, local_adv, remote_adv);
5986
5987 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5988 if (tp->link_config.active_duplex == DUPLEX_HALF)
5989 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5990
5991 tw32_f(MAC_MODE, tp->mac_mode);
5992 udelay(40);
5993
5994 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5995
5996 tp->link_config.active_speed = current_speed;
5997 tp->link_config.active_duplex = current_duplex;
5998
5999 tg3_test_and_report_link_chg(tp, current_link_up);
6000 return err;
6001 }
6002
6003 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6004 {
6005 if (tp->serdes_counter) {
6006 /* Give autoneg time to complete. */
6007 tp->serdes_counter--;
6008 return;
6009 }
6010
6011 if (!tp->link_up &&
6012 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6013 u32 bmcr;
6014
6015 tg3_readphy(tp, MII_BMCR, &bmcr);
6016 if (bmcr & BMCR_ANENABLE) {
6017 u32 phy1, phy2;
6018
6019 /* Select shadow register 0x1f */
6020 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6021 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6022
6023 /* Select expansion interrupt status register */
6024 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6025 MII_TG3_DSP_EXP1_INT_STAT);
6026 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6027 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6028
6029 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6030 /* We have signal detect and are not receiving
6031 * config code words, so the link is up by parallel
6032 * detection.
6033 */
6034
6035 bmcr &= ~BMCR_ANENABLE;
6036 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6037 tg3_writephy(tp, MII_BMCR, bmcr);
6038 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6039 }
6040 }
6041 } else if (tp->link_up &&
6042 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6043 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6044 u32 phy2;
6045
6046 /* Select expansion interrupt status register */
6047 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6048 MII_TG3_DSP_EXP1_INT_STAT);
6049 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6050 if (phy2 & 0x20) {
6051 u32 bmcr;
6052
6053 /* Config code words received, turn on autoneg. */
6054 tg3_readphy(tp, MII_BMCR, &bmcr);
6055 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6056
6057 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6058
6059 }
6060 }
6061 }
6062
6063 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6064 {
6065 u32 val;
6066 int err;
6067
6068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6069 err = tg3_setup_fiber_phy(tp, force_reset);
6070 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6071 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6072 else
6073 err = tg3_setup_copper_phy(tp, force_reset);
6074
6075 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6076 u32 scale;
6077
6078 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6079 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6080 scale = 65;
6081 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6082 scale = 6;
6083 else
6084 scale = 12;
6085
6086 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6087 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6088 tw32(GRC_MISC_CFG, val);
6089 }
6090
6091 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6092 (6 << TX_LENGTHS_IPG_SHIFT);
6093 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6094 tg3_asic_rev(tp) == ASIC_REV_5762)
6095 val |= tr32(MAC_TX_LENGTHS) &
6096 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6097 TX_LENGTHS_CNT_DWN_VAL_MSK);
6098
6099 if (tp->link_config.active_speed == SPEED_1000 &&
6100 tp->link_config.active_duplex == DUPLEX_HALF)
6101 tw32(MAC_TX_LENGTHS, val |
6102 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6103 else
6104 tw32(MAC_TX_LENGTHS, val |
6105 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6106
6107 if (!tg3_flag(tp, 5705_PLUS)) {
6108 if (tp->link_up) {
6109 tw32(HOSTCC_STAT_COAL_TICKS,
6110 tp->coal.stats_block_coalesce_usecs);
6111 } else {
6112 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6113 }
6114 }
6115
6116 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6117 val = tr32(PCIE_PWR_MGMT_THRESH);
6118 if (!tp->link_up)
6119 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6120 tp->pwrmgmt_thresh;
6121 else
6122 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6123 tw32(PCIE_PWR_MGMT_THRESH, val);
6124 }
6125
6126 return err;
6127 }
6128
6129 /* tp->lock must be held */
6130 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6131 {
6132 u64 stamp;
6133
6134 ptp_read_system_prets(sts);
6135 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6136 ptp_read_system_postts(sts);
6137 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6138
6139 return stamp;
6140 }
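
/* Note on the ordering above: the ptp_read_system_prets() and
 * ptp_read_system_postts() timestamps bracket only the LSB read,
 * since that is the access that samples the hardware counter; the
 * MSB read that follows returns the high word (presumably latched
 * by the LSB read, which is why no wraparound re-read is done), so
 * the system timestamps tightly bound the sampling instant.
 */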
6141
6142 /* tp->lock must be held */
6143 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6144 {
6145 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6146
6147 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6148 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6149 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6150 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6151 }
6152
6153 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6154 static inline void tg3_full_unlock(struct tg3 *tp);
6155 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6156 {
6157 struct tg3 *tp = netdev_priv(dev);
6158
6159 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6160 SOF_TIMESTAMPING_RX_SOFTWARE |
6161 SOF_TIMESTAMPING_SOFTWARE;
6162
6163 if (tg3_flag(tp, PTP_CAPABLE)) {
6164 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6165 SOF_TIMESTAMPING_RX_HARDWARE |
6166 SOF_TIMESTAMPING_RAW_HARDWARE;
6167 }
6168
6169 if (tp->ptp_clock)
6170 info->phc_index = ptp_clock_index(tp->ptp_clock);
6171 else
6172 info->phc_index = -1;
6173
6174 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6175
6176 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6177 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6178 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6179 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6180 return 0;
6181 }
6182
6183 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6184 {
6185 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6186 u64 correction;
6187 bool neg_adj;
6188
6189 /* Frequency adjustment is performed using hardware with a 24 bit
6190 * accumulator and a programmable correction value. On each clk, the
6191 * correction value gets added to the accumulator and when it
6192 * overflows, the time counter is incremented/decremented.
6193 */
6194 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6195
6196 tg3_full_lock(tp, 0);
6197
6198 if (correction)
6199 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6200 TG3_EAV_REF_CLK_CORRECT_EN |
6201 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6202 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6203 else
6204 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6205
6206 tg3_full_unlock(tp);
6207
6208 return 0;
6209 }
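
/* Worked example of the accumulator math above (illustration only):
 * with a 24-bit accumulator, a correction value C advances the time
 * counter by an extra C / 2^24 of a tick per clock, i.e. a relative
 * frequency offset of C / 2^24. For scaled_ppm = 1000 << 16 (+1000
 * ppm), diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction) yields
 * correction = (1 << 24) * 1000 / 1000000 = 16777 (truncated), and
 * neg_adj selects the TG3_EAV_REF_CLK_CORRECT_NEG direction bit.
 */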
6210
6211 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6212 {
6213 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6214
6215 tg3_full_lock(tp, 0);
6216 tp->ptp_adjust += delta;
6217 tg3_full_unlock(tp);
6218
6219 return 0;
6220 }
6221
6222 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6223 struct ptp_system_timestamp *sts)
6224 {
6225 u64 ns;
6226 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6227
6228 tg3_full_lock(tp, 0);
6229 ns = tg3_refclk_read(tp, sts);
6230 ns += tp->ptp_adjust;
6231 tg3_full_unlock(tp);
6232
6233 *ts = ns_to_timespec64(ns);
6234
6235 return 0;
6236 }
6237
6238 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6239 const struct timespec64 *ts)
6240 {
6241 u64 ns;
6242 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6243
6244 ns = timespec64_to_ns(ts);
6245
6246 tg3_full_lock(tp, 0);
6247 tg3_refclk_write(tp, ns);
6248 tp->ptp_adjust = 0;
6249 tg3_full_unlock(tp);
6250
6251 return 0;
6252 }
6253
6254 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6255 struct ptp_clock_request *rq, int on)
6256 {
6257 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6258 u32 clock_ctl;
6259 int rval = 0;
6260
6261 switch (rq->type) {
6262 case PTP_CLK_REQ_PEROUT:
6263 /* Reject requests with unsupported flags */
6264 if (rq->perout.flags)
6265 return -EOPNOTSUPP;
6266
6267 if (rq->perout.index != 0)
6268 return -EINVAL;
6269
6270 tg3_full_lock(tp, 0);
6271 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6272 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6273
6274 if (on) {
6275 u64 nsec;
6276
6277 nsec = rq->perout.start.sec * 1000000000ULL +
6278 rq->perout.start.nsec;
6279
6280 if (rq->perout.period.sec || rq->perout.period.nsec) {
6281 netdev_warn(tp->dev,
6282 "Device supports only a one-shot timesync output, period must be 0\n");
6283 rval = -EINVAL;
6284 goto err_out;
6285 }
6286
6287 if (nsec & (1ULL << 63)) {
6288 netdev_warn(tp->dev,
6289 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6290 rval = -EINVAL;
6291 goto err_out;
6292 }
6293
6294 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6295 tw32(TG3_EAV_WATCHDOG0_MSB,
6296 TG3_EAV_WATCHDOG0_EN |
6297 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6298
6299 tw32(TG3_EAV_REF_CLCK_CTL,
6300 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6301 } else {
6302 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6303 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6304 }
6305
6306 err_out:
6307 tg3_full_unlock(tp);
6308 return rval;
6309
6310 default:
6311 break;
6312 }
6313
6314 return -EOPNOTSUPP;
6315 }
6316
6317 static const struct ptp_clock_info tg3_ptp_caps = {
6318 .owner = THIS_MODULE,
6319 .name = "tg3 clock",
6320 .max_adj = 250000000,
6321 .n_alarm = 0,
6322 .n_ext_ts = 0,
6323 .n_per_out = 1,
6324 .n_pins = 0,
6325 .pps = 0,
6326 .adjfine = tg3_ptp_adjfine,
6327 .adjtime = tg3_ptp_adjtime,
6328 .gettimex64 = tg3_ptp_gettimex,
6329 .settime64 = tg3_ptp_settime,
6330 .enable = tg3_ptp_enable,
6331 };
6332
6333 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6334 struct skb_shared_hwtstamps *timestamp)
6335 {
6336 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6337 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6338 tp->ptp_adjust);
6339 }
6340
6341 /* tp->lock must be held */
6342 static void tg3_ptp_init(struct tg3 *tp)
6343 {
6344 if (!tg3_flag(tp, PTP_CAPABLE))
6345 return;
6346
6347 /* Initialize the hardware clock to the system time. */
6348 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6349 tp->ptp_adjust = 0;
6350 tp->ptp_info = tg3_ptp_caps;
6351 }
6352
6353 /* tp->lock must be held */
6354 static void tg3_ptp_resume(struct tg3 *tp)
6355 {
6356 if (!tg3_flag(tp, PTP_CAPABLE))
6357 return;
6358
6359 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6360 tp->ptp_adjust = 0;
6361 }
6362
6363 static void tg3_ptp_fini(struct tg3 *tp)
6364 {
6365 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6366 return;
6367
6368 ptp_clock_unregister(tp->ptp_clock);
6369 tp->ptp_clock = NULL;
6370 tp->ptp_adjust = 0;
6371 }
6372
6373 static inline int tg3_irq_sync(struct tg3 *tp)
6374 {
6375 return tp->irq_sync;
6376 }
6377
6378 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6379 {
6380 int i;
6381
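/* Offsetting dst by the register offset keeps the snapshot buffer
 * laid out like the device's register space, so tg3_dump_state()
 * can print absolute register offsets straight from the array.
 */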
6382 dst = (u32 *)((u8 *)dst + off);
6383 for (i = 0; i < len; i += sizeof(u32))
6384 *dst++ = tr32(off + i);
6385 }
6386
6387 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6388 {
6389 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6390 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6391 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6392 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6393 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6394 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6395 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6396 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6397 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6398 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6399 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6400 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6401 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6402 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6403 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6404 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6405 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6406 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6407 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6408
6409 if (tg3_flag(tp, SUPPORT_MSIX))
6410 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6411
6412 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6413 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6414 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6415 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6416 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6417 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6418 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6419 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6420
6421 if (!tg3_flag(tp, 5705_PLUS)) {
6422 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6423 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6424 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6425 }
6426
6427 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6428 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6429 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6430 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6431 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6432
6433 if (tg3_flag(tp, NVRAM))
6434 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6435 }
6436
6437 static void tg3_dump_state(struct tg3 *tp)
6438 {
6439 int i;
6440 u32 *regs;
6441
6442 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6443 if (!regs)
6444 return;
6445
6446 if (tg3_flag(tp, PCI_EXPRESS)) {
6447 /* Read up to but not including private PCI registers */
6448 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6449 regs[i / sizeof(u32)] = tr32(i);
6450 } else
6451 tg3_dump_legacy_regs(tp, regs);
6452
6453 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6454 if (!regs[i + 0] && !regs[i + 1] &&
6455 !regs[i + 2] && !regs[i + 3])
6456 continue;
6457
6458 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6459 i * 4,
6460 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6461 }
6462
6463 kfree(regs);
6464
6465 for (i = 0; i < tp->irq_cnt; i++) {
6466 struct tg3_napi *tnapi = &tp->napi[i];
6467
6468 /* SW status block */
6469 netdev_err(tp->dev,
6470 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6471 i,
6472 tnapi->hw_status->status,
6473 tnapi->hw_status->status_tag,
6474 tnapi->hw_status->rx_jumbo_consumer,
6475 tnapi->hw_status->rx_consumer,
6476 tnapi->hw_status->rx_mini_consumer,
6477 tnapi->hw_status->idx[0].rx_producer,
6478 tnapi->hw_status->idx[0].tx_consumer);
6479
6480 netdev_err(tp->dev,
6481 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6482 i,
6483 tnapi->last_tag, tnapi->last_irq_tag,
6484 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6485 tnapi->rx_rcb_ptr,
6486 tnapi->prodring.rx_std_prod_idx,
6487 tnapi->prodring.rx_std_cons_idx,
6488 tnapi->prodring.rx_jmb_prod_idx,
6489 tnapi->prodring.rx_jmb_cons_idx);
6490 }
6491 }
6492
6493 /* This is called whenever we suspect that the system chipset is re-
6494 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6495 * is bogus tx completions. We try to recover by setting the
6496 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6497 * in the workqueue.
6498 */
6499 static void tg3_tx_recover(struct tg3 *tp)
6500 {
6501 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6502 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6503
6504 netdev_warn(tp->dev,
6505 "The system may be re-ordering memory-mapped I/O "
6506 "cycles to the network device, attempting to recover. "
6507 "Please report the problem to the driver maintainer "
6508 "and include system chipset information.\n");
6509
6510 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6511 }
6512
6513 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6514 {
6515 /* Tell compiler to fetch tx indices from memory. */
6516 barrier();
6517 return tnapi->tx_pending -
6518 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6519 }
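
/* Worked example of the ring arithmetic above, assuming the usual
 * 512-entry ring and tx_pending = 511: with tx_prod = 10 and
 * tx_cons = 500, (10 - 500) & 511 = 22 descriptors are in flight,
 * leaving 511 - 22 = 489 available. The masked unsigned subtraction
 * handles producer wraparound for free.
 */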
6520
6521 /* Tigon3 never reports partial packet sends. So we do not
6522 * need special logic to handle SKBs that have not had all
6523 * of their frags sent yet, like SunGEM does.
6524 */
6525 static void tg3_tx(struct tg3_napi *tnapi)
6526 {
6527 struct tg3 *tp = tnapi->tp;
6528 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6529 u32 sw_idx = tnapi->tx_cons;
6530 struct netdev_queue *txq;
6531 int index = tnapi - tp->napi;
6532 unsigned int pkts_compl = 0, bytes_compl = 0;
6533
6534 if (tg3_flag(tp, ENABLE_TSS))
6535 index--;
6536
6537 txq = netdev_get_tx_queue(tp->dev, index);
6538
6539 while (sw_idx != hw_idx) {
6540 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6541 struct sk_buff *skb = ri->skb;
6542 int i, tx_bug = 0;
6543
6544 if (unlikely(skb == NULL)) {
6545 tg3_tx_recover(tp);
6546 return;
6547 }
6548
6549 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6550 struct skb_shared_hwtstamps timestamp;
6551 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6552 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6553
6554 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6555
6556 skb_tstamp_tx(skb, &timestamp);
6557 }
6558
6559 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6560 skb_headlen(skb), DMA_TO_DEVICE);
6561
6562 ri->skb = NULL;
6563
6564 while (ri->fragmented) {
6565 ri->fragmented = false;
6566 sw_idx = NEXT_TX(sw_idx);
6567 ri = &tnapi->tx_buffers[sw_idx];
6568 }
6569
6570 sw_idx = NEXT_TX(sw_idx);
6571
6572 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6573 ri = &tnapi->tx_buffers[sw_idx];
6574 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6575 tx_bug = 1;
6576
6577 dma_unmap_page(&tp->pdev->dev,
6578 dma_unmap_addr(ri, mapping),
6579 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6580 DMA_TO_DEVICE);
6581
6582 while (ri->fragmented) {
6583 ri->fragmented = false;
6584 sw_idx = NEXT_TX(sw_idx);
6585 ri = &tnapi->tx_buffers[sw_idx];
6586 }
6587
6588 sw_idx = NEXT_TX(sw_idx);
6589 }
6590
6591 pkts_compl++;
6592 bytes_compl += skb->len;
6593
6594 dev_consume_skb_any(skb);
6595
6596 if (unlikely(tx_bug)) {
6597 tg3_tx_recover(tp);
6598 return;
6599 }
6600 }
6601
6602 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6603
6604 tnapi->tx_cons = sw_idx;
6605
6606 /* Need to make the tx_cons update visible to tg3_start_xmit()
6607 * before checking for netif_queue_stopped(). Without the
6608 * memory barrier, there is a small possibility that tg3_start_xmit()
6609 * will miss it and cause the queue to be stopped forever.
6610 */
6611 smp_mb();
6612
6613 if (unlikely(netif_tx_queue_stopped(txq) &&
6614 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6615 __netif_tx_lock(txq, smp_processor_id());
6616 if (netif_tx_queue_stopped(txq) &&
6617 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6618 netif_tx_wake_queue(txq);
6619 __netif_tx_unlock(txq);
6620 }
6621 }
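
/* The smp_mb() above pairs with a stop-then-recheck sequence on the
 * transmit side. A sketch of that pattern (not the literal
 * tg3_start_xmit() code):
 *
 *	netif_tx_stop_queue(txq);
 *	smp_mb();
 *	if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 *		netif_tx_wake_queue(txq);
 *
 * Without both barriers, the completion path could observe a stale
 * "running" queue state while the xmit path observes a stale tx_cons,
 * leaving the queue stopped with no completion left to wake it.
 */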
6622
6623 static void tg3_frag_free(bool is_frag, void *data)
6624 {
6625 if (is_frag)
6626 skb_free_frag(data);
6627 else
6628 kfree(data);
6629 }
6630
6631 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6632 {
6633 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6634 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6635
6636 if (!ri->data)
6637 return;
6638
6639 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6640 DMA_FROM_DEVICE);
6641 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6642 ri->data = NULL;
6643 }
6644
6645
6646 /* Returns size of skb allocated or < 0 on error.
6647 *
6648 * We only need to fill in the address because the other members
6649 * of the RX descriptor are invariant, see tg3_init_rings.
6650 *
6651 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6652 * posting buffers we only dirty the first cache line of the RX
6653 * descriptor (containing the address). Whereas for the RX status
6654 * buffers the cpu only reads the last cacheline of the RX descriptor
6655 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6656 */
6657 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6658 u32 opaque_key, u32 dest_idx_unmasked,
6659 unsigned int *frag_size)
6660 {
6661 struct tg3_rx_buffer_desc *desc;
6662 struct ring_info *map;
6663 u8 *data;
6664 dma_addr_t mapping;
6665 int skb_size, data_size, dest_idx;
6666
6667 switch (opaque_key) {
6668 case RXD_OPAQUE_RING_STD:
6669 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6670 desc = &tpr->rx_std[dest_idx];
6671 map = &tpr->rx_std_buffers[dest_idx];
6672 data_size = tp->rx_pkt_map_sz;
6673 break;
6674
6675 case RXD_OPAQUE_RING_JUMBO:
6676 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6677 desc = &tpr->rx_jmb[dest_idx].std;
6678 map = &tpr->rx_jmb_buffers[dest_idx];
6679 data_size = TG3_RX_JMB_MAP_SZ;
6680 break;
6681
6682 default:
6683 return -EINVAL;
6684 }
6685
6686 /* Do not overwrite any of the map or rp information
6687 * until we are sure we can commit to a new buffer.
6688 *
6689 * Callers depend upon this behavior and assume that
6690 * we leave everything unchanged if we fail.
6691 */
6692 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6693 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6694 if (skb_size <= PAGE_SIZE) {
6695 data = napi_alloc_frag(skb_size);
6696 *frag_size = skb_size;
6697 } else {
6698 data = kmalloc(skb_size, GFP_ATOMIC);
6699 *frag_size = 0;
6700 }
6701 if (!data)
6702 return -ENOMEM;
6703
6704 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6705 data_size, DMA_FROM_DEVICE);
6706 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6707 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6708 return -EIO;
6709 }
6710
6711 map->data = data;
6712 dma_unmap_addr_set(map, mapping, mapping);
6713
6714 desc->addr_hi = ((u64)mapping >> 32);
6715 desc->addr_lo = ((u64)mapping & 0xffffffff);
6716
6717 return data_size;
6718 }
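
/* Size sketch for the allocation above, assuming a standard 1500-byte
 * MTU: data_size = tp->rx_pkt_map_sz is on the order of 1.5KB, so
 * skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) stays under a 4KB
 * page and takes the cheaper napi_alloc_frag() path; jumbo buffers
 * (TG3_RX_JMB_MAP_SZ) exceed PAGE_SIZE and fall back to kmalloc().
 */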
6719
6720 /* We only need to move over in the address because the other
6721 * members of the RX descriptor are invariant. See notes above
6722 * tg3_alloc_rx_data for full details.
6723 */
6724 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6725 struct tg3_rx_prodring_set *dpr,
6726 u32 opaque_key, int src_idx,
6727 u32 dest_idx_unmasked)
6728 {
6729 struct tg3 *tp = tnapi->tp;
6730 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6731 struct ring_info *src_map, *dest_map;
6732 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6733 int dest_idx;
6734
6735 switch (opaque_key) {
6736 case RXD_OPAQUE_RING_STD:
6737 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6738 dest_desc = &dpr->rx_std[dest_idx];
6739 dest_map = &dpr->rx_std_buffers[dest_idx];
6740 src_desc = &spr->rx_std[src_idx];
6741 src_map = &spr->rx_std_buffers[src_idx];
6742 break;
6743
6744 case RXD_OPAQUE_RING_JUMBO:
6745 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6746 dest_desc = &dpr->rx_jmb[dest_idx].std;
6747 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6748 src_desc = &spr->rx_jmb[src_idx].std;
6749 src_map = &spr->rx_jmb_buffers[src_idx];
6750 break;
6751
6752 default:
6753 return;
6754 }
6755
6756 dest_map->data = src_map->data;
6757 dma_unmap_addr_set(dest_map, mapping,
6758 dma_unmap_addr(src_map, mapping));
6759 dest_desc->addr_hi = src_desc->addr_hi;
6760 dest_desc->addr_lo = src_desc->addr_lo;
6761
6762 /* Ensure that the update to the skb happens after the physical
6763 * addresses have been transferred to the new BD location.
6764 */
6765 smp_wmb();
6766
6767 src_map->data = NULL;
6768 }
6769
6770 /* The RX ring scheme is composed of multiple rings which post fresh
6771 * buffers to the chip, and one special ring the chip uses to report
6772 * status back to the host.
6773 *
6774 * The special ring reports the status of received packets to the
6775 * host. The chip does not write into the original descriptor the
6776 * RX buffer was obtained from. The chip simply takes the original
6777 * descriptor as provided by the host, updates the status and length
6778 * field, then writes this into the next status ring entry.
6779 *
6780 * Each ring the host uses to post buffers to the chip is described
6781 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6782 * it is first placed into the on-chip ram. When the packet's length
6783 * is known, it walks down the TG3_BDINFO entries to select the ring.
6784 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6785 * which is within the range of the new packet's length is chosen.
6786 *
6787 * The "separate ring for rx status" scheme may sound queer, but it makes
6788 * sense from a cache coherency perspective. If only the host writes
6789 * to the buffer post rings, and only the chip writes to the rx status
6790 * rings, then cache lines never move beyond shared-modified state.
6791 * If both the host and chip were to write into the same ring, cache line
6792 * eviction could occur since both entities want it in an exclusive state.
6793 */
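/* Conceptually, the TG3_BDINFO walk described above looks like this
 * (illustrative pseudocode only; the names are hypothetical and the
 * real selection happens inside the chip):
 *
 *	for (bdi = first_bdinfo; bdi; bdi = next_bdinfo(bdi))
 *		if (pkt_len <= bdi->maxlen)
 *			return bdi;	// take a buffer from this ring
 */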
6794 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6795 {
6796 struct tg3 *tp = tnapi->tp;
6797 u32 work_mask, rx_std_posted = 0;
6798 u32 std_prod_idx, jmb_prod_idx;
6799 u32 sw_idx = tnapi->rx_rcb_ptr;
6800 u16 hw_idx;
6801 int received;
6802 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6803
6804 hw_idx = *(tnapi->rx_rcb_prod_idx);
6805 /*
6806 * We need to order the read of hw_idx and the read of
6807 * the opaque cookie.
6808 */
6809 rmb();
6810 work_mask = 0;
6811 received = 0;
6812 std_prod_idx = tpr->rx_std_prod_idx;
6813 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6814 while (sw_idx != hw_idx && budget > 0) {
6815 struct ring_info *ri;
6816 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6817 unsigned int len;
6818 struct sk_buff *skb;
6819 dma_addr_t dma_addr;
6820 u32 opaque_key, desc_idx, *post_ptr;
6821 u8 *data;
6822 u64 tstamp = 0;
6823
6824 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6825 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6826 if (opaque_key == RXD_OPAQUE_RING_STD) {
6827 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6828 dma_addr = dma_unmap_addr(ri, mapping);
6829 data = ri->data;
6830 post_ptr = &std_prod_idx;
6831 rx_std_posted++;
6832 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6833 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6834 dma_addr = dma_unmap_addr(ri, mapping);
6835 data = ri->data;
6836 post_ptr = &jmb_prod_idx;
6837 } else
6838 goto next_pkt_nopost;
6839
6840 work_mask |= opaque_key;
6841
6842 if (desc->err_vlan & RXD_ERR_MASK) {
6843 drop_it:
6844 tg3_recycle_rx(tnapi, tpr, opaque_key,
6845 desc_idx, *post_ptr);
6846 drop_it_no_recycle:
6847 /* Other statistics kept track of by card. */
6848 tp->rx_dropped++;
6849 goto next_pkt;
6850 }
6851
6852 prefetch(data + TG3_RX_OFFSET(tp));
6853 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6854 ETH_FCS_LEN;
6855
6856 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6857 RXD_FLAG_PTPSTAT_PTPV1 ||
6858 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6859 RXD_FLAG_PTPSTAT_PTPV2) {
6860 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6861 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6862 }
6863
6864 if (len > TG3_RX_COPY_THRESH(tp)) {
6865 int skb_size;
6866 unsigned int frag_size;
6867
6868 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6869 *post_ptr, &frag_size);
6870 if (skb_size < 0)
6871 goto drop_it;
6872
6873 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6874 DMA_FROM_DEVICE);
6875
6876 /* Ensure that the update to the data happens
6877 * after the usage of the old DMA mapping.
6878 */
6879 smp_wmb();
6880
6881 ri->data = NULL;
6882
6883 if (frag_size)
6884 skb = build_skb(data, frag_size);
6885 else
6886 skb = slab_build_skb(data);
6887 if (!skb) {
6888 tg3_frag_free(frag_size != 0, data);
6889 goto drop_it_no_recycle;
6890 }
6891 skb_reserve(skb, TG3_RX_OFFSET(tp));
6892 } else {
6893 tg3_recycle_rx(tnapi, tpr, opaque_key,
6894 desc_idx, *post_ptr);
6895
6896 skb = netdev_alloc_skb(tp->dev,
6897 len + TG3_RAW_IP_ALIGN);
6898 if (skb == NULL)
6899 goto drop_it_no_recycle;
6900
6901 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6902 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6903 DMA_FROM_DEVICE);
6904 memcpy(skb->data,
6905 data + TG3_RX_OFFSET(tp),
6906 len);
6907 dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6908 len, DMA_FROM_DEVICE);
6909 }
6910
6911 skb_put(skb, len);
6912 if (tstamp)
6913 tg3_hwclock_to_timestamp(tp, tstamp,
6914 skb_hwtstamps(skb));
6915
6916 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6917 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6918 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6919 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6920 skb->ip_summed = CHECKSUM_UNNECESSARY;
6921 else
6922 skb_checksum_none_assert(skb);
6923
6924 skb->protocol = eth_type_trans(skb, tp->dev);
6925
6926 if (len > (tp->dev->mtu + ETH_HLEN) &&
6927 skb->protocol != htons(ETH_P_8021Q) &&
6928 skb->protocol != htons(ETH_P_8021AD)) {
6929 dev_kfree_skb_any(skb);
6930 goto drop_it_no_recycle;
6931 }
6932
6933 if (desc->type_flags & RXD_FLAG_VLAN &&
6934 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6935 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6936 desc->err_vlan & RXD_VLAN_MASK);
6937
6938 napi_gro_receive(&tnapi->napi, skb);
6939
6940 received++;
6941 budget--;
6942
6943 next_pkt:
6944 (*post_ptr)++;
6945
6946 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6947 tpr->rx_std_prod_idx = std_prod_idx &
6948 tp->rx_std_ring_mask;
6949 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6950 tpr->rx_std_prod_idx);
6951 work_mask &= ~RXD_OPAQUE_RING_STD;
6952 rx_std_posted = 0;
6953 }
6954 next_pkt_nopost:
6955 sw_idx++;
6956 sw_idx &= tp->rx_ret_ring_mask;
6957
6958 /* Refresh hw_idx to see if there is new work */
6959 if (sw_idx == hw_idx) {
6960 hw_idx = *(tnapi->rx_rcb_prod_idx);
6961 rmb();
6962 }
6963 }
6964
6965 /* ACK the status ring. */
6966 tnapi->rx_rcb_ptr = sw_idx;
6967 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6968
6969 /* Refill RX ring(s). */
6970 if (!tg3_flag(tp, ENABLE_RSS)) {
6971 /* Sync BD data before updating mailbox */
6972 wmb();
6973
6974 if (work_mask & RXD_OPAQUE_RING_STD) {
6975 tpr->rx_std_prod_idx = std_prod_idx &
6976 tp->rx_std_ring_mask;
6977 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6978 tpr->rx_std_prod_idx);
6979 }
6980 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6981 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6982 tp->rx_jmb_ring_mask;
6983 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6984 tpr->rx_jmb_prod_idx);
6985 }
6986 } else if (work_mask) {
6987 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6988 * updated before the producer indices can be updated.
6989 */
6990 smp_wmb();
6991
6992 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6993 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6994
6995 if (tnapi != &tp->napi[1]) {
6996 tp->rx_refill = true;
6997 napi_schedule(&tp->napi[1].napi);
6998 }
6999 }
7000
7001 return received;
7002 }
7003
7004 static void tg3_poll_link(struct tg3 *tp)
7005 {
7006 /* handle link change and other phy events */
7007 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7008 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7009
7010 if (sblk->status & SD_STATUS_LINK_CHG) {
7011 sblk->status = SD_STATUS_UPDATED |
7012 (sblk->status & ~SD_STATUS_LINK_CHG);
7013 spin_lock(&tp->lock);
7014 if (tg3_flag(tp, USE_PHYLIB)) {
7015 tw32_f(MAC_STATUS,
7016 (MAC_STATUS_SYNC_CHANGED |
7017 MAC_STATUS_CFG_CHANGED |
7018 MAC_STATUS_MI_COMPLETION |
7019 MAC_STATUS_LNKSTATE_CHANGED));
7020 udelay(40);
7021 } else
7022 tg3_setup_phy(tp, false);
7023 spin_unlock(&tp->lock);
7024 }
7025 }
7026 }
7027
7028 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7029 struct tg3_rx_prodring_set *dpr,
7030 struct tg3_rx_prodring_set *spr)
7031 {
7032 u32 si, di, cpycnt, src_prod_idx;
7033 int i, err = 0;
7034
7035 while (1) {
7036 src_prod_idx = spr->rx_std_prod_idx;
7037
7038 /* Make sure updates to the rx_std_buffers[] entries and the
7039 * standard producer index are seen in the correct order.
7040 */
7041 smp_rmb();
7042
7043 if (spr->rx_std_cons_idx == src_prod_idx)
7044 break;
7045
7046 if (spr->rx_std_cons_idx < src_prod_idx)
7047 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7048 else
7049 cpycnt = tp->rx_std_ring_mask + 1 -
7050 spr->rx_std_cons_idx;
7051
7052 cpycnt = min(cpycnt,
7053 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7054
7055 si = spr->rx_std_cons_idx;
7056 di = dpr->rx_std_prod_idx;
7057
7058 for (i = di; i < di + cpycnt; i++) {
7059 if (dpr->rx_std_buffers[i].data) {
7060 cpycnt = i - di;
7061 err = -ENOSPC;
7062 break;
7063 }
7064 }
7065
7066 if (!cpycnt)
7067 break;
7068
7069 /* Ensure that updates to the rx_std_buffers ring and the
7070 * shadowed hardware producer ring from tg3_recycle_skb() are
7071 * ordered correctly WRT the skb check above.
7072 */
7073 smp_rmb();
7074
7075 memcpy(&dpr->rx_std_buffers[di],
7076 &spr->rx_std_buffers[si],
7077 cpycnt * sizeof(struct ring_info));
7078
7079 for (i = 0; i < cpycnt; i++, di++, si++) {
7080 struct tg3_rx_buffer_desc *sbd, *dbd;
7081 sbd = &spr->rx_std[si];
7082 dbd = &dpr->rx_std[di];
7083 dbd->addr_hi = sbd->addr_hi;
7084 dbd->addr_lo = sbd->addr_lo;
7085 }
7086
7087 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7088 tp->rx_std_ring_mask;
7089 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7090 tp->rx_std_ring_mask;
7091 }
7092
7093 while (1) {
7094 src_prod_idx = spr->rx_jmb_prod_idx;
7095
7096 /* Make sure updates to the rx_jmb_buffers[] entries and
7097 * the jumbo producer index are seen in the correct order.
7098 */
7099 smp_rmb();
7100
7101 if (spr->rx_jmb_cons_idx == src_prod_idx)
7102 break;
7103
7104 if (spr->rx_jmb_cons_idx < src_prod_idx)
7105 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7106 else
7107 cpycnt = tp->rx_jmb_ring_mask + 1 -
7108 spr->rx_jmb_cons_idx;
7109
7110 cpycnt = min(cpycnt,
7111 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7112
7113 si = spr->rx_jmb_cons_idx;
7114 di = dpr->rx_jmb_prod_idx;
7115
7116 for (i = di; i < di + cpycnt; i++) {
7117 if (dpr->rx_jmb_buffers[i].data) {
7118 cpycnt = i - di;
7119 err = -ENOSPC;
7120 break;
7121 }
7122 }
7123
7124 if (!cpycnt)
7125 break;
7126
7127 /* Ensure that updates to the rx_jmb_buffers ring and the
7128 * shadowed hardware producer ring from tg3_recycle_skb() are
7129 * ordered correctly WRT the skb check above.
7130 */
7131 smp_rmb();
7132
7133 memcpy(&dpr->rx_jmb_buffers[di],
7134 &spr->rx_jmb_buffers[si],
7135 cpycnt * sizeof(struct ring_info));
7136
7137 for (i = 0; i < cpycnt; i++, di++, si++) {
7138 struct tg3_rx_buffer_desc *sbd, *dbd;
7139 sbd = &spr->rx_jmb[si].std;
7140 dbd = &dpr->rx_jmb[di].std;
7141 dbd->addr_hi = sbd->addr_hi;
7142 dbd->addr_lo = sbd->addr_lo;
7143 }
7144
7145 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7146 tp->rx_jmb_ring_mask;
7147 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7148 tp->rx_jmb_ring_mask;
7149 }
7150
7151 return err;
7152 }
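
/* Worked example of the copy-count math above, assuming a 512-entry
 * standard ring (rx_std_ring_mask = 511): with rx_std_cons_idx = 500
 * and src_prod_idx = 20 the producer has wrapped, so the first pass
 * copies 512 - 500 = 12 entries up to the end of the ring, the
 * consumer index wraps to 0, and the next loop iteration copies the
 * remaining 20 entries.
 */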
7153
7154 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7155 {
7156 struct tg3 *tp = tnapi->tp;
7157
7158 /* run TX completion thread */
7159 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7160 tg3_tx(tnapi);
7161 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7162 return work_done;
7163 }
7164
7165 if (!tnapi->rx_rcb_prod_idx)
7166 return work_done;
7167
7168 /* run RX thread, within the bounds set by NAPI.
7169 * All RX "locking" is done by ensuring outside
7170 * code synchronizes with tg3->napi.poll()
7171 */
7172 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7173 work_done += tg3_rx(tnapi, budget - work_done);
7174
7175 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7176 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7177 int i, err = 0;
7178 u32 std_prod_idx = dpr->rx_std_prod_idx;
7179 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7180
7181 tp->rx_refill = false;
7182 for (i = 1; i <= tp->rxq_cnt; i++)
7183 err |= tg3_rx_prodring_xfer(tp, dpr,
7184 &tp->napi[i].prodring);
7185
7186 wmb();
7187
7188 if (std_prod_idx != dpr->rx_std_prod_idx)
7189 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7190 dpr->rx_std_prod_idx);
7191
7192 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7193 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7194 dpr->rx_jmb_prod_idx);
7195
7196 if (err)
7197 tw32_f(HOSTCC_MODE, tp->coal_now);
7198 }
7199
7200 return work_done;
7201 }
7202
7203 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7204 {
7205 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7206 schedule_work(&tp->reset_task);
7207 }
7208
7209 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7210 {
7211 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7212 cancel_work_sync(&tp->reset_task);
7213 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7214 }
7215
7216 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7217 {
7218 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7219 struct tg3 *tp = tnapi->tp;
7220 int work_done = 0;
7221 struct tg3_hw_status *sblk = tnapi->hw_status;
7222
7223 while (1) {
7224 work_done = tg3_poll_work(tnapi, work_done, budget);
7225
7226 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7227 goto tx_recovery;
7228
7229 if (unlikely(work_done >= budget))
7230 break;
7231
7232 /* tp->last_tag is used in tg3_int_reenable() below
7233 * to tell the hw how much work has been processed,
7234 * so we must read it before checking for more work.
7235 */
7236 tnapi->last_tag = sblk->status_tag;
7237 tnapi->last_irq_tag = tnapi->last_tag;
7238 rmb();
7239
7240 /* check for RX/TX work to do */
7241 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7242 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7243
7244 /* This test here is not race free, but will reduce
7245 * the number of interrupts by looping again.
7246 */
7247 if (tnapi == &tp->napi[1] && tp->rx_refill)
7248 continue;
7249
7250 napi_complete_done(napi, work_done);
7251 /* Reenable interrupts. */
7252 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7253
7254 /* This test here is synchronized by napi_schedule()
7255 * and napi_complete() to close the race condition.
7256 */
7257 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7258 tw32(HOSTCC_MODE, tp->coalesce_mode |
7259 HOSTCC_MODE_ENABLE |
7260 tnapi->coal_now);
7261 }
7262 break;
7263 }
7264 }
7265
7266 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7267 return work_done;
7268
7269 tx_recovery:
7270 /* work_done is guaranteed to be less than budget. */
7271 napi_complete(napi);
7272 tg3_reset_task_schedule(tp);
7273 return work_done;
7274 }
7275
7276 static void tg3_process_error(struct tg3 *tp)
7277 {
7278 u32 val;
7279 bool real_error = false;
7280
7281 if (tg3_flag(tp, ERROR_PROCESSED))
7282 return;
7283
7284 /* Check Flow Attention register */
7285 val = tr32(HOSTCC_FLOW_ATTN);
7286 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7287 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7288 real_error = true;
7289 }
7290
7291 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7292 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7293 real_error = true;
7294 }
7295
7296 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7297 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7298 real_error = true;
7299 }
7300
7301 if (!real_error)
7302 return;
7303
7304 tg3_dump_state(tp);
7305
7306 tg3_flag_set(tp, ERROR_PROCESSED);
7307 tg3_reset_task_schedule(tp);
7308 }
7309
7310 static int tg3_poll(struct napi_struct *napi, int budget)
7311 {
7312 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7313 struct tg3 *tp = tnapi->tp;
7314 int work_done = 0;
7315 struct tg3_hw_status *sblk = tnapi->hw_status;
7316
7317 while (1) {
7318 if (sblk->status & SD_STATUS_ERROR)
7319 tg3_process_error(tp);
7320
7321 tg3_poll_link(tp);
7322
7323 work_done = tg3_poll_work(tnapi, work_done, budget);
7324
7325 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7326 goto tx_recovery;
7327
7328 if (unlikely(work_done >= budget))
7329 break;
7330
7331 if (tg3_flag(tp, TAGGED_STATUS)) {
7332 /* tp->last_tag is used in tg3_int_reenable() below
7333 * to tell the hw how much work has been processed,
7334 * so we must read it before checking for more work.
7335 */
7336 tnapi->last_tag = sblk->status_tag;
7337 tnapi->last_irq_tag = tnapi->last_tag;
7338 rmb();
7339 } else
7340 sblk->status &= ~SD_STATUS_UPDATED;
7341
7342 if (likely(!tg3_has_work(tnapi))) {
7343 napi_complete_done(napi, work_done);
7344 tg3_int_reenable(tnapi);
7345 break;
7346 }
7347 }
7348
7349 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7350 return work_done;
7351
7352 tx_recovery:
7353 /* work_done is guaranteed to be less than budget. */
7354 napi_complete(napi);
7355 tg3_reset_task_schedule(tp);
7356 return work_done;
7357 }
7358
7359 static void tg3_napi_disable(struct tg3 *tp)
7360 {
7361 int i;
7362
7363 for (i = tp->irq_cnt - 1; i >= 0; i--)
7364 napi_disable(&tp->napi[i].napi);
7365 }
7366
7367 static void tg3_napi_enable(struct tg3 *tp)
7368 {
7369 int i;
7370
7371 for (i = 0; i < tp->irq_cnt; i++)
7372 napi_enable(&tp->napi[i].napi);
7373 }
7374
7375 static void tg3_napi_init(struct tg3 *tp)
7376 {
7377 int i;
7378
7379 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7380 for (i = 1; i < tp->irq_cnt; i++)
7381 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
7382 }
7383
7384 static void tg3_napi_fini(struct tg3 *tp)
7385 {
7386 int i;
7387
7388 for (i = 0; i < tp->irq_cnt; i++)
7389 netif_napi_del(&tp->napi[i].napi);
7390 }
7391
7392 static inline void tg3_netif_stop(struct tg3 *tp)
7393 {
7394 netif_trans_update(tp->dev); /* prevent tx timeout */
7395 tg3_napi_disable(tp);
7396 netif_carrier_off(tp->dev);
7397 netif_tx_disable(tp->dev);
7398 }
7399
7400 /* tp->lock must be held */
7401 static inline void tg3_netif_start(struct tg3 *tp)
7402 {
7403 tg3_ptp_resume(tp);
7404
7405 /* NOTE: unconditional netif_tx_wake_all_queues is only
7406 * appropriate so long as all callers are assured to
7407 * have free tx slots (such as after tg3_init_hw)
7408 */
7409 netif_tx_wake_all_queues(tp->dev);
7410
7411 if (tp->link_up)
7412 netif_carrier_on(tp->dev);
7413
7414 tg3_napi_enable(tp);
7415 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7416 tg3_enable_ints(tp);
7417 }
7418
7419 static void tg3_irq_quiesce(struct tg3 *tp)
7420 __releases(tp->lock)
7421 __acquires(tp->lock)
7422 {
7423 int i;
7424
7425 BUG_ON(tp->irq_sync);
7426
7427 tp->irq_sync = 1;
7428 smp_mb();
7429
7430 spin_unlock_bh(&tp->lock);
7431
7432 for (i = 0; i < tp->irq_cnt; i++)
7433 synchronize_irq(tp->napi[i].irq_vec);
7434
7435 spin_lock_bh(&tp->lock);
7436 }
7437
7438 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7439 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7440 * with as well. Most of the time, this is not necessary except when
7441 * shutting down the device.
7442 */
7443 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7444 {
7445 spin_lock_bh(&tp->lock);
7446 if (irq_sync)
7447 tg3_irq_quiesce(tp);
7448 }
7449
7450 static inline void tg3_full_unlock(struct tg3 *tp)
7451 {
7452 spin_unlock_bh(&tp->lock);
7453 }
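
/* A sketch of how these helpers are typically used elsewhere in this
 * driver (pattern only, not a new code path):
 *
 *	tg3_full_lock(tp, 1);	// irq_sync != 0 also quiesces the ISRs
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	tg3_init_hw(tp, true);
 *	tg3_full_unlock(tp);
 */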
7454
7455 /* One-shot MSI handler - Chip automatically disables interrupt
7456 * after sending MSI so driver doesn't have to do it.
7457 */
7458 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7459 {
7460 struct tg3_napi *tnapi = dev_id;
7461 struct tg3 *tp = tnapi->tp;
7462
7463 prefetch(tnapi->hw_status);
7464 if (tnapi->rx_rcb)
7465 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7466
7467 if (likely(!tg3_irq_sync(tp)))
7468 napi_schedule(&tnapi->napi);
7469
7470 return IRQ_HANDLED;
7471 }
7472
7473 /* MSI ISR - No need to check for interrupt sharing and no need to
7474 * flush status block and interrupt mailbox. PCI ordering rules
7475 * guarantee that MSI will arrive after the status block.
7476 */
7477 static irqreturn_t tg3_msi(int irq, void *dev_id)
7478 {
7479 struct tg3_napi *tnapi = dev_id;
7480 struct tg3 *tp = tnapi->tp;
7481
7482 prefetch(tnapi->hw_status);
7483 if (tnapi->rx_rcb)
7484 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7485 /*
7486 * Writing any value to intr-mbox-0 clears PCI INTA# and
7487 * chip-internal interrupt pending events.
7488 * Writing non-zero to intr-mbox-0 additionally tells the
7489 * NIC to stop sending us irqs, engaging "in-intr-handler"
7490 * event coalescing.
7491 */
7492 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7493 if (likely(!tg3_irq_sync(tp)))
7494 napi_schedule(&tnapi->napi);
7495
7496 return IRQ_RETVAL(1);
7497 }
7498
7499 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7500 {
7501 struct tg3_napi *tnapi = dev_id;
7502 struct tg3 *tp = tnapi->tp;
7503 struct tg3_hw_status *sblk = tnapi->hw_status;
7504 unsigned int handled = 1;
7505
7506 /* In INTx mode, it is possible for the interrupt to arrive at
7507 * the CPU before the status block posted prior to the interrupt.
7508 * Reading the PCI State register will confirm whether the
7509 * interrupt is ours and will flush the status block.
7510 */
7511 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7512 if (tg3_flag(tp, CHIP_RESETTING) ||
7513 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7514 handled = 0;
7515 goto out;
7516 }
7517 }
7518
7519 /*
7520 * Writing any value to intr-mbox-0 clears PCI INTA# and
7521 * chip-internal interrupt pending events.
7522 * Writing non-zero to intr-mbox-0 additionally tells the
7523 * NIC to stop sending us irqs, engaging "in-intr-handler"
7524 * event coalescing.
7525 *
7526 * Flush the mailbox to de-assert the IRQ immediately to prevent
7527 * spurious interrupts. The flush impacts performance but
7528 * excessive spurious interrupts can be worse in some cases.
7529 */
7530 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7531 if (tg3_irq_sync(tp))
7532 goto out;
7533 sblk->status &= ~SD_STATUS_UPDATED;
7534 if (likely(tg3_has_work(tnapi))) {
7535 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7536 napi_schedule(&tnapi->napi);
7537 } else {
7538 /* No work, shared interrupt perhaps? re-enable
7539 * interrupts, and flush that PCI write
7540 */
7541 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7542 0x00000000);
7543 }
7544 out:
7545 return IRQ_RETVAL(handled);
7546 }
7547
7548 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7549 {
7550 struct tg3_napi *tnapi = dev_id;
7551 struct tg3 *tp = tnapi->tp;
7552 struct tg3_hw_status *sblk = tnapi->hw_status;
7553 unsigned int handled = 1;
7554
7555 /* In INTx mode, it is possible for the interrupt to arrive at
7556 * the CPU before the status block posted prior to the interrupt.
7557 * Reading the PCI State register will confirm whether the
7558 * interrupt is ours and will flush the status block.
7559 */
7560 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7561 if (tg3_flag(tp, CHIP_RESETTING) ||
7562 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7563 handled = 0;
7564 goto out;
7565 }
7566 }
7567
7568 /*
7569 * Writing any value to intr-mbox-0 clears PCI INTA# and
7570 * chip-internal interrupt pending events.
7571 * Writing non-zero to intr-mbox-0 additionally tells the
7572 * NIC to stop sending us irqs, engaging "in-intr-handler"
7573 * event coalescing.
7574 *
7575 * Flush the mailbox to de-assert the IRQ immediately to prevent
7576 * spurious interrupts. The flush impacts performance but
7577 * excessive spurious interrupts can be worse in some cases.
7578 */
7579 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7580
7581 /*
7582 * In a shared interrupt configuration, sometimes other devices'
7583 * interrupts will scream. We record the current status tag here
7584 * so that the above check can report that the screaming interrupts
7585 * are unhandled. Eventually they will be silenced.
7586 */
7587 tnapi->last_irq_tag = sblk->status_tag;
7588
7589 if (tg3_irq_sync(tp))
7590 goto out;
7591
7592 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7593
7594 napi_schedule(&tnapi->napi);
7595
7596 out:
7597 return IRQ_RETVAL(handled);
7598 }
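/* With tagged status the handler above never clears SD_STATUS_UPDATED;
 * the chip instead bumps status_tag for each new status block, and
 * comparing it with last_irq_tag separates stale events from fresh
 * ones. The consumed tag is echoed back to the chip elsewhere in the
 * driver when interrupts are re-enabled.
 */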
7599
7600 /* ISR for interrupt test */
7601 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7602 {
7603 struct tg3_napi *tnapi = dev_id;
7604 struct tg3 *tp = tnapi->tp;
7605 struct tg3_hw_status *sblk = tnapi->hw_status;
7606
7607 if ((sblk->status & SD_STATUS_UPDATED) ||
7608 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7609 tg3_disable_ints(tp);
7610 return IRQ_RETVAL(1);
7611 }
7612 return IRQ_RETVAL(0);
7613 }
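/* This handler is installed only for the driver's interrupt self-test;
 * it just confirms that an interrupt was delivered and masks further
 * ones via tg3_disable_ints().
 */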
7614
7615 #ifdef CONFIG_NET_POLL_CONTROLLER
7616 static void tg3_poll_controller(struct net_device *dev)
7617 {
7618 int i;
7619 struct tg3 *tp = netdev_priv(dev);
7620
7621 if (tg3_irq_sync(tp))
7622 return;
7623
7624 for (i = 0; i < tp->irq_cnt; i++)
7625 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7626 }
7627 #endif
7628
7629 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7630 {
7631 struct tg3 *tp = netdev_priv(dev);
7632
7633 if (netif_msg_tx_err(tp)) {
7634 netdev_err(dev, "transmit timed out, resetting\n");
7635 tg3_dump_state(tp);
7636 }
7637
7638 tg3_reset_task_schedule(tp);
7639 }
7640
7641 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
7642 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7643 {
7644 u32 base = (u32) mapping & 0xffffffff;
7645
7646 return base + len + 8 < base;
7647 }
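/* The math above is mod 2^32, so crossing a 4GB boundary shows up as
 * the sum wrapping below the base: e.g. mapping == 0xfffffff8 with
 * len == 16 gives base + len + 8 == 0x10 < base. The 8 bytes of slack
 * presumably cover the controller reading slightly past the buffer.
 */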
7648
7649 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7650  * of any 4GB boundary: 4G, 8G, etc.
7651 */
7652 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7653 u32 len, u32 mss)
7654 {
7655 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7656 u32 base = (u32) mapping & 0xffffffff;
7657
7658 return ((base + len + (mss & 0x3fff)) < base);
7659 }
7660 return 0;
7661 }
7662
7663 /* Test for DMA addresses > 40-bit */
7664 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7665 int len)
7666 {
7667 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7668 if (tg3_flag(tp, 40BIT_DMA_BUG))
7669 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7670 return 0;
7671 #else
7672 return 0;
7673 #endif
7674 }
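/* DMA_BIT_MASK(40) is ((1ULL << 40) - 1), so the test above fires
 * whenever the end of the mapping would lie beyond the 40-bit address
 * limit of the affected chips.
 */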
7675
7676 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7677 dma_addr_t mapping, u32 len, u32 flags,
7678 u32 mss, u32 vlan)
7679 {
7680 txbd->addr_hi = ((u64) mapping >> 32);
7681 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7682 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7683 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7684 }
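/* Packing used above: the 64-bit DMA address is split across
 * addr_hi/addr_lo, the length occupies len_flags above the low 16
 * flag bits, and the MSS and VLAN tag share the vlan_tag word via
 * their respective shifts.
 */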
7685
7686 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7687 dma_addr_t map, u32 len, u32 flags,
7688 u32 mss, u32 vlan)
7689 {
7690 struct tg3 *tp = tnapi->tp;
7691 bool hwbug = false;
7692
7693 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7694 hwbug = true;
7695
7696 if (tg3_4g_overflow_test(map, len))
7697 hwbug = true;
7698
7699 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7700 hwbug = true;
7701
7702 if (tg3_40bit_overflow_test(tp, map, len))
7703 hwbug = true;
7704
7705 if (tp->dma_limit) {
7706 u32 prvidx = *entry;
7707 u32 tmp_flag = flags & ~TXD_FLAG_END;
7708 while (len > tp->dma_limit && *budget) {
7709 u32 frag_len = tp->dma_limit;
7710 len -= tp->dma_limit;
7711
7712 			/* Avoid the 8-byte DMA problem */
7713 if (len <= 8) {
7714 len += tp->dma_limit / 2;
7715 frag_len = tp->dma_limit / 2;
7716 }
7717
7718 tnapi->tx_buffers[*entry].fragmented = true;
7719
7720 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7721 frag_len, tmp_flag, mss, vlan);
7722 *budget -= 1;
7723 prvidx = *entry;
7724 *entry = NEXT_TX(*entry);
7725
7726 map += frag_len;
7727 }
7728
7729 if (len) {
7730 if (*budget) {
7731 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7732 len, flags, mss, vlan);
7733 *budget -= 1;
7734 *entry = NEXT_TX(*entry);
7735 } else {
7736 hwbug = true;
7737 tnapi->tx_buffers[prvidx].fragmented = false;
7738 }
7739 }
7740 } else {
7741 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7742 len, flags, mss, vlan);
7743 *entry = NEXT_TX(*entry);
7744 }
7745
7746 return hwbug;
7747 }
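/* Worked example of the splitting above, assuming dma_limit == 4096:
 * an 8200-byte mapping becomes BDs of 4096, 2048 and 2056 bytes. The
 * second pass would have left an 8-byte tail, so half of the limit is
 * handed back to keep every chunk clear of the short-DMA erratum.
 */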
7748
7749 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7750 {
7751 int i;
7752 struct sk_buff *skb;
7753 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7754
7755 skb = txb->skb;
7756 txb->skb = NULL;
7757
7758 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7759 skb_headlen(skb), DMA_TO_DEVICE);
7760
7761 while (txb->fragmented) {
7762 txb->fragmented = false;
7763 entry = NEXT_TX(entry);
7764 txb = &tnapi->tx_buffers[entry];
7765 }
7766
7767 for (i = 0; i <= last; i++) {
7768 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7769
7770 entry = NEXT_TX(entry);
7771 txb = &tnapi->tx_buffers[entry];
7772
7773 dma_unmap_page(&tnapi->tp->pdev->dev,
7774 dma_unmap_addr(txb, mapping),
7775 skb_frag_size(frag), DMA_TO_DEVICE);
7776
7777 while (txb->fragmented) {
7778 txb->fragmented = false;
7779 entry = NEXT_TX(entry);
7780 txb = &tnapi->tx_buffers[entry];
7781 }
7782 }
7783 }
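/* Callers pass 'last' as the index of the final page fragment to
 * unmap, or -1 to unmap only the linear head (as the DMA workaround
 * path does). The inner while loops skip the extra descriptors that
 * tg3_tx_frag_set() consumed when it split an oversized mapping.
 */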
7784
7785 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7786 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7787 struct sk_buff **pskb,
7788 u32 *entry, u32 *budget,
7789 u32 base_flags, u32 mss, u32 vlan)
7790 {
7791 struct tg3 *tp = tnapi->tp;
7792 struct sk_buff *new_skb, *skb = *pskb;
7793 dma_addr_t new_addr = 0;
7794 int ret = 0;
7795
7796 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7797 new_skb = skb_copy(skb, GFP_ATOMIC);
7798 else {
7799 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7800
7801 new_skb = skb_copy_expand(skb,
7802 skb_headroom(skb) + more_headroom,
7803 skb_tailroom(skb), GFP_ATOMIC);
7804 }
7805
7806 if (!new_skb) {
7807 ret = -1;
7808 } else {
7809 /* New SKB is guaranteed to be linear. */
7810 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7811 new_skb->len, DMA_TO_DEVICE);
7812 /* Make sure the mapping succeeded */
7813 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7814 dev_kfree_skb_any(new_skb);
7815 ret = -1;
7816 } else {
7817 u32 save_entry = *entry;
7818
7819 base_flags |= TXD_FLAG_END;
7820
7821 tnapi->tx_buffers[*entry].skb = new_skb;
7822 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7823 mapping, new_addr);
7824
7825 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7826 new_skb->len, base_flags,
7827 mss, vlan)) {
7828 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7829 dev_kfree_skb_any(new_skb);
7830 ret = -1;
7831 }
7832 }
7833 }
7834
7835 dev_consume_skb_any(skb);
7836 *pskb = new_skb;
7837 return ret;
7838 }
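/* The extra headroom in the 5701 case above realigns the copied data;
 * that chip presumably cannot handle transmit buffers that start on a
 * non-4-byte-aligned address, while all other variants get a plain
 * linear copy via skb_copy().
 */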
7839
7840 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7841 {
7842 	/* Check whether we will ever have enough descriptors,
7843 	 * as gso_segs can be more than the current ring size
7844 	 */
7845 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7846 }
7847
7848 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7849
7850 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7851 * indicated in tg3_tx_frag_set()
7852 */
7853 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7854 struct netdev_queue *txq, struct sk_buff *skb)
7855 {
7856 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7857 struct sk_buff *segs, *seg, *next;
7858
7859 /* Estimate the number of fragments in the worst case */
7860 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7861 netif_tx_stop_queue(txq);
7862
7863 		/* netif_tx_stop_queue() must be done before checking
7864 		 * tx index in tg3_tx_avail() below, because in
7865 * tg3_tx(), we update tx index before checking for
7866 * netif_tx_queue_stopped().
7867 */
7868 smp_mb();
7869 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7870 return NETDEV_TX_BUSY;
7871
7872 netif_tx_wake_queue(txq);
7873 }
7874
7875 segs = skb_gso_segment(skb, tp->dev->features &
7876 ~(NETIF_F_TSO | NETIF_F_TSO6));
7877 if (IS_ERR(segs) || !segs)
7878 goto tg3_tso_bug_end;
7879
7880 skb_list_walk_safe(segs, seg, next) {
7881 skb_mark_not_on_list(seg);
7882 tg3_start_xmit(seg, tp->dev);
7883 }
7884
7885 tg3_tso_bug_end:
7886 dev_consume_skb_any(skb);
7887
7888 return NETDEV_TX_OK;
7889 }
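/* The gso_segs * 3 estimate above is the counterpart of the
 * tx_pending / 3 test in tg3_tso_bug_gso_check(): each segment is
 * assumed to need at most three descriptors, so a packet that passed
 * that check can always be queued once enough entries free up.
 */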
7890
7891 /* hard_start_xmit for all devices */
7892 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7893 {
7894 struct tg3 *tp = netdev_priv(dev);
7895 u32 len, entry, base_flags, mss, vlan = 0;
7896 u32 budget;
7897 int i = -1, would_hit_hwbug;
7898 dma_addr_t mapping;
7899 struct tg3_napi *tnapi;
7900 struct netdev_queue *txq;
7901 unsigned int last;
7902 struct iphdr *iph = NULL;
7903 struct tcphdr *tcph = NULL;
7904 __sum16 tcp_csum = 0, ip_csum = 0;
7905 __be16 ip_tot_len = 0;
7906
7907 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7908 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7909 if (tg3_flag(tp, ENABLE_TSS))
7910 tnapi++;
7911
7912 budget = tg3_tx_avail(tnapi);
7913
7914 /* We are running in BH disabled context with netif_tx_lock
7915 * and TX reclaim runs via tp->napi.poll inside of a software
7916 * interrupt. Furthermore, IRQ processing runs lockless so we have
7917 * no IRQ context deadlocks to worry about either. Rejoice!
7918 */
7919 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7920 if (!netif_tx_queue_stopped(txq)) {
7921 netif_tx_stop_queue(txq);
7922
7923 /* This is a hard error, log it. */
7924 netdev_err(dev,
7925 "BUG! Tx Ring full when queue awake!\n");
7926 }
7927 return NETDEV_TX_BUSY;
7928 }
7929
7930 entry = tnapi->tx_prod;
7931 base_flags = 0;
7932
7933 mss = skb_shinfo(skb)->gso_size;
7934 if (mss) {
7935 u32 tcp_opt_len, hdr_len;
7936
7937 if (skb_cow_head(skb, 0))
7938 goto drop;
7939
7940 iph = ip_hdr(skb);
7941 tcp_opt_len = tcp_optlen(skb);
7942
7943 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
7944
7945 		/* HW/FW cannot correctly segment packets that have been
7946 * vlan encapsulated.
7947 */
7948 if (skb->protocol == htons(ETH_P_8021Q) ||
7949 skb->protocol == htons(ETH_P_8021AD)) {
7950 if (tg3_tso_bug_gso_check(tnapi, skb))
7951 return tg3_tso_bug(tp, tnapi, txq, skb);
7952 goto drop;
7953 }
7954
7955 if (!skb_is_gso_v6(skb)) {
7956 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7957 tg3_flag(tp, TSO_BUG)) {
7958 if (tg3_tso_bug_gso_check(tnapi, skb))
7959 return tg3_tso_bug(tp, tnapi, txq, skb);
7960 goto drop;
7961 }
7962 ip_csum = iph->check;
7963 ip_tot_len = iph->tot_len;
7964 iph->check = 0;
7965 iph->tot_len = htons(mss + hdr_len);
7966 }
7967
7968 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7969 TXD_FLAG_CPU_POST_DMA);
7970
7971 tcph = tcp_hdr(skb);
7972 tcp_csum = tcph->check;
7973
7974 if (tg3_flag(tp, HW_TSO_1) ||
7975 tg3_flag(tp, HW_TSO_2) ||
7976 tg3_flag(tp, HW_TSO_3)) {
7977 tcph->check = 0;
7978 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7979 } else {
7980 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7981 0, IPPROTO_TCP, 0);
7982 }
7983
7984 if (tg3_flag(tp, HW_TSO_3)) {
7985 mss |= (hdr_len & 0xc) << 12;
7986 if (hdr_len & 0x10)
7987 base_flags |= 0x00000010;
7988 base_flags |= (hdr_len & 0x3e0) << 5;
7989 } else if (tg3_flag(tp, HW_TSO_2))
7990 mss |= hdr_len << 9;
7991 else if (tg3_flag(tp, HW_TSO_1) ||
7992 tg3_asic_rev(tp) == ASIC_REV_5705) {
7993 if (tcp_opt_len || iph->ihl > 5) {
7994 int tsflags;
7995
7996 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7997 mss |= (tsflags << 11);
7998 }
7999 } else {
8000 if (tcp_opt_len || iph->ihl > 5) {
8001 int tsflags;
8002
8003 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8004 base_flags |= tsflags << 12;
8005 }
8006 }
8007 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8008 		/* HW/FW cannot correctly checksum packets that have been
8009 * vlan encapsulated.
8010 */
8011 if (skb->protocol == htons(ETH_P_8021Q) ||
8012 skb->protocol == htons(ETH_P_8021AD)) {
8013 if (skb_checksum_help(skb))
8014 goto drop;
8015 } else {
8016 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8017 }
8018 }
8019
8020 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8021 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8022 base_flags |= TXD_FLAG_JMB_PKT;
8023
8024 if (skb_vlan_tag_present(skb)) {
8025 base_flags |= TXD_FLAG_VLAN;
8026 vlan = skb_vlan_tag_get(skb);
8027 }
8028
8029 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8030 tg3_flag(tp, TX_TSTAMP_EN)) {
8031 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8032 base_flags |= TXD_FLAG_HWTSTAMP;
8033 }
8034
8035 len = skb_headlen(skb);
8036
8037 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8038 DMA_TO_DEVICE);
8039 if (dma_mapping_error(&tp->pdev->dev, mapping))
8040 goto drop;
8041 
8043 tnapi->tx_buffers[entry].skb = skb;
8044 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8045
8046 would_hit_hwbug = 0;
8047
8048 if (tg3_flag(tp, 5701_DMA_BUG))
8049 would_hit_hwbug = 1;
8050
8051 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8052 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8053 mss, vlan)) {
8054 would_hit_hwbug = 1;
8055 } else if (skb_shinfo(skb)->nr_frags > 0) {
8056 u32 tmp_mss = mss;
8057
8058 if (!tg3_flag(tp, HW_TSO_1) &&
8059 !tg3_flag(tp, HW_TSO_2) &&
8060 !tg3_flag(tp, HW_TSO_3))
8061 tmp_mss = 0;
8062
8063 /* Now loop through additional data
8064 * fragments, and queue them.
8065 */
8066 last = skb_shinfo(skb)->nr_frags - 1;
8067 for (i = 0; i <= last; i++) {
8068 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8069
8070 len = skb_frag_size(frag);
8071 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8072 len, DMA_TO_DEVICE);
8073
8074 tnapi->tx_buffers[entry].skb = NULL;
8075 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8076 mapping);
8077 if (dma_mapping_error(&tp->pdev->dev, mapping))
8078 goto dma_error;
8079
8080 if (!budget ||
8081 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8082 len, base_flags |
8083 ((i == last) ? TXD_FLAG_END : 0),
8084 tmp_mss, vlan)) {
8085 would_hit_hwbug = 1;
8086 break;
8087 }
8088 }
8089 }
8090
8091 if (would_hit_hwbug) {
8092 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8093
8094 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8095 /* If it's a TSO packet, do GSO instead of
8096 * allocating and copying to a large linear SKB
8097 */
8098 if (ip_tot_len) {
8099 iph->check = ip_csum;
8100 iph->tot_len = ip_tot_len;
8101 }
8102 tcph->check = tcp_csum;
8103 return tg3_tso_bug(tp, tnapi, txq, skb);
8104 }
8105
8106 /* If the workaround fails due to memory/mapping
8107 * failure, silently drop this packet.
8108 */
8109 entry = tnapi->tx_prod;
8110 budget = tg3_tx_avail(tnapi);
8111 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8112 base_flags, mss, vlan))
8113 goto drop_nofree;
8114 }
8115
8116 skb_tx_timestamp(skb);
8117 netdev_tx_sent_queue(txq, skb->len);
8118
8119 /* Sync BD data before updating mailbox */
8120 wmb();
8121
8122 tnapi->tx_prod = entry;
8123 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8124 netif_tx_stop_queue(txq);
8125
8126 		/* netif_tx_stop_queue() must be done before checking
8127 		 * tx index in tg3_tx_avail() below, because in
8128 * tg3_tx(), we update tx index before checking for
8129 * netif_tx_queue_stopped().
8130 */
8131 smp_mb();
8132 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8133 netif_tx_wake_queue(txq);
8134 }
8135
8136 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8137 /* Packets are ready, update Tx producer idx on card. */
8138 tw32_tx_mbox(tnapi->prodmbox, entry);
8139 }
8140
8141 return NETDEV_TX_OK;
8142
8143 dma_error:
8144 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8145 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8146 drop:
8147 dev_kfree_skb_any(skb);
8148 drop_nofree:
8149 tp->tx_dropped++;
8150 return NETDEV_TX_OK;
8151 }
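/* Recap of the transmit path above: map the linear head and then each
 * page fragment, letting tg3_tx_frag_set() report any descriptor that
 * would trip a hardware DMA bug. On a hwbug, TSO packets fall back to
 * GSO (after restoring the IP/TCP header fields modified earlier);
 * everything else is copied into a freshly mapped linear skb. The
 * producer mailbox is only kicked once xmit_more says no more packets
 * are coming.
 */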
8152
8153 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8154 {
8155 if (enable) {
8156 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8157 MAC_MODE_PORT_MODE_MASK);
8158
8159 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8160
8161 if (!tg3_flag(tp, 5705_PLUS))
8162 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8163
8164 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8165 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8166 else
8167 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8168 } else {
8169 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8170
8171 if (tg3_flag(tp, 5705_PLUS) ||
8172 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8173 tg3_asic_rev(tp) == ASIC_REV_5700)
8174 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8175 }
8176
8177 tw32(MAC_MODE, tp->mac_mode);
8178 udelay(40);
8179 }
8180
8181 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8182 {
8183 u32 val, bmcr, mac_mode, ptest = 0;
8184
8185 tg3_phy_toggle_apd(tp, false);
8186 tg3_phy_toggle_automdix(tp, false);
8187
8188 if (extlpbk && tg3_phy_set_extloopbk(tp))
8189 return -EIO;
8190
8191 bmcr = BMCR_FULLDPLX;
8192 switch (speed) {
8193 case SPEED_10:
8194 break;
8195 case SPEED_100:
8196 bmcr |= BMCR_SPEED100;
8197 break;
8198 case SPEED_1000:
8199 default:
8200 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8201 speed = SPEED_100;
8202 bmcr |= BMCR_SPEED100;
8203 } else {
8204 speed = SPEED_1000;
8205 bmcr |= BMCR_SPEED1000;
8206 }
8207 }
8208
8209 if (extlpbk) {
8210 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8211 tg3_readphy(tp, MII_CTRL1000, &val);
8212 val |= CTL1000_AS_MASTER |
8213 CTL1000_ENABLE_MASTER;
8214 tg3_writephy(tp, MII_CTRL1000, val);
8215 } else {
8216 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8217 MII_TG3_FET_PTEST_TRIM_2;
8218 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8219 }
8220 } else
8221 bmcr |= BMCR_LOOPBACK;
8222
8223 tg3_writephy(tp, MII_BMCR, bmcr);
8224
8225 /* The write needs to be flushed for the FETs */
8226 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8227 tg3_readphy(tp, MII_BMCR, &bmcr);
8228
8229 udelay(40);
8230
8231 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8232 tg3_asic_rev(tp) == ASIC_REV_5785) {
8233 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8234 MII_TG3_FET_PTEST_FRC_TX_LINK |
8235 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8236
8237 /* The write needs to be flushed for the AC131 */
8238 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8239 }
8240
8241 /* Reset to prevent losing 1st rx packet intermittently */
8242 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8243 tg3_flag(tp, 5780_CLASS)) {
8244 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8245 udelay(10);
8246 tw32_f(MAC_RX_MODE, tp->rx_mode);
8247 }
8248
8249 mac_mode = tp->mac_mode &
8250 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8251 if (speed == SPEED_1000)
8252 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8253 else
8254 mac_mode |= MAC_MODE_PORT_MODE_MII;
8255
8256 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8257 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8258
8259 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8260 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8261 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8262 mac_mode |= MAC_MODE_LINK_POLARITY;
8263
8264 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8265 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8266 }
8267
8268 tw32(MAC_MODE, mac_mode);
8269 udelay(40);
8270
8271 return 0;
8272 }
8273
8274 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8275 {
8276 struct tg3 *tp = netdev_priv(dev);
8277
8278 if (features & NETIF_F_LOOPBACK) {
8279 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8280 return;
8281
8282 spin_lock_bh(&tp->lock);
8283 tg3_mac_loopback(tp, true);
8284 netif_carrier_on(tp->dev);
8285 spin_unlock_bh(&tp->lock);
8286 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8287 } else {
8288 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8289 return;
8290
8291 spin_lock_bh(&tp->lock);
8292 tg3_mac_loopback(tp, false);
8293 /* Force link status check */
8294 tg3_setup_phy(tp, true);
8295 spin_unlock_bh(&tp->lock);
8296 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8297 }
8298 }
8299
8300 static netdev_features_t tg3_fix_features(struct net_device *dev,
8301 netdev_features_t features)
8302 {
8303 struct tg3 *tp = netdev_priv(dev);
8304
8305 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8306 features &= ~NETIF_F_ALL_TSO;
8307
8308 return features;
8309 }
8310
8311 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8312 {
8313 netdev_features_t changed = dev->features ^ features;
8314
8315 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8316 tg3_set_loopback(dev, features);
8317
8318 return 0;
8319 }
8320
8321 static void tg3_rx_prodring_free(struct tg3 *tp,
8322 struct tg3_rx_prodring_set *tpr)
8323 {
8324 int i;
8325
8326 if (tpr != &tp->napi[0].prodring) {
8327 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8328 i = (i + 1) & tp->rx_std_ring_mask)
8329 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8330 tp->rx_pkt_map_sz);
8331
8332 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8333 for (i = tpr->rx_jmb_cons_idx;
8334 i != tpr->rx_jmb_prod_idx;
8335 i = (i + 1) & tp->rx_jmb_ring_mask) {
8336 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8337 TG3_RX_JMB_MAP_SZ);
8338 }
8339 }
8340
8341 return;
8342 }
8343
8344 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8345 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8346 tp->rx_pkt_map_sz);
8347
8348 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8349 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8350 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8351 TG3_RX_JMB_MAP_SZ);
8352 }
8353 }
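/* Note the asymmetry above: the extra per-vector prodrings only hold
 * live buffers between their consumer and producer indexes, while the
 * default ring (napi[0].prodring) owns every slot and is freed in
 * full.
 */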
8354
8355 /* Initialize rx rings for packet processing.
8356 *
8357 * The chip has been shut down and the driver detached from
8358  * the networking stack, so no interrupts or new tx packets will
8359 * end up in the driver. tp->{tx,}lock are held and thus
8360 * we may not sleep.
8361 */
8362 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8363 struct tg3_rx_prodring_set *tpr)
8364 {
8365 u32 i, rx_pkt_dma_sz;
8366
8367 tpr->rx_std_cons_idx = 0;
8368 tpr->rx_std_prod_idx = 0;
8369 tpr->rx_jmb_cons_idx = 0;
8370 tpr->rx_jmb_prod_idx = 0;
8371
8372 if (tpr != &tp->napi[0].prodring) {
8373 memset(&tpr->rx_std_buffers[0], 0,
8374 TG3_RX_STD_BUFF_RING_SIZE(tp));
8375 if (tpr->rx_jmb_buffers)
8376 memset(&tpr->rx_jmb_buffers[0], 0,
8377 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8378 goto done;
8379 }
8380
8381 /* Zero out all descriptors. */
8382 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8383
8384 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8385 if (tg3_flag(tp, 5780_CLASS) &&
8386 tp->dev->mtu > ETH_DATA_LEN)
8387 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8388 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8389
8390 	/* Initialize invariants of the rings; we only set this
8391 * stuff once. This works because the card does not
8392 * write into the rx buffer posting rings.
8393 */
8394 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8395 struct tg3_rx_buffer_desc *rxd;
8396
8397 rxd = &tpr->rx_std[i];
8398 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8399 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8400 rxd->opaque = (RXD_OPAQUE_RING_STD |
8401 (i << RXD_OPAQUE_INDEX_SHIFT));
8402 }
8403
8404 /* Now allocate fresh SKBs for each rx ring. */
8405 for (i = 0; i < tp->rx_pending; i++) {
8406 unsigned int frag_size;
8407
8408 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8409 &frag_size) < 0) {
8410 netdev_warn(tp->dev,
8411 "Using a smaller RX standard ring. Only "
8412 "%d out of %d buffers were allocated "
8413 "successfully\n", i, tp->rx_pending);
8414 if (i == 0)
8415 goto initfail;
8416 tp->rx_pending = i;
8417 break;
8418 }
8419 }
8420
8421 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8422 goto done;
8423
8424 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8425
8426 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8427 goto done;
8428
8429 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8430 struct tg3_rx_buffer_desc *rxd;
8431
8432 rxd = &tpr->rx_jmb[i].std;
8433 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8434 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8435 RXD_FLAG_JUMBO;
8436 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8437 (i << RXD_OPAQUE_INDEX_SHIFT));
8438 }
8439
8440 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8441 unsigned int frag_size;
8442
8443 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8444 &frag_size) < 0) {
8445 netdev_warn(tp->dev,
8446 "Using a smaller RX jumbo ring. Only %d "
8447 "out of %d buffers were allocated "
8448 "successfully\n", i, tp->rx_jumbo_pending);
8449 if (i == 0)
8450 goto initfail;
8451 tp->rx_jumbo_pending = i;
8452 break;
8453 }
8454 }
8455
8456 done:
8457 return 0;
8458
8459 initfail:
8460 tg3_rx_prodring_free(tp, tpr);
8461 return -ENOMEM;
8462 }
8463
8464 static void tg3_rx_prodring_fini(struct tg3 *tp,
8465 struct tg3_rx_prodring_set *tpr)
8466 {
8467 kfree(tpr->rx_std_buffers);
8468 tpr->rx_std_buffers = NULL;
8469 kfree(tpr->rx_jmb_buffers);
8470 tpr->rx_jmb_buffers = NULL;
8471 if (tpr->rx_std) {
8472 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8473 tpr->rx_std, tpr->rx_std_mapping);
8474 tpr->rx_std = NULL;
8475 }
8476 if (tpr->rx_jmb) {
8477 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8478 tpr->rx_jmb, tpr->rx_jmb_mapping);
8479 tpr->rx_jmb = NULL;
8480 }
8481 }
8482
8483 static int tg3_rx_prodring_init(struct tg3 *tp,
8484 struct tg3_rx_prodring_set *tpr)
8485 {
8486 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8487 GFP_KERNEL);
8488 if (!tpr->rx_std_buffers)
8489 return -ENOMEM;
8490
8491 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8492 TG3_RX_STD_RING_BYTES(tp),
8493 &tpr->rx_std_mapping,
8494 GFP_KERNEL);
8495 if (!tpr->rx_std)
8496 goto err_out;
8497
8498 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8499 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8500 GFP_KERNEL);
8501 if (!tpr->rx_jmb_buffers)
8502 goto err_out;
8503
8504 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8505 TG3_RX_JMB_RING_BYTES(tp),
8506 &tpr->rx_jmb_mapping,
8507 GFP_KERNEL);
8508 if (!tpr->rx_jmb)
8509 goto err_out;
8510 }
8511
8512 return 0;
8513
8514 err_out:
8515 tg3_rx_prodring_fini(tp, tpr);
8516 return -ENOMEM;
8517 }
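/* The mixed allocation above is deliberate: the descriptor rings are
 * DMA-coherent memory shared with the chip, while the rx_std_buffers
 * and rx_jmb_buffers arrays are ordinary kernel memory used only for
 * host-side bookkeeping of the attached data buffers.
 */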
8518
8519 /* Free up pending packets in all rx/tx rings.
8520 *
8521 * The chip has been shut down and the driver detached from
8522 * the networking, so no interrupts or new tx packets will
8523  * the networking stack, so no interrupts or new tx packets will
8524 * in an interrupt context and thus may sleep.
8525 */
8526 static void tg3_free_rings(struct tg3 *tp)
8527 {
8528 int i, j;
8529
8530 for (j = 0; j < tp->irq_cnt; j++) {
8531 struct tg3_napi *tnapi = &tp->napi[j];
8532
8533 tg3_rx_prodring_free(tp, &tnapi->prodring);
8534
8535 if (!tnapi->tx_buffers)
8536 continue;
8537
8538 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8539 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8540
8541 if (!skb)
8542 continue;
8543
8544 tg3_tx_skb_unmap(tnapi, i,
8545 skb_shinfo(skb)->nr_frags - 1);
8546
8547 dev_consume_skb_any(skb);
8548 }
8549 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8550 }
8551 }
8552
8553 /* Initialize tx/rx rings for packet processing.
8554 *
8555 * The chip has been shut down and the driver detached from
8556  * the networking stack, so no interrupts or new tx packets will
8557 * end up in the driver. tp->{tx,}lock are held and thus
8558 * we may not sleep.
8559 */
8560 static int tg3_init_rings(struct tg3 *tp)
8561 {
8562 int i;
8563
8564 /* Free up all the SKBs. */
8565 tg3_free_rings(tp);
8566
8567 for (i = 0; i < tp->irq_cnt; i++) {
8568 struct tg3_napi *tnapi = &tp->napi[i];
8569
8570 tnapi->last_tag = 0;
8571 tnapi->last_irq_tag = 0;
8572 tnapi->hw_status->status = 0;
8573 tnapi->hw_status->status_tag = 0;
8574 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8575
8576 tnapi->tx_prod = 0;
8577 tnapi->tx_cons = 0;
8578 if (tnapi->tx_ring)
8579 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8580
8581 tnapi->rx_rcb_ptr = 0;
8582 if (tnapi->rx_rcb)
8583 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8584
8585 if (tnapi->prodring.rx_std &&
8586 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8587 tg3_free_rings(tp);
8588 return -ENOMEM;
8589 }
8590 }
8591
8592 return 0;
8593 }
8594
8595 static void tg3_mem_tx_release(struct tg3 *tp)
8596 {
8597 int i;
8598
8599 for (i = 0; i < tp->irq_max; i++) {
8600 struct tg3_napi *tnapi = &tp->napi[i];
8601
8602 if (tnapi->tx_ring) {
8603 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8604 tnapi->tx_ring, tnapi->tx_desc_mapping);
8605 tnapi->tx_ring = NULL;
8606 }
8607
8608 kfree(tnapi->tx_buffers);
8609 tnapi->tx_buffers = NULL;
8610 }
8611 }
8612
8613 static int tg3_mem_tx_acquire(struct tg3 *tp)
8614 {
8615 int i;
8616 struct tg3_napi *tnapi = &tp->napi[0];
8617
8618 /* If multivector TSS is enabled, vector 0 does not handle
8619 * tx interrupts. Don't allocate any resources for it.
8620 */
8621 if (tg3_flag(tp, ENABLE_TSS))
8622 tnapi++;
8623
8624 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8625 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8626 sizeof(struct tg3_tx_ring_info),
8627 GFP_KERNEL);
8628 if (!tnapi->tx_buffers)
8629 goto err_out;
8630
8631 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8632 TG3_TX_RING_BYTES,
8633 &tnapi->tx_desc_mapping,
8634 GFP_KERNEL);
8635 if (!tnapi->tx_ring)
8636 goto err_out;
8637 }
8638
8639 return 0;
8640
8641 err_out:
8642 tg3_mem_tx_release(tp);
8643 return -ENOMEM;
8644 }
8645
8646 static void tg3_mem_rx_release(struct tg3 *tp)
8647 {
8648 int i;
8649
8650 for (i = 0; i < tp->irq_max; i++) {
8651 struct tg3_napi *tnapi = &tp->napi[i];
8652
8653 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8654
8655 if (!tnapi->rx_rcb)
8656 continue;
8657
8658 dma_free_coherent(&tp->pdev->dev,
8659 TG3_RX_RCB_RING_BYTES(tp),
8660 tnapi->rx_rcb,
8661 tnapi->rx_rcb_mapping);
8662 tnapi->rx_rcb = NULL;
8663 }
8664 }
8665
8666 static int tg3_mem_rx_acquire(struct tg3 *tp)
8667 {
8668 unsigned int i, limit;
8669
8670 limit = tp->rxq_cnt;
8671
8672 /* If RSS is enabled, we need a (dummy) producer ring
8673 * set on vector zero. This is the true hw prodring.
8674 */
8675 if (tg3_flag(tp, ENABLE_RSS))
8676 limit++;
8677
8678 for (i = 0; i < limit; i++) {
8679 struct tg3_napi *tnapi = &tp->napi[i];
8680
8681 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8682 goto err_out;
8683
8684 /* If multivector RSS is enabled, vector 0
8685 * does not handle rx or tx interrupts.
8686 * Don't allocate any resources for it.
8687 */
8688 if (!i && tg3_flag(tp, ENABLE_RSS))
8689 continue;
8690
8691 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8692 TG3_RX_RCB_RING_BYTES(tp),
8693 &tnapi->rx_rcb_mapping,
8694 GFP_KERNEL);
8695 if (!tnapi->rx_rcb)
8696 goto err_out;
8697 }
8698
8699 return 0;
8700
8701 err_out:
8702 tg3_mem_rx_release(tp);
8703 return -ENOMEM;
8704 }
8705
8706 /*
8707 * Must not be invoked with interrupt sources disabled and
8708  * the hardware shut down.
8709 */
8710 static void tg3_free_consistent(struct tg3 *tp)
8711 {
8712 int i;
8713
8714 for (i = 0; i < tp->irq_cnt; i++) {
8715 struct tg3_napi *tnapi = &tp->napi[i];
8716
8717 if (tnapi->hw_status) {
8718 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8719 tnapi->hw_status,
8720 tnapi->status_mapping);
8721 tnapi->hw_status = NULL;
8722 }
8723 }
8724
8725 tg3_mem_rx_release(tp);
8726 tg3_mem_tx_release(tp);
8727
8728 /* tp->hw_stats can be referenced safely:
8729 * 1. under rtnl_lock
8730 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8731 */
8732 if (tp->hw_stats) {
8733 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8734 tp->hw_stats, tp->stats_mapping);
8735 tp->hw_stats = NULL;
8736 }
8737 }
8738
8739 /*
8740 * Must not be invoked with interrupt sources disabled and
8741  * the hardware shut down. Can sleep.
8742 */
8743 static int tg3_alloc_consistent(struct tg3 *tp)
8744 {
8745 int i;
8746
8747 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8748 sizeof(struct tg3_hw_stats),
8749 &tp->stats_mapping, GFP_KERNEL);
8750 if (!tp->hw_stats)
8751 goto err_out;
8752
8753 for (i = 0; i < tp->irq_cnt; i++) {
8754 struct tg3_napi *tnapi = &tp->napi[i];
8755 struct tg3_hw_status *sblk;
8756
8757 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8758 TG3_HW_STATUS_SIZE,
8759 &tnapi->status_mapping,
8760 GFP_KERNEL);
8761 if (!tnapi->hw_status)
8762 goto err_out;
8763
8764 sblk = tnapi->hw_status;
8765
8766 if (tg3_flag(tp, ENABLE_RSS)) {
8767 u16 *prodptr = NULL;
8768
8769 /*
8770 * When RSS is enabled, the status block format changes
8771 * slightly. The "rx_jumbo_consumer", "reserved",
8772 * and "rx_mini_consumer" members get mapped to the
8773 * other three rx return ring producer indexes.
8774 */
8775 switch (i) {
8776 case 1:
8777 prodptr = &sblk->idx[0].rx_producer;
8778 break;
8779 case 2:
8780 prodptr = &sblk->rx_jumbo_consumer;
8781 break;
8782 case 3:
8783 prodptr = &sblk->reserved;
8784 break;
8785 case 4:
8786 prodptr = &sblk->rx_mini_consumer;
8787 break;
8788 }
8789 tnapi->rx_rcb_prod_idx = prodptr;
8790 } else {
8791 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8792 }
8793 }
8794
8795 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8796 goto err_out;
8797
8798 return 0;
8799
8800 err_out:
8801 tg3_free_consistent(tp);
8802 return -ENOMEM;
8803 }
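/* Under RSS, vector 0 ends up with rx_rcb_prod_idx == NULL, which is
 * consistent with tg3_mem_rx_acquire() allocating no return ring for
 * it; the rx paths skip vectors whose rx_rcb stays NULL, so the NULL
 * pointer should never be dereferenced.
 */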
8804
8805 #define MAX_WAIT_CNT 1000
8806
8807 /* To stop a block, clear the enable bit and poll till it
8808 * clears. tp->lock is held.
8809 */
8810 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8811 {
8812 unsigned int i;
8813 u32 val;
8814
8815 if (tg3_flag(tp, 5705_PLUS)) {
8816 switch (ofs) {
8817 case RCVLSC_MODE:
8818 case DMAC_MODE:
8819 case MBFREE_MODE:
8820 case BUFMGR_MODE:
8821 case MEMARB_MODE:
8822 /* We can't enable/disable these bits of the
8823 * 5705/5750, just say success.
8824 */
8825 return 0;
8826
8827 default:
8828 break;
8829 }
8830 }
8831
8832 val = tr32(ofs);
8833 val &= ~enable_bit;
8834 tw32_f(ofs, val);
8835
8836 for (i = 0; i < MAX_WAIT_CNT; i++) {
8837 if (pci_channel_offline(tp->pdev)) {
8838 dev_err(&tp->pdev->dev,
8839 "tg3_stop_block device offline, "
8840 "ofs=%lx enable_bit=%x\n",
8841 ofs, enable_bit);
8842 return -ENODEV;
8843 }
8844
8845 udelay(100);
8846 val = tr32(ofs);
8847 if ((val & enable_bit) == 0)
8848 break;
8849 }
8850
8851 if (i == MAX_WAIT_CNT && !silent) {
8852 dev_err(&tp->pdev->dev,
8853 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8854 ofs, enable_bit);
8855 return -ENODEV;
8856 }
8857
8858 return 0;
8859 }
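/* Worst case, the loop above polls for MAX_WAIT_CNT * 100us = 100ms
 * before declaring the block stuck.
 */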
8860
8861 /* tp->lock is held. */
8862 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8863 {
8864 int i, err;
8865
8866 tg3_disable_ints(tp);
8867
8868 if (pci_channel_offline(tp->pdev)) {
8869 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8870 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8871 err = -ENODEV;
8872 goto err_no_dev;
8873 }
8874
8875 tp->rx_mode &= ~RX_MODE_ENABLE;
8876 tw32_f(MAC_RX_MODE, tp->rx_mode);
8877 udelay(10);
8878
8879 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8880 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8881 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8882 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8883 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8884 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8885
8886 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8887 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8888 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8889 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8890 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8891 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8892 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8893
8894 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8895 tw32_f(MAC_MODE, tp->mac_mode);
8896 udelay(40);
8897
8898 tp->tx_mode &= ~TX_MODE_ENABLE;
8899 tw32_f(MAC_TX_MODE, tp->tx_mode);
8900
8901 for (i = 0; i < MAX_WAIT_CNT; i++) {
8902 udelay(100);
8903 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8904 break;
8905 }
8906 if (i >= MAX_WAIT_CNT) {
8907 dev_err(&tp->pdev->dev,
8908 "%s timed out, TX_MODE_ENABLE will not clear "
8909 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8910 err |= -ENODEV;
8911 }
8912
8913 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8914 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8915 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8916
8917 tw32(FTQ_RESET, 0xffffffff);
8918 tw32(FTQ_RESET, 0x00000000);
8919
8920 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8921 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8922
8923 err_no_dev:
8924 for (i = 0; i < tp->irq_cnt; i++) {
8925 struct tg3_napi *tnapi = &tp->napi[i];
8926 if (tnapi->hw_status)
8927 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8928 }
8929
8930 return err;
8931 }
8932
8933 /* Save PCI command register before chip reset */
8934 static void tg3_save_pci_state(struct tg3 *tp)
8935 {
8936 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8937 }
8938
8939 /* Restore PCI state after chip reset */
8940 static void tg3_restore_pci_state(struct tg3 *tp)
8941 {
8942 u32 val;
8943
8944 /* Re-enable indirect register accesses. */
8945 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8946 tp->misc_host_ctrl);
8947
8948 /* Set MAX PCI retry to zero. */
8949 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8950 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8951 tg3_flag(tp, PCIX_MODE))
8952 val |= PCISTATE_RETRY_SAME_DMA;
8953 /* Allow reads and writes to the APE register and memory space. */
8954 if (tg3_flag(tp, ENABLE_APE))
8955 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8956 PCISTATE_ALLOW_APE_SHMEM_WR |
8957 PCISTATE_ALLOW_APE_PSPACE_WR;
8958 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8959
8960 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8961
8962 if (!tg3_flag(tp, PCI_EXPRESS)) {
8963 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8964 tp->pci_cacheline_sz);
8965 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8966 tp->pci_lat_timer);
8967 }
8968
8969 /* Make sure PCI-X relaxed ordering bit is clear. */
8970 if (tg3_flag(tp, PCIX_MODE)) {
8971 u16 pcix_cmd;
8972
8973 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8974 &pcix_cmd);
8975 pcix_cmd &= ~PCI_X_CMD_ERO;
8976 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8977 pcix_cmd);
8978 }
8979
8980 if (tg3_flag(tp, 5780_CLASS)) {
8981
8982 /* Chip reset on 5780 will reset MSI enable bit,
8983 		 * so we need to restore it.
8984 */
8985 if (tg3_flag(tp, USING_MSI)) {
8986 u16 ctrl;
8987
8988 pci_read_config_word(tp->pdev,
8989 tp->msi_cap + PCI_MSI_FLAGS,
8990 &ctrl);
8991 pci_write_config_word(tp->pdev,
8992 tp->msi_cap + PCI_MSI_FLAGS,
8993 ctrl | PCI_MSI_FLAGS_ENABLE);
8994 val = tr32(MSGINT_MODE);
8995 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8996 }
8997 }
8998 }
8999
9000 static void tg3_override_clk(struct tg3 *tp)
9001 {
9002 u32 val;
9003
9004 switch (tg3_asic_rev(tp)) {
9005 case ASIC_REV_5717:
9006 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9007 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9008 TG3_CPMU_MAC_ORIDE_ENABLE);
9009 break;
9010
9011 case ASIC_REV_5719:
9012 case ASIC_REV_5720:
9013 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9014 break;
9015
9016 default:
9017 return;
9018 }
9019 }
9020
9021 static void tg3_restore_clk(struct tg3 *tp)
9022 {
9023 u32 val;
9024
9025 switch (tg3_asic_rev(tp)) {
9026 case ASIC_REV_5717:
9027 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9028 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9029 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9030 break;
9031
9032 case ASIC_REV_5719:
9033 case ASIC_REV_5720:
9034 val = tr32(TG3_CPMU_CLCK_ORIDE);
9035 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9036 break;
9037
9038 default:
9039 return;
9040 }
9041 }
9042
9043 /* tp->lock is held. */
9044 static int tg3_chip_reset(struct tg3 *tp)
9045 __releases(tp->lock)
9046 __acquires(tp->lock)
9047 {
9048 u32 val;
9049 void (*write_op)(struct tg3 *, u32, u32);
9050 int i, err;
9051
9052 if (!pci_device_is_present(tp->pdev))
9053 return -ENODEV;
9054
9055 tg3_nvram_lock(tp);
9056
9057 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9058
9059 /* No matching tg3_nvram_unlock() after this because
9060 * chip reset below will undo the nvram lock.
9061 */
9062 tp->nvram_lock_cnt = 0;
9063
9064 /* GRC_MISC_CFG core clock reset will clear the memory
9065 * enable bit in PCI register 4 and the MSI enable bit
9066 * on some chips, so we save relevant registers here.
9067 */
9068 tg3_save_pci_state(tp);
9069
9070 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9071 tg3_flag(tp, 5755_PLUS))
9072 tw32(GRC_FASTBOOT_PC, 0);
9073
9074 /*
9075 * We must avoid the readl() that normally takes place.
9076 * It locks machines, causes machine checks, and other
9077 * fun things. So, temporarily disable the 5701
9078 	 * hardware workaround while we do the reset.
9079 */
9080 write_op = tp->write32;
9081 if (write_op == tg3_write_flush_reg32)
9082 tp->write32 = tg3_write32;
9083
9084 /* Prevent the irq handler from reading or writing PCI registers
9085 * during chip reset when the memory enable bit in the PCI command
9086 	 * register may be cleared. The chip does not generate interrupts
9087 * at this time, but the irq handler may still be called due to irq
9088 * sharing or irqpoll.
9089 */
9090 tg3_flag_set(tp, CHIP_RESETTING);
9091 for (i = 0; i < tp->irq_cnt; i++) {
9092 struct tg3_napi *tnapi = &tp->napi[i];
9093 if (tnapi->hw_status) {
9094 tnapi->hw_status->status = 0;
9095 tnapi->hw_status->status_tag = 0;
9096 }
9097 tnapi->last_tag = 0;
9098 tnapi->last_irq_tag = 0;
9099 }
9100 smp_mb();
9101
9102 tg3_full_unlock(tp);
9103
9104 for (i = 0; i < tp->irq_cnt; i++)
9105 synchronize_irq(tp->napi[i].irq_vec);
9106
9107 tg3_full_lock(tp, 0);
9108
9109 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9110 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9111 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9112 }
9113
9114 /* do the reset */
9115 val = GRC_MISC_CFG_CORECLK_RESET;
9116
9117 if (tg3_flag(tp, PCI_EXPRESS)) {
9118 /* Force PCIe 1.0a mode */
9119 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9120 !tg3_flag(tp, 57765_PLUS) &&
9121 tr32(TG3_PCIE_PHY_TSTCTL) ==
9122 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9123 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9124
9125 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9126 tw32(GRC_MISC_CFG, (1 << 29));
9127 val |= (1 << 29);
9128 }
9129 }
9130
9131 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9132 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9133 tw32(GRC_VCPU_EXT_CTRL,
9134 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9135 }
9136
9137 /* Set the clock to the highest frequency to avoid timeouts. With link
9138 * aware mode, the clock speed could be slow and bootcode does not
9139 * complete within the expected time. Override the clock to allow the
9140 * bootcode to finish sooner and then restore it.
9141 */
9142 tg3_override_clk(tp);
9143
9144 /* Manage gphy power for all CPMU absent PCIe devices. */
9145 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9146 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9147
9148 tw32(GRC_MISC_CFG, val);
9149
9150 /* restore 5701 hardware bug workaround write method */
9151 tp->write32 = write_op;
9152
9153 /* Unfortunately, we have to delay before the PCI read back.
9154 	 * Some 575X chips will not even respond to a PCI cfg access
9155 * when the reset command is given to the chip.
9156 *
9157 * How do these hardware designers expect things to work
9158 * properly if the PCI write is posted for a long period
9159 * of time? It is always necessary to have some method by
9160 * which a register read back can occur to push the write
9161 * out which does the reset.
9162 *
9163 * For most tg3 variants the trick below was working.
9164 * Ho hum...
9165 */
9166 udelay(120);
9167
9168 /* Flush PCI posted writes. The normal MMIO registers
9169 * are inaccessible at this time so this is the only
9170 	 * way to do this reliably (actually, this is no longer
9171 * the case, see above). I tried to use indirect
9172 * register read/write but this upset some 5701 variants.
9173 */
9174 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9175
9176 udelay(120);
9177
9178 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9179 u16 val16;
9180
9181 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9182 int j;
9183 u32 cfg_val;
9184
9185 /* Wait for link training to complete. */
9186 for (j = 0; j < 5000; j++)
9187 udelay(100);
9188
9189 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9190 pci_write_config_dword(tp->pdev, 0xc4,
9191 cfg_val | (1 << 15));
9192 }
9193
9194 /* Clear the "no snoop" and "relaxed ordering" bits. */
9195 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9196 /*
9197 * Older PCIe devices only support the 128 byte
9198 * MPS setting. Enforce the restriction.
9199 */
9200 if (!tg3_flag(tp, CPMU_PRESENT))
9201 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9202 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9203
9204 /* Clear error status */
9205 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9206 PCI_EXP_DEVSTA_CED |
9207 PCI_EXP_DEVSTA_NFED |
9208 PCI_EXP_DEVSTA_FED |
9209 PCI_EXP_DEVSTA_URD);
9210 }
9211
9212 tg3_restore_pci_state(tp);
9213
9214 tg3_flag_clear(tp, CHIP_RESETTING);
9215 tg3_flag_clear(tp, ERROR_PROCESSED);
9216
9217 val = 0;
9218 if (tg3_flag(tp, 5780_CLASS))
9219 val = tr32(MEMARB_MODE);
9220 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9221
9222 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9223 tg3_stop_fw(tp);
9224 tw32(0x5000, 0x400);
9225 }
9226
9227 if (tg3_flag(tp, IS_SSB_CORE)) {
9228 /*
9229 * BCM4785: In order to avoid repercussions from using
9230 * potentially defective internal ROM, stop the Rx RISC CPU,
9231 * which is not required.
9232 */
9233 tg3_stop_fw(tp);
9234 tg3_halt_cpu(tp, RX_CPU_BASE);
9235 }
9236
9237 err = tg3_poll_fw(tp);
9238 if (err)
9239 return err;
9240
9241 tw32(GRC_MODE, tp->grc_mode);
9242
9243 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9244 val = tr32(0xc4);
9245
9246 tw32(0xc4, val | (1 << 15));
9247 }
9248
9249 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9250 tg3_asic_rev(tp) == ASIC_REV_5705) {
9251 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9252 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9253 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9254 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9255 }
9256
9257 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9258 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9259 val = tp->mac_mode;
9260 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9261 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9262 val = tp->mac_mode;
9263 } else
9264 val = 0;
9265
9266 tw32_f(MAC_MODE, val);
9267 udelay(40);
9268
9269 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9270
9271 tg3_mdio_start(tp);
9272
9273 if (tg3_flag(tp, PCI_EXPRESS) &&
9274 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9275 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9276 !tg3_flag(tp, 57765_PLUS)) {
9277 val = tr32(0x7c00);
9278
9279 tw32(0x7c00, val | (1 << 25));
9280 }
9281
9282 tg3_restore_clk(tp);
9283
9284 /* Increase the core clock speed to fix tx timeout issue for 5762
9285 * with 100Mbps link speed.
9286 */
9287 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9288 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9289 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9290 TG3_CPMU_MAC_ORIDE_ENABLE);
9291 }
9292
9293 /* Reprobe ASF enable state. */
9294 tg3_flag_clear(tp, ENABLE_ASF);
9295 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9296 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9297
9298 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9299 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9300 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9301 u32 nic_cfg;
9302
9303 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9304 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9305 tg3_flag_set(tp, ENABLE_ASF);
9306 tp->last_event_jiffies = jiffies;
9307 if (tg3_flag(tp, 5750_PLUS))
9308 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9309
9310 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9311 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9312 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9313 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9314 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9315 }
9316 }
9317
9318 return 0;
9319 }
9320
9321 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9322 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9323 static void __tg3_set_rx_mode(struct net_device *);
9324
9325 /* tp->lock is held. */
9326 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9327 {
9328 int err;
9329
9330 tg3_stop_fw(tp);
9331
9332 tg3_write_sig_pre_reset(tp, kind);
9333
9334 tg3_abort_hw(tp, silent);
9335 err = tg3_chip_reset(tp);
9336
9337 __tg3_set_mac_addr(tp, false);
9338
9339 tg3_write_sig_legacy(tp, kind);
9340 tg3_write_sig_post_reset(tp, kind);
9341
9342 if (tp->hw_stats) {
9343 /* Save the stats across chip resets... */
9344 tg3_get_nstats(tp, &tp->net_stats_prev);
9345 tg3_get_estats(tp, &tp->estats_prev);
9346
9347 /* And make sure the next sample is new data */
9348 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9349 }
9350
9351 return err;
9352 }
9353
9354 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9355 {
9356 struct tg3 *tp = netdev_priv(dev);
9357 struct sockaddr *addr = p;
9358 int err = 0;
9359 bool skip_mac_1 = false;
9360
9361 if (!is_valid_ether_addr(addr->sa_data))
9362 return -EADDRNOTAVAIL;
9363
9364 eth_hw_addr_set(dev, addr->sa_data);
9365
9366 if (!netif_running(dev))
9367 return 0;
9368
9369 if (tg3_flag(tp, ENABLE_ASF)) {
9370 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9371
9372 addr0_high = tr32(MAC_ADDR_0_HIGH);
9373 addr0_low = tr32(MAC_ADDR_0_LOW);
9374 addr1_high = tr32(MAC_ADDR_1_HIGH);
9375 addr1_low = tr32(MAC_ADDR_1_LOW);
9376
9377 /* Skip MAC addr 1 if ASF is using it. */
9378 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9379 !(addr1_high == 0 && addr1_low == 0))
9380 skip_mac_1 = true;
9381 }
9382 spin_lock_bh(&tp->lock);
9383 __tg3_set_mac_addr(tp, skip_mac_1);
9384 __tg3_set_rx_mode(dev);
9385 spin_unlock_bh(&tp->lock);
9386
9387 return err;
9388 }
9389
9390 /* tp->lock is held. */
9391 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9392 dma_addr_t mapping, u32 maxlen_flags,
9393 u32 nic_addr)
9394 {
9395 tg3_write_mem(tp,
9396 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9397 ((u64) mapping >> 32));
9398 tg3_write_mem(tp,
9399 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9400 ((u64) mapping & 0xffffffff));
9401 tg3_write_mem(tp,
9402 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9403 maxlen_flags);
9404
9405 if (!tg3_flag(tp, 5705_PLUS))
9406 tg3_write_mem(tp,
9407 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9408 nic_addr);
9409 }
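/* A BDINFO block in NIC SRAM is thus four 32-bit words: the host ring
 * address split into high/low halves, a maxlen/flags word, and, on
 * pre-5705 chips only, the ring's address in NIC-local memory.
 */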
9410
9411
9412 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9413 {
9414 int i = 0;
9415
9416 if (!tg3_flag(tp, ENABLE_TSS)) {
9417 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9418 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9419 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9420 } else {
9421 tw32(HOSTCC_TXCOL_TICKS, 0);
9422 tw32(HOSTCC_TXMAX_FRAMES, 0);
9423 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9424
9425 for (; i < tp->txq_cnt; i++) {
9426 u32 reg;
9427
9428 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9429 tw32(reg, ec->tx_coalesce_usecs);
9430 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9431 tw32(reg, ec->tx_max_coalesced_frames);
9432 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9433 tw32(reg, ec->tx_max_coalesced_frames_irq);
9434 }
9435 }
9436
9437 for (; i < tp->irq_max - 1; i++) {
9438 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9439 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9440 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9441 }
9442 }
9443
9444 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9445 {
9446 int i = 0;
9447 u32 limit = tp->rxq_cnt;
9448
9449 if (!tg3_flag(tp, ENABLE_RSS)) {
9450 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9451 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9452 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9453 limit--;
9454 } else {
9455 tw32(HOSTCC_RXCOL_TICKS, 0);
9456 tw32(HOSTCC_RXMAX_FRAMES, 0);
9457 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9458 }
9459
9460 for (; i < limit; i++) {
9461 u32 reg;
9462
9463 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9464 tw32(reg, ec->rx_coalesce_usecs);
9465 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9466 tw32(reg, ec->rx_max_coalesced_frames);
9467 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9468 tw32(reg, ec->rx_max_coalesced_frames_irq);
9469 }
9470
9471 for (; i < tp->irq_max - 1; i++) {
9472 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9473 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9474 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9475 }
9476 }
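/* In both coalescing helpers above, each extra vector's three
 * registers sit 0x18 bytes past the previous vector's, hence the
 * i * 0x18 stride; slots for unused vectors are explicitly zeroed.
 */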
9477
9478 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9479 {
9480 tg3_coal_tx_init(tp, ec);
9481 tg3_coal_rx_init(tp, ec);
9482
9483 if (!tg3_flag(tp, 5705_PLUS)) {
9484 u32 val = ec->stats_block_coalesce_usecs;
9485
9486 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9487 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9488
9489 if (!tp->link_up)
9490 val = 0;
9491
9492 tw32(HOSTCC_STAT_COAL_TICKS, val);
9493 }
9494 }
9495
9496 /* tp->lock is held. */
9497 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9498 {
9499 u32 txrcb, limit;
9500
9501 /* Disable all transmit rings but the first. */
9502 if (!tg3_flag(tp, 5705_PLUS))
9503 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9504 else if (tg3_flag(tp, 5717_PLUS))
9505 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9506 else if (tg3_flag(tp, 57765_CLASS) ||
9507 tg3_asic_rev(tp) == ASIC_REV_5762)
9508 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9509 else
9510 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9511
9512 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9513 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9514 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9515 BDINFO_FLAGS_DISABLED);
9516 }
9517
9518 /* tp->lock is held. */
9519 static void tg3_tx_rcbs_init(struct tg3 *tp)
9520 {
9521 int i = 0;
9522 u32 txrcb = NIC_SRAM_SEND_RCB;
9523
9524 if (tg3_flag(tp, ENABLE_TSS))
9525 i++;
9526
9527 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9528 struct tg3_napi *tnapi = &tp->napi[i];
9529
9530 if (!tnapi->tx_ring)
9531 continue;
9532
9533 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9534 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9535 NIC_SRAM_TX_BUFFER_DESC);
9536 }
9537 }

/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}

/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}

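/* Quiesce and re-arm the ring control blocks: disable every TX and RX
 * return ring but the first, zero the producer/consumer mailboxes,
 * clear the status blocks in host RAM and reprogram their DMA
 * addresses, then re-initialize the RCBs of the rings actually in use.
 */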
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}

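/* Program the RX buffer-descriptor replenish thresholds. The NIC-side
 * threshold is capped at half the on-chip BD cache and at
 * rx_std_max_post, the host-side one at an eighth of the configured
 * ring size; the hardware gets the smaller of the two. 57765+ devices
 * also get low-water marks for the BD caches themselves.
 */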
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}

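/* Bit-serial, LSB-first CRC-32 over @buf, using the little-endian form
 * of the Ethernet polynomial (CRC32_POLY_LE). The MAC hashes multicast
 * addresses with this CRC to index its hash filter registers (see
 * __tg3_set_rx_mode below).
 */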
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= CRC32_POLY_LE;
		}
	}

	return ~reg;
}

static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

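/* Recompute the RX filtering state from dev->flags and the device's
 * address lists. Each multicast address is hashed (via calc_crc above)
 * to one of the 128 filter bits spread across the four 32-bit MAC_HASH
 * registers; unicast addresses beyond the MAC-address filter capacity
 * force promiscuous mode instead.
 */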
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the MAC address filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

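/* Write the RSS indirection table to the MAC. Entries are 4 bits wide
 * and are packed eight to a 32-bit register, starting at
 * MAC_RSS_INDIR_TBL_0.
 */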
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}

static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}

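/* Full chip (re)initialization: halt the firmware and any in-flight
 * DMA, issue a chip reset, then reprogram the clocks, workarounds,
 * rings, buffer manager, DMA engines, coalescing, RSS and MAC state,
 * and finally bring the PHY back up. Returns 0 or a negative errno.
 */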
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon. This bit has no effect on any
	 * other revision. But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive. For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* On some AMD platforms, the MRRS is restricted to 4000 because
	 * of a southbridge limitation. As a workaround, the driver sets
	 * the MRRS to 2048 instead of the default 4096.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
	}

	/* Set up the timer prescaler register. The clock is always
	 * 66 MHz, and a prescaler value of 65 divides it down to a
	 * 1 MHz tick (66 MHz / (65 + 1)).
	 */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 * RCVDBDI_STD_BD:	standard eth size rx ring
	 * RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 * RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 * TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 * TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 * TG3_BDINFO_NIC_ADDR:		location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, false);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE)) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address. See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= tg3_lso_rd_dma_workaround_bit(tp);
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors for the firmware download. If the
		 * download fails, the device will operate with EEE
		 * disabled.
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		u32 rss_key[10];

		tg3_rss_write_indir_tbl(tp);

		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));

		for (i = 0; i < 10; i++)
			tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
	}

	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V */
			/* only if the signal pre-emphasis bit is not set */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, false);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
		fallthrough;
	case 15:
		tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
		fallthrough;
	case 14:
		tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
		fallthrough;
	case 13:
		tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
		fallthrough;
	case 12:
		tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
		fallthrough;
	case 11:
		tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
		fallthrough;
	case 10:
		tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
		fallthrough;
	case 9:
		tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
		fallthrough;
	case 8:
		tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
		fallthrough;
	case 7:
		tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
		fallthrough;
	case 6:
		tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
		fallthrough;
	case 5:
		tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
		fallthrough;
	case 4:
		/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_5SEC);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing. Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}

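/* Optional hwmon support: the APE scratchpad is scanned for OCIR
 * sensor records and, when any are present, a hwmon device exposing
 * the chip temperature (in millidegrees Celsius, per hwmon convention)
 * is registered.
 */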
#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	u32 off, len = TG3_OCIR_LEN;
	int i;

	for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, len);
	}
}

/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct tg3 *tp = dev_get_drvdata(dev);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature * 1000);
}


static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);

static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
	}
}

static void tg3_hwmon_open(struct tg3 *tp)
{
	int i;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
							  tp, tg3_groups);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
	}
}
#else
static inline void tg3_hwmon_close(struct tg3 *tp) { }
static inline void tg3_hwmon_open(struct tg3 *tp) { }
#endif /* CONFIG_TIGON3_HWMON */


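/* Fold a 32-bit hardware counter into a 64-bit software counter,
 * carrying into the high word whenever the low word wraps.
 */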
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)

static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}

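/* Work around occasionally lost MSIs: if a vector has work pending but
 * its consumer indices have not moved since the previous check, allow
 * one grace tick and then fake the interrupt by calling the MSI
 * handler directly.
 */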
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}

static void tg3_timer(struct timer_list *t)
{
	struct tg3 *tp = from_timer(tp, t, timer);

	spin_lock(&tp->lock);

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
		spin_unlock(&tp->lock);
		goto restart_timer;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the CPU is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive. In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time. Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset. This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	/* Update the APE heartbeat every 5 seconds. */
	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

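/* The driver timer runs at 1 Hz when tagged status is usable, and at
 * 10 Hz otherwise (including on 5717 and 57765-class devices, which
 * also need the missed-MSI check above).
 */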
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	timer_setup(&tp->timer, tg3_timer, 0);
}

static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}

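/* Process-context worker for deferred chip resets, scheduled via
 * tg3_reset_task_schedule(). Stops the PHY and netif, reinitializes
 * the hardware under rtnl and tp->lock, and closes the device if
 * reinitialization fails.
 */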
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	rtnl_lock();
	tg3_full_lock(tp, 0);

	if (tp->pcierr_recovery || !netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		rtnl_unlock();
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		/* Clear this flag so that tg3_reset_task_cancel() will not
		 * call cancel_work_sync() and wait forever.
		 */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		dev_close(tp->dev);
		goto out;
	}

	tg3_netif_start(tp);
	tg3_full_unlock(tp);
	tg3_phy_start(tp);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
	rtnl_unlock();
}

static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		if (tnapi->tx_buffers && tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-txrx-%d", tp->dev->name, irq_num);
		else if (tnapi->tx_buffers)
			snprintf(name, IFNAMSIZ,
				 "%s-tx-%d", tp->dev->name, irq_num);
		else if (tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-rx-%d", tp->dev->name, irq_num);
		else
			snprintf(name, IFNAMSIZ,
				 "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}

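/* Verify that the device can actually raise an interrupt: swap in a
 * minimal test ISR, force a coalescing-engine "now" event, and poll
 * the interrupt mailbox for up to roughly 50 ms before restoring the
 * real handler.
 */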
tg3_test_interrupt(struct tg3 * tp)11258 static int tg3_test_interrupt(struct tg3 *tp)
11259 {
11260 struct tg3_napi *tnapi = &tp->napi[0];
11261 struct net_device *dev = tp->dev;
11262 int err, i, intr_ok = 0;
11263 u32 val;
11264
11265 if (!netif_running(dev))
11266 return -ENODEV;
11267
11268 tg3_disable_ints(tp);
11269
11270 free_irq(tnapi->irq_vec, tnapi);
11271
11272 /*
11273 * Turn off MSI one shot mode. Otherwise this test has no
11274 * observable way to know whether the interrupt was delivered.
11275 */
11276 if (tg3_flag(tp, 57765_PLUS)) {
11277 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11278 tw32(MSGINT_MODE, val);
11279 }
11280
11281 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11282 IRQF_SHARED, dev->name, tnapi);
11283 if (err)
11284 return err;
11285
11286 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11287 tg3_enable_ints(tp);
11288
11289 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11290 tnapi->coal_now);
11291
11292 for (i = 0; i < 5; i++) {
11293 u32 int_mbox, misc_host_ctrl;
11294
11295 int_mbox = tr32_mailbox(tnapi->int_mbox);
11296 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11297
11298 if ((int_mbox != 0) ||
11299 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11300 intr_ok = 1;
11301 break;
11302 }
11303
11304 if (tg3_flag(tp, 57765_PLUS) &&
11305 tnapi->hw_status->status_tag != tnapi->last_tag)
11306 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11307
11308 msleep(10);
11309 }
11310
11311 tg3_disable_ints(tp);
11312
11313 free_irq(tnapi->irq_vec, tnapi);
11314
11315 err = tg3_request_irq(tp, 0);
11316
11317 if (err)
11318 return err;
11319
11320 if (intr_ok) {
11321 /* Reenable MSI one shot mode. */
11322 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11323 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11324 tw32(MSGINT_MODE, val);
11325 }
11326 return 0;
11327 }
11328
11329 return -EIO;
11330 }
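/* The test above works by swapping in tg3_test_isr, forcing a
 * coalescing-engine interrupt via HOSTCC_MODE, and then polling the
 * interrupt mailbox for up to 5 x 10ms. A nonzero mailbox value, or a
 * masked PCI interrupt (proof the ISR ran), confirms the vector is
 * actually wired up before the driver commits to MSI.
 */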
11331
11332 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11333  * INTx mode is successfully restored.
11334  */
11335 static int tg3_test_msi(struct tg3 *tp)
11336 {
11337 int err;
11338 u16 pci_cmd;
11339
11340 if (!tg3_flag(tp, USING_MSI))
11341 return 0;
11342
11343 /* Turn off SERR reporting in case MSI terminates with Master
11344 * Abort.
11345 */
11346 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11347 pci_write_config_word(tp->pdev, PCI_COMMAND,
11348 pci_cmd & ~PCI_COMMAND_SERR);
11349
11350 err = tg3_test_interrupt(tp);
11351
11352 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11353
11354 if (!err)
11355 return 0;
11356
11357 /* other failures */
11358 if (err != -EIO)
11359 return err;
11360
11361 /* MSI test failed, go back to INTx mode */
11362 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11363 "to INTx mode. Please report this failure to the PCI "
11364 "maintainer and include system chipset information\n");
11365
11366 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11367
11368 pci_disable_msi(tp->pdev);
11369
11370 tg3_flag_clear(tp, USING_MSI);
11371 tp->napi[0].irq_vec = tp->pdev->irq;
11372
11373 err = tg3_request_irq(tp, 0);
11374 if (err)
11375 return err;
11376
11377 /* Need to reset the chip because the MSI cycle may have terminated
11378 * with Master Abort.
11379 */
11380 tg3_full_lock(tp, 1);
11381
11382 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11383 err = tg3_init_hw(tp, true);
11384
11385 tg3_full_unlock(tp);
11386
11387 if (err)
11388 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11389
11390 return err;
11391 }
11392
11393 static int tg3_request_firmware(struct tg3 *tp)
11394 {
11395 const struct tg3_firmware_hdr *fw_hdr;
11396
11397 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11398 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11399 tp->fw_needed);
11400 return -ENOENT;
11401 }
11402
11403 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11404
11405 	/* Firmware blob starts with version numbers, followed by
11406 	 * start address and _full_ length including BSS sections
11407 	 * (which must be longer than the actual data, of course).
11408 	 */
11409
11410 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11411 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11412 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11413 tp->fw_len, tp->fw_needed);
11414 release_firmware(tp->fw);
11415 tp->fw = NULL;
11416 return -EINVAL;
11417 }
11418
11419 /* We no longer need firmware; we have it. */
11420 tp->fw_needed = NULL;
11421 return 0;
11422 }
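/* For reference, the header parsed above is, as declared in tg3.h
 * (roughly):
 *
 *	struct tg3_firmware_hdr {
 *		__be32 version;
 *		__be32 base_addr;
 *		__be32 len;
 *	};
 *
 * where len is the full image length including BSS, so tp->fw_len may
 * legitimately exceed the payload bytes that follow the header.
 */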
11423
11424 static u32 tg3_irq_count(struct tg3 *tp)
11425 {
11426 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11427
11428 if (irq_cnt > 1) {
11429 /* We want as many rx rings enabled as there are cpus.
11430 * In multiqueue MSI-X mode, the first MSI-X vector
11431 * only deals with link interrupts, etc, so we add
11432 * one to the number of vectors we are requesting.
11433 */
11434 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11435 }
11436
11437 return irq_cnt;
11438 }
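/* Worked example: with rxq_cnt = 4 and txq_cnt = 1 on a chip whose
 * irq_max is 5, max(4, 1) = 4 ring vectors plus the dedicated vector-0
 * link interrupt gives 5 requested vectors; the min_t() clamp keeps
 * the result within tp->irq_max.
 */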
11439
11440 static bool tg3_enable_msix(struct tg3 *tp)
11441 {
11442 int i, rc;
11443 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11444
11445 tp->txq_cnt = tp->txq_req;
11446 tp->rxq_cnt = tp->rxq_req;
11447 if (!tp->rxq_cnt)
11448 tp->rxq_cnt = netif_get_num_default_rss_queues();
11449 if (tp->rxq_cnt > tp->rxq_max)
11450 tp->rxq_cnt = tp->rxq_max;
11451
11452 /* Disable multiple TX rings by default. Simple round-robin hardware
11453 * scheduling of the TX rings can cause starvation of rings with
11454 * small packets when other rings have TSO or jumbo packets.
11455 */
11456 if (!tp->txq_req)
11457 tp->txq_cnt = 1;
11458
11459 tp->irq_cnt = tg3_irq_count(tp);
11460
11461 for (i = 0; i < tp->irq_max; i++) {
11462 msix_ent[i].entry = i;
11463 msix_ent[i].vector = 0;
11464 }
11465
11466 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11467 if (rc < 0) {
11468 return false;
11469 } else if (rc < tp->irq_cnt) {
11470 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11471 tp->irq_cnt, rc);
11472 tp->irq_cnt = rc;
11473 tp->rxq_cnt = max(rc - 1, 1);
11474 if (tp->txq_cnt)
11475 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11476 }
11477
11478 for (i = 0; i < tp->irq_max; i++)
11479 tp->napi[i].irq_vec = msix_ent[i].vector;
11480
11481 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11482 pci_disable_msix(tp->pdev);
11483 return false;
11484 }
11485
11486 if (tp->irq_cnt == 1)
11487 return true;
11488
11489 tg3_flag_set(tp, ENABLE_RSS);
11490
11491 if (tp->txq_cnt > 1)
11492 tg3_flag_set(tp, ENABLE_TSS);
11493
11494 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11495
11496 return true;
11497 }
11498
11499 static void tg3_ints_init(struct tg3 *tp)
11500 {
11501 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11502 !tg3_flag(tp, TAGGED_STATUS)) {
11503 /* All MSI supporting chips should support tagged
11504 * status. Assert that this is the case.
11505 */
11506 netdev_warn(tp->dev,
11507 "MSI without TAGGED_STATUS? Not using MSI\n");
11508 goto defcfg;
11509 }
11510
11511 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11512 tg3_flag_set(tp, USING_MSIX);
11513 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11514 tg3_flag_set(tp, USING_MSI);
11515
11516 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11517 u32 msi_mode = tr32(MSGINT_MODE);
11518 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11519 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11520 if (!tg3_flag(tp, 1SHOT_MSI))
11521 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11522 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11523 }
11524 defcfg:
11525 if (!tg3_flag(tp, USING_MSIX)) {
11526 tp->irq_cnt = 1;
11527 tp->napi[0].irq_vec = tp->pdev->irq;
11528 }
11529
11530 if (tp->irq_cnt == 1) {
11531 tp->txq_cnt = 1;
11532 tp->rxq_cnt = 1;
11533 netif_set_real_num_tx_queues(tp->dev, 1);
11534 netif_set_real_num_rx_queues(tp->dev, 1);
11535 }
11536 }
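/* Interrupt setup thus degrades gracefully: MSI-X (multiple vectors,
 * RSS/TSS capable) is tried first, then single-vector MSI, and finally
 * the legacy shared INTx line on tp->pdev->irq.
 */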
11537
11538 static void tg3_ints_fini(struct tg3 *tp)
11539 {
11540 if (tg3_flag(tp, USING_MSIX))
11541 pci_disable_msix(tp->pdev);
11542 else if (tg3_flag(tp, USING_MSI))
11543 pci_disable_msi(tp->pdev);
11544 tg3_flag_clear(tp, USING_MSI);
11545 tg3_flag_clear(tp, USING_MSIX);
11546 tg3_flag_clear(tp, ENABLE_RSS);
11547 tg3_flag_clear(tp, ENABLE_TSS);
11548 }
11549
11550 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11551 bool init)
11552 {
11553 struct net_device *dev = tp->dev;
11554 int i, err;
11555
11556 /*
11557 * Setup interrupts first so we know how
11558 * many NAPI resources to allocate
11559 */
11560 tg3_ints_init(tp);
11561
11562 tg3_rss_check_indir_tbl(tp);
11563
11564 /* The placement of this call is tied
11565 * to the setup and use of Host TX descriptors.
11566 */
11567 err = tg3_alloc_consistent(tp);
11568 if (err)
11569 goto out_ints_fini;
11570
11571 tg3_napi_init(tp);
11572
11573 tg3_napi_enable(tp);
11574
11575 for (i = 0; i < tp->irq_cnt; i++) {
11576 err = tg3_request_irq(tp, i);
11577 if (err) {
11578 for (i--; i >= 0; i--) {
11579 struct tg3_napi *tnapi = &tp->napi[i];
11580
11581 free_irq(tnapi->irq_vec, tnapi);
11582 }
11583 goto out_napi_fini;
11584 }
11585 }
11586
11587 tg3_full_lock(tp, 0);
11588
11589 if (init)
11590 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11591
11592 err = tg3_init_hw(tp, reset_phy);
11593 if (err) {
11594 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11595 tg3_free_rings(tp);
11596 }
11597
11598 tg3_full_unlock(tp);
11599
11600 if (err)
11601 goto out_free_irq;
11602
11603 if (test_irq && tg3_flag(tp, USING_MSI)) {
11604 err = tg3_test_msi(tp);
11605
11606 if (err) {
11607 tg3_full_lock(tp, 0);
11608 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11609 tg3_free_rings(tp);
11610 tg3_full_unlock(tp);
11611
11612 goto out_napi_fini;
11613 }
11614
11615 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11616 u32 val = tr32(PCIE_TRANSACTION_CFG);
11617
11618 tw32(PCIE_TRANSACTION_CFG,
11619 val | PCIE_TRANS_CFG_1SHOT_MSI);
11620 }
11621 }
11622
11623 tg3_phy_start(tp);
11624
11625 tg3_hwmon_open(tp);
11626
11627 tg3_full_lock(tp, 0);
11628
11629 tg3_timer_start(tp);
11630 tg3_flag_set(tp, INIT_COMPLETE);
11631 tg3_enable_ints(tp);
11632
11633 tg3_ptp_resume(tp);
11634
11635 tg3_full_unlock(tp);
11636
11637 netif_tx_start_all_queues(dev);
11638
11639 	/*
11640 	 * Reset the loopback feature if it was turned on while the device was
11641 	 * down, to make sure that it is installed properly now.
11642 	 */
11643 if (dev->features & NETIF_F_LOOPBACK)
11644 tg3_set_loopback(dev, dev->features);
11645
11646 return 0;
11647
11648 out_free_irq:
11649 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11650 struct tg3_napi *tnapi = &tp->napi[i];
11651 free_irq(tnapi->irq_vec, tnapi);
11652 }
11653
11654 out_napi_fini:
11655 tg3_napi_disable(tp);
11656 tg3_napi_fini(tp);
11657 tg3_free_consistent(tp);
11658
11659 out_ints_fini:
11660 tg3_ints_fini(tp);
11661
11662 return err;
11663 }
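/* Bring-up order matters here: interrupt vectors are sized first so
 * that tg3_alloc_consistent() knows how many rings to allocate, NAPI
 * contexts then wrap those rings, IRQs are requested per vector, and
 * only then is the hardware initialized and (optionally) the MSI
 * delivery test run. The error labels unwind in exactly the reverse
 * order.
 */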
11664
11665 static void tg3_stop(struct tg3 *tp)
11666 {
11667 int i;
11668
11669 tg3_reset_task_cancel(tp);
11670 tg3_netif_stop(tp);
11671
11672 tg3_timer_stop(tp);
11673
11674 tg3_hwmon_close(tp);
11675
11676 tg3_phy_stop(tp);
11677
11678 tg3_full_lock(tp, 1);
11679
11680 tg3_disable_ints(tp);
11681
11682 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11683 tg3_free_rings(tp);
11684 tg3_flag_clear(tp, INIT_COMPLETE);
11685
11686 tg3_full_unlock(tp);
11687
11688 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11689 struct tg3_napi *tnapi = &tp->napi[i];
11690 free_irq(tnapi->irq_vec, tnapi);
11691 }
11692
11693 tg3_ints_fini(tp);
11694
11695 tg3_napi_fini(tp);
11696
11697 tg3_free_consistent(tp);
11698 }
11699
11700 static int tg3_open(struct net_device *dev)
11701 {
11702 struct tg3 *tp = netdev_priv(dev);
11703 int err;
11704
11705 if (tp->pcierr_recovery) {
11706 netdev_err(dev, "Failed to open device. PCI error recovery "
11707 "in progress\n");
11708 return -EAGAIN;
11709 }
11710
11711 if (tp->fw_needed) {
11712 err = tg3_request_firmware(tp);
11713 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11714 if (err) {
11715 netdev_warn(tp->dev, "EEE capability disabled\n");
11716 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11717 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11718 netdev_warn(tp->dev, "EEE capability restored\n");
11719 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11720 }
11721 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11722 if (err)
11723 return err;
11724 } else if (err) {
11725 netdev_warn(tp->dev, "TSO capability disabled\n");
11726 tg3_flag_clear(tp, TSO_CAPABLE);
11727 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11728 netdev_notice(tp->dev, "TSO capability restored\n");
11729 tg3_flag_set(tp, TSO_CAPABLE);
11730 }
11731 }
11732
11733 tg3_carrier_off(tp);
11734
11735 err = tg3_power_up(tp);
11736 if (err)
11737 return err;
11738
11739 tg3_full_lock(tp, 0);
11740
11741 tg3_disable_ints(tp);
11742 tg3_flag_clear(tp, INIT_COMPLETE);
11743
11744 tg3_full_unlock(tp);
11745
11746 err = tg3_start(tp,
11747 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11748 true, true);
11749 if (err) {
11750 tg3_frob_aux_power(tp, false);
11751 pci_set_power_state(tp->pdev, PCI_D3hot);
11752 }
11753
11754 return err;
11755 }
11756
11757 static int tg3_close(struct net_device *dev)
11758 {
11759 struct tg3 *tp = netdev_priv(dev);
11760
11761 if (tp->pcierr_recovery) {
11762 netdev_err(dev, "Failed to close device. PCI error recovery "
11763 "in progress\n");
11764 return -EAGAIN;
11765 }
11766
11767 tg3_stop(tp);
11768
11769 if (pci_device_is_present(tp->pdev)) {
11770 tg3_power_down_prepare(tp);
11771
11772 tg3_carrier_off(tp);
11773 }
11774 return 0;
11775 }
11776
11777 static inline u64 get_stat64(tg3_stat64_t *val)
11778 {
11779 return ((u64)val->high << 32) | ((u64)val->low);
11780 }
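/* Hardware statistics are kept as two 32-bit halves; get_stat64()
 * just splices them into one u64, e.g. high = 0x1 and low = 0x2
 * yield 0x100000002.
 */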
11781
11782 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11783 {
11784 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11785
11786 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11787 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11788 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11789 u32 val;
11790
11791 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11792 tg3_writephy(tp, MII_TG3_TEST1,
11793 val | MII_TG3_TEST1_CRC_EN);
11794 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11795 } else
11796 val = 0;
11797
11798 tp->phy_crc_errors += val;
11799
11800 return tp->phy_crc_errors;
11801 }
11802
11803 return get_stat64(&hw_stats->rx_fcs_errors);
11804 }
11805
11806 #define ESTAT_ADD(member) \
11807 estats->member = old_estats->member + \
11808 get_stat64(&hw_stats->member)
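/* ESTAT_ADD() folds the live hardware counter into the snapshot kept
 * in tp->estats_prev; since the NIC counters are cleared whenever the
 * chip is halted, this sum is what preserves monotonically increasing
 * totals across resets.
 */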
11809
11810 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11811 {
11812 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11813 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11814
11815 ESTAT_ADD(rx_octets);
11816 ESTAT_ADD(rx_fragments);
11817 ESTAT_ADD(rx_ucast_packets);
11818 ESTAT_ADD(rx_mcast_packets);
11819 ESTAT_ADD(rx_bcast_packets);
11820 ESTAT_ADD(rx_fcs_errors);
11821 ESTAT_ADD(rx_align_errors);
11822 ESTAT_ADD(rx_xon_pause_rcvd);
11823 ESTAT_ADD(rx_xoff_pause_rcvd);
11824 ESTAT_ADD(rx_mac_ctrl_rcvd);
11825 ESTAT_ADD(rx_xoff_entered);
11826 ESTAT_ADD(rx_frame_too_long_errors);
11827 ESTAT_ADD(rx_jabbers);
11828 ESTAT_ADD(rx_undersize_packets);
11829 ESTAT_ADD(rx_in_length_errors);
11830 ESTAT_ADD(rx_out_length_errors);
11831 ESTAT_ADD(rx_64_or_less_octet_packets);
11832 ESTAT_ADD(rx_65_to_127_octet_packets);
11833 ESTAT_ADD(rx_128_to_255_octet_packets);
11834 ESTAT_ADD(rx_256_to_511_octet_packets);
11835 ESTAT_ADD(rx_512_to_1023_octet_packets);
11836 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11837 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11838 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11839 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11840 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11841
11842 ESTAT_ADD(tx_octets);
11843 ESTAT_ADD(tx_collisions);
11844 ESTAT_ADD(tx_xon_sent);
11845 ESTAT_ADD(tx_xoff_sent);
11846 ESTAT_ADD(tx_flow_control);
11847 ESTAT_ADD(tx_mac_errors);
11848 ESTAT_ADD(tx_single_collisions);
11849 ESTAT_ADD(tx_mult_collisions);
11850 ESTAT_ADD(tx_deferred);
11851 ESTAT_ADD(tx_excessive_collisions);
11852 ESTAT_ADD(tx_late_collisions);
11853 ESTAT_ADD(tx_collide_2times);
11854 ESTAT_ADD(tx_collide_3times);
11855 ESTAT_ADD(tx_collide_4times);
11856 ESTAT_ADD(tx_collide_5times);
11857 ESTAT_ADD(tx_collide_6times);
11858 ESTAT_ADD(tx_collide_7times);
11859 ESTAT_ADD(tx_collide_8times);
11860 ESTAT_ADD(tx_collide_9times);
11861 ESTAT_ADD(tx_collide_10times);
11862 ESTAT_ADD(tx_collide_11times);
11863 ESTAT_ADD(tx_collide_12times);
11864 ESTAT_ADD(tx_collide_13times);
11865 ESTAT_ADD(tx_collide_14times);
11866 ESTAT_ADD(tx_collide_15times);
11867 ESTAT_ADD(tx_ucast_packets);
11868 ESTAT_ADD(tx_mcast_packets);
11869 ESTAT_ADD(tx_bcast_packets);
11870 ESTAT_ADD(tx_carrier_sense_errors);
11871 ESTAT_ADD(tx_discards);
11872 ESTAT_ADD(tx_errors);
11873
11874 ESTAT_ADD(dma_writeq_full);
11875 ESTAT_ADD(dma_write_prioq_full);
11876 ESTAT_ADD(rxbds_empty);
11877 ESTAT_ADD(rx_discards);
11878 ESTAT_ADD(rx_errors);
11879 ESTAT_ADD(rx_threshold_hit);
11880
11881 ESTAT_ADD(dma_readq_full);
11882 ESTAT_ADD(dma_read_prioq_full);
11883 ESTAT_ADD(tx_comp_queue_full);
11884
11885 ESTAT_ADD(ring_set_send_prod_index);
11886 ESTAT_ADD(ring_status_update);
11887 ESTAT_ADD(nic_irqs);
11888 ESTAT_ADD(nic_avoided_irqs);
11889 ESTAT_ADD(nic_tx_threshold_hit);
11890
11891 ESTAT_ADD(mbuf_lwm_thresh_hit);
11892 }
11893
11894 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11895 {
11896 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11897 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11898
11899 stats->rx_packets = old_stats->rx_packets +
11900 get_stat64(&hw_stats->rx_ucast_packets) +
11901 get_stat64(&hw_stats->rx_mcast_packets) +
11902 get_stat64(&hw_stats->rx_bcast_packets);
11903
11904 stats->tx_packets = old_stats->tx_packets +
11905 get_stat64(&hw_stats->tx_ucast_packets) +
11906 get_stat64(&hw_stats->tx_mcast_packets) +
11907 get_stat64(&hw_stats->tx_bcast_packets);
11908
11909 stats->rx_bytes = old_stats->rx_bytes +
11910 get_stat64(&hw_stats->rx_octets);
11911 stats->tx_bytes = old_stats->tx_bytes +
11912 get_stat64(&hw_stats->tx_octets);
11913
11914 stats->rx_errors = old_stats->rx_errors +
11915 get_stat64(&hw_stats->rx_errors);
11916 stats->tx_errors = old_stats->tx_errors +
11917 get_stat64(&hw_stats->tx_errors) +
11918 get_stat64(&hw_stats->tx_mac_errors) +
11919 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11920 get_stat64(&hw_stats->tx_discards);
11921
11922 stats->multicast = old_stats->multicast +
11923 get_stat64(&hw_stats->rx_mcast_packets);
11924 stats->collisions = old_stats->collisions +
11925 get_stat64(&hw_stats->tx_collisions);
11926
11927 stats->rx_length_errors = old_stats->rx_length_errors +
11928 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11929 get_stat64(&hw_stats->rx_undersize_packets);
11930
11931 stats->rx_frame_errors = old_stats->rx_frame_errors +
11932 get_stat64(&hw_stats->rx_align_errors);
11933 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11934 get_stat64(&hw_stats->tx_discards);
11935 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11936 get_stat64(&hw_stats->tx_carrier_sense_errors);
11937
11938 stats->rx_crc_errors = old_stats->rx_crc_errors +
11939 tg3_calc_crc_errors(tp);
11940
11941 stats->rx_missed_errors = old_stats->rx_missed_errors +
11942 get_stat64(&hw_stats->rx_discards);
11943
11944 stats->rx_dropped = tp->rx_dropped;
11945 stats->tx_dropped = tp->tx_dropped;
11946 }
11947
11948 static int tg3_get_regs_len(struct net_device *dev)
11949 {
11950 return TG3_REG_BLK_SIZE;
11951 }
11952
11953 static void tg3_get_regs(struct net_device *dev,
11954 struct ethtool_regs *regs, void *_p)
11955 {
11956 struct tg3 *tp = netdev_priv(dev);
11957
11958 regs->version = 0;
11959
11960 memset(_p, 0, TG3_REG_BLK_SIZE);
11961
11962 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11963 return;
11964
11965 tg3_full_lock(tp, 0);
11966
11967 tg3_dump_legacy_regs(tp, (u32 *)_p);
11968
11969 tg3_full_unlock(tp);
11970 }
11971
11972 static int tg3_get_eeprom_len(struct net_device *dev)
11973 {
11974 struct tg3 *tp = netdev_priv(dev);
11975
11976 return tp->nvram_size;
11977 }
11978
11979 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11980 {
11981 struct tg3 *tp = netdev_priv(dev);
11982 int ret, cpmu_restore = 0;
11983 u8 *pd;
11984 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11985 __be32 val;
11986
11987 if (tg3_flag(tp, NO_NVRAM))
11988 return -EINVAL;
11989
11990 offset = eeprom->offset;
11991 len = eeprom->len;
11992 eeprom->len = 0;
11993
11994 eeprom->magic = TG3_EEPROM_MAGIC;
11995
11996 /* Override clock, link aware and link idle modes */
11997 if (tg3_flag(tp, CPMU_PRESENT)) {
11998 cpmu_val = tr32(TG3_CPMU_CTRL);
11999 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12000 CPMU_CTRL_LINK_IDLE_MODE)) {
12001 tw32(TG3_CPMU_CTRL, cpmu_val &
12002 ~(CPMU_CTRL_LINK_AWARE_MODE |
12003 CPMU_CTRL_LINK_IDLE_MODE));
12004 cpmu_restore = 1;
12005 }
12006 }
12007 tg3_override_clk(tp);
12008
12009 if (offset & 3) {
12010 /* adjustments to start on required 4 byte boundary */
12011 b_offset = offset & 3;
12012 b_count = 4 - b_offset;
12013 if (b_count > len) {
12014 /* i.e. offset=1 len=2 */
12015 b_count = len;
12016 }
12017 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12018 if (ret)
12019 goto eeprom_done;
12020 memcpy(data, ((char *)&val) + b_offset, b_count);
12021 len -= b_count;
12022 offset += b_count;
12023 eeprom->len += b_count;
12024 }
12025
12026 /* read bytes up to the last 4 byte boundary */
12027 pd = &data[eeprom->len];
12028 for (i = 0; i < (len - (len & 3)); i += 4) {
12029 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12030 if (ret) {
12031 if (i)
12032 i -= 4;
12033 eeprom->len += i;
12034 goto eeprom_done;
12035 }
12036 memcpy(pd + i, &val, 4);
12037 if (need_resched()) {
12038 if (signal_pending(current)) {
12039 eeprom->len += i;
12040 ret = -EINTR;
12041 goto eeprom_done;
12042 }
12043 cond_resched();
12044 }
12045 }
12046 eeprom->len += i;
12047
12048 if (len & 3) {
12049 /* read last bytes not ending on 4 byte boundary */
12050 pd = &data[eeprom->len];
12051 b_count = len & 3;
12052 b_offset = offset + len - b_count;
12053 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12054 if (ret)
12055 goto eeprom_done;
12056 memcpy(pd, &val, b_count);
12057 eeprom->len += b_count;
12058 }
12059 ret = 0;
12060
12061 eeprom_done:
12062 /* Restore clock, link aware and link idle modes */
12063 tg3_restore_clk(tp);
12064 if (cpmu_restore)
12065 tw32(TG3_CPMU_CTRL, cpmu_val);
12066
12067 return ret;
12068 }
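/* The three-phase copy above (leading partial word, aligned middle,
 * trailing partial word) exists because NVRAM is only readable in
 * big-endian 32-bit units. Usage sketch from userspace, assuming an
 * interface named eth0:
 *
 *	ethtool -e eth0 offset 1 length 2
 *
 * triggers exactly the offset=1/len=2 head case noted in the code.
 */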
12069
12070 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12071 {
12072 struct tg3 *tp = netdev_priv(dev);
12073 int ret;
12074 u32 offset, len, b_offset, odd_len;
12075 u8 *buf;
12076 __be32 start = 0, end;
12077
12078 if (tg3_flag(tp, NO_NVRAM) ||
12079 eeprom->magic != TG3_EEPROM_MAGIC)
12080 return -EINVAL;
12081
12082 offset = eeprom->offset;
12083 len = eeprom->len;
12084
12085 if ((b_offset = (offset & 3))) {
12086 /* adjustments to start on required 4 byte boundary */
12087 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12088 if (ret)
12089 return ret;
12090 len += b_offset;
12091 offset &= ~3;
12092 if (len < 4)
12093 len = 4;
12094 }
12095
12096 odd_len = 0;
12097 if (len & 3) {
12098 /* adjustments to end on required 4 byte boundary */
12099 odd_len = 1;
12100 len = (len + 3) & ~3;
12101 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12102 if (ret)
12103 return ret;
12104 }
12105
12106 buf = data;
12107 if (b_offset || odd_len) {
12108 buf = kmalloc(len, GFP_KERNEL);
12109 if (!buf)
12110 return -ENOMEM;
12111 if (b_offset)
12112 memcpy(buf, &start, 4);
12113 if (odd_len)
12114 memcpy(buf+len-4, &end, 4);
12115 memcpy(buf + b_offset, data, eeprom->len);
12116 }
12117
12118 ret = tg3_nvram_write_block(tp, offset, len, buf);
12119
12120 if (buf != data)
12121 kfree(buf);
12122
12123 return ret;
12124 }
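/* Writes follow the same alignment rules via read-modify-write: the
 * partial words at either edge are first read back (start/end above)
 * so that the bounce buffer handed to tg3_nvram_write_block() always
 * covers whole 32-bit words without clobbering neighboring bytes.
 */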
12125
12126 static int tg3_get_link_ksettings(struct net_device *dev,
12127 struct ethtool_link_ksettings *cmd)
12128 {
12129 struct tg3 *tp = netdev_priv(dev);
12130 u32 supported, advertising;
12131
12132 if (tg3_flag(tp, USE_PHYLIB)) {
12133 struct phy_device *phydev;
12134 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12135 return -EAGAIN;
12136 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12137 phy_ethtool_ksettings_get(phydev, cmd);
12138
12139 return 0;
12140 }
12141
12142 supported = (SUPPORTED_Autoneg);
12143
12144 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12145 supported |= (SUPPORTED_1000baseT_Half |
12146 SUPPORTED_1000baseT_Full);
12147
12148 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12149 supported |= (SUPPORTED_100baseT_Half |
12150 SUPPORTED_100baseT_Full |
12151 SUPPORTED_10baseT_Half |
12152 SUPPORTED_10baseT_Full |
12153 SUPPORTED_TP);
12154 cmd->base.port = PORT_TP;
12155 } else {
12156 supported |= SUPPORTED_FIBRE;
12157 cmd->base.port = PORT_FIBRE;
12158 }
12159 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12160 supported);
12161
12162 advertising = tp->link_config.advertising;
12163 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12164 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12165 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12166 advertising |= ADVERTISED_Pause;
12167 } else {
12168 advertising |= ADVERTISED_Pause |
12169 ADVERTISED_Asym_Pause;
12170 }
12171 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12172 advertising |= ADVERTISED_Asym_Pause;
12173 }
12174 }
12175 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12176 advertising);
12177
12178 if (netif_running(dev) && tp->link_up) {
12179 cmd->base.speed = tp->link_config.active_speed;
12180 cmd->base.duplex = tp->link_config.active_duplex;
12181 ethtool_convert_legacy_u32_to_link_mode(
12182 cmd->link_modes.lp_advertising,
12183 tp->link_config.rmt_adv);
12184
12185 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12186 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12187 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12188 else
12189 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12190 }
12191 } else {
12192 cmd->base.speed = SPEED_UNKNOWN;
12193 cmd->base.duplex = DUPLEX_UNKNOWN;
12194 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12195 }
12196 cmd->base.phy_address = tp->phy_addr;
12197 cmd->base.autoneg = tp->link_config.autoneg;
12198 return 0;
12199 }
12200
12201 static int tg3_set_link_ksettings(struct net_device *dev,
12202 const struct ethtool_link_ksettings *cmd)
12203 {
12204 struct tg3 *tp = netdev_priv(dev);
12205 u32 speed = cmd->base.speed;
12206 u32 advertising;
12207
12208 if (tg3_flag(tp, USE_PHYLIB)) {
12209 struct phy_device *phydev;
12210 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12211 return -EAGAIN;
12212 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12213 return phy_ethtool_ksettings_set(phydev, cmd);
12214 }
12215
12216 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12217 cmd->base.autoneg != AUTONEG_DISABLE)
12218 return -EINVAL;
12219
12220 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12221 cmd->base.duplex != DUPLEX_FULL &&
12222 cmd->base.duplex != DUPLEX_HALF)
12223 return -EINVAL;
12224
12225 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12226 cmd->link_modes.advertising);
12227
12228 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12229 u32 mask = ADVERTISED_Autoneg |
12230 ADVERTISED_Pause |
12231 ADVERTISED_Asym_Pause;
12232
12233 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12234 mask |= ADVERTISED_1000baseT_Half |
12235 ADVERTISED_1000baseT_Full;
12236
12237 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12238 mask |= ADVERTISED_100baseT_Half |
12239 ADVERTISED_100baseT_Full |
12240 ADVERTISED_10baseT_Half |
12241 ADVERTISED_10baseT_Full |
12242 ADVERTISED_TP;
12243 else
12244 mask |= ADVERTISED_FIBRE;
12245
12246 if (advertising & ~mask)
12247 return -EINVAL;
12248
12249 mask &= (ADVERTISED_1000baseT_Half |
12250 ADVERTISED_1000baseT_Full |
12251 ADVERTISED_100baseT_Half |
12252 ADVERTISED_100baseT_Full |
12253 ADVERTISED_10baseT_Half |
12254 ADVERTISED_10baseT_Full);
12255
12256 advertising &= mask;
12257 } else {
12258 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12259 if (speed != SPEED_1000)
12260 return -EINVAL;
12261
12262 if (cmd->base.duplex != DUPLEX_FULL)
12263 return -EINVAL;
12264 } else {
12265 if (speed != SPEED_100 &&
12266 speed != SPEED_10)
12267 return -EINVAL;
12268 }
12269 }
12270
12271 tg3_full_lock(tp, 0);
12272
12273 tp->link_config.autoneg = cmd->base.autoneg;
12274 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12275 tp->link_config.advertising = (advertising |
12276 ADVERTISED_Autoneg);
12277 tp->link_config.speed = SPEED_UNKNOWN;
12278 tp->link_config.duplex = DUPLEX_UNKNOWN;
12279 } else {
12280 tp->link_config.advertising = 0;
12281 tp->link_config.speed = speed;
12282 tp->link_config.duplex = cmd->base.duplex;
12283 }
12284
12285 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12286
12287 tg3_warn_mgmt_link_flap(tp);
12288
12289 if (netif_running(dev))
12290 tg3_setup_phy(tp, true);
12291
12292 tg3_full_unlock(tp);
12293
12294 return 0;
12295 }
12296
12297 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12298 {
12299 struct tg3 *tp = netdev_priv(dev);
12300
12301 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12302 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12303 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12304 }
12305
12306 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12307 {
12308 struct tg3 *tp = netdev_priv(dev);
12309
12310 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12311 wol->supported = WAKE_MAGIC;
12312 else
12313 wol->supported = 0;
12314 wol->wolopts = 0;
12315 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12316 wol->wolopts = WAKE_MAGIC;
12317 memset(&wol->sopass, 0, sizeof(wol->sopass));
12318 }
12319
12320 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12321 {
12322 struct tg3 *tp = netdev_priv(dev);
12323 struct device *dp = &tp->pdev->dev;
12324
12325 if (wol->wolopts & ~WAKE_MAGIC)
12326 return -EINVAL;
12327 if ((wol->wolopts & WAKE_MAGIC) &&
12328 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12329 return -EINVAL;
12330
12331 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12332
12333 if (device_may_wakeup(dp))
12334 tg3_flag_set(tp, WOL_ENABLE);
12335 else
12336 tg3_flag_clear(tp, WOL_ENABLE);
12337
12338 return 0;
12339 }
12340
12341 static u32 tg3_get_msglevel(struct net_device *dev)
12342 {
12343 struct tg3 *tp = netdev_priv(dev);
12344 return tp->msg_enable;
12345 }
12346
12347 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12348 {
12349 struct tg3 *tp = netdev_priv(dev);
12350 tp->msg_enable = value;
12351 }
12352
12353 static int tg3_nway_reset(struct net_device *dev)
12354 {
12355 struct tg3 *tp = netdev_priv(dev);
12356 int r;
12357
12358 if (!netif_running(dev))
12359 return -EAGAIN;
12360
12361 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12362 return -EINVAL;
12363
12364 tg3_warn_mgmt_link_flap(tp);
12365
12366 if (tg3_flag(tp, USE_PHYLIB)) {
12367 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12368 return -EAGAIN;
12369 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12370 } else {
12371 u32 bmcr;
12372
12373 spin_lock_bh(&tp->lock);
12374 r = -EINVAL;
12375 tg3_readphy(tp, MII_BMCR, &bmcr);
12376 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12377 ((bmcr & BMCR_ANENABLE) ||
12378 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12379 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12380 BMCR_ANENABLE);
12381 r = 0;
12382 }
12383 spin_unlock_bh(&tp->lock);
12384 }
12385
12386 return r;
12387 }
12388
12389 static void tg3_get_ringparam(struct net_device *dev,
12390 struct ethtool_ringparam *ering,
12391 struct kernel_ethtool_ringparam *kernel_ering,
12392 struct netlink_ext_ack *extack)
12393 {
12394 struct tg3 *tp = netdev_priv(dev);
12395
12396 ering->rx_max_pending = tp->rx_std_ring_mask;
12397 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12398 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12399 else
12400 ering->rx_jumbo_max_pending = 0;
12401
12402 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12403
12404 ering->rx_pending = tp->rx_pending;
12405 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12406 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12407 else
12408 ering->rx_jumbo_pending = 0;
12409
12410 ering->tx_pending = tp->napi[0].tx_pending;
12411 }
12412
12413 static int tg3_set_ringparam(struct net_device *dev,
12414 struct ethtool_ringparam *ering,
12415 struct kernel_ethtool_ringparam *kernel_ering,
12416 struct netlink_ext_ack *extack)
12417 {
12418 struct tg3 *tp = netdev_priv(dev);
12419 int i, irq_sync = 0, err = 0;
12420 bool reset_phy = false;
12421
12422 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12423 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12424 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12425 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12426 (tg3_flag(tp, TSO_BUG) &&
12427 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12428 return -EINVAL;
12429
12430 if (netif_running(dev)) {
12431 tg3_phy_stop(tp);
12432 tg3_netif_stop(tp);
12433 irq_sync = 1;
12434 }
12435
12436 tg3_full_lock(tp, irq_sync);
12437
12438 tp->rx_pending = ering->rx_pending;
12439
12440 if (tg3_flag(tp, MAX_RXPEND_64) &&
12441 tp->rx_pending > 63)
12442 tp->rx_pending = 63;
12443
12444 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12445 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12446
12447 for (i = 0; i < tp->irq_max; i++)
12448 tp->napi[i].tx_pending = ering->tx_pending;
12449
12450 if (netif_running(dev)) {
12451 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12452 /* Reset PHY to avoid PHY lock up */
12453 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12454 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12455 tg3_asic_rev(tp) == ASIC_REV_5720)
12456 reset_phy = true;
12457
12458 err = tg3_restart_hw(tp, reset_phy);
12459 if (!err)
12460 tg3_netif_start(tp);
12461 }
12462
12463 tg3_full_unlock(tp);
12464
12465 if (irq_sync && !err)
12466 tg3_phy_start(tp);
12467
12468 return err;
12469 }
12470
12471 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12472 {
12473 struct tg3 *tp = netdev_priv(dev);
12474
12475 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12476
12477 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12478 epause->rx_pause = 1;
12479 else
12480 epause->rx_pause = 0;
12481
12482 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12483 epause->tx_pause = 1;
12484 else
12485 epause->tx_pause = 0;
12486 }
12487
12488 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12489 {
12490 struct tg3 *tp = netdev_priv(dev);
12491 int err = 0;
12492 bool reset_phy = false;
12493
12494 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12495 tg3_warn_mgmt_link_flap(tp);
12496
12497 if (tg3_flag(tp, USE_PHYLIB)) {
12498 struct phy_device *phydev;
12499
12500 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12501
12502 if (!phy_validate_pause(phydev, epause))
12503 return -EINVAL;
12504
12505 tp->link_config.flowctrl = 0;
12506 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12507 if (epause->rx_pause) {
12508 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12509
12510 if (epause->tx_pause) {
12511 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12512 }
12513 } else if (epause->tx_pause) {
12514 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12515 }
12516
12517 if (epause->autoneg)
12518 tg3_flag_set(tp, PAUSE_AUTONEG);
12519 else
12520 tg3_flag_clear(tp, PAUSE_AUTONEG);
12521
12522 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12523 if (phydev->autoneg) {
12524 /* phy_set_asym_pause() will
12525 * renegotiate the link to inform our
12526 * link partner of our flow control
12527 * settings, even if the flow control
12528 * is forced. Let tg3_adjust_link()
12529 * do the final flow control setup.
12530 */
12531 return 0;
12532 }
12533
12534 if (!epause->autoneg)
12535 tg3_setup_flow_control(tp, 0, 0);
12536 }
12537 } else {
12538 int irq_sync = 0;
12539
12540 if (netif_running(dev)) {
12541 tg3_netif_stop(tp);
12542 irq_sync = 1;
12543 }
12544
12545 tg3_full_lock(tp, irq_sync);
12546
12547 if (epause->autoneg)
12548 tg3_flag_set(tp, PAUSE_AUTONEG);
12549 else
12550 tg3_flag_clear(tp, PAUSE_AUTONEG);
12551 if (epause->rx_pause)
12552 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12553 else
12554 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12555 if (epause->tx_pause)
12556 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12557 else
12558 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12559
12560 if (netif_running(dev)) {
12561 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12562 /* Reset PHY to avoid PHY lock up */
12563 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12564 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12565 tg3_asic_rev(tp) == ASIC_REV_5720)
12566 reset_phy = true;
12567
12568 err = tg3_restart_hw(tp, reset_phy);
12569 if (!err)
12570 tg3_netif_start(tp);
12571 }
12572
12573 tg3_full_unlock(tp);
12574 }
12575
12576 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12577
12578 return err;
12579 }
12580
12581 static int tg3_get_sset_count(struct net_device *dev, int sset)
12582 {
12583 switch (sset) {
12584 case ETH_SS_TEST:
12585 return TG3_NUM_TEST;
12586 case ETH_SS_STATS:
12587 return TG3_NUM_STATS;
12588 default:
12589 return -EOPNOTSUPP;
12590 }
12591 }
12592
12593 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12594 u32 *rules __always_unused)
12595 {
12596 struct tg3 *tp = netdev_priv(dev);
12597
12598 if (!tg3_flag(tp, SUPPORT_MSIX))
12599 return -EOPNOTSUPP;
12600
12601 switch (info->cmd) {
12602 case ETHTOOL_GRXRINGS:
12603 if (netif_running(tp->dev))
12604 info->data = tp->rxq_cnt;
12605 else {
12606 info->data = num_online_cpus();
12607 if (info->data > TG3_RSS_MAX_NUM_QS)
12608 info->data = TG3_RSS_MAX_NUM_QS;
12609 }
12610
12611 return 0;
12612
12613 default:
12614 return -EOPNOTSUPP;
12615 }
12616 }
12617
12618 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12619 {
12620 u32 size = 0;
12621 struct tg3 *tp = netdev_priv(dev);
12622
12623 if (tg3_flag(tp, SUPPORT_MSIX))
12624 size = TG3_RSS_INDIR_TBL_SIZE;
12625
12626 return size;
12627 }
12628
12629 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12630 {
12631 struct tg3 *tp = netdev_priv(dev);
12632 int i;
12633
12634 if (hfunc)
12635 *hfunc = ETH_RSS_HASH_TOP;
12636 if (!indir)
12637 return 0;
12638
12639 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12640 indir[i] = tp->rss_ind_tbl[i];
12641
12642 return 0;
12643 }
12644
12645 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12646 const u8 hfunc)
12647 {
12648 struct tg3 *tp = netdev_priv(dev);
12649 size_t i;
12650
12651 	/* Changing the RSS key or selecting a hash function other than
12652 	 * Toeplitz is not supported; only the indirection table may change.
12653 	 */
12654 if (key ||
12655 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12656 return -EOPNOTSUPP;
12657
12658 if (!indir)
12659 return 0;
12660
12661 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12662 tp->rss_ind_tbl[i] = indir[i];
12663
12664 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12665 return 0;
12666
12667 /* It is legal to write the indirection
12668 * table while the device is running.
12669 */
12670 tg3_full_lock(tp, 0);
12671 tg3_rss_write_indir_tbl(tp);
12672 tg3_full_unlock(tp);
12673
12674 return 0;
12675 }
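/* Example (assuming an interface named eth0 with MSI-X enabled):
 *
 *	ethtool -X eth0 equal 4
 *
 * rewrites tp->rss_ind_tbl to spread flows over four RX rings and, if
 * the device is up with RSS enabled, pushes the table straight to the
 * hardware under tp->lock as above.
 */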
12676
12677 static void tg3_get_channels(struct net_device *dev,
12678 struct ethtool_channels *channel)
12679 {
12680 struct tg3 *tp = netdev_priv(dev);
12681 u32 deflt_qs = netif_get_num_default_rss_queues();
12682
12683 channel->max_rx = tp->rxq_max;
12684 channel->max_tx = tp->txq_max;
12685
12686 if (netif_running(dev)) {
12687 channel->rx_count = tp->rxq_cnt;
12688 channel->tx_count = tp->txq_cnt;
12689 } else {
12690 if (tp->rxq_req)
12691 channel->rx_count = tp->rxq_req;
12692 else
12693 channel->rx_count = min(deflt_qs, tp->rxq_max);
12694
12695 if (tp->txq_req)
12696 channel->tx_count = tp->txq_req;
12697 else
12698 channel->tx_count = min(deflt_qs, tp->txq_max);
12699 }
12700 }
12701
12702 static int tg3_set_channels(struct net_device *dev,
12703 struct ethtool_channels *channel)
12704 {
12705 struct tg3 *tp = netdev_priv(dev);
12706
12707 if (!tg3_flag(tp, SUPPORT_MSIX))
12708 return -EOPNOTSUPP;
12709
12710 if (channel->rx_count > tp->rxq_max ||
12711 channel->tx_count > tp->txq_max)
12712 return -EINVAL;
12713
12714 tp->rxq_req = channel->rx_count;
12715 tp->txq_req = channel->tx_count;
12716
12717 if (!netif_running(dev))
12718 return 0;
12719
12720 tg3_stop(tp);
12721
12722 tg3_carrier_off(tp);
12723
12724 tg3_start(tp, true, false, false);
12725
12726 return 0;
12727 }
12728
12729 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12730 {
12731 switch (stringset) {
12732 case ETH_SS_STATS:
12733 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12734 break;
12735 case ETH_SS_TEST:
12736 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12737 break;
12738 default:
12739 WARN_ON(1); /* we need a WARN() */
12740 break;
12741 }
12742 }
12743
12744 static int tg3_set_phys_id(struct net_device *dev,
12745 enum ethtool_phys_id_state state)
12746 {
12747 struct tg3 *tp = netdev_priv(dev);
12748
12749 switch (state) {
12750 case ETHTOOL_ID_ACTIVE:
12751 return 1; /* cycle on/off once per second */
12752
12753 case ETHTOOL_ID_ON:
12754 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12755 LED_CTRL_1000MBPS_ON |
12756 LED_CTRL_100MBPS_ON |
12757 LED_CTRL_10MBPS_ON |
12758 LED_CTRL_TRAFFIC_OVERRIDE |
12759 LED_CTRL_TRAFFIC_BLINK |
12760 LED_CTRL_TRAFFIC_LED);
12761 break;
12762
12763 case ETHTOOL_ID_OFF:
12764 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12765 LED_CTRL_TRAFFIC_OVERRIDE);
12766 break;
12767
12768 case ETHTOOL_ID_INACTIVE:
12769 tw32(MAC_LED_CTRL, tp->led_ctrl);
12770 break;
12771 }
12772
12773 return 0;
12774 }
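/* This backs "ethtool -p eth0 <seconds>" (interface name illustrative):
 * the ethtool core drives ETHTOOL_ID_ON/OFF at the once-per-second
 * cadence requested by the return value 1 above, and
 * ETHTOOL_ID_INACTIVE restores tp->led_ctrl when blinking stops.
 */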
12775
12776 static void tg3_get_ethtool_stats(struct net_device *dev,
12777 struct ethtool_stats *estats, u64 *tmp_stats)
12778 {
12779 struct tg3 *tp = netdev_priv(dev);
12780
12781 if (tp->hw_stats)
12782 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12783 else
12784 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12785 }
12786
12787 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12788 {
12789 int i;
12790 __be32 *buf;
12791 u32 offset = 0, len = 0;
12792 u32 magic, val;
12793
12794 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12795 return NULL;
12796
12797 if (magic == TG3_EEPROM_MAGIC) {
12798 for (offset = TG3_NVM_DIR_START;
12799 offset < TG3_NVM_DIR_END;
12800 offset += TG3_NVM_DIRENT_SIZE) {
12801 if (tg3_nvram_read(tp, offset, &val))
12802 return NULL;
12803
12804 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12805 TG3_NVM_DIRTYPE_EXTVPD)
12806 break;
12807 }
12808
12809 if (offset != TG3_NVM_DIR_END) {
12810 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12811 if (tg3_nvram_read(tp, offset + 4, &offset))
12812 return NULL;
12813
12814 offset = tg3_nvram_logical_addr(tp, offset);
12815 }
12816
12817 if (!offset || !len) {
12818 offset = TG3_NVM_VPD_OFF;
12819 len = TG3_NVM_VPD_LEN;
12820 }
12821
12822 buf = kmalloc(len, GFP_KERNEL);
12823 if (!buf)
12824 return NULL;
12825
12826 for (i = 0; i < len; i += 4) {
12827 /* The data is in little-endian format in NVRAM.
12828 * Use the big-endian read routines to preserve
12829 * the byte order as it exists in NVRAM.
12830 */
12831 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12832 goto error;
12833 }
12834 *vpdlen = len;
12835 } else {
12836 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12837 if (IS_ERR(buf))
12838 return NULL;
12839 }
12840
12841 return buf;
12842
12843 error:
12844 kfree(buf);
12845 return NULL;
12846 }
12847
12848 #define NVRAM_TEST_SIZE 0x100
12849 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12850 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12851 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12852 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12853 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12854 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12855 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12856 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12857
12858 static int tg3_test_nvram(struct tg3 *tp)
12859 {
12860 u32 csum, magic;
12861 __be32 *buf;
12862 int i, j, k, err = 0, size;
12863 unsigned int len;
12864
12865 if (tg3_flag(tp, NO_NVRAM))
12866 return 0;
12867
12868 if (tg3_nvram_read(tp, 0, &magic) != 0)
12869 return -EIO;
12870
12871 if (magic == TG3_EEPROM_MAGIC)
12872 size = NVRAM_TEST_SIZE;
12873 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12874 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12875 TG3_EEPROM_SB_FORMAT_1) {
12876 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12877 case TG3_EEPROM_SB_REVISION_0:
12878 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12879 break;
12880 case TG3_EEPROM_SB_REVISION_2:
12881 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12882 break;
12883 case TG3_EEPROM_SB_REVISION_3:
12884 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12885 break;
12886 case TG3_EEPROM_SB_REVISION_4:
12887 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12888 break;
12889 case TG3_EEPROM_SB_REVISION_5:
12890 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12891 break;
12892 case TG3_EEPROM_SB_REVISION_6:
12893 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12894 break;
12895 default:
12896 return -EIO;
12897 }
12898 } else
12899 return 0;
12900 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12901 size = NVRAM_SELFBOOT_HW_SIZE;
12902 else
12903 return -EIO;
12904
12905 buf = kmalloc(size, GFP_KERNEL);
12906 if (buf == NULL)
12907 return -ENOMEM;
12908
12909 err = -EIO;
12910 for (i = 0, j = 0; i < size; i += 4, j++) {
12911 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12912 if (err)
12913 break;
12914 }
12915 if (i < size)
12916 goto out;
12917
12918 /* Selfboot format */
12919 magic = be32_to_cpu(buf[0]);
12920 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12921 TG3_EEPROM_MAGIC_FW) {
12922 u8 *buf8 = (u8 *) buf, csum8 = 0;
12923
12924 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12925 TG3_EEPROM_SB_REVISION_2) {
12926 /* For rev 2, the csum doesn't include the MBA. */
12927 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12928 csum8 += buf8[i];
12929 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12930 csum8 += buf8[i];
12931 } else {
12932 for (i = 0; i < size; i++)
12933 csum8 += buf8[i];
12934 }
12935
12936 if (csum8 == 0) {
12937 err = 0;
12938 goto out;
12939 }
12940
12941 err = -EIO;
12942 goto out;
12943 }
12944
12945 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12946 TG3_EEPROM_MAGIC_HW) {
12947 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12948 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12949 u8 *buf8 = (u8 *) buf;
12950
12951 /* Separate the parity bits and the data bytes. */
12952 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12953 if ((i == 0) || (i == 8)) {
12954 int l;
12955 u8 msk;
12956
12957 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12958 parity[k++] = buf8[i] & msk;
12959 i++;
12960 } else if (i == 16) {
12961 int l;
12962 u8 msk;
12963
12964 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12965 parity[k++] = buf8[i] & msk;
12966 i++;
12967
12968 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12969 parity[k++] = buf8[i] & msk;
12970 i++;
12971 }
12972 data[j++] = buf8[i];
12973 }
12974
12975 err = -EIO;
12976 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12977 u8 hw8 = hweight8(data[i]);
12978
12979 if ((hw8 & 0x1) && parity[i])
12980 goto out;
12981 else if (!(hw8 & 0x1) && !parity[i])
12982 goto out;
12983 }
12984 err = 0;
12985 goto out;
12986 }
12987
12988 err = -EIO;
12989
12990 /* Bootstrap checksum at offset 0x10 */
12991 csum = calc_crc((unsigned char *) buf, 0x10);
12992 if (csum != le32_to_cpu(buf[0x10/4]))
12993 goto out;
12994
12995 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12996 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12997 if (csum != le32_to_cpu(buf[0xfc/4]))
12998 goto out;
12999
13000 kfree(buf);
13001
13002 buf = tg3_vpd_readblock(tp, &len);
13003 if (!buf)
13004 return -ENOMEM;
13005
13006 err = pci_vpd_check_csum(buf, len);
13007 /* go on if no checksum found */
13008 if (err == 1)
13009 err = 0;
13010 out:
13011 kfree(buf);
13012 return err;
13013 }
13014
13015 #define TG3_SERDES_TIMEOUT_SEC 2
13016 #define TG3_COPPER_TIMEOUT_SEC 6
13017
13018 static int tg3_test_link(struct tg3 *tp)
13019 {
13020 int i, max;
13021
13022 if (!netif_running(tp->dev))
13023 return -ENODEV;
13024
13025 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13026 max = TG3_SERDES_TIMEOUT_SEC;
13027 else
13028 max = TG3_COPPER_TIMEOUT_SEC;
13029
13030 for (i = 0; i < max; i++) {
13031 if (tp->link_up)
13032 return 0;
13033
13034 if (msleep_interruptible(1000))
13035 break;
13036 }
13037
13038 return -EIO;
13039 }
13040
13041 /* Only test the commonly used registers */
13042 static int tg3_test_registers(struct tg3 *tp)
13043 {
13044 int i, is_5705, is_5750;
13045 u32 offset, read_mask, write_mask, val, save_val, read_val;
13046 static struct {
13047 u16 offset;
13048 u16 flags;
13049 #define TG3_FL_5705 0x1
13050 #define TG3_FL_NOT_5705 0x2
13051 #define TG3_FL_NOT_5788 0x4
13052 #define TG3_FL_NOT_5750 0x8
13053 u32 read_mask;
13054 u32 write_mask;
13055 } reg_tbl[] = {
13056 /* MAC Control Registers */
13057 { MAC_MODE, TG3_FL_NOT_5705,
13058 0x00000000, 0x00ef6f8c },
13059 { MAC_MODE, TG3_FL_5705,
13060 0x00000000, 0x01ef6b8c },
13061 { MAC_STATUS, TG3_FL_NOT_5705,
13062 0x03800107, 0x00000000 },
13063 { MAC_STATUS, TG3_FL_5705,
13064 0x03800100, 0x00000000 },
13065 { MAC_ADDR_0_HIGH, 0x0000,
13066 0x00000000, 0x0000ffff },
13067 { MAC_ADDR_0_LOW, 0x0000,
13068 0x00000000, 0xffffffff },
13069 { MAC_RX_MTU_SIZE, 0x0000,
13070 0x00000000, 0x0000ffff },
13071 { MAC_TX_MODE, 0x0000,
13072 0x00000000, 0x00000070 },
13073 { MAC_TX_LENGTHS, 0x0000,
13074 0x00000000, 0x00003fff },
13075 { MAC_RX_MODE, TG3_FL_NOT_5705,
13076 0x00000000, 0x000007fc },
13077 { MAC_RX_MODE, TG3_FL_5705,
13078 0x00000000, 0x000007dc },
13079 { MAC_HASH_REG_0, 0x0000,
13080 0x00000000, 0xffffffff },
13081 { MAC_HASH_REG_1, 0x0000,
13082 0x00000000, 0xffffffff },
13083 { MAC_HASH_REG_2, 0x0000,
13084 0x00000000, 0xffffffff },
13085 { MAC_HASH_REG_3, 0x0000,
13086 0x00000000, 0xffffffff },
13087
13088 /* Receive Data and Receive BD Initiator Control Registers. */
13089 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13090 0x00000000, 0xffffffff },
13091 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13092 0x00000000, 0xffffffff },
13093 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13094 0x00000000, 0x00000003 },
13095 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13096 0x00000000, 0xffffffff },
13097 { RCVDBDI_STD_BD+0, 0x0000,
13098 0x00000000, 0xffffffff },
13099 { RCVDBDI_STD_BD+4, 0x0000,
13100 0x00000000, 0xffffffff },
13101 { RCVDBDI_STD_BD+8, 0x0000,
13102 0x00000000, 0xffff0002 },
13103 { RCVDBDI_STD_BD+0xc, 0x0000,
13104 0x00000000, 0xffffffff },
13105
13106 /* Receive BD Initiator Control Registers. */
13107 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13108 0x00000000, 0xffffffff },
13109 { RCVBDI_STD_THRESH, TG3_FL_5705,
13110 0x00000000, 0x000003ff },
13111 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13112 0x00000000, 0xffffffff },
13113
13114 /* Host Coalescing Control Registers. */
13115 { HOSTCC_MODE, TG3_FL_NOT_5705,
13116 0x00000000, 0x00000004 },
13117 { HOSTCC_MODE, TG3_FL_5705,
13118 0x00000000, 0x000000f6 },
13119 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13120 0x00000000, 0xffffffff },
13121 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13122 0x00000000, 0x000003ff },
13123 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13124 0x00000000, 0xffffffff },
13125 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13126 0x00000000, 0x000003ff },
13127 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13128 0x00000000, 0xffffffff },
13129 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13130 0x00000000, 0x000000ff },
13131 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13132 0x00000000, 0xffffffff },
13133 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13134 0x00000000, 0x000000ff },
13135 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13136 0x00000000, 0xffffffff },
13137 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13138 0x00000000, 0xffffffff },
13139 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13140 0x00000000, 0xffffffff },
13141 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13142 0x00000000, 0x000000ff },
13143 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13144 0x00000000, 0xffffffff },
13145 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13146 0x00000000, 0x000000ff },
13147 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13148 0x00000000, 0xffffffff },
13149 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13150 0x00000000, 0xffffffff },
13151 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13152 0x00000000, 0xffffffff },
13153 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13154 0x00000000, 0xffffffff },
13155 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13156 0x00000000, 0xffffffff },
13157 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13158 0xffffffff, 0x00000000 },
13159 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13160 0xffffffff, 0x00000000 },
13161
13162 /* Buffer Manager Control Registers. */
13163 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13164 0x00000000, 0x007fff80 },
13165 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13166 0x00000000, 0x007fffff },
13167 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13168 0x00000000, 0x0000003f },
13169 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13170 0x00000000, 0x000001ff },
13171 { BUFMGR_MB_HIGH_WATER, 0x0000,
13172 0x00000000, 0x000001ff },
13173 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13174 0xffffffff, 0x00000000 },
13175 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13176 0xffffffff, 0x00000000 },
13177
13178 /* Mailbox Registers */
13179 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13180 0x00000000, 0x000001ff },
13181 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13182 0x00000000, 0x000001ff },
13183 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13184 0x00000000, 0x000007ff },
13185 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13186 0x00000000, 0x000001ff },
13187
13188 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13189 };
13190
13191 is_5705 = is_5750 = 0;
13192 if (tg3_flag(tp, 5705_PLUS)) {
13193 is_5705 = 1;
13194 if (tg3_flag(tp, 5750_PLUS))
13195 is_5750 = 1;
13196 }
13197
13198 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13199 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13200 continue;
13201
13202 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13203 continue;
13204
13205 if (tg3_flag(tp, IS_5788) &&
13206 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13207 continue;
13208
13209 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13210 continue;
13211
13212 offset = (u32) reg_tbl[i].offset;
13213 read_mask = reg_tbl[i].read_mask;
13214 write_mask = reg_tbl[i].write_mask;
13215
13216 /* Save the original register content */
13217 save_val = tr32(offset);
13218
13219 /* Determine the read-only value. */
13220 read_val = save_val & read_mask;
13221
13222 /* Write zero to the register, then make sure the read-only bits
13223 * are not changed and the read/write bits are all zeros.
13224 */
13225 tw32(offset, 0);
13226
13227 val = tr32(offset);
13228
13229 /* Test the read-only and read/write bits. */
13230 if (((val & read_mask) != read_val) || (val & write_mask))
13231 goto out;
13232
13233 /* Write ones to all the bits defined by RdMask and WrMask, then
13234 * make sure the read-only bits are not changed and the
13235 * read/write bits are all ones.
13236 */
13237 tw32(offset, read_mask | write_mask);
13238
13239 val = tr32(offset);
13240
13241 /* Test the read-only bits. */
13242 if ((val & read_mask) != read_val)
13243 goto out;
13244
13245 /* Test the read/write bits. */
13246 if ((val & write_mask) != write_mask)
13247 goto out;
13248
13249 tw32(offset, save_val);
13250 }
13251
13252 return 0;
13253
13254 out:
13255 if (netif_msg_hw(tp))
13256 netdev_err(tp->dev,
13257 "Register test failed at offset %x\n", offset);
13258 tw32(offset, save_val);
13259 return -EIO;
13260 }
13261
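/* Exercise a window of on-chip memory: write each of three test
 * patterns (all zeros, all ones, alternating bits) to every 32-bit
 * word in [offset, offset + len) and verify the readback.
 */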
13262 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13263 {
13264 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13265 int i;
13266 u32 j;
13267
13268 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13269 for (j = 0; j < len; j += 4) {
13270 u32 val;
13271
13272 tg3_write_mem(tp, offset + j, test_pattern[i]);
13273 tg3_read_mem(tp, offset + j, &val);
13274 if (val != test_pattern[i])
13275 return -EIO;
13276 }
13277 }
13278 return 0;
13279 }
13280
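/* Walk the internal SRAM map for this chip family.  Each table lists
 * { offset, len } windows of testable on-chip memory, terminated by an
 * offset of 0xffffffff, and every window is run through
 * tg3_do_mem_test() above.
 */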
13281 static int tg3_test_memory(struct tg3 *tp)
13282 {
13283 static struct mem_entry {
13284 u32 offset;
13285 u32 len;
13286 } mem_tbl_570x[] = {
13287 { 0x00000000, 0x00b50},
13288 { 0x00002000, 0x1c000},
13289 { 0xffffffff, 0x00000}
13290 }, mem_tbl_5705[] = {
13291 { 0x00000100, 0x0000c},
13292 { 0x00000200, 0x00008},
13293 { 0x00004000, 0x00800},
13294 { 0x00006000, 0x01000},
13295 { 0x00008000, 0x02000},
13296 { 0x00010000, 0x0e000},
13297 { 0xffffffff, 0x00000}
13298 }, mem_tbl_5755[] = {
13299 { 0x00000200, 0x00008},
13300 { 0x00004000, 0x00800},
13301 { 0x00006000, 0x00800},
13302 { 0x00008000, 0x02000},
13303 { 0x00010000, 0x0c000},
13304 { 0xffffffff, 0x00000}
13305 }, mem_tbl_5906[] = {
13306 { 0x00000200, 0x00008},
13307 { 0x00004000, 0x00400},
13308 { 0x00006000, 0x00400},
13309 { 0x00008000, 0x01000},
13310 { 0x00010000, 0x01000},
13311 { 0xffffffff, 0x00000}
13312 }, mem_tbl_5717[] = {
13313 { 0x00000200, 0x00008},
13314 { 0x00010000, 0x0a000},
13315 { 0x00020000, 0x13c00},
13316 { 0xffffffff, 0x00000}
13317 }, mem_tbl_57765[] = {
13318 { 0x00000200, 0x00008},
13319 { 0x00004000, 0x00800},
13320 { 0x00006000, 0x09800},
13321 { 0x00010000, 0x0a000},
13322 { 0xffffffff, 0x00000}
13323 };
13324 struct mem_entry *mem_tbl;
13325 int err = 0;
13326 int i;
13327
13328 if (tg3_flag(tp, 5717_PLUS))
13329 mem_tbl = mem_tbl_5717;
13330 else if (tg3_flag(tp, 57765_CLASS) ||
13331 tg3_asic_rev(tp) == ASIC_REV_5762)
13332 mem_tbl = mem_tbl_57765;
13333 else if (tg3_flag(tp, 5755_PLUS))
13334 mem_tbl = mem_tbl_5755;
13335 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13336 mem_tbl = mem_tbl_5906;
13337 else if (tg3_flag(tp, 5705_PLUS))
13338 mem_tbl = mem_tbl_5705;
13339 else
13340 mem_tbl = mem_tbl_570x;
13341
13342 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13343 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13344 if (err)
13345 break;
13346 }
13347
13348 return err;
13349 }
13350
13351 #define TG3_TSO_MSS 500
13352
13353 #define TG3_TSO_IP_HDR_LEN 20
13354 #define TG3_TSO_TCP_HDR_LEN 20
13355 #define TG3_TSO_TCP_OPT_LEN 12
13356
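/* Canned 54-byte frame template used by the TSO loopback test: the
 * 0x0800 (IPv4) ethertype, a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2),
 * a 20-byte TCP header, and a 12-byte options block carrying a TCP
 * timestamp option.  The MAC addresses are filled in at runtime by
 * tg3_run_loopback().
 */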
13357 static const u8 tg3_tso_header[] = {
13358 0x08, 0x00,
13359 0x45, 0x00, 0x00, 0x00,
13360 0x00, 0x00, 0x40, 0x00,
13361 0x40, 0x06, 0x00, 0x00,
13362 0x0a, 0x00, 0x00, 0x01,
13363 0x0a, 0x00, 0x00, 0x02,
13364 0x0d, 0x00, 0xe0, 0x00,
13365 0x00, 0x00, 0x01, 0x00,
13366 0x00, 0x00, 0x02, 0x00,
13367 0x80, 0x10, 0x10, 0x00,
13368 0x14, 0x09, 0x00, 0x00,
13369 0x01, 0x01, 0x08, 0x0a,
13370 0x11, 0x11, 0x11, 0x11,
13371 0x11, 0x11, 0x11, 0x11,
13372 };
13373
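/* Core of the loopback self-test: build a single test frame (or a TSO
 * super-frame when tso_loopback is set), post it on the transmit ring,
 * force a coalescing interrupt, then poll the receive return ring and
 * verify that the frame came back with the expected length, ring
 * placement, and payload byte pattern.
 */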
13374 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13375 {
13376 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13377 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13378 u32 budget;
13379 struct sk_buff *skb;
13380 u8 *tx_data, *rx_data;
13381 dma_addr_t map;
13382 int num_pkts, tx_len, rx_len, i, err;
13383 struct tg3_rx_buffer_desc *desc;
13384 struct tg3_napi *tnapi, *rnapi;
13385 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13386
13387 tnapi = &tp->napi[0];
13388 rnapi = &tp->napi[0];
13389 if (tp->irq_cnt > 1) {
13390 if (tg3_flag(tp, ENABLE_RSS))
13391 rnapi = &tp->napi[1];
13392 if (tg3_flag(tp, ENABLE_TSS))
13393 tnapi = &tp->napi[1];
13394 }
13395 coal_now = tnapi->coal_now | rnapi->coal_now;
13396
13397 err = -EIO;
13398
13399 tx_len = pktsz;
13400 skb = netdev_alloc_skb(tp->dev, tx_len);
13401 if (!skb)
13402 return -ENOMEM;
13403
13404 tx_data = skb_put(skb, tx_len);
13405 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13406 memset(tx_data + ETH_ALEN, 0x0, 8);
13407
13408 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13409
13410 if (tso_loopback) {
13411 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13412
13413 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13414 TG3_TSO_TCP_OPT_LEN;
13415
13416 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13417 sizeof(tg3_tso_header));
13418 mss = TG3_TSO_MSS;
13419
13420 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13421 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13422
13423 /* Set the total length field in the IP header */
13424 iph->tot_len = htons((u16)(mss + hdr_len));
13425
13426 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13427 TXD_FLAG_CPU_POST_DMA);
13428
13429 if (tg3_flag(tp, HW_TSO_1) ||
13430 tg3_flag(tp, HW_TSO_2) ||
13431 tg3_flag(tp, HW_TSO_3)) {
13432 struct tcphdr *th;
13433 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13434 th = (struct tcphdr *)&tx_data[val];
13435 th->check = 0;
13436 } else
13437 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13438
13439 if (tg3_flag(tp, HW_TSO_3)) {
13440 mss |= (hdr_len & 0xc) << 12;
13441 if (hdr_len & 0x10)
13442 base_flags |= 0x00000010;
13443 base_flags |= (hdr_len & 0x3e0) << 5;
13444 } else if (tg3_flag(tp, HW_TSO_2))
13445 mss |= hdr_len << 9;
13446 else if (tg3_flag(tp, HW_TSO_1) ||
13447 tg3_asic_rev(tp) == ASIC_REV_5705) {
13448 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13449 } else {
13450 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13451 }
13452
13453 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13454 } else {
13455 num_pkts = 1;
13456 data_off = ETH_HLEN;
13457
13458 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13459 tx_len > VLAN_ETH_FRAME_LEN)
13460 base_flags |= TXD_FLAG_JMB_PKT;
13461 }
13462
13463 for (i = data_off; i < tx_len; i++)
13464 tx_data[i] = (u8) (i & 0xff);
13465
13466 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13467 if (dma_mapping_error(&tp->pdev->dev, map)) {
13468 dev_kfree_skb(skb);
13469 return -EIO;
13470 }
13471
13472 val = tnapi->tx_prod;
13473 tnapi->tx_buffers[val].skb = skb;
13474 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13475
13476 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13477 rnapi->coal_now);
13478
13479 udelay(10);
13480
13481 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13482
13483 budget = tg3_tx_avail(tnapi);
13484 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13485 base_flags | TXD_FLAG_END, mss, 0)) {
13486 tnapi->tx_buffers[val].skb = NULL;
13487 dev_kfree_skb(skb);
13488 return -EIO;
13489 }
13490
13491 tnapi->tx_prod++;
13492
13493 /* Sync BD data before updating mailbox */
13494 wmb();
13495
13496 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13497 tr32_mailbox(tnapi->prodmbox);
13498
13499 udelay(10);
13500
13501 	/* Poll up to 350 usec to allow enough time on some 10/100 Mbps devices. */
13502 for (i = 0; i < 35; i++) {
13503 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13504 coal_now);
13505
13506 udelay(10);
13507
13508 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13509 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13510 if ((tx_idx == tnapi->tx_prod) &&
13511 (rx_idx == (rx_start_idx + num_pkts)))
13512 break;
13513 }
13514
13515 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13516 dev_kfree_skb(skb);
13517
13518 if (tx_idx != tnapi->tx_prod)
13519 goto out;
13520
13521 if (rx_idx != rx_start_idx + num_pkts)
13522 goto out;
13523
13524 val = data_off;
13525 while (rx_idx != rx_start_idx) {
13526 desc = &rnapi->rx_rcb[rx_start_idx++];
13527 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13528 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13529
13530 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13531 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13532 goto out;
13533
13534 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13535 - ETH_FCS_LEN;
13536
13537 if (!tso_loopback) {
13538 if (rx_len != tx_len)
13539 goto out;
13540
13541 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13542 if (opaque_key != RXD_OPAQUE_RING_STD)
13543 goto out;
13544 } else {
13545 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13546 goto out;
13547 }
13548 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13549 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13550 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13551 goto out;
13552 }
13553
13554 if (opaque_key == RXD_OPAQUE_RING_STD) {
13555 rx_data = tpr->rx_std_buffers[desc_idx].data;
13556 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13557 mapping);
13558 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13559 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13560 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13561 mapping);
13562 } else
13563 goto out;
13564
13565 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13566 DMA_FROM_DEVICE);
13567
13568 rx_data += TG3_RX_OFFSET(tp);
13569 for (i = data_off; i < rx_len; i++, val++) {
13570 if (*(rx_data + i) != (u8) (val & 0xff))
13571 goto out;
13572 }
13573 }
13574
13575 err = 0;
13576
13577 /* tg3_free_rings will unmap and free the rx_data */
13578 out:
13579 return err;
13580 }
13581
13582 #define TG3_STD_LOOPBACK_FAILED 1
13583 #define TG3_JMB_LOOPBACK_FAILED 2
13584 #define TG3_TSO_LOOPBACK_FAILED 4
13585 #define TG3_LOOPBACK_FAILED \
13586 (TG3_STD_LOOPBACK_FAILED | \
13587 TG3_JMB_LOOPBACK_FAILED | \
13588 TG3_TSO_LOOPBACK_FAILED)
13589
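/* Run the standard, jumbo, and TSO loopback variants in MAC loopback,
 * internal PHY loopback, and (optionally) external loopback modes,
 * accumulating TG3_*_LOOPBACK_FAILED bits in the per-mode data[] slots.
 * The EEE capability flag is masked for the duration of the test so
 * low-power link states cannot disturb the results.
 */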
13590 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13591 {
13592 int err = -EIO;
13593 u32 eee_cap;
13594 u32 jmb_pkt_sz = 9000;
13595
13596 if (tp->dma_limit)
13597 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13598
13599 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13600 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13601
13602 if (!netif_running(tp->dev)) {
13603 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13604 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13605 if (do_extlpbk)
13606 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13607 goto done;
13608 }
13609
13610 err = tg3_reset_hw(tp, true);
13611 if (err) {
13612 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13613 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13614 if (do_extlpbk)
13615 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13616 goto done;
13617 }
13618
13619 if (tg3_flag(tp, ENABLE_RSS)) {
13620 int i;
13621
13622 /* Reroute all rx packets to the 1st queue */
13623 for (i = MAC_RSS_INDIR_TBL_0;
13624 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13625 tw32(i, 0x0);
13626 }
13627
13628 	/* HW errata - MAC loopback fails in some cases on 5780.
13629 	 * Normal traffic and PHY loopback are not affected by the
13630 	 * errata.  Also, the MAC loopback test is deprecated for
13631 	 * all newer ASIC revisions.
13632 	 */
13633 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13634 !tg3_flag(tp, CPMU_PRESENT)) {
13635 tg3_mac_loopback(tp, true);
13636
13637 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13638 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13639
13640 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13641 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13642 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13643
13644 tg3_mac_loopback(tp, false);
13645 }
13646
13647 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13648 !tg3_flag(tp, USE_PHYLIB)) {
13649 int i;
13650
13651 tg3_phy_lpbk_set(tp, 0, false);
13652
13653 /* Wait for link */
13654 for (i = 0; i < 100; i++) {
13655 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13656 break;
13657 mdelay(1);
13658 }
13659
13660 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13661 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13662 if (tg3_flag(tp, TSO_CAPABLE) &&
13663 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13664 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13665 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13666 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13667 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13668
13669 if (do_extlpbk) {
13670 tg3_phy_lpbk_set(tp, 0, true);
13671
13672 /* All link indications report up, but the hardware
13673 * isn't really ready for about 20 msec. Double it
13674 * to be sure.
13675 */
13676 mdelay(40);
13677
13678 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13679 data[TG3_EXT_LOOPB_TEST] |=
13680 TG3_STD_LOOPBACK_FAILED;
13681 if (tg3_flag(tp, TSO_CAPABLE) &&
13682 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13683 data[TG3_EXT_LOOPB_TEST] |=
13684 TG3_TSO_LOOPBACK_FAILED;
13685 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13686 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13687 data[TG3_EXT_LOOPB_TEST] |=
13688 TG3_JMB_LOOPBACK_FAILED;
13689 }
13690
13691 /* Re-enable gphy autopowerdown. */
13692 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13693 tg3_phy_toggle_apd(tp, true);
13694 }
13695
13696 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13697 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13698
13699 done:
13700 tp->phy_flags |= eee_cap;
13701
13702 return err;
13703 }
13704
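/* ethtool self-test entry point (e.g. "ethtool -t ethX offline").  The
 * NVRAM and link tests run online; with ETH_TEST_FL_OFFLINE set, the
 * chip is halted so the register, memory, loopback, and interrupt
 * tests can run, after which the hardware is restarted.
 */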
13705 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13706 u64 *data)
13707 {
13708 struct tg3 *tp = netdev_priv(dev);
13709 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13710
13711 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13712 if (tg3_power_up(tp)) {
13713 etest->flags |= ETH_TEST_FL_FAILED;
13714 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13715 return;
13716 }
13717 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13718 }
13719
13720 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13721
13722 if (tg3_test_nvram(tp) != 0) {
13723 etest->flags |= ETH_TEST_FL_FAILED;
13724 data[TG3_NVRAM_TEST] = 1;
13725 }
13726 if (!doextlpbk && tg3_test_link(tp)) {
13727 etest->flags |= ETH_TEST_FL_FAILED;
13728 data[TG3_LINK_TEST] = 1;
13729 }
13730 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13731 int err, err2 = 0, irq_sync = 0;
13732
13733 if (netif_running(dev)) {
13734 tg3_phy_stop(tp);
13735 tg3_netif_stop(tp);
13736 irq_sync = 1;
13737 }
13738
13739 tg3_full_lock(tp, irq_sync);
13740 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13741 err = tg3_nvram_lock(tp);
13742 tg3_halt_cpu(tp, RX_CPU_BASE);
13743 if (!tg3_flag(tp, 5705_PLUS))
13744 tg3_halt_cpu(tp, TX_CPU_BASE);
13745 if (!err)
13746 tg3_nvram_unlock(tp);
13747
13748 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13749 tg3_phy_reset(tp);
13750
13751 if (tg3_test_registers(tp) != 0) {
13752 etest->flags |= ETH_TEST_FL_FAILED;
13753 data[TG3_REGISTER_TEST] = 1;
13754 }
13755
13756 if (tg3_test_memory(tp) != 0) {
13757 etest->flags |= ETH_TEST_FL_FAILED;
13758 data[TG3_MEMORY_TEST] = 1;
13759 }
13760
13761 if (doextlpbk)
13762 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13763
13764 if (tg3_test_loopback(tp, data, doextlpbk))
13765 etest->flags |= ETH_TEST_FL_FAILED;
13766
13767 tg3_full_unlock(tp);
13768
13769 if (tg3_test_interrupt(tp) != 0) {
13770 etest->flags |= ETH_TEST_FL_FAILED;
13771 data[TG3_INTERRUPT_TEST] = 1;
13772 }
13773
13774 tg3_full_lock(tp, 0);
13775
13776 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13777 if (netif_running(dev)) {
13778 tg3_flag_set(tp, INIT_COMPLETE);
13779 err2 = tg3_restart_hw(tp, true);
13780 if (!err2)
13781 tg3_netif_start(tp);
13782 }
13783
13784 tg3_full_unlock(tp);
13785
13786 if (irq_sync && !err2)
13787 tg3_phy_start(tp);
13788 }
13789 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13790 tg3_power_down_prepare(tp);
13791
13792 }
13793
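/* SIOCSHWTSTAMP handler: translate the requested hwtstamp_config rx
 * filter into TG3_RX_PTP_CTL_* bits and latch the tx timestamp enable
 * flag.  A minimal userspace sketch (the device name and socket fd are
 * illustrative; see Documentation/networking/timestamping.rst):
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */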
13794 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13795 {
13796 struct tg3 *tp = netdev_priv(dev);
13797 struct hwtstamp_config stmpconf;
13798
13799 if (!tg3_flag(tp, PTP_CAPABLE))
13800 return -EOPNOTSUPP;
13801
13802 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13803 return -EFAULT;
13804
13805 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13806 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13807 return -ERANGE;
13808
13809 switch (stmpconf.rx_filter) {
13810 case HWTSTAMP_FILTER_NONE:
13811 tp->rxptpctl = 0;
13812 break;
13813 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13814 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13815 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13816 break;
13817 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13818 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13819 TG3_RX_PTP_CTL_SYNC_EVNT;
13820 break;
13821 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13822 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13823 TG3_RX_PTP_CTL_DELAY_REQ;
13824 break;
13825 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13826 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13827 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13828 break;
13829 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13830 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13831 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13832 break;
13833 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13834 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13835 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13836 break;
13837 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13838 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13839 TG3_RX_PTP_CTL_SYNC_EVNT;
13840 break;
13841 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13842 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13843 TG3_RX_PTP_CTL_SYNC_EVNT;
13844 break;
13845 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13846 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13847 TG3_RX_PTP_CTL_SYNC_EVNT;
13848 break;
13849 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13850 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13851 TG3_RX_PTP_CTL_DELAY_REQ;
13852 break;
13853 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13854 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13855 TG3_RX_PTP_CTL_DELAY_REQ;
13856 break;
13857 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13858 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13859 TG3_RX_PTP_CTL_DELAY_REQ;
13860 break;
13861 default:
13862 return -ERANGE;
13863 }
13864
13865 if (netif_running(dev) && tp->rxptpctl)
13866 tw32(TG3_RX_PTP_CTL,
13867 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13868
13869 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13870 tg3_flag_set(tp, TX_TSTAMP_EN);
13871 else
13872 tg3_flag_clear(tp, TX_TSTAMP_EN);
13873
13874 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13875 -EFAULT : 0;
13876 }
13877
13878 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13879 {
13880 struct tg3 *tp = netdev_priv(dev);
13881 struct hwtstamp_config stmpconf;
13882
13883 if (!tg3_flag(tp, PTP_CAPABLE))
13884 return -EOPNOTSUPP;
13885
13886 stmpconf.flags = 0;
13887 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13888 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13889
13890 switch (tp->rxptpctl) {
13891 case 0:
13892 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13893 break;
13894 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13895 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13896 break;
13897 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13898 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13899 break;
13900 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13901 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13902 break;
13903 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13904 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13905 break;
13906 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13907 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13908 break;
13909 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13910 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13911 break;
13912 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13913 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13914 break;
13915 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13916 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13917 break;
13918 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13919 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13920 break;
13921 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13922 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13923 break;
13924 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13925 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13926 break;
13927 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13928 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13929 break;
13930 default:
13931 WARN_ON_ONCE(1);
13932 return -ERANGE;
13933 }
13934
13935 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13936 -EFAULT : 0;
13937 }
13938
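/* Private ioctl dispatch: MII register access (SIOCxMIIxxx) and the
 * hardware timestamping requests above.  When phylib manages the PHY,
 * MII requests are forwarded to the PHY driver instead.
 */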
13939 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13940 {
13941 struct mii_ioctl_data *data = if_mii(ifr);
13942 struct tg3 *tp = netdev_priv(dev);
13943 int err;
13944
13945 if (tg3_flag(tp, USE_PHYLIB)) {
13946 struct phy_device *phydev;
13947 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13948 return -EAGAIN;
13949 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13950 return phy_mii_ioctl(phydev, ifr, cmd);
13951 }
13952
13953 switch (cmd) {
13954 case SIOCGMIIPHY:
13955 data->phy_id = tp->phy_addr;
13956
13957 fallthrough;
13958 case SIOCGMIIREG: {
13959 u32 mii_regval;
13960
13961 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13962 break; /* We have no PHY */
13963
13964 if (!netif_running(dev))
13965 return -EAGAIN;
13966
13967 spin_lock_bh(&tp->lock);
13968 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13969 data->reg_num & 0x1f, &mii_regval);
13970 spin_unlock_bh(&tp->lock);
13971
13972 data->val_out = mii_regval;
13973
13974 return err;
13975 }
13976
13977 case SIOCSMIIREG:
13978 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13979 break; /* We have no PHY */
13980
13981 if (!netif_running(dev))
13982 return -EAGAIN;
13983
13984 spin_lock_bh(&tp->lock);
13985 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13986 data->reg_num & 0x1f, data->val_in);
13987 spin_unlock_bh(&tp->lock);
13988
13989 return err;
13990
13991 case SIOCSHWTSTAMP:
13992 return tg3_hwtstamp_set(dev, ifr);
13993
13994 case SIOCGHWTSTAMP:
13995 return tg3_hwtstamp_get(dev, ifr);
13996
13997 default:
13998 /* do nothing */
13999 break;
14000 }
14001 return -EOPNOTSUPP;
14002 }
14003
14004 static int tg3_get_coalesce(struct net_device *dev,
14005 struct ethtool_coalesce *ec,
14006 struct kernel_ethtool_coalesce *kernel_coal,
14007 struct netlink_ext_ack *extack)
14008 {
14009 struct tg3 *tp = netdev_priv(dev);
14010
14011 memcpy(ec, &tp->coal, sizeof(*ec));
14012 return 0;
14013 }
14014
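/* Validate the requested interrupt coalescing against the hardware
 * limits before applying it (e.g. via "ethtool -C ethX rx-usecs 20
 * rx-frames 5").  Pre-5705 chips additionally expose the per-IRQ and
 * statistics-block knobs; on 5705+ parts those limits stay zero, so
 * nonzero requests for them are rejected.
 */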
14015 static int tg3_set_coalesce(struct net_device *dev,
14016 struct ethtool_coalesce *ec,
14017 struct kernel_ethtool_coalesce *kernel_coal,
14018 struct netlink_ext_ack *extack)
14019 {
14020 struct tg3 *tp = netdev_priv(dev);
14021 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14022 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14023
14024 if (!tg3_flag(tp, 5705_PLUS)) {
14025 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14026 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14027 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14028 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14029 }
14030
14031 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14032 (!ec->rx_coalesce_usecs) ||
14033 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14034 (!ec->tx_coalesce_usecs) ||
14035 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14036 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14037 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14038 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14039 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14040 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14041 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14042 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14043 return -EINVAL;
14044
14045 /* Only copy relevant parameters, ignore all others. */
14046 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14047 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14048 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14049 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14050 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14051 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14052 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14053 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14054 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14055
14056 if (netif_running(dev)) {
14057 tg3_full_lock(tp, 0);
14058 __tg3_set_coalesce(tp, &tp->coal);
14059 tg3_full_unlock(tp);
14060 }
14061 return 0;
14062 }
14063
14064 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14065 {
14066 struct tg3 *tp = netdev_priv(dev);
14067
14068 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14069 netdev_warn(tp->dev, "Board does not support EEE!\n");
14070 return -EOPNOTSUPP;
14071 }
14072
14073 if (edata->advertised != tp->eee.advertised) {
14074 netdev_warn(tp->dev,
14075 "Direct manipulation of EEE advertisement is not supported\n");
14076 return -EINVAL;
14077 }
14078
14079 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14080 netdev_warn(tp->dev,
14081 			    "Maximum supported Tx LPI timer is %#x\n",
14082 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14083 return -EINVAL;
14084 }
14085
14086 tp->eee = *edata;
14087
14088 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14089 tg3_warn_mgmt_link_flap(tp);
14090
14091 if (netif_running(tp->dev)) {
14092 tg3_full_lock(tp, 0);
14093 tg3_setup_eee(tp);
14094 tg3_phy_reset(tp);
14095 tg3_full_unlock(tp);
14096 }
14097
14098 return 0;
14099 }
14100
14101 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14102 {
14103 struct tg3 *tp = netdev_priv(dev);
14104
14105 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14106 netdev_warn(tp->dev,
14107 "Board does not support EEE!\n");
14108 return -EOPNOTSUPP;
14109 }
14110
14111 *edata = tp->eee;
14112 return 0;
14113 }
14114
14115 static const struct ethtool_ops tg3_ethtool_ops = {
14116 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14117 ETHTOOL_COALESCE_MAX_FRAMES |
14118 ETHTOOL_COALESCE_USECS_IRQ |
14119 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14120 ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14121 .get_drvinfo = tg3_get_drvinfo,
14122 .get_regs_len = tg3_get_regs_len,
14123 .get_regs = tg3_get_regs,
14124 .get_wol = tg3_get_wol,
14125 .set_wol = tg3_set_wol,
14126 .get_msglevel = tg3_get_msglevel,
14127 .set_msglevel = tg3_set_msglevel,
14128 .nway_reset = tg3_nway_reset,
14129 .get_link = ethtool_op_get_link,
14130 .get_eeprom_len = tg3_get_eeprom_len,
14131 .get_eeprom = tg3_get_eeprom,
14132 .set_eeprom = tg3_set_eeprom,
14133 .get_ringparam = tg3_get_ringparam,
14134 .set_ringparam = tg3_set_ringparam,
14135 .get_pauseparam = tg3_get_pauseparam,
14136 .set_pauseparam = tg3_set_pauseparam,
14137 .self_test = tg3_self_test,
14138 .get_strings = tg3_get_strings,
14139 .set_phys_id = tg3_set_phys_id,
14140 .get_ethtool_stats = tg3_get_ethtool_stats,
14141 .get_coalesce = tg3_get_coalesce,
14142 .set_coalesce = tg3_set_coalesce,
14143 .get_sset_count = tg3_get_sset_count,
14144 .get_rxnfc = tg3_get_rxnfc,
14145 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14146 .get_rxfh = tg3_get_rxfh,
14147 .set_rxfh = tg3_set_rxfh,
14148 .get_channels = tg3_get_channels,
14149 .set_channels = tg3_set_channels,
14150 .get_ts_info = tg3_get_ts_info,
14151 .get_eee = tg3_get_eee,
14152 .set_eee = tg3_set_eee,
14153 .get_link_ksettings = tg3_get_link_ksettings,
14154 .set_link_ksettings = tg3_set_link_ksettings,
14155 };
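/* The supported_coalesce_params mask above lets the ethtool core
 * reject requests that touch coalescing fields this driver does not
 * implement, before tg3_set_coalesce() is ever called.
 */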
14156
14157 static void tg3_get_stats64(struct net_device *dev,
14158 struct rtnl_link_stats64 *stats)
14159 {
14160 struct tg3 *tp = netdev_priv(dev);
14161
14162 spin_lock_bh(&tp->lock);
14163 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14164 *stats = tp->net_stats_prev;
14165 spin_unlock_bh(&tp->lock);
14166 return;
14167 }
14168
14169 tg3_get_nstats(tp, stats);
14170 spin_unlock_bh(&tp->lock);
14171 }
14172
14173 static void tg3_set_rx_mode(struct net_device *dev)
14174 {
14175 struct tg3 *tp = netdev_priv(dev);
14176
14177 if (!netif_running(dev))
14178 return;
14179
14180 tg3_full_lock(tp, 0);
14181 __tg3_set_rx_mode(dev);
14182 tg3_full_unlock(tp);
14183 }
14184
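/* Apply a new MTU to the flags that depend on it.  On 5780-class
 * chips, jumbo frames and TSO cannot be enabled together, so crossing
 * ETH_DATA_LEN toggles TSO_CAPABLE (and lets netdev_update_features()
 * refresh the advertised features); all other chips simply switch the
 * jumbo ring on or off.
 */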
14185 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14186 int new_mtu)
14187 {
14188 dev->mtu = new_mtu;
14189
14190 if (new_mtu > ETH_DATA_LEN) {
14191 if (tg3_flag(tp, 5780_CLASS)) {
14192 netdev_update_features(dev);
14193 tg3_flag_clear(tp, TSO_CAPABLE);
14194 } else {
14195 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14196 }
14197 } else {
14198 if (tg3_flag(tp, 5780_CLASS)) {
14199 tg3_flag_set(tp, TSO_CAPABLE);
14200 netdev_update_features(dev);
14201 }
14202 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14203 }
14204 }
14205
14206 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14207 {
14208 struct tg3 *tp = netdev_priv(dev);
14209 int err;
14210 bool reset_phy = false;
14211
14212 if (!netif_running(dev)) {
14213 		/* Nothing to do now; the new MTU takes
14214 		 * effect when the device is brought up.
14215 		 */
14216 tg3_set_mtu(dev, tp, new_mtu);
14217 return 0;
14218 }
14219
14220 tg3_phy_stop(tp);
14221
14222 tg3_netif_stop(tp);
14223
14224 tg3_set_mtu(dev, tp, new_mtu);
14225
14226 tg3_full_lock(tp, 1);
14227
14228 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14229
14230 	/* Reset the PHY, otherwise the read DMA engine will be left in a
14231 	 * mode that breaks all requests down into 256-byte chunks.
14232 	 */
14233 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14234 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14235 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14236 tg3_asic_rev(tp) == ASIC_REV_5720)
14237 reset_phy = true;
14238
14239 err = tg3_restart_hw(tp, reset_phy);
14240
14241 if (!err)
14242 tg3_netif_start(tp);
14243
14244 tg3_full_unlock(tp);
14245
14246 if (!err)
14247 tg3_phy_start(tp);
14248
14249 return err;
14250 }
14251
14252 static const struct net_device_ops tg3_netdev_ops = {
14253 .ndo_open = tg3_open,
14254 .ndo_stop = tg3_close,
14255 .ndo_start_xmit = tg3_start_xmit,
14256 .ndo_get_stats64 = tg3_get_stats64,
14257 .ndo_validate_addr = eth_validate_addr,
14258 .ndo_set_rx_mode = tg3_set_rx_mode,
14259 .ndo_set_mac_address = tg3_set_mac_addr,
14260 .ndo_eth_ioctl = tg3_ioctl,
14261 .ndo_tx_timeout = tg3_tx_timeout,
14262 .ndo_change_mtu = tg3_change_mtu,
14263 .ndo_fix_features = tg3_fix_features,
14264 .ndo_set_features = tg3_set_features,
14265 #ifdef CONFIG_NET_POLL_CONTROLLER
14266 .ndo_poll_controller = tg3_poll_controller,
14267 #endif
14268 };
14269
14270 static void tg3_get_eeprom_size(struct tg3 *tp)
14271 {
14272 u32 cursize, val, magic;
14273
14274 tp->nvram_size = EEPROM_CHIP_SIZE;
14275
14276 if (tg3_nvram_read(tp, 0, &magic) != 0)
14277 return;
14278
14279 if ((magic != TG3_EEPROM_MAGIC) &&
14280 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14281 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14282 return;
14283
14284 /*
14285 * Size the chip by reading offsets at increasing powers of two.
14286 * When we encounter our validation signature, we know the addressing
14287 * has wrapped around, and thus have our chip size.
14288 */
14289 cursize = 0x10;
14290
14291 while (cursize < tp->nvram_size) {
14292 if (tg3_nvram_read(tp, cursize, &val) != 0)
14293 return;
14294
14295 if (val == magic)
14296 break;
14297
14298 cursize <<= 1;
14299 }
14300
14301 tp->nvram_size = cursize;
14302 }
14303
14304 static void tg3_get_nvram_size(struct tg3 *tp)
14305 {
14306 u32 val;
14307
14308 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14309 return;
14310
14311 /* Selfboot format */
14312 if (val != TG3_EEPROM_MAGIC) {
14313 tg3_get_eeprom_size(tp);
14314 return;
14315 }
14316
14317 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14318 if (val != 0) {
14319 /* This is confusing. We want to operate on the
14320 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14321 * call will read from NVRAM and byteswap the data
14322 * according to the byteswapping settings for all
14323 * other register accesses. This ensures the data we
14324 * want will always reside in the lower 16-bits.
14325 * However, the data in NVRAM is in LE format, which
14326 * means the data from the NVRAM read will always be
14327 * opposite the endianness of the CPU. The 16-bit
14328 * byteswap then brings the data to CPU endianness.
14329 */
14330 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14331 return;
14332 }
14333 }
14334 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14335 }
14336
14337 static void tg3_get_nvram_info(struct tg3 *tp)
14338 {
14339 u32 nvcfg1;
14340
14341 nvcfg1 = tr32(NVRAM_CFG1);
14342 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14343 tg3_flag_set(tp, FLASH);
14344 } else {
14345 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14346 tw32(NVRAM_CFG1, nvcfg1);
14347 }
14348
14349 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14350 tg3_flag(tp, 5780_CLASS)) {
14351 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14352 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14353 tp->nvram_jedecnum = JEDEC_ATMEL;
14354 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14355 tg3_flag_set(tp, NVRAM_BUFFERED);
14356 break;
14357 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14358 tp->nvram_jedecnum = JEDEC_ATMEL;
14359 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14360 break;
14361 case FLASH_VENDOR_ATMEL_EEPROM:
14362 tp->nvram_jedecnum = JEDEC_ATMEL;
14363 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14364 tg3_flag_set(tp, NVRAM_BUFFERED);
14365 break;
14366 case FLASH_VENDOR_ST:
14367 tp->nvram_jedecnum = JEDEC_ST;
14368 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14369 tg3_flag_set(tp, NVRAM_BUFFERED);
14370 break;
14371 case FLASH_VENDOR_SAIFUN:
14372 tp->nvram_jedecnum = JEDEC_SAIFUN;
14373 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14374 break;
14375 case FLASH_VENDOR_SST_SMALL:
14376 case FLASH_VENDOR_SST_LARGE:
14377 tp->nvram_jedecnum = JEDEC_SST;
14378 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14379 break;
14380 }
14381 } else {
14382 tp->nvram_jedecnum = JEDEC_ATMEL;
14383 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14384 tg3_flag_set(tp, NVRAM_BUFFERED);
14385 }
14386 }
14387
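/* Translate the NVRAM_CFG1 page-size field into bytes.  The odd 264-
 * and 528-byte sizes correspond to Atmel DataFlash parts, which carry
 * 8 or 16 extra bytes per page; the callers below key their
 * address-translation handling off these two values.
 */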
14388 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14389 {
14390 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14391 case FLASH_5752PAGE_SIZE_256:
14392 tp->nvram_pagesize = 256;
14393 break;
14394 case FLASH_5752PAGE_SIZE_512:
14395 tp->nvram_pagesize = 512;
14396 break;
14397 case FLASH_5752PAGE_SIZE_1K:
14398 tp->nvram_pagesize = 1024;
14399 break;
14400 case FLASH_5752PAGE_SIZE_2K:
14401 tp->nvram_pagesize = 2048;
14402 break;
14403 case FLASH_5752PAGE_SIZE_4K:
14404 tp->nvram_pagesize = 4096;
14405 break;
14406 case FLASH_5752PAGE_SIZE_264:
14407 tp->nvram_pagesize = 264;
14408 break;
14409 case FLASH_5752PAGE_SIZE_528:
14410 tp->nvram_pagesize = 528;
14411 break;
14412 }
14413 }
14414
14415 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14416 {
14417 u32 nvcfg1;
14418
14419 nvcfg1 = tr32(NVRAM_CFG1);
14420
14421 /* NVRAM protection for TPM */
14422 if (nvcfg1 & (1 << 27))
14423 tg3_flag_set(tp, PROTECTED_NVRAM);
14424
14425 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14426 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14427 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14428 tp->nvram_jedecnum = JEDEC_ATMEL;
14429 tg3_flag_set(tp, NVRAM_BUFFERED);
14430 break;
14431 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14432 tp->nvram_jedecnum = JEDEC_ATMEL;
14433 tg3_flag_set(tp, NVRAM_BUFFERED);
14434 tg3_flag_set(tp, FLASH);
14435 break;
14436 case FLASH_5752VENDOR_ST_M45PE10:
14437 case FLASH_5752VENDOR_ST_M45PE20:
14438 case FLASH_5752VENDOR_ST_M45PE40:
14439 tp->nvram_jedecnum = JEDEC_ST;
14440 tg3_flag_set(tp, NVRAM_BUFFERED);
14441 tg3_flag_set(tp, FLASH);
14442 break;
14443 }
14444
14445 if (tg3_flag(tp, FLASH)) {
14446 tg3_nvram_get_pagesize(tp, nvcfg1);
14447 } else {
14448 /* For eeprom, set pagesize to maximum eeprom size */
14449 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14450
14451 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14452 tw32(NVRAM_CFG1, nvcfg1);
14453 }
14454 }
14455
14456 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14457 {
14458 u32 nvcfg1, protect = 0;
14459
14460 nvcfg1 = tr32(NVRAM_CFG1);
14461
14462 /* NVRAM protection for TPM */
14463 if (nvcfg1 & (1 << 27)) {
14464 tg3_flag_set(tp, PROTECTED_NVRAM);
14465 protect = 1;
14466 }
14467
14468 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14469 switch (nvcfg1) {
14470 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14471 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14472 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14473 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14474 tp->nvram_jedecnum = JEDEC_ATMEL;
14475 tg3_flag_set(tp, NVRAM_BUFFERED);
14476 tg3_flag_set(tp, FLASH);
14477 tp->nvram_pagesize = 264;
14478 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14479 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14480 tp->nvram_size = (protect ? 0x3e200 :
14481 TG3_NVRAM_SIZE_512KB);
14482 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14483 tp->nvram_size = (protect ? 0x1f200 :
14484 TG3_NVRAM_SIZE_256KB);
14485 else
14486 tp->nvram_size = (protect ? 0x1f200 :
14487 TG3_NVRAM_SIZE_128KB);
14488 break;
14489 case FLASH_5752VENDOR_ST_M45PE10:
14490 case FLASH_5752VENDOR_ST_M45PE20:
14491 case FLASH_5752VENDOR_ST_M45PE40:
14492 tp->nvram_jedecnum = JEDEC_ST;
14493 tg3_flag_set(tp, NVRAM_BUFFERED);
14494 tg3_flag_set(tp, FLASH);
14495 tp->nvram_pagesize = 256;
14496 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14497 tp->nvram_size = (protect ?
14498 TG3_NVRAM_SIZE_64KB :
14499 TG3_NVRAM_SIZE_128KB);
14500 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14501 tp->nvram_size = (protect ?
14502 TG3_NVRAM_SIZE_64KB :
14503 TG3_NVRAM_SIZE_256KB);
14504 else
14505 tp->nvram_size = (protect ?
14506 TG3_NVRAM_SIZE_128KB :
14507 TG3_NVRAM_SIZE_512KB);
14508 break;
14509 }
14510 }
14511
14512 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14513 {
14514 u32 nvcfg1;
14515
14516 nvcfg1 = tr32(NVRAM_CFG1);
14517
14518 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14519 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14520 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14521 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14522 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14523 tp->nvram_jedecnum = JEDEC_ATMEL;
14524 tg3_flag_set(tp, NVRAM_BUFFERED);
14525 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14526
14527 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14528 tw32(NVRAM_CFG1, nvcfg1);
14529 break;
14530 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14531 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14532 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14533 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14534 tp->nvram_jedecnum = JEDEC_ATMEL;
14535 tg3_flag_set(tp, NVRAM_BUFFERED);
14536 tg3_flag_set(tp, FLASH);
14537 tp->nvram_pagesize = 264;
14538 break;
14539 case FLASH_5752VENDOR_ST_M45PE10:
14540 case FLASH_5752VENDOR_ST_M45PE20:
14541 case FLASH_5752VENDOR_ST_M45PE40:
14542 tp->nvram_jedecnum = JEDEC_ST;
14543 tg3_flag_set(tp, NVRAM_BUFFERED);
14544 tg3_flag_set(tp, FLASH);
14545 tp->nvram_pagesize = 256;
14546 break;
14547 }
14548 }
14549
14550 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14551 {
14552 u32 nvcfg1, protect = 0;
14553
14554 nvcfg1 = tr32(NVRAM_CFG1);
14555
14556 /* NVRAM protection for TPM */
14557 if (nvcfg1 & (1 << 27)) {
14558 tg3_flag_set(tp, PROTECTED_NVRAM);
14559 protect = 1;
14560 }
14561
14562 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14563 switch (nvcfg1) {
14564 case FLASH_5761VENDOR_ATMEL_ADB021D:
14565 case FLASH_5761VENDOR_ATMEL_ADB041D:
14566 case FLASH_5761VENDOR_ATMEL_ADB081D:
14567 case FLASH_5761VENDOR_ATMEL_ADB161D:
14568 case FLASH_5761VENDOR_ATMEL_MDB021D:
14569 case FLASH_5761VENDOR_ATMEL_MDB041D:
14570 case FLASH_5761VENDOR_ATMEL_MDB081D:
14571 case FLASH_5761VENDOR_ATMEL_MDB161D:
14572 tp->nvram_jedecnum = JEDEC_ATMEL;
14573 tg3_flag_set(tp, NVRAM_BUFFERED);
14574 tg3_flag_set(tp, FLASH);
14575 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14576 tp->nvram_pagesize = 256;
14577 break;
14578 case FLASH_5761VENDOR_ST_A_M45PE20:
14579 case FLASH_5761VENDOR_ST_A_M45PE40:
14580 case FLASH_5761VENDOR_ST_A_M45PE80:
14581 case FLASH_5761VENDOR_ST_A_M45PE16:
14582 case FLASH_5761VENDOR_ST_M_M45PE20:
14583 case FLASH_5761VENDOR_ST_M_M45PE40:
14584 case FLASH_5761VENDOR_ST_M_M45PE80:
14585 case FLASH_5761VENDOR_ST_M_M45PE16:
14586 tp->nvram_jedecnum = JEDEC_ST;
14587 tg3_flag_set(tp, NVRAM_BUFFERED);
14588 tg3_flag_set(tp, FLASH);
14589 tp->nvram_pagesize = 256;
14590 break;
14591 }
14592
14593 if (protect) {
14594 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14595 } else {
14596 switch (nvcfg1) {
14597 case FLASH_5761VENDOR_ATMEL_ADB161D:
14598 case FLASH_5761VENDOR_ATMEL_MDB161D:
14599 case FLASH_5761VENDOR_ST_A_M45PE16:
14600 case FLASH_5761VENDOR_ST_M_M45PE16:
14601 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14602 break;
14603 case FLASH_5761VENDOR_ATMEL_ADB081D:
14604 case FLASH_5761VENDOR_ATMEL_MDB081D:
14605 case FLASH_5761VENDOR_ST_A_M45PE80:
14606 case FLASH_5761VENDOR_ST_M_M45PE80:
14607 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14608 break;
14609 case FLASH_5761VENDOR_ATMEL_ADB041D:
14610 case FLASH_5761VENDOR_ATMEL_MDB041D:
14611 case FLASH_5761VENDOR_ST_A_M45PE40:
14612 case FLASH_5761VENDOR_ST_M_M45PE40:
14613 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14614 break;
14615 case FLASH_5761VENDOR_ATMEL_ADB021D:
14616 case FLASH_5761VENDOR_ATMEL_MDB021D:
14617 case FLASH_5761VENDOR_ST_A_M45PE20:
14618 case FLASH_5761VENDOR_ST_M_M45PE20:
14619 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14620 break;
14621 }
14622 }
14623 }
14624
14625 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14626 {
14627 tp->nvram_jedecnum = JEDEC_ATMEL;
14628 tg3_flag_set(tp, NVRAM_BUFFERED);
14629 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14630 }
14631
14632 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14633 {
14634 u32 nvcfg1;
14635
14636 nvcfg1 = tr32(NVRAM_CFG1);
14637
14638 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14639 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14640 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14641 tp->nvram_jedecnum = JEDEC_ATMEL;
14642 tg3_flag_set(tp, NVRAM_BUFFERED);
14643 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14644
14645 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14646 tw32(NVRAM_CFG1, nvcfg1);
14647 return;
14648 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14649 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14650 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14651 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14652 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14653 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14654 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14655 tp->nvram_jedecnum = JEDEC_ATMEL;
14656 tg3_flag_set(tp, NVRAM_BUFFERED);
14657 tg3_flag_set(tp, FLASH);
14658
14659 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14660 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14661 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14662 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14663 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14664 break;
14665 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14666 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14667 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14668 break;
14669 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14670 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14671 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14672 break;
14673 }
14674 break;
14675 case FLASH_5752VENDOR_ST_M45PE10:
14676 case FLASH_5752VENDOR_ST_M45PE20:
14677 case FLASH_5752VENDOR_ST_M45PE40:
14678 tp->nvram_jedecnum = JEDEC_ST;
14679 tg3_flag_set(tp, NVRAM_BUFFERED);
14680 tg3_flag_set(tp, FLASH);
14681
14682 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14683 case FLASH_5752VENDOR_ST_M45PE10:
14684 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14685 break;
14686 case FLASH_5752VENDOR_ST_M45PE20:
14687 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14688 break;
14689 case FLASH_5752VENDOR_ST_M45PE40:
14690 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14691 break;
14692 }
14693 break;
14694 default:
14695 tg3_flag_set(tp, NO_NVRAM);
14696 return;
14697 }
14698
14699 tg3_nvram_get_pagesize(tp, nvcfg1);
14700 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14701 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14702 }
14703
14704
14705 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14706 {
14707 u32 nvcfg1;
14708
14709 nvcfg1 = tr32(NVRAM_CFG1);
14710
14711 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14712 case FLASH_5717VENDOR_ATMEL_EEPROM:
14713 case FLASH_5717VENDOR_MICRO_EEPROM:
14714 tp->nvram_jedecnum = JEDEC_ATMEL;
14715 tg3_flag_set(tp, NVRAM_BUFFERED);
14716 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14717
14718 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14719 tw32(NVRAM_CFG1, nvcfg1);
14720 return;
14721 case FLASH_5717VENDOR_ATMEL_MDB011D:
14722 case FLASH_5717VENDOR_ATMEL_ADB011B:
14723 case FLASH_5717VENDOR_ATMEL_ADB011D:
14724 case FLASH_5717VENDOR_ATMEL_MDB021D:
14725 case FLASH_5717VENDOR_ATMEL_ADB021B:
14726 case FLASH_5717VENDOR_ATMEL_ADB021D:
14727 case FLASH_5717VENDOR_ATMEL_45USPT:
14728 tp->nvram_jedecnum = JEDEC_ATMEL;
14729 tg3_flag_set(tp, NVRAM_BUFFERED);
14730 tg3_flag_set(tp, FLASH);
14731
14732 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14733 case FLASH_5717VENDOR_ATMEL_MDB021D:
14734 			/* Detect size with tg3_get_nvram_size() */
14735 break;
14736 case FLASH_5717VENDOR_ATMEL_ADB021B:
14737 case FLASH_5717VENDOR_ATMEL_ADB021D:
14738 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14739 break;
14740 default:
14741 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14742 break;
14743 }
14744 break;
14745 case FLASH_5717VENDOR_ST_M_M25PE10:
14746 case FLASH_5717VENDOR_ST_A_M25PE10:
14747 case FLASH_5717VENDOR_ST_M_M45PE10:
14748 case FLASH_5717VENDOR_ST_A_M45PE10:
14749 case FLASH_5717VENDOR_ST_M_M25PE20:
14750 case FLASH_5717VENDOR_ST_A_M25PE20:
14751 case FLASH_5717VENDOR_ST_M_M45PE20:
14752 case FLASH_5717VENDOR_ST_A_M45PE20:
14753 case FLASH_5717VENDOR_ST_25USPT:
14754 case FLASH_5717VENDOR_ST_45USPT:
14755 tp->nvram_jedecnum = JEDEC_ST;
14756 tg3_flag_set(tp, NVRAM_BUFFERED);
14757 tg3_flag_set(tp, FLASH);
14758
14759 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14760 case FLASH_5717VENDOR_ST_M_M25PE20:
14761 case FLASH_5717VENDOR_ST_M_M45PE20:
14762 			/* Detect size with tg3_get_nvram_size() */
14763 break;
14764 case FLASH_5717VENDOR_ST_A_M25PE20:
14765 case FLASH_5717VENDOR_ST_A_M45PE20:
14766 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14767 break;
14768 default:
14769 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14770 break;
14771 }
14772 break;
14773 default:
14774 tg3_flag_set(tp, NO_NVRAM);
14775 return;
14776 }
14777
14778 tg3_nvram_get_pagesize(tp, nvcfg1);
14779 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14780 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14781 }
14782
14783 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14784 {
14785 u32 nvcfg1, nvmpinstrp, nv_status;
14786
14787 nvcfg1 = tr32(NVRAM_CFG1);
14788 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14789
14790 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14791 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14792 tg3_flag_set(tp, NO_NVRAM);
14793 return;
14794 }
14795
14796 switch (nvmpinstrp) {
14797 case FLASH_5762_MX25L_100:
14798 case FLASH_5762_MX25L_200:
14799 case FLASH_5762_MX25L_400:
14800 case FLASH_5762_MX25L_800:
14801 case FLASH_5762_MX25L_160_320:
14802 tp->nvram_pagesize = 4096;
14803 tp->nvram_jedecnum = JEDEC_MACRONIX;
14804 tg3_flag_set(tp, NVRAM_BUFFERED);
14805 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14806 tg3_flag_set(tp, FLASH);
14807 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
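			/* The autosense status reports the flash size as a
			 * power-of-two megabyte count: decode the device-ID
			 * field and scale 1 << id up from megabytes to bytes.
			 */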
14808 tp->nvram_size =
14809 (1 << (nv_status >> AUTOSENSE_DEVID &
14810 AUTOSENSE_DEVID_MASK)
14811 << AUTOSENSE_SIZE_IN_MB);
14812 return;
14813
14814 case FLASH_5762_EEPROM_HD:
14815 nvmpinstrp = FLASH_5720_EEPROM_HD;
14816 break;
14817 case FLASH_5762_EEPROM_LD:
14818 nvmpinstrp = FLASH_5720_EEPROM_LD;
14819 break;
14820 case FLASH_5720VENDOR_M_ST_M45PE20:
14821 /* This pinstrap supports multiple sizes, so force it
14822 * to read the actual size from location 0xf0.
14823 */
14824 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14825 break;
14826 }
14827 }
14828
14829 switch (nvmpinstrp) {
14830 case FLASH_5720_EEPROM_HD:
14831 case FLASH_5720_EEPROM_LD:
14832 tp->nvram_jedecnum = JEDEC_ATMEL;
14833 tg3_flag_set(tp, NVRAM_BUFFERED);
14834
14835 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14836 tw32(NVRAM_CFG1, nvcfg1);
14837 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14838 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14839 else
14840 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14841 return;
14842 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14843 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14844 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14845 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14846 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14847 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14848 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14849 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14850 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14851 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14852 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14853 case FLASH_5720VENDOR_ATMEL_45USPT:
14854 tp->nvram_jedecnum = JEDEC_ATMEL;
14855 tg3_flag_set(tp, NVRAM_BUFFERED);
14856 tg3_flag_set(tp, FLASH);
14857
14858 switch (nvmpinstrp) {
14859 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14860 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14861 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14862 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14863 break;
14864 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14865 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14866 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14867 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14868 break;
14869 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14870 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14871 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14872 break;
14873 default:
14874 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14875 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14876 break;
14877 }
14878 break;
14879 case FLASH_5720VENDOR_M_ST_M25PE10:
14880 case FLASH_5720VENDOR_M_ST_M45PE10:
14881 case FLASH_5720VENDOR_A_ST_M25PE10:
14882 case FLASH_5720VENDOR_A_ST_M45PE10:
14883 case FLASH_5720VENDOR_M_ST_M25PE20:
14884 case FLASH_5720VENDOR_M_ST_M45PE20:
14885 case FLASH_5720VENDOR_A_ST_M25PE20:
14886 case FLASH_5720VENDOR_A_ST_M45PE20:
14887 case FLASH_5720VENDOR_M_ST_M25PE40:
14888 case FLASH_5720VENDOR_M_ST_M45PE40:
14889 case FLASH_5720VENDOR_A_ST_M25PE40:
14890 case FLASH_5720VENDOR_A_ST_M45PE40:
14891 case FLASH_5720VENDOR_M_ST_M25PE80:
14892 case FLASH_5720VENDOR_M_ST_M45PE80:
14893 case FLASH_5720VENDOR_A_ST_M25PE80:
14894 case FLASH_5720VENDOR_A_ST_M45PE80:
14895 case FLASH_5720VENDOR_ST_25USPT:
14896 case FLASH_5720VENDOR_ST_45USPT:
14897 tp->nvram_jedecnum = JEDEC_ST;
14898 tg3_flag_set(tp, NVRAM_BUFFERED);
14899 tg3_flag_set(tp, FLASH);
14900
14901 switch (nvmpinstrp) {
14902 case FLASH_5720VENDOR_M_ST_M25PE20:
14903 case FLASH_5720VENDOR_M_ST_M45PE20:
14904 case FLASH_5720VENDOR_A_ST_M25PE20:
14905 case FLASH_5720VENDOR_A_ST_M45PE20:
14906 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14907 break;
14908 case FLASH_5720VENDOR_M_ST_M25PE40:
14909 case FLASH_5720VENDOR_M_ST_M45PE40:
14910 case FLASH_5720VENDOR_A_ST_M25PE40:
14911 case FLASH_5720VENDOR_A_ST_M45PE40:
14912 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14913 break;
14914 case FLASH_5720VENDOR_M_ST_M25PE80:
14915 case FLASH_5720VENDOR_M_ST_M45PE80:
14916 case FLASH_5720VENDOR_A_ST_M25PE80:
14917 case FLASH_5720VENDOR_A_ST_M45PE80:
14918 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14919 break;
14920 default:
14921 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14922 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14923 break;
14924 }
14925 break;
14926 default:
14927 tg3_flag_set(tp, NO_NVRAM);
14928 return;
14929 }
14930
14931 tg3_nvram_get_pagesize(tp, nvcfg1);
14932 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14933 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14934
14935 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14936 u32 val;
14937
14938 if (tg3_nvram_read(tp, 0, &val))
14939 return;
14940
14941 if (val != TG3_EEPROM_MAGIC &&
14942 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14943 tg3_flag_set(tp, NO_NVRAM);
14944 }
14945 }
14946
14947 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14948 static void tg3_nvram_init(struct tg3 *tp)
14949 {
14950 if (tg3_flag(tp, IS_SSB_CORE)) {
14951 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14952 tg3_flag_clear(tp, NVRAM);
14953 tg3_flag_clear(tp, NVRAM_BUFFERED);
14954 tg3_flag_set(tp, NO_NVRAM);
14955 return;
14956 }
14957
14958 tw32_f(GRC_EEPROM_ADDR,
14959 (EEPROM_ADDR_FSM_RESET |
14960 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14961 EEPROM_ADDR_CLKPERD_SHIFT)));
14962
14963 msleep(1);
14964
14965 /* Enable seeprom accesses. */
14966 tw32_f(GRC_LOCAL_CTRL,
14967 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14968 udelay(100);
14969
14970 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14971 tg3_asic_rev(tp) != ASIC_REV_5701) {
14972 tg3_flag_set(tp, NVRAM);
14973
14974 if (tg3_nvram_lock(tp)) {
14975 netdev_warn(tp->dev,
14976 "Cannot get nvram lock, %s failed\n",
14977 __func__);
14978 return;
14979 }
14980 tg3_enable_nvram_access(tp);
14981
14982 tp->nvram_size = 0;
14983
14984 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14985 tg3_get_5752_nvram_info(tp);
14986 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14987 tg3_get_5755_nvram_info(tp);
14988 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14989 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14990 tg3_asic_rev(tp) == ASIC_REV_5785)
14991 tg3_get_5787_nvram_info(tp);
14992 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14993 tg3_get_5761_nvram_info(tp);
14994 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14995 tg3_get_5906_nvram_info(tp);
14996 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14997 tg3_flag(tp, 57765_CLASS))
14998 tg3_get_57780_nvram_info(tp);
14999 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15000 tg3_asic_rev(tp) == ASIC_REV_5719)
15001 tg3_get_5717_nvram_info(tp);
15002 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15003 tg3_asic_rev(tp) == ASIC_REV_5762)
15004 tg3_get_5720_nvram_info(tp);
15005 else
15006 tg3_get_nvram_info(tp);
15007
15008 if (tp->nvram_size == 0)
15009 tg3_get_nvram_size(tp);
15010
15011 tg3_disable_nvram_access(tp);
15012 tg3_nvram_unlock(tp);
15013
15014 } else {
15015 tg3_flag_clear(tp, NVRAM);
15016 tg3_flag_clear(tp, NVRAM_BUFFERED);
15017
15018 tg3_get_eeprom_size(tp);
15019 }
15020 }
15021
15022 struct subsys_tbl_ent {
15023 u16 subsys_vendor, subsys_devid;
15024 u32 phy_id;
15025 };
15026
15027 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15028 /* Broadcom boards. */
15029 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15030 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15031 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15032 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15033 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15034 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15035 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15036 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15037 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15038 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15039 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15040 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15041 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15042 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15043 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15044 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15045 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15046 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15047 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15048 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15049 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15050 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15051
15052 /* 3com boards. */
15053 { TG3PCI_SUBVENDOR_ID_3COM,
15054 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15055 { TG3PCI_SUBVENDOR_ID_3COM,
15056 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15057 { TG3PCI_SUBVENDOR_ID_3COM,
15058 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15059 { TG3PCI_SUBVENDOR_ID_3COM,
15060 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15061 { TG3PCI_SUBVENDOR_ID_3COM,
15062 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15063
15064 /* DELL boards. */
15065 { TG3PCI_SUBVENDOR_ID_DELL,
15066 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15067 { TG3PCI_SUBVENDOR_ID_DELL,
15068 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15069 { TG3PCI_SUBVENDOR_ID_DELL,
15070 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15071 { TG3PCI_SUBVENDOR_ID_DELL,
15072 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15073
15074 /* Compaq boards. */
15075 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15076 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15077 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15078 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15079 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15080 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15081 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15082 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15083 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15084 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15085
15086 /* IBM boards. */
15087 { TG3PCI_SUBVENDOR_ID_IBM,
15088 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15089 };
15090
15091 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15092 {
15093 int i;
15094
15095 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15096 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15097 tp->pdev->subsystem_vendor) &&
15098 (subsys_id_to_phy_id[i].subsys_devid ==
15099 tp->pdev->subsystem_device))
15100 return &subsys_id_to_phy_id[i];
15101 }
15102 return NULL;
15103 }
15104
15105 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15106 {
15107 u32 val;
15108
15109 tp->phy_id = TG3_PHY_ID_INVALID;
15110 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15111
15112 /* Assume an onboard device and WOL capable by default. */
15113 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15114 tg3_flag_set(tp, WOL_CAP);
15115
15116 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15117 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15118 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15119 tg3_flag_set(tp, IS_NIC);
15120 }
15121 val = tr32(VCPU_CFGSHDW);
15122 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15123 tg3_flag_set(tp, ASPM_WORKAROUND);
15124 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15125 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15126 tg3_flag_set(tp, WOL_ENABLE);
15127 device_set_wakeup_enable(&tp->pdev->dev, true);
15128 }
15129 goto done;
15130 }
15131
15132 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15133 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15134 u32 nic_cfg, led_cfg;
15135 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15136 u32 nic_phy_id, ver, eeprom_phy_id;
15137 int eeprom_phy_serdes = 0;
15138
15139 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15140 tp->nic_sram_data_cfg = nic_cfg;
15141
15142 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15143 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15144 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15145 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15146 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15147 (ver > 0) && (ver < 0x100))
15148 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15149
15150 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15151 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15152
15153 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15154 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15155 tg3_asic_rev(tp) == ASIC_REV_5720)
15156 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15157
15158 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15159 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15160 eeprom_phy_serdes = 1;
15161
15162 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15163 if (nic_phy_id != 0) {
15164 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15165 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15166
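/* Added note: this mirrors the MII_PHYSID1/MII_PHYSID2 packing used
 * in tg3_phy_probe() below; id1 supplies the upper OUI bits while
 * id2 supplies the remaining OUI bits plus the 10 model/revision
 * bits, combined into the driver's internal PHY ID format.
 */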
15167 eeprom_phy_id = (id1 >> 16) << 10;
15168 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15169 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15170 } else
15171 eeprom_phy_id = 0;
15172
15173 tp->phy_id = eeprom_phy_id;
15174 if (eeprom_phy_serdes) {
15175 if (!tg3_flag(tp, 5705_PLUS))
15176 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15177 else
15178 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15179 }
15180
15181 if (tg3_flag(tp, 5750_PLUS))
15182 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15183 SHASTA_EXT_LED_MODE_MASK);
15184 else
15185 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15186
15187 switch (led_cfg) {
15188 default:
15189 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15190 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15191 break;
15192
15193 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15194 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15195 break;
15196
15197 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15198 tp->led_ctrl = LED_CTRL_MODE_MAC;
15199
15200 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15201 * read from some older 5700/5701 bootcode.
15202 */
15203 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15204 tg3_asic_rev(tp) == ASIC_REV_5701)
15205 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15206
15207 break;
15208
15209 case SHASTA_EXT_LED_SHARED:
15210 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15211 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15212 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15213 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15214 LED_CTRL_MODE_PHY_2);
15215
15216 if (tg3_flag(tp, 5717_PLUS) ||
15217 tg3_asic_rev(tp) == ASIC_REV_5762)
15218 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15219 LED_CTRL_BLINK_RATE_MASK;
15220
15221 break;
15222
15223 case SHASTA_EXT_LED_MAC:
15224 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15225 break;
15226
15227 case SHASTA_EXT_LED_COMBO:
15228 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15229 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15230 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15231 LED_CTRL_MODE_PHY_2);
15232 break;
15233
15234 }
15235
15236 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15237 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15238 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15239 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15240
15241 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15242 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15243
15244 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15245 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15246 if ((tp->pdev->subsystem_vendor ==
15247 PCI_VENDOR_ID_ARIMA) &&
15248 (tp->pdev->subsystem_device == 0x205a ||
15249 tp->pdev->subsystem_device == 0x2063))
15250 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15251 } else {
15252 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15253 tg3_flag_set(tp, IS_NIC);
15254 }
15255
15256 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15257 tg3_flag_set(tp, ENABLE_ASF);
15258 if (tg3_flag(tp, 5750_PLUS))
15259 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15260 }
15261
15262 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15263 tg3_flag(tp, 5750_PLUS))
15264 tg3_flag_set(tp, ENABLE_APE);
15265
15266 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15267 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15268 tg3_flag_clear(tp, WOL_CAP);
15269
15270 if (tg3_flag(tp, WOL_CAP) &&
15271 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15272 tg3_flag_set(tp, WOL_ENABLE);
15273 device_set_wakeup_enable(&tp->pdev->dev, true);
15274 }
15275
15276 if (cfg2 & (1 << 17))
15277 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15278
15279 /* serdes signal pre-emphasis in register 0x590 is set by the
15280 * bootcode if bit 18 is set */
15281 if (cfg2 & (1 << 18))
15282 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15283
15284 if ((tg3_flag(tp, 57765_PLUS) ||
15285 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15286 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15287 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15288 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15289
15290 if (tg3_flag(tp, PCI_EXPRESS)) {
15291 u32 cfg3;
15292
15293 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15294 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15295 !tg3_flag(tp, 57765_PLUS) &&
15296 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15297 tg3_flag_set(tp, ASPM_WORKAROUND);
15298 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15299 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15300 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15301 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15302 }
15303
15304 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15305 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15306 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15307 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15308 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15309 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15310
15311 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15312 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15313 }
15314 done:
15315 if (tg3_flag(tp, WOL_CAP))
15316 device_set_wakeup_enable(&tp->pdev->dev,
15317 tg3_flag(tp, WOL_ENABLE));
15318 else
15319 device_set_wakeup_capable(&tp->pdev->dev, false);
15320 }
15321
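/* A rough outline of the APE OTP read sequence implemented below:
 * take the NVRAM lock, latch the byte address (offset * 8) along
 * with the CPU enable bit, kick off a read through the OTP control
 * register, then poll the status register for CMD_DONE for up to
 * roughly 1 ms before fetching the data word.
 */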
15322 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15323 {
15324 int i, err;
15325 u32 val2, off = offset * 8;
15326
15327 err = tg3_nvram_lock(tp);
15328 if (err)
15329 return err;
15330
15331 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15332 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15333 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15334 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15335 udelay(10);
15336
15337 for (i = 0; i < 100; i++) {
15338 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15339 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15340 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15341 break;
15342 }
15343 udelay(10);
15344 }
15345
15346 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15347
15348 tg3_nvram_unlock(tp);
15349 if (val2 & APE_OTP_STATUS_CMD_DONE)
15350 return 0;
15351
15352 return -EBUSY;
15353 }
15354
15355 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15356 {
15357 int i;
15358 u32 val;
15359
15360 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15361 tw32(OTP_CTRL, cmd);
15362
15363 /* Wait for up to 1 ms for command to execute. */
15364 for (i = 0; i < 100; i++) {
15365 val = tr32(OTP_STATUS);
15366 if (val & OTP_STATUS_CMD_DONE)
15367 break;
15368 udelay(10);
15369 }
15370
15371 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15372 }
15373
15374 /* Read the gphy configuration from the OTP region of the chip. The gphy
15375 * configuration is a 32-bit value that straddles the alignment boundary.
15376 * We do two 32-bit reads and then shift and merge the results.
15377 */
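/* Illustrative example (made-up values, not from hardware): if the
 * word read at MAGIC1 is 0x11112222 and the word read at MAGIC2 is
 * 0x33334444, the merged gphy config below is
 * (0x2222 << 16) | 0x3333 = 0x22223333.
 */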
15378 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15379 {
15380 u32 bhalf_otp, thalf_otp;
15381
15382 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15383
15384 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15385 return 0;
15386
15387 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15388
15389 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15390 return 0;
15391
15392 thalf_otp = tr32(OTP_READ_DATA);
15393
15394 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15395
15396 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15397 return 0;
15398
15399 bhalf_otp = tr32(OTP_READ_DATA);
15400
15401 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15402 }
15403
15404 static void tg3_phy_init_link_config(struct tg3 *tp)
15405 {
15406 u32 adv = ADVERTISED_Autoneg;
15407
15408 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15409 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15410 adv |= ADVERTISED_1000baseT_Half;
15411 adv |= ADVERTISED_1000baseT_Full;
15412 }
15413
15414 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15415 adv |= ADVERTISED_100baseT_Half |
15416 ADVERTISED_100baseT_Full |
15417 ADVERTISED_10baseT_Half |
15418 ADVERTISED_10baseT_Full |
15419 ADVERTISED_TP;
15420 else
15421 adv |= ADVERTISED_FIBRE;
15422
15423 tp->link_config.advertising = adv;
15424 tp->link_config.speed = SPEED_UNKNOWN;
15425 tp->link_config.duplex = DUPLEX_UNKNOWN;
15426 tp->link_config.autoneg = AUTONEG_ENABLE;
15427 tp->link_config.active_speed = SPEED_UNKNOWN;
15428 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15429
15430 tp->old_link = -1;
15431 }
15432
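/* Added summary of the PHY ID discovery order in tg3_phy_probe():
 * read PHYSID1/PHYSID2 over MII unless ASF/APE firmware owns the
 * PHY; if the result is not a known ID, fall back to the value
 * cached from the EEPROM by tg3_get_eeprom_hw_cfg(), and finally
 * to the hard-coded subsystem-ID table above.
 */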
15433 static int tg3_phy_probe(struct tg3 *tp)
15434 {
15435 u32 hw_phy_id_1, hw_phy_id_2;
15436 u32 hw_phy_id, hw_phy_id_masked;
15437 int err;
15438
15439 /* flow control autonegotiation is default behavior */
15440 tg3_flag_set(tp, PAUSE_AUTONEG);
15441 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15442
15443 if (tg3_flag(tp, ENABLE_APE)) {
15444 switch (tp->pci_fn) {
15445 case 0:
15446 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15447 break;
15448 case 1:
15449 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15450 break;
15451 case 2:
15452 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15453 break;
15454 case 3:
15455 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15456 break;
15457 }
15458 }
15459
15460 if (!tg3_flag(tp, ENABLE_ASF) &&
15461 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15462 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15463 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15464 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15465
15466 if (tg3_flag(tp, USE_PHYLIB))
15467 return tg3_phy_init(tp);
15468
15469 /* Reading the PHY ID register can conflict with ASF
15470 * firmware access to the PHY hardware.
15471 */
15472 err = 0;
15473 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15474 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15475 } else {
15476 /* Now read the physical PHY_ID from the chip and verify
15477 * that it is sane. If it doesn't look good, we fall back
15478 * to the value found in the eeprom area and, failing
15479 * that, the hard-coded subsystem-ID table.
15480 */
15481 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15482 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15483
15484 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15485 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15486 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15487
15488 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15489 }
15490
15491 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15492 tp->phy_id = hw_phy_id;
15493 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15494 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15495 else
15496 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15497 } else {
15498 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15499 /* Do nothing, phy ID already set up in
15500 * tg3_get_eeprom_hw_cfg().
15501 */
15502 } else {
15503 struct subsys_tbl_ent *p;
15504
15505 /* No eeprom signature? Try the hardcoded
15506 * subsys device table.
15507 */
15508 p = tg3_lookup_by_subsys(tp);
15509 if (p) {
15510 tp->phy_id = p->phy_id;
15511 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15512 /* For now we saw the IDs 0xbc050cd0,
15513 * 0xbc050f80 and 0xbc050c30 on devices
15514 * connected to a BCM4785, and there are
15515 * probably more. Just assume that the phy is
15516 * supported when it is connected to an SSB core
15517 * for now.
15518 */
15519 return -ENODEV;
15520 }
15521
15522 if (!tp->phy_id ||
15523 tp->phy_id == TG3_PHY_ID_BCM8002)
15524 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15525 }
15526 }
15527
15528 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15529 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15530 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15531 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15532 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15533 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15534 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15535 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15536 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15537 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15538
15539 tp->eee.supported = SUPPORTED_100baseT_Full |
15540 SUPPORTED_1000baseT_Full;
15541 tp->eee.advertised = ADVERTISED_100baseT_Full |
15542 ADVERTISED_1000baseT_Full;
15543 tp->eee.eee_enabled = 1;
15544 tp->eee.tx_lpi_enabled = 1;
15545 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15546 }
15547
15548 tg3_phy_init_link_config(tp);
15549
15550 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15551 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15552 !tg3_flag(tp, ENABLE_APE) &&
15553 !tg3_flag(tp, ENABLE_ASF)) {
15554 u32 bmsr, dummy;
15555
15556 tg3_readphy(tp, MII_BMSR, &bmsr);
15557 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15558 (bmsr & BMSR_LSTATUS))
15559 goto skip_phy_reset;
15560
15561 err = tg3_phy_reset(tp);
15562 if (err)
15563 return err;
15564
15565 tg3_phy_set_wirespeed(tp);
15566
15567 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15568 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15569 tp->link_config.flowctrl);
15570
15571 tg3_writephy(tp, MII_BMCR,
15572 BMCR_ANENABLE | BMCR_ANRESTART);
15573 }
15574 }
15575
15576 skip_phy_reset:
15577 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15578 err = tg3_init_5401phy_dsp(tp);
15579 if (err)
15580 return err;
15581
15582 err = tg3_init_5401phy_dsp(tp);
15583 }
15584
15585 return err;
15586 }
15587
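/* Added summary of the VPD parsing below: on boards whose read-only
 * MFR_ID keyword is "1028" (Dell), the vendor-specific V0 keyword
 * seeds the firmware version string; the PN keyword provides the
 * board part number, with per-ASIC string fallbacks when no usable
 * VPD is present.
 */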
15588 static void tg3_read_vpd(struct tg3 *tp)
15589 {
15590 u8 *vpd_data;
15591 unsigned int len, vpdlen;
15592 int i;
15593
15594 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15595 if (!vpd_data)
15596 goto out_no_vpd;
15597
15598 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15599 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15600 if (i < 0)
15601 goto partno;
15602
15603 if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15604 goto partno;
15605
15606 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15607 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15608 if (i < 0)
15609 goto partno;
15610
15611 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15612 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15613
15614 partno:
15615 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15616 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15617 if (i < 0)
15618 goto out_not_found;
15619
15620 if (len > TG3_BPN_SIZE)
15621 goto out_not_found;
15622
15623 memcpy(tp->board_part_number, &vpd_data[i], len);
15624
15625 out_not_found:
15626 kfree(vpd_data);
15627 if (tp->board_part_number[0])
15628 return;
15629
15630 out_no_vpd:
15631 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15632 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15633 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15634 strcpy(tp->board_part_number, "BCM5717");
15635 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15636 strcpy(tp->board_part_number, "BCM5718");
15637 else
15638 goto nomatch;
15639 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15640 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15641 strcpy(tp->board_part_number, "BCM57780");
15642 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15643 strcpy(tp->board_part_number, "BCM57760");
15644 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15645 strcpy(tp->board_part_number, "BCM57790");
15646 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15647 strcpy(tp->board_part_number, "BCM57788");
15648 else
15649 goto nomatch;
15650 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15651 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15652 strcpy(tp->board_part_number, "BCM57761");
15653 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15654 strcpy(tp->board_part_number, "BCM57765");
15655 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15656 strcpy(tp->board_part_number, "BCM57781");
15657 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15658 strcpy(tp->board_part_number, "BCM57785");
15659 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15660 strcpy(tp->board_part_number, "BCM57791");
15661 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15662 strcpy(tp->board_part_number, "BCM57795");
15663 else
15664 goto nomatch;
15665 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15666 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15667 strcpy(tp->board_part_number, "BCM57762");
15668 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15669 strcpy(tp->board_part_number, "BCM57766");
15670 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15671 strcpy(tp->board_part_number, "BCM57782");
15672 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15673 strcpy(tp->board_part_number, "BCM57786");
15674 else
15675 goto nomatch;
15676 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15677 strcpy(tp->board_part_number, "BCM95906");
15678 } else {
15679 nomatch:
15680 strcpy(tp->board_part_number, "none");
15681 }
15682 }
15683
15684 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15685 {
15686 u32 val;
15687
15688 if (tg3_nvram_read(tp, offset, &val) ||
15689 (val & 0xfc000000) != 0x0c000000 ||
15690 tg3_nvram_read(tp, offset + 4, &val) ||
15691 val != 0)
15692 return 0;
15693
15694 return 1;
15695 }
15696
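/* Added summary of the bootcode version discovery below: NVRAM word
 * 0x4 holds the image load address and word 0xc the image offset.
 * Newer images begin with a 0x0cxxxxxx opcode word followed by a
 * zero word and carry a 16-byte version string at a pointer stored
 * 8 bytes into the image; older images only expose a major/minor
 * pair at TG3_NVM_PTREV_BCVER.
 */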
15697 static void tg3_read_bc_ver(struct tg3 *tp)
15698 {
15699 u32 val, offset, start, ver_offset;
15700 int i, dst_off;
15701 bool newver = false;
15702
15703 if (tg3_nvram_read(tp, 0xc, &offset) ||
15704 tg3_nvram_read(tp, 0x4, &start))
15705 return;
15706
15707 offset = tg3_nvram_logical_addr(tp, offset);
15708
15709 if (tg3_nvram_read(tp, offset, &val))
15710 return;
15711
15712 if ((val & 0xfc000000) == 0x0c000000) {
15713 if (tg3_nvram_read(tp, offset + 4, &val))
15714 return;
15715
15716 if (val == 0)
15717 newver = true;
15718 }
15719
15720 dst_off = strlen(tp->fw_ver);
15721
15722 if (newver) {
15723 if (TG3_VER_SIZE - dst_off < 16 ||
15724 tg3_nvram_read(tp, offset + 8, &ver_offset))
15725 return;
15726
15727 offset = offset + ver_offset - start;
15728 for (i = 0; i < 16; i += 4) {
15729 __be32 v;
15730 if (tg3_nvram_read_be32(tp, offset + i, &v))
15731 return;
15732
15733 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15734 }
15735 } else {
15736 u32 major, minor;
15737
15738 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15739 return;
15740
15741 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15742 TG3_NVM_BCVER_MAJSFT;
15743 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15744 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15745 "v%d.%02d", major, minor);
15746 }
15747 }
15748
15749 static void tg3_read_hwsb_ver(struct tg3 *tp)
15750 {
15751 u32 val, major, minor;
15752
15753 /* Use native endian representation */
15754 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15755 return;
15756
15757 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15758 TG3_NVM_HWSB_CFG1_MAJSFT;
15759 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15760 TG3_NVM_HWSB_CFG1_MINSFT;
15761
15762 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15763 }
15764
15765 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15766 {
15767 u32 offset, major, minor, build;
15768
15769 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15770
15771 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15772 return;
15773
15774 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15775 case TG3_EEPROM_SB_REVISION_0:
15776 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15777 break;
15778 case TG3_EEPROM_SB_REVISION_2:
15779 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15780 break;
15781 case TG3_EEPROM_SB_REVISION_3:
15782 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15783 break;
15784 case TG3_EEPROM_SB_REVISION_4:
15785 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15786 break;
15787 case TG3_EEPROM_SB_REVISION_5:
15788 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15789 break;
15790 case TG3_EEPROM_SB_REVISION_6:
15791 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15792 break;
15793 default:
15794 return;
15795 }
15796
15797 if (tg3_nvram_read(tp, offset, &val))
15798 return;
15799
15800 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15801 TG3_EEPROM_SB_EDH_BLD_SHFT;
15802 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15803 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15804 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15805
15806 if (minor > 99 || build > 26)
15807 return;
15808
15809 offset = strlen(tp->fw_ver);
15810 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15811 " v%d.%02d", major, minor);
15812
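/* A nonzero build number is rendered as a letter suffix below:
 * with illustrative values major 1, minor 2, build 3, the full
 * string becomes "sb v1.02c" ('a' + 3 - 1).
 */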
15813 if (build > 0) {
15814 offset = strlen(tp->fw_ver);
15815 if (offset < TG3_VER_SIZE - 1)
15816 tp->fw_ver[offset] = 'a' + build - 1;
15817 }
15818 }
15819
15820 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15821 {
15822 u32 val, offset, start;
15823 int i, vlen;
15824
15825 for (offset = TG3_NVM_DIR_START;
15826 offset < TG3_NVM_DIR_END;
15827 offset += TG3_NVM_DIRENT_SIZE) {
15828 if (tg3_nvram_read(tp, offset, &val))
15829 return;
15830
15831 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15832 break;
15833 }
15834
15835 if (offset == TG3_NVM_DIR_END)
15836 return;
15837
15838 if (!tg3_flag(tp, 5705_PLUS))
15839 start = 0x08000000;
15840 else if (tg3_nvram_read(tp, offset - 4, &start))
15841 return;
15842
15843 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15844 !tg3_fw_img_is_valid(tp, offset) ||
15845 tg3_nvram_read(tp, offset + 8, &val))
15846 return;
15847
15848 offset += val - start;
15849
15850 vlen = strlen(tp->fw_ver);
15851
15852 tp->fw_ver[vlen++] = ',';
15853 tp->fw_ver[vlen++] = ' ';
15854
15855 for (i = 0; i < 4; i++) {
15856 __be32 v;
15857 if (tg3_nvram_read_be32(tp, offset, &v))
15858 return;
15859
15860 offset += sizeof(v);
15861
15862 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15863 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15864 break;
15865 }
15866
15867 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15868 vlen += sizeof(v);
15869 }
15870 }
15871
15872 static void tg3_probe_ncsi(struct tg3 *tp)
15873 {
15874 u32 apedata;
15875
15876 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15877 if (apedata != APE_SEG_SIG_MAGIC)
15878 return;
15879
15880 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15881 if (!(apedata & APE_FW_STATUS_READY))
15882 return;
15883
15884 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15885 tg3_flag_set(tp, APE_HAS_NCSI);
15886 }
15887
15888 static void tg3_read_dash_ver(struct tg3 *tp)
15889 {
15890 int vlen;
15891 u32 apedata;
15892 char *fwtype;
15893
15894 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15895
15896 if (tg3_flag(tp, APE_HAS_NCSI))
15897 fwtype = "NCSI";
15898 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15899 fwtype = "SMASH";
15900 else
15901 fwtype = "DASH";
15902
15903 vlen = strlen(tp->fw_ver);
15904
15905 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15906 fwtype,
15907 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15908 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15909 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15910 (apedata & APE_FW_VERSION_BLDMSK));
15911 }
15912
15913 static void tg3_read_otp_ver(struct tg3 *tp)
15914 {
15915 u32 val, val2;
15916
15917 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15918 return;
15919
15920 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15921 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15922 TG3_OTP_MAGIC0_VALID(val)) {
15923 u64 val64 = (u64) val << 32 | val2;
15924 u32 ver = 0;
15925 int i, vlen;
15926
15927 for (i = 0; i < 7; i++) {
15928 if ((val64 & 0xff) == 0)
15929 break;
15930 ver = val64 & 0xff;
15931 val64 >>= 8;
15932 }
15933 vlen = strlen(tp->fw_ver);
15934 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15935 }
15936 }
15937
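/* Added summary: tg3_read_fw_ver() dispatches on the NVRAM signature
 * word. TG3_EEPROM_MAGIC selects the bootcode parser, the _FW and
 * _HW magics select the selfboot parsers, and NO_NVRAM devices fall
 * back to the OTP-based version. ASF/APE management firmware
 * versions are appended afterwards when no VPD version was found.
 */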
15938 static void tg3_read_fw_ver(struct tg3 *tp)
15939 {
15940 u32 val;
15941 bool vpd_vers = false;
15942
15943 if (tp->fw_ver[0] != 0)
15944 vpd_vers = true;
15945
15946 if (tg3_flag(tp, NO_NVRAM)) {
15947 strcat(tp->fw_ver, "sb");
15948 tg3_read_otp_ver(tp);
15949 return;
15950 }
15951
15952 if (tg3_nvram_read(tp, 0, &val))
15953 return;
15954
15955 if (val == TG3_EEPROM_MAGIC)
15956 tg3_read_bc_ver(tp);
15957 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15958 tg3_read_sb_ver(tp, val);
15959 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15960 tg3_read_hwsb_ver(tp);
15961
15962 if (tg3_flag(tp, ENABLE_ASF)) {
15963 if (tg3_flag(tp, ENABLE_APE)) {
15964 tg3_probe_ncsi(tp);
15965 if (!vpd_vers)
15966 tg3_read_dash_ver(tp);
15967 } else if (!vpd_vers) {
15968 tg3_read_mgmtfw_ver(tp);
15969 }
15970 }
15971
15972 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15973 }
15974
15975 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15976 {
15977 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15978 return TG3_RX_RET_MAX_SIZE_5717;
15979 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15980 return TG3_RX_RET_MAX_SIZE_5700;
15981 else
15982 return TG3_RX_RET_MAX_SIZE_5705;
15983 }
15984
15985 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15986 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15987 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15988 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15989 { },
15990 };
15991
15992 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15993 {
15994 struct pci_dev *peer;
15995 unsigned int func, devnr = tp->pdev->devfn & ~7;
15996
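/* devfn & ~7 masks off the three PCI function-number bits, so the
 * loop below probes every function in this slot looking for the
 * other port of a dual-port (e.g. 5704) device.
 */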
15997 for (func = 0; func < 8; func++) {
15998 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15999 if (peer && peer != tp->pdev)
16000 break;
16001 pci_dev_put(peer);
16002 }
16003 /* 5704 can be configured in single-port mode, set peer to
16004 * tp->pdev in that case.
16005 */
16006 if (!peer) {
16007 peer = tp->pdev;
16008 return peer;
16009 }
16010
16011 /*
16012 * We don't need to keep the refcount elevated; there's no way
16013 * to remove one half of this device without removing the other
16014 */
16015 pci_dev_put(peer);
16016
16017 return peer;
16018 }
16019
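/* Added note: the chip revision normally sits in the upper bits of
 * the misc host control register. Devices that report
 * ASIC_REV_USE_PROD_ID_REG there instead publish the real revision
 * in one of the product-ID config registers, selected by PCI device
 * ID below.
 */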
16020 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16021 {
16022 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16023 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16024 u32 reg;
16025
16026 /* All devices that use the alternate
16027 * ASIC REV location have a CPMU.
16028 */
16029 tg3_flag_set(tp, CPMU_PRESENT);
16030
16031 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16032 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16033 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16034 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16035 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16036 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16037 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16038 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16039 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16040 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16041 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16042 reg = TG3PCI_GEN2_PRODID_ASICREV;
16043 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16044 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16045 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16046 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16047 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16048 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16049 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16050 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16051 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16052 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16053 reg = TG3PCI_GEN15_PRODID_ASICREV;
16054 else
16055 reg = TG3PCI_PRODID_ASICREV;
16056
16057 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16058 }
16059
16060 /* Wrong chip ID in 5752 A0. This code can be removed later
16061 * as A0 is not in production.
16062 */
16063 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16064 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16065
16066 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16067 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16068
16069 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16070 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16071 tg3_asic_rev(tp) == ASIC_REV_5720)
16072 tg3_flag_set(tp, 5717_PLUS);
16073
16074 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16075 tg3_asic_rev(tp) == ASIC_REV_57766)
16076 tg3_flag_set(tp, 57765_CLASS);
16077
16078 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16079 tg3_asic_rev(tp) == ASIC_REV_5762)
16080 tg3_flag_set(tp, 57765_PLUS);
16081
16082 /* Intentionally exclude ASIC_REV_5906 */
16083 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16084 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16085 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16086 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16087 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16088 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16089 tg3_flag(tp, 57765_PLUS))
16090 tg3_flag_set(tp, 5755_PLUS);
16091
16092 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16093 tg3_asic_rev(tp) == ASIC_REV_5714)
16094 tg3_flag_set(tp, 5780_CLASS);
16095
16096 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16097 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16098 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16099 tg3_flag(tp, 5755_PLUS) ||
16100 tg3_flag(tp, 5780_CLASS))
16101 tg3_flag_set(tp, 5750_PLUS);
16102
16103 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16104 tg3_flag(tp, 5750_PLUS))
16105 tg3_flag_set(tp, 5705_PLUS);
16106 }
16107
16108 static bool tg3_10_100_only_device(struct tg3 *tp,
16109 const struct pci_device_id *ent)
16110 {
16111 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16112
16113 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16114 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16115 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16116 return true;
16117
16118 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16119 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16120 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16121 return true;
16122 } else {
16123 return true;
16124 }
16125 }
16126
16127 return false;
16128 }
16129
16130 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16131 {
16132 u32 misc_ctrl_reg;
16133 u32 pci_state_reg, grc_misc_cfg;
16134 u32 val;
16135 u16 pci_cmd;
16136 int err;
16137
16138 /* Force memory write invalidate off. If we leave it on,
16139 * then on 5700_BX chips we have to enable a workaround.
16140 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16141 * to match the cacheline size. The Broadcom driver has this
16142 * workaround but turns MWI off all the time, so it never uses
16143 * it. This seems to suggest that the workaround is insufficient.
16144 */
16145 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16146 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16147 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16148
16149 /* Important! -- Make sure register accesses are byteswapped
16150 * correctly. Also, for those chips that require it, make
16151 * sure that indirect register accesses are enabled before
16152 * the first operation.
16153 */
16154 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16155 &misc_ctrl_reg);
16156 tp->misc_host_ctrl |= (misc_ctrl_reg &
16157 MISC_HOST_CTRL_CHIPREV);
16158 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16159 tp->misc_host_ctrl);
16160
16161 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16162
16163 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16164 * we need to disable memory and use config. cycles
16165 * only to access all registers. The 5702/03 chips
16166 * can mistakenly decode the special cycles from the
16167 * ICH chipsets as memory write cycles, causing corruption
16168 * of register and memory space. Only certain ICH bridges
16169 * will drive special cycles with non-zero data during the
16170 * address phase which can fall within the 5703's address
16171 * range. This is not an ICH bug as the PCI spec allows
16172 * non-zero address during special cycles. However, only
16173 * these ICH bridges are known to drive non-zero addresses
16174 * during special cycles.
16175 *
16176 * Since special cycles do not cross PCI bridges, we only
16177 * enable this workaround if the 5703 is on the secondary
16178 * bus of these ICH bridges.
16179 */
16180 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16181 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16182 static struct tg3_dev_id {
16183 u32 vendor;
16184 u32 device;
16185 u32 rev;
16186 } ich_chipsets[] = {
16187 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16188 PCI_ANY_ID },
16189 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16190 PCI_ANY_ID },
16191 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16192 0xa },
16193 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16194 PCI_ANY_ID },
16195 { },
16196 };
16197 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16198 struct pci_dev *bridge = NULL;
16199
16200 while (pci_id->vendor != 0) {
16201 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16202 bridge);
16203 if (!bridge) {
16204 pci_id++;
16205 continue;
16206 }
16207 if (pci_id->rev != PCI_ANY_ID) {
16208 if (bridge->revision > pci_id->rev)
16209 continue;
16210 }
16211 if (bridge->subordinate &&
16212 (bridge->subordinate->number ==
16213 tp->pdev->bus->number)) {
16214 tg3_flag_set(tp, ICH_WORKAROUND);
16215 pci_dev_put(bridge);
16216 break;
16217 }
16218 }
16219 }
16220
16221 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16222 static struct tg3_dev_id {
16223 u32 vendor;
16224 u32 device;
16225 } bridge_chipsets[] = {
16226 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16227 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16228 { },
16229 };
16230 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16231 struct pci_dev *bridge = NULL;
16232
16233 while (pci_id->vendor != 0) {
16234 bridge = pci_get_device(pci_id->vendor,
16235 pci_id->device,
16236 bridge);
16237 if (!bridge) {
16238 pci_id++;
16239 continue;
16240 }
16241 if (bridge->subordinate &&
16242 (bridge->subordinate->number <=
16243 tp->pdev->bus->number) &&
16244 (bridge->subordinate->busn_res.end >=
16245 tp->pdev->bus->number)) {
16246 tg3_flag_set(tp, 5701_DMA_BUG);
16247 pci_dev_put(bridge);
16248 break;
16249 }
16250 }
16251 }
16252
16253 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16254 * DMA addresses > 40-bit. This bridge may have other additional
16255 * 57xx devices behind it in some 4-port NIC designs for example.
16256 * Any tg3 device found behind the bridge will also need the 40-bit
16257 * DMA workaround.
16258 */
16259 if (tg3_flag(tp, 5780_CLASS)) {
16260 tg3_flag_set(tp, 40BIT_DMA_BUG);
16261 tp->msi_cap = tp->pdev->msi_cap;
16262 } else {
16263 struct pci_dev *bridge = NULL;
16264
16265 do {
16266 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16267 PCI_DEVICE_ID_SERVERWORKS_EPB,
16268 bridge);
16269 if (bridge && bridge->subordinate &&
16270 (bridge->subordinate->number <=
16271 tp->pdev->bus->number) &&
16272 (bridge->subordinate->busn_res.end >=
16273 tp->pdev->bus->number)) {
16274 tg3_flag_set(tp, 40BIT_DMA_BUG);
16275 pci_dev_put(bridge);
16276 break;
16277 }
16278 } while (bridge);
16279 }
16280
16281 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16282 tg3_asic_rev(tp) == ASIC_REV_5714)
16283 tp->pdev_peer = tg3_find_peer(tp);
16284
16285 /* Determine TSO capabilities */
16286 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16287 ; /* Do nothing. HW bug. */
16288 else if (tg3_flag(tp, 57765_PLUS))
16289 tg3_flag_set(tp, HW_TSO_3);
16290 else if (tg3_flag(tp, 5755_PLUS) ||
16291 tg3_asic_rev(tp) == ASIC_REV_5906)
16292 tg3_flag_set(tp, HW_TSO_2);
16293 else if (tg3_flag(tp, 5750_PLUS)) {
16294 tg3_flag_set(tp, HW_TSO_1);
16295 tg3_flag_set(tp, TSO_BUG);
16296 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16297 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16298 tg3_flag_clear(tp, TSO_BUG);
16299 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16300 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16301 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16302 tg3_flag_set(tp, FW_TSO);
16303 tg3_flag_set(tp, TSO_BUG);
16304 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16305 tp->fw_needed = FIRMWARE_TG3TSO5;
16306 else
16307 tp->fw_needed = FIRMWARE_TG3TSO;
16308 }
16309
16310 /* Selectively allow TSO based on operating conditions */
16311 if (tg3_flag(tp, HW_TSO_1) ||
16312 tg3_flag(tp, HW_TSO_2) ||
16313 tg3_flag(tp, HW_TSO_3) ||
16314 tg3_flag(tp, FW_TSO)) {
16315 /* For firmware TSO, assume ASF is disabled.
16316 * We'll disable TSO later if we discover ASF
16317 * is enabled in tg3_get_eeprom_hw_cfg().
16318 */
16319 tg3_flag_set(tp, TSO_CAPABLE);
16320 } else {
16321 tg3_flag_clear(tp, TSO_CAPABLE);
16322 tg3_flag_clear(tp, TSO_BUG);
16323 tp->fw_needed = NULL;
16324 }
16325
16326 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16327 tp->fw_needed = FIRMWARE_TG3;
16328
16329 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16330 tp->fw_needed = FIRMWARE_TG357766;
16331
16332 tp->irq_max = 1;
16333
16334 if (tg3_flag(tp, 5750_PLUS)) {
16335 tg3_flag_set(tp, SUPPORT_MSI);
16336 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16337 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16338 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16339 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16340 tp->pdev_peer == tp->pdev))
16341 tg3_flag_clear(tp, SUPPORT_MSI);
16342
16343 if (tg3_flag(tp, 5755_PLUS) ||
16344 tg3_asic_rev(tp) == ASIC_REV_5906) {
16345 tg3_flag_set(tp, 1SHOT_MSI);
16346 }
16347
16348 if (tg3_flag(tp, 57765_PLUS)) {
16349 tg3_flag_set(tp, SUPPORT_MSIX);
16350 tp->irq_max = TG3_IRQ_MAX_VECS;
16351 }
16352 }
16353
16354 tp->txq_max = 1;
16355 tp->rxq_max = 1;
16356 if (tp->irq_max > 1) {
16357 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16358 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16359
16360 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16361 tg3_asic_rev(tp) == ASIC_REV_5720)
16362 tp->txq_max = tp->irq_max - 1;
16363 }
16364
16365 if (tg3_flag(tp, 5755_PLUS) ||
16366 tg3_asic_rev(tp) == ASIC_REV_5906)
16367 tg3_flag_set(tp, SHORT_DMA_BUG);
16368
16369 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16370 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16371
16372 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16373 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16374 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16375 tg3_asic_rev(tp) == ASIC_REV_5762)
16376 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16377
16378 if (tg3_flag(tp, 57765_PLUS) &&
16379 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16380 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16381
16382 if (!tg3_flag(tp, 5705_PLUS) ||
16383 tg3_flag(tp, 5780_CLASS) ||
16384 tg3_flag(tp, USE_JUMBO_BDFLAG))
16385 tg3_flag_set(tp, JUMBO_CAPABLE);
16386
16387 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16388 &pci_state_reg);
16389
16390 if (pci_is_pcie(tp->pdev)) {
16391 u16 lnkctl;
16392
16393 tg3_flag_set(tp, PCI_EXPRESS);
16394
16395 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16396 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16397 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16398 tg3_flag_clear(tp, HW_TSO_2);
16399 tg3_flag_clear(tp, TSO_CAPABLE);
16400 }
16401 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16402 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16403 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16404 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16405 tg3_flag_set(tp, CLKREQ_BUG);
16406 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16407 tg3_flag_set(tp, L1PLLPD_EN);
16408 }
16409 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16410 /* BCM5785 devices are effectively PCIe devices, and should
16411 * follow PCIe codepaths, but do not have a PCIe capabilities
16412 * section.
16413 */
16414 tg3_flag_set(tp, PCI_EXPRESS);
16415 } else if (!tg3_flag(tp, 5705_PLUS) ||
16416 tg3_flag(tp, 5780_CLASS)) {
16417 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16418 if (!tp->pcix_cap) {
16419 dev_err(&tp->pdev->dev,
16420 "Cannot find PCI-X capability, aborting\n");
16421 return -EIO;
16422 }
16423
16424 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16425 tg3_flag_set(tp, PCIX_MODE);
16426 }
16427
16428 /* If we have an AMD 762 or VIA K8T800 chipset, write
16429 * reordering to the mailbox registers done by the host
16430 * controller can cause major troubles. We read back from
16431 * every mailbox register write to force the writes to be
16432 * posted to the chip in order.
16433 */
16434 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16435 !tg3_flag(tp, PCI_EXPRESS))
16436 tg3_flag_set(tp, MBOX_WRITE_REORDER);
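/* With MBOX_WRITE_REORDER set, mailbox writes are routed through
 * tg3_write_flush_reg32() (assigned below), which in sketch form is
 * assumed to be:
 *
 *     writel(val, tp->regs + off);
 *     readl(tp->regs + off);    (read back to force posting)
 *
 * The read-back defeats host-bridge write reordering at the cost of
 * an extra PCI read per mailbox write.
 */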
16437
16438 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16439 &tp->pci_cacheline_sz);
16440 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16441 &tp->pci_lat_timer);
16442 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16443 tp->pci_lat_timer < 64) {
16444 tp->pci_lat_timer = 64;
16445 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16446 tp->pci_lat_timer);
16447 }
16448
16449 /* Important! -- It is critical that the PCI-X hw workaround
16450 * situation is decided before the first MMIO register access.
16451 */
16452 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16453 /* 5700 BX chips need to have their TX producer index
16454 * mailboxes written twice to work around a bug.
16455 */
16456 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16457
16458 /* If we are in PCI-X mode, enable register write workaround.
16459 *
16460 * The workaround is to use indirect register accesses
16461 * for all chip writes not to mailbox registers.
16462 */
16463 if (tg3_flag(tp, PCIX_MODE)) {
16464 u32 pm_reg;
16465
16466 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16467
16468 /* The chip can have its power management PCI config
16469 * space registers clobbered due to this bug.
16470 * So explicitly force the chip into D0 here.
16471 */
16472 pci_read_config_dword(tp->pdev,
16473 tp->pdev->pm_cap + PCI_PM_CTRL,
16474 &pm_reg);
16475 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16476 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16477 pci_write_config_dword(tp->pdev,
16478 tp->pdev->pm_cap + PCI_PM_CTRL,
16479 pm_reg);
16480
16481 /* Also, force SERR#/PERR# in PCI command. */
16482 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16483 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16484 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16485 }
16486 }
16487
16488 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16489 tg3_flag_set(tp, PCI_HIGH_SPEED);
16490 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16491 tg3_flag_set(tp, PCI_32BIT);
16492
16493 /* Chip-specific fixup from Broadcom driver */
16494 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16495 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16496 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16497 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16498 }
16499
16500 /* Default fast path register access methods */
16501 tp->read32 = tg3_read32;
16502 tp->write32 = tg3_write32;
16503 tp->read32_mbox = tg3_read32;
16504 tp->write32_mbox = tg3_write32;
16505 tp->write32_tx_mbox = tg3_write32;
16506 tp->write32_rx_mbox = tg3_write32;
16507
16508 /* Various workaround register access methods */
16509 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16510 tp->write32 = tg3_write_indirect_reg32;
16511 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16512 (tg3_flag(tp, PCI_EXPRESS) &&
16513 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16514 /*
16515 * Back to back register writes can cause problems on these
16516 * chips, the workaround is to read back all reg writes
16517 * except those to mailbox regs.
16518 *
16519 * See tg3_write_indirect_reg32().
16520 */
16521 tp->write32 = tg3_write_flush_reg32;
16522 }
16523
16524 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16525 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16526 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16527 tp->write32_rx_mbox = tg3_write_flush_reg32;
16528 }
16529
16530 if (tg3_flag(tp, ICH_WORKAROUND)) {
16531 tp->read32 = tg3_read_indirect_reg32;
16532 tp->write32 = tg3_write_indirect_reg32;
16533 tp->read32_mbox = tg3_read_indirect_mbox;
16534 tp->write32_mbox = tg3_write_indirect_mbox;
16535 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16536 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16537
16538 iounmap(tp->regs);
16539 tp->regs = NULL;
16540
16541 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16542 pci_cmd &= ~PCI_COMMAND_MEMORY;
16543 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16544 }
16545 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16546 tp->read32_mbox = tg3_read32_mbox_5906;
16547 tp->write32_mbox = tg3_write32_mbox_5906;
16548 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16549 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16550 }
16551
16552 if (tp->write32 == tg3_write_indirect_reg32 ||
16553 (tg3_flag(tp, PCIX_MODE) &&
16554 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16555 tg3_asic_rev(tp) == ASIC_REV_5701)))
16556 tg3_flag_set(tp, SRAM_USE_CONFIG);
16557
16558 /* The memory arbiter has to be enabled in order for SRAM accesses
16559 * to succeed. Normally on powerup the tg3 chip firmware will make
16560 * sure it is enabled, but other entities such as system netboot
16561 * code might disable it.
16562 */
16563 val = tr32(MEMARB_MODE);
16564 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16565
16566 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16567 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16568 tg3_flag(tp, 5780_CLASS)) {
16569 if (tg3_flag(tp, PCIX_MODE)) {
16570 pci_read_config_dword(tp->pdev,
16571 tp->pcix_cap + PCI_X_STATUS,
16572 &val);
16573 tp->pci_fn = val & 0x7;
16574 }
16575 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16576 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16577 tg3_asic_rev(tp) == ASIC_REV_5720) {
16578 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16579 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16580 val = tr32(TG3_CPMU_STATUS);
16581
16582 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16583 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16584 else
16585 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16586 TG3_CPMU_STATUS_FSHFT_5719;
16587 }
16588
16589 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16590 tp->write32_tx_mbox = tg3_write_flush_reg32;
16591 tp->write32_rx_mbox = tg3_write_flush_reg32;
16592 }
16593
16594 /* Get eeprom hw config before calling tg3_set_power_state().
16595 * In particular, the TG3_FLAG_IS_NIC flag must be
16596 * determined before calling tg3_set_power_state() so that
16597 * we know whether or not to switch out of Vaux power.
16598 * When the flag is set, it means that GPIO1 is used for eeprom
16599 * write protect and also implies that it is a LOM where GPIOs
16600 * are not used to switch power.
16601 */
16602 tg3_get_eeprom_hw_cfg(tp);
16603
16604 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16605 tg3_flag_clear(tp, TSO_CAPABLE);
16606 tg3_flag_clear(tp, TSO_BUG);
16607 tp->fw_needed = NULL;
16608 }
16609
16610 if (tg3_flag(tp, ENABLE_APE)) {
16611 /* Allow reads and writes to the
16612 * APE register and memory space.
16613 */
16614 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16615 PCISTATE_ALLOW_APE_SHMEM_WR |
16616 PCISTATE_ALLOW_APE_PSPACE_WR;
16617 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16618 pci_state_reg);
16619
16620 tg3_ape_lock_init(tp);
16621 tp->ape_hb_interval =
16622 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16623 }
16624
16625 /* Set up tp->grc_local_ctrl before calling
16626 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16627 * will bring 5700's external PHY out of reset.
16628 * It is also used as eeprom write protect on LOMs.
16629 */
16630 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16631 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16632 tg3_flag(tp, EEPROM_WRITE_PROT))
16633 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16634 GRC_LCLCTRL_GPIO_OUTPUT1);
16635 /* Unused GPIO3 must be driven as output on 5752 because there
16636 * are no pull-up resistors on unused GPIO pins.
16637 */
16638 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16639 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16640
16641 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16642 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16643 tg3_flag(tp, 57765_CLASS))
16644 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16645
16646 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16647 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16648 /* Turn off the debug UART. */
16649 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16650 if (tg3_flag(tp, IS_NIC))
16651 /* Keep VMain power. */
16652 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16653 GRC_LCLCTRL_GPIO_OUTPUT0;
16654 }
16655
16656 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16657 tp->grc_local_ctrl |=
16658 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16659
16660 /* Switch out of Vaux if it is a NIC */
16661 tg3_pwrsrc_switch_to_vmain(tp);
16662
16663 /* Derive initial jumbo mode from MTU assigned in
16664 * ether_setup() via the alloc_etherdev() call
16665 */
16666 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16667 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16668
16669 /* Determine WakeOnLan speed to use. */
16670 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16671 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16672 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16673 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16674 tg3_flag_clear(tp, WOL_SPEED_100MB);
16675 } else {
16676 tg3_flag_set(tp, WOL_SPEED_100MB);
16677 }
16678
16679 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16680 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16681
16682 /* A few boards don't want Ethernet@WireSpeed phy feature */
16683 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16684 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16685 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16686 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16687 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16688 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16689 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16690
16691 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16692 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16693 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16694 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16695 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16696
16697 if (tg3_flag(tp, 5705_PLUS) &&
16698 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16699 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16700 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16701 !tg3_flag(tp, 57765_PLUS)) {
16702 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16703 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16704 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16705 tg3_asic_rev(tp) == ASIC_REV_5761) {
16706 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16707 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16708 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16709 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16710 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16711 } else
16712 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16713 }
16714
16715 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16716 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16717 tp->phy_otp = tg3_read_otp_phycfg(tp);
16718 if (tp->phy_otp == 0)
16719 tp->phy_otp = TG3_OTP_DEFAULT;
16720 }
16721
16722 if (tg3_flag(tp, CPMU_PRESENT))
16723 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16724 else
16725 tp->mi_mode = MAC_MI_MODE_BASE;
16726
16727 tp->coalesce_mode = 0;
16728 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16729 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16730 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16731
16732 /* Set these bits to enable statistics workaround. */
16733 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16734 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16735 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16736 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16737 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16738 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16739 }
16740
16741 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16742 tg3_asic_rev(tp) == ASIC_REV_57780)
16743 tg3_flag_set(tp, USE_PHYLIB);
16744
16745 err = tg3_mdio_init(tp);
16746 if (err)
16747 return err;
16748
16749 /* Initialize data/descriptor byte/word swapping. */
16750 val = tr32(GRC_MODE);
16751 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16752 tg3_asic_rev(tp) == ASIC_REV_5762)
16753 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16754 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16755 GRC_MODE_B2HRX_ENABLE |
16756 GRC_MODE_HTX2B_ENABLE |
16757 GRC_MODE_HOST_STACKUP);
16758 else
16759 val &= GRC_MODE_HOST_STACKUP;
16760
16761 tw32(GRC_MODE, val | tp->grc_mode);
16762
16763 tg3_switch_clocks(tp);
16764
16765 /* Clear this out for sanity. */
16766 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16767
16768 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16769 tw32(TG3PCI_REG_BASE_ADDR, 0);
16770
16771 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16772 &pci_state_reg);
16773 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16774 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16775 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16776 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16777 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16778 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16779 void __iomem *sram_base;
16780
16781 /* Write some dummy words into the SRAM status block
16782 * area, see if it reads back correctly. If the return
16783 * value is bad, force enable the PCIX workaround.
16784 */
16785 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16786
16787 writel(0x00000000, sram_base);
16788 writel(0x00000000, sram_base + 4);
16789 writel(0xffffffff, sram_base + 4);
16790 if (readl(sram_base) != 0x00000000)
16791 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16792 }
16793 }
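/* Note on the probe above (an editorial reading of the code, not an
 * authoritative errata description): word 0 of the status block is
 * written as 0, then the adjacent word is hit with 0x00000000 and
 * 0xffffffff; if word 0 no longer reads back as 0, target-mode writes
 * are corrupting neighboring SRAM and the PCIX workaround is forced on.
 */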
16794
16795 udelay(50);
16796 tg3_nvram_init(tp);
16797
16798 /* If the device has NVRAM, there is no need to load patch firmware */
16799 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16800 !tg3_flag(tp, NO_NVRAM))
16801 tp->fw_needed = NULL;
16802
16803 grc_misc_cfg = tr32(GRC_MISC_CFG);
16804 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16805
16806 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16807 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16808 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16809 tg3_flag_set(tp, IS_5788);
16810
16811 if (!tg3_flag(tp, IS_5788) &&
16812 tg3_asic_rev(tp) != ASIC_REV_5700)
16813 tg3_flag_set(tp, TAGGED_STATUS);
16814 if (tg3_flag(tp, TAGGED_STATUS)) {
16815 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16816 HOSTCC_MODE_CLRTICK_TXBD);
16817
16818 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16819 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16820 tp->misc_host_ctrl);
16821 }
16822
16823 /* Preserve the APE MAC_MODE bits */
16824 if (tg3_flag(tp, ENABLE_APE))
16825 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16826 else
16827 tp->mac_mode = 0;
16828
16829 if (tg3_10_100_only_device(tp, ent))
16830 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16831
16832 err = tg3_phy_probe(tp);
16833 if (err) {
16834 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16835 /* ... but do not return immediately ... */
16836 tg3_mdio_fini(tp);
16837 }
16838
16839 tg3_read_vpd(tp);
16840 tg3_read_fw_ver(tp);
16841
16842 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16843 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16844 } else {
16845 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16846 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16847 else
16848 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16849 }
16850
16851 /* 5700 {AX,BX} chips have a broken status block link
16852 * change bit implementation, so we must use the
16853 * status register in those cases.
16854 */
16855 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16856 tg3_flag_set(tp, USE_LINKCHG_REG);
16857 else
16858 tg3_flag_clear(tp, USE_LINKCHG_REG);
16859
16860 /* The led_ctrl is set during tg3_phy_probe; here we might
16861 * have to force the link status polling mechanism based
16862 * upon subsystem IDs.
16863 */
16864 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16865 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16866 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16867 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16868 tg3_flag_set(tp, USE_LINKCHG_REG);
16869 }
16870
16871 /* For all SERDES we poll the MAC status register. */
16872 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16873 tg3_flag_set(tp, POLL_SERDES);
16874 else
16875 tg3_flag_clear(tp, POLL_SERDES);
16876
16877 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16878 tg3_flag_set(tp, POLL_CPMU_LINK);
16879
16880 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16881 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16882 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16883 tg3_flag(tp, PCIX_MODE)) {
16884 tp->rx_offset = NET_SKB_PAD;
16885 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16886 tp->rx_copy_thresh = ~(u16)0;
16887 #endif
16888 }
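/* Note: ~(u16)0 is the maximum copy threshold, which sends every
 * received frame down the copy path. With rx_offset reduced to
 * NET_SKB_PAD the IP header lands unaligned in the DMA buffer, so
 * copying (which can realign the data) is presumably the safe choice
 * on platforms without efficient unaligned access.
 */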
16889
16890 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16891 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16892 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16893
16894 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16895
16896 /* Increment the rx prod index on the rx std ring by at most
16897 * 8 for these chips to work around hw errata.
16898 */
16899 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16900 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16901 tg3_asic_rev(tp) == ASIC_REV_5755)
16902 tp->rx_std_max_post = 8;
16903
16904 if (tg3_flag(tp, ASPM_WORKAROUND))
16905 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16906 PCIE_PWR_MGMT_L1_THRESH_MSK;
16907
16908 return err;
16909 }
16910
16911 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
16912 {
16913 u32 hi, lo, mac_offset;
16914 int addr_ok = 0;
16915 int err;
16916
16917 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
16918 return 0;
16919
16920 if (tg3_flag(tp, IS_SSB_CORE)) {
16921 err = ssb_gige_get_macaddr(tp->pdev, addr);
16922 if (!err && is_valid_ether_addr(addr))
16923 return 0;
16924 }
16925
16926 mac_offset = 0x7c;
16927 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16928 tg3_flag(tp, 5780_CLASS)) {
16929 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16930 mac_offset = 0xcc;
16931 if (tg3_nvram_lock(tp))
16932 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16933 else
16934 tg3_nvram_unlock(tp);
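/* Reading the branch above: tg3_nvram_lock() returns nonzero on
 * failure, so the NVRAM state machine is reset when arbitration could
 * not be won, and otherwise the just-acquired lock is simply dropped.
 */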
16935 } else if (tg3_flag(tp, 5717_PLUS)) {
16936 if (tp->pci_fn & 1)
16937 mac_offset = 0xcc;
16938 if (tp->pci_fn > 1)
16939 mac_offset += 0x18c;
16940 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16941 mac_offset = 0x10;
16942
16943 /* First try to get it from MAC address mailbox. */
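/* The 0x484b signature checked below is ASCII "HK"; it appears to be
 * how bootcode marks the SRAM mailbox copy as valid, so the address
 * bytes are only trusted when it is present.
 */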
16944 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16945 if ((hi >> 16) == 0x484b) {
16946 addr[0] = (hi >> 8) & 0xff;
16947 addr[1] = (hi >> 0) & 0xff;
16948
16949 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16950 addr[2] = (lo >> 24) & 0xff;
16951 addr[3] = (lo >> 16) & 0xff;
16952 addr[4] = (lo >> 8) & 0xff;
16953 addr[5] = (lo >> 0) & 0xff;
16954
16955 /* Some old bootcode may report a 0 MAC address in SRAM */
16956 addr_ok = is_valid_ether_addr(addr);
16957 }
16958 if (!addr_ok) {
16959 /* Next, try NVRAM. */
16960 if (!tg3_flag(tp, NO_NVRAM) &&
16961 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16962 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16963 memcpy(&addr[0], ((char *)&hi) + 2, 2);
16964 memcpy(&addr[2], (char *)&lo, sizeof(lo));
16965 }
16966 /* Finally just fetch it out of the MAC control regs. */
16967 else {
16968 hi = tr32(MAC_ADDR_0_HIGH);
16969 lo = tr32(MAC_ADDR_0_LOW);
16970
16971 addr[5] = lo & 0xff;
16972 addr[4] = (lo >> 8) & 0xff;
16973 addr[3] = (lo >> 16) & 0xff;
16974 addr[2] = (lo >> 24) & 0xff;
16975 addr[1] = hi & 0xff;
16976 addr[0] = (hi >> 8) & 0xff;
16977 }
16978 }
16979
16980 if (!is_valid_ether_addr(addr))
16981 return -EINVAL;
16982 return 0;
16983 }
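/* Illustrative sketch (not part of the driver): how the two SRAM
 * mailbox words read above unpack into a 6-byte MAC address. The
 * values are hypothetical; only the byte layout mirrors the code.
 *
 *	hi = 0x484b0a1b;	(hi >> 16) == 0x484b signature,
 *				addr[0] = 0x0a, addr[1] = 0x1b
 *	lo = 0x2c3d4e5f;	addr[2..5] = 0x2c, 0x3d, 0x4e, 0x5f
 *	=> MAC address 0a:1b:2c:3d:4e:5f
 */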
16984
16985 #define BOUNDARY_SINGLE_CACHELINE 1
16986 #define BOUNDARY_MULTI_CACHELINE 2
16987
16988 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16989 {
16990 int cacheline_size;
16991 u8 byte;
16992 int goal;
16993
16994 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16995 if (byte == 0)
16996 cacheline_size = 1024;
16997 else
16998 cacheline_size = (int) byte * 4;
16999
17000 /* On 5703 and later chips, the boundary bits have no
17001 * effect.
17002 */
17003 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17004 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17005 !tg3_flag(tp, PCI_EXPRESS))
17006 goto out;
17007
17008 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17009 goal = BOUNDARY_MULTI_CACHELINE;
17010 #elif defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17011 goal = BOUNDARY_SINGLE_CACHELINE;
17012 #else
17013 goal = 0;
17014 #endif
17017
17018 if (tg3_flag(tp, 57765_PLUS)) {
17019 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17020 goto out;
17021 }
17022
17023 if (!goal)
17024 goto out;
17025
17026 /* PCI controllers on most RISC systems tend to disconnect
17027 * when a device tries to burst across a cache-line boundary.
17028 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17029 *
17030 * Unfortunately, for PCI-E there are only limited
17031 * write-side controls for this, and thus for reads
17032 * we will still get the disconnects. We'll also waste
17033 * these PCI cycles for both read and write for chips
17034 * other than 5700 and 5701 which do not implement the
17035 * boundary bits.
17036 */
17037 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17038 switch (cacheline_size) {
17039 case 16:
17040 case 32:
17041 case 64:
17042 case 128:
17043 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17044 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17045 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17046 } else {
17047 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17048 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17049 }
17050 break;
17051
17052 case 256:
17053 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17054 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17055 break;
17056
17057 default:
17058 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17059 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17060 break;
17061 }
17062 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17063 switch (cacheline_size) {
17064 case 16:
17065 case 32:
17066 case 64:
17067 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17068 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17069 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17070 break;
17071 }
17072 fallthrough;
17073 case 128:
17074 default:
17075 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17076 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17077 break;
17078 }
17079 } else {
17080 switch (cacheline_size) {
17081 case 16:
17082 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17083 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17084 DMA_RWCTRL_WRITE_BNDRY_16);
17085 break;
17086 }
17087 fallthrough;
17088 case 32:
17089 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17090 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17091 DMA_RWCTRL_WRITE_BNDRY_32);
17092 break;
17093 }
17094 fallthrough;
17095 case 64:
17096 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17097 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17098 DMA_RWCTRL_WRITE_BNDRY_64);
17099 break;
17100 }
17101 fallthrough;
17102 case 128:
17103 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17104 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17105 DMA_RWCTRL_WRITE_BNDRY_128);
17106 break;
17107 }
17108 fallthrough;
17109 case 256:
17110 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17111 DMA_RWCTRL_WRITE_BNDRY_256);
17112 break;
17113 case 512:
17114 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17115 DMA_RWCTRL_WRITE_BNDRY_512);
17116 break;
17117 case 1024:
17118 default:
17119 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17120 DMA_RWCTRL_WRITE_BNDRY_1024);
17121 break;
17122 }
17123 }
17124
17125 out:
17126 return val;
17127 }
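/* For reference: PCI_CACHE_LINE_SIZE counts 32-bit words, hence the
 * "* 4" above. A register value of 0x10 therefore means a 64-byte
 * cache line, and 0 (left unset by firmware) is conservatively
 * treated as a 1024-byte line.
 */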
17128
17129 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17130 int size, bool to_device)
17131 {
17132 struct tg3_internal_buffer_desc test_desc;
17133 u32 sram_dma_descs;
17134 int i, ret;
17135
17136 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17137
17138 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17139 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17140 tw32(RDMAC_STATUS, 0);
17141 tw32(WDMAC_STATUS, 0);
17142
17143 tw32(BUFMGR_MODE, 0);
17144 tw32(FTQ_RESET, 0);
17145
17146 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17147 test_desc.addr_lo = buf_dma & 0xffffffff;
17148 test_desc.nic_mbuf = 0x00002100;
17149 test_desc.len = size;
17150
17151 /*
17152 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17153 * the *second* time the tg3 driver was getting loaded after an
17154 * initial scan.
17155 *
17156 * Broadcom tells me:
17157 * ...the DMA engine is connected to the GRC block and a DMA
17158 * reset may affect the GRC block in some unpredictable way...
17159 * The behavior of resets to individual blocks has not been tested.
17160 *
17161 * Broadcom noted the GRC reset will also reset all sub-components.
17162 */
17163 if (to_device) {
17164 test_desc.cqid_sqid = (13 << 8) | 2;
17165
17166 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17167 udelay(40);
17168 } else {
17169 test_desc.cqid_sqid = (16 << 8) | 7;
17170
17171 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17172 udelay(40);
17173 }
17174 test_desc.flags = 0x00000005;
17175
17176 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17177 u32 val;
17178
17179 val = *(((u32 *)&test_desc) + i);
17180 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17181 sram_dma_descs + (i * sizeof(u32)));
17182 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17183 }
17184 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17185
17186 if (to_device)
17187 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17188 else
17189 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17190
17191 ret = -ENODEV;
17192 for (i = 0; i < 40; i++) {
17193 u32 val;
17194
17195 if (to_device)
17196 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17197 else
17198 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17199 if ((val & 0xffff) == sram_dma_descs) {
17200 ret = 0;
17201 break;
17202 }
17203
17204 udelay(100);
17205 }
17206
17207 return ret;
17208 }
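/* A minimal sketch of the config-space SRAM window used above to
 * plant the descriptor (hypothetical helper, for illustration only):
 *
 *	static void sram_write32(struct tg3 *tp, u32 off, u32 val)
 *	{
 *		pci_write_config_dword(tp->pdev,
 *				       TG3PCI_MEM_WIN_BASE_ADDR, off);
 *		pci_write_config_dword(tp->pdev,
 *				       TG3PCI_MEM_WIN_DATA, val);
 *	}
 *
 * i.e. point the window at an SRAM offset, then write the datum.
 */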
17209
17210 #define TEST_BUFFER_SIZE 0x2000
17211
17212 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17213 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17214 { },
17215 };
17216
17217 static int tg3_test_dma(struct tg3 *tp)
17218 {
17219 dma_addr_t buf_dma;
17220 u32 *buf, saved_dma_rwctrl;
17221 int ret = 0;
17222
17223 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17224 &buf_dma, GFP_KERNEL);
17225 if (!buf) {
17226 ret = -ENOMEM;
17227 goto out_nofree;
17228 }
17229
17230 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17231 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17232
17233 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17234
17235 if (tg3_flag(tp, 57765_PLUS))
17236 goto out;
17237
17238 if (tg3_flag(tp, PCI_EXPRESS)) {
17239 /* DMA read watermark not used on PCIE */
17240 tp->dma_rwctrl |= 0x00180000;
17241 } else if (!tg3_flag(tp, PCIX_MODE)) {
17242 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17243 tg3_asic_rev(tp) == ASIC_REV_5750)
17244 tp->dma_rwctrl |= 0x003f0000;
17245 else
17246 tp->dma_rwctrl |= 0x003f000f;
17247 } else {
17248 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17249 tg3_asic_rev(tp) == ASIC_REV_5704) {
17250 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17251 u32 read_water = 0x7;
17252
17253 /* If the 5704 is behind the EPB bridge, we can
17254 * do the less restrictive ONE_DMA workaround for
17255 * better performance.
17256 */
17257 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17258 tg3_asic_rev(tp) == ASIC_REV_5704)
17259 tp->dma_rwctrl |= 0x8000;
17260 else if (ccval == 0x6 || ccval == 0x7)
17261 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17262
17263 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17264 read_water = 4;
17265 /* Set bit 23 to enable PCIX hw bug fix */
17266 tp->dma_rwctrl |=
17267 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17268 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17269 (1 << 23);
17270 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17271 /* 5780 always in PCIX mode */
17272 tp->dma_rwctrl |= 0x00144000;
17273 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17274 /* 5714 always in PCIX mode */
17275 tp->dma_rwctrl |= 0x00148000;
17276 } else {
17277 tp->dma_rwctrl |= 0x001b000f;
17278 }
17279 }
17280 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17281 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17282
17283 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17284 tg3_asic_rev(tp) == ASIC_REV_5704)
17285 tp->dma_rwctrl &= 0xfffffff0;
17286
17287 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17288 tg3_asic_rev(tp) == ASIC_REV_5701) {
17289 /* Remove this if it causes problems for some boards. */
17290 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17291
17292 /* On 5700/5701 chips, we need to set this bit.
17293 * Otherwise the chip will issue cacheline transactions
17294 * to streamable DMA memory with not all the byte
17295 * enables turned on. This is an error on several
17296 * RISC PCI controllers, in particular sparc64.
17297 *
17298 * On 5703/5704 chips, this bit has been reassigned
17299 * a different meaning. In particular, it is used
17300 * on those chips to enable a PCI-X workaround.
17301 */
17302 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17303 }
17304
17305 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17306
17308 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17309 tg3_asic_rev(tp) != ASIC_REV_5701)
17310 goto out;
17311
17312 /* It is best to perform DMA test with maximum write burst size
17313 * to expose the 5700/5701 write DMA bug.
17314 */
17315 saved_dma_rwctrl = tp->dma_rwctrl;
17316 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17317 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17318
17319 while (1) {
17320 u32 *p = buf, i;
17321
17322 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17323 p[i] = i;
17324
17325 /* Send the buffer to the chip. */
17326 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17327 if (ret) {
17328 dev_err(&tp->pdev->dev,
17329 "%s: Buffer write failed. err = %d\n",
17330 __func__, ret);
17331 break;
17332 }
17333
17334 /* Now read it back. */
17335 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17336 if (ret) {
17337 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17338 "err = %d\n", __func__, ret);
17339 break;
17340 }
17341
17342 /* Verify it. */
17343 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17344 if (p[i] == i)
17345 continue;
17346
17347 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17348 DMA_RWCTRL_WRITE_BNDRY_16) {
17349 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17350 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17351 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17352 break;
17353 } else {
17354 dev_err(&tp->pdev->dev,
17355 "%s: Buffer corrupted on read back! "
17356 "(%d != %d)\n", __func__, p[i], i);
17357 ret = -ENODEV;
17358 goto out;
17359 }
17360 }
17361
17362 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17363 /* Success. */
17364 ret = 0;
17365 break;
17366 }
17367 }
17368 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17369 DMA_RWCTRL_WRITE_BNDRY_16) {
17370 /* DMA test passed without adjusting the DMA boundary;
17371 * now look for chipsets that are known to expose the
17372 * DMA bug without failing the test.
17373 */
17374 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17375 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17376 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17377 } else {
17378 /* Safe to use the calculated DMA boundary. */
17379 tp->dma_rwctrl = saved_dma_rwctrl;
17380 }
17381
17382 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17383 }
17384
17385 out:
17386 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17387 out_nofree:
17388 return ret;
17389 }
17390
17391 static void tg3_init_bufmgr_config(struct tg3 *tp)
17392 {
17393 if (tg3_flag(tp, 57765_PLUS)) {
17394 tp->bufmgr_config.mbuf_read_dma_low_water =
17395 DEFAULT_MB_RDMA_LOW_WATER_5705;
17396 tp->bufmgr_config.mbuf_mac_rx_low_water =
17397 DEFAULT_MB_MACRX_LOW_WATER_57765;
17398 tp->bufmgr_config.mbuf_high_water =
17399 DEFAULT_MB_HIGH_WATER_57765;
17400
17401 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17402 DEFAULT_MB_RDMA_LOW_WATER_5705;
17403 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17404 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17405 tp->bufmgr_config.mbuf_high_water_jumbo =
17406 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17407 } else if (tg3_flag(tp, 5705_PLUS)) {
17408 tp->bufmgr_config.mbuf_read_dma_low_water =
17409 DEFAULT_MB_RDMA_LOW_WATER_5705;
17410 tp->bufmgr_config.mbuf_mac_rx_low_water =
17411 DEFAULT_MB_MACRX_LOW_WATER_5705;
17412 tp->bufmgr_config.mbuf_high_water =
17413 DEFAULT_MB_HIGH_WATER_5705;
17414 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17415 tp->bufmgr_config.mbuf_mac_rx_low_water =
17416 DEFAULT_MB_MACRX_LOW_WATER_5906;
17417 tp->bufmgr_config.mbuf_high_water =
17418 DEFAULT_MB_HIGH_WATER_5906;
17419 }
17420
17421 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17422 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17423 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17424 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17425 tp->bufmgr_config.mbuf_high_water_jumbo =
17426 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17427 } else {
17428 tp->bufmgr_config.mbuf_read_dma_low_water =
17429 DEFAULT_MB_RDMA_LOW_WATER;
17430 tp->bufmgr_config.mbuf_mac_rx_low_water =
17431 DEFAULT_MB_MACRX_LOW_WATER;
17432 tp->bufmgr_config.mbuf_high_water =
17433 DEFAULT_MB_HIGH_WATER;
17434
17435 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17436 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17437 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17438 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17439 tp->bufmgr_config.mbuf_high_water_jumbo =
17440 DEFAULT_MB_HIGH_WATER_JUMBO;
17441 }
17442
17443 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17444 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17445 }
17446
17447 static char *tg3_phy_string(struct tg3 *tp)
17448 {
17449 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17450 case TG3_PHY_ID_BCM5400: return "5400";
17451 case TG3_PHY_ID_BCM5401: return "5401";
17452 case TG3_PHY_ID_BCM5411: return "5411";
17453 case TG3_PHY_ID_BCM5701: return "5701";
17454 case TG3_PHY_ID_BCM5703: return "5703";
17455 case TG3_PHY_ID_BCM5704: return "5704";
17456 case TG3_PHY_ID_BCM5705: return "5705";
17457 case TG3_PHY_ID_BCM5750: return "5750";
17458 case TG3_PHY_ID_BCM5752: return "5752";
17459 case TG3_PHY_ID_BCM5714: return "5714";
17460 case TG3_PHY_ID_BCM5780: return "5780";
17461 case TG3_PHY_ID_BCM5755: return "5755";
17462 case TG3_PHY_ID_BCM5787: return "5787";
17463 case TG3_PHY_ID_BCM5784: return "5784";
17464 case TG3_PHY_ID_BCM5756: return "5722/5756";
17465 case TG3_PHY_ID_BCM5906: return "5906";
17466 case TG3_PHY_ID_BCM5761: return "5761";
17467 case TG3_PHY_ID_BCM5718C: return "5718C";
17468 case TG3_PHY_ID_BCM5718S: return "5718S";
17469 case TG3_PHY_ID_BCM57765: return "57765";
17470 case TG3_PHY_ID_BCM5719C: return "5719C";
17471 case TG3_PHY_ID_BCM5720C: return "5720C";
17472 case TG3_PHY_ID_BCM5762: return "5762C";
17473 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17474 case 0: return "serdes";
17475 default: return "unknown";
17476 }
17477 }
17478
17479 static char *tg3_bus_string(struct tg3 *tp, char *str)
17480 {
17481 if (tg3_flag(tp, PCI_EXPRESS)) {
17482 strcpy(str, "PCI Express");
17483 return str;
17484 } else if (tg3_flag(tp, PCIX_MODE)) {
17485 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17486
17487 strcpy(str, "PCIX:");
17488
17489 if ((clock_ctrl == 7) ||
17490 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17491 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17492 strcat(str, "133MHz");
17493 else if (clock_ctrl == 0)
17494 strcat(str, "33MHz");
17495 else if (clock_ctrl == 2)
17496 strcat(str, "50MHz");
17497 else if (clock_ctrl == 4)
17498 strcat(str, "66MHz");
17499 else if (clock_ctrl == 6)
17500 strcat(str, "100MHz");
17501 } else {
17502 strcpy(str, "PCI:");
17503 if (tg3_flag(tp, PCI_HIGH_SPEED))
17504 strcat(str, "66MHz");
17505 else
17506 strcat(str, "33MHz");
17507 }
17508 if (tg3_flag(tp, PCI_32BIT))
17509 strcat(str, ":32-bit");
17510 else
17511 strcat(str, ":64-bit");
17512 return str;
17513 }
17514
17515 static void tg3_init_coal(struct tg3 *tp)
17516 {
17517 struct ethtool_coalesce *ec = &tp->coal;
17518
17519 memset(ec, 0, sizeof(*ec));
17520 ec->cmd = ETHTOOL_GCOALESCE;
17521 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17522 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17523 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17524 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17525 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17526 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17527 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17528 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17529 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17530
17531 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17532 HOSTCC_MODE_CLRTICK_TXBD)) {
17533 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17534 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17535 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17536 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17537 }
17538
17539 if (tg3_flag(tp, 5705_PLUS)) {
17540 ec->rx_coalesce_usecs_irq = 0;
17541 ec->tx_coalesce_usecs_irq = 0;
17542 ec->stats_block_coalesce_usecs = 0;
17543 }
17544 }
17545
17546 static int tg3_init_one(struct pci_dev *pdev,
17547 const struct pci_device_id *ent)
17548 {
17549 struct net_device *dev;
17550 struct tg3 *tp;
17551 int i, err;
17552 u32 sndmbx, rcvmbx, intmbx;
17553 char str[40];
17554 u64 dma_mask, persist_dma_mask;
17555 netdev_features_t features = 0;
17556 u8 addr[ETH_ALEN] __aligned(2);
17557
17558 err = pci_enable_device(pdev);
17559 if (err) {
17560 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17561 return err;
17562 }
17563
17564 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17565 if (err) {
17566 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17567 goto err_out_disable_pdev;
17568 }
17569
17570 pci_set_master(pdev);
17571
17572 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17573 if (!dev) {
17574 err = -ENOMEM;
17575 goto err_out_free_res;
17576 }
17577
17578 SET_NETDEV_DEV(dev, &pdev->dev);
17579
17580 tp = netdev_priv(dev);
17581 tp->pdev = pdev;
17582 tp->dev = dev;
17583 tp->rx_mode = TG3_DEF_RX_MODE;
17584 tp->tx_mode = TG3_DEF_TX_MODE;
17585 tp->irq_sync = 1;
17586 tp->pcierr_recovery = false;
17587
17588 if (tg3_debug > 0)
17589 tp->msg_enable = tg3_debug;
17590 else
17591 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17592
17593 if (pdev_is_ssb_gige_core(pdev)) {
17594 tg3_flag_set(tp, IS_SSB_CORE);
17595 if (ssb_gige_must_flush_posted_writes(pdev))
17596 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17597 if (ssb_gige_one_dma_at_once(pdev))
17598 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17599 if (ssb_gige_have_roboswitch(pdev)) {
17600 tg3_flag_set(tp, USE_PHYLIB);
17601 tg3_flag_set(tp, ROBOSWITCH);
17602 }
17603 if (ssb_gige_is_rgmii(pdev))
17604 tg3_flag_set(tp, RGMII_MODE);
17605 }
17606
17607 /* The word/byte swap controls here control register access byte
17608 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17609 * setting below.
17610 */
17611 tp->misc_host_ctrl =
17612 MISC_HOST_CTRL_MASK_PCI_INT |
17613 MISC_HOST_CTRL_WORD_SWAP |
17614 MISC_HOST_CTRL_INDIR_ACCESS |
17615 MISC_HOST_CTRL_PCISTATE_RW;
17616
17617 /* The NONFRM (non-frame) byte/word swap controls take effect
17618 * on descriptor entries, anything which isn't packet data.
17619 *
17620 * The StrongARM chips on the board (one for tx, one for rx)
17621 * are running in big-endian mode.
17622 */
17623 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17624 GRC_MODE_WSWAP_NONFRM_DATA);
17625 #ifdef __BIG_ENDIAN
17626 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17627 #endif
17628 spin_lock_init(&tp->lock);
17629 spin_lock_init(&tp->indirect_lock);
17630 INIT_WORK(&tp->reset_task, tg3_reset_task);
17631
17632 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17633 if (!tp->regs) {
17634 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17635 err = -ENOMEM;
17636 goto err_out_free_dev;
17637 }
17638
17639 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17640 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17641 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17642 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17643 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17644 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17645 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17646 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17647 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17648 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17649 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17650 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17651 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17652 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17653 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17654 tg3_flag_set(tp, ENABLE_APE);
17655 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17656 if (!tp->aperegs) {
17657 dev_err(&pdev->dev,
17658 "Cannot map APE registers, aborting\n");
17659 err = -ENOMEM;
17660 goto err_out_iounmap;
17661 }
17662 }
17663
17664 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17665 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17666
17667 dev->ethtool_ops = &tg3_ethtool_ops;
17668 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17669 dev->netdev_ops = &tg3_netdev_ops;
17670 dev->irq = pdev->irq;
17671
17672 err = tg3_get_invariants(tp, ent);
17673 if (err) {
17674 dev_err(&pdev->dev,
17675 "Problem fetching invariants of chip, aborting\n");
17676 goto err_out_apeunmap;
17677 }
17678
17679 /* The EPB bridge inside 5714, 5715, and 5780 and any
17680 * device behind the EPB cannot support DMA addresses > 40-bit.
17681 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17682 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17683 * do DMA address check in tg3_start_xmit().
17684 */
17685 if (tg3_flag(tp, IS_5788))
17686 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17687 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17688 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17689 #ifdef CONFIG_HIGHMEM
17690 dma_mask = DMA_BIT_MASK(64);
17691 #endif
17692 } else
17693 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
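/* For reference, DMA_BIT_MASK(n) is ((1ULL << n) - 1) for n < 64 and
 * all-ones for n == 64, so the three cases above evaluate to
 * 0x00000000ffffffff, 0x000000ffffffffff and 0xffffffffffffffff.
 */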
17694
17695 /* Configure DMA attributes. */
17696 if (dma_mask > DMA_BIT_MASK(32)) {
17697 err = dma_set_mask(&pdev->dev, dma_mask);
17698 if (!err) {
17699 features |= NETIF_F_HIGHDMA;
17700 err = dma_set_coherent_mask(&pdev->dev,
17701 persist_dma_mask);
17702 if (err < 0) {
17703 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17704 "DMA for consistent allocations\n");
17705 goto err_out_apeunmap;
17706 }
17707 }
17708 }
17709 if (err || dma_mask == DMA_BIT_MASK(32)) {
17710 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17711 if (err) {
17712 dev_err(&pdev->dev,
17713 "No usable DMA configuration, aborting\n");
17714 goto err_out_apeunmap;
17715 }
17716 }
17717
17718 tg3_init_bufmgr_config(tp);
17719
17720 /* 5700 B0 chips do not support checksumming correctly due
17721 * to hardware bugs.
17722 */
17723 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17724 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17725
17726 if (tg3_flag(tp, 5755_PLUS))
17727 features |= NETIF_F_IPV6_CSUM;
17728 }
17729
17730 /* TSO is on by default on chips that support hardware TSO.
17731 * Firmware TSO on older chips gives lower performance, so it
17732 * is off by default, but can be enabled using ethtool.
17733 */
17734 if ((tg3_flag(tp, HW_TSO_1) ||
17735 tg3_flag(tp, HW_TSO_2) ||
17736 tg3_flag(tp, HW_TSO_3)) &&
17737 (features & NETIF_F_IP_CSUM))
17738 features |= NETIF_F_TSO;
17739 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17740 if (features & NETIF_F_IPV6_CSUM)
17741 features |= NETIF_F_TSO6;
17742 if (tg3_flag(tp, HW_TSO_3) ||
17743 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17744 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17745 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17746 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17747 tg3_asic_rev(tp) == ASIC_REV_57780)
17748 features |= NETIF_F_TSO_ECN;
17749 }
17750
17751 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17752 NETIF_F_HW_VLAN_CTAG_RX;
17753 dev->vlan_features |= features;
17754
17755 /*
17756 * Add loopback capability only for a subset of devices that support
17757 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17758 * loopback for the remaining devices.
17759 */
17760 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17761 !tg3_flag(tp, CPMU_PRESENT))
17762 /* Add the loopback capability */
17763 features |= NETIF_F_LOOPBACK;
17764
17765 dev->hw_features |= features;
17766 dev->priv_flags |= IFF_UNICAST_FLT;
17767
17768 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17769 dev->min_mtu = TG3_MIN_MTU;
17770 dev->max_mtu = TG3_MAX_MTU(tp);
17771
17772 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17773 !tg3_flag(tp, TSO_CAPABLE) &&
17774 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17775 tg3_flag_set(tp, MAX_RXPEND_64);
17776 tp->rx_pending = 63;
17777 }
17778
17779 err = tg3_get_device_address(tp, addr);
17780 if (err) {
17781 dev_err(&pdev->dev,
17782 "Could not obtain valid ethernet address, aborting\n");
17783 goto err_out_apeunmap;
17784 }
17785 eth_hw_addr_set(dev, addr);
17786
17787 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17788 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17789 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17790 for (i = 0; i < tp->irq_max; i++) {
17791 struct tg3_napi *tnapi = &tp->napi[i];
17792
17793 tnapi->tp = tp;
17794 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17795
17796 tnapi->int_mbox = intmbx;
17797 intmbx += 0x8;
17798
17799 tnapi->consmbox = rcvmbx;
17800 tnapi->prodmbox = sndmbx;
17801
17802 if (i)
17803 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17804 else
17805 tnapi->coal_now = HOSTCC_MODE_NOW;
17806
17807 if (!tg3_flag(tp, SUPPORT_MSIX))
17808 break;
17809
17810 /*
17811 * If we support MSIX, we'll be using RSS. If we're using
17812 * RSS, the first vector only handles link interrupts and the
17813 * remaining vectors handle rx and tx interrupts. Reuse the
17814 * mailbox values for the next iteration. The values we set up
17815 * above are still useful for the single vectored mode.
17816 */
17817 if (!i)
17818 continue;
17819
17820 rcvmbx += 0x8;
17821
17822 if (sndmbx & 0x4)
17823 sndmbx -= 0x4;
17824 else
17825 sndmbx += 0xc;
17826 }
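/* Worked example of the sndmbx zig-zag above, assuming
 * TG3_64BIT_REG_LOW == 0x4: relative to MAILBOX_SNDHOST_PROD_IDX_0,
 * vectors 0 and 1 share +0x4 (the continue reuses vector 0's
 * mailboxes for the link-only vector), then successive vectors get
 * +0x0, +0xc, +0x8, +0x14, +0x10, ...: the low word, then the high
 * word, of each 64-bit producer mailbox.
 */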
17827
17828 /*
17829 * Reset chip in case UNDI or EFI driver did not shut down
17830 * DMA. The self test will enable WDMAC and we'll see (spurious)
17831 * pending DMA on the PCI bus at that point.
17832 */
17833 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17834 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17835 tg3_full_lock(tp, 0);
17836 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17837 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17838 tg3_full_unlock(tp);
17839 }
17840
17841 err = tg3_test_dma(tp);
17842 if (err) {
17843 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17844 goto err_out_apeunmap;
17845 }
17846
17847 tg3_init_coal(tp);
17848
17849 pci_set_drvdata(pdev, dev);
17850
17851 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17852 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17853 tg3_asic_rev(tp) == ASIC_REV_5762)
17854 tg3_flag_set(tp, PTP_CAPABLE);
17855
17856 tg3_timer_init(tp);
17857
17858 tg3_carrier_off(tp);
17859
17860 err = register_netdev(dev);
17861 if (err) {
17862 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17863 goto err_out_apeunmap;
17864 }
17865
17866 if (tg3_flag(tp, PTP_CAPABLE)) {
17867 tg3_ptp_init(tp);
17868 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17869 &tp->pdev->dev);
17870 if (IS_ERR(tp->ptp_clock))
17871 tp->ptp_clock = NULL;
17872 }
17873
17874 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17875 tp->board_part_number,
17876 tg3_chip_rev_id(tp),
17877 tg3_bus_string(tp, str),
17878 dev->dev_addr);
17879
17880 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17881 char *ethtype;
17882
17883 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17884 ethtype = "10/100Base-TX";
17885 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17886 ethtype = "1000Base-SX";
17887 else
17888 ethtype = "10/100/1000Base-T";
17889
17890 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17891 "(WireSpeed[%d], EEE[%d])\n",
17892 tg3_phy_string(tp), ethtype,
17893 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17894 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17895 }
17896
17897 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17898 (dev->features & NETIF_F_RXCSUM) != 0,
17899 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17900 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17901 tg3_flag(tp, ENABLE_ASF) != 0,
17902 tg3_flag(tp, TSO_CAPABLE) != 0);
17903 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17904 tp->dma_rwctrl,
17905 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17906 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17907
17908 pci_save_state(pdev);
17909
17910 return 0;
17911
17912 err_out_apeunmap:
17913 if (tp->aperegs) {
17914 iounmap(tp->aperegs);
17915 tp->aperegs = NULL;
17916 }
17917
17918 err_out_iounmap:
17919 if (tp->regs) {
17920 iounmap(tp->regs);
17921 tp->regs = NULL;
17922 }
17923
17924 err_out_free_dev:
17925 free_netdev(dev);
17926
17927 err_out_free_res:
17928 pci_release_regions(pdev);
17929
17930 err_out_disable_pdev:
17931 if (pci_is_enabled(pdev))
17932 pci_disable_device(pdev);
17933 return err;
17934 }
17935
17936 static void tg3_remove_one(struct pci_dev *pdev)
17937 {
17938 struct net_device *dev = pci_get_drvdata(pdev);
17939
17940 if (dev) {
17941 struct tg3 *tp = netdev_priv(dev);
17942
17943 tg3_ptp_fini(tp);
17944
17945 release_firmware(tp->fw);
17946
17947 tg3_reset_task_cancel(tp);
17948
17949 if (tg3_flag(tp, USE_PHYLIB)) {
17950 tg3_phy_fini(tp);
17951 tg3_mdio_fini(tp);
17952 }
17953
17954 unregister_netdev(dev);
17955 if (tp->aperegs) {
17956 iounmap(tp->aperegs);
17957 tp->aperegs = NULL;
17958 }
17959 if (tp->regs) {
17960 iounmap(tp->regs);
17961 tp->regs = NULL;
17962 }
17963 free_netdev(dev);
17964 pci_release_regions(pdev);
17965 pci_disable_device(pdev);
17966 }
17967 }
17968
17969 #ifdef CONFIG_PM_SLEEP
17970 static int tg3_suspend(struct device *device)
17971 {
17972 struct net_device *dev = dev_get_drvdata(device);
17973 struct tg3 *tp = netdev_priv(dev);
17974 int err = 0;
17975
17976 rtnl_lock();
17977
17978 if (!netif_running(dev))
17979 goto unlock;
17980
17981 tg3_reset_task_cancel(tp);
17982 tg3_phy_stop(tp);
17983 tg3_netif_stop(tp);
17984
17985 tg3_timer_stop(tp);
17986
17987 tg3_full_lock(tp, 1);
17988 tg3_disable_ints(tp);
17989 tg3_full_unlock(tp);
17990
17991 netif_device_detach(dev);
17992
17993 tg3_full_lock(tp, 0);
17994 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17995 tg3_flag_clear(tp, INIT_COMPLETE);
17996 tg3_full_unlock(tp);
17997
17998 err = tg3_power_down_prepare(tp);
17999 if (err) {
18000 int err2;
18001
18002 tg3_full_lock(tp, 0);
18003
18004 tg3_flag_set(tp, INIT_COMPLETE);
18005 err2 = tg3_restart_hw(tp, true);
18006 if (err2)
18007 goto out;
18008
18009 tg3_timer_start(tp);
18010
18011 netif_device_attach(dev);
18012 tg3_netif_start(tp);
18013
18014 out:
18015 tg3_full_unlock(tp);
18016
18017 if (!err2)
18018 tg3_phy_start(tp);
18019 }
18020
18021 unlock:
18022 rtnl_unlock();
18023 return err;
18024 }
18025
18026 static int tg3_resume(struct device *device)
18027 {
18028 struct net_device *dev = dev_get_drvdata(device);
18029 struct tg3 *tp = netdev_priv(dev);
18030 int err = 0;
18031
18032 rtnl_lock();
18033
18034 if (!netif_running(dev))
18035 goto unlock;
18036
18037 netif_device_attach(dev);
18038
18039 tg3_full_lock(tp, 0);
18040
18041 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18042
18043 tg3_flag_set(tp, INIT_COMPLETE);
18044 err = tg3_restart_hw(tp,
18045 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18046 if (err)
18047 goto out;
18048
18049 tg3_timer_start(tp);
18050
18051 tg3_netif_start(tp);
18052
18053 out:
18054 tg3_full_unlock(tp);
18055
18056 if (!err)
18057 tg3_phy_start(tp);
18058
18059 unlock:
18060 rtnl_unlock();
18061 return err;
18062 }
18063 #endif /* CONFIG_PM_SLEEP */
18064
18065 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
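/* SIMPLE_DEV_PM_OPS wires tg3_suspend/tg3_resume into the system
 * sleep callbacks (suspend, hibernation and friends); when
 * CONFIG_PM_SLEEP is unset the functions above are compiled out and
 * the macro leaves those callbacks empty.
 */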
18066
18067 static void tg3_shutdown(struct pci_dev *pdev)
18068 {
18069 struct net_device *dev = pci_get_drvdata(pdev);
18070 struct tg3 *tp = netdev_priv(dev);
18071
18072 tg3_reset_task_cancel(tp);
18073
18074 rtnl_lock();
18075
18076 netif_device_detach(dev);
18077
18078 if (netif_running(dev))
18079 dev_close(dev);
18080
18081 tg3_power_down(tp);
18082
18083 rtnl_unlock();
18084
18085 pci_disable_device(pdev);
18086 }
18087
18088 /**
18089 * tg3_io_error_detected - called when PCI error is detected
18090 * @pdev: Pointer to PCI device
18091 * @state: The current pci connection state
18092 *
18093 * This function is called after a PCI bus error affecting
18094 * this device has been detected.
18095 */
18096 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18097 pci_channel_state_t state)
18098 {
18099 struct net_device *netdev = pci_get_drvdata(pdev);
18100 struct tg3 *tp = netdev_priv(netdev);
18101 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18102
18103 netdev_info(netdev, "PCI I/O error detected\n");
18104
18105 /* Want to make sure that the reset task doesn't run */
18106 tg3_reset_task_cancel(tp);
18107
18108 rtnl_lock();
18109
18110 /* Could be a second call, or maybe we don't have netdev yet */
18111 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18112 goto done;
18113
18114 /* We needn't recover from permanent error */
18115 if (state == pci_channel_io_frozen)
18116 tp->pcierr_recovery = true;
18117
18118 tg3_phy_stop(tp);
18119
18120 tg3_netif_stop(tp);
18121
18122 tg3_timer_stop(tp);
18123
18124 netif_device_detach(netdev);
18125
18126 /* Clean up software state, even if MMIO is blocked */
18127 tg3_full_lock(tp, 0);
18128 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18129 tg3_full_unlock(tp);
18130
18131 done:
18132 if (state == pci_channel_io_perm_failure) {
18133 if (netdev) {
18134 tg3_napi_enable(tp);
18135 dev_close(netdev);
18136 }
18137 err = PCI_ERS_RESULT_DISCONNECT;
18138 } else {
18139 pci_disable_device(pdev);
18140 }
18141
18142 rtnl_unlock();
18143
18144 return err;
18145 }
18146
18147 /**
18148 * tg3_io_slot_reset - called after the pci bus has been reset.
18149 * @pdev: Pointer to PCI device
18150 *
18151 * Restart the card from scratch, as if from a cold-boot.
18152 * At this point, the card has experienced a hard reset,
18153 * followed by fixups by BIOS, and has its config space
18154 * set up identically to what it was at cold boot.
18155 */
18156 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18157 {
18158 struct net_device *netdev = pci_get_drvdata(pdev);
18159 struct tg3 *tp = netdev_priv(netdev);
18160 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18161 int err;
18162
18163 rtnl_lock();
18164
18165 if (pci_enable_device(pdev)) {
18166 dev_err(&pdev->dev,
18167 "Cannot re-enable PCI device after reset.\n");
18168 goto done;
18169 }
18170
18171 pci_set_master(pdev);
18172 pci_restore_state(pdev);
18173 pci_save_state(pdev);
18174
18175 if (!netdev || !netif_running(netdev)) {
18176 rc = PCI_ERS_RESULT_RECOVERED;
18177 goto done;
18178 }
18179
18180 err = tg3_power_up(tp);
18181 if (err)
18182 goto done;
18183
18184 rc = PCI_ERS_RESULT_RECOVERED;
18185
18186 done:
18187 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18188 tg3_napi_enable(tp);
18189 dev_close(netdev);
18190 }
18191 rtnl_unlock();
18192
18193 return rc;
18194 }
18195
18196 /**
18197 * tg3_io_resume - called when traffic can start flowing again.
18198 * @pdev: Pointer to PCI device
18199 *
18200 * This callback is called when the error recovery driver tells
18201 * us that it's OK to resume normal operation.
18202 */
18203 static void tg3_io_resume(struct pci_dev *pdev)
18204 {
18205 struct net_device *netdev = pci_get_drvdata(pdev);
18206 struct tg3 *tp = netdev_priv(netdev);
18207 int err;
18208
18209 rtnl_lock();
18210
18211 if (!netdev || !netif_running(netdev))
18212 goto done;
18213
18214 tg3_full_lock(tp, 0);
18215 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18216 tg3_flag_set(tp, INIT_COMPLETE);
18217 err = tg3_restart_hw(tp, true);
18218 if (err) {
18219 tg3_full_unlock(tp);
18220 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18221 goto done;
18222 }
18223
18224 netif_device_attach(netdev);
18225
18226 tg3_timer_start(tp);
18227
18228 tg3_netif_start(tp);
18229
18230 tg3_full_unlock(tp);
18231
18232 tg3_phy_start(tp);
18233
18234 done:
18235 tp->pcierr_recovery = false;
18236 rtnl_unlock();
18237 }
18238
18239 static const struct pci_error_handlers tg3_err_handler = {
18240 .error_detected = tg3_io_error_detected,
18241 .slot_reset = tg3_io_slot_reset,
18242 .resume = tg3_io_resume
18243 };
18244
18245 static struct pci_driver tg3_driver = {
18246 .name = DRV_MODULE_NAME,
18247 .id_table = tg3_pci_tbl,
18248 .probe = tg3_init_one,
18249 .remove = tg3_remove_one,
18250 .err_handler = &tg3_err_handler,
18251 .driver.pm = &tg3_pm_ops,
18252 .shutdown = tg3_shutdown,
18253 };
18254
18255 module_pci_driver(tg3_driver);
18256