1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited.
9 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10 * refers to Broadcom Inc. and/or its subsidiaries.
11 *
12 * Firmware is:
13 * Derived from proprietary unpublished source code,
14 * Copyright (C) 2000-2016 Broadcom Corporation.
15 * Copyright (C) 2016-2017 Broadcom Ltd.
16 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17 * refers to Broadcom Inc. and/or its subsidiaries.
18 *
19 * Permission is hereby granted for the distribution of this firmware
20 * data in hexadecimal or equivalent format, provided this copyright
21 * notice is accompanying it.
22 */
23
24
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
58
59 #include <net/checksum.h>
60 #include <net/ip.h>
61
62 #include <linux/io.h>
63 #include <asm/byteorder.h>
64 #include <linux/uaccess.h>
65
66 #include <uapi/linux/net_tstamp.h>
67 #include <linux/ptp_clock_kernel.h>
68
69 #ifdef CONFIG_SPARC
70 #include <asm/idprom.h>
71 #include <asm/prom.h>
72 #endif
73
74 #define BAR_0 0
75 #define BAR_2 2
76
77 #include "tg3.h"
78
79 /* Functions & macros to verify TG3_FLAGS types */
80
81 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
82 {
83 return test_bit(flag, bits);
84 }
85
86 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
87 {
88 set_bit(flag, bits);
89 }
90
91 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
92 {
93 clear_bit(flag, bits);
94 }
95
96 #define tg3_flag(tp, flag) \
97 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
98 #define tg3_flag_set(tp, flag) \
99 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
100 #define tg3_flag_clear(tp, flag) \
101 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
102
103 #define DRV_MODULE_NAME "tg3"
104 #define TG3_MAJ_NUM 3
105 #define TG3_MIN_NUM 137
106 #define DRV_MODULE_VERSION \
107 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
108 #define DRV_MODULE_RELDATE "May 11, 2014"
109
110 #define RESET_KIND_SHUTDOWN 0
111 #define RESET_KIND_INIT 1
112 #define RESET_KIND_SUSPEND 2
113
114 #define TG3_DEF_RX_MODE 0
115 #define TG3_DEF_TX_MODE 0
116 #define TG3_DEF_MSG_ENABLE \
117 (NETIF_MSG_DRV | \
118 NETIF_MSG_PROBE | \
119 NETIF_MSG_LINK | \
120 NETIF_MSG_TIMER | \
121 NETIF_MSG_IFDOWN | \
122 NETIF_MSG_IFUP | \
123 NETIF_MSG_RX_ERR | \
124 NETIF_MSG_TX_ERR)
125
126 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
127
128 /* length of time before we decide the hardware is borked,
129 * and dev->tx_timeout() should be called to fix the problem
130 */
131
132 #define TG3_TX_TIMEOUT (5 * HZ)
133
134 /* hardware minimum and maximum for a single frame's data payload */
135 #define TG3_MIN_MTU ETH_ZLEN
136 #define TG3_MAX_MTU(tp) \
137 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
138
139 /* These numbers seem to be hard coded in the NIC firmware somehow.
140 * You can't change the ring sizes, but you can change where you place
141 * them in the NIC onboard memory.
142 */
143 #define TG3_RX_STD_RING_SIZE(tp) \
144 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
145 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
146 #define TG3_DEF_RX_RING_PENDING 200
147 #define TG3_RX_JMB_RING_SIZE(tp) \
148 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
149 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
150 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
151
152 /* Do not place this n-ring entries value into the tp struct itself,
153 * we really want to expose these constants to GCC so that modulo et
154 * al. operations are done with shifts and masks instead of with
155 * hw multiply/modulo instructions. Another solution would be to
156 * replace things like '% foo' with '& (foo - 1)'.
157 */
158
159 #define TG3_TX_RING_SIZE 512
160 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
161
162 #define TG3_RX_STD_RING_BYTES(tp) \
163 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
164 #define TG3_RX_JMB_RING_BYTES(tp) \
165 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
166 #define TG3_RX_RCB_RING_BYTES(tp) \
167 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
168 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
169 TG3_TX_RING_SIZE)
170 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
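/* Worked example of the shift/mask point made above: TG3_TX_RING_SIZE is
 * 512, a power of two, so NEXT_TX(511) == (511 + 1) & 511 == 0, i.e. the
 * same wrap-around as (511 + 1) % 512 but without a divide instruction.
 */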
171
172 #define TG3_DMA_BYTE_ENAB 64
173
174 #define TG3_RX_STD_DMA_SZ 1536
175 #define TG3_RX_JMB_DMA_SZ 9046
176
177 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
178
179 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
180 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
181
182 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
183 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
184
185 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
186 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
187
188 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
189 * that are at least dword aligned when used in PCIX mode. The driver
190 * works around this bug by double copying the packet. This workaround
191 * is built into the normal double copy length check for efficiency.
192 *
193 * However, the double copy is only necessary on those architectures
194 * where unaligned memory accesses are inefficient. For those architectures
195 * where unaligned memory accesses incur little penalty, we can reintegrate
196 * the 5701 in the normal rx path. Doing so saves a device structure
197 * dereference by hardcoding the double copy threshold in place.
198 */
199 #define TG3_RX_COPY_THRESHOLD 256
200 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
201 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
202 #else
203 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
204 #endif
205
206 #if (NET_IP_ALIGN != 0)
207 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
208 #else
209 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
210 #endif
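/* Hedged sketch of how TG3_RX_COPY_THRESH() is meant to be consumed on the
 * receive path (the actual rx handler appears later in this file): short
 * frames are copied into a fresh skb so the mapped buffer can be recycled,
 * long frames are unmapped and passed up whole:
 *
 *	if (len <= TG3_RX_COPY_THRESH(tp))
 *		...copy the frame and recycle the rx buffer...
 *	else
 *		...hand the DMA buffer to the stack as-is...
 */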
211
212 /* minimum number of free TX descriptors required to wake up TX process */
213 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
214 #define TG3_TX_BD_DMA_MAX_2K 2048
215 #define TG3_TX_BD_DMA_MAX_4K 4096
216
217 #define TG3_RAW_IP_ALIGN 2
218
219 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
220 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
221
222 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
223 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
224
225 #define FIRMWARE_TG3 "tigon/tg3.bin"
226 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
227 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
228 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
229
230 static char version[] =
231 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
232
233 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
234 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
235 MODULE_LICENSE("GPL");
236 MODULE_VERSION(DRV_MODULE_VERSION);
237 MODULE_FIRMWARE(FIRMWARE_TG3);
238 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
239 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
240
241 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
242 module_param(tg3_debug, int, 0);
243 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
244
245 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
246 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
247
248 static const struct pci_device_id tg3_pci_tbl[] = {
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
268 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
269 TG3_DRV_DATA_FLAG_5705_10_100},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
271 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
272 TG3_DRV_DATA_FLAG_5705_10_100},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
275 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
276 TG3_DRV_DATA_FLAG_5705_10_100},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
283 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
297 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
298 PCI_VENDOR_ID_LENOVO,
299 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
300 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
303 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
311 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
312 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
313 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
314 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
315 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
316 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
322 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
323 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
324 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
325 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
326 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
327 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
331 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
332 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
340 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
341 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
342 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
343 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
344 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
345 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
346 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
347 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
348 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
349 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
350 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
351 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
352 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
353 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
354 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
355 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
356 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
357 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
358 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
359 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
360 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
361 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
362 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
363 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
364 {}
365 };
366
367 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
368
369 static const struct {
370 const char string[ETH_GSTRING_LEN];
371 } ethtool_stats_keys[] = {
372 { "rx_octets" },
373 { "rx_fragments" },
374 { "rx_ucast_packets" },
375 { "rx_mcast_packets" },
376 { "rx_bcast_packets" },
377 { "rx_fcs_errors" },
378 { "rx_align_errors" },
379 { "rx_xon_pause_rcvd" },
380 { "rx_xoff_pause_rcvd" },
381 { "rx_mac_ctrl_rcvd" },
382 { "rx_xoff_entered" },
383 { "rx_frame_too_long_errors" },
384 { "rx_jabbers" },
385 { "rx_undersize_packets" },
386 { "rx_in_length_errors" },
387 { "rx_out_length_errors" },
388 { "rx_64_or_less_octet_packets" },
389 { "rx_65_to_127_octet_packets" },
390 { "rx_128_to_255_octet_packets" },
391 { "rx_256_to_511_octet_packets" },
392 { "rx_512_to_1023_octet_packets" },
393 { "rx_1024_to_1522_octet_packets" },
394 { "rx_1523_to_2047_octet_packets" },
395 { "rx_2048_to_4095_octet_packets" },
396 { "rx_4096_to_8191_octet_packets" },
397 { "rx_8192_to_9022_octet_packets" },
398
399 { "tx_octets" },
400 { "tx_collisions" },
401
402 { "tx_xon_sent" },
403 { "tx_xoff_sent" },
404 { "tx_flow_control" },
405 { "tx_mac_errors" },
406 { "tx_single_collisions" },
407 { "tx_mult_collisions" },
408 { "tx_deferred" },
409 { "tx_excessive_collisions" },
410 { "tx_late_collisions" },
411 { "tx_collide_2times" },
412 { "tx_collide_3times" },
413 { "tx_collide_4times" },
414 { "tx_collide_5times" },
415 { "tx_collide_6times" },
416 { "tx_collide_7times" },
417 { "tx_collide_8times" },
418 { "tx_collide_9times" },
419 { "tx_collide_10times" },
420 { "tx_collide_11times" },
421 { "tx_collide_12times" },
422 { "tx_collide_13times" },
423 { "tx_collide_14times" },
424 { "tx_collide_15times" },
425 { "tx_ucast_packets" },
426 { "tx_mcast_packets" },
427 { "tx_bcast_packets" },
428 { "tx_carrier_sense_errors" },
429 { "tx_discards" },
430 { "tx_errors" },
431
432 { "dma_writeq_full" },
433 { "dma_write_prioq_full" },
434 { "rxbds_empty" },
435 { "rx_discards" },
436 { "rx_errors" },
437 { "rx_threshold_hit" },
438
439 { "dma_readq_full" },
440 { "dma_read_prioq_full" },
441 { "tx_comp_queue_full" },
442
443 { "ring_set_send_prod_index" },
444 { "ring_status_update" },
445 { "nic_irqs" },
446 { "nic_avoided_irqs" },
447 { "nic_tx_threshold_hit" },
448
449 { "mbuf_lwm_thresh_hit" },
450 };
451
452 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
453 #define TG3_NVRAM_TEST 0
454 #define TG3_LINK_TEST 1
455 #define TG3_REGISTER_TEST 2
456 #define TG3_MEMORY_TEST 3
457 #define TG3_MAC_LOOPB_TEST 4
458 #define TG3_PHY_LOOPB_TEST 5
459 #define TG3_EXT_LOOPB_TEST 6
460 #define TG3_INTERRUPT_TEST 7
461
462
463 static const struct {
464 const char string[ETH_GSTRING_LEN];
465 } ethtool_test_keys[] = {
466 [TG3_NVRAM_TEST] = { "nvram test (online) " },
467 [TG3_LINK_TEST] = { "link test (online) " },
468 [TG3_REGISTER_TEST] = { "register test (offline)" },
469 [TG3_MEMORY_TEST] = { "memory test (offline)" },
470 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
471 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
472 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
473 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
474 };
475
476 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
477
478
479 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
480 {
481 writel(val, tp->regs + off);
482 }
483
484 static u32 tg3_read32(struct tg3 *tp, u32 off)
485 {
486 return readl(tp->regs + off);
487 }
488
489 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
490 {
491 writel(val, tp->aperegs + off);
492 }
493
494 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
495 {
496 return readl(tp->aperegs + off);
497 }
498
499 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
500 {
501 unsigned long flags;
502
503 spin_lock_irqsave(&tp->indirect_lock, flags);
504 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
505 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
506 spin_unlock_irqrestore(&tp->indirect_lock, flags);
507 }
508
509 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
510 {
511 writel(val, tp->regs + off);
512 readl(tp->regs + off);
513 }
514
515 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
516 {
517 unsigned long flags;
518 u32 val;
519
520 spin_lock_irqsave(&tp->indirect_lock, flags);
521 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
522 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
523 spin_unlock_irqrestore(&tp->indirect_lock, flags);
524 return val;
525 }
526
527 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
528 {
529 unsigned long flags;
530
531 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
532 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
533 TG3_64BIT_REG_LOW, val);
534 return;
535 }
536 if (off == TG3_RX_STD_PROD_IDX_REG) {
537 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
538 TG3_64BIT_REG_LOW, val);
539 return;
540 }
541
542 spin_lock_irqsave(&tp->indirect_lock, flags);
543 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
544 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
545 spin_unlock_irqrestore(&tp->indirect_lock, flags);
546
547 /* In indirect mode when disabling interrupts, we also need
548 * to clear the interrupt bit in the GRC local ctrl register.
549 */
550 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
551 (val == 0x1)) {
552 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
553 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
554 }
555 }
556
557 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
558 {
559 unsigned long flags;
560 u32 val;
561
562 spin_lock_irqsave(&tp->indirect_lock, flags);
563 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
564 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
565 spin_unlock_irqrestore(&tp->indirect_lock, flags);
566 return val;
567 }
568
569 /* usec_wait specifies the wait time in usec when writing to certain registers
570 * where it is unsafe to read back the register without some delay.
571 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
572 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
573 */
574 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
575 {
576 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
577 /* Non-posted methods */
578 tp->write32(tp, off, val);
579 else {
580 /* Posted method */
581 tg3_write32(tp, off, val);
582 if (usec_wait)
583 udelay(usec_wait);
584 tp->read32(tp, off);
585 }
586 /* Wait again after the read for the posted method to guarantee that
587 * the wait time is met.
588 */
589 if (usec_wait)
590 udelay(usec_wait);
591 }
592
593 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
594 {
595 tp->write32_mbox(tp, off, val);
596 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
597 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
598 !tg3_flag(tp, ICH_WORKAROUND)))
599 tp->read32_mbox(tp, off);
600 }
601
602 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
603 {
604 void __iomem *mbox = tp->regs + off;
605 writel(val, mbox);
606 if (tg3_flag(tp, TXD_MBOX_HWBUG))
607 writel(val, mbox);
608 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
609 tg3_flag(tp, FLUSH_POSTED_WRITES))
610 readl(mbox);
611 }
612
613 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
614 {
615 return readl(tp->regs + off + GRCMBOX_BASE);
616 }
617
618 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
619 {
620 writel(val, tp->regs + off + GRCMBOX_BASE);
621 }
622
623 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
624 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
625 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
626 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
627 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
628
629 #define tw32(reg, val) tp->write32(tp, reg, val)
630 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
631 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
632 #define tr32(reg) tp->read32(tp, reg)
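/* Usage sketch for the accessors above (illustrative call, not an extra
 * code path): tw32() posts a write, tw32_f() flushes it with a read-back,
 * and tw32_wait_f() additionally enforces the settling delay described
 * before _tw32_flush(), e.g. for a GPIO power switch toggled through
 * GRC_LOCAL_CTRL:
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */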
633
634 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
635 {
636 unsigned long flags;
637
638 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
639 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
640 return;
641
642 spin_lock_irqsave(&tp->indirect_lock, flags);
643 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
644 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
645 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
646
647 /* Always leave this as zero. */
648 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
649 } else {
650 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
651 tw32_f(TG3PCI_MEM_WIN_DATA, val);
652
653 /* Always leave this as zero. */
654 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
655 }
656 spin_unlock_irqrestore(&tp->indirect_lock, flags);
657 }
658
659 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
660 {
661 unsigned long flags;
662
663 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
664 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
665 *val = 0;
666 return;
667 }
668
669 spin_lock_irqsave(&tp->indirect_lock, flags);
670 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
671 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
672 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
673
674 /* Always leave this as zero. */
675 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
676 } else {
677 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
678 *val = tr32(TG3PCI_MEM_WIN_DATA);
679
680 /* Always leave this as zero. */
681 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
682 }
683 spin_unlock_irqrestore(&tp->indirect_lock, flags);
684 }
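/* Illustrative use of the SRAM window helpers above; tg3_poll_fw() later
 * in this file polls the firmware handshake mailbox exactly this way:
 *
 *	u32 val;
 *
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 *	if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 *		...bootcode has finished initializing...
 */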
685
686 static void tg3_ape_lock_init(struct tg3 *tp)
687 {
688 int i;
689 u32 regbase, bit;
690
691 if (tg3_asic_rev(tp) == ASIC_REV_5761)
692 regbase = TG3_APE_LOCK_GRANT;
693 else
694 regbase = TG3_APE_PER_LOCK_GRANT;
695
696 /* Make sure the driver doesn't hold any stale locks. */
697 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
698 switch (i) {
699 case TG3_APE_LOCK_PHY0:
700 case TG3_APE_LOCK_PHY1:
701 case TG3_APE_LOCK_PHY2:
702 case TG3_APE_LOCK_PHY3:
703 bit = APE_LOCK_GRANT_DRIVER;
704 break;
705 default:
706 if (!tp->pci_fn)
707 bit = APE_LOCK_GRANT_DRIVER;
708 else
709 bit = 1 << tp->pci_fn;
710 }
711 tg3_ape_write32(tp, regbase + 4 * i, bit);
712 }
713
714 }
715
716 static int tg3_ape_lock(struct tg3 *tp, int locknum)
717 {
718 int i, off;
719 int ret = 0;
720 u32 status, req, gnt, bit;
721
722 if (!tg3_flag(tp, ENABLE_APE))
723 return 0;
724
725 switch (locknum) {
726 case TG3_APE_LOCK_GPIO:
727 if (tg3_asic_rev(tp) == ASIC_REV_5761)
728 return 0;
729 /* else: fall through */
730 case TG3_APE_LOCK_GRC:
731 case TG3_APE_LOCK_MEM:
732 if (!tp->pci_fn)
733 bit = APE_LOCK_REQ_DRIVER;
734 else
735 bit = 1 << tp->pci_fn;
736 break;
737 case TG3_APE_LOCK_PHY0:
738 case TG3_APE_LOCK_PHY1:
739 case TG3_APE_LOCK_PHY2:
740 case TG3_APE_LOCK_PHY3:
741 bit = APE_LOCK_REQ_DRIVER;
742 break;
743 default:
744 return -EINVAL;
745 }
746
747 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
748 req = TG3_APE_LOCK_REQ;
749 gnt = TG3_APE_LOCK_GRANT;
750 } else {
751 req = TG3_APE_PER_LOCK_REQ;
752 gnt = TG3_APE_PER_LOCK_GRANT;
753 }
754
755 off = 4 * locknum;
756
757 tg3_ape_write32(tp, req + off, bit);
758
759 /* Wait for up to 1 millisecond to acquire lock. */
760 for (i = 0; i < 100; i++) {
761 status = tg3_ape_read32(tp, gnt + off);
762 if (status == bit)
763 break;
764 if (pci_channel_offline(tp->pdev))
765 break;
766
767 udelay(10);
768 }
769
770 if (status != bit) {
771 /* Revoke the lock request. */
772 tg3_ape_write32(tp, gnt + off, bit);
773 ret = -EBUSY;
774 }
775
776 return ret;
777 }
778
779 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
780 {
781 u32 gnt, bit;
782
783 if (!tg3_flag(tp, ENABLE_APE))
784 return;
785
786 switch (locknum) {
787 case TG3_APE_LOCK_GPIO:
788 if (tg3_asic_rev(tp) == ASIC_REV_5761)
789 return;
790 /* else: fall through */
791 case TG3_APE_LOCK_GRC:
792 case TG3_APE_LOCK_MEM:
793 if (!tp->pci_fn)
794 bit = APE_LOCK_GRANT_DRIVER;
795 else
796 bit = 1 << tp->pci_fn;
797 break;
798 case TG3_APE_LOCK_PHY0:
799 case TG3_APE_LOCK_PHY1:
800 case TG3_APE_LOCK_PHY2:
801 case TG3_APE_LOCK_PHY3:
802 bit = APE_LOCK_GRANT_DRIVER;
803 break;
804 default:
805 return;
806 }
807
808 if (tg3_asic_rev(tp) == ASIC_REV_5761)
809 gnt = TG3_APE_LOCK_GRANT;
810 else
811 gnt = TG3_APE_PER_LOCK_GRANT;
812
813 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
814 }
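/* The APE lock helpers above are used in matched pairs around accesses to
 * shared APE memory; tg3_ape_event_lock() below shows the canonical
 * pattern:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	...touch APE shared memory...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */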
815
816 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
817 {
818 u32 apedata;
819
820 while (timeout_us) {
821 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
822 return -EBUSY;
823
824 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
825 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
826 break;
827
828 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
829
830 udelay(10);
831 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
832 }
833
834 return timeout_us ? 0 : -EBUSY;
835 }
836
837 #ifdef CONFIG_TIGON3_HWMON
838 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
839 {
840 u32 i, apedata;
841
842 for (i = 0; i < timeout_us / 10; i++) {
843 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
844
845 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
846 break;
847
848 udelay(10);
849 }
850
851 return i == timeout_us / 10;
852 }
853
854 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
855 u32 len)
856 {
857 int err;
858 u32 i, bufoff, msgoff, maxlen, apedata;
859
860 if (!tg3_flag(tp, APE_HAS_NCSI))
861 return 0;
862
863 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
864 if (apedata != APE_SEG_SIG_MAGIC)
865 return -ENODEV;
866
867 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
868 if (!(apedata & APE_FW_STATUS_READY))
869 return -EAGAIN;
870
871 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
872 TG3_APE_SHMEM_BASE;
873 msgoff = bufoff + 2 * sizeof(u32);
874 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
875
876 while (len) {
877 u32 length;
878
879 /* Cap xfer sizes to scratchpad limits. */
880 length = (len > maxlen) ? maxlen : len;
881 len -= length;
882
883 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
884 if (!(apedata & APE_FW_STATUS_READY))
885 return -EAGAIN;
886
887 /* Wait for up to 1 msec for APE to service previous event. */
888 err = tg3_ape_event_lock(tp, 1000);
889 if (err)
890 return err;
891
892 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
893 APE_EVENT_STATUS_SCRTCHPD_READ |
894 APE_EVENT_STATUS_EVENT_PENDING;
895 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
896
897 tg3_ape_write32(tp, bufoff, base_off);
898 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
899
900 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
901 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
902
903 base_off += length;
904
905 if (tg3_ape_wait_for_event(tp, 30000))
906 return -EAGAIN;
907
908 for (i = 0; length; i += 4, length -= 4) {
909 u32 val = tg3_ape_read32(tp, msgoff + i);
910 memcpy(data, &val, sizeof(u32));
911 data++;
912 }
913 }
914
915 return 0;
916 }
917 #endif
918
919 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
920 {
921 int err;
922 u32 apedata;
923
924 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
925 if (apedata != APE_SEG_SIG_MAGIC)
926 return -EAGAIN;
927
928 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
929 if (!(apedata & APE_FW_STATUS_READY))
930 return -EAGAIN;
931
932 /* Wait for up to 20 milliseconds for APE to service the previous event. */
933 err = tg3_ape_event_lock(tp, 20000);
934 if (err)
935 return err;
936
937 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
938 event | APE_EVENT_STATUS_EVENT_PENDING);
939
940 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
941 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
942
943 return 0;
944 }
945
946 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
947 {
948 u32 event;
949 u32 apedata;
950
951 if (!tg3_flag(tp, ENABLE_APE))
952 return;
953
954 switch (kind) {
955 case RESET_KIND_INIT:
956 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
957 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
958 APE_HOST_SEG_SIG_MAGIC);
959 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
960 APE_HOST_SEG_LEN_MAGIC);
961 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
962 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
963 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
964 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
965 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
966 APE_HOST_BEHAV_NO_PHYLOCK);
967 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
968 TG3_APE_HOST_DRVR_STATE_START);
969
970 event = APE_EVENT_STATUS_STATE_START;
971 break;
972 case RESET_KIND_SHUTDOWN:
973 if (device_may_wakeup(&tp->pdev->dev) &&
974 tg3_flag(tp, WOL_ENABLE)) {
975 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
976 TG3_APE_HOST_WOL_SPEED_AUTO);
977 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
978 } else
979 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
980
981 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
982
983 event = APE_EVENT_STATUS_STATE_UNLOAD;
984 break;
985 default:
986 return;
987 }
988
989 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
990
991 tg3_ape_send_event(tp, event);
992 }
993
994 static void tg3_send_ape_heartbeat(struct tg3 *tp,
995 unsigned long interval)
996 {
997 /* Check if the heartbeat interval has elapsed */
998 if (!tg3_flag(tp, ENABLE_APE) ||
999 time_before(jiffies, tp->ape_hb_jiffies + interval))
1000 return;
1001
1002 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
1003 tp->ape_hb_jiffies = jiffies;
1004 }
1005
1006 static void tg3_disable_ints(struct tg3 *tp)
1007 {
1008 int i;
1009
1010 tw32(TG3PCI_MISC_HOST_CTRL,
1011 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1012 for (i = 0; i < tp->irq_max; i++)
1013 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1014 }
1015
1016 static void tg3_enable_ints(struct tg3 *tp)
1017 {
1018 int i;
1019
1020 tp->irq_sync = 0;
1021 wmb();
1022
1023 tw32(TG3PCI_MISC_HOST_CTRL,
1024 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1025
1026 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1027 for (i = 0; i < tp->irq_cnt; i++) {
1028 struct tg3_napi *tnapi = &tp->napi[i];
1029
1030 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1031 if (tg3_flag(tp, 1SHOT_MSI))
1032 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1033
1034 tp->coal_now |= tnapi->coal_now;
1035 }
1036
1037 /* Force an initial interrupt */
1038 if (!tg3_flag(tp, TAGGED_STATUS) &&
1039 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1040 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1041 else
1042 tw32(HOSTCC_MODE, tp->coal_now);
1043
1044 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1045 }
1046
1047 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1048 {
1049 struct tg3 *tp = tnapi->tp;
1050 struct tg3_hw_status *sblk = tnapi->hw_status;
1051 unsigned int work_exists = 0;
1052
1053 /* check for phy events */
1054 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1055 if (sblk->status & SD_STATUS_LINK_CHG)
1056 work_exists = 1;
1057 }
1058
1059 /* check for TX work to do */
1060 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1061 work_exists = 1;
1062
1063 /* check for RX work to do */
1064 if (tnapi->rx_rcb_prod_idx &&
1065 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1066 work_exists = 1;
1067
1068 return work_exists;
1069 }
1070
1071 /* tg3_int_reenable
1072 * similar to tg3_enable_ints, but it accurately determines whether there
1073 * is new work pending and can return without flushing the PIO write
1074 * which reenables interrupts
1075 */
1076 static void tg3_int_reenable(struct tg3_napi *tnapi)
1077 {
1078 struct tg3 *tp = tnapi->tp;
1079
1080 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1081 mmiowb();
1082
1083 /* When doing tagged status, this work check is unnecessary.
1084 * The last_tag we write above tells the chip which piece of
1085 * work we've completed.
1086 */
1087 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1088 tw32(HOSTCC_MODE, tp->coalesce_mode |
1089 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1090 }
1091
1092 static void tg3_switch_clocks(struct tg3 *tp)
1093 {
1094 u32 clock_ctrl;
1095 u32 orig_clock_ctrl;
1096
1097 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1098 return;
1099
1100 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1101
1102 orig_clock_ctrl = clock_ctrl;
1103 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1104 CLOCK_CTRL_CLKRUN_OENABLE |
1105 0x1f);
1106 tp->pci_clock_ctrl = clock_ctrl;
1107
1108 if (tg3_flag(tp, 5705_PLUS)) {
1109 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1110 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1111 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1112 }
1113 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1114 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1115 clock_ctrl |
1116 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1117 40);
1118 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1119 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1120 40);
1121 }
1122 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1123 }
1124
1125 #define PHY_BUSY_LOOPS 5000
1126
1127 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1128 u32 *val)
1129 {
1130 u32 frame_val;
1131 unsigned int loops;
1132 int ret;
1133
1134 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1135 tw32_f(MAC_MI_MODE,
1136 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1137 udelay(80);
1138 }
1139
1140 tg3_ape_lock(tp, tp->phy_ape_lock);
1141
1142 *val = 0x0;
1143
1144 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1145 MI_COM_PHY_ADDR_MASK);
1146 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1147 MI_COM_REG_ADDR_MASK);
1148 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1149
1150 tw32_f(MAC_MI_COM, frame_val);
1151
1152 loops = PHY_BUSY_LOOPS;
1153 while (loops != 0) {
1154 udelay(10);
1155 frame_val = tr32(MAC_MI_COM);
1156
1157 if ((frame_val & MI_COM_BUSY) == 0) {
1158 udelay(5);
1159 frame_val = tr32(MAC_MI_COM);
1160 break;
1161 }
1162 loops -= 1;
1163 }
1164
1165 ret = -EBUSY;
1166 if (loops != 0) {
1167 *val = frame_val & MI_COM_DATA_MASK;
1168 ret = 0;
1169 }
1170
1171 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1172 tw32_f(MAC_MI_MODE, tp->mi_mode);
1173 udelay(80);
1174 }
1175
1176 tg3_ape_unlock(tp, tp->phy_ape_lock);
1177
1178 return ret;
1179 }
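/* Shape of the MI_COM transaction driven above (a summary of the code, not
 * an extra code path): the PHY address, register, command and start bit
 * are packed into one frame word,
 *
 *	frame = (addr << MI_COM_PHY_ADDR_SHIFT) |
 *		(reg << MI_COM_REG_ADDR_SHIFT) |
 *		MI_COM_CMD_READ | MI_COM_START;
 *
 * the frame is written to MAC_MI_COM, MI_COM_BUSY is polled until it
 * clears, and the result is then taken from the MI_COM_DATA_MASK bits.
 */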
1180
1181 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1182 {
1183 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1184 }
1185
1186 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1187 u32 val)
1188 {
1189 u32 frame_val;
1190 unsigned int loops;
1191 int ret;
1192
1193 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1194 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1195 return 0;
1196
1197 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1198 tw32_f(MAC_MI_MODE,
1199 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1200 udelay(80);
1201 }
1202
1203 tg3_ape_lock(tp, tp->phy_ape_lock);
1204
1205 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1206 MI_COM_PHY_ADDR_MASK);
1207 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1208 MI_COM_REG_ADDR_MASK);
1209 frame_val |= (val & MI_COM_DATA_MASK);
1210 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1211
1212 tw32_f(MAC_MI_COM, frame_val);
1213
1214 loops = PHY_BUSY_LOOPS;
1215 while (loops != 0) {
1216 udelay(10);
1217 frame_val = tr32(MAC_MI_COM);
1218 if ((frame_val & MI_COM_BUSY) == 0) {
1219 udelay(5);
1220 frame_val = tr32(MAC_MI_COM);
1221 break;
1222 }
1223 loops -= 1;
1224 }
1225
1226 ret = -EBUSY;
1227 if (loops != 0)
1228 ret = 0;
1229
1230 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1231 tw32_f(MAC_MI_MODE, tp->mi_mode);
1232 udelay(80);
1233 }
1234
1235 tg3_ape_unlock(tp, tp->phy_ape_lock);
1236
1237 return ret;
1238 }
1239
1240 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1241 {
1242 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1243 }
1244
1245 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1246 {
1247 int err;
1248
1249 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1250 if (err)
1251 goto done;
1252
1253 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1254 if (err)
1255 goto done;
1256
1257 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1258 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1259 if (err)
1260 goto done;
1261
1262 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1263
1264 done:
1265 return err;
1266 }
1267
1268 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1269 {
1270 int err;
1271
1272 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1273 if (err)
1274 goto done;
1275
1276 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1277 if (err)
1278 goto done;
1279
1280 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1281 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1282 if (err)
1283 goto done;
1284
1285 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1286
1287 done:
1288 return err;
1289 }
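/* Both clause-45 helpers above implement the standard MMD indirect access
 * sequence over clause-22 registers: write the MMD device address to
 * MII_TG3_MMD_CTRL, write the target register to MII_TG3_MMD_ADDRESS,
 * switch MII_TG3_MMD_CTRL into no-increment data mode, then move the data
 * word through MII_TG3_MMD_ADDRESS.
 */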
1290
1291 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1292 {
1293 int err;
1294
1295 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1296 if (!err)
1297 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1298
1299 return err;
1300 }
1301
1302 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1303 {
1304 int err;
1305
1306 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1307 if (!err)
1308 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1309
1310 return err;
1311 }
1312
1313 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1314 {
1315 int err;
1316
1317 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1318 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1319 MII_TG3_AUXCTL_SHDWSEL_MISC);
1320 if (!err)
1321 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1322
1323 return err;
1324 }
1325
1326 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1327 {
1328 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1329 set |= MII_TG3_AUXCTL_MISC_WREN;
1330
1331 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1332 }
1333
1334 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1335 {
1336 u32 val;
1337 int err;
1338
1339 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1340
1341 if (err)
1342 return err;
1343
1344 if (enable)
1345 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1346 else
1347 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1348
1349 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1350 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1351
1352 return err;
1353 }
1354
1355 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1356 {
1357 return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1358 reg | val | MII_TG3_MISC_SHDW_WREN);
1359 }
1360
1361 static int tg3_bmcr_reset(struct tg3 *tp)
1362 {
1363 u32 phy_control;
1364 int limit, err;
1365
1366 /* OK, reset it, and poll the BMCR_RESET bit until it
1367 * clears or we time out.
1368 */
1369 phy_control = BMCR_RESET;
1370 err = tg3_writephy(tp, MII_BMCR, phy_control);
1371 if (err != 0)
1372 return -EBUSY;
1373
1374 limit = 5000;
1375 while (limit--) {
1376 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1377 if (err != 0)
1378 return -EBUSY;
1379
1380 if ((phy_control & BMCR_RESET) == 0) {
1381 udelay(40);
1382 break;
1383 }
1384 udelay(10);
1385 }
1386 if (limit < 0)
1387 return -EBUSY;
1388
1389 return 0;
1390 }
1391
1392 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1393 {
1394 struct tg3 *tp = bp->priv;
1395 u32 val;
1396
1397 spin_lock_bh(&tp->lock);
1398
1399 if (__tg3_readphy(tp, mii_id, reg, &val))
1400 val = -EIO;
1401
1402 spin_unlock_bh(&tp->lock);
1403
1404 return val;
1405 }
1406
1407 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1408 {
1409 struct tg3 *tp = bp->priv;
1410 u32 ret = 0;
1411
1412 spin_lock_bh(&tp->lock);
1413
1414 if (__tg3_writephy(tp, mii_id, reg, val))
1415 ret = -EIO;
1416
1417 spin_unlock_bh(&tp->lock);
1418
1419 return ret;
1420 }
1421
1422 static void tg3_mdio_config_5785(struct tg3 *tp)
1423 {
1424 u32 val;
1425 struct phy_device *phydev;
1426
1427 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1428 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1429 case PHY_ID_BCM50610:
1430 case PHY_ID_BCM50610M:
1431 val = MAC_PHYCFG2_50610_LED_MODES;
1432 break;
1433 case PHY_ID_BCMAC131:
1434 val = MAC_PHYCFG2_AC131_LED_MODES;
1435 break;
1436 case PHY_ID_RTL8211C:
1437 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1438 break;
1439 case PHY_ID_RTL8201E:
1440 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1441 break;
1442 default:
1443 return;
1444 }
1445
1446 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1447 tw32(MAC_PHYCFG2, val);
1448
1449 val = tr32(MAC_PHYCFG1);
1450 val &= ~(MAC_PHYCFG1_RGMII_INT |
1451 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1452 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1453 tw32(MAC_PHYCFG1, val);
1454
1455 return;
1456 }
1457
1458 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1459 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1460 MAC_PHYCFG2_FMODE_MASK_MASK |
1461 MAC_PHYCFG2_GMODE_MASK_MASK |
1462 MAC_PHYCFG2_ACT_MASK_MASK |
1463 MAC_PHYCFG2_QUAL_MASK_MASK |
1464 MAC_PHYCFG2_INBAND_ENABLE;
1465
1466 tw32(MAC_PHYCFG2, val);
1467
1468 val = tr32(MAC_PHYCFG1);
1469 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1470 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1471 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1472 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1473 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1474 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1475 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1476 }
1477 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1478 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1479 tw32(MAC_PHYCFG1, val);
1480
1481 val = tr32(MAC_EXT_RGMII_MODE);
1482 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1483 MAC_RGMII_MODE_RX_QUALITY |
1484 MAC_RGMII_MODE_RX_ACTIVITY |
1485 MAC_RGMII_MODE_RX_ENG_DET |
1486 MAC_RGMII_MODE_TX_ENABLE |
1487 MAC_RGMII_MODE_TX_LOWPWR |
1488 MAC_RGMII_MODE_TX_RESET);
1489 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1490 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1491 val |= MAC_RGMII_MODE_RX_INT_B |
1492 MAC_RGMII_MODE_RX_QUALITY |
1493 MAC_RGMII_MODE_RX_ACTIVITY |
1494 MAC_RGMII_MODE_RX_ENG_DET;
1495 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1496 val |= MAC_RGMII_MODE_TX_ENABLE |
1497 MAC_RGMII_MODE_TX_LOWPWR |
1498 MAC_RGMII_MODE_TX_RESET;
1499 }
1500 tw32(MAC_EXT_RGMII_MODE, val);
1501 }
1502
1503 static void tg3_mdio_start(struct tg3 *tp)
1504 {
1505 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1506 tw32_f(MAC_MI_MODE, tp->mi_mode);
1507 udelay(80);
1508
1509 if (tg3_flag(tp, MDIOBUS_INITED) &&
1510 tg3_asic_rev(tp) == ASIC_REV_5785)
1511 tg3_mdio_config_5785(tp);
1512 }
1513
1514 static int tg3_mdio_init(struct tg3 *tp)
1515 {
1516 int i;
1517 u32 reg;
1518 struct phy_device *phydev;
1519
1520 if (tg3_flag(tp, 5717_PLUS)) {
1521 u32 is_serdes;
1522
1523 tp->phy_addr = tp->pci_fn + 1;
1524
1525 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1526 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1527 else
1528 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1529 TG3_CPMU_PHY_STRAP_IS_SERDES;
1530 if (is_serdes)
1531 tp->phy_addr += 7;
1532 } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1533 int addr;
1534
1535 addr = ssb_gige_get_phyaddr(tp->pdev);
1536 if (addr < 0)
1537 return addr;
1538 tp->phy_addr = addr;
1539 } else
1540 tp->phy_addr = TG3_PHY_MII_ADDR;
1541
1542 tg3_mdio_start(tp);
1543
1544 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1545 return 0;
1546
1547 tp->mdio_bus = mdiobus_alloc();
1548 if (tp->mdio_bus == NULL)
1549 return -ENOMEM;
1550
1551 tp->mdio_bus->name = "tg3 mdio bus";
1552 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1553 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1554 tp->mdio_bus->priv = tp;
1555 tp->mdio_bus->parent = &tp->pdev->dev;
1556 tp->mdio_bus->read = &tg3_mdio_read;
1557 tp->mdio_bus->write = &tg3_mdio_write;
1558 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1559
1560 /* The bus registration will look for all the PHYs on the mdio bus.
1561 * Unfortunately, it does not ensure the PHY is powered up before
1562 * accessing the PHY ID registers. A chip reset is the
1563 * quickest way to bring the device back to an operational state.
1564 */
1565 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1566 tg3_bmcr_reset(tp);
1567
1568 i = mdiobus_register(tp->mdio_bus);
1569 if (i) {
1570 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1571 mdiobus_free(tp->mdio_bus);
1572 return i;
1573 }
1574
1575 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1576
1577 if (!phydev || !phydev->drv) {
1578 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1579 mdiobus_unregister(tp->mdio_bus);
1580 mdiobus_free(tp->mdio_bus);
1581 return -ENODEV;
1582 }
1583
1584 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1585 case PHY_ID_BCM57780:
1586 phydev->interface = PHY_INTERFACE_MODE_GMII;
1587 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1588 break;
1589 case PHY_ID_BCM50610:
1590 case PHY_ID_BCM50610M:
1591 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1592 PHY_BRCM_RX_REFCLK_UNUSED |
1593 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1594 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1595 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1596 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1597 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1598 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1599 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1600 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1601 /* fallthru */
1602 case PHY_ID_RTL8211C:
1603 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1604 break;
1605 case PHY_ID_RTL8201E:
1606 case PHY_ID_BCMAC131:
1607 phydev->interface = PHY_INTERFACE_MODE_MII;
1608 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1609 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1610 break;
1611 }
1612
1613 tg3_flag_set(tp, MDIOBUS_INITED);
1614
1615 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1616 tg3_mdio_config_5785(tp);
1617
1618 return 0;
1619 }
1620
1621 static void tg3_mdio_fini(struct tg3 *tp)
1622 {
1623 if (tg3_flag(tp, MDIOBUS_INITED)) {
1624 tg3_flag_clear(tp, MDIOBUS_INITED);
1625 mdiobus_unregister(tp->mdio_bus);
1626 mdiobus_free(tp->mdio_bus);
1627 }
1628 }
1629
1630 /* tp->lock is held. */
1631 static inline void tg3_generate_fw_event(struct tg3 *tp)
1632 {
1633 u32 val;
1634
1635 val = tr32(GRC_RX_CPU_EVENT);
1636 val |= GRC_RX_CPU_DRIVER_EVENT;
1637 tw32_f(GRC_RX_CPU_EVENT, val);
1638
1639 tp->last_event_jiffies = jiffies;
1640 }
1641
1642 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1643
1644 /* tp->lock is held. */
1645 static void tg3_wait_for_event_ack(struct tg3 *tp)
1646 {
1647 int i;
1648 unsigned int delay_cnt;
1649 long time_remain;
1650
1651 /* If enough time has passed, no wait is necessary. */
1652 time_remain = (long)(tp->last_event_jiffies + 1 +
1653 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1654 (long)jiffies;
1655 if (time_remain < 0)
1656 return;
1657
1658 /* Check if we can shorten the wait time. */
1659 delay_cnt = jiffies_to_usecs(time_remain);
1660 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1661 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1662 delay_cnt = (delay_cnt >> 3) + 1;
1663
1664 for (i = 0; i < delay_cnt; i++) {
1665 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1666 break;
1667 if (pci_channel_offline(tp->pdev))
1668 break;
1669
1670 udelay(8);
1671 }
1672 }
1673
1674 /* tp->lock is held. */
1675 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1676 {
1677 u32 reg, val;
1678
1679 val = 0;
1680 if (!tg3_readphy(tp, MII_BMCR, &reg))
1681 val = reg << 16;
1682 if (!tg3_readphy(tp, MII_BMSR, &reg))
1683 val |= (reg & 0xffff);
1684 *data++ = val;
1685
1686 val = 0;
1687 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1688 val = reg << 16;
1689 if (!tg3_readphy(tp, MII_LPA, &reg))
1690 val |= (reg & 0xffff);
1691 *data++ = val;
1692
1693 val = 0;
1694 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1695 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1696 val = reg << 16;
1697 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1698 val |= (reg & 0xffff);
1699 }
1700 *data++ = val;
1701
1702 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1703 val = reg << 16;
1704 else
1705 val = 0;
1706 *data++ = val;
1707 }
1708
1709 /* tp->lock is held. */
1710 static void tg3_ump_link_report(struct tg3 *tp)
1711 {
1712 u32 data[4];
1713
1714 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1715 return;
1716
1717 tg3_phy_gather_ump_data(tp, data);
1718
1719 tg3_wait_for_event_ack(tp);
1720
1721 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1722 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1723 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1724 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1725 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1726 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1727
1728 tg3_generate_fw_event(tp);
1729 }
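/* tg3_ump_link_report() above doubles as the template for driver-to-
 * firmware messages: wait for the RX CPU to ack the previous event, write
 * the command, length and payload into the NIC_SRAM_FW_CMD_* mailboxes,
 * then ring the doorbell via tg3_generate_fw_event(). tg3_stop_fw() below
 * reuses the same handshake with FWCMD_NICDRV_PAUSE_FW.
 */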
1730
1731 /* tp->lock is held. */
1732 static void tg3_stop_fw(struct tg3 *tp)
1733 {
1734 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1735 /* Wait for RX cpu to ACK the previous event. */
1736 tg3_wait_for_event_ack(tp);
1737
1738 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1739
1740 tg3_generate_fw_event(tp);
1741
1742 /* Wait for RX cpu to ACK this event. */
1743 tg3_wait_for_event_ack(tp);
1744 }
1745 }
1746
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1749 {
1750 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1751 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1752
1753 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1754 switch (kind) {
1755 case RESET_KIND_INIT:
1756 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1757 DRV_STATE_START);
1758 break;
1759
1760 case RESET_KIND_SHUTDOWN:
1761 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1762 DRV_STATE_UNLOAD);
1763 break;
1764
1765 case RESET_KIND_SUSPEND:
1766 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1767 DRV_STATE_SUSPEND);
1768 break;
1769
1770 default:
1771 break;
1772 }
1773 }
1774 }
1775
1776 /* tp->lock is held. */
1777 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1778 {
1779 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1780 switch (kind) {
1781 case RESET_KIND_INIT:
1782 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1783 DRV_STATE_START_DONE);
1784 break;
1785
1786 case RESET_KIND_SHUTDOWN:
1787 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 DRV_STATE_UNLOAD_DONE);
1789 break;
1790
1791 default:
1792 break;
1793 }
1794 }
1795 }
1796
1797 /* tp->lock is held. */
1798 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1799 {
1800 if (tg3_flag(tp, ENABLE_ASF)) {
1801 switch (kind) {
1802 case RESET_KIND_INIT:
1803 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1804 DRV_STATE_START);
1805 break;
1806
1807 case RESET_KIND_SHUTDOWN:
1808 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1809 DRV_STATE_UNLOAD);
1810 break;
1811
1812 case RESET_KIND_SUSPEND:
1813 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1814 DRV_STATE_SUSPEND);
1815 break;
1816
1817 default:
1818 break;
1819 }
1820 }
1821 }
1822
1823 static int tg3_poll_fw(struct tg3 *tp)
1824 {
1825 int i;
1826 u32 val;
1827
1828 if (tg3_flag(tp, NO_FWARE_REPORTED))
1829 return 0;
1830
1831 if (tg3_flag(tp, IS_SSB_CORE)) {
1832 /* We don't use firmware. */
1833 return 0;
1834 }
1835
1836 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1837 /* Wait up to 20ms for init done. */
1838 for (i = 0; i < 200; i++) {
1839 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1840 return 0;
1841 if (pci_channel_offline(tp->pdev))
1842 return -ENODEV;
1843
1844 udelay(100);
1845 }
1846 return -ENODEV;
1847 }
1848
1849 /* Wait for firmware initialization to complete. */
1850 for (i = 0; i < 100000; i++) {
1851 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1852 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1853 break;
1854 if (pci_channel_offline(tp->pdev)) {
1855 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1856 tg3_flag_set(tp, NO_FWARE_REPORTED);
1857 netdev_info(tp->dev, "No firmware running\n");
1858 }
1859
1860 break;
1861 }
1862
1863 udelay(10);
1864 }
1865
1866 /* Chip might not be fitted with firmware. Some Sun onboard
1867 * parts are configured like that. So don't signal the timeout
1868 * of the above loop as an error, but do report the lack of
1869 * running firmware once.
1870 */
1871 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1872 tg3_flag_set(tp, NO_FWARE_REPORTED);
1873
1874 netdev_info(tp->dev, "No firmware running\n");
1875 }
1876
1877 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1878 /* The 57765 A0 needs a little more
1879 * time to do some important work.
1880 */
1881 mdelay(10);
1882 }
1883
1884 return 0;
1885 }
1886
1887 static void tg3_link_report(struct tg3 *tp)
1888 {
1889 if (!netif_carrier_ok(tp->dev)) {
1890 netif_info(tp, link, tp->dev, "Link is down\n");
1891 tg3_ump_link_report(tp);
1892 } else if (netif_msg_link(tp)) {
1893 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1894 (tp->link_config.active_speed == SPEED_1000 ?
1895 1000 :
1896 (tp->link_config.active_speed == SPEED_100 ?
1897 100 : 10)),
1898 (tp->link_config.active_duplex == DUPLEX_FULL ?
1899 "full" : "half"));
1900
1901 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1902 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1903 "on" : "off",
1904 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1905 "on" : "off");
1906
1907 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1908 netdev_info(tp->dev, "EEE is %s\n",
1909 tp->setlpicnt ? "enabled" : "disabled");
1910
1911 tg3_ump_link_report(tp);
1912 }
1913
1914 tp->link_up = netif_carrier_ok(tp->dev);
1915 }
1916
1917 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1918 {
1919 u32 flowctrl = 0;
1920
1921 if (adv & ADVERTISE_PAUSE_CAP) {
1922 flowctrl |= FLOW_CTRL_RX;
1923 if (!(adv & ADVERTISE_PAUSE_ASYM))
1924 flowctrl |= FLOW_CTRL_TX;
1925 } else if (adv & ADVERTISE_PAUSE_ASYM)
1926 flowctrl |= FLOW_CTRL_TX;
1927
1928 return flowctrl;
1929 }
1930
1931 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1932 {
1933 u16 miireg;
1934
1935 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1936 miireg = ADVERTISE_1000XPAUSE;
1937 else if (flow_ctrl & FLOW_CTRL_TX)
1938 miireg = ADVERTISE_1000XPSE_ASYM;
1939 else if (flow_ctrl & FLOW_CTRL_RX)
1940 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1941 else
1942 miireg = 0;
1943
1944 return miireg;
1945 }
1946
1947 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1948 {
1949 u32 flowctrl = 0;
1950
1951 if (adv & ADVERTISE_1000XPAUSE) {
1952 flowctrl |= FLOW_CTRL_RX;
1953 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1954 flowctrl |= FLOW_CTRL_TX;
1955 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1956 flowctrl |= FLOW_CTRL_TX;
1957
1958 return flowctrl;
1959 }
1960
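/* Resolve the pause mode from the local and remote 1000BASE-X
 * advertisements, in the style of the IEEE 802.3 Annex 28B
 * resolution table:
 *
 *   lcl PAUSE  lcl ASYM  rmt PAUSE  rmt ASYM  resolved
 *       1          x         1         x      TX + RX (symmetric)
 *       1          1         0         1      RX only
 *       0          1         1         1      TX only
 *   anything else                             no pause
 */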
1961 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1962 {
1963 u8 cap = 0;
1964
1965 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1966 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1967 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1968 if (lcladv & ADVERTISE_1000XPAUSE)
1969 cap = FLOW_CTRL_RX;
1970 if (rmtadv & ADVERTISE_1000XPAUSE)
1971 cap = FLOW_CTRL_TX;
1972 }
1973
1974 return cap;
1975 }
1976
1977 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1978 {
1979 u8 autoneg;
1980 u8 flowctrl = 0;
1981 u32 old_rx_mode = tp->rx_mode;
1982 u32 old_tx_mode = tp->tx_mode;
1983
1984 if (tg3_flag(tp, USE_PHYLIB))
1985 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1986 else
1987 autoneg = tp->link_config.autoneg;
1988
1989 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1990 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1991 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1992 else
1993 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1994 } else
1995 flowctrl = tp->link_config.flowctrl;
1996
1997 tp->link_config.active_flowctrl = flowctrl;
1998
1999 if (flowctrl & FLOW_CTRL_RX)
2000 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
2001 else
2002 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
2003
2004 if (old_rx_mode != tp->rx_mode)
2005 tw32_f(MAC_RX_MODE, tp->rx_mode);
2006
2007 if (flowctrl & FLOW_CTRL_TX)
2008 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2009 else
2010 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2011
2012 if (old_tx_mode != tp->tx_mode)
2013 tw32_f(MAC_TX_MODE, tp->tx_mode);
2014 }
2015
2016 static void tg3_adjust_link(struct net_device *dev)
2017 {
2018 u8 oldflowctrl, linkmesg = 0;
2019 u32 mac_mode, lcl_adv, rmt_adv;
2020 struct tg3 *tp = netdev_priv(dev);
2021 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2022
2023 spin_lock_bh(&tp->lock);
2024
2025 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2026 MAC_MODE_HALF_DUPLEX);
2027
2028 oldflowctrl = tp->link_config.active_flowctrl;
2029
2030 if (phydev->link) {
2031 lcl_adv = 0;
2032 rmt_adv = 0;
2033
2034 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2035 mac_mode |= MAC_MODE_PORT_MODE_MII;
2036 else if (phydev->speed == SPEED_1000 ||
2037 tg3_asic_rev(tp) != ASIC_REV_5785)
2038 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2039 else
2040 mac_mode |= MAC_MODE_PORT_MODE_MII;
2041
2042 if (phydev->duplex == DUPLEX_HALF)
2043 mac_mode |= MAC_MODE_HALF_DUPLEX;
2044 else {
2045 lcl_adv = mii_advertise_flowctrl(
2046 tp->link_config.flowctrl);
2047
2048 if (phydev->pause)
2049 rmt_adv = LPA_PAUSE_CAP;
2050 if (phydev->asym_pause)
2051 rmt_adv |= LPA_PAUSE_ASYM;
2052 }
2053
2054 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2055 } else
2056 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2057
2058 if (mac_mode != tp->mac_mode) {
2059 tp->mac_mode = mac_mode;
2060 tw32_f(MAC_MODE, tp->mac_mode);
2061 udelay(40);
2062 }
2063
2064 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2065 if (phydev->speed == SPEED_10)
2066 tw32(MAC_MI_STAT,
2067 MAC_MI_STAT_10MBPS_MODE |
2068 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2069 else
2070 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2071 }
2072
2073 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2074 tw32(MAC_TX_LENGTHS,
2075 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2076 (6 << TX_LENGTHS_IPG_SHIFT) |
2077 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2078 else
2079 tw32(MAC_TX_LENGTHS,
2080 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2081 (6 << TX_LENGTHS_IPG_SHIFT) |
2082 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2083
2084 if (phydev->link != tp->old_link ||
2085 phydev->speed != tp->link_config.active_speed ||
2086 phydev->duplex != tp->link_config.active_duplex ||
2087 oldflowctrl != tp->link_config.active_flowctrl)
2088 linkmesg = 1;
2089
2090 tp->old_link = phydev->link;
2091 tp->link_config.active_speed = phydev->speed;
2092 tp->link_config.active_duplex = phydev->duplex;
2093
2094 spin_unlock_bh(&tp->lock);
2095
2096 if (linkmesg)
2097 tg3_link_report(tp);
2098 }
2099
2100 static int tg3_phy_init(struct tg3 *tp)
2101 {
2102 struct phy_device *phydev;
2103
2104 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2105 return 0;
2106
2107 /* Bring the PHY back to a known state. */
2108 tg3_bmcr_reset(tp);
2109
2110 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2111
2112 /* Attach the MAC to the PHY. */
2113 phydev = phy_connect(tp->dev, phydev_name(phydev),
2114 tg3_adjust_link, phydev->interface);
2115 if (IS_ERR(phydev)) {
2116 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2117 return PTR_ERR(phydev);
2118 }
2119
2120 /* Mask with MAC supported features. */
2121 switch (phydev->interface) {
2122 case PHY_INTERFACE_MODE_GMII:
2123 case PHY_INTERFACE_MODE_RGMII:
2124 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2125 phydev->supported &= (PHY_GBIT_FEATURES |
2126 SUPPORTED_Pause |
2127 SUPPORTED_Asym_Pause);
2128 break;
2129 }
2130 /* fallthru */
2131 case PHY_INTERFACE_MODE_MII:
2132 phydev->supported &= (PHY_BASIC_FEATURES |
2133 SUPPORTED_Pause |
2134 SUPPORTED_Asym_Pause);
2135 break;
2136 default:
2137 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2138 return -EINVAL;
2139 }
2140
2141 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2142
2143 phydev->advertising = phydev->supported;
2144
2145 phy_attached_info(phydev);
2146
2147 return 0;
2148 }
2149
2150 static void tg3_phy_start(struct tg3 *tp)
2151 {
2152 struct phy_device *phydev;
2153
2154 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2155 return;
2156
2157 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2158
2159 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2160 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2161 phydev->speed = tp->link_config.speed;
2162 phydev->duplex = tp->link_config.duplex;
2163 phydev->autoneg = tp->link_config.autoneg;
2164 phydev->advertising = tp->link_config.advertising;
2165 }
2166
2167 phy_start(phydev);
2168
2169 phy_start_aneg(phydev);
2170 }
2171
2172 static void tg3_phy_stop(struct tg3 *tp)
2173 {
2174 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2175 return;
2176
2177 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2178 }
2179
2180 static void tg3_phy_fini(struct tg3 *tp)
2181 {
2182 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2183 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2184 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2185 }
2186 }
2187
2188 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2189 {
2190 int err;
2191 u32 val;
2192
2193 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2194 return 0;
2195
2196 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2197 /* Cannot do read-modify-write on 5401 */
2198 err = tg3_phy_auxctl_write(tp,
2199 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2200 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2201 0x4c20);
2202 goto done;
2203 }
2204
2205 err = tg3_phy_auxctl_read(tp,
2206 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2207 if (err)
2208 return err;
2209
2210 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2211 err = tg3_phy_auxctl_write(tp,
2212 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2213
2214 done:
2215 return err;
2216 }
2217
2218 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2219 {
2220 u32 phytest;
2221
2222 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2223 u32 phy;
2224
2225 tg3_writephy(tp, MII_TG3_FET_TEST,
2226 phytest | MII_TG3_FET_SHADOW_EN);
2227 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2228 if (enable)
2229 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2230 else
2231 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2232 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2233 }
2234 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2235 }
2236 }
2237
2238 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2239 {
2240 u32 reg;
2241
2242 if (!tg3_flag(tp, 5705_PLUS) ||
2243 (tg3_flag(tp, 5717_PLUS) &&
2244 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2245 return;
2246
2247 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2248 tg3_phy_fet_toggle_apd(tp, enable);
2249 return;
2250 }
2251
2252 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2253 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2254 MII_TG3_MISC_SHDW_SCR5_SDTL |
2255 MII_TG3_MISC_SHDW_SCR5_C125OE;
2256 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2257 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2258
2259 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2260
2261
2262 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2263 if (enable)
2264 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2265
2266 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2267 }
2268
2269 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2270 {
2271 u32 phy;
2272
2273 if (!tg3_flag(tp, 5705_PLUS) ||
2274 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2275 return;
2276
2277 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2278 u32 ephy;
2279
2280 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2281 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2282
2283 tg3_writephy(tp, MII_TG3_FET_TEST,
2284 ephy | MII_TG3_FET_SHADOW_EN);
2285 if (!tg3_readphy(tp, reg, &phy)) {
2286 if (enable)
2287 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2288 else
2289 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2290 tg3_writephy(tp, reg, phy);
2291 }
2292 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2293 }
2294 } else {
2295 int ret;
2296
2297 ret = tg3_phy_auxctl_read(tp,
2298 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2299 if (!ret) {
2300 if (enable)
2301 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2302 else
2303 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2304 tg3_phy_auxctl_write(tp,
2305 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2306 }
2307 }
2308 }
2309
2310 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2311 {
2312 int ret;
2313 u32 val;
2314
2315 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2316 return;
2317
2318 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2319 if (!ret)
2320 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2321 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2322 }
2323
2324 static void tg3_phy_apply_otp(struct tg3 *tp)
2325 {
2326 u32 otp, phy;
2327
2328 if (!tp->phy_otp)
2329 return;
2330
2331 otp = tp->phy_otp;
2332
2333 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2334 return;
2335
2336 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2337 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2338 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2339
2340 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2341 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2342 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2343
2344 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2345 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2346 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2347
2348 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2349 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2350
2351 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2352 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2353
2354 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2355 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2356 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2357
2358 tg3_phy_toggle_auxctl_smdsp(tp, false);
2359 }
2360
2361 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2362 {
2363 u32 val;
2364 struct ethtool_eee *dest = &tp->eee;
2365
2366 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2367 return;
2368
2369 if (eee)
2370 dest = eee;
2371
2372 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2373 return;
2374
2375 /* Pull eee_active */
2376 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2377 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2378 dest->eee_active = 1;
2379 } else
2380 dest->eee_active = 0;
2381
2382 /* Pull lp advertised settings */
2383 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2384 return;
2385 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2386
2387 /* Pull advertised and eee_enabled settings */
2388 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2389 return;
2390 dest->eee_enabled = !!val;
2391 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2392
2393 /* Pull tx_lpi_enabled */
2394 val = tr32(TG3_CPMU_EEE_MODE);
2395 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2396
2397 /* Pull lpi timer value */
2398 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2399 }
2400
2401 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2402 {
2403 u32 val;
2404
2405 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2406 return;
2407
2408 tp->setlpicnt = 0;
2409
2410 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2411 current_link_up &&
2412 tp->link_config.active_duplex == DUPLEX_FULL &&
2413 (tp->link_config.active_speed == SPEED_100 ||
2414 tp->link_config.active_speed == SPEED_1000)) {
2415 u32 eeectl;
2416
2417 if (tp->link_config.active_speed == SPEED_1000)
2418 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2419 else
2420 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2421
2422 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2423
2424 tg3_eee_pull_config(tp, NULL);
2425 if (tp->eee.eee_active)
2426 tp->setlpicnt = 2;
2427 }
2428
2429 if (!tp->setlpicnt) {
2430 if (current_link_up &&
2431 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2432 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2433 tg3_phy_toggle_auxctl_smdsp(tp, false);
2434 }
2435
2436 val = tr32(TG3_CPMU_EEE_MODE);
2437 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2438 }
2439 }
2440
2441 static void tg3_phy_eee_enable(struct tg3 *tp)
2442 {
2443 u32 val;
2444
2445 if (tp->link_config.active_speed == SPEED_1000 &&
2446 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2447 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2448 tg3_flag(tp, 57765_CLASS)) &&
2449 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2450 val = MII_TG3_DSP_TAP26_ALNOKO |
2451 MII_TG3_DSP_TAP26_RMRXSTO;
2452 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2453 tg3_phy_toggle_auxctl_smdsp(tp, false);
2454 }
2455
2456 val = tr32(TG3_CPMU_EEE_MODE);
2457 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2458 }
2459
2460 static int tg3_wait_macro_done(struct tg3 *tp)
2461 {
2462 int limit = 100;
2463
2464 while (limit--) {
2465 u32 tmp32;
2466
2467 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2468 if ((tmp32 & 0x1000) == 0)
2469 break;
2470 }
2471 }
2472 if (limit < 0)
2473 return -EBUSY;
2474
2475 return 0;
2476 }
2477
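/* Write a known test pattern into each of the four DSP channels,
 * read it back through the macro interface, and compare. A mismatch
 * returns -EBUSY; a macro timeout additionally sets *resetp so the
 * retry loop in tg3_phy_reset_5703_4_5() starts over with a fresh
 * BMCR reset.
 */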
2478 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2479 {
2480 static const u32 test_pat[4][6] = {
2481 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2482 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2483 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2484 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2485 };
2486 int chan;
2487
2488 for (chan = 0; chan < 4; chan++) {
2489 int i;
2490
2491 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2492 (chan * 0x2000) | 0x0200);
2493 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2494
2495 for (i = 0; i < 6; i++)
2496 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2497 test_pat[chan][i]);
2498
2499 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2500 if (tg3_wait_macro_done(tp)) {
2501 *resetp = 1;
2502 return -EBUSY;
2503 }
2504
2505 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2506 (chan * 0x2000) | 0x0200);
2507 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2508 if (tg3_wait_macro_done(tp)) {
2509 *resetp = 1;
2510 return -EBUSY;
2511 }
2512
2513 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2514 if (tg3_wait_macro_done(tp)) {
2515 *resetp = 1;
2516 return -EBUSY;
2517 }
2518
2519 for (i = 0; i < 6; i += 2) {
2520 u32 low, high;
2521
2522 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2523 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2524 tg3_wait_macro_done(tp)) {
2525 *resetp = 1;
2526 return -EBUSY;
2527 }
2528 low &= 0x7fff;
2529 high &= 0x000f;
2530 if (low != test_pat[chan][i] ||
2531 high != test_pat[chan][i+1]) {
2532 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2533 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2534 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2535
2536 return -EBUSY;
2537 }
2538 }
2539 }
2540
2541 return 0;
2542 }
2543
2544 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2545 {
2546 int chan;
2547
2548 for (chan = 0; chan < 4; chan++) {
2549 int i;
2550
2551 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2552 (chan * 0x2000) | 0x0200);
2553 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2554 for (i = 0; i < 6; i++)
2555 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2556 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2557 if (tg3_wait_macro_done(tp))
2558 return -EBUSY;
2559 }
2560
2561 return 0;
2562 }
2563
2564 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2565 {
2566 u32 reg32, phy9_orig;
2567 int retries, do_phy_reset, err;
2568
2569 retries = 10;
2570 do_phy_reset = 1;
2571 do {
2572 if (do_phy_reset) {
2573 err = tg3_bmcr_reset(tp);
2574 if (err)
2575 return err;
2576 do_phy_reset = 0;
2577 }
2578
2579 /* Disable transmitter and interrupt. */
2580 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2581 continue;
2582
2583 reg32 |= 0x3000;
2584 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2585
2586 /* Set full-duplex, 1000 mbps. */
2587 tg3_writephy(tp, MII_BMCR,
2588 BMCR_FULLDPLX | BMCR_SPEED1000);
2589
2590 /* Set to master mode. */
2591 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2592 continue;
2593
2594 tg3_writephy(tp, MII_CTRL1000,
2595 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2596
2597 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2598 if (err)
2599 return err;
2600
2601 /* Block the PHY control access. */
2602 tg3_phydsp_write(tp, 0x8005, 0x0800);
2603
2604 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2605 if (!err)
2606 break;
2607 } while (--retries);
2608
2609 err = tg3_phy_reset_chanpat(tp);
2610 if (err)
2611 return err;
2612
2613 tg3_phydsp_write(tp, 0x8005, 0x0000);
2614
2615 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2616 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2617
2618 tg3_phy_toggle_auxctl_smdsp(tp, false);
2619
2620 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2621
2622 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2623 if (err)
2624 return err;
2625
2626 reg32 &= ~0x3000;
2627 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2628
2629 return 0;
2630 }
2631
2632 static void tg3_carrier_off(struct tg3 *tp)
2633 {
2634 netif_carrier_off(tp->dev);
2635 tp->link_up = false;
2636 }
2637
2638 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2639 {
2640 if (tg3_flag(tp, ENABLE_ASF))
2641 netdev_warn(tp->dev,
2642 "Management side-band traffic will be interrupted during phy settings change\n");
2643 }
2644
2645 /* This will reset the tigon3 PHY and reapply the relevant
2646 * chip-specific workarounds for it.
2647 */
2648 static int tg3_phy_reset(struct tg3 *tp)
2649 {
2650 u32 val, cpmuctrl;
2651 int err;
2652
2653 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2654 val = tr32(GRC_MISC_CFG);
2655 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2656 udelay(40);
2657 }
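/* MII_BMSR latches link-down events, so the first read returns the
 * latched status and the second the current state; reading it twice
 * also verifies the PHY is responding before we go on to reset it.
 */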
2658 err = tg3_readphy(tp, MII_BMSR, &val);
2659 err |= tg3_readphy(tp, MII_BMSR, &val);
2660 if (err != 0)
2661 return -EBUSY;
2662
2663 if (netif_running(tp->dev) && tp->link_up) {
2664 netif_carrier_off(tp->dev);
2665 tg3_link_report(tp);
2666 }
2667
2668 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2669 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2670 tg3_asic_rev(tp) == ASIC_REV_5705) {
2671 err = tg3_phy_reset_5703_4_5(tp);
2672 if (err)
2673 return err;
2674 goto out;
2675 }
2676
2677 cpmuctrl = 0;
2678 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2679 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2680 cpmuctrl = tr32(TG3_CPMU_CTRL);
2681 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2682 tw32(TG3_CPMU_CTRL,
2683 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2684 }
2685
2686 err = tg3_bmcr_reset(tp);
2687 if (err)
2688 return err;
2689
2690 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2691 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2692 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2693
2694 tw32(TG3_CPMU_CTRL, cpmuctrl);
2695 }
2696
2697 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2698 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2699 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2700 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2701 CPMU_LSPD_1000MB_MACCLK_12_5) {
2702 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2703 udelay(40);
2704 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2705 }
2706 }
2707
2708 if (tg3_flag(tp, 5717_PLUS) &&
2709 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2710 return 0;
2711
2712 tg3_phy_apply_otp(tp);
2713
2714 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2715 tg3_phy_toggle_apd(tp, true);
2716 else
2717 tg3_phy_toggle_apd(tp, false);
2718
2719 out:
2720 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2721 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2722 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2723 tg3_phydsp_write(tp, 0x000a, 0x0323);
2724 tg3_phy_toggle_auxctl_smdsp(tp, false);
2725 }
2726
2727 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2728 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2729 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2730 }
2731
2732 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2733 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2734 tg3_phydsp_write(tp, 0x000a, 0x310b);
2735 tg3_phydsp_write(tp, 0x201f, 0x9506);
2736 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2737 tg3_phy_toggle_auxctl_smdsp(tp, false);
2738 }
2739 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2740 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2741 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2742 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2743 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2744 tg3_writephy(tp, MII_TG3_TEST1,
2745 MII_TG3_TEST1_TRIM_EN | 0x4);
2746 } else
2747 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2748
2749 tg3_phy_toggle_auxctl_smdsp(tp, false);
2750 }
2751 }
2752
2753 /* Set Extended packet length bit (bit 14) on all chips
2754 * that support jumbo frames. */
2755 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2756 /* Cannot do read-modify-write on 5401 */
2757 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2758 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2759 /* Set bit 14 with read-modify-write to preserve other bits */
2760 err = tg3_phy_auxctl_read(tp,
2761 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2762 if (!err)
2763 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2764 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2765 }
2766
2767 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2768 * jumbo frames transmission.
2769 */
2770 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2771 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2772 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2773 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2774 }
2775
2776 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2777 /* adjust output voltage */
2778 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2779 }
2780
2781 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2782 tg3_phydsp_write(tp, 0xffb, 0x4000);
2783
2784 tg3_phy_toggle_automdix(tp, true);
2785 tg3_phy_set_wirespeed(tp);
2786 return 0;
2787 }
2788
2789 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2790 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2791 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2792 TG3_GPIO_MSG_NEED_VAUX)
2793 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2794 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2795 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2796 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2797 (TG3_GPIO_MSG_DRVR_PRES << 12))
2798
2799 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2800 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2801 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2802 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2803 (TG3_GPIO_MSG_NEED_VAUX << 12))
2804
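/* The GPIO messages are packed one nibble per PCI function: function
 * N owns bits [4N+1:4N] (shift = 4 * tp->pci_fn below), which is why
 * the *_ALL_*_MASK macros above repeat the per-function bit at
 * offsets 0, 4, 8 and 12.
 */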
2805 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2806 {
2807 u32 status, shift;
2808
2809 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2810 tg3_asic_rev(tp) == ASIC_REV_5719)
2811 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2812 else
2813 status = tr32(TG3_CPMU_DRV_STATUS);
2814
2815 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2816 status &= ~(TG3_GPIO_MSG_MASK << shift);
2817 status |= (newstat << shift);
2818
2819 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2820 tg3_asic_rev(tp) == ASIC_REV_5719)
2821 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2822 else
2823 tw32(TG3_CPMU_DRV_STATUS, status);
2824
2825 return status >> TG3_APE_GPIO_MSG_SHIFT;
2826 }
2827
2828 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2829 {
2830 if (!tg3_flag(tp, IS_NIC))
2831 return 0;
2832
2833 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2834 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2835 tg3_asic_rev(tp) == ASIC_REV_5720) {
2836 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2837 return -EIO;
2838
2839 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2840
2841 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2842 TG3_GRC_LCLCTL_PWRSW_DELAY);
2843
2844 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2845 } else {
2846 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2847 TG3_GRC_LCLCTL_PWRSW_DELAY);
2848 }
2849
2850 return 0;
2851 }
2852
2853 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2854 {
2855 u32 grc_local_ctrl;
2856
2857 if (!tg3_flag(tp, IS_NIC) ||
2858 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2859 tg3_asic_rev(tp) == ASIC_REV_5701)
2860 return;
2861
2862 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2863
2864 tw32_wait_f(GRC_LOCAL_CTRL,
2865 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2866 TG3_GRC_LCLCTL_PWRSW_DELAY);
2867
2868 tw32_wait_f(GRC_LOCAL_CTRL,
2869 grc_local_ctrl,
2870 TG3_GRC_LCLCTL_PWRSW_DELAY);
2871
2872 tw32_wait_f(GRC_LOCAL_CTRL,
2873 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2874 TG3_GRC_LCLCTL_PWRSW_DELAY);
2875 }
2876
2877 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2878 {
2879 if (!tg3_flag(tp, IS_NIC))
2880 return;
2881
2882 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2883 tg3_asic_rev(tp) == ASIC_REV_5701) {
2884 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2885 (GRC_LCLCTRL_GPIO_OE0 |
2886 GRC_LCLCTRL_GPIO_OE1 |
2887 GRC_LCLCTRL_GPIO_OE2 |
2888 GRC_LCLCTRL_GPIO_OUTPUT0 |
2889 GRC_LCLCTRL_GPIO_OUTPUT1),
2890 TG3_GRC_LCLCTL_PWRSW_DELAY);
2891 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2892 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2893 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2894 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2895 GRC_LCLCTRL_GPIO_OE1 |
2896 GRC_LCLCTRL_GPIO_OE2 |
2897 GRC_LCLCTRL_GPIO_OUTPUT0 |
2898 GRC_LCLCTRL_GPIO_OUTPUT1 |
2899 tp->grc_local_ctrl;
2900 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2901 TG3_GRC_LCLCTL_PWRSW_DELAY);
2902
2903 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2904 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2905 TG3_GRC_LCLCTL_PWRSW_DELAY);
2906
2907 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2908 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2909 TG3_GRC_LCLCTL_PWRSW_DELAY);
2910 } else {
2911 u32 no_gpio2;
2912 u32 grc_local_ctrl = 0;
2913
2914 /* Workaround to prevent overdrawing Amps. */
2915 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2916 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2917 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2918 grc_local_ctrl,
2919 TG3_GRC_LCLCTL_PWRSW_DELAY);
2920 }
2921
2922 /* On 5753 and variants, GPIO2 cannot be used. */
2923 no_gpio2 = tp->nic_sram_data_cfg &
2924 NIC_SRAM_DATA_CFG_NO_GPIO2;
2925
2926 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2927 GRC_LCLCTRL_GPIO_OE1 |
2928 GRC_LCLCTRL_GPIO_OE2 |
2929 GRC_LCLCTRL_GPIO_OUTPUT1 |
2930 GRC_LCLCTRL_GPIO_OUTPUT2;
2931 if (no_gpio2) {
2932 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2933 GRC_LCLCTRL_GPIO_OUTPUT2);
2934 }
2935 tw32_wait_f(GRC_LOCAL_CTRL,
2936 tp->grc_local_ctrl | grc_local_ctrl,
2937 TG3_GRC_LCLCTL_PWRSW_DELAY);
2938
2939 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2940
2941 tw32_wait_f(GRC_LOCAL_CTRL,
2942 tp->grc_local_ctrl | grc_local_ctrl,
2943 TG3_GRC_LCLCTL_PWRSW_DELAY);
2944
2945 if (!no_gpio2) {
2946 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2947 tw32_wait_f(GRC_LOCAL_CTRL,
2948 tp->grc_local_ctrl | grc_local_ctrl,
2949 TG3_GRC_LCLCTL_PWRSW_DELAY);
2950 }
2951 }
2952 }
2953
2954 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2955 {
2956 u32 msg = 0;
2957
2958 /* Serialize power state transitions */
2959 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2960 return;
2961
2962 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2963 msg = TG3_GPIO_MSG_NEED_VAUX;
2964
2965 msg = tg3_set_function_status(tp, msg);
2966
2967 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2968 goto done;
2969
2970 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2971 tg3_pwrsrc_switch_to_vaux(tp);
2972 else
2973 tg3_pwrsrc_die_with_vmain(tp);
2974
2975 done:
2976 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2977 }
2978
2979 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2980 {
2981 bool need_vaux = false;
2982
2983 /* The GPIOs do something completely different on 57765. */
2984 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2985 return;
2986
2987 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2988 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2989 tg3_asic_rev(tp) == ASIC_REV_5720) {
2990 tg3_frob_aux_power_5717(tp, include_wol ?
2991 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2992 return;
2993 }
2994
2995 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2996 struct net_device *dev_peer;
2997
2998 dev_peer = pci_get_drvdata(tp->pdev_peer);
2999
3000 /* remove_one() may have been run on the peer. */
3001 if (dev_peer) {
3002 struct tg3 *tp_peer = netdev_priv(dev_peer);
3003
3004 if (tg3_flag(tp_peer, INIT_COMPLETE))
3005 return;
3006
3007 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
3008 tg3_flag(tp_peer, ENABLE_ASF))
3009 need_vaux = true;
3010 }
3011 }
3012
3013 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3014 tg3_flag(tp, ENABLE_ASF))
3015 need_vaux = true;
3016
3017 if (need_vaux)
3018 tg3_pwrsrc_switch_to_vaux(tp);
3019 else
3020 tg3_pwrsrc_die_with_vmain(tp);
3021 }
3022
3023 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3024 {
3025 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3026 return 1;
3027 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3028 if (speed != SPEED_10)
3029 return 1;
3030 } else if (speed == SPEED_10)
3031 return 1;
3032
3033 return 0;
3034 }
3035
3036 static bool tg3_phy_power_bug(struct tg3 *tp)
3037 {
3038 switch (tg3_asic_rev(tp)) {
3039 case ASIC_REV_5700:
3040 case ASIC_REV_5704:
3041 return true;
3042 case ASIC_REV_5780:
3043 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3044 return true;
3045 return false;
3046 case ASIC_REV_5717:
3047 if (!tp->pci_fn)
3048 return true;
3049 return false;
3050 case ASIC_REV_5719:
3051 case ASIC_REV_5720:
3052 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3053 !tp->pci_fn)
3054 return true;
3055 return false;
3056 }
3057
3058 return false;
3059 }
3060
3061 static bool tg3_phy_led_bug(struct tg3 *tp)
3062 {
3063 switch (tg3_asic_rev(tp)) {
3064 case ASIC_REV_5719:
3065 case ASIC_REV_5720:
3066 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3067 !tp->pci_fn)
3068 return true;
3069 return false;
3070 }
3071
3072 return false;
3073 }
3074
3075 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3076 {
3077 u32 val;
3078
3079 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3080 return;
3081
3082 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3083 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3084 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3085 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3086
3087 sg_dig_ctrl |=
3088 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3089 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3090 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3091 }
3092 return;
3093 }
3094
3095 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3096 tg3_bmcr_reset(tp);
3097 val = tr32(GRC_MISC_CFG);
3098 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3099 udelay(40);
3100 return;
3101 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3102 u32 phytest;
3103 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3104 u32 phy;
3105
3106 tg3_writephy(tp, MII_ADVERTISE, 0);
3107 tg3_writephy(tp, MII_BMCR,
3108 BMCR_ANENABLE | BMCR_ANRESTART);
3109
3110 tg3_writephy(tp, MII_TG3_FET_TEST,
3111 phytest | MII_TG3_FET_SHADOW_EN);
3112 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3113 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3114 tg3_writephy(tp,
3115 MII_TG3_FET_SHDW_AUXMODE4,
3116 phy);
3117 }
3118 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3119 }
3120 return;
3121 } else if (do_low_power) {
3122 if (!tg3_phy_led_bug(tp))
3123 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3124 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3125
3126 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3127 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3128 MII_TG3_AUXCTL_PCTL_VREG_11V;
3129 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3130 }
3131
3132 /* The PHY should not be powered down on some chips because
3133 * of bugs.
3134 */
3135 if (tg3_phy_power_bug(tp))
3136 return;
3137
3138 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3139 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3140 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3141 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3142 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3143 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3144 }
3145
3146 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3147 }
3148
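/* NVRAM access is arbitrated in hardware through the SWARB
 * request/grant register. The driver claims request/grant pair 1
 * (SWARB_REQ_SET1/SWARB_GNT1); the boot code uses pair 0, which is
 * why tg3_halt_cpu() clears REQ0 on its behalf. The lock is also
 * refcounted via tp->nvram_lock_cnt, so nested callers are safe.
 */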
3149 /* tp->lock is held. */
3150 static int tg3_nvram_lock(struct tg3 *tp)
3151 {
3152 if (tg3_flag(tp, NVRAM)) {
3153 int i;
3154
3155 if (tp->nvram_lock_cnt == 0) {
3156 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3157 for (i = 0; i < 8000; i++) {
3158 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3159 break;
3160 udelay(20);
3161 }
3162 if (i == 8000) {
3163 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3164 return -ENODEV;
3165 }
3166 }
3167 tp->nvram_lock_cnt++;
3168 }
3169 return 0;
3170 }
3171
3172 /* tp->lock is held. */
3173 static void tg3_nvram_unlock(struct tg3 *tp)
3174 {
3175 if (tg3_flag(tp, NVRAM)) {
3176 if (tp->nvram_lock_cnt > 0)
3177 tp->nvram_lock_cnt--;
3178 if (tp->nvram_lock_cnt == 0)
3179 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3180 }
3181 }
3182
3183 /* tp->lock is held. */
3184 static void tg3_enable_nvram_access(struct tg3 *tp)
3185 {
3186 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3187 u32 nvaccess = tr32(NVRAM_ACCESS);
3188
3189 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3190 }
3191 }
3192
3193 /* tp->lock is held. */
3194 static void tg3_disable_nvram_access(struct tg3 *tp)
3195 {
3196 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3197 u32 nvaccess = tr32(NVRAM_ACCESS);
3198
3199 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3200 }
3201 }
3202
3203 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3204 u32 offset, u32 *val)
3205 {
3206 u32 tmp;
3207 int i;
3208
3209 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3210 return -EINVAL;
3211
3212 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3213 EEPROM_ADDR_DEVID_MASK |
3214 EEPROM_ADDR_READ);
3215 tw32(GRC_EEPROM_ADDR,
3216 tmp |
3217 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3218 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3219 EEPROM_ADDR_ADDR_MASK) |
3220 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3221
3222 for (i = 0; i < 1000; i++) {
3223 tmp = tr32(GRC_EEPROM_ADDR);
3224
3225 if (tmp & EEPROM_ADDR_COMPLETE)
3226 break;
3227 msleep(1);
3228 }
3229 if (!(tmp & EEPROM_ADDR_COMPLETE))
3230 return -EBUSY;
3231
3232 tmp = tr32(GRC_EEPROM_DATA);
3233
3234 /*
3235 * The data will always be opposite the native endian
3236 * format. Perform a blind byteswap to compensate.
3237 */
3238 *val = swab32(tmp);
3239
3240 return 0;
3241 }
3242
3243 #define NVRAM_CMD_TIMEOUT 10000
3244
3245 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3246 {
3247 int i;
3248
3249 tw32(NVRAM_CMD, nvram_cmd);
3250 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3251 usleep_range(10, 40);
3252 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3253 udelay(10);
3254 break;
3255 }
3256 }
3257
3258 if (i == NVRAM_CMD_TIMEOUT)
3259 return -EBUSY;
3260
3261 return 0;
3262 }
3263
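/* Atmel AT45DB0x1B flashes are not linearly addressed: the part
 * expects a page index above bit ATMEL_AT45DB0X1B_PAGE_POS plus a
 * byte offset within the page. As a worked example, assuming the
 * 264-byte pages these parts use, linear address 1000 is page 3,
 * offset 208, so phys = (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208.
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */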
3264 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3265 {
3266 if (tg3_flag(tp, NVRAM) &&
3267 tg3_flag(tp, NVRAM_BUFFERED) &&
3268 tg3_flag(tp, FLASH) &&
3269 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3270 (tp->nvram_jedecnum == JEDEC_ATMEL))
3271
3272 addr = ((addr / tp->nvram_pagesize) <<
3273 ATMEL_AT45DB0X1B_PAGE_POS) +
3274 (addr % tp->nvram_pagesize);
3275
3276 return addr;
3277 }
3278
3279 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3280 {
3281 if (tg3_flag(tp, NVRAM) &&
3282 tg3_flag(tp, NVRAM_BUFFERED) &&
3283 tg3_flag(tp, FLASH) &&
3284 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3285 (tp->nvram_jedecnum == JEDEC_ATMEL))
3286
3287 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3288 tp->nvram_pagesize) +
3289 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3290
3291 return addr;
3292 }
3293
3294 /* NOTE: Data read in from NVRAM is byteswapped according to
3295 * the byteswapping settings for all other register accesses.
3296 * tg3 devices are BE devices, so on a BE machine, the data
3297 * returned will be exactly as it is seen in NVRAM. On a LE
3298 * machine, the 32-bit value will be byteswapped.
3299 */
3300 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3301 {
3302 int ret;
3303
3304 if (!tg3_flag(tp, NVRAM))
3305 return tg3_nvram_read_using_eeprom(tp, offset, val);
3306
3307 offset = tg3_nvram_phys_addr(tp, offset);
3308
3309 if (offset > NVRAM_ADDR_MSK)
3310 return -EINVAL;
3311
3312 ret = tg3_nvram_lock(tp);
3313 if (ret)
3314 return ret;
3315
3316 tg3_enable_nvram_access(tp);
3317
3318 tw32(NVRAM_ADDR, offset);
3319 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3320 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3321
3322 if (ret == 0)
3323 *val = tr32(NVRAM_RDDATA);
3324
3325 tg3_disable_nvram_access(tp);
3326
3327 tg3_nvram_unlock(tp);
3328
3329 return ret;
3330 }
3331
3332 /* Ensures NVRAM data is in bytestream format. */
3333 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3334 {
3335 u32 v;
3336 int res = tg3_nvram_read(tp, offset, &v);
3337 if (!res)
3338 *val = cpu_to_be32(v);
3339 return res;
3340 }
3341
3342 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3343 u32 offset, u32 len, u8 *buf)
3344 {
3345 int i, j, rc = 0;
3346 u32 val;
3347
3348 for (i = 0; i < len; i += 4) {
3349 u32 addr;
3350 __be32 data;
3351
3352 addr = offset + i;
3353
3354 memcpy(&data, buf + i, 4);
3355
3356 /*
3357 * The SEEPROM interface expects the data to always be opposite
3358 * the native endian format. We accomplish this by reversing
3359 * all the operations that would have been performed on the
3360 * data from a call to tg3_nvram_read_be32().
3361 */
3362 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3363
3364 val = tr32(GRC_EEPROM_ADDR);
3365 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3366
3367 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3368 EEPROM_ADDR_READ);
3369 tw32(GRC_EEPROM_ADDR, val |
3370 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3371 (addr & EEPROM_ADDR_ADDR_MASK) |
3372 EEPROM_ADDR_START |
3373 EEPROM_ADDR_WRITE);
3374
3375 for (j = 0; j < 1000; j++) {
3376 val = tr32(GRC_EEPROM_ADDR);
3377
3378 if (val & EEPROM_ADDR_COMPLETE)
3379 break;
3380 msleep(1);
3381 }
3382 if (!(val & EEPROM_ADDR_COMPLETE)) {
3383 rc = -EBUSY;
3384 break;
3385 }
3386 }
3387
3388 return rc;
3389 }
3390
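/* Unbuffered flash cannot absorb sub-page writes, so each page is
 * handled read-modify-write: read the whole page into a scratch
 * buffer, merge in the caller's data, issue a write enable, erase
 * the page, then stream it back one dword at a time with FIRST/LAST
 * framing on the page boundaries.
 */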
3391 /* offset and length are dword aligned */
3392 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3393 u8 *buf)
3394 {
3395 int ret = 0;
3396 u32 pagesize = tp->nvram_pagesize;
3397 u32 pagemask = pagesize - 1;
3398 u32 nvram_cmd;
3399 u8 *tmp;
3400
3401 tmp = kmalloc(pagesize, GFP_KERNEL);
3402 if (tmp == NULL)
3403 return -ENOMEM;
3404
3405 while (len) {
3406 int j;
3407 u32 phy_addr, page_off, size;
3408
3409 phy_addr = offset & ~pagemask;
3410
3411 for (j = 0; j < pagesize; j += 4) {
3412 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3413 (__be32 *) (tmp + j));
3414 if (ret)
3415 break;
3416 }
3417 if (ret)
3418 break;
3419
3420 page_off = offset & pagemask;
3421 size = pagesize;
3422 if (len < size)
3423 size = len;
3424
3425 len -= size;
3426
3427 memcpy(tmp + page_off, buf, size);
3428
3429 offset = offset + (pagesize - page_off);
3430
3431 tg3_enable_nvram_access(tp);
3432
3433 /*
3434 * Before we can erase the flash page, we need
3435 * to issue a special "write enable" command.
3436 */
3437 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3438
3439 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3440 break;
3441
3442 /* Erase the target page */
3443 tw32(NVRAM_ADDR, phy_addr);
3444
3445 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3446 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3447
3448 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3449 break;
3450
3451 /* Issue another write enable to start the write. */
3452 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3453
3454 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3455 break;
3456
3457 for (j = 0; j < pagesize; j += 4) {
3458 __be32 data;
3459
3460 data = *((__be32 *) (tmp + j));
3461
3462 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3463
3464 tw32(NVRAM_ADDR, phy_addr + j);
3465
3466 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3467 NVRAM_CMD_WR;
3468
3469 if (j == 0)
3470 nvram_cmd |= NVRAM_CMD_FIRST;
3471 else if (j == (pagesize - 4))
3472 nvram_cmd |= NVRAM_CMD_LAST;
3473
3474 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3475 if (ret)
3476 break;
3477 }
3478 if (ret)
3479 break;
3480 }
3481
3482 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3483 tg3_nvram_exec_cmd(tp, nvram_cmd);
3484
3485 kfree(tmp);
3486
3487 return ret;
3488 }
3489
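/* Buffered flash (and plain EEPROM) needs no erase cycle: each dword
 * is written directly, with NVRAM_CMD_FIRST tagged at the start of
 * the transfer and at every page start, and NVRAM_CMD_LAST at every
 * page end and at the final dword.
 */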
3490 /* offset and length are dword aligned */
3491 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3492 u8 *buf)
3493 {
3494 int i, ret = 0;
3495
3496 for (i = 0; i < len; i += 4, offset += 4) {
3497 u32 page_off, phy_addr, nvram_cmd;
3498 __be32 data;
3499
3500 memcpy(&data, buf + i, 4);
3501 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3502
3503 page_off = offset % tp->nvram_pagesize;
3504
3505 phy_addr = tg3_nvram_phys_addr(tp, offset);
3506
3507 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3508
3509 if (page_off == 0 || i == 0)
3510 nvram_cmd |= NVRAM_CMD_FIRST;
3511 if (page_off == (tp->nvram_pagesize - 4))
3512 nvram_cmd |= NVRAM_CMD_LAST;
3513
3514 if (i == (len - 4))
3515 nvram_cmd |= NVRAM_CMD_LAST;
3516
3517 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3518 !tg3_flag(tp, FLASH) ||
3519 !tg3_flag(tp, 57765_PLUS))
3520 tw32(NVRAM_ADDR, phy_addr);
3521
3522 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3523 !tg3_flag(tp, 5755_PLUS) &&
3524 (tp->nvram_jedecnum == JEDEC_ST) &&
3525 (nvram_cmd & NVRAM_CMD_FIRST)) {
3526 u32 cmd;
3527
3528 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3529 ret = tg3_nvram_exec_cmd(tp, cmd);
3530 if (ret)
3531 break;
3532 }
3533 if (!tg3_flag(tp, FLASH)) {
3534 /* We always do complete word writes to eeprom. */
3535 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3536 }
3537
3538 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3539 if (ret)
3540 break;
3541 }
3542 return ret;
3543 }
3544
3545 /* offset and length are dword aligned */
3546 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3547 {
3548 int ret;
3549
3550 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3551 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3552 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3553 udelay(40);
3554 }
3555
3556 if (!tg3_flag(tp, NVRAM)) {
3557 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3558 } else {
3559 u32 grc_mode;
3560
3561 ret = tg3_nvram_lock(tp);
3562 if (ret)
3563 return ret;
3564
3565 tg3_enable_nvram_access(tp);
3566 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3567 tw32(NVRAM_WRITE1, 0x406);
3568
3569 grc_mode = tr32(GRC_MODE);
3570 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3571
3572 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3573 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3574 buf);
3575 } else {
3576 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3577 buf);
3578 }
3579
3580 grc_mode = tr32(GRC_MODE);
3581 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3582
3583 tg3_disable_nvram_access(tp);
3584 tg3_nvram_unlock(tp);
3585 }
3586
3587 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3588 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3589 udelay(40);
3590 }
3591
3592 return ret;
3593 }
3594
3595 #define RX_CPU_SCRATCH_BASE 0x30000
3596 #define RX_CPU_SCRATCH_SIZE 0x04000
3597 #define TX_CPU_SCRATCH_BASE 0x34000
3598 #define TX_CPU_SCRATCH_SIZE 0x04000
3599
3600 /* tp->lock is held. */
3601 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3602 {
3603 int i;
3604 const int iters = 10000;
3605
3606 for (i = 0; i < iters; i++) {
3607 tw32(cpu_base + CPU_STATE, 0xffffffff);
3608 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3609 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3610 break;
3611 if (pci_channel_offline(tp->pdev))
3612 return -EBUSY;
3613 }
3614
3615 return (i == iters) ? -EBUSY : 0;
3616 }
3617
3618 /* tp->lock is held. */
3619 static int tg3_rxcpu_pause(struct tg3 *tp)
3620 {
3621 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3622
3623 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3624 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3625 udelay(10);
3626
3627 return rc;
3628 }
3629
3630 /* tp->lock is held. */
3631 static int tg3_txcpu_pause(struct tg3 *tp)
3632 {
3633 return tg3_pause_cpu(tp, TX_CPU_BASE);
3634 }
3635
3636 /* tp->lock is held. */
3637 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3638 {
3639 tw32(cpu_base + CPU_STATE, 0xffffffff);
3640 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3641 }
3642
3643 /* tp->lock is held. */
3644 static void tg3_rxcpu_resume(struct tg3 *tp)
3645 {
3646 tg3_resume_cpu(tp, RX_CPU_BASE);
3647 }
3648
3649 /* tp->lock is held. */
3650 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3651 {
3652 int rc;
3653
3654 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3655
3656 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3657 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3658
3659 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3660 return 0;
3661 }
3662 if (cpu_base == RX_CPU_BASE) {
3663 rc = tg3_rxcpu_pause(tp);
3664 } else {
3665 /*
3666 * There is only an Rx CPU for the 5750 derivative in the
3667 * BCM4785.
3668 */
3669 if (tg3_flag(tp, IS_SSB_CORE))
3670 return 0;
3671
3672 rc = tg3_txcpu_pause(tp);
3673 }
3674
3675 if (rc) {
3676 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3677 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3678 return -ENODEV;
3679 }
3680
3681 /* Clear firmware's nvram arbitration. */
3682 if (tg3_flag(tp, NVRAM))
3683 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3684 return 0;
3685 }
3686
3687 static int tg3_fw_data_len(struct tg3 *tp,
3688 const struct tg3_firmware_hdr *fw_hdr)
3689 {
3690 int fw_len;
3691
3692 /* Non fragmented firmware have one firmware header followed by a
3693 * contiguous chunk of data to be written. The length field in that
3694 * header is not the length of data to be written but the complete
3695 * length of the bss. The data length is determined based on
3696 * tp->fw->size minus headers.
3697 *
3698 * Fragmented firmware have a main header followed by multiple
3699 * fragments. Each fragment is identical to non fragmented firmware
3700 * with a firmware header followed by a contiguous chunk of data. In
3701 * the main header, the length field is unused and set to 0xffffffff.
3702 * In each fragment header the length is the entire size of that
3703 * fragment i.e. fragment data + header length. Data length is
3704 * therefore length field in the header minus TG3_FW_HDR_LEN.
3705 */
3706 if (tp->fw_len == 0xffffffff)
3707 fw_len = be32_to_cpu(fw_hdr->len);
3708 else
3709 fw_len = tp->fw->size;
3710
3711 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3712 }
3713
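/* A sketch of the firmware image layouts handled below, per the
 * description in tg3_fw_data_len() above:
 *
 *   non-fragmented: [hdr: base_addr, len = text+bss extent] [data ...]
 *   fragmented:     [main hdr: len = 0xffffffff]
 *                   [frag hdr: len = L1] [L1 - TG3_FW_HDR_LEN bytes]
 *                   [frag hdr: len = L2] [L2 - TG3_FW_HDR_LEN bytes] ...
 */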
3714 /* tp->lock is held. */
3715 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3716 u32 cpu_scratch_base, int cpu_scratch_size,
3717 const struct tg3_firmware_hdr *fw_hdr)
3718 {
3719 int err, i;
3720 void (*write_op)(struct tg3 *, u32, u32);
3721 int total_len = tp->fw->size;
3722
3723 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3724 netdev_err(tp->dev,
3725 "%s: Trying to load TX cpu firmware which is 5705\n",
3726 __func__);
3727 return -EINVAL;
3728 }
3729
3730 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3731 write_op = tg3_write_mem;
3732 else
3733 write_op = tg3_write_indirect_reg32;
3734
3735 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3736 /* It is possible that bootcode is still loading at this point.
3737 * Get the nvram lock first before halting the cpu.
3738 */
3739 int lock_err = tg3_nvram_lock(tp);
3740 err = tg3_halt_cpu(tp, cpu_base);
3741 if (!lock_err)
3742 tg3_nvram_unlock(tp);
3743 if (err)
3744 goto out;
3745
3746 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3747 write_op(tp, cpu_scratch_base + i, 0);
3748 tw32(cpu_base + CPU_STATE, 0xffffffff);
3749 tw32(cpu_base + CPU_MODE,
3750 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3751 } else {
3752 /* Subtract the additional main header for fragmented firmware
3753 * and advance to the first fragment.
3754 */
3755 total_len -= TG3_FW_HDR_LEN;
3756 fw_hdr++;
3757 }
3758
3759 do {
3760 u32 *fw_data = (u32 *)(fw_hdr + 1);
3761 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3762 write_op(tp, cpu_scratch_base +
3763 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3764 (i * sizeof(u32)),
3765 be32_to_cpu(fw_data[i]));
3766
3767 total_len -= be32_to_cpu(fw_hdr->len);
3768
3769 /* Advance to next fragment */
3770 fw_hdr = (struct tg3_firmware_hdr *)
3771 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3772 } while (total_len > 0);
3773
3774 err = 0;
3775
3776 out:
3777 return err;
3778 }
3779
3780 /* tp->lock is held. */
3781 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3782 {
3783 int i;
3784 const int iters = 5;
3785
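/* Write the desired PC and read it back; if the write did not stick,
 * re-halt the CPU and retry, up to five attempts roughly 1 ms apart.
 */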
3786 tw32(cpu_base + CPU_STATE, 0xffffffff);
3787 tw32_f(cpu_base + CPU_PC, pc);
3788
3789 for (i = 0; i < iters; i++) {
3790 if (tr32(cpu_base + CPU_PC) == pc)
3791 break;
3792 tw32(cpu_base + CPU_STATE, 0xffffffff);
3793 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3794 tw32_f(cpu_base + CPU_PC, pc);
3795 udelay(1000);
3796 }
3797
3798 return (i == iters) ? -EBUSY : 0;
3799 }
3800
3801 /* tp->lock is held. */
3802 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3803 {
3804 const struct tg3_firmware_hdr *fw_hdr;
3805 int err;
3806
3807 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3808
3809 /* The firmware blob starts with version numbers, followed by
3810 start address and length. We are setting the complete length;
3811 length = end_address_of_bss - start_address_of_text.
3812 The remainder is the blob to be loaded contiguously
3813 from the start address. */
3814
3815 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3816 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3817 fw_hdr);
3818 if (err)
3819 return err;
3820
3821 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3822 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3823 fw_hdr);
3824 if (err)
3825 return err;
3826
3827 /* Now startup only the RX cpu. */
3828 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3829 be32_to_cpu(fw_hdr->base_addr));
3830 if (err) {
3831 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3832 "should be %08x\n", __func__,
3833 tr32(RX_CPU_BASE + CPU_PC),
3834 be32_to_cpu(fw_hdr->base_addr));
3835 return -ENODEV;
3836 }
3837
3838 tg3_rxcpu_resume(tp);
3839
3840 return 0;
3841 }
3842
3843 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3844 {
3845 const int iters = 1000;
3846 int i;
3847 u32 val;
3848
3849 /* Wait for boot code to complete initialization and enter service
3850 * loop. It is then safe to download service patches
3851 */
3852 for (i = 0; i < iters; i++) {
3853 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3854 break;
3855
3856 udelay(10);
3857 }
3858
3859 if (i == iters) {
3860 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3861 return -EBUSY;
3862 }
3863
3864 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3865 if (val & 0xff) {
3866 netdev_warn(tp->dev,
3867 "Other patches exist. Not downloading EEE patch\n");
3868 return -EEXIST;
3869 }
3870
3871 return 0;
3872 }
3873
3874 /* tp->lock is held. */
3875 static void tg3_load_57766_firmware(struct tg3 *tp)
3876 {
3877 struct tg3_firmware_hdr *fw_hdr;
3878
3879 if (!tg3_flag(tp, NO_NVRAM))
3880 return;
3881
3882 if (tg3_validate_rxcpu_state(tp))
3883 return;
3884
3885 if (!tp->fw)
3886 return;
3887
3888 /* This firmware blob has a different format from older firmware
3889 * releases, as described below. The main difference is that the data
3890 * to be written is fragmented and goes to non-contiguous locations.
3891 *
3892 * It begins with a firmware header identical to other firmware,
3893 * consisting of version, base addr and length. The length here is
3894 * unused and set to 0xffffffff.
3895 *
3896 * This is followed by a series of firmware fragments, each individually
3897 * identical to older firmware images, i.e. a firmware header followed
3898 * by the data for that fragment. The version field of each fragment
3899 * header is unused.
3900 */
3901
3902 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3903 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3904 return;
3905
3906 if (tg3_rxcpu_pause(tp))
3907 return;
3908
3909 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3910 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3911
3912 tg3_rxcpu_resume(tp);
3913 }
3914
3915 /* tp->lock is held. */
3916 static int tg3_load_tso_firmware(struct tg3 *tp)
3917 {
3918 const struct tg3_firmware_hdr *fw_hdr;
3919 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3920 int err;
3921
3922 if (!tg3_flag(tp, FW_TSO))
3923 return 0;
3924
3925 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3926
3927 /* The firmware blob starts with version numbers, followed by
3928 start address and length. We are setting the complete length;
3929 length = end_address_of_bss - start_address_of_text.
3930 The remainder is the blob to be loaded contiguously
3931 from the start address. */
3932
3933 cpu_scratch_size = tp->fw_len;
3934
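/* On the 5705 the TSO firmware runs on the RX CPU and borrows the
 * mbuf pool as its scratch area; everything else loads into the TX
 * CPU's dedicated scratch space.
 */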
3935 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3936 cpu_base = RX_CPU_BASE;
3937 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3938 } else {
3939 cpu_base = TX_CPU_BASE;
3940 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3941 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3942 }
3943
3944 err = tg3_load_firmware_cpu(tp, cpu_base,
3945 cpu_scratch_base, cpu_scratch_size,
3946 fw_hdr);
3947 if (err)
3948 return err;
3949
3950 /* Now startup the cpu. */
3951 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3952 be32_to_cpu(fw_hdr->base_addr));
3953 if (err) {
3954 netdev_err(tp->dev,
3955 "%s fails to set CPU PC, is %08x should be %08x\n",
3956 __func__, tr32(cpu_base + CPU_PC),
3957 be32_to_cpu(fw_hdr->base_addr));
3958 return -ENODEV;
3959 }
3960
3961 tg3_resume_cpu(tp, cpu_base);
3962 return 0;
3963 }
3964
3965 /* tp->lock is held. */
3966 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3967 {
3968 u32 addr_high, addr_low;
3969
3970 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3971 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3972 (mac_addr[4] << 8) | mac_addr[5]);
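/* For example, the address 00:10:18:aa:bb:cc packs as
 * addr_high = 0x00000010 and addr_low = 0x18aabbcc.
 */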
3973
3974 if (index < 4) {
3975 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3976 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3977 } else {
3978 index -= 4;
3979 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3980 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3981 }
3982 }
3983
3984 /* tp->lock is held. */
3985 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3986 {
3987 u32 addr_high;
3988 int i;
3989
3990 for (i = 0; i < 4; i++) {
3991 if (i == 1 && skip_mac_1)
3992 continue;
3993 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3994 }
3995
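/* The 5703 and 5704 also provide twelve extended MAC address slots;
 * mirror the station address into all of them.
 */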
3996 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3997 tg3_asic_rev(tp) == ASIC_REV_5704) {
3998 for (i = 4; i < 16; i++)
3999 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
4000 }
4001
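/* Seed the transmit backoff generator from the byte-sum of the MAC
 * address, so different stations tend to pick different backoff
 * slots.
 */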
4002 addr_high = (tp->dev->dev_addr[0] +
4003 tp->dev->dev_addr[1] +
4004 tp->dev->dev_addr[2] +
4005 tp->dev->dev_addr[3] +
4006 tp->dev->dev_addr[4] +
4007 tp->dev->dev_addr[5]) &
4008 TX_BACKOFF_SEED_MASK;
4009 tw32(MAC_TX_BACKOFF_SEED, addr_high);
4010 }
4011
4012 static void tg3_enable_register_access(struct tg3 *tp)
4013 {
4014 /*
4015 * Make sure register accesses (indirect or otherwise) will function
4016 * correctly.
4017 */
4018 pci_write_config_dword(tp->pdev,
4019 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4020 }
4021
4022 static int tg3_power_up(struct tg3 *tp)
4023 {
4024 int err;
4025
4026 tg3_enable_register_access(tp);
4027
4028 err = pci_set_power_state(tp->pdev, PCI_D0);
4029 if (!err) {
4030 /* Switch out of Vaux if it is a NIC */
4031 tg3_pwrsrc_switch_to_vmain(tp);
4032 } else {
4033 netdev_err(tp->dev, "Transition to D0 failed\n");
4034 }
4035
4036 return err;
4037 }
4038
4039 static int tg3_setup_phy(struct tg3 *, bool);
4040
4041 static int tg3_power_down_prepare(struct tg3 *tp)
4042 {
4043 u32 misc_host_ctrl;
4044 bool device_should_wake, do_low_power;
4045
4046 tg3_enable_register_access(tp);
4047
4048 /* Restore the CLKREQ setting. */
4049 if (tg3_flag(tp, CLKREQ_BUG))
4050 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4051 PCI_EXP_LNKCTL_CLKREQ_EN);
4052
4053 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4054 tw32(TG3PCI_MISC_HOST_CTRL,
4055 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4056
4057 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4058 tg3_flag(tp, WOL_ENABLE);
4059
4060 if (tg3_flag(tp, USE_PHYLIB)) {
4061 do_low_power = false;
4062 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4063 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4064 struct phy_device *phydev;
4065 u32 phyid, advertising;
4066
4067 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4068
4069 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4070
4071 tp->link_config.speed = phydev->speed;
4072 tp->link_config.duplex = phydev->duplex;
4073 tp->link_config.autoneg = phydev->autoneg;
4074 tp->link_config.advertising = phydev->advertising;
4075
4076 advertising = ADVERTISED_TP |
4077 ADVERTISED_Pause |
4078 ADVERTISED_Autoneg |
4079 ADVERTISED_10baseT_Half;
4080
4081 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4082 if (tg3_flag(tp, WOL_SPEED_100MB))
4083 advertising |=
4084 ADVERTISED_100baseT_Half |
4085 ADVERTISED_100baseT_Full |
4086 ADVERTISED_10baseT_Full;
4087 else
4088 advertising |= ADVERTISED_10baseT_Full;
4089 }
4090
4091 phydev->advertising = advertising;
4092
4093 phy_start_aneg(phydev);
4094
4095 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4096 if (phyid != PHY_ID_BCMAC131) {
4097 phyid &= PHY_BCM_OUI_MASK;
4098 if (phyid == PHY_BCM_OUI_1 ||
4099 phyid == PHY_BCM_OUI_2 ||
4100 phyid == PHY_BCM_OUI_3)
4101 do_low_power = true;
4102 }
4103 }
4104 } else {
4105 do_low_power = true;
4106
4107 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4108 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4109
4110 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4111 tg3_setup_phy(tp, false);
4112 }
4113
4114 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4115 u32 val;
4116
4117 val = tr32(GRC_VCPU_EXT_CTRL);
4118 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4119 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4120 int i;
4121 u32 val;
4122
4123 for (i = 0; i < 200; i++) {
4124 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4125 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4126 break;
4127 msleep(1);
4128 }
4129 }
4130 if (tg3_flag(tp, WOL_CAP))
4131 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4132 WOL_DRV_STATE_SHUTDOWN |
4133 WOL_DRV_WOL |
4134 WOL_SET_MAGIC_PKT);
4135
4136 if (device_should_wake) {
4137 u32 mac_mode;
4138
4139 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4140 if (do_low_power &&
4141 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4142 tg3_phy_auxctl_write(tp,
4143 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4144 MII_TG3_AUXCTL_PCTL_WOL_EN |
4145 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4146 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4147 udelay(40);
4148 }
4149
4150 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4151 mac_mode = MAC_MODE_PORT_MODE_GMII;
4152 else if (tp->phy_flags &
4153 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4154 if (tp->link_config.active_speed == SPEED_1000)
4155 mac_mode = MAC_MODE_PORT_MODE_GMII;
4156 else
4157 mac_mode = MAC_MODE_PORT_MODE_MII;
4158 } else
4159 mac_mode = MAC_MODE_PORT_MODE_MII;
4160
4161 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4162 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4163 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4164 SPEED_100 : SPEED_10;
4165 if (tg3_5700_link_polarity(tp, speed))
4166 mac_mode |= MAC_MODE_LINK_POLARITY;
4167 else
4168 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4169 }
4170 } else {
4171 mac_mode = MAC_MODE_PORT_MODE_TBI;
4172 }
4173
4174 if (!tg3_flag(tp, 5750_PLUS))
4175 tw32(MAC_LED_CTRL, tp->led_ctrl);
4176
4177 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4178 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4179 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4180 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4181
4182 if (tg3_flag(tp, ENABLE_APE))
4183 mac_mode |= MAC_MODE_APE_TX_EN |
4184 MAC_MODE_APE_RX_EN |
4185 MAC_MODE_TDE_ENABLE;
4186
4187 tw32_f(MAC_MODE, mac_mode);
4188 udelay(100);
4189
4190 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4191 udelay(10);
4192 }
4193
4194 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4195 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4196 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4197 u32 base_val;
4198
4199 base_val = tp->pci_clock_ctrl;
4200 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4201 CLOCK_CTRL_TXCLK_DISABLE);
4202
4203 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4204 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4205 } else if (tg3_flag(tp, 5780_CLASS) ||
4206 tg3_flag(tp, CPMU_PRESENT) ||
4207 tg3_asic_rev(tp) == ASIC_REV_5906) {
4208 /* do nothing */
4209 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4210 u32 newbits1, newbits2;
4211
4212 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4213 tg3_asic_rev(tp) == ASIC_REV_5701) {
4214 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4215 CLOCK_CTRL_TXCLK_DISABLE |
4216 CLOCK_CTRL_ALTCLK);
4217 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4218 } else if (tg3_flag(tp, 5705_PLUS)) {
4219 newbits1 = CLOCK_CTRL_625_CORE;
4220 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4221 } else {
4222 newbits1 = CLOCK_CTRL_ALTCLK;
4223 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4224 }
4225
4226 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4227 40);
4228
4229 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4230 40);
4231
4232 if (!tg3_flag(tp, 5705_PLUS)) {
4233 u32 newbits3;
4234
4235 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4236 tg3_asic_rev(tp) == ASIC_REV_5701) {
4237 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4238 CLOCK_CTRL_TXCLK_DISABLE |
4239 CLOCK_CTRL_44MHZ_CORE);
4240 } else {
4241 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4242 }
4243
4244 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4245 tp->pci_clock_ctrl | newbits3, 40);
4246 }
4247 }
4248
4249 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4250 tg3_power_down_phy(tp, do_low_power);
4251
4252 tg3_frob_aux_power(tp, true);
4253
4254 /* Workaround for unstable PLL clock */
4255 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4256 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4257 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4258 u32 val = tr32(0x7d00);
4259
4260 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4261 tw32(0x7d00, val);
4262 if (!tg3_flag(tp, ENABLE_ASF)) {
4263 int err;
4264
4265 err = tg3_nvram_lock(tp);
4266 tg3_halt_cpu(tp, RX_CPU_BASE);
4267 if (!err)
4268 tg3_nvram_unlock(tp);
4269 }
4270 }
4271
4272 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4273
4274 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4275
4276 return 0;
4277 }
4278
4279 static void tg3_power_down(struct tg3 *tp)
4280 {
4281 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4282 pci_set_power_state(tp->pdev, PCI_D3hot);
4283 }
4284
4285 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4286 {
4287 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4288 case MII_TG3_AUX_STAT_10HALF:
4289 *speed = SPEED_10;
4290 *duplex = DUPLEX_HALF;
4291 break;
4292
4293 case MII_TG3_AUX_STAT_10FULL:
4294 *speed = SPEED_10;
4295 *duplex = DUPLEX_FULL;
4296 break;
4297
4298 case MII_TG3_AUX_STAT_100HALF:
4299 *speed = SPEED_100;
4300 *duplex = DUPLEX_HALF;
4301 break;
4302
4303 case MII_TG3_AUX_STAT_100FULL:
4304 *speed = SPEED_100;
4305 *duplex = DUPLEX_FULL;
4306 break;
4307
4308 case MII_TG3_AUX_STAT_1000HALF:
4309 *speed = SPEED_1000;
4310 *duplex = DUPLEX_HALF;
4311 break;
4312
4313 case MII_TG3_AUX_STAT_1000FULL:
4314 *speed = SPEED_1000;
4315 *duplex = DUPLEX_FULL;
4316 break;
4317
4318 default:
4319 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4320 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4321 SPEED_10;
4322 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4323 DUPLEX_HALF;
4324 break;
4325 }
4326 *speed = SPEED_UNKNOWN;
4327 *duplex = DUPLEX_UNKNOWN;
4328 break;
4329 }
4330 }
4331
4332 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4333 {
4334 int err = 0;
4335 u32 val, new_adv;
4336
4337 new_adv = ADVERTISE_CSMA;
4338 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4339 new_adv |= mii_advertise_flowctrl(flowctrl);
4340
4341 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4342 if (err)
4343 goto done;
4344
4345 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4346 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4347
4348 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4349 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4350 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4351
4352 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4353 if (err)
4354 goto done;
4355 }
4356
4357 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4358 goto done;
4359
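/* For EEE-capable PHYs: mask LPI generation while reprogramming, then
 * build and write the EEE advertisement (clause 45 register
 * MDIO_AN_EEE_ADV in the MDIO_MMD_AN device).
 */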
4360 tw32(TG3_CPMU_EEE_MODE,
4361 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4362
4363 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4364 if (!err) {
4365 u32 err2;
4366
4367 val = 0;
4368 /* Advertise 100-BaseTX EEE ability */
4369 if (advertise & ADVERTISED_100baseT_Full)
4370 val |= MDIO_AN_EEE_ADV_100TX;
4371 /* Advertise 1000-BaseT EEE ability */
4372 if (advertise & ADVERTISED_1000baseT_Full)
4373 val |= MDIO_AN_EEE_ADV_1000T;
4374
4375 if (!tp->eee.eee_enabled) {
4376 val = 0;
4377 tp->eee.advertised = 0;
4378 } else {
4379 tp->eee.advertised = advertise &
4380 (ADVERTISED_100baseT_Full |
4381 ADVERTISED_1000baseT_Full);
4382 }
4383
4384 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4385 if (err)
4386 val = 0;
4387
4388 switch (tg3_asic_rev(tp)) {
4389 case ASIC_REV_5717:
4390 case ASIC_REV_57765:
4391 case ASIC_REV_57766:
4392 case ASIC_REV_5719:
4393 /* If we advertised any eee advertisements above... */
4394 if (val)
4395 val = MII_TG3_DSP_TAP26_ALNOKO |
4396 MII_TG3_DSP_TAP26_RMRXSTO |
4397 MII_TG3_DSP_TAP26_OPCSINPT;
4398 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4399 /* Fall through */
4400 case ASIC_REV_5720:
4401 case ASIC_REV_5762:
4402 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4403 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4404 MII_TG3_DSP_CH34TP2_HIBW01);
4405 }
4406
4407 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4408 if (!err)
4409 err = err2;
4410 }
4411
4412 done:
4413 return err;
4414 }
4415
4416 static void tg3_phy_copper_begin(struct tg3 *tp)
4417 {
4418 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4419 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4420 u32 adv, fc;
4421
4422 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4423 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4424 adv = ADVERTISED_10baseT_Half |
4425 ADVERTISED_10baseT_Full;
4426 if (tg3_flag(tp, WOL_SPEED_100MB))
4427 adv |= ADVERTISED_100baseT_Half |
4428 ADVERTISED_100baseT_Full;
4429 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4430 if (!(tp->phy_flags &
4431 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4432 adv |= ADVERTISED_1000baseT_Half;
4433 adv |= ADVERTISED_1000baseT_Full;
4434 }
4435
4436 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4437 } else {
4438 adv = tp->link_config.advertising;
4439 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4440 adv &= ~(ADVERTISED_1000baseT_Half |
4441 ADVERTISED_1000baseT_Full);
4442
4443 fc = tp->link_config.flowctrl;
4444 }
4445
4446 tg3_phy_autoneg_cfg(tp, adv, fc);
4447
4448 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4449 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4450 /* Normally during power down we want to autonegotiate
4451 * the lowest possible speed for WOL. However, to avoid
4452 * link flap, we leave it untouched.
4453 */
4454 return;
4455 }
4456
4457 tg3_writephy(tp, MII_BMCR,
4458 BMCR_ANENABLE | BMCR_ANRESTART);
4459 } else {
4460 int i;
4461 u32 bmcr, orig_bmcr;
4462
4463 tp->link_config.active_speed = tp->link_config.speed;
4464 tp->link_config.active_duplex = tp->link_config.duplex;
4465
4466 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4467 /* With autoneg disabled, 5715 only links up when the
4468 * advertisement register has the configured speed
4469 * enabled.
4470 */
4471 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4472 }
4473
4474 bmcr = 0;
4475 switch (tp->link_config.speed) {
4476 default:
4477 case SPEED_10:
4478 break;
4479
4480 case SPEED_100:
4481 bmcr |= BMCR_SPEED100;
4482 break;
4483
4484 case SPEED_1000:
4485 bmcr |= BMCR_SPEED1000;
4486 break;
4487 }
4488
4489 if (tp->link_config.duplex == DUPLEX_FULL)
4490 bmcr |= BMCR_FULLDPLX;
4491
4492 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4493 (bmcr != orig_bmcr)) {
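/* Force the link down by briefly putting the PHY in loopback, wait
 * up to ~15 ms for link-down, then write the new speed/duplex.
 */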
4494 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4495 for (i = 0; i < 1500; i++) {
4496 u32 tmp;
4497
4498 udelay(10);
4499 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4500 tg3_readphy(tp, MII_BMSR, &tmp))
4501 continue;
4502 if (!(tmp & BMSR_LSTATUS)) {
4503 udelay(40);
4504 break;
4505 }
4506 }
4507 tg3_writephy(tp, MII_BMCR, bmcr);
4508 udelay(40);
4509 }
4510 }
4511 }
4512
4513 static int tg3_phy_pull_config(struct tg3 *tp)
4514 {
4515 int err;
4516 u32 val;
4517
4518 err = tg3_readphy(tp, MII_BMCR, &val);
4519 if (err)
4520 goto done;
4521
4522 if (!(val & BMCR_ANENABLE)) {
4523 tp->link_config.autoneg = AUTONEG_DISABLE;
4524 tp->link_config.advertising = 0;
4525 tg3_flag_clear(tp, PAUSE_AUTONEG);
4526
4527 err = -EIO;
4528
4529 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4530 case 0:
4531 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4532 goto done;
4533
4534 tp->link_config.speed = SPEED_10;
4535 break;
4536 case BMCR_SPEED100:
4537 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4538 goto done;
4539
4540 tp->link_config.speed = SPEED_100;
4541 break;
4542 case BMCR_SPEED1000:
4543 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4544 tp->link_config.speed = SPEED_1000;
4545 break;
4546 }
4547 /* Fall through */
4548 default:
4549 goto done;
4550 }
4551
4552 if (val & BMCR_FULLDPLX)
4553 tp->link_config.duplex = DUPLEX_FULL;
4554 else
4555 tp->link_config.duplex = DUPLEX_HALF;
4556
4557 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4558
4559 err = 0;
4560 goto done;
4561 }
4562
4563 tp->link_config.autoneg = AUTONEG_ENABLE;
4564 tp->link_config.advertising = ADVERTISED_Autoneg;
4565 tg3_flag_set(tp, PAUSE_AUTONEG);
4566
4567 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4568 u32 adv;
4569
4570 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4571 if (err)
4572 goto done;
4573
4574 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4575 tp->link_config.advertising |= adv | ADVERTISED_TP;
4576
4577 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4578 } else {
4579 tp->link_config.advertising |= ADVERTISED_FIBRE;
4580 }
4581
4582 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4583 u32 adv;
4584
4585 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4586 err = tg3_readphy(tp, MII_CTRL1000, &val);
4587 if (err)
4588 goto done;
4589
4590 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4591 } else {
4592 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4593 if (err)
4594 goto done;
4595
4596 adv = tg3_decode_flowctrl_1000X(val);
4597 tp->link_config.flowctrl = adv;
4598
4599 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4600 adv = mii_adv_to_ethtool_adv_x(val);
4601 }
4602
4603 tp->link_config.advertising |= adv;
4604 }
4605
4606 done:
4607 return err;
4608 }
4609
4610 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4611 {
4612 int err;
4613
4614 /* Turn off tap power management. */
4615 /* Set Extended packet length bit */
4616 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4617
4618 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4619 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4620 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4621 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4622 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4623
4624 udelay(40);
4625
4626 return err;
4627 }
4628
4629 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4630 {
4631 struct ethtool_eee eee;
4632
4633 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4634 return true;
4635
4636 tg3_eee_pull_config(tp, &eee);
4637
4638 if (tp->eee.eee_enabled) {
4639 if (tp->eee.advertised != eee.advertised ||
4640 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4641 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4642 return false;
4643 } else {
4644 /* EEE is disabled but we're advertising */
4645 if (eee.advertised)
4646 return false;
4647 }
4648
4649 return true;
4650 }
4651
4652 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4653 {
4654 u32 advmsk, tgtadv, advertising;
4655
4656 advertising = tp->link_config.advertising;
4657 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4658
4659 advmsk = ADVERTISE_ALL;
4660 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4661 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4662 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4663 }
4664
4665 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4666 return false;
4667
4668 if ((*lcladv & advmsk) != tgtadv)
4669 return false;
4670
4671 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4672 u32 tg3_ctrl;
4673
4674 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4675
4676 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4677 return false;
4678
4679 if (tgtadv &&
4680 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4681 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4682 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4683 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4684 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4685 } else {
4686 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4687 }
4688
4689 if (tg3_ctrl != tgtadv)
4690 return false;
4691 }
4692
4693 return true;
4694 }
4695
4696 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4697 {
4698 u32 lpeth = 0;
4699
4700 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4701 u32 val;
4702
4703 if (tg3_readphy(tp, MII_STAT1000, &val))
4704 return false;
4705
4706 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4707 }
4708
4709 if (tg3_readphy(tp, MII_LPA, rmtadv))
4710 return false;
4711
4712 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4713 tp->link_config.rmt_adv = lpeth;
4714
4715 return true;
4716 }
4717
4718 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4719 {
4720 if (curr_link_up != tp->link_up) {
4721 if (curr_link_up) {
4722 netif_carrier_on(tp->dev);
4723 } else {
4724 netif_carrier_off(tp->dev);
4725 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4726 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4727 }
4728
4729 tg3_link_report(tp);
4730 return true;
4731 }
4732
4733 return false;
4734 }
4735
4736 static void tg3_clear_mac_status(struct tg3 *tp)
4737 {
4738 tw32(MAC_EVENT, 0);
4739
4740 tw32_f(MAC_STATUS,
4741 MAC_STATUS_SYNC_CHANGED |
4742 MAC_STATUS_CFG_CHANGED |
4743 MAC_STATUS_MI_COMPLETION |
4744 MAC_STATUS_LNKSTATE_CHANGED);
4745 udelay(40);
4746 }
4747
4748 static void tg3_setup_eee(struct tg3 *tp)
4749 {
4750 u32 val;
4751
4752 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4753 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4754 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4755 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4756
4757 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4758
4759 tw32_f(TG3_CPMU_EEE_CTRL,
4760 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4761
4762 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4763 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4764 TG3_CPMU_EEEMD_LPI_IN_RX |
4765 TG3_CPMU_EEEMD_EEE_ENABLE;
4766
4767 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4768 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4769
4770 if (tg3_flag(tp, ENABLE_APE))
4771 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4772
4773 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4774
4775 tw32_f(TG3_CPMU_EEE_DBTMR1,
4776 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4777 (tp->eee.tx_lpi_timer & 0xffff));
4778
4779 tw32_f(TG3_CPMU_EEE_DBTMR2,
4780 TG3_CPMU_DBTMR2_APE_TX_2047US |
4781 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4782 }
4783
4784 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4785 {
4786 bool current_link_up;
4787 u32 bmsr, val;
4788 u32 lcl_adv, rmt_adv;
4789 u16 current_speed;
4790 u8 current_duplex;
4791 int i, err;
4792
4793 tg3_clear_mac_status(tp);
4794
4795 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4796 tw32_f(MAC_MI_MODE,
4797 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4798 udelay(80);
4799 }
4800
4801 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4802
4803 /* Some third-party PHYs need to be reset on link going
4804 * down.
4805 */
4806 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4807 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4808 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4809 tp->link_up) {
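/* BMSR latches link-down events, so read it twice; the second read
 * reflects the current link state.
 */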
4810 tg3_readphy(tp, MII_BMSR, &bmsr);
4811 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4812 !(bmsr & BMSR_LSTATUS))
4813 force_reset = true;
4814 }
4815 if (force_reset)
4816 tg3_phy_reset(tp);
4817
4818 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4819 tg3_readphy(tp, MII_BMSR, &bmsr);
4820 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4821 !tg3_flag(tp, INIT_COMPLETE))
4822 bmsr = 0;
4823
4824 if (!(bmsr & BMSR_LSTATUS)) {
4825 err = tg3_init_5401phy_dsp(tp);
4826 if (err)
4827 return err;
4828
4829 tg3_readphy(tp, MII_BMSR, &bmsr);
4830 for (i = 0; i < 1000; i++) {
4831 udelay(10);
4832 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4833 (bmsr & BMSR_LSTATUS)) {
4834 udelay(40);
4835 break;
4836 }
4837 }
4838
4839 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4840 TG3_PHY_REV_BCM5401_B0 &&
4841 !(bmsr & BMSR_LSTATUS) &&
4842 tp->link_config.active_speed == SPEED_1000) {
4843 err = tg3_phy_reset(tp);
4844 if (!err)
4845 err = tg3_init_5401phy_dsp(tp);
4846 if (err)
4847 return err;
4848 }
4849 }
4850 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4851 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4852 /* 5701 {A0,B0} CRC bug workaround */
4853 tg3_writephy(tp, 0x15, 0x0a75);
4854 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4855 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4856 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4857 }
4858
4859 /* Clear pending interrupts... */
4860 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4861 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4862
4863 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4864 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4865 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4866 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4867
4868 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4869 tg3_asic_rev(tp) == ASIC_REV_5701) {
4870 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4871 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4872 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4873 else
4874 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4875 }
4876
4877 current_link_up = false;
4878 current_speed = SPEED_UNKNOWN;
4879 current_duplex = DUPLEX_UNKNOWN;
4880 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4881 tp->link_config.rmt_adv = 0;
4882
4883 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4884 err = tg3_phy_auxctl_read(tp,
4885 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4886 &val);
4887 if (!err && !(val & (1 << 10))) {
4888 tg3_phy_auxctl_write(tp,
4889 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4890 val | (1 << 10));
4891 goto relink;
4892 }
4893 }
4894
4895 bmsr = 0;
4896 for (i = 0; i < 100; i++) {
4897 tg3_readphy(tp, MII_BMSR, &bmsr);
4898 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4899 (bmsr & BMSR_LSTATUS))
4900 break;
4901 udelay(40);
4902 }
4903
4904 if (bmsr & BMSR_LSTATUS) {
4905 u32 aux_stat, bmcr;
4906
4907 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4908 for (i = 0; i < 2000; i++) {
4909 udelay(10);
4910 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4911 aux_stat)
4912 break;
4913 }
4914
4915 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4916 &current_speed,
4917 &current_duplex);
4918
4919 bmcr = 0;
4920 for (i = 0; i < 200; i++) {
4921 tg3_readphy(tp, MII_BMCR, &bmcr);
4922 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4923 continue;
4924 if (bmcr && bmcr != 0x7fff)
4925 break;
4926 udelay(10);
4927 }
4928
4929 lcl_adv = 0;
4930 rmt_adv = 0;
4931
4932 tp->link_config.active_speed = current_speed;
4933 tp->link_config.active_duplex = current_duplex;
4934
4935 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4936 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4937
4938 if ((bmcr & BMCR_ANENABLE) &&
4939 eee_config_ok &&
4940 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4941 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4942 current_link_up = true;
4943
4944 /* EEE setting changes take effect only after a PHY
4945 * reset. If we have skipped a reset because Link Flap
4946 * Avoidance is enabled, do it now.
4947 */
4948 if (!eee_config_ok &&
4949 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4950 !force_reset) {
4951 tg3_setup_eee(tp);
4952 tg3_phy_reset(tp);
4953 }
4954 } else {
4955 if (!(bmcr & BMCR_ANENABLE) &&
4956 tp->link_config.speed == current_speed &&
4957 tp->link_config.duplex == current_duplex) {
4958 current_link_up = true;
4959 }
4960 }
4961
4962 if (current_link_up &&
4963 tp->link_config.active_duplex == DUPLEX_FULL) {
4964 u32 reg, bit;
4965
4966 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4967 reg = MII_TG3_FET_GEN_STAT;
4968 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4969 } else {
4970 reg = MII_TG3_EXT_STAT;
4971 bit = MII_TG3_EXT_STAT_MDIX;
4972 }
4973
4974 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4975 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4976
4977 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4978 }
4979 }
4980
4981 relink:
4982 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4983 tg3_phy_copper_begin(tp);
4984
4985 if (tg3_flag(tp, ROBOSWITCH)) {
4986 current_link_up = true;
4987 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4988 current_speed = SPEED_1000;
4989 current_duplex = DUPLEX_FULL;
4990 tp->link_config.active_speed = current_speed;
4991 tp->link_config.active_duplex = current_duplex;
4992 }
4993
4994 tg3_readphy(tp, MII_BMSR, &bmsr);
4995 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4996 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4997 current_link_up = true;
4998 }
4999
5000 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5001 if (current_link_up) {
5002 if (tp->link_config.active_speed == SPEED_100 ||
5003 tp->link_config.active_speed == SPEED_10)
5004 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5005 else
5006 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5007 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5008 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5009 else
5010 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5011
5012 /* In order for the 5750 core in the BCM4785 chip to work properly
5013 * in RGMII mode, the LED Control Register must be set up.
5014 */
5015 if (tg3_flag(tp, RGMII_MODE)) {
5016 u32 led_ctrl = tr32(MAC_LED_CTRL);
5017 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5018
5019 if (tp->link_config.active_speed == SPEED_10)
5020 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5021 else if (tp->link_config.active_speed == SPEED_100)
5022 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5023 LED_CTRL_100MBPS_ON);
5024 else if (tp->link_config.active_speed == SPEED_1000)
5025 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5026 LED_CTRL_1000MBPS_ON);
5027
5028 tw32(MAC_LED_CTRL, led_ctrl);
5029 udelay(40);
5030 }
5031
5032 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5033 if (tp->link_config.active_duplex == DUPLEX_HALF)
5034 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5035
5036 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5037 if (current_link_up &&
5038 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5039 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5040 else
5041 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5042 }
5043
5044 /* ??? Without this setting Netgear GA302T PHY does not
5045 * ??? send/receive packets...
5046 */
5047 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5048 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5049 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5050 tw32_f(MAC_MI_MODE, tp->mi_mode);
5051 udelay(80);
5052 }
5053
5054 tw32_f(MAC_MODE, tp->mac_mode);
5055 udelay(40);
5056
5057 tg3_phy_eee_adjust(tp, current_link_up);
5058
5059 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5060 /* Polled via timer. */
5061 tw32_f(MAC_EVENT, 0);
5062 } else {
5063 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5064 }
5065 udelay(40);
5066
5067 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5068 current_link_up &&
5069 tp->link_config.active_speed == SPEED_1000 &&
5070 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5071 udelay(120);
5072 tw32_f(MAC_STATUS,
5073 (MAC_STATUS_SYNC_CHANGED |
5074 MAC_STATUS_CFG_CHANGED));
5075 udelay(40);
5076 tg3_write_mem(tp,
5077 NIC_SRAM_FIRMWARE_MBOX,
5078 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5079 }
5080
5081 /* Prevent send BD corruption. */
5082 if (tg3_flag(tp, CLKREQ_BUG)) {
5083 if (tp->link_config.active_speed == SPEED_100 ||
5084 tp->link_config.active_speed == SPEED_10)
5085 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5086 PCI_EXP_LNKCTL_CLKREQ_EN);
5087 else
5088 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5089 PCI_EXP_LNKCTL_CLKREQ_EN);
5090 }
5091
5092 tg3_test_and_report_link_chg(tp, current_link_up);
5093
5094 return 0;
5095 }
5096
5097 struct tg3_fiber_aneginfo {
5098 int state;
5099 #define ANEG_STATE_UNKNOWN 0
5100 #define ANEG_STATE_AN_ENABLE 1
5101 #define ANEG_STATE_RESTART_INIT 2
5102 #define ANEG_STATE_RESTART 3
5103 #define ANEG_STATE_DISABLE_LINK_OK 4
5104 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5105 #define ANEG_STATE_ABILITY_DETECT 6
5106 #define ANEG_STATE_ACK_DETECT_INIT 7
5107 #define ANEG_STATE_ACK_DETECT 8
5108 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5109 #define ANEG_STATE_COMPLETE_ACK 10
5110 #define ANEG_STATE_IDLE_DETECT_INIT 11
5111 #define ANEG_STATE_IDLE_DETECT 12
5112 #define ANEG_STATE_LINK_OK 13
5113 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5114 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5115
5116 u32 flags;
5117 #define MR_AN_ENABLE 0x00000001
5118 #define MR_RESTART_AN 0x00000002
5119 #define MR_AN_COMPLETE 0x00000004
5120 #define MR_PAGE_RX 0x00000008
5121 #define MR_NP_LOADED 0x00000010
5122 #define MR_TOGGLE_TX 0x00000020
5123 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5124 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5125 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5126 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5127 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5128 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5129 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5130 #define MR_TOGGLE_RX 0x00002000
5131 #define MR_NP_RX 0x00004000
5132
5133 #define MR_LINK_OK 0x80000000
5134
5135 unsigned long link_time, cur_time;
5136
5137 u32 ability_match_cfg;
5138 int ability_match_count;
5139
5140 char ability_match, idle_match, ack_match;
5141
5142 u32 txconfig, rxconfig;
5143 #define ANEG_CFG_NP 0x00000080
5144 #define ANEG_CFG_ACK 0x00000040
5145 #define ANEG_CFG_RF2 0x00000020
5146 #define ANEG_CFG_RF1 0x00000010
5147 #define ANEG_CFG_PS2 0x00000001
5148 #define ANEG_CFG_PS1 0x00008000
5149 #define ANEG_CFG_HD 0x00004000
5150 #define ANEG_CFG_FD 0x00002000
5151 #define ANEG_CFG_INVAL 0x00001f06
5152
5153 };
5154 #define ANEG_OK 0
5155 #define ANEG_DONE 1
5156 #define ANEG_TIMER_ENAB 2
5157 #define ANEG_FAILED -1
5158
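/* The state machine is ticked about once per microsecond by
 * fiber_autoneg(), so the settle time below is roughly 10 ms.
 */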
5159 #define ANEG_STATE_SETTLE_TIME 10000
5160
5161 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5162 struct tg3_fiber_aneginfo *ap)
5163 {
5164 u16 flowctrl;
5165 unsigned long delta;
5166 u32 rx_cfg_reg;
5167 int ret;
5168
5169 if (ap->state == ANEG_STATE_UNKNOWN) {
5170 ap->rxconfig = 0;
5171 ap->link_time = 0;
5172 ap->cur_time = 0;
5173 ap->ability_match_cfg = 0;
5174 ap->ability_match_count = 0;
5175 ap->ability_match = 0;
5176 ap->idle_match = 0;
5177 ap->ack_match = 0;
5178 }
5179 ap->cur_time++;
5180
5181 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5182 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5183
5184 if (rx_cfg_reg != ap->ability_match_cfg) {
5185 ap->ability_match_cfg = rx_cfg_reg;
5186 ap->ability_match = 0;
5187 ap->ability_match_count = 0;
5188 } else {
5189 if (++ap->ability_match_count > 1) {
5190 ap->ability_match = 1;
5191 ap->ability_match_cfg = rx_cfg_reg;
5192 }
5193 }
5194 if (rx_cfg_reg & ANEG_CFG_ACK)
5195 ap->ack_match = 1;
5196 else
5197 ap->ack_match = 0;
5198
5199 ap->idle_match = 0;
5200 } else {
5201 ap->idle_match = 1;
5202 ap->ability_match_cfg = 0;
5203 ap->ability_match_count = 0;
5204 ap->ability_match = 0;
5205 ap->ack_match = 0;
5206
5207 rx_cfg_reg = 0;
5208 }
5209
5210 ap->rxconfig = rx_cfg_reg;
5211 ret = ANEG_OK;
5212
5213 switch (ap->state) {
5214 case ANEG_STATE_UNKNOWN:
5215 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5216 ap->state = ANEG_STATE_AN_ENABLE;
5217
5218 /* fallthru */
5219 case ANEG_STATE_AN_ENABLE:
5220 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5221 if (ap->flags & MR_AN_ENABLE) {
5222 ap->link_time = 0;
5223 ap->cur_time = 0;
5224 ap->ability_match_cfg = 0;
5225 ap->ability_match_count = 0;
5226 ap->ability_match = 0;
5227 ap->idle_match = 0;
5228 ap->ack_match = 0;
5229
5230 ap->state = ANEG_STATE_RESTART_INIT;
5231 } else {
5232 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5233 }
5234 break;
5235
5236 case ANEG_STATE_RESTART_INIT:
5237 ap->link_time = ap->cur_time;
5238 ap->flags &= ~(MR_NP_LOADED);
5239 ap->txconfig = 0;
5240 tw32(MAC_TX_AUTO_NEG, 0);
5241 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5242 tw32_f(MAC_MODE, tp->mac_mode);
5243 udelay(40);
5244
5245 ret = ANEG_TIMER_ENAB;
5246 ap->state = ANEG_STATE_RESTART;
5247
5248 /* fallthru */
5249 case ANEG_STATE_RESTART:
5250 delta = ap->cur_time - ap->link_time;
5251 if (delta > ANEG_STATE_SETTLE_TIME)
5252 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5253 else
5254 ret = ANEG_TIMER_ENAB;
5255 break;
5256
5257 case ANEG_STATE_DISABLE_LINK_OK:
5258 ret = ANEG_DONE;
5259 break;
5260
5261 case ANEG_STATE_ABILITY_DETECT_INIT:
5262 ap->flags &= ~(MR_TOGGLE_TX);
5263 ap->txconfig = ANEG_CFG_FD;
5264 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5265 if (flowctrl & ADVERTISE_1000XPAUSE)
5266 ap->txconfig |= ANEG_CFG_PS1;
5267 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5268 ap->txconfig |= ANEG_CFG_PS2;
5269 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5270 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5271 tw32_f(MAC_MODE, tp->mac_mode);
5272 udelay(40);
5273
5274 ap->state = ANEG_STATE_ABILITY_DETECT;
5275 break;
5276
5277 case ANEG_STATE_ABILITY_DETECT:
5278 if (ap->ability_match != 0 && ap->rxconfig != 0)
5279 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5280 break;
5281
5282 case ANEG_STATE_ACK_DETECT_INIT:
5283 ap->txconfig |= ANEG_CFG_ACK;
5284 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5285 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5286 tw32_f(MAC_MODE, tp->mac_mode);
5287 udelay(40);
5288
5289 ap->state = ANEG_STATE_ACK_DETECT;
5290
5291 /* fallthru */
5292 case ANEG_STATE_ACK_DETECT:
5293 if (ap->ack_match != 0) {
5294 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5295 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5296 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5297 } else {
5298 ap->state = ANEG_STATE_AN_ENABLE;
5299 }
5300 } else if (ap->ability_match != 0 &&
5301 ap->rxconfig == 0) {
5302 ap->state = ANEG_STATE_AN_ENABLE;
5303 }
5304 break;
5305
5306 case ANEG_STATE_COMPLETE_ACK_INIT:
5307 if (ap->rxconfig & ANEG_CFG_INVAL) {
5308 ret = ANEG_FAILED;
5309 break;
5310 }
5311 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5312 MR_LP_ADV_HALF_DUPLEX |
5313 MR_LP_ADV_SYM_PAUSE |
5314 MR_LP_ADV_ASYM_PAUSE |
5315 MR_LP_ADV_REMOTE_FAULT1 |
5316 MR_LP_ADV_REMOTE_FAULT2 |
5317 MR_LP_ADV_NEXT_PAGE |
5318 MR_TOGGLE_RX |
5319 MR_NP_RX);
5320 if (ap->rxconfig & ANEG_CFG_FD)
5321 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5322 if (ap->rxconfig & ANEG_CFG_HD)
5323 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5324 if (ap->rxconfig & ANEG_CFG_PS1)
5325 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5326 if (ap->rxconfig & ANEG_CFG_PS2)
5327 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5328 if (ap->rxconfig & ANEG_CFG_RF1)
5329 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5330 if (ap->rxconfig & ANEG_CFG_RF2)
5331 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5332 if (ap->rxconfig & ANEG_CFG_NP)
5333 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5334
5335 ap->link_time = ap->cur_time;
5336
5337 ap->flags ^= (MR_TOGGLE_TX);
5338 if (ap->rxconfig & 0x0008)
5339 ap->flags |= MR_TOGGLE_RX;
5340 if (ap->rxconfig & ANEG_CFG_NP)
5341 ap->flags |= MR_NP_RX;
5342 ap->flags |= MR_PAGE_RX;
5343
5344 ap->state = ANEG_STATE_COMPLETE_ACK;
5345 ret = ANEG_TIMER_ENAB;
5346 break;
5347
5348 case ANEG_STATE_COMPLETE_ACK:
5349 if (ap->ability_match != 0 &&
5350 ap->rxconfig == 0) {
5351 ap->state = ANEG_STATE_AN_ENABLE;
5352 break;
5353 }
5354 delta = ap->cur_time - ap->link_time;
5355 if (delta > ANEG_STATE_SETTLE_TIME) {
5356 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5357 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5358 } else {
5359 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5360 !(ap->flags & MR_NP_RX)) {
5361 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5362 } else {
5363 ret = ANEG_FAILED;
5364 }
5365 }
5366 }
5367 break;
5368
5369 case ANEG_STATE_IDLE_DETECT_INIT:
5370 ap->link_time = ap->cur_time;
5371 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5372 tw32_f(MAC_MODE, tp->mac_mode);
5373 udelay(40);
5374
5375 ap->state = ANEG_STATE_IDLE_DETECT;
5376 ret = ANEG_TIMER_ENAB;
5377 break;
5378
5379 case ANEG_STATE_IDLE_DETECT:
5380 if (ap->ability_match != 0 &&
5381 ap->rxconfig == 0) {
5382 ap->state = ANEG_STATE_AN_ENABLE;
5383 break;
5384 }
5385 delta = ap->cur_time - ap->link_time;
5386 if (delta > ANEG_STATE_SETTLE_TIME) {
5387 /* XXX another gem from the Broadcom driver :( */
5388 ap->state = ANEG_STATE_LINK_OK;
5389 }
5390 break;
5391
5392 case ANEG_STATE_LINK_OK:
5393 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5394 ret = ANEG_DONE;
5395 break;
5396
5397 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5398 /* ??? unimplemented */
5399 break;
5400
5401 case ANEG_STATE_NEXT_PAGE_WAIT:
5402 /* ??? unimplemented */
5403 break;
5404
5405 default:
5406 ret = ANEG_FAILED;
5407 break;
5408 }
5409
5410 return ret;
5411 }
5412
5413 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5414 {
5415 int res = 0;
5416 struct tg3_fiber_aneginfo aninfo;
5417 int status = ANEG_FAILED;
5418 unsigned int tick;
5419 u32 tmp;
5420
5421 tw32_f(MAC_TX_AUTO_NEG, 0);
5422
5423 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5424 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5425 udelay(40);
5426
5427 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5428 udelay(40);
5429
5430 memset(&aninfo, 0, sizeof(aninfo));
5431 aninfo.flags |= MR_AN_ENABLE;
5432 aninfo.state = ANEG_STATE_UNKNOWN;
5433 aninfo.cur_time = 0;
5434 tick = 0;
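/* Tick the software autoneg state machine roughly once per
 * microsecond, for at most ~195 ms, until it completes or fails.
 */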
5435 while (++tick < 195000) {
5436 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5437 if (status == ANEG_DONE || status == ANEG_FAILED)
5438 break;
5439
5440 udelay(1);
5441 }
5442
5443 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5444 tw32_f(MAC_MODE, tp->mac_mode);
5445 udelay(40);
5446
5447 *txflags = aninfo.txconfig;
5448 *rxflags = aninfo.flags;
5449
5450 if (status == ANEG_DONE &&
5451 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5452 MR_LP_ADV_FULL_DUPLEX)))
5453 res = 1;
5454
5455 return res;
5456 }
5457
5458 static void tg3_init_bcm8002(struct tg3 *tp)
5459 {
5460 u32 mac_status = tr32(MAC_STATUS);
5461 int i;
5462
5463 /* Reset on first-time init, or when we have a link. */
5464 if (tg3_flag(tp, INIT_COMPLETE) &&
5465 !(mac_status & MAC_STATUS_PCS_SYNCED))
5466 return;
5467
5468 /* Set PLL lock range. */
5469 tg3_writephy(tp, 0x16, 0x8007);
5470
5471 /* SW reset */
5472 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5473
5474 /* Wait for reset to complete. */
5475 /* XXX schedule_timeout() ... */
5476 for (i = 0; i < 500; i++)
5477 udelay(10);
5478
5479 /* Config mode; select PMA/Ch 1 regs. */
5480 tg3_writephy(tp, 0x10, 0x8411);
5481
5482 /* Enable auto-lock and comdet, select txclk for tx. */
5483 tg3_writephy(tp, 0x11, 0x0a10);
5484
5485 tg3_writephy(tp, 0x18, 0x00a0);
5486 tg3_writephy(tp, 0x16, 0x41ff);
5487
5488 /* Assert and deassert POR. */
5489 tg3_writephy(tp, 0x13, 0x0400);
5490 udelay(40);
5491 tg3_writephy(tp, 0x13, 0x0000);
5492
5493 tg3_writephy(tp, 0x11, 0x0a50);
5494 udelay(40);
5495 tg3_writephy(tp, 0x11, 0x0a10);
5496
5497 /* Wait for signal to stabilize */
5498 /* XXX schedule_timeout() ... */
5499 for (i = 0; i < 15000; i++)
5500 udelay(10);
5501
5502 /* Deselect the channel register so we can read the PHYID
5503 * later.
5504 */
5505 tg3_writephy(tp, 0x10, 0x8011);
5506 }
5507
5508 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5509 {
5510 u16 flowctrl;
5511 bool current_link_up;
5512 u32 sg_dig_ctrl, sg_dig_status;
5513 u32 serdes_cfg, expected_sg_dig_ctrl;
5514 int workaround, port_a;
5515
5516 serdes_cfg = 0;
5517 expected_sg_dig_ctrl = 0;
5518 workaround = 0;
5519 port_a = 1;
5520 current_link_up = false;
5521
5522 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5523 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5524 workaround = 1;
5525 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5526 port_a = 0;
5527
5528 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5529 /* preserve bits 20-23 for voltage regulator */
5530 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5531 }
5532
5533 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5534
5535 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5536 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5537 if (workaround) {
5538 u32 val = serdes_cfg;
5539
5540 if (port_a)
5541 val |= 0xc010000;
5542 else
5543 val |= 0x4010000;
5544 tw32_f(MAC_SERDES_CFG, val);
5545 }
5546
5547 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5548 }
5549 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5550 tg3_setup_flow_control(tp, 0, 0);
5551 current_link_up = true;
5552 }
5553 goto out;
5554 }
5555
5556 /* Want auto-negotiation. */
5557 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5558
5559 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5560 if (flowctrl & ADVERTISE_1000XPAUSE)
5561 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5562 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5563 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5564
5565 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5566 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5567 tp->serdes_counter &&
5568 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5569 MAC_STATUS_RCVD_CFG)) ==
5570 MAC_STATUS_PCS_SYNCED)) {
5571 tp->serdes_counter--;
5572 current_link_up = true;
5573 goto out;
5574 }
5575 restart_autoneg:
5576 if (workaround)
5577 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5578 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5579 udelay(5);
5580 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5581
5582 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5583 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5584 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5585 MAC_STATUS_SIGNAL_DET)) {
5586 sg_dig_status = tr32(SG_DIG_STATUS);
5587 mac_status = tr32(MAC_STATUS);
5588
5589 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5590 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5591 u32 local_adv = 0, remote_adv = 0;
5592
5593 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5594 local_adv |= ADVERTISE_1000XPAUSE;
5595 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5596 local_adv |= ADVERTISE_1000XPSE_ASYM;
5597
5598 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5599 remote_adv |= LPA_1000XPAUSE;
5600 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5601 remote_adv |= LPA_1000XPAUSE_ASYM;
5602
5603 tp->link_config.rmt_adv =
5604 mii_adv_to_ethtool_adv_x(remote_adv);
5605
5606 tg3_setup_flow_control(tp, local_adv, remote_adv);
5607 current_link_up = true;
5608 tp->serdes_counter = 0;
5609 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5610 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5611 if (tp->serdes_counter)
5612 tp->serdes_counter--;
5613 else {
5614 if (workaround) {
5615 u32 val = serdes_cfg;
5616
5617 if (port_a)
5618 val |= 0xc010000;
5619 else
5620 val |= 0x4010000;
5621
5622 tw32_f(MAC_SERDES_CFG, val);
5623 }
5624
5625 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5626 udelay(40);
5627
5628 /* Parallel detection: the link is up only if we have
5629 * PCS_SYNC and are not receiving config code words.
5630 */
5631 mac_status = tr32(MAC_STATUS);
5632 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5633 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5634 tg3_setup_flow_control(tp, 0, 0);
5635 current_link_up = true;
5636 tp->phy_flags |=
5637 TG3_PHYFLG_PARALLEL_DETECT;
5638 tp->serdes_counter =
5639 SERDES_PARALLEL_DET_TIMEOUT;
5640 } else
5641 goto restart_autoneg;
5642 }
5643 }
5644 } else {
5645 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5646 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5647 }
5648
5649 out:
5650 return current_link_up;
5651 }
5652
5653 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5654 {
5655 bool current_link_up = false;
5656
5657 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5658 goto out;
5659
5660 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5661 u32 txflags, rxflags;
5662 int i;
5663
5664 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5665 u32 local_adv = 0, remote_adv = 0;
5666
5667 if (txflags & ANEG_CFG_PS1)
5668 local_adv |= ADVERTISE_1000XPAUSE;
5669 if (txflags & ANEG_CFG_PS2)
5670 local_adv |= ADVERTISE_1000XPSE_ASYM;
5671
5672 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5673 remote_adv |= LPA_1000XPAUSE;
5674 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5675 remote_adv |= LPA_1000XPAUSE_ASYM;
5676
5677 tp->link_config.rmt_adv =
5678 mii_adv_to_ethtool_adv_x(remote_adv);
5679
5680 tg3_setup_flow_control(tp, local_adv, remote_adv);
5681
5682 current_link_up = true;
5683 }
5684 for (i = 0; i < 30; i++) {
5685 udelay(20);
5686 tw32_f(MAC_STATUS,
5687 (MAC_STATUS_SYNC_CHANGED |
5688 MAC_STATUS_CFG_CHANGED));
5689 udelay(40);
5690 if ((tr32(MAC_STATUS) &
5691 (MAC_STATUS_SYNC_CHANGED |
5692 MAC_STATUS_CFG_CHANGED)) == 0)
5693 break;
5694 }
5695
5696 mac_status = tr32(MAC_STATUS);
5697 if (!current_link_up &&
5698 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5699 !(mac_status & MAC_STATUS_RCVD_CFG))
5700 current_link_up = true;
5701 } else {
5702 tg3_setup_flow_control(tp, 0, 0);
5703
5704 /* Forcing 1000FD link up. */
5705 current_link_up = true;
5706
5707 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5708 udelay(40);
5709
5710 tw32_f(MAC_MODE, tp->mac_mode);
5711 udelay(40);
5712 }
5713
5714 out:
5715 return current_link_up;
5716 }
5717
5718 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5719 {
5720 u32 orig_pause_cfg;
5721 u16 orig_active_speed;
5722 u8 orig_active_duplex;
5723 u32 mac_status;
5724 bool current_link_up;
5725 int i;
5726
5727 orig_pause_cfg = tp->link_config.active_flowctrl;
5728 orig_active_speed = tp->link_config.active_speed;
5729 orig_active_duplex = tp->link_config.active_duplex;
5730
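/* If hardware autoneg is not in use and the link is already up,
 * synced, and free of incoming config codewords, just ack the
 * status bits and keep the existing link.
 */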
5731 if (!tg3_flag(tp, HW_AUTONEG) &&
5732 tp->link_up &&
5733 tg3_flag(tp, INIT_COMPLETE)) {
5734 mac_status = tr32(MAC_STATUS);
5735 mac_status &= (MAC_STATUS_PCS_SYNCED |
5736 MAC_STATUS_SIGNAL_DET |
5737 MAC_STATUS_CFG_CHANGED |
5738 MAC_STATUS_RCVD_CFG);
5739 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5740 MAC_STATUS_SIGNAL_DET)) {
5741 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5742 MAC_STATUS_CFG_CHANGED));
5743 return 0;
5744 }
5745 }
5746
5747 tw32_f(MAC_TX_AUTO_NEG, 0);
5748
5749 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5750 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5751 tw32_f(MAC_MODE, tp->mac_mode);
5752 udelay(40);
5753
5754 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5755 tg3_init_bcm8002(tp);
5756
5757 /* Enable link change event even when serdes polling. */
5758 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5759 udelay(40);
5760
5761 current_link_up = false;
5762 tp->link_config.rmt_adv = 0;
5763 mac_status = tr32(MAC_STATUS);
5764
5765 if (tg3_flag(tp, HW_AUTONEG))
5766 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5767 else
5768 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5769
5770 tp->napi[0].hw_status->status =
5771 (SD_STATUS_UPDATED |
5772 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5773
5774 for (i = 0; i < 100; i++) {
5775 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5776 MAC_STATUS_CFG_CHANGED));
5777 udelay(5);
5778 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5779 MAC_STATUS_CFG_CHANGED |
5780 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5781 break;
5782 }
5783
5784 mac_status = tr32(MAC_STATUS);
5785 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5786 current_link_up = false;
5787 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5788 tp->serdes_counter == 0) {
5789 tw32_f(MAC_MODE, (tp->mac_mode |
5790 MAC_MODE_SEND_CONFIGS));
5791 udelay(1);
5792 tw32_f(MAC_MODE, tp->mac_mode);
5793 }
5794 }
5795
5796 if (current_link_up) {
5797 tp->link_config.active_speed = SPEED_1000;
5798 tp->link_config.active_duplex = DUPLEX_FULL;
5799 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5800 LED_CTRL_LNKLED_OVERRIDE |
5801 LED_CTRL_1000MBPS_ON));
5802 } else {
5803 tp->link_config.active_speed = SPEED_UNKNOWN;
5804 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5805 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5806 LED_CTRL_LNKLED_OVERRIDE |
5807 LED_CTRL_TRAFFIC_OVERRIDE));
5808 }
5809
5810 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5811 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5812 if (orig_pause_cfg != now_pause_cfg ||
5813 orig_active_speed != tp->link_config.active_speed ||
5814 orig_active_duplex != tp->link_config.active_duplex)
5815 tg3_link_report(tp);
5816 }
5817
5818 return 0;
5819 }
5820
5821 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5822 {
5823 int err = 0;
5824 u32 bmsr, bmcr;
5825 u16 current_speed = SPEED_UNKNOWN;
5826 u8 current_duplex = DUPLEX_UNKNOWN;
5827 bool current_link_up = false;
5828 u32 local_adv, remote_adv, sgsr;
5829
5830 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5831 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5832 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5833 (sgsr & SERDES_TG3_SGMII_MODE)) {
5834
5835 if (force_reset)
5836 tg3_phy_reset(tp);
5837
5838 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5839
5840 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5841 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5842 } else {
5843 current_link_up = true;
5844 if (sgsr & SERDES_TG3_SPEED_1000) {
5845 current_speed = SPEED_1000;
5846 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5847 } else if (sgsr & SERDES_TG3_SPEED_100) {
5848 current_speed = SPEED_100;
5849 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5850 } else {
5851 current_speed = SPEED_10;
5852 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5853 }
5854
5855 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5856 current_duplex = DUPLEX_FULL;
5857 else
5858 current_duplex = DUPLEX_HALF;
5859 }
5860
5861 tw32_f(MAC_MODE, tp->mac_mode);
5862 udelay(40);
5863
5864 tg3_clear_mac_status(tp);
5865
5866 goto fiber_setup_done;
5867 }
5868
5869 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5870 tw32_f(MAC_MODE, tp->mac_mode);
5871 udelay(40);
5872
5873 tg3_clear_mac_status(tp);
5874
5875 if (force_reset)
5876 tg3_phy_reset(tp);
5877
5878 tp->link_config.rmt_adv = 0;
5879
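/* BMSR's link status bit is latched low, so it is read twice below;
 * the second read reflects the current link state.
 */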
5880 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5881 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5882 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5883 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5884 bmsr |= BMSR_LSTATUS;
5885 else
5886 bmsr &= ~BMSR_LSTATUS;
5887 }
5888
5889 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5890
5891 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5892 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5893 /* do nothing, just check for link up at the end */
5894 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5895 u32 adv, newadv;
5896
5897 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5898 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5899 ADVERTISE_1000XPAUSE |
5900 ADVERTISE_1000XPSE_ASYM |
5901 ADVERTISE_SLCT);
5902
5903 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5904 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5905
5906 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5907 tg3_writephy(tp, MII_ADVERTISE, newadv);
5908 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5909 tg3_writephy(tp, MII_BMCR, bmcr);
5910
5911 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5912 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5913 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5914
5915 return err;
5916 }
5917 } else {
5918 u32 new_bmcr;
5919
5920 bmcr &= ~BMCR_SPEED1000;
5921 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5922
5923 if (tp->link_config.duplex == DUPLEX_FULL)
5924 new_bmcr |= BMCR_FULLDPLX;
5925
5926 if (new_bmcr != bmcr) {
5927 /* BMCR_SPEED1000 is a reserved bit that needs
5928 * to be set on write.
5929 */
5930 new_bmcr |= BMCR_SPEED1000;
5931
5932 /* Force a linkdown */
5933 if (tp->link_up) {
5934 u32 adv;
5935
5936 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5937 adv &= ~(ADVERTISE_1000XFULL |
5938 ADVERTISE_1000XHALF |
5939 ADVERTISE_SLCT);
5940 tg3_writephy(tp, MII_ADVERTISE, adv);
5941 tg3_writephy(tp, MII_BMCR, bmcr |
5942 BMCR_ANRESTART |
5943 BMCR_ANENABLE);
5944 udelay(10);
5945 tg3_carrier_off(tp);
5946 }
5947 tg3_writephy(tp, MII_BMCR, new_bmcr);
5948 bmcr = new_bmcr;
5949 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5950 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5951 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5952 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5953 bmsr |= BMSR_LSTATUS;
5954 else
5955 bmsr &= ~BMSR_LSTATUS;
5956 }
5957 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5958 }
5959 }
5960
5961 if (bmsr & BMSR_LSTATUS) {
5962 current_speed = SPEED_1000;
5963 current_link_up = true;
5964 if (bmcr & BMCR_FULLDPLX)
5965 current_duplex = DUPLEX_FULL;
5966 else
5967 current_duplex = DUPLEX_HALF;
5968
5969 local_adv = 0;
5970 remote_adv = 0;
5971
5972 if (bmcr & BMCR_ANENABLE) {
5973 u32 common;
5974
5975 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5976 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5977 common = local_adv & remote_adv;
5978 if (common & (ADVERTISE_1000XHALF |
5979 ADVERTISE_1000XFULL)) {
5980 if (common & ADVERTISE_1000XFULL)
5981 current_duplex = DUPLEX_FULL;
5982 else
5983 current_duplex = DUPLEX_HALF;
5984
5985 tp->link_config.rmt_adv =
5986 mii_adv_to_ethtool_adv_x(remote_adv);
5987 } else if (!tg3_flag(tp, 5780_CLASS)) {
5988 /* Link is up via parallel detect */
5989 } else {
5990 current_link_up = false;
5991 }
5992 }
5993 }
5994
5995 fiber_setup_done:
5996 if (current_link_up && current_duplex == DUPLEX_FULL)
5997 tg3_setup_flow_control(tp, local_adv, remote_adv);
5998
5999 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6000 if (tp->link_config.active_duplex == DUPLEX_HALF)
6001 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
6002
6003 tw32_f(MAC_MODE, tp->mac_mode);
6004 udelay(40);
6005
6006 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6007
6008 tp->link_config.active_speed = current_speed;
6009 tp->link_config.active_duplex = current_duplex;
6010
6011 tg3_test_and_report_link_chg(tp, current_link_up);
6012 return err;
6013 }
6014
6015 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6016 {
6017 if (tp->serdes_counter) {
6018 /* Give autoneg time to complete. */
6019 tp->serdes_counter--;
6020 return;
6021 }
6022
6023 if (!tp->link_up &&
6024 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6025 u32 bmcr;
6026
6027 tg3_readphy(tp, MII_BMCR, &bmcr);
6028 if (bmcr & BMCR_ANENABLE) {
6029 u32 phy1, phy2;
6030
6031 /* Select shadow register 0x1f */
6032 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6033 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6034
6035 /* Select expansion interrupt status register */
6036 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6037 MII_TG3_DSP_EXP1_INT_STAT);
6038 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6039 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6040
6041 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6042 /* We have signal detect and are not receiving
6043 * config code words, so the link is up via
6044 * parallel detection.
6045 */
6046
6047 bmcr &= ~BMCR_ANENABLE;
6048 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6049 tg3_writephy(tp, MII_BMCR, bmcr);
6050 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6051 }
6052 }
6053 } else if (tp->link_up &&
6054 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6055 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6056 u32 phy2;
6057
6058 /* Select expansion interrupt status register */
6059 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6060 MII_TG3_DSP_EXP1_INT_STAT);
6061 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6062 if (phy2 & 0x20) {
6063 u32 bmcr;
6064
6065 /* Config code words received, turn on autoneg. */
6066 tg3_readphy(tp, MII_BMCR, &bmcr);
6067 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6068
6069 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6070
6071 }
6072 }
6073 }
6074
6075 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6076 {
6077 u32 val;
6078 int err;
6079
6080 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6081 err = tg3_setup_fiber_phy(tp, force_reset);
6082 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6083 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6084 else
6085 err = tg3_setup_copper_phy(tp, force_reset);
6086
6087 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6088 u32 scale;
6089
6090 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6091 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6092 scale = 65;
6093 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6094 scale = 6;
6095 else
6096 scale = 12;
6097
6098 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6099 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6100 tw32(GRC_MISC_CFG, val);
6101 }
6102
6103 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6104 (6 << TX_LENGTHS_IPG_SHIFT);
6105 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6106 tg3_asic_rev(tp) == ASIC_REV_5762)
6107 val |= tr32(MAC_TX_LENGTHS) &
6108 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6109 TX_LENGTHS_CNT_DWN_VAL_MSK);
6110
6111 if (tp->link_config.active_speed == SPEED_1000 &&
6112 tp->link_config.active_duplex == DUPLEX_HALF)
6113 tw32(MAC_TX_LENGTHS, val |
6114 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6115 else
6116 tw32(MAC_TX_LENGTHS, val |
6117 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6118
6119 if (!tg3_flag(tp, 5705_PLUS)) {
6120 if (tp->link_up) {
6121 tw32(HOSTCC_STAT_COAL_TICKS,
6122 tp->coal.stats_block_coalesce_usecs);
6123 } else {
6124 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6125 }
6126 }
6127
6128 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6129 val = tr32(PCIE_PWR_MGMT_THRESH);
6130 if (!tp->link_up)
6131 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6132 tp->pwrmgmt_thresh;
6133 else
6134 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6135 tw32(PCIE_PWR_MGMT_THRESH, val);
6136 }
6137
6138 return err;
6139 }
6140
6141 /* tp->lock must be held */
6142 static u64 tg3_refclk_read(struct tg3 *tp)
6143 {
6144 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6145 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6146 }
6147
6148 /* tp->lock must be held */
6149 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6150 {
6151 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6152
6153 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6154 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6155 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6156 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6157 }
6158
6159 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6160 static inline void tg3_full_unlock(struct tg3 *tp);
6161 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6162 {
6163 struct tg3 *tp = netdev_priv(dev);
6164
6165 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6166 SOF_TIMESTAMPING_RX_SOFTWARE |
6167 SOF_TIMESTAMPING_SOFTWARE;
6168
6169 if (tg3_flag(tp, PTP_CAPABLE)) {
6170 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6171 SOF_TIMESTAMPING_RX_HARDWARE |
6172 SOF_TIMESTAMPING_RAW_HARDWARE;
6173 }
6174
6175 if (tp->ptp_clock)
6176 info->phc_index = ptp_clock_index(tp->ptp_clock);
6177 else
6178 info->phc_index = -1;
6179
6180 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6181
6182 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6183 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6184 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6185 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6186 return 0;
6187 }
6188
6189 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6190 {
6191 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6192 bool neg_adj = false;
6193 u32 correction = 0;
6194
6195 if (ppb < 0) {
6196 neg_adj = true;
6197 ppb = -ppb;
6198 }
6199
6200 /* Frequency adjustment is performed using hardware with a 24-bit
6201 * accumulator and a programmable correction value. On each clock
6202 * cycle, the correction value is added to the accumulator and, when
6203 * it overflows, the time counter is incremented/decremented.
6204 *
6205 * So conversion from ppb to correction value is
6206 * ppb * (1 << 24) / 1000000000
6207 */
6208 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6209 TG3_EAV_REF_CLK_CORRECT_MASK;
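/* Worked example of the conversion above, for illustration (the input
 * value is hypothetical, not from the original source): requesting
 * ppb = 1000 (1 ppm) gives
 *
 *   correction = 1000 * (1 << 24) / 1000000000
 *              = 16777216000 / 1000000000
 *              = 16 (div_u64 truncates)
 *
 * A correction of 16 overflows the 24-bit accumulator once every
 * (1 << 24) / 16 = 1048576 clocks, i.e. one extra (or skipped, when
 * TG3_EAV_REF_CLK_CORRECT_NEG is set) tick per 2^20 cycles, which is
 * roughly 0.95 ppm; the shortfall is the integer-division truncation.
 */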
6210
6211 tg3_full_lock(tp, 0);
6212
6213 if (correction)
6214 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6215 TG3_EAV_REF_CLK_CORRECT_EN |
6216 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6217 else
6218 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6219
6220 tg3_full_unlock(tp);
6221
6222 return 0;
6223 }
6224
6225 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6226 {
6227 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228
6229 tg3_full_lock(tp, 0);
6230 tp->ptp_adjust += delta;
6231 tg3_full_unlock(tp);
6232
6233 return 0;
6234 }
6235
6236 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6237 {
6238 u64 ns;
6239 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6240
6241 tg3_full_lock(tp, 0);
6242 ns = tg3_refclk_read(tp);
6243 ns += tp->ptp_adjust;
6244 tg3_full_unlock(tp);
6245
6246 *ts = ns_to_timespec64(ns);
6247
6248 return 0;
6249 }
6250
6251 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6252 const struct timespec64 *ts)
6253 {
6254 u64 ns;
6255 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6256
6257 ns = timespec64_to_ns(ts);
6258
6259 tg3_full_lock(tp, 0);
6260 tg3_refclk_write(tp, ns);
6261 tp->ptp_adjust = 0;
6262 tg3_full_unlock(tp);
6263
6264 return 0;
6265 }
6266
6267 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6268 struct ptp_clock_request *rq, int on)
6269 {
6270 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6271 u32 clock_ctl;
6272 int rval = 0;
6273
6274 switch (rq->type) {
6275 case PTP_CLK_REQ_PEROUT:
6276 if (rq->perout.index != 0)
6277 return -EINVAL;
6278
6279 tg3_full_lock(tp, 0);
6280 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6281 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6282
6283 if (on) {
6284 u64 nsec;
6285
6286 nsec = rq->perout.start.sec * 1000000000ULL +
6287 rq->perout.start.nsec;
6288
6289 if (rq->perout.period.sec || rq->perout.period.nsec) {
6290 netdev_warn(tp->dev,
6291 "Device supports only a one-shot timesync output, period must be 0\n");
6292 rval = -EINVAL;
6293 goto err_out;
6294 }
6295
6296 if (nsec & (1ULL << 63)) {
6297 netdev_warn(tp->dev,
6298 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6299 rval = -EINVAL;
6300 goto err_out;
6301 }
6302
6303 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6304 tw32(TG3_EAV_WATCHDOG0_MSB,
6305 TG3_EAV_WATCHDOG0_EN |
6306 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6307
6308 tw32(TG3_EAV_REF_CLCK_CTL,
6309 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6310 } else {
6311 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6312 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6313 }
6314
6315 err_out:
6316 tg3_full_unlock(tp);
6317 return rval;
6318
6319 default:
6320 break;
6321 }
6322
6323 return -EOPNOTSUPP;
6324 }
6325
6326 static const struct ptp_clock_info tg3_ptp_caps = {
6327 .owner = THIS_MODULE,
6328 .name = "tg3 clock",
6329 .max_adj = 250000000,
6330 .n_alarm = 0,
6331 .n_ext_ts = 0,
6332 .n_per_out = 1,
6333 .n_pins = 0,
6334 .pps = 0,
6335 .adjfreq = tg3_ptp_adjfreq,
6336 .adjtime = tg3_ptp_adjtime,
6337 .gettime64 = tg3_ptp_gettime,
6338 .settime64 = tg3_ptp_settime,
6339 .enable = tg3_ptp_enable,
6340 };
6341
6342 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6343 struct skb_shared_hwtstamps *timestamp)
6344 {
6345 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6346 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6347 tp->ptp_adjust);
6348 }
6349
6350 /* tp->lock must be held */
6351 static void tg3_ptp_init(struct tg3 *tp)
6352 {
6353 if (!tg3_flag(tp, PTP_CAPABLE))
6354 return;
6355
6356 /* Initialize the hardware clock to the system time. */
6357 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6358 tp->ptp_adjust = 0;
6359 tp->ptp_info = tg3_ptp_caps;
6360 }
6361
6362 /* tp->lock must be held */
6363 static void tg3_ptp_resume(struct tg3 *tp)
6364 {
6365 if (!tg3_flag(tp, PTP_CAPABLE))
6366 return;
6367
6368 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6369 tp->ptp_adjust = 0;
6370 }
6371
6372 static void tg3_ptp_fini(struct tg3 *tp)
6373 {
6374 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6375 return;
6376
6377 ptp_clock_unregister(tp->ptp_clock);
6378 tp->ptp_clock = NULL;
6379 tp->ptp_adjust = 0;
6380 }
6381
6382 static inline int tg3_irq_sync(struct tg3 *tp)
6383 {
6384 return tp->irq_sync;
6385 }
6386
6387 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6388 {
6389 int i;
6390
6391 dst = (u32 *)((u8 *)dst + off);
6392 for (i = 0; i < len; i += sizeof(u32))
6393 *dst++ = tr32(off + i);
6394 }
6395
6396 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6397 {
6398 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6399 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6400 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6401 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6402 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6403 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6404 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6405 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6406 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6407 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6408 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6409 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6410 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6411 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6412 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6413 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6414 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6415 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6416 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6417
6418 if (tg3_flag(tp, SUPPORT_MSIX))
6419 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6420
6421 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6422 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6423 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6424 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6425 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6426 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6427 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6428 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6429
6430 if (!tg3_flag(tp, 5705_PLUS)) {
6431 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6432 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6433 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6434 }
6435
6436 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6437 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6438 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6439 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6440 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6441
6442 if (tg3_flag(tp, NVRAM))
6443 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6444 }
6445
6446 static void tg3_dump_state(struct tg3 *tp)
6447 {
6448 int i;
6449 u32 *regs;
6450
6451 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6452 if (!regs)
6453 return;
6454
6455 if (tg3_flag(tp, PCI_EXPRESS)) {
6456 /* Read up to but not including private PCI registers */
6457 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6458 regs[i / sizeof(u32)] = tr32(i);
6459 } else
6460 tg3_dump_legacy_regs(tp, regs);
6461
6462 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6463 if (!regs[i + 0] && !regs[i + 1] &&
6464 !regs[i + 2] && !regs[i + 3])
6465 continue;
6466
6467 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6468 i * 4,
6469 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6470 }
6471
6472 kfree(regs);
6473
6474 for (i = 0; i < tp->irq_cnt; i++) {
6475 struct tg3_napi *tnapi = &tp->napi[i];
6476
6477 /* SW status block */
6478 netdev_err(tp->dev,
6479 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6480 i,
6481 tnapi->hw_status->status,
6482 tnapi->hw_status->status_tag,
6483 tnapi->hw_status->rx_jumbo_consumer,
6484 tnapi->hw_status->rx_consumer,
6485 tnapi->hw_status->rx_mini_consumer,
6486 tnapi->hw_status->idx[0].rx_producer,
6487 tnapi->hw_status->idx[0].tx_consumer);
6488
6489 netdev_err(tp->dev,
6490 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6491 i,
6492 tnapi->last_tag, tnapi->last_irq_tag,
6493 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6494 tnapi->rx_rcb_ptr,
6495 tnapi->prodring.rx_std_prod_idx,
6496 tnapi->prodring.rx_std_cons_idx,
6497 tnapi->prodring.rx_jmb_prod_idx,
6498 tnapi->prodring.rx_jmb_cons_idx);
6499 }
6500 }
6501
6502 /* This is called whenever we suspect that the system chipset is re-
6503 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6504 * is bogus tx completions. We try to recover by setting the
6505 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6506 * in the workqueue.
6507 */
6508 static void tg3_tx_recover(struct tg3 *tp)
6509 {
6510 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6511 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6512
6513 netdev_warn(tp->dev,
6514 "The system may be re-ordering memory-mapped I/O "
6515 "cycles to the network device, attempting to recover. "
6516 "Please report the problem to the driver maintainer "
6517 "and include system chipset information.\n");
6518
6519 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6520 }
6521
6522 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6523 {
6524 /* Tell compiler to fetch tx indices from memory. */
6525 barrier();
6526 return tnapi->tx_pending -
6527 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6528 }
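/* Illustration (not part of the driver): the arithmetic above stays
 * correct across ring wraparound because the masked unsigned subtraction
 * yields the number of in-flight descriptors. For example, with
 * TG3_TX_RING_SIZE = 512, tx_pending = 511, tx_prod = 5 and
 * tx_cons = 510 (the producer has wrapped):
 *
 *   (5 - 510) & 511 = 7 descriptors in flight
 *   avail = 511 - 7 = 504
 */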
6529
6530 /* Tigon3 never reports partial packet sends. So we do not
6531 * need special logic to handle SKBs that have not had all
6532 * of their frags sent yet, like SunGEM does.
6533 */
6534 static void tg3_tx(struct tg3_napi *tnapi)
6535 {
6536 struct tg3 *tp = tnapi->tp;
6537 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6538 u32 sw_idx = tnapi->tx_cons;
6539 struct netdev_queue *txq;
6540 int index = tnapi - tp->napi;
6541 unsigned int pkts_compl = 0, bytes_compl = 0;
6542
6543 if (tg3_flag(tp, ENABLE_TSS))
6544 index--;
6545
6546 txq = netdev_get_tx_queue(tp->dev, index);
6547
6548 while (sw_idx != hw_idx) {
6549 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6550 struct sk_buff *skb = ri->skb;
6551 int i, tx_bug = 0;
6552
6553 if (unlikely(skb == NULL)) {
6554 tg3_tx_recover(tp);
6555 return;
6556 }
6557
6558 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6559 struct skb_shared_hwtstamps timestamp;
6560 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6561 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6562
6563 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6564
6565 skb_tstamp_tx(skb, &timestamp);
6566 }
6567
6568 pci_unmap_single(tp->pdev,
6569 dma_unmap_addr(ri, mapping),
6570 skb_headlen(skb),
6571 PCI_DMA_TODEVICE);
6572
6573 ri->skb = NULL;
6574
6575 while (ri->fragmented) {
6576 ri->fragmented = false;
6577 sw_idx = NEXT_TX(sw_idx);
6578 ri = &tnapi->tx_buffers[sw_idx];
6579 }
6580
6581 sw_idx = NEXT_TX(sw_idx);
6582
6583 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6584 ri = &tnapi->tx_buffers[sw_idx];
6585 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6586 tx_bug = 1;
6587
6588 pci_unmap_page(tp->pdev,
6589 dma_unmap_addr(ri, mapping),
6590 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6591 PCI_DMA_TODEVICE);
6592
6593 while (ri->fragmented) {
6594 ri->fragmented = false;
6595 sw_idx = NEXT_TX(sw_idx);
6596 ri = &tnapi->tx_buffers[sw_idx];
6597 }
6598
6599 sw_idx = NEXT_TX(sw_idx);
6600 }
6601
6602 pkts_compl++;
6603 bytes_compl += skb->len;
6604
6605 dev_consume_skb_any(skb);
6606
6607 if (unlikely(tx_bug)) {
6608 tg3_tx_recover(tp);
6609 return;
6610 }
6611 }
6612
6613 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6614
6615 tnapi->tx_cons = sw_idx;
6616
6617 /* Need to make the tx_cons update visible to tg3_start_xmit()
6618 * before checking for netif_queue_stopped(). Without the
6619 * memory barrier, there is a small possibility that tg3_start_xmit()
6620 * will miss it and cause the queue to be stopped forever.
6621 */
6622 smp_mb();
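/* Sketch of the barrier pairing assumed here (illustrative pseudocode,
 * not the literal tg3_start_xmit() body):
 *
 *   producer (start_xmit)              consumer (this function)
 *   ---------------------              ------------------------
 *   post descriptors, bump tx_prod     tnapi->tx_cons = sw_idx;
 *   if (avail <= thresh)               smp_mb();
 *           netif_tx_stop_queue(txq);  if (stopped && avail > thresh)
 *   smp_mb();                                  netif_tx_wake_queue(txq);
 *   if (avail > thresh)
 *           netif_tx_wake_queue(txq);
 *
 * Each side re-checks the other's state after its barrier, so a wakeup
 * cannot be lost between the tx_cons update and the stop decision.
 */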
6623
6624 if (unlikely(netif_tx_queue_stopped(txq) &&
6625 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6626 __netif_tx_lock(txq, smp_processor_id());
6627 if (netif_tx_queue_stopped(txq) &&
6628 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6629 netif_tx_wake_queue(txq);
6630 __netif_tx_unlock(txq);
6631 }
6632 }
6633
6634 static void tg3_frag_free(bool is_frag, void *data)
6635 {
6636 if (is_frag)
6637 skb_free_frag(data);
6638 else
6639 kfree(data);
6640 }
6641
6642 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6643 {
6644 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6645 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6646
6647 if (!ri->data)
6648 return;
6649
6650 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6651 map_sz, PCI_DMA_FROMDEVICE);
6652 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6653 ri->data = NULL;
6654 }
6655
6656
6657 /* Returns size of skb allocated or < 0 on error.
6658 *
6659 * We only need to fill in the address because the other members
6660 * of the RX descriptor are invariant, see tg3_init_rings.
6661 *
6662 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6663 * posting buffers we only dirty the first cache line of the RX
6664 * descriptor (containing the address). Whereas for the RX status
6665 * buffers the cpu only reads the last cacheline of the RX descriptor
6666 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6667 */
6668 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6669 u32 opaque_key, u32 dest_idx_unmasked,
6670 unsigned int *frag_size)
6671 {
6672 struct tg3_rx_buffer_desc *desc;
6673 struct ring_info *map;
6674 u8 *data;
6675 dma_addr_t mapping;
6676 int skb_size, data_size, dest_idx;
6677
6678 switch (opaque_key) {
6679 case RXD_OPAQUE_RING_STD:
6680 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6681 desc = &tpr->rx_std[dest_idx];
6682 map = &tpr->rx_std_buffers[dest_idx];
6683 data_size = tp->rx_pkt_map_sz;
6684 break;
6685
6686 case RXD_OPAQUE_RING_JUMBO:
6687 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6688 desc = &tpr->rx_jmb[dest_idx].std;
6689 map = &tpr->rx_jmb_buffers[dest_idx];
6690 data_size = TG3_RX_JMB_MAP_SZ;
6691 break;
6692
6693 default:
6694 return -EINVAL;
6695 }
6696
6697 /* Do not overwrite any of the map or rp information
6698 * until we are sure we can commit to a new buffer.
6699 *
6700 * Callers depend upon this behavior and assume that
6701 * we leave everything unchanged if we fail.
6702 */
6703 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6704 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
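/* Rough sizing illustration (hypothetical numbers; the real values
 * depend on the MTU, TG3_RX_OFFSET(tp) and the kernel's struct
 * layout): with data_size around 1.5 KiB for a standard-MTU buffer,
 * a few tens of bytes of RX offset, and skb_shared_info on the order
 * of a few hundred bytes, skb_size stays below PAGE_SIZE on 4 KiB-page
 * systems, so the cheaper page-frag path below is taken; jumbo
 * buffers typically fall through to kmalloc().
 */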
6705 if (skb_size <= PAGE_SIZE) {
6706 data = netdev_alloc_frag(skb_size);
6707 *frag_size = skb_size;
6708 } else {
6709 data = kmalloc(skb_size, GFP_ATOMIC);
6710 *frag_size = 0;
6711 }
6712 if (!data)
6713 return -ENOMEM;
6714
6715 mapping = pci_map_single(tp->pdev,
6716 data + TG3_RX_OFFSET(tp),
6717 data_size,
6718 PCI_DMA_FROMDEVICE);
6719 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6720 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6721 return -EIO;
6722 }
6723
6724 map->data = data;
6725 dma_unmap_addr_set(map, mapping, mapping);
6726
6727 desc->addr_hi = ((u64)mapping >> 32);
6728 desc->addr_lo = ((u64)mapping & 0xffffffff);
6729
6730 return data_size;
6731 }
6732
6733 /* We only need to move the address over because the other
6734 * members of the RX descriptor are invariant. See notes above
6735 * tg3_alloc_rx_data for full details.
6736 */
6737 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6738 struct tg3_rx_prodring_set *dpr,
6739 u32 opaque_key, int src_idx,
6740 u32 dest_idx_unmasked)
6741 {
6742 struct tg3 *tp = tnapi->tp;
6743 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6744 struct ring_info *src_map, *dest_map;
6745 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6746 int dest_idx;
6747
6748 switch (opaque_key) {
6749 case RXD_OPAQUE_RING_STD:
6750 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6751 dest_desc = &dpr->rx_std[dest_idx];
6752 dest_map = &dpr->rx_std_buffers[dest_idx];
6753 src_desc = &spr->rx_std[src_idx];
6754 src_map = &spr->rx_std_buffers[src_idx];
6755 break;
6756
6757 case RXD_OPAQUE_RING_JUMBO:
6758 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6759 dest_desc = &dpr->rx_jmb[dest_idx].std;
6760 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6761 src_desc = &spr->rx_jmb[src_idx].std;
6762 src_map = &spr->rx_jmb_buffers[src_idx];
6763 break;
6764
6765 default:
6766 return;
6767 }
6768
6769 dest_map->data = src_map->data;
6770 dma_unmap_addr_set(dest_map, mapping,
6771 dma_unmap_addr(src_map, mapping));
6772 dest_desc->addr_hi = src_desc->addr_hi;
6773 dest_desc->addr_lo = src_desc->addr_lo;
6774
6775 /* Ensure that the update to the skb happens after the physical
6776 * addresses have been transferred to the new BD location.
6777 */
6778 smp_wmb();
6779
6780 src_map->data = NULL;
6781 }
6782
6783 /* The RX ring scheme is composed of multiple rings which post fresh
6784 * buffers to the chip, and one special ring the chip uses to report
6785 * status back to the host.
6786 *
6787 * The special ring reports the status of received packets to the
6788 * host. The chip does not write into the original descriptor the
6789 * RX buffer was obtained from. The chip simply takes the original
6790 * descriptor as provided by the host, updates the status and length
6791 * field, then writes this into the next status ring entry.
6792 *
6793 * Each ring the host uses to post buffers to the chip is described
6794 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6795 * it is first placed into the on-chip RAM. Once the packet's length
6796 * is known, the chip walks down the TG3_BDINFO entries to select the
6797 * ring: the first TG3_BDINFO whose MAXLEN field covers the new
6798 * packet's length is chosen.
6799 *
6800 * The "separate ring for rx status" scheme may sound queer, but it makes
6801 * sense from a cache coherency perspective. If only the host writes
6802 * to the buffer post rings, and only the chip writes to the rx status
6803 * rings, then cache lines never move beyond shared-modified state.
6804 * If both the host and chip were to write into the same ring, cache line
6805 * eviction could occur since both entities want it in an exclusive state.
6806 */
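/* Compact sketch of the flow described above (illustrative only):
 *
 *   host                            chip
 *   std/jumbo producer rings -----> on-chip RAM, length-based
 *       (buffers posted)            TG3_BDINFO/MAXLEN ring selection
 *   rx return ring <--------------- status + length written to the
 *       (read by host)              next return-ring entry
 *
 * The host writes only the producer rings and the chip writes only the
 * return ring, which keeps each cache line with a single writer.
 */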
6807 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6808 {
6809 struct tg3 *tp = tnapi->tp;
6810 u32 work_mask, rx_std_posted = 0;
6811 u32 std_prod_idx, jmb_prod_idx;
6812 u32 sw_idx = tnapi->rx_rcb_ptr;
6813 u16 hw_idx;
6814 int received;
6815 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6816
6817 hw_idx = *(tnapi->rx_rcb_prod_idx);
6818 /*
6819 * We need to order the read of hw_idx and the read of
6820 * the opaque cookie.
6821 */
6822 rmb();
6823 work_mask = 0;
6824 received = 0;
6825 std_prod_idx = tpr->rx_std_prod_idx;
6826 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6827 while (sw_idx != hw_idx && budget > 0) {
6828 struct ring_info *ri;
6829 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6830 unsigned int len;
6831 struct sk_buff *skb;
6832 dma_addr_t dma_addr;
6833 u32 opaque_key, desc_idx, *post_ptr;
6834 u8 *data;
6835 u64 tstamp = 0;
6836
6837 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6838 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6839 if (opaque_key == RXD_OPAQUE_RING_STD) {
6840 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6841 dma_addr = dma_unmap_addr(ri, mapping);
6842 data = ri->data;
6843 post_ptr = &std_prod_idx;
6844 rx_std_posted++;
6845 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6846 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6847 dma_addr = dma_unmap_addr(ri, mapping);
6848 data = ri->data;
6849 post_ptr = &jmb_prod_idx;
6850 } else
6851 goto next_pkt_nopost;
6852
6853 work_mask |= opaque_key;
6854
6855 if (desc->err_vlan & RXD_ERR_MASK) {
6856 drop_it:
6857 tg3_recycle_rx(tnapi, tpr, opaque_key,
6858 desc_idx, *post_ptr);
6859 drop_it_no_recycle:
6860 /* Other statistics are kept track of by the card. */
6861 tp->rx_dropped++;
6862 goto next_pkt;
6863 }
6864
6865 prefetch(data + TG3_RX_OFFSET(tp));
6866 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6867 ETH_FCS_LEN;
6868
6869 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6870 RXD_FLAG_PTPSTAT_PTPV1 ||
6871 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6872 RXD_FLAG_PTPSTAT_PTPV2) {
6873 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6874 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6875 }
6876
6877 if (len > TG3_RX_COPY_THRESH(tp)) {
6878 int skb_size;
6879 unsigned int frag_size;
6880
6881 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6882 *post_ptr, &frag_size);
6883 if (skb_size < 0)
6884 goto drop_it;
6885
6886 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6887 PCI_DMA_FROMDEVICE);
6888
6889 /* Ensure that the update to the data happens
6890 * after the usage of the old DMA mapping.
6891 */
6892 smp_wmb();
6893
6894 ri->data = NULL;
6895
6896 skb = build_skb(data, frag_size);
6897 if (!skb) {
6898 tg3_frag_free(frag_size != 0, data);
6899 goto drop_it_no_recycle;
6900 }
6901 skb_reserve(skb, TG3_RX_OFFSET(tp));
6902 } else {
6903 tg3_recycle_rx(tnapi, tpr, opaque_key,
6904 desc_idx, *post_ptr);
6905
6906 skb = netdev_alloc_skb(tp->dev,
6907 len + TG3_RAW_IP_ALIGN);
6908 if (skb == NULL)
6909 goto drop_it_no_recycle;
6910
6911 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6912 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6913 memcpy(skb->data,
6914 data + TG3_RX_OFFSET(tp),
6915 len);
6916 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6917 }
6918
6919 skb_put(skb, len);
6920 if (tstamp)
6921 tg3_hwclock_to_timestamp(tp, tstamp,
6922 skb_hwtstamps(skb));
6923
6924 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6925 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6926 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6927 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6928 skb->ip_summed = CHECKSUM_UNNECESSARY;
6929 else
6930 skb_checksum_none_assert(skb);
6931
6932 skb->protocol = eth_type_trans(skb, tp->dev);
6933
6934 if (len > (tp->dev->mtu + ETH_HLEN) &&
6935 skb->protocol != htons(ETH_P_8021Q) &&
6936 skb->protocol != htons(ETH_P_8021AD)) {
6937 dev_kfree_skb_any(skb);
6938 goto drop_it_no_recycle;
6939 }
6940
6941 if (desc->type_flags & RXD_FLAG_VLAN &&
6942 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6943 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6944 desc->err_vlan & RXD_VLAN_MASK);
6945
6946 napi_gro_receive(&tnapi->napi, skb);
6947
6948 received++;
6949 budget--;
6950
6951 next_pkt:
6952 (*post_ptr)++;
6953
6954 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6955 tpr->rx_std_prod_idx = std_prod_idx &
6956 tp->rx_std_ring_mask;
6957 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6958 tpr->rx_std_prod_idx);
6959 work_mask &= ~RXD_OPAQUE_RING_STD;
6960 rx_std_posted = 0;
6961 }
6962 next_pkt_nopost:
6963 sw_idx++;
6964 sw_idx &= tp->rx_ret_ring_mask;
6965
6966 /* Refresh hw_idx to see if there is new work */
6967 if (sw_idx == hw_idx) {
6968 hw_idx = *(tnapi->rx_rcb_prod_idx);
6969 rmb();
6970 }
6971 }
6972
6973 /* ACK the status ring. */
6974 tnapi->rx_rcb_ptr = sw_idx;
6975 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6976
6977 /* Refill RX ring(s). */
6978 if (!tg3_flag(tp, ENABLE_RSS)) {
6979 /* Sync BD data before updating mailbox */
6980 wmb();
6981
6982 if (work_mask & RXD_OPAQUE_RING_STD) {
6983 tpr->rx_std_prod_idx = std_prod_idx &
6984 tp->rx_std_ring_mask;
6985 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6986 tpr->rx_std_prod_idx);
6987 }
6988 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6989 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6990 tp->rx_jmb_ring_mask;
6991 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6992 tpr->rx_jmb_prod_idx);
6993 }
6994 mmiowb();
6995 } else if (work_mask) {
6996 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6997 * updated before the producer indices can be updated.
6998 */
6999 smp_wmb();
7000
7001 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7002 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7003
7004 if (tnapi != &tp->napi[1]) {
7005 tp->rx_refill = true;
7006 napi_schedule(&tp->napi[1].napi);
7007 }
7008 }
7009
7010 return received;
7011 }
7012
7013 static void tg3_poll_link(struct tg3 *tp)
7014 {
7015 /* handle link change and other phy events */
7016 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7017 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7018
7019 if (sblk->status & SD_STATUS_LINK_CHG) {
7020 sblk->status = SD_STATUS_UPDATED |
7021 (sblk->status & ~SD_STATUS_LINK_CHG);
7022 spin_lock(&tp->lock);
7023 if (tg3_flag(tp, USE_PHYLIB)) {
7024 tw32_f(MAC_STATUS,
7025 (MAC_STATUS_SYNC_CHANGED |
7026 MAC_STATUS_CFG_CHANGED |
7027 MAC_STATUS_MI_COMPLETION |
7028 MAC_STATUS_LNKSTATE_CHANGED));
7029 udelay(40);
7030 } else
7031 tg3_setup_phy(tp, false);
7032 spin_unlock(&tp->lock);
7033 }
7034 }
7035 }
7036
7037 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7038 struct tg3_rx_prodring_set *dpr,
7039 struct tg3_rx_prodring_set *spr)
7040 {
7041 u32 si, di, cpycnt, src_prod_idx;
7042 int i, err = 0;
7043
7044 while (1) {
7045 src_prod_idx = spr->rx_std_prod_idx;
7046
7047 /* Make sure updates to the rx_std_buffers[] entries and the
7048 * standard producer index are seen in the correct order.
7049 */
7050 smp_rmb();
7051
7052 if (spr->rx_std_cons_idx == src_prod_idx)
7053 break;
7054
7055 if (spr->rx_std_cons_idx < src_prod_idx)
7056 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7057 else
7058 cpycnt = tp->rx_std_ring_mask + 1 -
7059 spr->rx_std_cons_idx;
7060
7061 cpycnt = min(cpycnt,
7062 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7063
7064 si = spr->rx_std_cons_idx;
7065 di = dpr->rx_std_prod_idx;
7066
7067 for (i = di; i < di + cpycnt; i++) {
7068 if (dpr->rx_std_buffers[i].data) {
7069 cpycnt = i - di;
7070 err = -ENOSPC;
7071 break;
7072 }
7073 }
7074
7075 if (!cpycnt)
7076 break;
7077
7078 /* Ensure that updates to the rx_std_buffers ring and the
7079 * shadowed hardware producer ring from tg3_recycle_skb() are
7080 * ordered correctly WRT the skb check above.
7081 */
7082 smp_rmb();
7083
7084 memcpy(&dpr->rx_std_buffers[di],
7085 &spr->rx_std_buffers[si],
7086 cpycnt * sizeof(struct ring_info));
7087
7088 for (i = 0; i < cpycnt; i++, di++, si++) {
7089 struct tg3_rx_buffer_desc *sbd, *dbd;
7090 sbd = &spr->rx_std[si];
7091 dbd = &dpr->rx_std[di];
7092 dbd->addr_hi = sbd->addr_hi;
7093 dbd->addr_lo = sbd->addr_lo;
7094 }
7095
7096 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7097 tp->rx_std_ring_mask;
7098 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7099 tp->rx_std_ring_mask;
7100 }
7101
7102 while (1) {
7103 src_prod_idx = spr->rx_jmb_prod_idx;
7104
7105 /* Make sure updates to the rx_jmb_buffers[] entries and
7106 * the jumbo producer index are seen in the correct order.
7107 */
7108 smp_rmb();
7109
7110 if (spr->rx_jmb_cons_idx == src_prod_idx)
7111 break;
7112
7113 if (spr->rx_jmb_cons_idx < src_prod_idx)
7114 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7115 else
7116 cpycnt = tp->rx_jmb_ring_mask + 1 -
7117 spr->rx_jmb_cons_idx;
7118
7119 cpycnt = min(cpycnt,
7120 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7121
7122 si = spr->rx_jmb_cons_idx;
7123 di = dpr->rx_jmb_prod_idx;
7124
7125 for (i = di; i < di + cpycnt; i++) {
7126 if (dpr->rx_jmb_buffers[i].data) {
7127 cpycnt = i - di;
7128 err = -ENOSPC;
7129 break;
7130 }
7131 }
7132
7133 if (!cpycnt)
7134 break;
7135
7136 /* Ensure that updates to the rx_jmb_buffers ring and the
7137 * shadowed hardware producer ring from tg3_recycle_skb() are
7138 * ordered correctly WRT the skb check above.
7139 */
7140 smp_rmb();
7141
7142 memcpy(&dpr->rx_jmb_buffers[di],
7143 &spr->rx_jmb_buffers[si],
7144 cpycnt * sizeof(struct ring_info));
7145
7146 for (i = 0; i < cpycnt; i++, di++, si++) {
7147 struct tg3_rx_buffer_desc *sbd, *dbd;
7148 sbd = &spr->rx_jmb[si].std;
7149 dbd = &dpr->rx_jmb[di].std;
7150 dbd->addr_hi = sbd->addr_hi;
7151 dbd->addr_lo = sbd->addr_lo;
7152 }
7153
7154 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7155 tp->rx_jmb_ring_mask;
7156 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7157 tp->rx_jmb_ring_mask;
7158 }
7159
7160 return err;
7161 }
7162
7163 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7164 {
7165 struct tg3 *tp = tnapi->tp;
7166
7167 /* run TX completion thread */
7168 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7169 tg3_tx(tnapi);
7170 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7171 return work_done;
7172 }
7173
7174 if (!tnapi->rx_rcb_prod_idx)
7175 return work_done;
7176
7177 /* run RX thread, within the bounds set by NAPI.
7178 * All RX "locking" is done by ensuring outside
7179 * code synchronizes with tg3->napi.poll()
7180 */
7181 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7182 work_done += tg3_rx(tnapi, budget - work_done);
7183
7184 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7185 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7186 int i, err = 0;
7187 u32 std_prod_idx = dpr->rx_std_prod_idx;
7188 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7189
7190 tp->rx_refill = false;
7191 for (i = 1; i <= tp->rxq_cnt; i++)
7192 err |= tg3_rx_prodring_xfer(tp, dpr,
7193 &tp->napi[i].prodring);
7194
7195 wmb();
7196
7197 if (std_prod_idx != dpr->rx_std_prod_idx)
7198 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7199 dpr->rx_std_prod_idx);
7200
7201 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7202 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7203 dpr->rx_jmb_prod_idx);
7204
7205 mmiowb();
7206
7207 if (err)
7208 tw32_f(HOSTCC_MODE, tp->coal_now);
7209 }
7210
7211 return work_done;
7212 }
7213
7214 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7215 {
7216 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7217 schedule_work(&tp->reset_task);
7218 }
7219
7220 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7221 {
7222 cancel_work_sync(&tp->reset_task);
7223 tg3_flag_clear(tp, RESET_TASK_PENDING);
7224 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7225 }
7226
7227 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7228 {
7229 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7230 struct tg3 *tp = tnapi->tp;
7231 int work_done = 0;
7232 struct tg3_hw_status *sblk = tnapi->hw_status;
7233
7234 while (1) {
7235 work_done = tg3_poll_work(tnapi, work_done, budget);
7236
7237 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7238 goto tx_recovery;
7239
7240 if (unlikely(work_done >= budget))
7241 break;
7242
7243 /* tp->last_tag is used in tg3_int_reenable() below
7244 * to tell the hw how much work has been processed,
7245 * so we must read it before checking for more work.
7246 */
7247 tnapi->last_tag = sblk->status_tag;
7248 tnapi->last_irq_tag = tnapi->last_tag;
7249 rmb();
7250
7251 /* check for RX/TX work to do */
7252 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7253 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7254
7255 /* This test here is not race-free, but will reduce
7256 * the number of interrupts by looping again.
7257 */
7258 if (tnapi == &tp->napi[1] && tp->rx_refill)
7259 continue;
7260
7261 napi_complete_done(napi, work_done);
7262 /* Reenable interrupts. */
7263 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7264
7265 /* This test here is synchronized by napi_schedule()
7266 * and napi_complete() to close the race condition.
7267 */
7268 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7269 tw32(HOSTCC_MODE, tp->coalesce_mode |
7270 HOSTCC_MODE_ENABLE |
7271 tnapi->coal_now);
7272 }
7273 mmiowb();
7274 break;
7275 }
7276 }
7277
7278 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7279 return work_done;
7280
7281 tx_recovery:
7282 /* work_done is guaranteed to be less than budget. */
7283 napi_complete(napi);
7284 tg3_reset_task_schedule(tp);
7285 return work_done;
7286 }
7287
7288 static void tg3_process_error(struct tg3 *tp)
7289 {
7290 u32 val;
7291 bool real_error = false;
7292
7293 if (tg3_flag(tp, ERROR_PROCESSED))
7294 return;
7295
7296 /* Check Flow Attention register */
7297 val = tr32(HOSTCC_FLOW_ATTN);
7298 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7299 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7300 real_error = true;
7301 }
7302
7303 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7304 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7305 real_error = true;
7306 }
7307
7308 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7309 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7310 real_error = true;
7311 }
7312
7313 if (!real_error)
7314 return;
7315
7316 tg3_dump_state(tp);
7317
7318 tg3_flag_set(tp, ERROR_PROCESSED);
7319 tg3_reset_task_schedule(tp);
7320 }
7321
7322 static int tg3_poll(struct napi_struct *napi, int budget)
7323 {
7324 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7325 struct tg3 *tp = tnapi->tp;
7326 int work_done = 0;
7327 struct tg3_hw_status *sblk = tnapi->hw_status;
7328
7329 while (1) {
7330 if (sblk->status & SD_STATUS_ERROR)
7331 tg3_process_error(tp);
7332
7333 tg3_poll_link(tp);
7334
7335 work_done = tg3_poll_work(tnapi, work_done, budget);
7336
7337 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7338 goto tx_recovery;
7339
7340 if (unlikely(work_done >= budget))
7341 break;
7342
7343 if (tg3_flag(tp, TAGGED_STATUS)) {
7344 /* tp->last_tag is used in tg3_int_reenable() below
7345 * to tell the hw how much work has been processed,
7346 * so we must read it before checking for more work.
7347 */
7348 tnapi->last_tag = sblk->status_tag;
7349 tnapi->last_irq_tag = tnapi->last_tag;
7350 rmb();
7351 } else
7352 sblk->status &= ~SD_STATUS_UPDATED;
7353
7354 if (likely(!tg3_has_work(tnapi))) {
7355 napi_complete_done(napi, work_done);
7356 tg3_int_reenable(tnapi);
7357 break;
7358 }
7359 }
7360
7361 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7362 return work_done;
7363
7364 tx_recovery:
7365 /* work_done is guaranteed to be less than budget. */
7366 napi_complete(napi);
7367 tg3_reset_task_schedule(tp);
7368 return work_done;
7369 }
7370
7371 static void tg3_napi_disable(struct tg3 *tp)
7372 {
7373 int i;
7374
7375 for (i = tp->irq_cnt - 1; i >= 0; i--)
7376 napi_disable(&tp->napi[i].napi);
7377 }
7378
7379 static void tg3_napi_enable(struct tg3 *tp)
7380 {
7381 int i;
7382
7383 for (i = 0; i < tp->irq_cnt; i++)
7384 napi_enable(&tp->napi[i].napi);
7385 }
7386
7387 static void tg3_napi_init(struct tg3 *tp)
7388 {
7389 int i;
7390
7391 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7392 for (i = 1; i < tp->irq_cnt; i++)
7393 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7394 }
7395
7396 static void tg3_napi_fini(struct tg3 *tp)
7397 {
7398 int i;
7399
7400 for (i = 0; i < tp->irq_cnt; i++)
7401 netif_napi_del(&tp->napi[i].napi);
7402 }
7403
7404 static inline void tg3_netif_stop(struct tg3 *tp)
7405 {
7406 netif_trans_update(tp->dev); /* prevent tx timeout */
7407 tg3_napi_disable(tp);
7408 netif_carrier_off(tp->dev);
7409 netif_tx_disable(tp->dev);
7410 }
7411
7412 /* tp->lock must be held */
7413 static inline void tg3_netif_start(struct tg3 *tp)
7414 {
7415 tg3_ptp_resume(tp);
7416
7417 /* NOTE: unconditional netif_tx_wake_all_queues is only
7418 * appropriate so long as all callers are assured to
7419 * have free tx slots (such as after tg3_init_hw)
7420 */
7421 netif_tx_wake_all_queues(tp->dev);
7422
7423 if (tp->link_up)
7424 netif_carrier_on(tp->dev);
7425
7426 tg3_napi_enable(tp);
7427 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7428 tg3_enable_ints(tp);
7429 }
7430
7431 static void tg3_irq_quiesce(struct tg3 *tp)
7432 __releases(tp->lock)
7433 __acquires(tp->lock)
7434 {
7435 int i;
7436
7437 BUG_ON(tp->irq_sync);
7438
7439 tp->irq_sync = 1;
7440 smp_mb();
7441
7442 spin_unlock_bh(&tp->lock);
7443
7444 for (i = 0; i < tp->irq_cnt; i++)
7445 synchronize_irq(tp->napi[i].irq_vec);
7446
7447 spin_lock_bh(&tp->lock);
7448 }
7449
7450 /* Fully shut down all tg3 driver activity elsewhere in the system.
7451 * If irq_sync is non-zero, the IRQ handlers are synchronized with as
7452 * well. Most of the time this is not necessary, except when shutting
7453 * down the device.
7454 */
7455 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7456 {
7457 spin_lock_bh(&tp->lock);
7458 if (irq_sync)
7459 tg3_irq_quiesce(tp);
7460 }
7461
7462 static inline void tg3_full_unlock(struct tg3 *tp)
7463 {
7464 spin_unlock_bh(&tp->lock);
7465 }
7466
7467 /* One-shot MSI handler - Chip automatically disables interrupt
7468 * after sending MSI so driver doesn't have to do it.
7469 */
7470 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7471 {
7472 struct tg3_napi *tnapi = dev_id;
7473 struct tg3 *tp = tnapi->tp;
7474
7475 prefetch(tnapi->hw_status);
7476 if (tnapi->rx_rcb)
7477 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7478
7479 if (likely(!tg3_irq_sync(tp)))
7480 napi_schedule(&tnapi->napi);
7481
7482 return IRQ_HANDLED;
7483 }
7484
7485 /* MSI ISR - No need to check for interrupt sharing and no need to
7486 * flush status block and interrupt mailbox. PCI ordering rules
7487 * guarantee that MSI will arrive after the status block.
7488 */
7489 static irqreturn_t tg3_msi(int irq, void *dev_id)
7490 {
7491 struct tg3_napi *tnapi = dev_id;
7492 struct tg3 *tp = tnapi->tp;
7493
7494 prefetch(tnapi->hw_status);
7495 if (tnapi->rx_rcb)
7496 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7497 /*
7498 * Writing any value to intr-mbox-0 clears PCI INTA# and
7499 * chip-internal interrupt pending events.
7500 * Writing non-zero to intr-mbox-0 additionally tells the
7501 * NIC to stop sending us irqs, engaging "in-intr-handler"
7502 * event coalescing.
7503 */
7504 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7505 if (likely(!tg3_irq_sync(tp)))
7506 napi_schedule(&tnapi->napi);
7507
7508 return IRQ_RETVAL(1);
7509 }
7510
7511 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7512 {
7513 struct tg3_napi *tnapi = dev_id;
7514 struct tg3 *tp = tnapi->tp;
7515 struct tg3_hw_status *sblk = tnapi->hw_status;
7516 unsigned int handled = 1;
7517
7518 /* In INTx mode, it is possible for the interrupt to arrive at
7519 * the CPU before the status block posted prior to the interrupt.
7520 * Reading the PCI State register will confirm whether the
7521 * interrupt is ours and will flush the status block.
7522 */
7523 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7524 if (tg3_flag(tp, CHIP_RESETTING) ||
7525 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7526 handled = 0;
7527 goto out;
7528 }
7529 }
7530
7531 /*
7532 * Writing any value to intr-mbox-0 clears PCI INTA# and
7533 * chip-internal interrupt pending events.
7534 * Writing non-zero to intr-mbox-0 additionally tells the
7535 * NIC to stop sending us irqs, engaging "in-intr-handler"
7536 * event coalescing.
7537 *
7538 * Flush the mailbox to de-assert the IRQ immediately to prevent
7539 * spurious interrupts. The flush impacts performance but
7540 * excessive spurious interrupts can be worse in some cases.
7541 */
7542 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7543 if (tg3_irq_sync(tp))
7544 goto out;
7545 sblk->status &= ~SD_STATUS_UPDATED;
7546 if (likely(tg3_has_work(tnapi))) {
7547 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7548 napi_schedule(&tnapi->napi);
7549 } else {
7550 /* No work, shared interrupt perhaps? Re-enable
7551 * interrupts, and flush that PCI write.
7552 */
7553 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7554 0x00000000);
7555 }
7556 out:
7557 return IRQ_RETVAL(handled);
7558 }
7559
7560 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7561 {
7562 struct tg3_napi *tnapi = dev_id;
7563 struct tg3 *tp = tnapi->tp;
7564 struct tg3_hw_status *sblk = tnapi->hw_status;
7565 unsigned int handled = 1;
7566
7567 /* In INTx mode, it is possible for the interrupt to arrive at
7568 * the CPU before the status block posted prior to the interrupt.
7569 * Reading the PCI State register will confirm whether the
7570 * interrupt is ours and will flush the status block.
7571 */
7572 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7573 if (tg3_flag(tp, CHIP_RESETTING) ||
7574 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7575 handled = 0;
7576 goto out;
7577 }
7578 }
7579
7580 /*
7581 * Writing any value to intr-mbox-0 clears PCI INTA# and
7582 * chip-internal interrupt pending events.
7583 * Writing non-zero to intr-mbox-0 additionally tells the
7584 * NIC to stop sending us irqs, engaging "in-intr-handler"
7585 * event coalescing.
7586 *
7587 * Flush the mailbox to de-assert the IRQ immediately to prevent
7588 * spurious interrupts. The flush impacts performance but
7589 * excessive spurious interrupts can be worse in some cases.
7590 */
7591 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7592
7593 /*
7594 * In a shared interrupt configuration, sometimes other devices'
7595 * interrupts will scream. We record the current status tag here
7596 * so that the above check can report that the screaming interrupts
7597 * are unhandled. Eventually they will be silenced.
7598 */
7599 tnapi->last_irq_tag = sblk->status_tag;
7600
7601 if (tg3_irq_sync(tp))
7602 goto out;
7603
7604 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7605
7606 napi_schedule(&tnapi->napi);
7607
7608 out:
7609 return IRQ_RETVAL(handled);
7610 }
7611
7612 /* ISR for interrupt test */
7613 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7614 {
7615 struct tg3_napi *tnapi = dev_id;
7616 struct tg3 *tp = tnapi->tp;
7617 struct tg3_hw_status *sblk = tnapi->hw_status;
7618
7619 if ((sblk->status & SD_STATUS_UPDATED) ||
7620 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7621 tg3_disable_ints(tp);
7622 return IRQ_RETVAL(1);
7623 }
7624 return IRQ_RETVAL(0);
7625 }
7626
7627 #ifdef CONFIG_NET_POLL_CONTROLLER
7628 static void tg3_poll_controller(struct net_device *dev)
7629 {
7630 int i;
7631 struct tg3 *tp = netdev_priv(dev);
7632
7633 if (tg3_irq_sync(tp))
7634 return;
7635
7636 for (i = 0; i < tp->irq_cnt; i++)
7637 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7638 }
7639 #endif
7640
7641 static void tg3_tx_timeout(struct net_device *dev)
7642 {
7643 struct tg3 *tp = netdev_priv(dev);
7644
7645 if (netif_msg_tx_err(tp)) {
7646 netdev_err(dev, "transmit timed out, resetting\n");
7647 tg3_dump_state(tp);
7648 }
7649
7650 tg3_reset_task_schedule(tp);
7651 }
7652
7653 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7654 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7655 {
7656 u32 base = (u32) mapping & 0xffffffff;
7657
7658 return base + len + 8 < base;
7659 }
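
/* Worked example for the test above (values are illustrative, not
 * from the driver): with mapping = 0xfffff000 and len = 0x2000,
 * base + len + 8 = 0x100001008 truncates to the u32 0x00001008,
 * which is less than base, so the buffer is flagged as crossing a
 * 4GB boundary.  The extra 8 bytes act as a guard margin, so buffers
 * ending within 8 bytes of a boundary are flagged as well.
 */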
7660
7661 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7662 * of any 4GB boundaries: 4G, 8G, etc.
7663 */
7664 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7665 u32 len, u32 mss)
7666 {
7667 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7668 u32 base = (u32) mapping & 0xffffffff;
7669
7670 return ((base + len + (mss & 0x3fff)) < base);
7671 }
7672 return 0;
7673 }
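
/* Illustrative check of the 5762 test above (hypothetical values):
 * with base = 0xffffff00, len = 0x80 and mss = 0x400, the sum
 * base + len + (mss & 0x3fff) = 0x100000380 wraps below base, so a
 * TSO buffer that merely ends within MSS bytes of the 4GB boundary
 * is rejected even though the buffer itself never crosses it.
 */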
7674
7675 /* Test for DMA addresses > 40-bit */
7676 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7677 int len)
7678 {
7679 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7680 if (tg3_flag(tp, 40BIT_DMA_BUG))
7681 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7682 return 0;
7683 #else
7684 return 0;
7685 #endif
7686 }
7687
7688 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7689 dma_addr_t mapping, u32 len, u32 flags,
7690 u32 mss, u32 vlan)
7691 {
7692 txbd->addr_hi = ((u64) mapping >> 32);
7693 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7694 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7695 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7696 }
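
/* Sketch of the packing performed above (bit positions follow from
 * the 0xffff flag mask and the TXD_* shift constants in tg3.h):
 *
 *   addr_hi   = upper 32 bits of the DMA address
 *   addr_lo   = lower 32 bits of the DMA address
 *   len_flags = length in the high 16 bits, TXD_FLAG_* in the low 16
 *   vlan_tag  = MSS shifted up by TXD_MSS_SHIFT, VLAN tag below it
 *
 * e.g. len = 1514 with flags = TXD_FLAG_END yields
 * len_flags = (1514 << 16) | TXD_FLAG_END.
 */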
7697
7698 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7699 dma_addr_t map, u32 len, u32 flags,
7700 u32 mss, u32 vlan)
7701 {
7702 struct tg3 *tp = tnapi->tp;
7703 bool hwbug = false;
7704
7705 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7706 hwbug = true;
7707
7708 if (tg3_4g_overflow_test(map, len))
7709 hwbug = true;
7710
7711 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7712 hwbug = true;
7713
7714 if (tg3_40bit_overflow_test(tp, map, len))
7715 hwbug = true;
7716
7717 if (tp->dma_limit) {
7718 u32 prvidx = *entry;
7719 u32 tmp_flag = flags & ~TXD_FLAG_END;
7720 while (len > tp->dma_limit && *budget) {
7721 u32 frag_len = tp->dma_limit;
7722 len -= tp->dma_limit;
7723
7724 /* Avoid the 8-byte DMA problem */
7725 if (len <= 8) {
7726 len += tp->dma_limit / 2;
7727 frag_len = tp->dma_limit / 2;
7728 }
7729
7730 tnapi->tx_buffers[*entry].fragmented = true;
7731
7732 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7733 frag_len, tmp_flag, mss, vlan);
7734 *budget -= 1;
7735 prvidx = *entry;
7736 *entry = NEXT_TX(*entry);
7737
7738 map += frag_len;
7739 }
7740
7741 if (len) {
7742 if (*budget) {
7743 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7744 len, flags, mss, vlan);
7745 *budget -= 1;
7746 *entry = NEXT_TX(*entry);
7747 } else {
7748 hwbug = true;
7749 tnapi->tx_buffers[prvidx].fragmented = false;
7750 }
7751 }
7752 } else {
7753 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7754 len, flags, mss, vlan);
7755 *entry = NEXT_TX(*entry);
7756 }
7757
7758 return hwbug;
7759 }
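
/* Worked example of the dma_limit splitting above (hypothetical
 * sizes): with dma_limit = 4096, a 9018-byte mapping is emitted as
 * 4096 + 4096 + 826 byte descriptors.  When a split would leave a
 * tail of 8 bytes or less, the chunk is halved instead to dodge the
 * SHORT_DMA_BUG case, e.g. a 4100-byte mapping becomes 2048 + 2052
 * rather than 4096 + 4.
 */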
7760
7761 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7762 {
7763 int i;
7764 struct sk_buff *skb;
7765 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7766
7767 skb = txb->skb;
7768 txb->skb = NULL;
7769
7770 pci_unmap_single(tnapi->tp->pdev,
7771 dma_unmap_addr(txb, mapping),
7772 skb_headlen(skb),
7773 PCI_DMA_TODEVICE);
7774
7775 while (txb->fragmented) {
7776 txb->fragmented = false;
7777 entry = NEXT_TX(entry);
7778 txb = &tnapi->tx_buffers[entry];
7779 }
7780
7781 for (i = 0; i <= last; i++) {
7782 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7783
7784 entry = NEXT_TX(entry);
7785 txb = &tnapi->tx_buffers[entry];
7786
7787 pci_unmap_page(tnapi->tp->pdev,
7788 dma_unmap_addr(txb, mapping),
7789 skb_frag_size(frag), PCI_DMA_TODEVICE);
7790
7791 while (txb->fragmented) {
7792 txb->fragmented = false;
7793 entry = NEXT_TX(entry);
7794 txb = &tnapi->tx_buffers[entry];
7795 }
7796 }
7797 }
7798
7799 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7800 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7801 struct sk_buff **pskb,
7802 u32 *entry, u32 *budget,
7803 u32 base_flags, u32 mss, u32 vlan)
7804 {
7805 struct tg3 *tp = tnapi->tp;
7806 struct sk_buff *new_skb, *skb = *pskb;
7807 dma_addr_t new_addr = 0;
7808 int ret = 0;
7809
7810 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7811 new_skb = skb_copy(skb, GFP_ATOMIC);
7812 else {
7813 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7814
7815 new_skb = skb_copy_expand(skb,
7816 skb_headroom(skb) + more_headroom,
7817 skb_tailroom(skb), GFP_ATOMIC);
7818 }
7819
7820 if (!new_skb) {
7821 ret = -1;
7822 } else {
7823 /* New SKB is guaranteed to be linear. */
7824 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7825 PCI_DMA_TODEVICE);
7826 /* Make sure the mapping succeeded */
7827 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7828 dev_kfree_skb_any(new_skb);
7829 ret = -1;
7830 } else {
7831 u32 save_entry = *entry;
7832
7833 base_flags |= TXD_FLAG_END;
7834
7835 tnapi->tx_buffers[*entry].skb = new_skb;
7836 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7837 mapping, new_addr);
7838
7839 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7840 new_skb->len, base_flags,
7841 mss, vlan)) {
7842 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7843 dev_kfree_skb_any(new_skb);
7844 ret = -1;
7845 }
7846 }
7847 }
7848
7849 dev_consume_skb_any(skb);
7850 *pskb = new_skb;
7851 return ret;
7852 }
7853
7854 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7855 {
7856 /* Check if we will never have enough descriptors,
7857 * as gso_segs can exceed the current ring size
7858 */
7859 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7860 }
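
/* Sanity check of the threshold above, assuming the usual default
 * tx_pending of 511: only packets with gso_segs <= 169 pass, because
 * tg3_tso_bug() budgets up to three descriptors per segment and
 * anything larger could never fit in the ring no matter how long we
 * waited for completions.
 */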
7861
7862 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7863
7864 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7865 * indicated in tg3_tx_frag_set()
7866 */
7867 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7868 struct netdev_queue *txq, struct sk_buff *skb)
7869 {
7870 struct sk_buff *segs, *nskb;
7871 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7872
7873 /* Estimate the number of fragments in the worst case */
7874 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7875 netif_tx_stop_queue(txq);
7876
7877 /* netif_tx_stop_queue() must be done before checking
7878 * tx index in tg3_tx_avail() below, because in
7879 * tg3_tx(), we update tx index before checking for
7880 * netif_tx_queue_stopped().
7881 */
7882 smp_mb();
7883 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7884 return NETDEV_TX_BUSY;
7885
7886 netif_tx_wake_queue(txq);
7887 }
7888
7889 segs = skb_gso_segment(skb, tp->dev->features &
7890 ~(NETIF_F_TSO | NETIF_F_TSO6));
7891 if (IS_ERR(segs) || !segs)
7892 goto tg3_tso_bug_end;
7893
7894 do {
7895 nskb = segs;
7896 segs = segs->next;
7897 nskb->next = NULL;
7898 tg3_start_xmit(nskb, tp->dev);
7899 } while (segs);
7900
7901 tg3_tso_bug_end:
7902 dev_consume_skb_any(skb);
7903
7904 return NETDEV_TX_OK;
7905 }
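
/* The fallback above resubmits each segment through tg3_start_xmit()
 * as an ordinary packet.  skb_gso_segment() returns a singly linked
 * list chained through skb->next, which is why the loop detaches each
 * nskb with nskb->next = NULL before transmitting it.
 */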
7906
7907 /* hard_start_xmit for all devices */
7908 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7909 {
7910 struct tg3 *tp = netdev_priv(dev);
7911 u32 len, entry, base_flags, mss, vlan = 0;
7912 u32 budget;
7913 int i = -1, would_hit_hwbug;
7914 dma_addr_t mapping;
7915 struct tg3_napi *tnapi;
7916 struct netdev_queue *txq;
7917 unsigned int last;
7918 struct iphdr *iph = NULL;
7919 struct tcphdr *tcph = NULL;
7920 __sum16 tcp_csum = 0, ip_csum = 0;
7921 __be16 ip_tot_len = 0;
7922
7923 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7924 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7925 if (tg3_flag(tp, ENABLE_TSS))
7926 tnapi++;
7927
7928 budget = tg3_tx_avail(tnapi);
7929
7930 /* We are running in BH disabled context with netif_tx_lock
7931 * and TX reclaim runs via tp->napi.poll inside of a software
7932 * interrupt. Furthermore, IRQ processing runs lockless so we have
7933 * no IRQ context deadlocks to worry about either. Rejoice!
7934 */
7935 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7936 if (!netif_tx_queue_stopped(txq)) {
7937 netif_tx_stop_queue(txq);
7938
7939 /* This is a hard error, log it. */
7940 netdev_err(dev,
7941 "BUG! Tx Ring full when queue awake!\n");
7942 }
7943 return NETDEV_TX_BUSY;
7944 }
7945
7946 entry = tnapi->tx_prod;
7947 base_flags = 0;
7948
7949 mss = skb_shinfo(skb)->gso_size;
7950 if (mss) {
7951 u32 tcp_opt_len, hdr_len;
7952
7953 if (skb_cow_head(skb, 0))
7954 goto drop;
7955
7956 iph = ip_hdr(skb);
7957 tcp_opt_len = tcp_optlen(skb);
7958
7959 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7960
7961 /* HW/FW can not correctly segment packets that have been
7962 * vlan encapsulated.
7963 */
7964 if (skb->protocol == htons(ETH_P_8021Q) ||
7965 skb->protocol == htons(ETH_P_8021AD)) {
7966 if (tg3_tso_bug_gso_check(tnapi, skb))
7967 return tg3_tso_bug(tp, tnapi, txq, skb);
7968 goto drop;
7969 }
7970
7971 if (!skb_is_gso_v6(skb)) {
7972 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7973 tg3_flag(tp, TSO_BUG)) {
7974 if (tg3_tso_bug_gso_check(tnapi, skb))
7975 return tg3_tso_bug(tp, tnapi, txq, skb);
7976 goto drop;
7977 }
7978 ip_csum = iph->check;
7979 ip_tot_len = iph->tot_len;
7980 iph->check = 0;
7981 iph->tot_len = htons(mss + hdr_len);
7982 }
7983
7984 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7985 TXD_FLAG_CPU_POST_DMA);
7986
7987 tcph = tcp_hdr(skb);
7988 tcp_csum = tcph->check;
7989
7990 if (tg3_flag(tp, HW_TSO_1) ||
7991 tg3_flag(tp, HW_TSO_2) ||
7992 tg3_flag(tp, HW_TSO_3)) {
7993 tcph->check = 0;
7994 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7995 } else {
7996 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7997 0, IPPROTO_TCP, 0);
7998 }
7999
8000 if (tg3_flag(tp, HW_TSO_3)) {
8001 mss |= (hdr_len & 0xc) << 12;
8002 if (hdr_len & 0x10)
8003 base_flags |= 0x00000010;
8004 base_flags |= (hdr_len & 0x3e0) << 5;
8005 } else if (tg3_flag(tp, HW_TSO_2))
8006 mss |= hdr_len << 9;
8007 else if (tg3_flag(tp, HW_TSO_1) ||
8008 tg3_asic_rev(tp) == ASIC_REV_5705) {
8009 if (tcp_opt_len || iph->ihl > 5) {
8010 int tsflags;
8011
8012 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8013 mss |= (tsflags << 11);
8014 }
8015 } else {
8016 if (tcp_opt_len || iph->ihl > 5) {
8017 int tsflags;
8018
8019 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8020 base_flags |= tsflags << 12;
8021 }
8022 }
8023 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8024 /* HW/FW can not correctly checksum packets that have been
8025 * vlan encapsulated.
8026 */
8027 if (skb->protocol == htons(ETH_P_8021Q) ||
8028 skb->protocol == htons(ETH_P_8021AD)) {
8029 if (skb_checksum_help(skb))
8030 goto drop;
8031 } else {
8032 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8033 }
8034 }
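
/* Worked example of the HW_TSO_3 header encoding above, for a
 * hypothetical IPv4/TCP frame with no options: hdr_len = 20 + 20 =
 * 0x28.  Bits 3:2 of hdr_len (0x8 << 12) land in mss bits 15:14,
 * bit 4 (zero here) would become flag bit 4, and bits 9:5
 * (0x20 << 5) land in base_flags bits 14:10.
 */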
8035
8036 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8037 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8038 base_flags |= TXD_FLAG_JMB_PKT;
8039
8040 if (skb_vlan_tag_present(skb)) {
8041 base_flags |= TXD_FLAG_VLAN;
8042 vlan = skb_vlan_tag_get(skb);
8043 }
8044
8045 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8046 tg3_flag(tp, TX_TSTAMP_EN)) {
8047 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8048 base_flags |= TXD_FLAG_HWTSTAMP;
8049 }
8050
8051 len = skb_headlen(skb);
8052
8053 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8054 if (pci_dma_mapping_error(tp->pdev, mapping))
8055 goto drop;
8056
8058 tnapi->tx_buffers[entry].skb = skb;
8059 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8060
8061 would_hit_hwbug = 0;
8062
8063 if (tg3_flag(tp, 5701_DMA_BUG))
8064 would_hit_hwbug = 1;
8065
8066 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8067 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8068 mss, vlan)) {
8069 would_hit_hwbug = 1;
8070 } else if (skb_shinfo(skb)->nr_frags > 0) {
8071 u32 tmp_mss = mss;
8072
8073 if (!tg3_flag(tp, HW_TSO_1) &&
8074 !tg3_flag(tp, HW_TSO_2) &&
8075 !tg3_flag(tp, HW_TSO_3))
8076 tmp_mss = 0;
8077
8078 /* Now loop through additional data
8079 * fragments, and queue them.
8080 */
8081 last = skb_shinfo(skb)->nr_frags - 1;
8082 for (i = 0; i <= last; i++) {
8083 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8084
8085 len = skb_frag_size(frag);
8086 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8087 len, DMA_TO_DEVICE);
8088
8089 tnapi->tx_buffers[entry].skb = NULL;
8090 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8091 mapping);
8092 if (dma_mapping_error(&tp->pdev->dev, mapping))
8093 goto dma_error;
8094
8095 if (!budget ||
8096 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8097 len, base_flags |
8098 ((i == last) ? TXD_FLAG_END : 0),
8099 tmp_mss, vlan)) {
8100 would_hit_hwbug = 1;
8101 break;
8102 }
8103 }
8104 }
8105
8106 if (would_hit_hwbug) {
8107 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8108
8109 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8110 /* If it's a TSO packet, do GSO instead of
8111 * allocating and copying to a large linear SKB
8112 */
8113 if (ip_tot_len) {
8114 iph->check = ip_csum;
8115 iph->tot_len = ip_tot_len;
8116 }
8117 tcph->check = tcp_csum;
8118 return tg3_tso_bug(tp, tnapi, txq, skb);
8119 }
8120
8121 /* If the workaround fails due to memory/mapping
8122 * failure, silently drop this packet.
8123 */
8124 entry = tnapi->tx_prod;
8125 budget = tg3_tx_avail(tnapi);
8126 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8127 base_flags, mss, vlan))
8128 goto drop_nofree;
8129 }
8130
8131 skb_tx_timestamp(skb);
8132 netdev_tx_sent_queue(txq, skb->len);
8133
8134 /* Sync BD data before updating mailbox */
8135 wmb();
8136
8137 tnapi->tx_prod = entry;
8138 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8139 netif_tx_stop_queue(txq);
8140
8141 /* netif_tx_stop_queue() must be done before checking
8142 * tx index in tg3_tx_avail() below, because in
8143 * tg3_tx(), we update tx index before checking for
8144 * netif_tx_queue_stopped().
8145 */
8146 smp_mb();
8147 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8148 netif_tx_wake_queue(txq);
8149 }
8150
8151 if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8152 /* Packets are ready, update Tx producer idx on card. */
8153 tw32_tx_mbox(tnapi->prodmbox, entry);
8154 mmiowb();
8155 }
8156
8157 return NETDEV_TX_OK;
8158
8159 dma_error:
8160 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8161 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8162 drop:
8163 dev_kfree_skb_any(skb);
8164 drop_nofree:
8165 tp->tx_dropped++;
8166 return NETDEV_TX_OK;
8167 }
8168
8169 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8170 {
8171 if (enable) {
8172 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8173 MAC_MODE_PORT_MODE_MASK);
8174
8175 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8176
8177 if (!tg3_flag(tp, 5705_PLUS))
8178 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8179
8180 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8181 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8182 else
8183 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8184 } else {
8185 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8186
8187 if (tg3_flag(tp, 5705_PLUS) ||
8188 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8189 tg3_asic_rev(tp) == ASIC_REV_5700)
8190 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8191 }
8192
8193 tw32(MAC_MODE, tp->mac_mode);
8194 udelay(40);
8195 }
8196
8197 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8198 {
8199 u32 val, bmcr, mac_mode, ptest = 0;
8200
8201 tg3_phy_toggle_apd(tp, false);
8202 tg3_phy_toggle_automdix(tp, false);
8203
8204 if (extlpbk && tg3_phy_set_extloopbk(tp))
8205 return -EIO;
8206
8207 bmcr = BMCR_FULLDPLX;
8208 switch (speed) {
8209 case SPEED_10:
8210 break;
8211 case SPEED_100:
8212 bmcr |= BMCR_SPEED100;
8213 break;
8214 case SPEED_1000:
8215 default:
8216 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8217 speed = SPEED_100;
8218 bmcr |= BMCR_SPEED100;
8219 } else {
8220 speed = SPEED_1000;
8221 bmcr |= BMCR_SPEED1000;
8222 }
8223 }
8224
8225 if (extlpbk) {
8226 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8227 tg3_readphy(tp, MII_CTRL1000, &val);
8228 val |= CTL1000_AS_MASTER |
8229 CTL1000_ENABLE_MASTER;
8230 tg3_writephy(tp, MII_CTRL1000, val);
8231 } else {
8232 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8233 MII_TG3_FET_PTEST_TRIM_2;
8234 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8235 }
8236 } else
8237 bmcr |= BMCR_LOOPBACK;
8238
8239 tg3_writephy(tp, MII_BMCR, bmcr);
8240
8241 /* The write needs to be flushed for the FETs */
8242 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8243 tg3_readphy(tp, MII_BMCR, &bmcr);
8244
8245 udelay(40);
8246
8247 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8248 tg3_asic_rev(tp) == ASIC_REV_5785) {
8249 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8250 MII_TG3_FET_PTEST_FRC_TX_LINK |
8251 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8252
8253 /* The write needs to be flushed for the AC131 */
8254 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8255 }
8256
8257 /* Reset to prevent losing 1st rx packet intermittently */
8258 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8259 tg3_flag(tp, 5780_CLASS)) {
8260 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8261 udelay(10);
8262 tw32_f(MAC_RX_MODE, tp->rx_mode);
8263 }
8264
8265 mac_mode = tp->mac_mode &
8266 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8267 if (speed == SPEED_1000)
8268 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8269 else
8270 mac_mode |= MAC_MODE_PORT_MODE_MII;
8271
8272 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8273 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8274
8275 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8276 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8277 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8278 mac_mode |= MAC_MODE_LINK_POLARITY;
8279
8280 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8281 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8282 }
8283
8284 tw32(MAC_MODE, mac_mode);
8285 udelay(40);
8286
8287 return 0;
8288 }
8289
8290 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8291 {
8292 struct tg3 *tp = netdev_priv(dev);
8293
8294 if (features & NETIF_F_LOOPBACK) {
8295 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8296 return;
8297
8298 spin_lock_bh(&tp->lock);
8299 tg3_mac_loopback(tp, true);
8300 netif_carrier_on(tp->dev);
8301 spin_unlock_bh(&tp->lock);
8302 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8303 } else {
8304 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8305 return;
8306
8307 spin_lock_bh(&tp->lock);
8308 tg3_mac_loopback(tp, false);
8309 /* Force link status check */
8310 tg3_setup_phy(tp, true);
8311 spin_unlock_bh(&tp->lock);
8312 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8313 }
8314 }
8315
8316 static netdev_features_t tg3_fix_features(struct net_device *dev,
8317 netdev_features_t features)
8318 {
8319 struct tg3 *tp = netdev_priv(dev);
8320
8321 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8322 features &= ~NETIF_F_ALL_TSO;
8323
8324 return features;
8325 }
8326
8327 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8328 {
8329 netdev_features_t changed = dev->features ^ features;
8330
8331 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8332 tg3_set_loopback(dev, features);
8333
8334 return 0;
8335 }
8336
8337 static void tg3_rx_prodring_free(struct tg3 *tp,
8338 struct tg3_rx_prodring_set *tpr)
8339 {
8340 int i;
8341
8342 if (tpr != &tp->napi[0].prodring) {
8343 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8344 i = (i + 1) & tp->rx_std_ring_mask)
8345 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8346 tp->rx_pkt_map_sz);
8347
8348 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8349 for (i = tpr->rx_jmb_cons_idx;
8350 i != tpr->rx_jmb_prod_idx;
8351 i = (i + 1) & tp->rx_jmb_ring_mask) {
8352 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8353 TG3_RX_JMB_MAP_SZ);
8354 }
8355 }
8356
8357 return;
8358 }
8359
8360 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8361 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8362 tp->rx_pkt_map_sz);
8363
8364 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8365 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8366 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8367 TG3_RX_JMB_MAP_SZ);
8368 }
8369 }
8370
8371 /* Initialize rx rings for packet processing.
8372 *
8373 * The chip has been shut down and the driver detached from
8374 * the networking, so no interrupts or new tx packets will
8375 * end up in the driver. tp->{tx,}lock are held and thus
8376 * we may not sleep.
8377 */
8378 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8379 struct tg3_rx_prodring_set *tpr)
8380 {
8381 u32 i, rx_pkt_dma_sz;
8382
8383 tpr->rx_std_cons_idx = 0;
8384 tpr->rx_std_prod_idx = 0;
8385 tpr->rx_jmb_cons_idx = 0;
8386 tpr->rx_jmb_prod_idx = 0;
8387
8388 if (tpr != &tp->napi[0].prodring) {
8389 memset(&tpr->rx_std_buffers[0], 0,
8390 TG3_RX_STD_BUFF_RING_SIZE(tp));
8391 if (tpr->rx_jmb_buffers)
8392 memset(&tpr->rx_jmb_buffers[0], 0,
8393 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8394 goto done;
8395 }
8396
8397 /* Zero out all descriptors. */
8398 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8399
8400 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8401 if (tg3_flag(tp, 5780_CLASS) &&
8402 tp->dev->mtu > ETH_DATA_LEN)
8403 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8404 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8405
8406 /* Initialize invariants of the rings; we only set this
8407 * stuff once. This works because the card does not
8408 * write into the rx buffer posting rings.
8409 */
8410 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8411 struct tg3_rx_buffer_desc *rxd;
8412
8413 rxd = &tpr->rx_std[i];
8414 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8415 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8416 rxd->opaque = (RXD_OPAQUE_RING_STD |
8417 (i << RXD_OPAQUE_INDEX_SHIFT));
8418 }
8419
8420 /* Now allocate fresh SKBs for each rx ring. */
8421 for (i = 0; i < tp->rx_pending; i++) {
8422 unsigned int frag_size;
8423
8424 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8425 &frag_size) < 0) {
8426 netdev_warn(tp->dev,
8427 "Using a smaller RX standard ring. Only "
8428 "%d out of %d buffers were allocated "
8429 "successfully\n", i, tp->rx_pending);
8430 if (i == 0)
8431 goto initfail;
8432 tp->rx_pending = i;
8433 break;
8434 }
8435 }
8436
8437 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8438 goto done;
8439
8440 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8441
8442 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8443 goto done;
8444
8445 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8446 struct tg3_rx_buffer_desc *rxd;
8447
8448 rxd = &tpr->rx_jmb[i].std;
8449 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8450 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8451 RXD_FLAG_JUMBO;
8452 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8453 (i << RXD_OPAQUE_INDEX_SHIFT));
8454 }
8455
8456 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8457 unsigned int frag_size;
8458
8459 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8460 &frag_size) < 0) {
8461 netdev_warn(tp->dev,
8462 "Using a smaller RX jumbo ring. Only %d "
8463 "out of %d buffers were allocated "
8464 "successfully\n", i, tp->rx_jumbo_pending);
8465 if (i == 0)
8466 goto initfail;
8467 tp->rx_jumbo_pending = i;
8468 break;
8469 }
8470 }
8471
8472 done:
8473 return 0;
8474
8475 initfail:
8476 tg3_rx_prodring_free(tp, tpr);
8477 return -ENOMEM;
8478 }
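
/* The opaque field initialized above is how completions are matched
 * back to producer-ring slots: RXD_OPAQUE_RING_STD/_JUMBO selects the
 * ring and the low bits carry the slot index.  Illustratively,
 * opaque = RXD_OPAQUE_RING_STD | (42 << RXD_OPAQUE_INDEX_SHIFT)
 * names standard-ring entry 42 when the chip returns the descriptor.
 */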
8479
8480 static void tg3_rx_prodring_fini(struct tg3 *tp,
8481 struct tg3_rx_prodring_set *tpr)
8482 {
8483 kfree(tpr->rx_std_buffers);
8484 tpr->rx_std_buffers = NULL;
8485 kfree(tpr->rx_jmb_buffers);
8486 tpr->rx_jmb_buffers = NULL;
8487 if (tpr->rx_std) {
8488 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8489 tpr->rx_std, tpr->rx_std_mapping);
8490 tpr->rx_std = NULL;
8491 }
8492 if (tpr->rx_jmb) {
8493 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8494 tpr->rx_jmb, tpr->rx_jmb_mapping);
8495 tpr->rx_jmb = NULL;
8496 }
8497 }
8498
8499 static int tg3_rx_prodring_init(struct tg3 *tp,
8500 struct tg3_rx_prodring_set *tpr)
8501 {
8502 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8503 GFP_KERNEL);
8504 if (!tpr->rx_std_buffers)
8505 return -ENOMEM;
8506
8507 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8508 TG3_RX_STD_RING_BYTES(tp),
8509 &tpr->rx_std_mapping,
8510 GFP_KERNEL);
8511 if (!tpr->rx_std)
8512 goto err_out;
8513
8514 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8515 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8516 GFP_KERNEL);
8517 if (!tpr->rx_jmb_buffers)
8518 goto err_out;
8519
8520 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8521 TG3_RX_JMB_RING_BYTES(tp),
8522 &tpr->rx_jmb_mapping,
8523 GFP_KERNEL);
8524 if (!tpr->rx_jmb)
8525 goto err_out;
8526 }
8527
8528 return 0;
8529
8530 err_out:
8531 tg3_rx_prodring_fini(tp, tpr);
8532 return -ENOMEM;
8533 }
8534
8535 /* Free up pending packets in all rx/tx rings.
8536 *
8537 * The chip has been shut down and the driver detached from
8538 * the networking, so no interrupts or new tx packets will
8539 * end up in the driver. tp->{tx,}lock is not held and we are not
8540 * in an interrupt context and thus may sleep.
8541 */
8542 static void tg3_free_rings(struct tg3 *tp)
8543 {
8544 int i, j;
8545
8546 for (j = 0; j < tp->irq_cnt; j++) {
8547 struct tg3_napi *tnapi = &tp->napi[j];
8548
8549 tg3_rx_prodring_free(tp, &tnapi->prodring);
8550
8551 if (!tnapi->tx_buffers)
8552 continue;
8553
8554 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8555 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8556
8557 if (!skb)
8558 continue;
8559
8560 tg3_tx_skb_unmap(tnapi, i,
8561 skb_shinfo(skb)->nr_frags - 1);
8562
8563 dev_consume_skb_any(skb);
8564 }
8565 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8566 }
8567 }
8568
8569 /* Initialize tx/rx rings for packet processing.
8570 *
8571 * The chip has been shut down and the driver detached from
8572 * the networking, so no interrupts or new tx packets will
8573 * end up in the driver. tp->{tx,}lock are held and thus
8574 * we may not sleep.
8575 */
8576 static int tg3_init_rings(struct tg3 *tp)
8577 {
8578 int i;
8579
8580 /* Free up all the SKBs. */
8581 tg3_free_rings(tp);
8582
8583 for (i = 0; i < tp->irq_cnt; i++) {
8584 struct tg3_napi *tnapi = &tp->napi[i];
8585
8586 tnapi->last_tag = 0;
8587 tnapi->last_irq_tag = 0;
8588 tnapi->hw_status->status = 0;
8589 tnapi->hw_status->status_tag = 0;
8590 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8591
8592 tnapi->tx_prod = 0;
8593 tnapi->tx_cons = 0;
8594 if (tnapi->tx_ring)
8595 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8596
8597 tnapi->rx_rcb_ptr = 0;
8598 if (tnapi->rx_rcb)
8599 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8600
8601 if (tnapi->prodring.rx_std &&
8602 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8603 tg3_free_rings(tp);
8604 return -ENOMEM;
8605 }
8606 }
8607
8608 return 0;
8609 }
8610
8611 static void tg3_mem_tx_release(struct tg3 *tp)
8612 {
8613 int i;
8614
8615 for (i = 0; i < tp->irq_max; i++) {
8616 struct tg3_napi *tnapi = &tp->napi[i];
8617
8618 if (tnapi->tx_ring) {
8619 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8620 tnapi->tx_ring, tnapi->tx_desc_mapping);
8621 tnapi->tx_ring = NULL;
8622 }
8623
8624 kfree(tnapi->tx_buffers);
8625 tnapi->tx_buffers = NULL;
8626 }
8627 }
8628
8629 static int tg3_mem_tx_acquire(struct tg3 *tp)
8630 {
8631 int i;
8632 struct tg3_napi *tnapi = &tp->napi[0];
8633
8634 /* If multivector TSS is enabled, vector 0 does not handle
8635 * tx interrupts. Don't allocate any resources for it.
8636 */
8637 if (tg3_flag(tp, ENABLE_TSS))
8638 tnapi++;
8639
8640 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8641 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8642 sizeof(struct tg3_tx_ring_info),
8643 GFP_KERNEL);
8644 if (!tnapi->tx_buffers)
8645 goto err_out;
8646
8647 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8648 TG3_TX_RING_BYTES,
8649 &tnapi->tx_desc_mapping,
8650 GFP_KERNEL);
8651 if (!tnapi->tx_ring)
8652 goto err_out;
8653 }
8654
8655 return 0;
8656
8657 err_out:
8658 tg3_mem_tx_release(tp);
8659 return -ENOMEM;
8660 }
8661
8662 static void tg3_mem_rx_release(struct tg3 *tp)
8663 {
8664 int i;
8665
8666 for (i = 0; i < tp->irq_max; i++) {
8667 struct tg3_napi *tnapi = &tp->napi[i];
8668
8669 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8670
8671 if (!tnapi->rx_rcb)
8672 continue;
8673
8674 dma_free_coherent(&tp->pdev->dev,
8675 TG3_RX_RCB_RING_BYTES(tp),
8676 tnapi->rx_rcb,
8677 tnapi->rx_rcb_mapping);
8678 tnapi->rx_rcb = NULL;
8679 }
8680 }
8681
8682 static int tg3_mem_rx_acquire(struct tg3 *tp)
8683 {
8684 unsigned int i, limit;
8685
8686 limit = tp->rxq_cnt;
8687
8688 /* If RSS is enabled, we need a (dummy) producer ring
8689 * set on vector zero. This is the true hw prodring.
8690 */
8691 if (tg3_flag(tp, ENABLE_RSS))
8692 limit++;
8693
8694 for (i = 0; i < limit; i++) {
8695 struct tg3_napi *tnapi = &tp->napi[i];
8696
8697 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8698 goto err_out;
8699
8700 /* If multivector RSS is enabled, vector 0
8701 * does not handle rx or tx interrupts.
8702 * Don't allocate any resources for it.
8703 */
8704 if (!i && tg3_flag(tp, ENABLE_RSS))
8705 continue;
8706
8707 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8708 TG3_RX_RCB_RING_BYTES(tp),
8709 &tnapi->rx_rcb_mapping,
8710 GFP_KERNEL);
8711 if (!tnapi->rx_rcb)
8712 goto err_out;
8713 }
8714
8715 return 0;
8716
8717 err_out:
8718 tg3_mem_rx_release(tp);
8719 return -ENOMEM;
8720 }
8721
8722 /*
8723 * Must not be invoked with interrupt sources disabled and
8724 * the hardware shut down.
8725 */
8726 static void tg3_free_consistent(struct tg3 *tp)
8727 {
8728 int i;
8729
8730 for (i = 0; i < tp->irq_cnt; i++) {
8731 struct tg3_napi *tnapi = &tp->napi[i];
8732
8733 if (tnapi->hw_status) {
8734 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8735 tnapi->hw_status,
8736 tnapi->status_mapping);
8737 tnapi->hw_status = NULL;
8738 }
8739 }
8740
8741 tg3_mem_rx_release(tp);
8742 tg3_mem_tx_release(tp);
8743
8744 /* tp->hw_stats can be referenced safely:
8745 * 1. under rtnl_lock
8746 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8747 */
8748 if (tp->hw_stats) {
8749 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8750 tp->hw_stats, tp->stats_mapping);
8751 tp->hw_stats = NULL;
8752 }
8753 }
8754
8755 /*
8756 * Must not be invoked with interrupt sources disabled and
8757 * the hardware shut down. Can sleep.
8758 */
8759 static int tg3_alloc_consistent(struct tg3 *tp)
8760 {
8761 int i;
8762
8763 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8764 sizeof(struct tg3_hw_stats),
8765 &tp->stats_mapping, GFP_KERNEL);
8766 if (!tp->hw_stats)
8767 goto err_out;
8768
8769 for (i = 0; i < tp->irq_cnt; i++) {
8770 struct tg3_napi *tnapi = &tp->napi[i];
8771 struct tg3_hw_status *sblk;
8772
8773 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8774 TG3_HW_STATUS_SIZE,
8775 &tnapi->status_mapping,
8776 GFP_KERNEL);
8777 if (!tnapi->hw_status)
8778 goto err_out;
8779
8780 sblk = tnapi->hw_status;
8781
8782 if (tg3_flag(tp, ENABLE_RSS)) {
8783 u16 *prodptr = NULL;
8784
8785 /*
8786 * When RSS is enabled, the status block format changes
8787 * slightly. The "rx_jumbo_consumer", "reserved",
8788 * and "rx_mini_consumer" members get mapped to the
8789 * other three rx return ring producer indexes.
8790 */
8791 switch (i) {
8792 case 1:
8793 prodptr = &sblk->idx[0].rx_producer;
8794 break;
8795 case 2:
8796 prodptr = &sblk->rx_jumbo_consumer;
8797 break;
8798 case 3:
8799 prodptr = &sblk->reserved;
8800 break;
8801 case 4:
8802 prodptr = &sblk->rx_mini_consumer;
8803 break;
8804 }
8805 tnapi->rx_rcb_prod_idx = prodptr;
8806 } else {
8807 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8808 }
8809 }
8810
8811 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8812 goto err_out;
8813
8814 return 0;
8815
8816 err_out:
8817 tg3_free_consistent(tp);
8818 return -ENOMEM;
8819 }
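
/* Summary of the RSS status-block remapping above; with RSS enabled
 * the per-vector return-ring producer index is read from:
 *
 *   vector 1 -> sblk->idx[0].rx_producer
 *   vector 2 -> sblk->rx_jumbo_consumer  (field repurposed)
 *   vector 3 -> sblk->reserved           (field repurposed)
 *   vector 4 -> sblk->rx_mini_consumer   (field repurposed)
 */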
8820
8821 #define MAX_WAIT_CNT 1000
8822
8823 /* To stop a block, clear the enable bit and poll till it
8824 * clears. tp->lock is held.
8825 */
8826 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8827 {
8828 unsigned int i;
8829 u32 val;
8830
8831 if (tg3_flag(tp, 5705_PLUS)) {
8832 switch (ofs) {
8833 case RCVLSC_MODE:
8834 case DMAC_MODE:
8835 case MBFREE_MODE:
8836 case BUFMGR_MODE:
8837 case MEMARB_MODE:
8838 /* We can't enable/disable these bits of the
8839 * 5705/5750, just say success.
8840 */
8841 return 0;
8842
8843 default:
8844 break;
8845 }
8846 }
8847
8848 val = tr32(ofs);
8849 val &= ~enable_bit;
8850 tw32_f(ofs, val);
8851
8852 for (i = 0; i < MAX_WAIT_CNT; i++) {
8853 if (pci_channel_offline(tp->pdev)) {
8854 dev_err(&tp->pdev->dev,
8855 "tg3_stop_block device offline, "
8856 "ofs=%lx enable_bit=%x\n",
8857 ofs, enable_bit);
8858 return -ENODEV;
8859 }
8860
8861 udelay(100);
8862 val = tr32(ofs);
8863 if ((val & enable_bit) == 0)
8864 break;
8865 }
8866
8867 if (i == MAX_WAIT_CNT && !silent) {
8868 dev_err(&tp->pdev->dev,
8869 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8870 ofs, enable_bit);
8871 return -ENODEV;
8872 }
8873
8874 return 0;
8875 }
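
/* Usage note (arithmetic only): the poll above runs up to
 * MAX_WAIT_CNT iterations of udelay(100), i.e. 1000 * 100us = 100ms,
 * before giving up with -ENODEV.  A typical call, as in
 * tg3_abort_hw(), is tg3_stop_block(tp, RCVBDI_MODE,
 * RCVBDI_MODE_ENABLE, silent).
 */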
8876
8877 /* tp->lock is held. */
8878 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8879 {
8880 int i, err;
8881
8882 tg3_disable_ints(tp);
8883
8884 if (pci_channel_offline(tp->pdev)) {
8885 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8886 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8887 err = -ENODEV;
8888 goto err_no_dev;
8889 }
8890
8891 tp->rx_mode &= ~RX_MODE_ENABLE;
8892 tw32_f(MAC_RX_MODE, tp->rx_mode);
8893 udelay(10);
8894
8895 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8896 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8897 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8898 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8899 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8900 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8901
8902 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8903 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8904 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8905 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8906 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8907 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8908 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8909
8910 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8911 tw32_f(MAC_MODE, tp->mac_mode);
8912 udelay(40);
8913
8914 tp->tx_mode &= ~TX_MODE_ENABLE;
8915 tw32_f(MAC_TX_MODE, tp->tx_mode);
8916
8917 for (i = 0; i < MAX_WAIT_CNT; i++) {
8918 udelay(100);
8919 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8920 break;
8921 }
8922 if (i >= MAX_WAIT_CNT) {
8923 dev_err(&tp->pdev->dev,
8924 "%s timed out, TX_MODE_ENABLE will not clear "
8925 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8926 err |= -ENODEV;
8927 }
8928
8929 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8930 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8931 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8932
8933 tw32(FTQ_RESET, 0xffffffff);
8934 tw32(FTQ_RESET, 0x00000000);
8935
8936 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8937 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8938
8939 err_no_dev:
8940 for (i = 0; i < tp->irq_cnt; i++) {
8941 struct tg3_napi *tnapi = &tp->napi[i];
8942 if (tnapi->hw_status)
8943 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8944 }
8945
8946 return err;
8947 }
8948
8949 /* Save PCI command register before chip reset */
8950 static void tg3_save_pci_state(struct tg3 *tp)
8951 {
8952 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8953 }
8954
8955 /* Restore PCI state after chip reset */
8956 static void tg3_restore_pci_state(struct tg3 *tp)
8957 {
8958 u32 val;
8959
8960 /* Re-enable indirect register accesses. */
8961 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8962 tp->misc_host_ctrl);
8963
8964 /* Set MAX PCI retry to zero. */
8965 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8966 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8967 tg3_flag(tp, PCIX_MODE))
8968 val |= PCISTATE_RETRY_SAME_DMA;
8969 /* Allow reads and writes to the APE register and memory space. */
8970 if (tg3_flag(tp, ENABLE_APE))
8971 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8972 PCISTATE_ALLOW_APE_SHMEM_WR |
8973 PCISTATE_ALLOW_APE_PSPACE_WR;
8974 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8975
8976 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8977
8978 if (!tg3_flag(tp, PCI_EXPRESS)) {
8979 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8980 tp->pci_cacheline_sz);
8981 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8982 tp->pci_lat_timer);
8983 }
8984
8985 /* Make sure PCI-X relaxed ordering bit is clear. */
8986 if (tg3_flag(tp, PCIX_MODE)) {
8987 u16 pcix_cmd;
8988
8989 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8990 &pcix_cmd);
8991 pcix_cmd &= ~PCI_X_CMD_ERO;
8992 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8993 pcix_cmd);
8994 }
8995
8996 if (tg3_flag(tp, 5780_CLASS)) {
8997
8998 /* Chip reset on 5780 will reset MSI enable bit,
8999 * so need to restore it.
9000 */
9001 if (tg3_flag(tp, USING_MSI)) {
9002 u16 ctrl;
9003
9004 pci_read_config_word(tp->pdev,
9005 tp->msi_cap + PCI_MSI_FLAGS,
9006 &ctrl);
9007 pci_write_config_word(tp->pdev,
9008 tp->msi_cap + PCI_MSI_FLAGS,
9009 ctrl | PCI_MSI_FLAGS_ENABLE);
9010 val = tr32(MSGINT_MODE);
9011 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9012 }
9013 }
9014 }
9015
9016 static void tg3_override_clk(struct tg3 *tp)
9017 {
9018 u32 val;
9019
9020 switch (tg3_asic_rev(tp)) {
9021 case ASIC_REV_5717:
9022 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9023 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9024 TG3_CPMU_MAC_ORIDE_ENABLE);
9025 break;
9026
9027 case ASIC_REV_5719:
9028 case ASIC_REV_5720:
9029 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9030 break;
9031
9032 default:
9033 return;
9034 }
9035 }
9036
9037 static void tg3_restore_clk(struct tg3 *tp)
9038 {
9039 u32 val;
9040
9041 switch (tg3_asic_rev(tp)) {
9042 case ASIC_REV_5717:
9043 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9044 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9045 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9046 break;
9047
9048 case ASIC_REV_5719:
9049 case ASIC_REV_5720:
9050 val = tr32(TG3_CPMU_CLCK_ORIDE);
9051 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9052 break;
9053
9054 default:
9055 return;
9056 }
9057 }
9058
9059 /* tp->lock is held. */
9060 static int tg3_chip_reset(struct tg3 *tp)
9061 __releases(tp->lock)
9062 __acquires(tp->lock)
9063 {
9064 u32 val;
9065 void (*write_op)(struct tg3 *, u32, u32);
9066 int i, err;
9067
9068 if (!pci_device_is_present(tp->pdev))
9069 return -ENODEV;
9070
9071 tg3_nvram_lock(tp);
9072
9073 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9074
9075 /* No matching tg3_nvram_unlock() after this because
9076 * chip reset below will undo the nvram lock.
9077 */
9078 tp->nvram_lock_cnt = 0;
9079
9080 /* GRC_MISC_CFG core clock reset will clear the memory
9081 * enable bit in PCI register 4 and the MSI enable bit
9082 * on some chips, so we save relevant registers here.
9083 */
9084 tg3_save_pci_state(tp);
9085
9086 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9087 tg3_flag(tp, 5755_PLUS))
9088 tw32(GRC_FASTBOOT_PC, 0);
9089
9090 /*
9091 * We must avoid the readl() that normally takes place.
9092 * It locks machines, causes machine checks, and other
9093 * fun things. So, temporarily disable the 5701
9094 * hardware workaround, while we do the reset.
9095 */
9096 write_op = tp->write32;
9097 if (write_op == tg3_write_flush_reg32)
9098 tp->write32 = tg3_write32;
9099
9100 /* Prevent the irq handler from reading or writing PCI registers
9101 * during chip reset when the memory enable bit in the PCI command
9102 * register may be cleared. The chip does not generate an interrupt
9103 * at this time, but the irq handler may still be called due to irq
9104 * sharing or irqpoll.
9105 */
9106 tg3_flag_set(tp, CHIP_RESETTING);
9107 for (i = 0; i < tp->irq_cnt; i++) {
9108 struct tg3_napi *tnapi = &tp->napi[i];
9109 if (tnapi->hw_status) {
9110 tnapi->hw_status->status = 0;
9111 tnapi->hw_status->status_tag = 0;
9112 }
9113 tnapi->last_tag = 0;
9114 tnapi->last_irq_tag = 0;
9115 }
9116 smp_mb();
9117
9118 tg3_full_unlock(tp);
9119
9120 for (i = 0; i < tp->irq_cnt; i++)
9121 synchronize_irq(tp->napi[i].irq_vec);
9122
9123 tg3_full_lock(tp, 0);
9124
9125 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9126 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9127 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9128 }
9129
9130 /* do the reset */
9131 val = GRC_MISC_CFG_CORECLK_RESET;
9132
9133 if (tg3_flag(tp, PCI_EXPRESS)) {
9134 /* Force PCIe 1.0a mode */
9135 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9136 !tg3_flag(tp, 57765_PLUS) &&
9137 tr32(TG3_PCIE_PHY_TSTCTL) ==
9138 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9139 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9140
9141 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9142 tw32(GRC_MISC_CFG, (1 << 29));
9143 val |= (1 << 29);
9144 }
9145 }
9146
9147 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9148 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9149 tw32(GRC_VCPU_EXT_CTRL,
9150 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9151 }
9152
9153 /* Set the clock to the highest frequency to avoid timeouts. With link
9154 * aware mode, the clock speed could be slow and bootcode does not
9155 * complete within the expected time. Override the clock to allow the
9156 * bootcode to finish sooner and then restore it.
9157 */
9158 tg3_override_clk(tp);
9159
9160 /* Manage gphy power for all CPMU absent PCIe devices. */
9161 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9162 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9163
9164 tw32(GRC_MISC_CFG, val);
9165
9166 /* restore 5701 hardware bug workaround write method */
9167 tp->write32 = write_op;
9168
9169 /* Unfortunately, we have to delay before the PCI read back.
9170 * Some 575X chips will not even respond to a PCI cfg access
9171 * when the reset command is given to the chip.
9172 *
9173 * How do these hardware designers expect things to work
9174 * properly if the PCI write is posted for a long period
9175 * of time? It is always necessary to have some method by
9176 * which a register read back can occur to push the write
9177 * out which does the reset.
9178 *
9179 * For most tg3 variants the trick below was working.
9180 * Ho hum...
9181 */
9182 udelay(120);
9183
9184 /* Flush PCI posted writes. The normal MMIO registers
9185 * are inaccessible at this time so this is the only
9186 * way to do this reliably (actually, this is no longer
9187 * the case, see above). I tried to use indirect
9188 * register read/write but this upset some 5701 variants.
9189 */
9190 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9191
9192 udelay(120);
9193
9194 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9195 u16 val16;
9196
9197 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9198 int j;
9199 u32 cfg_val;
9200
9201 /* Wait for link training to complete. */
9202 for (j = 0; j < 5000; j++)
9203 udelay(100);
9204
9205 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9206 pci_write_config_dword(tp->pdev, 0xc4,
9207 cfg_val | (1 << 15));
9208 }
9209
9210 /* Clear the "no snoop" and "relaxed ordering" bits. */
9211 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9212 /*
9213 * Older PCIe devices only support the 128-byte
9214 * MPS setting. Enforce the restriction.
9215 */
9216 if (!tg3_flag(tp, CPMU_PRESENT))
9217 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9218 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9219
9220 /* Clear error status */
9221 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9222 PCI_EXP_DEVSTA_CED |
9223 PCI_EXP_DEVSTA_NFED |
9224 PCI_EXP_DEVSTA_FED |
9225 PCI_EXP_DEVSTA_URD);
9226 }
9227
9228 tg3_restore_pci_state(tp);
9229
9230 tg3_flag_clear(tp, CHIP_RESETTING);
9231 tg3_flag_clear(tp, ERROR_PROCESSED);
9232
9233 val = 0;
9234 if (tg3_flag(tp, 5780_CLASS))
9235 val = tr32(MEMARB_MODE);
9236 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9237
9238 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9239 tg3_stop_fw(tp);
9240 tw32(0x5000, 0x400);
9241 }
9242
9243 if (tg3_flag(tp, IS_SSB_CORE)) {
9244 /*
9245 * BCM4785: In order to avoid repercussions from using
9246 * potentially defective internal ROM, stop the Rx RISC CPU,
9247 * which is not required for operation.
9248 */
9249 tg3_stop_fw(tp);
9250 tg3_halt_cpu(tp, RX_CPU_BASE);
9251 }
9252
9253 err = tg3_poll_fw(tp);
9254 if (err)
9255 return err;
9256
9257 tw32(GRC_MODE, tp->grc_mode);
9258
9259 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9260 val = tr32(0xc4);
9261
9262 tw32(0xc4, val | (1 << 15));
9263 }
9264
9265 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9266 tg3_asic_rev(tp) == ASIC_REV_5705) {
9267 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9268 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9269 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9270 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9271 }
9272
9273 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9274 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9275 val = tp->mac_mode;
9276 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9277 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9278 val = tp->mac_mode;
9279 } else
9280 val = 0;
9281
9282 tw32_f(MAC_MODE, val);
9283 udelay(40);
9284
9285 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9286
9287 tg3_mdio_start(tp);
9288
9289 if (tg3_flag(tp, PCI_EXPRESS) &&
9290 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9291 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9292 !tg3_flag(tp, 57765_PLUS)) {
9293 val = tr32(0x7c00);
9294
9295 tw32(0x7c00, val | (1 << 25));
9296 }
9297
9298 tg3_restore_clk(tp);
9299
9300 /* Increase the core clock speed to fix tx timeout issue for 5762
9301 * with 100Mbps link speed.
9302 */
9303 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9304 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9305 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9306 TG3_CPMU_MAC_ORIDE_ENABLE);
9307 }
9308
9309 /* Reprobe ASF enable state. */
9310 tg3_flag_clear(tp, ENABLE_ASF);
9311 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9312 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9313
9314 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9315 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9316 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9317 u32 nic_cfg;
9318
9319 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9320 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9321 tg3_flag_set(tp, ENABLE_ASF);
9322 tp->last_event_jiffies = jiffies;
9323 if (tg3_flag(tp, 5750_PLUS))
9324 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9325
9326 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9327 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9328 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9329 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9330 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9331 }
9332 }
9333
9334 return 0;
9335 }
9336
9337 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9338 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9339 static void __tg3_set_rx_mode(struct net_device *);
9340
9341 /* tp->lock is held. */
9342 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9343 {
9344 int err;
9345
9346 tg3_stop_fw(tp);
9347
9348 tg3_write_sig_pre_reset(tp, kind);
9349
9350 tg3_abort_hw(tp, silent);
9351 err = tg3_chip_reset(tp);
9352
9353 __tg3_set_mac_addr(tp, false);
9354
9355 tg3_write_sig_legacy(tp, kind);
9356 tg3_write_sig_post_reset(tp, kind);
9357
9358 if (tp->hw_stats) {
9359 /* Save the stats across chip resets... */
9360 tg3_get_nstats(tp, &tp->net_stats_prev);
9361 tg3_get_estats(tp, &tp->estats_prev);
9362
9363 /* And make sure the next sample is new data */
9364 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9365 }
9366
9367 return err;
9368 }
9369
9370 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9371 {
9372 struct tg3 *tp = netdev_priv(dev);
9373 struct sockaddr *addr = p;
9374 int err = 0;
9375 bool skip_mac_1 = false;
9376
9377 if (!is_valid_ether_addr(addr->sa_data))
9378 return -EADDRNOTAVAIL;
9379
9380 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9381
9382 if (!netif_running(dev))
9383 return 0;
9384
9385 if (tg3_flag(tp, ENABLE_ASF)) {
9386 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9387
9388 addr0_high = tr32(MAC_ADDR_0_HIGH);
9389 addr0_low = tr32(MAC_ADDR_0_LOW);
9390 addr1_high = tr32(MAC_ADDR_1_HIGH);
9391 addr1_low = tr32(MAC_ADDR_1_LOW);
9392
9393 /* Skip MAC addr 1 if ASF is using it. */
9394 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9395 !(addr1_high == 0 && addr1_low == 0))
9396 skip_mac_1 = true;
9397 }
9398 spin_lock_bh(&tp->lock);
9399 __tg3_set_mac_addr(tp, skip_mac_1);
9400 __tg3_set_rx_mode(dev);
9401 spin_unlock_bh(&tp->lock);
9402
9403 return err;
9404 }
9405
9406 /* tp->lock is held. */
9407 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9408 dma_addr_t mapping, u32 maxlen_flags,
9409 u32 nic_addr)
9410 {
9411 tg3_write_mem(tp,
9412 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9413 ((u64) mapping >> 32));
9414 tg3_write_mem(tp,
9415 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9416 ((u64) mapping & 0xffffffff));
9417 tg3_write_mem(tp,
9418 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9419 maxlen_flags);
9420
9421 if (!tg3_flag(tp, 5705_PLUS))
9422 tg3_write_mem(tp,
9423 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9424 nic_addr);
9425 }
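
/* A BD info block in NIC SRAM is four 32-bit words, written above in
 * this order (offsets per the TG3_BDINFO_* constants):
 *
 *   +TG3_BDINFO_HOST_ADDR     host ring DMA address, high then low
 *   +TG3_BDINFO_MAXLEN_FLAGS  ring length and BDINFO_FLAGS_* bits
 *   +TG3_BDINFO_NIC_ADDR      NIC-local ring address (pre-5705 only)
 */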
9426
9427
9428 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9429 {
9430 int i = 0;
9431
9432 if (!tg3_flag(tp, ENABLE_TSS)) {
9433 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9434 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9435 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9436 } else {
9437 tw32(HOSTCC_TXCOL_TICKS, 0);
9438 tw32(HOSTCC_TXMAX_FRAMES, 0);
9439 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9440
9441 for (; i < tp->txq_cnt; i++) {
9442 u32 reg;
9443
9444 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9445 tw32(reg, ec->tx_coalesce_usecs);
9446 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9447 tw32(reg, ec->tx_max_coalesced_frames);
9448 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9449 tw32(reg, ec->tx_max_coalesced_frames_irq);
9450 }
9451 }
9452
9453 for (; i < tp->irq_max - 1; i++) {
9454 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9455 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9456 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9457 }
9458 }
9459
9460 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9461 {
9462 int i = 0;
9463 u32 limit = tp->rxq_cnt;
9464
9465 if (!tg3_flag(tp, ENABLE_RSS)) {
9466 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9467 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9468 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9469 limit--;
9470 } else {
9471 tw32(HOSTCC_RXCOL_TICKS, 0);
9472 tw32(HOSTCC_RXMAX_FRAMES, 0);
9473 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9474 }
9475
9476 for (; i < limit; i++) {
9477 u32 reg;
9478
9479 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9480 tw32(reg, ec->rx_coalesce_usecs);
9481 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9482 tw32(reg, ec->rx_max_coalesced_frames);
9483 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9484 tw32(reg, ec->rx_max_coalesced_frames_irq);
9485 }
9486
9487 for (; i < tp->irq_max - 1; i++) {
9488 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9489 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9490 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9491 }
9492 }
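
/* The per-vector coalescing registers touched above sit at a fixed
 * 0x18-byte stride, so for example
 *
 *   HOSTCC_RXCOL_TICKS_VEC1 + 0 * 0x18   programs vector 1
 *   HOSTCC_RXCOL_TICKS_VEC1 + 1 * 0x18   programs vector 2
 *
 * and the trailing loops zero the registers of any unused vectors.
 */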
9493
9494 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9495 {
9496 tg3_coal_tx_init(tp, ec);
9497 tg3_coal_rx_init(tp, ec);
9498
9499 if (!tg3_flag(tp, 5705_PLUS)) {
9500 u32 val = ec->stats_block_coalesce_usecs;
9501
9502 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9503 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9504
9505 if (!tp->link_up)
9506 val = 0;
9507
9508 tw32(HOSTCC_STAT_COAL_TICKS, val);
9509 }
9510 }
9511
9512 /* tp->lock is held. */
9513 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9514 {
9515 u32 txrcb, limit;
9516
9517 /* Disable all transmit rings but the first. */
9518 if (!tg3_flag(tp, 5705_PLUS))
9519 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9520 else if (tg3_flag(tp, 5717_PLUS))
9521 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9522 else if (tg3_flag(tp, 57765_CLASS) ||
9523 tg3_asic_rev(tp) == ASIC_REV_5762)
9524 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9525 else
9526 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9527
9528 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9529 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9530 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9531 BDINFO_FLAGS_DISABLED);
9532 }
9533
9534 /* tp->lock is held. */
9535 static void tg3_tx_rcbs_init(struct tg3 *tp)
9536 {
9537 int i = 0;
9538 u32 txrcb = NIC_SRAM_SEND_RCB;
9539
9540 if (tg3_flag(tp, ENABLE_TSS))
9541 i++;
9542
9543 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9544 struct tg3_napi *tnapi = &tp->napi[i];
9545
9546 if (!tnapi->tx_ring)
9547 continue;
9548
9549 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9550 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9551 NIC_SRAM_TX_BUFFER_DESC);
9552 }
9553 }
9554
9555 /* tp->lock is held. */
9556 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9557 {
9558 u32 rxrcb, limit;
9559
9560 /* Disable all receive return rings but the first. */
9561 if (tg3_flag(tp, 5717_PLUS))
9562 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9563 else if (!tg3_flag(tp, 5705_PLUS))
9564 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9565 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9566 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9567 tg3_flag(tp, 57765_CLASS))
9568 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9569 else
9570 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9571
9572 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9573 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9574 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9575 BDINFO_FLAGS_DISABLED);
9576 }
9577
9578 /* tp->lock is held. */
9579 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9580 {
9581 int i = 0;
9582 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9583
9584 if (tg3_flag(tp, ENABLE_RSS))
9585 i++;
9586
9587 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9588 struct tg3_napi *tnapi = &tp->napi[i];
9589
9590 if (!tnapi->rx_rcb)
9591 continue;
9592
9593 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9594 (tp->rx_ret_ring_mask + 1) <<
9595 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9596 }
9597 }
9598
9599 /* tp->lock is held. */
9600 static void tg3_rings_reset(struct tg3 *tp)
9601 {
9602 int i;
9603 u32 stblk;
9604 struct tg3_napi *tnapi = &tp->napi[0];
9605
9606 tg3_tx_rcbs_disable(tp);
9607
9608 tg3_rx_ret_rcbs_disable(tp);
9609
9610 /* Disable interrupts */
9611 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9612 tp->napi[0].chk_msi_cnt = 0;
9613 tp->napi[0].last_rx_cons = 0;
9614 tp->napi[0].last_tx_cons = 0;
9615
9616 /* Zero mailbox registers. */
9617 if (tg3_flag(tp, SUPPORT_MSIX)) {
9618 for (i = 1; i < tp->irq_max; i++) {
9619 tp->napi[i].tx_prod = 0;
9620 tp->napi[i].tx_cons = 0;
9621 if (tg3_flag(tp, ENABLE_TSS))
9622 tw32_mailbox(tp->napi[i].prodmbox, 0);
9623 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9624 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9625 tp->napi[i].chk_msi_cnt = 0;
9626 tp->napi[i].last_rx_cons = 0;
9627 tp->napi[i].last_tx_cons = 0;
9628 }
9629 if (!tg3_flag(tp, ENABLE_TSS))
9630 tw32_mailbox(tp->napi[0].prodmbox, 0);
9631 } else {
9632 tp->napi[0].tx_prod = 0;
9633 tp->napi[0].tx_cons = 0;
9634 tw32_mailbox(tp->napi[0].prodmbox, 0);
9635 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9636 }
9637
9638 /* Make sure the NIC-based send BD rings are disabled. */
9639 if (!tg3_flag(tp, 5705_PLUS)) {
9640 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9641 for (i = 0; i < 16; i++)
9642 tw32_tx_mbox(mbox + i * 8, 0);
9643 }
9644
9645 /* Clear status block in ram. */
9646 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9647
9648 /* Set status block DMA address */
9649 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9650 ((u64) tnapi->status_mapping >> 32));
9651 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9652 ((u64) tnapi->status_mapping & 0xffffffff));
9653
9654 stblk = HOSTCC_STATBLCK_RING1;
9655
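	/* Status blocks for the remaining vectors occupy consecutive
	 * 8-byte high/low register pairs starting at HOSTCC_STATBLCK_RING1.
	 */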
9656 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9657 u64 mapping = (u64)tnapi->status_mapping;
9658 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9659 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9660 stblk += 8;
9661
9662 /* Clear status block in ram. */
9663 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9664 }
9665
9666 tg3_tx_rcbs_init(tp);
9667 tg3_rx_ret_rcbs_init(tp);
9668 }
9669
9670 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9671 {
9672 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9673
9674 if (!tg3_flag(tp, 5750_PLUS) ||
9675 tg3_flag(tp, 5780_CLASS) ||
9676 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9677 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9678 tg3_flag(tp, 57765_PLUS))
9679 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9680 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9681 tg3_asic_rev(tp) == ASIC_REV_5787)
9682 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9683 else
9684 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9685
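	/* The standard ring replenish threshold is the smaller of half the
	 * NIC-side BD cache (capped at rx_std_max_post) and 1/8 of the host
	 * ring size, with a floor of one descriptor.
	 */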
9686 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9687 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9688
9689 val = min(nic_rep_thresh, host_rep_thresh);
9690 tw32(RCVBDI_STD_THRESH, val);
9691
9692 if (tg3_flag(tp, 57765_PLUS))
9693 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9694
9695 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9696 return;
9697
9698 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9699
9700 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9701
9702 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9703 tw32(RCVBDI_JUMBO_THRESH, val);
9704
9705 if (tg3_flag(tp, 57765_PLUS))
9706 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9707 }
9708
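/* Bitwise little-endian CRC-32 (Ethernet polynomial) over the buffer;
 * used below to hash multicast addresses. This should be equivalent to
 * ~crc32_le(~0, buf, len) from <linux/crc32.h>.
 */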
9709 static inline u32 calc_crc(unsigned char *buf, int len)
9710 {
9711 u32 reg;
9712 u32 tmp;
9713 int j, k;
9714
9715 reg = 0xffffffff;
9716
9717 for (j = 0; j < len; j++) {
9718 reg ^= buf[j];
9719
9720 for (k = 0; k < 8; k++) {
9721 tmp = reg & 0x01;
9722
9723 reg >>= 1;
9724
9725 if (tmp)
9726 reg ^= CRC32_POLY_LE;
9727 }
9728 }
9729
9730 return ~reg;
9731 }
9732
9733 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9734 {
9735 /* accept or reject all multicast frames */
9736 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9737 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9738 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9739 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9740 }
9741
9742 static void __tg3_set_rx_mode(struct net_device *dev)
9743 {
9744 struct tg3 *tp = netdev_priv(dev);
9745 u32 rx_mode;
9746
9747 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9748 RX_MODE_KEEP_VLAN_TAG);
9749
9750 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9751 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9752 * flag clear.
9753 */
9754 if (!tg3_flag(tp, ENABLE_ASF))
9755 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9756 #endif
9757
9758 if (dev->flags & IFF_PROMISC) {
9759 /* Promiscuous mode. */
9760 rx_mode |= RX_MODE_PROMISC;
9761 } else if (dev->flags & IFF_ALLMULTI) {
9762 /* Accept all multicast. */
9763 tg3_set_multi(tp, 1);
9764 } else if (netdev_mc_empty(dev)) {
9765 /* Reject all multicast. */
9766 tg3_set_multi(tp, 0);
9767 } else {
9768 /* Accept one or more multicast(s). */
9769 struct netdev_hw_addr *ha;
9770 u32 mc_filter[4] = { 0, };
9771 u32 regidx;
9772 u32 bit;
9773 u32 crc;
9774
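		/* Hash each address into one of 128 filter bits: the low
		 * seven bits of the inverted CRC select a bit within one
		 * of the four 32-bit MAC_HASH registers.
		 */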
9775 netdev_for_each_mc_addr(ha, dev) {
9776 crc = calc_crc(ha->addr, ETH_ALEN);
9777 bit = ~crc & 0x7f;
9778 regidx = (bit & 0x60) >> 5;
9779 bit &= 0x1f;
9780 mc_filter[regidx] |= (1 << bit);
9781 }
9782
9783 tw32(MAC_HASH_REG_0, mc_filter[0]);
9784 tw32(MAC_HASH_REG_1, mc_filter[1]);
9785 tw32(MAC_HASH_REG_2, mc_filter[2]);
9786 tw32(MAC_HASH_REG_3, mc_filter[3]);
9787 }
9788
9789 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9790 rx_mode |= RX_MODE_PROMISC;
9791 } else if (!(dev->flags & IFF_PROMISC)) {
9792 		/* Add all entries to the MAC address filter list */
9793 int i = 0;
9794 struct netdev_hw_addr *ha;
9795
9796 netdev_for_each_uc_addr(ha, dev) {
9797 __tg3_set_one_mac_addr(tp, ha->addr,
9798 i + TG3_UCAST_ADDR_IDX(tp));
9799 i++;
9800 }
9801 }
9802
9803 if (rx_mode != tp->rx_mode) {
9804 tp->rx_mode = rx_mode;
9805 tw32_f(MAC_RX_MODE, rx_mode);
9806 udelay(10);
9807 }
9808 }
9809
9810 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9811 {
9812 int i;
9813
9814 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9815 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9816 }
9817
9818 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9819 {
9820 int i;
9821
9822 if (!tg3_flag(tp, SUPPORT_MSIX))
9823 return;
9824
9825 if (tp->rxq_cnt == 1) {
9826 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9827 return;
9828 }
9829
9830 	/* Validate table entries against the current rx queue count */
9831 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9832 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9833 break;
9834 }
9835
9836 if (i != TG3_RSS_INDIR_TBL_SIZE)
9837 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9838 }
9839
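/* Pack the indirection table into hardware registers: eight 4-bit
 * queue indices per 32-bit register, first entry in the most
 * significant nibble.
 */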
9840 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9841 {
9842 int i = 0;
9843 u32 reg = MAC_RSS_INDIR_TBL_0;
9844
9845 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9846 u32 val = tp->rss_ind_tbl[i];
9847 i++;
9848 for (; i % 8; i++) {
9849 val <<= 4;
9850 val |= tp->rss_ind_tbl[i];
9851 }
9852 tw32(reg, val);
9853 reg += 4;
9854 }
9855 }
9856
9857 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9858 {
9859 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9860 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9861 else
9862 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9863 }
9864
9865 /* tp->lock is held. */
9866 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9867 {
9868 u32 val, rdmac_mode;
9869 int i, err, limit;
9870 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9871
9872 tg3_disable_ints(tp);
9873
9874 tg3_stop_fw(tp);
9875
9876 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9877
9878 if (tg3_flag(tp, INIT_COMPLETE))
9879 tg3_abort_hw(tp, 1);
9880
9881 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9882 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9883 tg3_phy_pull_config(tp);
9884 tg3_eee_pull_config(tp, NULL);
9885 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9886 }
9887
9888 /* Enable MAC control of LPI */
9889 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9890 tg3_setup_eee(tp);
9891
9892 if (reset_phy)
9893 tg3_phy_reset(tp);
9894
9895 err = tg3_chip_reset(tp);
9896 if (err)
9897 return err;
9898
9899 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9900
9901 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9902 val = tr32(TG3_CPMU_CTRL);
9903 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9904 tw32(TG3_CPMU_CTRL, val);
9905
9906 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9907 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9908 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9909 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9910
9911 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9912 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9913 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9914 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9915
9916 val = tr32(TG3_CPMU_HST_ACC);
9917 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9918 val |= CPMU_HST_ACC_MACCLK_6_25;
9919 tw32(TG3_CPMU_HST_ACC, val);
9920 }
9921
9922 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9923 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9924 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9925 PCIE_PWR_MGMT_L1_THRESH_4MS;
9926 tw32(PCIE_PWR_MGMT_THRESH, val);
9927
9928 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9929 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9930
9931 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9932
9933 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9934 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9935 }
9936
9937 if (tg3_flag(tp, L1PLLPD_EN)) {
9938 u32 grc_mode = tr32(GRC_MODE);
9939
9940 /* Access the lower 1K of PL PCIE block registers. */
9941 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9942 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9943
9944 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9945 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9946 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9947
9948 tw32(GRC_MODE, grc_mode);
9949 }
9950
9951 if (tg3_flag(tp, 57765_CLASS)) {
9952 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9953 u32 grc_mode = tr32(GRC_MODE);
9954
9955 /* Access the lower 1K of PL PCIE block registers. */
9956 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9957 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9958
9959 val = tr32(TG3_PCIE_TLDLPL_PORT +
9960 TG3_PCIE_PL_LO_PHYCTL5);
9961 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9962 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9963
9964 tw32(GRC_MODE, grc_mode);
9965 }
9966
9967 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9968 u32 grc_mode;
9969
9970 /* Fix transmit hangs */
9971 val = tr32(TG3_CPMU_PADRNG_CTL);
9972 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9973 tw32(TG3_CPMU_PADRNG_CTL, val);
9974
9975 grc_mode = tr32(GRC_MODE);
9976
9977 /* Access the lower 1K of DL PCIE block registers. */
9978 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9979 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9980
9981 val = tr32(TG3_PCIE_TLDLPL_PORT +
9982 TG3_PCIE_DL_LO_FTSMAX);
9983 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9984 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9985 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9986
9987 tw32(GRC_MODE, grc_mode);
9988 }
9989
9990 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9991 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9992 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9993 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9994 }
9995
9996 /* This works around an issue with Athlon chipsets on
9997 * B3 tigon3 silicon. This bit has no effect on any
9998 * other revision. But do not set this on PCI Express
9999 * chips and don't even touch the clocks if the CPMU is present.
10000 */
10001 if (!tg3_flag(tp, CPMU_PRESENT)) {
10002 if (!tg3_flag(tp, PCI_EXPRESS))
10003 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10004 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10005 }
10006
10007 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10008 tg3_flag(tp, PCIX_MODE)) {
10009 val = tr32(TG3PCI_PCISTATE);
10010 val |= PCISTATE_RETRY_SAME_DMA;
10011 tw32(TG3PCI_PCISTATE, val);
10012 }
10013
10014 if (tg3_flag(tp, ENABLE_APE)) {
10015 /* Allow reads and writes to the
10016 * APE register and memory space.
10017 */
10018 val = tr32(TG3PCI_PCISTATE);
10019 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10020 PCISTATE_ALLOW_APE_SHMEM_WR |
10021 PCISTATE_ALLOW_APE_PSPACE_WR;
10022 tw32(TG3PCI_PCISTATE, val);
10023 }
10024
10025 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10026 /* Enable some hw fixes. */
10027 val = tr32(TG3PCI_MSI_DATA);
10028 val |= (1 << 26) | (1 << 28) | (1 << 29);
10029 tw32(TG3PCI_MSI_DATA, val);
10030 }
10031
10032 /* Descriptor ring init may make accesses to the
10033 * NIC SRAM area to setup the TX descriptors, so we
10034 * can only do this after the hardware has been
10035 * successfully reset.
10036 */
10037 err = tg3_init_rings(tp);
10038 if (err)
10039 return err;
10040
10041 if (tg3_flag(tp, 57765_PLUS)) {
10042 val = tr32(TG3PCI_DMA_RW_CTRL) &
10043 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10044 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10045 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10046 if (!tg3_flag(tp, 57765_CLASS) &&
10047 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10048 tg3_asic_rev(tp) != ASIC_REV_5762)
10049 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10050 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10051 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10052 tg3_asic_rev(tp) != ASIC_REV_5761) {
10053 /* This value is determined during the probe time DMA
10054 * engine test, tg3_test_dma.
10055 */
10056 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10057 }
10058
10059 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10060 GRC_MODE_4X_NIC_SEND_RINGS |
10061 GRC_MODE_NO_TX_PHDR_CSUM |
10062 GRC_MODE_NO_RX_PHDR_CSUM);
10063 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10064
10065 /* Pseudo-header checksum is done by hardware logic and not
10066 	 * the offload processors, so make the chip do the pseudo-
10067 * header checksums on receive. For transmit it is more
10068 * convenient to do the pseudo-header checksum in software
10069 * as Linux does that on transmit for us in all cases.
10070 */
10071 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10072
10073 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10074 if (tp->rxptpctl)
10075 tw32(TG3_RX_PTP_CTL,
10076 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10077
10078 if (tg3_flag(tp, PTP_CAPABLE))
10079 val |= GRC_MODE_TIME_SYNC_ENABLE;
10080
10081 tw32(GRC_MODE, tp->grc_mode | val);
10082
10083 	/* On some AMD platforms, MRRS is restricted to 4000 because of a
10084 	 * south bridge limitation. As a workaround, the driver sets MRRS
10085 	 * to 2048 instead of the default 4096.
10086 	 */
10087 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10088 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10089 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10090 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10091 }
10092
10093 	/* Set up the timer prescaler register. The clock is always 66 MHz. */
10094 val = tr32(GRC_MISC_CFG);
10095 val &= ~0xff;
10096 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10097 tw32(GRC_MISC_CFG, val);
10098
10099 /* Initialize MBUF/DESC pool. */
10100 if (tg3_flag(tp, 5750_PLUS)) {
10101 /* Do nothing. */
10102 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10103 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10104 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10105 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10106 else
10107 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10108 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10109 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10110 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10111 int fw_len;
10112
10113 fw_len = tp->fw_len;
10114 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10115 tw32(BUFMGR_MB_POOL_ADDR,
10116 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10117 tw32(BUFMGR_MB_POOL_SIZE,
10118 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10119 }
10120
10121 if (tp->dev->mtu <= ETH_DATA_LEN) {
10122 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10123 tp->bufmgr_config.mbuf_read_dma_low_water);
10124 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10125 tp->bufmgr_config.mbuf_mac_rx_low_water);
10126 tw32(BUFMGR_MB_HIGH_WATER,
10127 tp->bufmgr_config.mbuf_high_water);
10128 } else {
10129 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10130 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10131 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10132 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10133 tw32(BUFMGR_MB_HIGH_WATER,
10134 tp->bufmgr_config.mbuf_high_water_jumbo);
10135 }
10136 tw32(BUFMGR_DMA_LOW_WATER,
10137 tp->bufmgr_config.dma_low_water);
10138 tw32(BUFMGR_DMA_HIGH_WATER,
10139 tp->bufmgr_config.dma_high_water);
10140
10141 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10142 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10143 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10144 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10145 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10146 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10147 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10148 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10149 tw32(BUFMGR_MODE, val);
10150 for (i = 0; i < 2000; i++) {
10151 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10152 break;
10153 udelay(10);
10154 }
10155 if (i >= 2000) {
10156 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10157 return -ENODEV;
10158 }
10159
10160 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10161 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10162
10163 tg3_setup_rxbd_thresholds(tp);
10164
10165 /* Initialize TG3_BDINFO's at:
10166 * RCVDBDI_STD_BD: standard eth size rx ring
10167 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10168 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10169 *
10170 * like so:
10171 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10172 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10173 * ring attribute flags
10174 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10175 *
10176 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10177 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10178 *
10179 * The size of each ring is fixed in the firmware, but the location is
10180 * configurable.
10181 */
10182 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10183 ((u64) tpr->rx_std_mapping >> 32));
10184 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10185 ((u64) tpr->rx_std_mapping & 0xffffffff));
10186 if (!tg3_flag(tp, 5717_PLUS))
10187 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10188 NIC_SRAM_RX_BUFFER_DESC);
10189
10190 /* Disable the mini ring */
10191 if (!tg3_flag(tp, 5705_PLUS))
10192 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10193 BDINFO_FLAGS_DISABLED);
10194
10195 /* Program the jumbo buffer descriptor ring control
10196 * blocks on those devices that have them.
10197 */
10198 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10199 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10200
10201 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10202 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10203 ((u64) tpr->rx_jmb_mapping >> 32));
10204 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10205 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10206 val = TG3_RX_JMB_RING_SIZE(tp) <<
10207 BDINFO_FLAGS_MAXLEN_SHIFT;
10208 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10209 val | BDINFO_FLAGS_USE_EXT_RECV);
10210 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10211 tg3_flag(tp, 57765_CLASS) ||
10212 tg3_asic_rev(tp) == ASIC_REV_5762)
10213 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10214 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10215 } else {
10216 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10217 BDINFO_FLAGS_DISABLED);
10218 }
10219
10220 if (tg3_flag(tp, 57765_PLUS)) {
10221 val = TG3_RX_STD_RING_SIZE(tp);
10222 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10223 val |= (TG3_RX_STD_DMA_SZ << 2);
10224 } else
10225 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10226 } else
10227 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10228
10229 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10230
10231 tpr->rx_std_prod_idx = tp->rx_pending;
10232 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10233
10234 tpr->rx_jmb_prod_idx =
10235 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10236 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10237
10238 tg3_rings_reset(tp);
10239
10240 /* Initialize MAC address and backoff seed. */
10241 __tg3_set_mac_addr(tp, false);
10242
10243 /* MTU + ethernet header + FCS + optional VLAN tag */
10244 tw32(MAC_RX_MTU_SIZE,
10245 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10246
10247 /* The slot time is changed by tg3_setup_phy if we
10248 * run at gigabit with half duplex.
10249 */
10250 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10251 (6 << TX_LENGTHS_IPG_SHIFT) |
10252 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10253
10254 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10255 tg3_asic_rev(tp) == ASIC_REV_5762)
10256 val |= tr32(MAC_TX_LENGTHS) &
10257 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10258 TX_LENGTHS_CNT_DWN_VAL_MSK);
10259
10260 tw32(MAC_TX_LENGTHS, val);
10261
10262 /* Receive rules. */
10263 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10264 tw32(RCVLPC_CONFIG, 0x0181);
10265
10266 /* Calculate RDMAC_MODE setting early, we need it to determine
10267 * the RCVLPC_STATE_ENABLE mask.
10268 */
10269 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10270 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10271 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10272 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10273 RDMAC_MODE_LNGREAD_ENAB);
10274
10275 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10276 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10277
10278 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10279 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10280 tg3_asic_rev(tp) == ASIC_REV_57780)
10281 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10282 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10283 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10284
10285 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10286 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10287 if (tg3_flag(tp, TSO_CAPABLE) &&
10288 tg3_asic_rev(tp) == ASIC_REV_5705) {
10289 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10290 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10291 !tg3_flag(tp, IS_5788)) {
10292 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10293 }
10294 }
10295
10296 if (tg3_flag(tp, PCI_EXPRESS))
10297 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10298
10299 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10300 tp->dma_limit = 0;
10301 if (tp->dev->mtu <= ETH_DATA_LEN) {
10302 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10303 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10304 }
10305 }
10306
10307 if (tg3_flag(tp, HW_TSO_1) ||
10308 tg3_flag(tp, HW_TSO_2) ||
10309 tg3_flag(tp, HW_TSO_3))
10310 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10311
10312 if (tg3_flag(tp, 57765_PLUS) ||
10313 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10314 tg3_asic_rev(tp) == ASIC_REV_57780)
10315 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10316
10317 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10318 tg3_asic_rev(tp) == ASIC_REV_5762)
10319 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10320
10321 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10322 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10323 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10324 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10325 tg3_flag(tp, 57765_PLUS)) {
10326 u32 tgtreg;
10327
10328 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10329 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10330 else
10331 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10332
10333 val = tr32(tgtreg);
10334 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10335 tg3_asic_rev(tp) == ASIC_REV_5762) {
10336 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10337 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10338 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10339 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10340 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10341 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10342 }
10343 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10344 }
10345
10346 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10347 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10348 tg3_asic_rev(tp) == ASIC_REV_5762) {
10349 u32 tgtreg;
10350
10351 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10352 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10353 else
10354 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10355
10356 val = tr32(tgtreg);
10357 tw32(tgtreg, val |
10358 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10359 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10360 }
10361
10362 /* Receive/send statistics. */
10363 if (tg3_flag(tp, 5750_PLUS)) {
10364 val = tr32(RCVLPC_STATS_ENABLE);
10365 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10366 tw32(RCVLPC_STATS_ENABLE, val);
10367 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10368 tg3_flag(tp, TSO_CAPABLE)) {
10369 val = tr32(RCVLPC_STATS_ENABLE);
10370 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10371 tw32(RCVLPC_STATS_ENABLE, val);
10372 } else {
10373 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10374 }
10375 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10376 tw32(SNDDATAI_STATSENAB, 0xffffff);
10377 tw32(SNDDATAI_STATSCTRL,
10378 (SNDDATAI_SCTRL_ENABLE |
10379 SNDDATAI_SCTRL_FASTUPD));
10380
10381 /* Setup host coalescing engine. */
10382 tw32(HOSTCC_MODE, 0);
10383 for (i = 0; i < 2000; i++) {
10384 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10385 break;
10386 udelay(10);
10387 }
10388
10389 __tg3_set_coalesce(tp, &tp->coal);
10390
10391 if (!tg3_flag(tp, 5705_PLUS)) {
10392 /* Status/statistics block address. See tg3_timer,
10393 * the tg3_periodic_fetch_stats call there, and
10394 * tg3_get_stats to see how this works for 5705/5750 chips.
10395 */
10396 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10397 ((u64) tp->stats_mapping >> 32));
10398 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10399 ((u64) tp->stats_mapping & 0xffffffff));
10400 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10401
10402 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10403
10404 /* Clear statistics and status block memory areas */
10405 for (i = NIC_SRAM_STATS_BLK;
10406 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10407 i += sizeof(u32)) {
10408 tg3_write_mem(tp, i, 0);
10409 udelay(40);
10410 }
10411 }
10412
10413 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10414
10415 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10416 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10417 if (!tg3_flag(tp, 5705_PLUS))
10418 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10419
10420 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10421 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10422 /* reset to prevent losing 1st rx packet intermittently */
10423 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10424 udelay(10);
10425 }
10426
10427 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10428 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10429 MAC_MODE_FHDE_ENABLE;
10430 if (tg3_flag(tp, ENABLE_APE))
10431 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10432 if (!tg3_flag(tp, 5705_PLUS) &&
10433 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10434 tg3_asic_rev(tp) != ASIC_REV_5700)
10435 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10436 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10437 udelay(40);
10438
10439 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10440 * If TG3_FLAG_IS_NIC is zero, we should read the
10441 * register to preserve the GPIO settings for LOMs. The GPIOs,
10442 * whether used as inputs or outputs, are set by boot code after
10443 * reset.
10444 */
10445 if (!tg3_flag(tp, IS_NIC)) {
10446 u32 gpio_mask;
10447
10448 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10449 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10450 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10451
10452 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10453 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10454 GRC_LCLCTRL_GPIO_OUTPUT3;
10455
10456 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10457 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10458
10459 tp->grc_local_ctrl &= ~gpio_mask;
10460 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10461
10462 /* GPIO1 must be driven high for eeprom write protect */
10463 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10464 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10465 GRC_LCLCTRL_GPIO_OUTPUT1);
10466 }
10467 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10468 udelay(100);
10469
10470 if (tg3_flag(tp, USING_MSIX)) {
10471 val = tr32(MSGINT_MODE);
10472 val |= MSGINT_MODE_ENABLE;
10473 if (tp->irq_cnt > 1)
10474 val |= MSGINT_MODE_MULTIVEC_EN;
10475 if (!tg3_flag(tp, 1SHOT_MSI))
10476 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10477 tw32(MSGINT_MODE, val);
10478 }
10479
10480 if (!tg3_flag(tp, 5705_PLUS)) {
10481 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10482 udelay(40);
10483 }
10484
10485 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10486 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10487 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10488 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10489 WDMAC_MODE_LNGREAD_ENAB);
10490
10491 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10492 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10493 if (tg3_flag(tp, TSO_CAPABLE) &&
10494 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10495 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10496 /* nothing */
10497 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10498 !tg3_flag(tp, IS_5788)) {
10499 val |= WDMAC_MODE_RX_ACCEL;
10500 }
10501 }
10502
10503 /* Enable host coalescing bug fix */
10504 if (tg3_flag(tp, 5755_PLUS))
10505 val |= WDMAC_MODE_STATUS_TAG_FIX;
10506
10507 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10508 val |= WDMAC_MODE_BURST_ALL_DATA;
10509
10510 tw32_f(WDMAC_MODE, val);
10511 udelay(40);
10512
10513 if (tg3_flag(tp, PCIX_MODE)) {
10514 u16 pcix_cmd;
10515
10516 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10517 &pcix_cmd);
10518 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10519 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10520 pcix_cmd |= PCI_X_CMD_READ_2K;
10521 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10522 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10523 pcix_cmd |= PCI_X_CMD_READ_2K;
10524 }
10525 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10526 pcix_cmd);
10527 }
10528
10529 tw32_f(RDMAC_MODE, rdmac_mode);
10530 udelay(40);
10531
10532 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10533 tg3_asic_rev(tp) == ASIC_REV_5720) {
10534 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10535 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10536 break;
10537 }
10538 if (i < TG3_NUM_RDMA_CHANNELS) {
10539 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10540 val |= tg3_lso_rd_dma_workaround_bit(tp);
10541 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10542 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10543 }
10544 }
10545
10546 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10547 if (!tg3_flag(tp, 5705_PLUS))
10548 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10549
10550 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10551 tw32(SNDDATAC_MODE,
10552 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10553 else
10554 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10555
10556 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10557 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10558 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10559 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10560 val |= RCVDBDI_MODE_LRG_RING_SZ;
10561 tw32(RCVDBDI_MODE, val);
10562 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10563 if (tg3_flag(tp, HW_TSO_1) ||
10564 tg3_flag(tp, HW_TSO_2) ||
10565 tg3_flag(tp, HW_TSO_3))
10566 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10567 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10568 if (tg3_flag(tp, ENABLE_TSS))
10569 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10570 tw32(SNDBDI_MODE, val);
10571 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10572
10573 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10574 err = tg3_load_5701_a0_firmware_fix(tp);
10575 if (err)
10576 return err;
10577 }
10578
10579 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10580 		/* Ignore any errors from the firmware download. If the download
10581 		 * fails, the device will operate with EEE disabled.
10582 */
10583 tg3_load_57766_firmware(tp);
10584 }
10585
10586 if (tg3_flag(tp, TSO_CAPABLE)) {
10587 err = tg3_load_tso_firmware(tp);
10588 if (err)
10589 return err;
10590 }
10591
10592 tp->tx_mode = TX_MODE_ENABLE;
10593
10594 if (tg3_flag(tp, 5755_PLUS) ||
10595 tg3_asic_rev(tp) == ASIC_REV_5906)
10596 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10597
10598 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10599 tg3_asic_rev(tp) == ASIC_REV_5762) {
10600 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10601 tp->tx_mode &= ~val;
10602 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10603 }
10604
10605 tw32_f(MAC_TX_MODE, tp->tx_mode);
10606 udelay(100);
10607
10608 if (tg3_flag(tp, ENABLE_RSS)) {
10609 u32 rss_key[10];
10610
10611 tg3_rss_write_indir_tbl(tp);
10612
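		/* Program a random 40-byte RSS hash key (ten 32-bit words). */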
10613 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10614
10615 for (i = 0; i < 10 ; i++)
10616 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10617 }
10618
10619 tp->rx_mode = RX_MODE_ENABLE;
10620 if (tg3_flag(tp, 5755_PLUS))
10621 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10622
10623 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10624 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10625
10626 if (tg3_flag(tp, ENABLE_RSS))
10627 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10628 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10629 RX_MODE_RSS_IPV6_HASH_EN |
10630 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10631 RX_MODE_RSS_IPV4_HASH_EN |
10632 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10633
10634 tw32_f(MAC_RX_MODE, tp->rx_mode);
10635 udelay(10);
10636
10637 tw32(MAC_LED_CTRL, tp->led_ctrl);
10638
10639 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10640 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10641 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10642 udelay(10);
10643 }
10644 tw32_f(MAC_RX_MODE, tp->rx_mode);
10645 udelay(10);
10646
10647 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10648 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10649 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10650 /* Set drive transmission level to 1.2V */
10651 /* only if the signal pre-emphasis bit is not set */
10652 val = tr32(MAC_SERDES_CFG);
10653 val &= 0xfffff000;
10654 val |= 0x880;
10655 tw32(MAC_SERDES_CFG, val);
10656 }
10657 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10658 tw32(MAC_SERDES_CFG, 0x616000);
10659 }
10660
10661 /* Prevent chip from dropping frames when flow control
10662 * is enabled.
10663 */
10664 if (tg3_flag(tp, 57765_CLASS))
10665 val = 1;
10666 else
10667 val = 2;
10668 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10669
10670 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10671 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10672 /* Use hardware link auto-negotiation */
10673 tg3_flag_set(tp, HW_AUTONEG);
10674 }
10675
10676 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10677 tg3_asic_rev(tp) == ASIC_REV_5714) {
10678 u32 tmp;
10679
10680 tmp = tr32(SERDES_RX_CTRL);
10681 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10682 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10683 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10684 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10685 }
10686
10687 if (!tg3_flag(tp, USE_PHYLIB)) {
10688 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10689 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10690
10691 err = tg3_setup_phy(tp, false);
10692 if (err)
10693 return err;
10694
10695 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10696 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10697 u32 tmp;
10698
10699 /* Clear CRC stats. */
10700 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10701 tg3_writephy(tp, MII_TG3_TEST1,
10702 tmp | MII_TG3_TEST1_CRC_EN);
10703 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10704 }
10705 }
10706 }
10707
10708 __tg3_set_rx_mode(tp->dev);
10709
10710 /* Initialize receive rules. */
10711 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10712 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10713 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10714 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10715
10716 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10717 limit = 8;
10718 else
10719 limit = 16;
10720 if (tg3_flag(tp, ENABLE_ASF))
10721 limit -= 4;
10722 switch (limit) {
10723 case 16:
10724 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10725 /* fall through */
10726 case 15:
10727 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10728 /* fall through */
10729 case 14:
10730 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10731 /* fall through */
10732 case 13:
10733 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10734 /* fall through */
10735 case 12:
10736 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10737 /* fall through */
10738 case 11:
10739 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10740 /* fall through */
10741 case 10:
10742 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10743 /* fall through */
10744 case 9:
10745 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10746 /* fall through */
10747 case 8:
10748 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10749 /* fall through */
10750 case 7:
10751 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10752 /* fall through */
10753 case 6:
10754 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10755 /* fall through */
10756 case 5:
10757 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10758 /* fall through */
10759 case 4:
10760 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10761 case 3:
10762 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10763 case 2:
10764 case 1:
10765
10766 default:
10767 break;
10768 }
10769
10770 if (tg3_flag(tp, ENABLE_APE))
10771 /* Write our heartbeat update interval to APE. */
10772 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10773 APE_HOST_HEARTBEAT_INT_5SEC);
10774
10775 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10776
10777 return 0;
10778 }
10779
10780 /* Called at device open time to get the chip ready for
10781 * packet processing. Invoked with tp->lock held.
10782 */
10783 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10784 {
10785 /* Chip may have been just powered on. If so, the boot code may still
10786 * be running initialization. Wait for it to finish to avoid races in
10787 * accessing the hardware.
10788 */
10789 tg3_enable_register_access(tp);
10790 tg3_poll_fw(tp);
10791
10792 tg3_switch_clocks(tp);
10793
10794 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10795
10796 return tg3_reset_hw(tp, reset_phy);
10797 }
10798
10799 #ifdef CONFIG_TIGON3_HWMON
10800 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10801 {
10802 int i;
10803
10804 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10805 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10806
10807 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10808 off += len;
10809
10810 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10811 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10812 memset(ocir, 0, TG3_OCIR_LEN);
10813 }
10814 }
10815
10816 /* sysfs attributes for hwmon */
10817 static ssize_t tg3_show_temp(struct device *dev,
10818 struct device_attribute *devattr, char *buf)
10819 {
10820 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10821 struct tg3 *tp = dev_get_drvdata(dev);
10822 u32 temperature;
10823
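	/* The APE scratchpad reports the temperature in degrees Celsius;
	 * hwmon sysfs expects millidegrees, hence the conversion below.
	 */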
10824 spin_lock_bh(&tp->lock);
10825 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10826 sizeof(temperature));
10827 spin_unlock_bh(&tp->lock);
10828 return sprintf(buf, "%u\n", temperature * 1000);
10829 }
10830
10831
10832 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10833 TG3_TEMP_SENSOR_OFFSET);
10834 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10835 TG3_TEMP_CAUTION_OFFSET);
10836 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10837 TG3_TEMP_MAX_OFFSET);
10838
10839 static struct attribute *tg3_attrs[] = {
10840 &sensor_dev_attr_temp1_input.dev_attr.attr,
10841 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10842 &sensor_dev_attr_temp1_max.dev_attr.attr,
10843 NULL
10844 };
10845 ATTRIBUTE_GROUPS(tg3);
10846
10847 static void tg3_hwmon_close(struct tg3 *tp)
10848 {
10849 if (tp->hwmon_dev) {
10850 hwmon_device_unregister(tp->hwmon_dev);
10851 tp->hwmon_dev = NULL;
10852 }
10853 }
10854
10855 static void tg3_hwmon_open(struct tg3 *tp)
10856 {
10857 int i;
10858 u32 size = 0;
10859 struct pci_dev *pdev = tp->pdev;
10860 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10861
10862 tg3_sd_scan_scratchpad(tp, ocirs);
10863
10864 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10865 if (!ocirs[i].src_data_length)
10866 continue;
10867
10868 size += ocirs[i].src_hdr_length;
10869 size += ocirs[i].src_data_length;
10870 }
10871
10872 if (!size)
10873 return;
10874
10875 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10876 tp, tg3_groups);
10877 if (IS_ERR(tp->hwmon_dev)) {
10878 tp->hwmon_dev = NULL;
10879 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10880 }
10881 }
10882 #else
10883 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10884 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10885 #endif /* CONFIG_TIGON3_HWMON */
10886
10887
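/* Accumulate a 32-bit hardware counter into a 64-bit high/low software
 * counter; a wrap of the low word is detected when the addition leaves
 * it smaller than the value just added.
 */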
10888 #define TG3_STAT_ADD32(PSTAT, REG) \
10889 do { u32 __val = tr32(REG); \
10890 (PSTAT)->low += __val; \
10891 if ((PSTAT)->low < __val) \
10892 (PSTAT)->high += 1; \
10893 } while (0)
10894
10895 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10896 {
10897 struct tg3_hw_stats *sp = tp->hw_stats;
10898
10899 if (!tp->link_up)
10900 return;
10901
10902 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10903 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10904 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10905 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10906 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10907 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10908 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10909 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10910 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10911 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10912 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10913 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10914 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10915 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10916 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10917 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10918 u32 val;
10919
10920 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10921 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10922 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10923 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10924 }
10925
10926 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10927 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10928 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10929 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10930 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10931 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10932 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10933 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10934 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10935 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10936 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10937 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10938 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10939 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10940
10941 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10942 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10943 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10944 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10945 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10946 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10947 } else {
10948 u32 val = tr32(HOSTCC_FLOW_ATTN);
10949 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10950 if (val) {
10951 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10952 sp->rx_discards.low += val;
10953 if (sp->rx_discards.low < val)
10954 sp->rx_discards.high += 1;
10955 }
10956 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10957 }
10958 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10959 }
10960
10961 static void tg3_chk_missed_msi(struct tg3 *tp)
10962 {
10963 u32 i;
10964
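	/* Work around lost MSIs: if a vector still has work pending but
	 * its consumer indices have not moved since the last check, assume
	 * the interrupt was missed and invoke the handler directly.
	 */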
10965 for (i = 0; i < tp->irq_cnt; i++) {
10966 struct tg3_napi *tnapi = &tp->napi[i];
10967
10968 if (tg3_has_work(tnapi)) {
10969 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10970 tnapi->last_tx_cons == tnapi->tx_cons) {
10971 if (tnapi->chk_msi_cnt < 1) {
10972 tnapi->chk_msi_cnt++;
10973 return;
10974 }
10975 tg3_msi(0, tnapi);
10976 }
10977 }
10978 tnapi->chk_msi_cnt = 0;
10979 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10980 tnapi->last_tx_cons = tnapi->tx_cons;
10981 }
10982 }
10983
10984 static void tg3_timer(struct timer_list *t)
10985 {
10986 struct tg3 *tp = from_timer(tp, t, timer);
10987
10988 spin_lock(&tp->lock);
10989
10990 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10991 spin_unlock(&tp->lock);
10992 goto restart_timer;
10993 }
10994
10995 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10996 tg3_flag(tp, 57765_CLASS))
10997 tg3_chk_missed_msi(tp);
10998
10999 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11000 /* BCM4785: Flush posted writes from GbE to host memory. */
11001 tr32(HOSTCC_MODE);
11002 }
11003
11004 if (!tg3_flag(tp, TAGGED_STATUS)) {
11005 		/* All of this garbage is because, when using non-tagged
11006 		 * IRQ status, the mailbox/status_block protocol the chip
11007 		 * uses with the CPU is race prone.
11008 */
11009 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11010 tw32(GRC_LOCAL_CTRL,
11011 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11012 } else {
11013 tw32(HOSTCC_MODE, tp->coalesce_mode |
11014 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11015 }
11016
11017 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11018 spin_unlock(&tp->lock);
11019 tg3_reset_task_schedule(tp);
11020 goto restart_timer;
11021 }
11022 }
11023
11024 /* This part only runs once per second. */
11025 if (!--tp->timer_counter) {
11026 if (tg3_flag(tp, 5705_PLUS))
11027 tg3_periodic_fetch_stats(tp);
11028
11029 if (tp->setlpicnt && !--tp->setlpicnt)
11030 tg3_phy_eee_enable(tp);
11031
11032 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11033 u32 mac_stat;
11034 int phy_event;
11035
11036 mac_stat = tr32(MAC_STATUS);
11037
11038 phy_event = 0;
11039 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11040 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11041 phy_event = 1;
11042 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11043 phy_event = 1;
11044
11045 if (phy_event)
11046 tg3_setup_phy(tp, false);
11047 } else if (tg3_flag(tp, POLL_SERDES)) {
11048 u32 mac_stat = tr32(MAC_STATUS);
11049 int need_setup = 0;
11050
11051 if (tp->link_up &&
11052 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11053 need_setup = 1;
11054 }
11055 if (!tp->link_up &&
11056 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11057 MAC_STATUS_SIGNAL_DET))) {
11058 need_setup = 1;
11059 }
11060 if (need_setup) {
11061 if (!tp->serdes_counter) {
11062 tw32_f(MAC_MODE,
11063 (tp->mac_mode &
11064 ~MAC_MODE_PORT_MODE_MASK));
11065 udelay(40);
11066 tw32_f(MAC_MODE, tp->mac_mode);
11067 udelay(40);
11068 }
11069 tg3_setup_phy(tp, false);
11070 }
11071 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11072 tg3_flag(tp, 5780_CLASS)) {
11073 tg3_serdes_parallel_detect(tp);
11074 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11075 u32 cpmu = tr32(TG3_CPMU_STATUS);
11076 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11077 TG3_CPMU_STATUS_LINK_MASK);
11078
11079 if (link_up != tp->link_up)
11080 tg3_setup_phy(tp, false);
11081 }
11082
11083 tp->timer_counter = tp->timer_multiplier;
11084 }
11085
11086 /* Heartbeat is only sent once every 2 seconds.
11087 *
11088 * The heartbeat is to tell the ASF firmware that the host
11089 * driver is still alive. In the event that the OS crashes,
11090 * ASF needs to reset the hardware to free up the FIFO space
11091 * that may be filled with rx packets destined for the host.
11092 * If the FIFO is full, ASF will no longer function properly.
11093 *
11094 * Unintended resets have been reported on real time kernels
11095 	 * where the timer doesn't run on time. Netpoll will also have the
11096 	 * same problem.
11097 *
11098 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11099 * to check the ring condition when the heartbeat is expiring
11100 * before doing the reset. This will prevent most unintended
11101 * resets.
11102 */
11103 if (!--tp->asf_counter) {
11104 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11105 tg3_wait_for_event_ack(tp);
11106
11107 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11108 FWCMD_NICDRV_ALIVE3);
11109 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11110 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11111 TG3_FW_UPDATE_TIMEOUT_SEC);
11112
11113 tg3_generate_fw_event(tp);
11114 }
11115 tp->asf_counter = tp->asf_multiplier;
11116 }
11117
11118 	/* Update the APE heartbeat every 5 seconds. */
11119 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11120
11121 spin_unlock(&tp->lock);
11122
11123 restart_timer:
11124 tp->timer.expires = jiffies + tp->timer_offset;
11125 add_timer(&tp->timer);
11126 }
11127
11128 static void tg3_timer_init(struct tg3 *tp)
11129 {
11130 if (tg3_flag(tp, TAGGED_STATUS) &&
11131 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11132 !tg3_flag(tp, 57765_CLASS))
11133 tp->timer_offset = HZ;
11134 else
11135 tp->timer_offset = HZ / 10;
11136
11137 BUG_ON(tp->timer_offset > HZ);
11138
11139 tp->timer_multiplier = (HZ / tp->timer_offset);
11140 tp->asf_multiplier = (HZ / tp->timer_offset) *
11141 TG3_FW_UPDATE_FREQ_SEC;
11142
11143 timer_setup(&tp->timer, tg3_timer, 0);
11144 }
11145
11146 static void tg3_timer_start(struct tg3 *tp)
11147 {
11148 tp->asf_counter = tp->asf_multiplier;
11149 tp->timer_counter = tp->timer_multiplier;
11150
11151 tp->timer.expires = jiffies + tp->timer_offset;
11152 add_timer(&tp->timer);
11153 }
11154
11155 static void tg3_timer_stop(struct tg3 *tp)
11156 {
11157 del_timer_sync(&tp->timer);
11158 }
11159
11160 /* Restart hardware after configuration changes, self-test, etc.
11161 * Invoked with tp->lock held.
11162 */
11163 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11164 __releases(tp->lock)
11165 __acquires(tp->lock)
11166 {
11167 int err;
11168
11169 err = tg3_init_hw(tp, reset_phy);
11170 if (err) {
11171 netdev_err(tp->dev,
11172 "Failed to re-initialize device, aborting\n");
11173 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11174 tg3_full_unlock(tp);
11175 tg3_timer_stop(tp);
11176 tp->irq_sync = 0;
11177 tg3_napi_enable(tp);
11178 dev_close(tp->dev);
11179 tg3_full_lock(tp, 0);
11180 }
11181 return err;
11182 }
11183
11184 static void tg3_reset_task(struct work_struct *work)
11185 {
11186 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11187 int err;
11188
11189 rtnl_lock();
11190 tg3_full_lock(tp, 0);
11191
11192 if (!netif_running(tp->dev)) {
11193 tg3_flag_clear(tp, RESET_TASK_PENDING);
11194 tg3_full_unlock(tp);
11195 rtnl_unlock();
11196 return;
11197 }
11198
11199 tg3_full_unlock(tp);
11200
11201 tg3_phy_stop(tp);
11202
11203 tg3_netif_stop(tp);
11204
11205 tg3_full_lock(tp, 1);
11206
11207 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11208 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11209 tp->write32_rx_mbox = tg3_write_flush_reg32;
11210 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11211 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11212 }
11213
11214 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11215 err = tg3_init_hw(tp, true);
11216 if (err)
11217 goto out;
11218
11219 tg3_netif_start(tp);
11220
11221 out:
11222 tg3_full_unlock(tp);
11223
11224 if (!err)
11225 tg3_phy_start(tp);
11226
11227 tg3_flag_clear(tp, RESET_TASK_PENDING);
11228 rtnl_unlock();
11229 }
11230
11231 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11232 {
11233 irq_handler_t fn;
11234 unsigned long flags;
11235 char *name;
11236 struct tg3_napi *tnapi = &tp->napi[irq_num];
11237
11238 if (tp->irq_cnt == 1)
11239 name = tp->dev->name;
11240 else {
11241 name = &tnapi->irq_lbl[0];
11242 if (tnapi->tx_buffers && tnapi->rx_rcb)
11243 snprintf(name, IFNAMSIZ,
11244 "%s-txrx-%d", tp->dev->name, irq_num);
11245 else if (tnapi->tx_buffers)
11246 snprintf(name, IFNAMSIZ,
11247 "%s-tx-%d", tp->dev->name, irq_num);
11248 else if (tnapi->rx_rcb)
11249 snprintf(name, IFNAMSIZ,
11250 "%s-rx-%d", tp->dev->name, irq_num);
11251 else
11252 snprintf(name, IFNAMSIZ,
11253 "%s-%d", tp->dev->name, irq_num);
11254 name[IFNAMSIZ-1] = 0;
11255 }
11256
11257 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11258 fn = tg3_msi;
11259 if (tg3_flag(tp, 1SHOT_MSI))
11260 fn = tg3_msi_1shot;
11261 flags = 0;
11262 } else {
11263 fn = tg3_interrupt;
11264 if (tg3_flag(tp, TAGGED_STATUS))
11265 fn = tg3_interrupt_tagged;
11266 flags = IRQF_SHARED;
11267 }
11268
11269 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11270 }
11271
11272 static int tg3_test_interrupt(struct tg3 *tp)
11273 {
11274 struct tg3_napi *tnapi = &tp->napi[0];
11275 struct net_device *dev = tp->dev;
11276 int err, i, intr_ok = 0;
11277 u32 val;
11278
11279 if (!netif_running(dev))
11280 return -ENODEV;
11281
11282 tg3_disable_ints(tp);
11283
11284 free_irq(tnapi->irq_vec, tnapi);
11285
11286 /*
11287 * Turn off MSI one shot mode. Otherwise this test has no
11288 * observable way to know whether the interrupt was delivered.
11289 */
11290 if (tg3_flag(tp, 57765_PLUS)) {
11291 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11292 tw32(MSGINT_MODE, val);
11293 }
11294
11295 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11296 IRQF_SHARED, dev->name, tnapi);
11297 if (err)
11298 return err;
11299
11300 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11301 tg3_enable_ints(tp);
11302
11303 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11304 tnapi->coal_now);
11305
11306 for (i = 0; i < 5; i++) {
11307 u32 int_mbox, misc_host_ctrl;
11308
11309 int_mbox = tr32_mailbox(tnapi->int_mbox);
11310 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11311
11312 if ((int_mbox != 0) ||
11313 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11314 intr_ok = 1;
11315 break;
11316 }
11317
11318 if (tg3_flag(tp, 57765_PLUS) &&
11319 tnapi->hw_status->status_tag != tnapi->last_tag)
11320 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11321
11322 msleep(10);
11323 }
11324
11325 tg3_disable_ints(tp);
11326
11327 free_irq(tnapi->irq_vec, tnapi);
11328
11329 err = tg3_request_irq(tp, 0);
11330
11331 if (err)
11332 return err;
11333
11334 if (intr_ok) {
11335 /* Reenable MSI one shot mode. */
11336 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11337 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11338 tw32(MSGINT_MODE, val);
11339 }
11340 return 0;
11341 }
11342
11343 return -EIO;
11344 }
11345
11346 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11347 * INTx mode is successfully restored.
11348 */
11349 static int tg3_test_msi(struct tg3 *tp)
11350 {
11351 int err;
11352 u16 pci_cmd;
11353
11354 if (!tg3_flag(tp, USING_MSI))
11355 return 0;
11356
11357 /* Turn off SERR reporting in case MSI terminates with Master
11358 * Abort.
11359 */
11360 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11361 pci_write_config_word(tp->pdev, PCI_COMMAND,
11362 pci_cmd & ~PCI_COMMAND_SERR);
11363
11364 err = tg3_test_interrupt(tp);
11365
11366 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11367
11368 if (!err)
11369 return 0;
11370
11371 /* other failures */
11372 if (err != -EIO)
11373 return err;
11374
11375 /* MSI test failed, go back to INTx mode */
11376 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11377 "to INTx mode. Please report this failure to the PCI "
11378 "maintainer and include system chipset information\n");
11379
11380 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11381
11382 pci_disable_msi(tp->pdev);
11383
11384 tg3_flag_clear(tp, USING_MSI);
11385 tp->napi[0].irq_vec = tp->pdev->irq;
11386
11387 err = tg3_request_irq(tp, 0);
11388 if (err)
11389 return err;
11390
11391 /* Need to reset the chip because the MSI cycle may have terminated
11392 * with Master Abort.
11393 */
11394 tg3_full_lock(tp, 1);
11395
11396 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11397 err = tg3_init_hw(tp, true);
11398
11399 tg3_full_unlock(tp);
11400
11401 if (err)
11402 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11403
11404 return err;
11405 }
11406
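/* Fetch the firmware image named by tp->fw_needed and sanity-check the
 * length recorded in its header against the size of the loaded blob.
 */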
11407 static int tg3_request_firmware(struct tg3 *tp)
11408 {
11409 const struct tg3_firmware_hdr *fw_hdr;
11410
11411 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11412 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11413 tp->fw_needed);
11414 return -ENOENT;
11415 }
11416
11417 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11418
11419 /* Firmware blob starts with version numbers, followed by
11420 * start address and _full_ length including BSS sections
11421 * (which must be no shorter than the actual data, of course).
11422 */
11423
11424 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11425 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11426 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11427 tp->fw_len, tp->fw_needed);
11428 release_firmware(tp->fw);
11429 tp->fw = NULL;
11430 return -EINVAL;
11431 }
11432
11433 /* We no longer need firmware; we have it. */
11434 tp->fw_needed = NULL;
11435 return 0;
11436 }
11437
11438 static u32 tg3_irq_count(struct tg3 *tp)
11439 {
11440 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11441
11442 if (irq_cnt > 1) {
11443 /* We want as many rx rings enabled as there are cpus.
11444 * In multiqueue MSI-X mode, the first MSI-X vector
11445 * only deals with link interrupts, etc, so we add
11446 * one to the number of vectors we are requesting.
11447 */
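/* Example: with four RX rings and one TX ring this requests
 * min(4 + 1, tp->irq_max) = 5 vectors (assuming irq_max >= 5).
 */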
11448 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11449 }
11450
11451 return irq_cnt;
11452 }
11453
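/* Try to put the device into MSI-X mode. The RX queue count defaults to
 * the stock RSS queue count (capped at rxq_max), one extra vector is
 * requested for link events, and the counts are scaled back if the PCI
 * core grants fewer vectors. Returns false if MSI-X cannot be used.
 */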
11454 static bool tg3_enable_msix(struct tg3 *tp)
11455 {
11456 int i, rc;
11457 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11458
11459 tp->txq_cnt = tp->txq_req;
11460 tp->rxq_cnt = tp->rxq_req;
11461 if (!tp->rxq_cnt)
11462 tp->rxq_cnt = netif_get_num_default_rss_queues();
11463 if (tp->rxq_cnt > tp->rxq_max)
11464 tp->rxq_cnt = tp->rxq_max;
11465
11466 /* Disable multiple TX rings by default. Simple round-robin hardware
11467 * scheduling of the TX rings can cause starvation of rings with
11468 * small packets when other rings have TSO or jumbo packets.
11469 */
11470 if (!tp->txq_req)
11471 tp->txq_cnt = 1;
11472
11473 tp->irq_cnt = tg3_irq_count(tp);
11474
11475 for (i = 0; i < tp->irq_max; i++) {
11476 msix_ent[i].entry = i;
11477 msix_ent[i].vector = 0;
11478 }
11479
11480 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11481 if (rc < 0) {
11482 return false;
11483 } else if (rc < tp->irq_cnt) {
11484 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11485 tp->irq_cnt, rc);
11486 tp->irq_cnt = rc;
11487 tp->rxq_cnt = max(rc - 1, 1);
11488 if (tp->txq_cnt)
11489 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11490 }
11491
11492 for (i = 0; i < tp->irq_max; i++)
11493 tp->napi[i].irq_vec = msix_ent[i].vector;
11494
11495 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11496 pci_disable_msix(tp->pdev);
11497 return false;
11498 }
11499
11500 if (tp->irq_cnt == 1)
11501 return true;
11502
11503 tg3_flag_set(tp, ENABLE_RSS);
11504
11505 if (tp->txq_cnt > 1)
11506 tg3_flag_set(tp, ENABLE_TSS);
11507
11508 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11509
11510 return true;
11511 }
11512
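/* Pick the interrupt scheme: MSI-X if supported, then MSI, then legacy
 * INTx, and program MSGINT_MODE (multivector enable, one-shot disable)
 * to match the choice.
 */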
11513 static void tg3_ints_init(struct tg3 *tp)
11514 {
11515 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11516 !tg3_flag(tp, TAGGED_STATUS)) {
11517 /* All MSI supporting chips should support tagged
11518 * status. Assert that this is the case.
11519 */
11520 netdev_warn(tp->dev,
11521 "MSI without TAGGED_STATUS? Not using MSI\n");
11522 goto defcfg;
11523 }
11524
11525 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11526 tg3_flag_set(tp, USING_MSIX);
11527 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11528 tg3_flag_set(tp, USING_MSI);
11529
11530 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11531 u32 msi_mode = tr32(MSGINT_MODE);
11532 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11533 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11534 if (!tg3_flag(tp, 1SHOT_MSI))
11535 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11536 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11537 }
11538 defcfg:
11539 if (!tg3_flag(tp, USING_MSIX)) {
11540 tp->irq_cnt = 1;
11541 tp->napi[0].irq_vec = tp->pdev->irq;
11542 }
11543
11544 if (tp->irq_cnt == 1) {
11545 tp->txq_cnt = 1;
11546 tp->rxq_cnt = 1;
11547 netif_set_real_num_tx_queues(tp->dev, 1);
11548 netif_set_real_num_rx_queues(tp->dev, 1);
11549 }
11550 }
11551
11552 static void tg3_ints_fini(struct tg3 *tp)
11553 {
11554 if (tg3_flag(tp, USING_MSIX))
11555 pci_disable_msix(tp->pdev);
11556 else if (tg3_flag(tp, USING_MSI))
11557 pci_disable_msi(tp->pdev);
11558 tg3_flag_clear(tp, USING_MSI);
11559 tg3_flag_clear(tp, USING_MSIX);
11560 tg3_flag_clear(tp, ENABLE_RSS);
11561 tg3_flag_clear(tp, ENABLE_TSS);
11562 }
11563
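/* Bring the device fully up: allocate DMA rings and NAPI contexts,
 * request IRQs, program the hardware, optionally validate MSI delivery,
 * and start the TX queues. All of it is unwound on failure.
 */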
11564 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11565 bool init)
11566 {
11567 struct net_device *dev = tp->dev;
11568 int i, err;
11569
11570 /*
11571 * Set up interrupts first so we know how
11572 * many NAPI resources to allocate
11573 */
11574 tg3_ints_init(tp);
11575
11576 tg3_rss_check_indir_tbl(tp);
11577
11578 /* The placement of this call is tied
11579 * to the setup and use of Host TX descriptors.
11580 */
11581 err = tg3_alloc_consistent(tp);
11582 if (err)
11583 goto out_ints_fini;
11584
11585 tg3_napi_init(tp);
11586
11587 tg3_napi_enable(tp);
11588
11589 for (i = 0; i < tp->irq_cnt; i++) {
11590 err = tg3_request_irq(tp, i);
11591 if (err) {
11592 for (i--; i >= 0; i--) {
11593 struct tg3_napi *tnapi = &tp->napi[i];
11594
11595 free_irq(tnapi->irq_vec, tnapi);
11596 }
11597 goto out_napi_fini;
11598 }
11599 }
11600
11601 tg3_full_lock(tp, 0);
11602
11603 if (init)
11604 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11605
11606 err = tg3_init_hw(tp, reset_phy);
11607 if (err) {
11608 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11609 tg3_free_rings(tp);
11610 }
11611
11612 tg3_full_unlock(tp);
11613
11614 if (err)
11615 goto out_free_irq;
11616
11617 if (test_irq && tg3_flag(tp, USING_MSI)) {
11618 err = tg3_test_msi(tp);
11619
11620 if (err) {
11621 tg3_full_lock(tp, 0);
11622 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11623 tg3_free_rings(tp);
11624 tg3_full_unlock(tp);
11625
11626 goto out_napi_fini;
11627 }
11628
11629 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11630 u32 val = tr32(PCIE_TRANSACTION_CFG);
11631
11632 tw32(PCIE_TRANSACTION_CFG,
11633 val | PCIE_TRANS_CFG_1SHOT_MSI);
11634 }
11635 }
11636
11637 tg3_phy_start(tp);
11638
11639 tg3_hwmon_open(tp);
11640
11641 tg3_full_lock(tp, 0);
11642
11643 tg3_timer_start(tp);
11644 tg3_flag_set(tp, INIT_COMPLETE);
11645 tg3_enable_ints(tp);
11646
11647 tg3_ptp_resume(tp);
11648
11649 tg3_full_unlock(tp);
11650
11651 netif_tx_start_all_queues(dev);
11652
11653 /*
11654 * Reset loopback feature if it was turned on while the device was down;
11655 * make sure that it's installed properly now.
11656 */
11657 if (dev->features & NETIF_F_LOOPBACK)
11658 tg3_set_loopback(dev, dev->features);
11659
11660 return 0;
11661
11662 out_free_irq:
11663 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11664 struct tg3_napi *tnapi = &tp->napi[i];
11665 free_irq(tnapi->irq_vec, tnapi);
11666 }
11667
11668 out_napi_fini:
11669 tg3_napi_disable(tp);
11670 tg3_napi_fini(tp);
11671 tg3_free_consistent(tp);
11672
11673 out_ints_fini:
11674 tg3_ints_fini(tp);
11675
11676 return err;
11677 }
11678
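/* Tear down everything tg3_start() set up, in reverse order. */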
11679 static void tg3_stop(struct tg3 *tp)
11680 {
11681 int i;
11682
11683 tg3_reset_task_cancel(tp);
11684 tg3_netif_stop(tp);
11685
11686 tg3_timer_stop(tp);
11687
11688 tg3_hwmon_close(tp);
11689
11690 tg3_phy_stop(tp);
11691
11692 tg3_full_lock(tp, 1);
11693
11694 tg3_disable_ints(tp);
11695
11696 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11697 tg3_free_rings(tp);
11698 tg3_flag_clear(tp, INIT_COMPLETE);
11699
11700 tg3_full_unlock(tp);
11701
11702 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11703 struct tg3_napi *tnapi = &tp->napi[i];
11704 free_irq(tnapi->irq_vec, tnapi);
11705 }
11706
11707 tg3_ints_fini(tp);
11708
11709 tg3_napi_fini(tp);
11710
11711 tg3_free_consistent(tp);
11712 }
11713
11714 static int tg3_open(struct net_device *dev)
11715 {
11716 struct tg3 *tp = netdev_priv(dev);
11717 int err;
11718
11719 if (tp->pcierr_recovery) {
11720 netdev_err(dev, "Failed to open device. PCI error recovery "
11721 "in progress\n");
11722 return -EAGAIN;
11723 }
11724
11725 if (tp->fw_needed) {
11726 err = tg3_request_firmware(tp);
11727 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11728 if (err) {
11729 netdev_warn(tp->dev, "EEE capability disabled\n");
11730 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11731 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11732 netdev_warn(tp->dev, "EEE capability restored\n");
11733 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11734 }
11735 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11736 if (err)
11737 return err;
11738 } else if (err) {
11739 netdev_warn(tp->dev, "TSO capability disabled\n");
11740 tg3_flag_clear(tp, TSO_CAPABLE);
11741 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11742 netdev_notice(tp->dev, "TSO capability restored\n");
11743 tg3_flag_set(tp, TSO_CAPABLE);
11744 }
11745 }
11746
11747 tg3_carrier_off(tp);
11748
11749 err = tg3_power_up(tp);
11750 if (err)
11751 return err;
11752
11753 tg3_full_lock(tp, 0);
11754
11755 tg3_disable_ints(tp);
11756 tg3_flag_clear(tp, INIT_COMPLETE);
11757
11758 tg3_full_unlock(tp);
11759
11760 err = tg3_start(tp,
11761 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11762 true, true);
11763 if (err) {
11764 tg3_frob_aux_power(tp, false);
11765 pci_set_power_state(tp->pdev, PCI_D3hot);
11766 }
11767
11768 return err;
11769 }
11770
11771 static int tg3_close(struct net_device *dev)
11772 {
11773 struct tg3 *tp = netdev_priv(dev);
11774
11775 if (tp->pcierr_recovery) {
11776 netdev_err(dev, "Failed to close device. PCI error recovery "
11777 "in progress\n");
11778 return -EAGAIN;
11779 }
11780
11781 tg3_stop(tp);
11782
11783 if (pci_device_is_present(tp->pdev)) {
11784 tg3_power_down_prepare(tp);
11785
11786 tg3_carrier_off(tp);
11787 }
11788 return 0;
11789 }
11790
11791 static inline u64 get_stat64(tg3_stat64_t *val)
11792 {
11793 return ((u64)val->high << 32) | ((u64)val->low);
11794 }
11795
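/* On 5700/5701 with a copper PHY, accumulate CRC errors from the PHY's
 * own receive error counter; every other configuration reads the
 * rx_fcs_errors hardware statistic directly.
 */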
11796 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11797 {
11798 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11799
11800 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11801 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11802 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11803 u32 val;
11804
11805 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11806 tg3_writephy(tp, MII_TG3_TEST1,
11807 val | MII_TG3_TEST1_CRC_EN);
11808 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11809 } else
11810 val = 0;
11811
11812 tp->phy_crc_errors += val;
11813
11814 return tp->phy_crc_errors;
11815 }
11816
11817 return get_stat64(&hw_stats->rx_fcs_errors);
11818 }
11819
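/* Add the live hardware counter for @member on top of the snapshot
 * saved in estats_prev, so totals survive chip resets.
 */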
11820 #define ESTAT_ADD(member) \
11821 estats->member = old_estats->member + \
11822 get_stat64(&hw_stats->member)
11823
11824 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11825 {
11826 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11827 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11828
11829 ESTAT_ADD(rx_octets);
11830 ESTAT_ADD(rx_fragments);
11831 ESTAT_ADD(rx_ucast_packets);
11832 ESTAT_ADD(rx_mcast_packets);
11833 ESTAT_ADD(rx_bcast_packets);
11834 ESTAT_ADD(rx_fcs_errors);
11835 ESTAT_ADD(rx_align_errors);
11836 ESTAT_ADD(rx_xon_pause_rcvd);
11837 ESTAT_ADD(rx_xoff_pause_rcvd);
11838 ESTAT_ADD(rx_mac_ctrl_rcvd);
11839 ESTAT_ADD(rx_xoff_entered);
11840 ESTAT_ADD(rx_frame_too_long_errors);
11841 ESTAT_ADD(rx_jabbers);
11842 ESTAT_ADD(rx_undersize_packets);
11843 ESTAT_ADD(rx_in_length_errors);
11844 ESTAT_ADD(rx_out_length_errors);
11845 ESTAT_ADD(rx_64_or_less_octet_packets);
11846 ESTAT_ADD(rx_65_to_127_octet_packets);
11847 ESTAT_ADD(rx_128_to_255_octet_packets);
11848 ESTAT_ADD(rx_256_to_511_octet_packets);
11849 ESTAT_ADD(rx_512_to_1023_octet_packets);
11850 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11851 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11852 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11853 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11854 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11855
11856 ESTAT_ADD(tx_octets);
11857 ESTAT_ADD(tx_collisions);
11858 ESTAT_ADD(tx_xon_sent);
11859 ESTAT_ADD(tx_xoff_sent);
11860 ESTAT_ADD(tx_flow_control);
11861 ESTAT_ADD(tx_mac_errors);
11862 ESTAT_ADD(tx_single_collisions);
11863 ESTAT_ADD(tx_mult_collisions);
11864 ESTAT_ADD(tx_deferred);
11865 ESTAT_ADD(tx_excessive_collisions);
11866 ESTAT_ADD(tx_late_collisions);
11867 ESTAT_ADD(tx_collide_2times);
11868 ESTAT_ADD(tx_collide_3times);
11869 ESTAT_ADD(tx_collide_4times);
11870 ESTAT_ADD(tx_collide_5times);
11871 ESTAT_ADD(tx_collide_6times);
11872 ESTAT_ADD(tx_collide_7times);
11873 ESTAT_ADD(tx_collide_8times);
11874 ESTAT_ADD(tx_collide_9times);
11875 ESTAT_ADD(tx_collide_10times);
11876 ESTAT_ADD(tx_collide_11times);
11877 ESTAT_ADD(tx_collide_12times);
11878 ESTAT_ADD(tx_collide_13times);
11879 ESTAT_ADD(tx_collide_14times);
11880 ESTAT_ADD(tx_collide_15times);
11881 ESTAT_ADD(tx_ucast_packets);
11882 ESTAT_ADD(tx_mcast_packets);
11883 ESTAT_ADD(tx_bcast_packets);
11884 ESTAT_ADD(tx_carrier_sense_errors);
11885 ESTAT_ADD(tx_discards);
11886 ESTAT_ADD(tx_errors);
11887
11888 ESTAT_ADD(dma_writeq_full);
11889 ESTAT_ADD(dma_write_prioq_full);
11890 ESTAT_ADD(rxbds_empty);
11891 ESTAT_ADD(rx_discards);
11892 ESTAT_ADD(rx_errors);
11893 ESTAT_ADD(rx_threshold_hit);
11894
11895 ESTAT_ADD(dma_readq_full);
11896 ESTAT_ADD(dma_read_prioq_full);
11897 ESTAT_ADD(tx_comp_queue_full);
11898
11899 ESTAT_ADD(ring_set_send_prod_index);
11900 ESTAT_ADD(ring_status_update);
11901 ESTAT_ADD(nic_irqs);
11902 ESTAT_ADD(nic_avoided_irqs);
11903 ESTAT_ADD(nic_tx_threshold_hit);
11904
11905 ESTAT_ADD(mbuf_lwm_thresh_hit);
11906 }
11907
11908 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11909 {
11910 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11911 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11912
11913 stats->rx_packets = old_stats->rx_packets +
11914 get_stat64(&hw_stats->rx_ucast_packets) +
11915 get_stat64(&hw_stats->rx_mcast_packets) +
11916 get_stat64(&hw_stats->rx_bcast_packets);
11917
11918 stats->tx_packets = old_stats->tx_packets +
11919 get_stat64(&hw_stats->tx_ucast_packets) +
11920 get_stat64(&hw_stats->tx_mcast_packets) +
11921 get_stat64(&hw_stats->tx_bcast_packets);
11922
11923 stats->rx_bytes = old_stats->rx_bytes +
11924 get_stat64(&hw_stats->rx_octets);
11925 stats->tx_bytes = old_stats->tx_bytes +
11926 get_stat64(&hw_stats->tx_octets);
11927
11928 stats->rx_errors = old_stats->rx_errors +
11929 get_stat64(&hw_stats->rx_errors);
11930 stats->tx_errors = old_stats->tx_errors +
11931 get_stat64(&hw_stats->tx_errors) +
11932 get_stat64(&hw_stats->tx_mac_errors) +
11933 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11934 get_stat64(&hw_stats->tx_discards);
11935
11936 stats->multicast = old_stats->multicast +
11937 get_stat64(&hw_stats->rx_mcast_packets);
11938 stats->collisions = old_stats->collisions +
11939 get_stat64(&hw_stats->tx_collisions);
11940
11941 stats->rx_length_errors = old_stats->rx_length_errors +
11942 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11943 get_stat64(&hw_stats->rx_undersize_packets);
11944
11945 stats->rx_frame_errors = old_stats->rx_frame_errors +
11946 get_stat64(&hw_stats->rx_align_errors);
11947 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11948 get_stat64(&hw_stats->tx_discards);
11949 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11950 get_stat64(&hw_stats->tx_carrier_sense_errors);
11951
11952 stats->rx_crc_errors = old_stats->rx_crc_errors +
11953 tg3_calc_crc_errors(tp);
11954
11955 stats->rx_missed_errors = old_stats->rx_missed_errors +
11956 get_stat64(&hw_stats->rx_discards);
11957
11958 stats->rx_dropped = tp->rx_dropped;
11959 stats->tx_dropped = tp->tx_dropped;
11960 }
11961
11962 static int tg3_get_regs_len(struct net_device *dev)
11963 {
11964 return TG3_REG_BLK_SIZE;
11965 }
11966
11967 static void tg3_get_regs(struct net_device *dev,
11968 struct ethtool_regs *regs, void *_p)
11969 {
11970 struct tg3 *tp = netdev_priv(dev);
11971
11972 regs->version = 0;
11973
11974 memset(_p, 0, TG3_REG_BLK_SIZE);
11975
11976 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11977 return;
11978
11979 tg3_full_lock(tp, 0);
11980
11981 tg3_dump_legacy_regs(tp, (u32 *)_p);
11982
11983 tg3_full_unlock(tp);
11984 }
11985
11986 static int tg3_get_eeprom_len(struct net_device *dev)
11987 {
11988 struct tg3 *tp = netdev_priv(dev);
11989
11990 return tp->nvram_size;
11991 }
11992
11993 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11994 {
11995 struct tg3 *tp = netdev_priv(dev);
11996 int ret, cpmu_restore = 0;
11997 u8 *pd;
11998 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11999 __be32 val;
12000
12001 if (tg3_flag(tp, NO_NVRAM))
12002 return -EINVAL;
12003
12004 offset = eeprom->offset;
12005 len = eeprom->len;
12006 eeprom->len = 0;
12007
12008 eeprom->magic = TG3_EEPROM_MAGIC;
12009
12010 /* Override clock, link aware and link idle modes */
12011 if (tg3_flag(tp, CPMU_PRESENT)) {
12012 cpmu_val = tr32(TG3_CPMU_CTRL);
12013 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12014 CPMU_CTRL_LINK_IDLE_MODE)) {
12015 tw32(TG3_CPMU_CTRL, cpmu_val &
12016 ~(CPMU_CTRL_LINK_AWARE_MODE |
12017 CPMU_CTRL_LINK_IDLE_MODE));
12018 cpmu_restore = 1;
12019 }
12020 }
12021 tg3_override_clk(tp);
12022
12023 if (offset & 3) {
12024 /* adjustments to start on required 4 byte boundary */
12025 b_offset = offset & 3;
12026 b_count = 4 - b_offset;
12027 if (b_count > len) {
12028 /* i.e. offset=1 len=2 */
12029 b_count = len;
12030 }
12031 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12032 if (ret)
12033 goto eeprom_done;
12034 memcpy(data, ((char *)&val) + b_offset, b_count);
12035 len -= b_count;
12036 offset += b_count;
12037 eeprom->len += b_count;
12038 }
12039
12040 /* read bytes up to the last 4 byte boundary */
12041 pd = &data[eeprom->len];
12042 for (i = 0; i < (len - (len & 3)); i += 4) {
12043 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12044 if (ret) {
12045 if (i)
12046 i -= 4;
12047 eeprom->len += i;
12048 goto eeprom_done;
12049 }
12050 memcpy(pd + i, &val, 4);
12051 if (need_resched()) {
12052 if (signal_pending(current)) {
12053 eeprom->len += i;
12054 ret = -EINTR;
12055 goto eeprom_done;
12056 }
12057 cond_resched();
12058 }
12059 }
12060 eeprom->len += i;
12061
12062 if (len & 3) {
12063 /* read last bytes not ending on 4 byte boundary */
12064 pd = &data[eeprom->len];
12065 b_count = len & 3;
12066 b_offset = offset + len - b_count;
12067 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12068 if (ret)
12069 goto eeprom_done;
12070 memcpy(pd, &val, b_count);
12071 eeprom->len += b_count;
12072 }
12073 ret = 0;
12074
12075 eeprom_done:
12076 /* Restore clock, link aware and link idle modes */
12077 tg3_restore_clk(tp);
12078 if (cpmu_restore)
12079 tw32(TG3_CPMU_CTRL, cpmu_val);
12080
12081 return ret;
12082 }
12083
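/* NVRAM writes must be 4-byte aligned. For a misaligned offset or odd
 * length, read back the bordering words and splice the caller's bytes
 * into a scratch buffer so the block written stays word-aligned.
 */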
12084 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12085 {
12086 struct tg3 *tp = netdev_priv(dev);
12087 int ret;
12088 u32 offset, len, b_offset, odd_len;
12089 u8 *buf;
12090 __be32 start = 0, end;
12091
12092 if (tg3_flag(tp, NO_NVRAM) ||
12093 eeprom->magic != TG3_EEPROM_MAGIC)
12094 return -EINVAL;
12095
12096 offset = eeprom->offset;
12097 len = eeprom->len;
12098
12099 if ((b_offset = (offset & 3))) {
12100 /* adjustments to start on required 4 byte boundary */
12101 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12102 if (ret)
12103 return ret;
12104 len += b_offset;
12105 offset &= ~3;
12106 if (len < 4)
12107 len = 4;
12108 }
12109
12110 odd_len = 0;
12111 if (len & 3) {
12112 /* adjustments to end on required 4 byte boundary */
12113 odd_len = 1;
12114 len = (len + 3) & ~3;
12115 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12116 if (ret)
12117 return ret;
12118 }
12119
12120 buf = data;
12121 if (b_offset || odd_len) {
12122 buf = kmalloc(len, GFP_KERNEL);
12123 if (!buf)
12124 return -ENOMEM;
12125 if (b_offset)
12126 memcpy(buf, &start, 4);
12127 if (odd_len)
12128 memcpy(buf+len-4, &end, 4);
12129 memcpy(buf + b_offset, data, eeprom->len);
12130 }
12131
12132 ret = tg3_nvram_write_block(tp, offset, len, buf);
12133
12134 if (buf != data)
12135 kfree(buf);
12136
12137 return ret;
12138 }
12139
12140 static int tg3_get_link_ksettings(struct net_device *dev,
12141 struct ethtool_link_ksettings *cmd)
12142 {
12143 struct tg3 *tp = netdev_priv(dev);
12144 u32 supported, advertising;
12145
12146 if (tg3_flag(tp, USE_PHYLIB)) {
12147 struct phy_device *phydev;
12148 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12149 return -EAGAIN;
12150 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12151 phy_ethtool_ksettings_get(phydev, cmd);
12152
12153 return 0;
12154 }
12155
12156 supported = (SUPPORTED_Autoneg);
12157
12158 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12159 supported |= (SUPPORTED_1000baseT_Half |
12160 SUPPORTED_1000baseT_Full);
12161
12162 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12163 supported |= (SUPPORTED_100baseT_Half |
12164 SUPPORTED_100baseT_Full |
12165 SUPPORTED_10baseT_Half |
12166 SUPPORTED_10baseT_Full |
12167 SUPPORTED_TP);
12168 cmd->base.port = PORT_TP;
12169 } else {
12170 supported |= SUPPORTED_FIBRE;
12171 cmd->base.port = PORT_FIBRE;
12172 }
12173 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12174 supported);
12175
12176 advertising = tp->link_config.advertising;
12177 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12178 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12179 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12180 advertising |= ADVERTISED_Pause;
12181 } else {
12182 advertising |= ADVERTISED_Pause |
12183 ADVERTISED_Asym_Pause;
12184 }
12185 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12186 advertising |= ADVERTISED_Asym_Pause;
12187 }
12188 }
12189 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12190 advertising);
12191
12192 if (netif_running(dev) && tp->link_up) {
12193 cmd->base.speed = tp->link_config.active_speed;
12194 cmd->base.duplex = tp->link_config.active_duplex;
12195 ethtool_convert_legacy_u32_to_link_mode(
12196 cmd->link_modes.lp_advertising,
12197 tp->link_config.rmt_adv);
12198
12199 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12200 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12201 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12202 else
12203 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12204 }
12205 } else {
12206 cmd->base.speed = SPEED_UNKNOWN;
12207 cmd->base.duplex = DUPLEX_UNKNOWN;
12208 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12209 }
12210 cmd->base.phy_address = tp->phy_addr;
12211 cmd->base.autoneg = tp->link_config.autoneg;
12212 return 0;
12213 }
12214
12215 static int tg3_set_link_ksettings(struct net_device *dev,
12216 const struct ethtool_link_ksettings *cmd)
12217 {
12218 struct tg3 *tp = netdev_priv(dev);
12219 u32 speed = cmd->base.speed;
12220 u32 advertising;
12221
12222 if (tg3_flag(tp, USE_PHYLIB)) {
12223 struct phy_device *phydev;
12224 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12225 return -EAGAIN;
12226 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12227 return phy_ethtool_ksettings_set(phydev, cmd);
12228 }
12229
12230 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12231 cmd->base.autoneg != AUTONEG_DISABLE)
12232 return -EINVAL;
12233
12234 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12235 cmd->base.duplex != DUPLEX_FULL &&
12236 cmd->base.duplex != DUPLEX_HALF)
12237 return -EINVAL;
12238
12239 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12240 cmd->link_modes.advertising);
12241
12242 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12243 u32 mask = ADVERTISED_Autoneg |
12244 ADVERTISED_Pause |
12245 ADVERTISED_Asym_Pause;
12246
12247 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12248 mask |= ADVERTISED_1000baseT_Half |
12249 ADVERTISED_1000baseT_Full;
12250
12251 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12252 mask |= ADVERTISED_100baseT_Half |
12253 ADVERTISED_100baseT_Full |
12254 ADVERTISED_10baseT_Half |
12255 ADVERTISED_10baseT_Full |
12256 ADVERTISED_TP;
12257 else
12258 mask |= ADVERTISED_FIBRE;
12259
12260 if (advertising & ~mask)
12261 return -EINVAL;
12262
12263 mask &= (ADVERTISED_1000baseT_Half |
12264 ADVERTISED_1000baseT_Full |
12265 ADVERTISED_100baseT_Half |
12266 ADVERTISED_100baseT_Full |
12267 ADVERTISED_10baseT_Half |
12268 ADVERTISED_10baseT_Full);
12269
12270 advertising &= mask;
12271 } else {
12272 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12273 if (speed != SPEED_1000)
12274 return -EINVAL;
12275
12276 if (cmd->base.duplex != DUPLEX_FULL)
12277 return -EINVAL;
12278 } else {
12279 if (speed != SPEED_100 &&
12280 speed != SPEED_10)
12281 return -EINVAL;
12282 }
12283 }
12284
12285 tg3_full_lock(tp, 0);
12286
12287 tp->link_config.autoneg = cmd->base.autoneg;
12288 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12289 tp->link_config.advertising = (advertising |
12290 ADVERTISED_Autoneg);
12291 tp->link_config.speed = SPEED_UNKNOWN;
12292 tp->link_config.duplex = DUPLEX_UNKNOWN;
12293 } else {
12294 tp->link_config.advertising = 0;
12295 tp->link_config.speed = speed;
12296 tp->link_config.duplex = cmd->base.duplex;
12297 }
12298
12299 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12300
12301 tg3_warn_mgmt_link_flap(tp);
12302
12303 if (netif_running(dev))
12304 tg3_setup_phy(tp, true);
12305
12306 tg3_full_unlock(tp);
12307
12308 return 0;
12309 }
12310
12311 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12312 {
12313 struct tg3 *tp = netdev_priv(dev);
12314
12315 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12316 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12317 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12318 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12319 }
12320
12321 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12322 {
12323 struct tg3 *tp = netdev_priv(dev);
12324
12325 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12326 wol->supported = WAKE_MAGIC;
12327 else
12328 wol->supported = 0;
12329 wol->wolopts = 0;
12330 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12331 wol->wolopts = WAKE_MAGIC;
12332 memset(&wol->sopass, 0, sizeof(wol->sopass));
12333 }
12334
12335 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12336 {
12337 struct tg3 *tp = netdev_priv(dev);
12338 struct device *dp = &tp->pdev->dev;
12339
12340 if (wol->wolopts & ~WAKE_MAGIC)
12341 return -EINVAL;
12342 if ((wol->wolopts & WAKE_MAGIC) &&
12343 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12344 return -EINVAL;
12345
12346 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12347
12348 if (device_may_wakeup(dp))
12349 tg3_flag_set(tp, WOL_ENABLE);
12350 else
12351 tg3_flag_clear(tp, WOL_ENABLE);
12352
12353 return 0;
12354 }
12355
12356 static u32 tg3_get_msglevel(struct net_device *dev)
12357 {
12358 struct tg3 *tp = netdev_priv(dev);
12359 return tp->msg_enable;
12360 }
12361
12362 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12363 {
12364 struct tg3 *tp = netdev_priv(dev);
12365 tp->msg_enable = value;
12366 }
12367
12368 static int tg3_nway_reset(struct net_device *dev)
12369 {
12370 struct tg3 *tp = netdev_priv(dev);
12371 int r;
12372
12373 if (!netif_running(dev))
12374 return -EAGAIN;
12375
12376 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12377 return -EINVAL;
12378
12379 tg3_warn_mgmt_link_flap(tp);
12380
12381 if (tg3_flag(tp, USE_PHYLIB)) {
12382 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12383 return -EAGAIN;
12384 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12385 } else {
12386 u32 bmcr;
12387
12388 spin_lock_bh(&tp->lock);
12389 r = -EINVAL;
12390 tg3_readphy(tp, MII_BMCR, &bmcr);
12391 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12392 ((bmcr & BMCR_ANENABLE) ||
12393 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12394 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12395 BMCR_ANENABLE);
12396 r = 0;
12397 }
12398 spin_unlock_bh(&tp->lock);
12399 }
12400
12401 return r;
12402 }
12403
12404 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12405 {
12406 struct tg3 *tp = netdev_priv(dev);
12407
12408 ering->rx_max_pending = tp->rx_std_ring_mask;
12409 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12410 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12411 else
12412 ering->rx_jumbo_max_pending = 0;
12413
12414 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12415
12416 ering->rx_pending = tp->rx_pending;
12417 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12418 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12419 else
12420 ering->rx_jumbo_pending = 0;
12421
12422 ering->tx_pending = tp->napi[0].tx_pending;
12423 }
12424
12425 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12426 {
12427 struct tg3 *tp = netdev_priv(dev);
12428 int i, irq_sync = 0, err = 0;
12429
12430 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12431 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12432 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12433 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12434 (tg3_flag(tp, TSO_BUG) &&
12435 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12436 return -EINVAL;
12437
12438 if (netif_running(dev)) {
12439 tg3_phy_stop(tp);
12440 tg3_netif_stop(tp);
12441 irq_sync = 1;
12442 }
12443
12444 tg3_full_lock(tp, irq_sync);
12445
12446 tp->rx_pending = ering->rx_pending;
12447
12448 if (tg3_flag(tp, MAX_RXPEND_64) &&
12449 tp->rx_pending > 63)
12450 tp->rx_pending = 63;
12451
12452 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12453 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12454
12455 for (i = 0; i < tp->irq_max; i++)
12456 tp->napi[i].tx_pending = ering->tx_pending;
12457
12458 if (netif_running(dev)) {
12459 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12460 err = tg3_restart_hw(tp, false);
12461 if (!err)
12462 tg3_netif_start(tp);
12463 }
12464
12465 tg3_full_unlock(tp);
12466
12467 if (irq_sync && !err)
12468 tg3_phy_start(tp);
12469
12470 return err;
12471 }
12472
12473 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12474 {
12475 struct tg3 *tp = netdev_priv(dev);
12476
12477 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12478
12479 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12480 epause->rx_pause = 1;
12481 else
12482 epause->rx_pause = 0;
12483
12484 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12485 epause->tx_pause = 1;
12486 else
12487 epause->tx_pause = 0;
12488 }
12489
12490 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12491 {
12492 struct tg3 *tp = netdev_priv(dev);
12493 int err = 0;
12494
12495 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12496 tg3_warn_mgmt_link_flap(tp);
12497
12498 if (tg3_flag(tp, USE_PHYLIB)) {
12499 u32 newadv;
12500 struct phy_device *phydev;
12501
12502 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12503
12504 if (!(phydev->supported & SUPPORTED_Pause) ||
12505 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12506 (epause->rx_pause != epause->tx_pause)))
12507 return -EINVAL;
12508
12509 tp->link_config.flowctrl = 0;
12510 if (epause->rx_pause) {
12511 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12512
12513 if (epause->tx_pause) {
12514 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12515 newadv = ADVERTISED_Pause;
12516 } else
12517 newadv = ADVERTISED_Pause |
12518 ADVERTISED_Asym_Pause;
12519 } else if (epause->tx_pause) {
12520 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12521 newadv = ADVERTISED_Asym_Pause;
12522 } else
12523 newadv = 0;
12524
12525 if (epause->autoneg)
12526 tg3_flag_set(tp, PAUSE_AUTONEG);
12527 else
12528 tg3_flag_clear(tp, PAUSE_AUTONEG);
12529
12530 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12531 u32 oldadv = phydev->advertising &
12532 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12533 if (oldadv != newadv) {
12534 phydev->advertising &=
12535 ~(ADVERTISED_Pause |
12536 ADVERTISED_Asym_Pause);
12537 phydev->advertising |= newadv;
12538 if (phydev->autoneg) {
12539 /*
12540 * Always renegotiate the link to
12541 * inform our link partner of our
12542 * flow control settings, even if the
12543 * flow control is forced. Let
12544 * tg3_adjust_link() do the final
12545 * flow control setup.
12546 */
12547 return phy_start_aneg(phydev);
12548 }
12549 }
12550
12551 if (!epause->autoneg)
12552 tg3_setup_flow_control(tp, 0, 0);
12553 } else {
12554 tp->link_config.advertising &=
12555 ~(ADVERTISED_Pause |
12556 ADVERTISED_Asym_Pause);
12557 tp->link_config.advertising |= newadv;
12558 }
12559 } else {
12560 int irq_sync = 0;
12561
12562 if (netif_running(dev)) {
12563 tg3_netif_stop(tp);
12564 irq_sync = 1;
12565 }
12566
12567 tg3_full_lock(tp, irq_sync);
12568
12569 if (epause->autoneg)
12570 tg3_flag_set(tp, PAUSE_AUTONEG);
12571 else
12572 tg3_flag_clear(tp, PAUSE_AUTONEG);
12573 if (epause->rx_pause)
12574 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12575 else
12576 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12577 if (epause->tx_pause)
12578 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12579 else
12580 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12581
12582 if (netif_running(dev)) {
12583 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12584 err = tg3_restart_hw(tp, false);
12585 if (!err)
12586 tg3_netif_start(tp);
12587 }
12588
12589 tg3_full_unlock(tp);
12590 }
12591
12592 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12593
12594 return err;
12595 }
12596
12597 static int tg3_get_sset_count(struct net_device *dev, int sset)
12598 {
12599 switch (sset) {
12600 case ETH_SS_TEST:
12601 return TG3_NUM_TEST;
12602 case ETH_SS_STATS:
12603 return TG3_NUM_STATS;
12604 default:
12605 return -EOPNOTSUPP;
12606 }
12607 }
12608
12609 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12610 u32 *rules __always_unused)
12611 {
12612 struct tg3 *tp = netdev_priv(dev);
12613
12614 if (!tg3_flag(tp, SUPPORT_MSIX))
12615 return -EOPNOTSUPP;
12616
12617 switch (info->cmd) {
12618 case ETHTOOL_GRXRINGS:
12619 if (netif_running(tp->dev))
12620 info->data = tp->rxq_cnt;
12621 else {
12622 info->data = num_online_cpus();
12623 if (info->data > TG3_RSS_MAX_NUM_QS)
12624 info->data = TG3_RSS_MAX_NUM_QS;
12625 }
12626
12627 return 0;
12628
12629 default:
12630 return -EOPNOTSUPP;
12631 }
12632 }
12633
12634 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12635 {
12636 u32 size = 0;
12637 struct tg3 *tp = netdev_priv(dev);
12638
12639 if (tg3_flag(tp, SUPPORT_MSIX))
12640 size = TG3_RSS_INDIR_TBL_SIZE;
12641
12642 return size;
12643 }
12644
12645 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12646 {
12647 struct tg3 *tp = netdev_priv(dev);
12648 int i;
12649
12650 if (hfunc)
12651 *hfunc = ETH_RSS_HASH_TOP;
12652 if (!indir)
12653 return 0;
12654
12655 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12656 indir[i] = tp->rss_ind_tbl[i];
12657
12658 return 0;
12659 }
12660
12661 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12662 const u8 hfunc)
12663 {
12664 struct tg3 *tp = netdev_priv(dev);
12665 size_t i;
12666
12667 /* We require at least one supported parameter to be changed and no
12668 * change in any of the unsupported parameters
12669 */
12670 if (key ||
12671 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12672 return -EOPNOTSUPP;
12673
12674 if (!indir)
12675 return 0;
12676
12677 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12678 tp->rss_ind_tbl[i] = indir[i];
12679
12680 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12681 return 0;
12682
12683 /* It is legal to write the indirection
12684 * table while the device is running.
12685 */
12686 tg3_full_lock(tp, 0);
12687 tg3_rss_write_indir_tbl(tp);
12688 tg3_full_unlock(tp);
12689
12690 return 0;
12691 }
12692
12693 static void tg3_get_channels(struct net_device *dev,
12694 struct ethtool_channels *channel)
12695 {
12696 struct tg3 *tp = netdev_priv(dev);
12697 u32 deflt_qs = netif_get_num_default_rss_queues();
12698
12699 channel->max_rx = tp->rxq_max;
12700 channel->max_tx = tp->txq_max;
12701
12702 if (netif_running(dev)) {
12703 channel->rx_count = tp->rxq_cnt;
12704 channel->tx_count = tp->txq_cnt;
12705 } else {
12706 if (tp->rxq_req)
12707 channel->rx_count = tp->rxq_req;
12708 else
12709 channel->rx_count = min(deflt_qs, tp->rxq_max);
12710
12711 if (tp->txq_req)
12712 channel->tx_count = tp->txq_req;
12713 else
12714 channel->tx_count = min(deflt_qs, tp->txq_max);
12715 }
12716 }
12717
12718 static int tg3_set_channels(struct net_device *dev,
12719 struct ethtool_channels *channel)
12720 {
12721 struct tg3 *tp = netdev_priv(dev);
12722
12723 if (!tg3_flag(tp, SUPPORT_MSIX))
12724 return -EOPNOTSUPP;
12725
12726 if (channel->rx_count > tp->rxq_max ||
12727 channel->tx_count > tp->txq_max)
12728 return -EINVAL;
12729
12730 tp->rxq_req = channel->rx_count;
12731 tp->txq_req = channel->tx_count;
12732
12733 if (!netif_running(dev))
12734 return 0;
12735
12736 tg3_stop(tp);
12737
12738 tg3_carrier_off(tp);
12739
12740 tg3_start(tp, true, false, false);
12741
12742 return 0;
12743 }
12744
12745 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12746 {
12747 switch (stringset) {
12748 case ETH_SS_STATS:
12749 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12750 break;
12751 case ETH_SS_TEST:
12752 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12753 break;
12754 default:
12755 WARN_ON(1); /* we need a WARN() */
12756 break;
12757 }
12758 }
12759
12760 static int tg3_set_phys_id(struct net_device *dev,
12761 enum ethtool_phys_id_state state)
12762 {
12763 struct tg3 *tp = netdev_priv(dev);
12764
12765 if (!netif_running(tp->dev))
12766 return -EAGAIN;
12767
12768 switch (state) {
12769 case ETHTOOL_ID_ACTIVE:
12770 return 1; /* cycle on/off once per second */
12771
12772 case ETHTOOL_ID_ON:
12773 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12774 LED_CTRL_1000MBPS_ON |
12775 LED_CTRL_100MBPS_ON |
12776 LED_CTRL_10MBPS_ON |
12777 LED_CTRL_TRAFFIC_OVERRIDE |
12778 LED_CTRL_TRAFFIC_BLINK |
12779 LED_CTRL_TRAFFIC_LED);
12780 break;
12781
12782 case ETHTOOL_ID_OFF:
12783 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12784 LED_CTRL_TRAFFIC_OVERRIDE);
12785 break;
12786
12787 case ETHTOOL_ID_INACTIVE:
12788 tw32(MAC_LED_CTRL, tp->led_ctrl);
12789 break;
12790 }
12791
12792 return 0;
12793 }
12794
12795 static void tg3_get_ethtool_stats(struct net_device *dev,
12796 struct ethtool_stats *estats, u64 *tmp_stats)
12797 {
12798 struct tg3 *tp = netdev_priv(dev);
12799
12800 if (tp->hw_stats)
12801 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12802 else
12803 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12804 }
12805
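/* Read the VPD block: prefer an extended VPD region named in the NVRAM
 * directory, fall back to the default NVRAM VPD location, and use PCI
 * config-space VPD reads when the NVRAM magic is not recognized.
 */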
12806 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12807 {
12808 int i;
12809 __be32 *buf;
12810 u32 offset = 0, len = 0;
12811 u32 magic, val;
12812
12813 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12814 return NULL;
12815
12816 if (magic == TG3_EEPROM_MAGIC) {
12817 for (offset = TG3_NVM_DIR_START;
12818 offset < TG3_NVM_DIR_END;
12819 offset += TG3_NVM_DIRENT_SIZE) {
12820 if (tg3_nvram_read(tp, offset, &val))
12821 return NULL;
12822
12823 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12824 TG3_NVM_DIRTYPE_EXTVPD)
12825 break;
12826 }
12827
12828 if (offset != TG3_NVM_DIR_END) {
12829 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12830 if (tg3_nvram_read(tp, offset + 4, &offset))
12831 return NULL;
12832
12833 offset = tg3_nvram_logical_addr(tp, offset);
12834 }
12835 }
12836
12837 if (!offset || !len) {
12838 offset = TG3_NVM_VPD_OFF;
12839 len = TG3_NVM_VPD_LEN;
12840 }
12841
12842 buf = kmalloc(len, GFP_KERNEL);
12843 if (buf == NULL)
12844 return NULL;
12845
12846 if (magic == TG3_EEPROM_MAGIC) {
12847 for (i = 0; i < len; i += 4) {
12848 /* The data is in little-endian format in NVRAM.
12849 * Use the big-endian read routines to preserve
12850 * the byte order as it exists in NVRAM.
12851 */
12852 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12853 goto error;
12854 }
12855 } else {
12856 u8 *ptr;
12857 ssize_t cnt;
12858 unsigned int pos = 0;
12859
12860 ptr = (u8 *)&buf[0];
12861 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12862 cnt = pci_read_vpd(tp->pdev, pos,
12863 len - pos, ptr);
12864 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12865 cnt = 0;
12866 else if (cnt < 0)
12867 goto error;
12868 }
12869 if (pos != len)
12870 goto error;
12871 }
12872
12873 *vpdlen = len;
12874
12875 return buf;
12876
12877 error:
12878 kfree(buf);
12879 return NULL;
12880 }
12881
12882 #define NVRAM_TEST_SIZE 0x100
12883 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12884 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12885 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12886 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12887 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12888 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12889 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12890 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12891
12892 static int tg3_test_nvram(struct tg3 *tp)
12893 {
12894 u32 csum, magic, len;
12895 __be32 *buf;
12896 int i, j, k, err = 0, size;
12897
12898 if (tg3_flag(tp, NO_NVRAM))
12899 return 0;
12900
12901 if (tg3_nvram_read(tp, 0, &magic) != 0)
12902 return -EIO;
12903
12904 if (magic == TG3_EEPROM_MAGIC)
12905 size = NVRAM_TEST_SIZE;
12906 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12907 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12908 TG3_EEPROM_SB_FORMAT_1) {
12909 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12910 case TG3_EEPROM_SB_REVISION_0:
12911 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12912 break;
12913 case TG3_EEPROM_SB_REVISION_2:
12914 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12915 break;
12916 case TG3_EEPROM_SB_REVISION_3:
12917 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12918 break;
12919 case TG3_EEPROM_SB_REVISION_4:
12920 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12921 break;
12922 case TG3_EEPROM_SB_REVISION_5:
12923 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12924 break;
12925 case TG3_EEPROM_SB_REVISION_6:
12926 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12927 break;
12928 default:
12929 return -EIO;
12930 }
12931 } else
12932 return 0;
12933 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12934 size = NVRAM_SELFBOOT_HW_SIZE;
12935 else
12936 return -EIO;
12937
12938 buf = kmalloc(size, GFP_KERNEL);
12939 if (buf == NULL)
12940 return -ENOMEM;
12941
12942 err = -EIO;
12943 for (i = 0, j = 0; i < size; i += 4, j++) {
12944 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12945 if (err)
12946 break;
12947 }
12948 if (i < size)
12949 goto out;
12950
12951 /* Selfboot format */
12952 magic = be32_to_cpu(buf[0]);
12953 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12954 TG3_EEPROM_MAGIC_FW) {
12955 u8 *buf8 = (u8 *) buf, csum8 = 0;
12956
12957 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12958 TG3_EEPROM_SB_REVISION_2) {
12959 /* For rev 2, the csum doesn't include the MBA. */
12960 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12961 csum8 += buf8[i];
12962 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12963 csum8 += buf8[i];
12964 } else {
12965 for (i = 0; i < size; i++)
12966 csum8 += buf8[i];
12967 }
12968
12969 if (csum8 == 0) {
12970 err = 0;
12971 goto out;
12972 }
12973
12974 err = -EIO;
12975 goto out;
12976 }
12977
12978 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12979 TG3_EEPROM_MAGIC_HW) {
12980 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12981 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12982 u8 *buf8 = (u8 *) buf;
12983
12984 /* Separate the parity bits and the data bytes. */
12985 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12986 if ((i == 0) || (i == 8)) {
12987 int l;
12988 u8 msk;
12989
12990 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12991 parity[k++] = buf8[i] & msk;
12992 i++;
12993 } else if (i == 16) {
12994 int l;
12995 u8 msk;
12996
12997 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12998 parity[k++] = buf8[i] & msk;
12999 i++;
13000
13001 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13002 parity[k++] = buf8[i] & msk;
13003 i++;
13004 }
13005 data[j++] = buf8[i];
13006 }
13007
13008 err = -EIO;
13009 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13010 u8 hw8 = hweight8(data[i]);
13011
13012 if ((hw8 & 0x1) && parity[i])
13013 goto out;
13014 else if (!(hw8 & 0x1) && !parity[i])
13015 goto out;
13016 }
13017 err = 0;
13018 goto out;
13019 }
13020
13021 err = -EIO;
13022
13023 /* Bootstrap checksum at offset 0x10 */
13024 csum = calc_crc((unsigned char *) buf, 0x10);
13025 if (csum != le32_to_cpu(buf[0x10/4]))
13026 goto out;
13027
13028 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13029 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13030 if (csum != le32_to_cpu(buf[0xfc/4]))
13031 goto out;
13032
13033 kfree(buf);
13034
13035 buf = tg3_vpd_readblock(tp, &len);
13036 if (!buf)
13037 return -ENOMEM;
13038
13039 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13040 if (i > 0) {
13041 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13042 if (j < 0)
13043 goto out;
13044
13045 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13046 goto out;
13047
13048 i += PCI_VPD_LRDT_TAG_SIZE;
13049 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13050 PCI_VPD_RO_KEYWORD_CHKSUM);
13051 if (j > 0) {
13052 u8 csum8 = 0;
13053
13054 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13055
13056 for (i = 0; i <= j; i++)
13057 csum8 += ((u8 *)buf)[i];
13058
13059 if (csum8)
13060 goto out;
13061 }
13062 }
13063
13064 err = 0;
13065
13066 out:
13067 kfree(buf);
13068 return err;
13069 }
13070
13071 #define TG3_SERDES_TIMEOUT_SEC 2
13072 #define TG3_COPPER_TIMEOUT_SEC 6
13073
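/* Poll for link-up once per second: SerDes devices get 2 seconds,
 * copper devices up to 6.
 */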
13074 static int tg3_test_link(struct tg3 *tp)
13075 {
13076 int i, max;
13077
13078 if (!netif_running(tp->dev))
13079 return -ENODEV;
13080
13081 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13082 max = TG3_SERDES_TIMEOUT_SEC;
13083 else
13084 max = TG3_COPPER_TIMEOUT_SEC;
13085
13086 for (i = 0; i < max; i++) {
13087 if (tp->link_up)
13088 return 0;
13089
13090 if (msleep_interruptible(1000))
13091 break;
13092 }
13093
13094 return -EIO;
13095 }
13096
13097 /* Only test the commonly used registers */
13098 static int tg3_test_registers(struct tg3 *tp)
13099 {
13100 int i, is_5705, is_5750;
13101 u32 offset, read_mask, write_mask, val, save_val, read_val;
13102 static struct {
13103 u16 offset;
13104 u16 flags;
13105 #define TG3_FL_5705 0x1
13106 #define TG3_FL_NOT_5705 0x2
13107 #define TG3_FL_NOT_5788 0x4
13108 #define TG3_FL_NOT_5750 0x8
13109 u32 read_mask;
13110 u32 write_mask;
13111 } reg_tbl[] = {
13112 /* MAC Control Registers */
13113 { MAC_MODE, TG3_FL_NOT_5705,
13114 0x00000000, 0x00ef6f8c },
13115 { MAC_MODE, TG3_FL_5705,
13116 0x00000000, 0x01ef6b8c },
13117 { MAC_STATUS, TG3_FL_NOT_5705,
13118 0x03800107, 0x00000000 },
13119 { MAC_STATUS, TG3_FL_5705,
13120 0x03800100, 0x00000000 },
13121 { MAC_ADDR_0_HIGH, 0x0000,
13122 0x00000000, 0x0000ffff },
13123 { MAC_ADDR_0_LOW, 0x0000,
13124 0x00000000, 0xffffffff },
13125 { MAC_RX_MTU_SIZE, 0x0000,
13126 0x00000000, 0x0000ffff },
13127 { MAC_TX_MODE, 0x0000,
13128 0x00000000, 0x00000070 },
13129 { MAC_TX_LENGTHS, 0x0000,
13130 0x00000000, 0x00003fff },
13131 { MAC_RX_MODE, TG3_FL_NOT_5705,
13132 0x00000000, 0x000007fc },
13133 { MAC_RX_MODE, TG3_FL_5705,
13134 0x00000000, 0x000007dc },
13135 { MAC_HASH_REG_0, 0x0000,
13136 0x00000000, 0xffffffff },
13137 { MAC_HASH_REG_1, 0x0000,
13138 0x00000000, 0xffffffff },
13139 { MAC_HASH_REG_2, 0x0000,
13140 0x00000000, 0xffffffff },
13141 { MAC_HASH_REG_3, 0x0000,
13142 0x00000000, 0xffffffff },
13143
13144 /* Receive Data and Receive BD Initiator Control Registers. */
13145 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13146 0x00000000, 0xffffffff },
13147 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13148 0x00000000, 0xffffffff },
13149 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13150 0x00000000, 0x00000003 },
13151 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13152 0x00000000, 0xffffffff },
13153 { RCVDBDI_STD_BD+0, 0x0000,
13154 0x00000000, 0xffffffff },
13155 { RCVDBDI_STD_BD+4, 0x0000,
13156 0x00000000, 0xffffffff },
13157 { RCVDBDI_STD_BD+8, 0x0000,
13158 0x00000000, 0xffff0002 },
13159 { RCVDBDI_STD_BD+0xc, 0x0000,
13160 0x00000000, 0xffffffff },
13161
13162 /* Receive BD Initiator Control Registers. */
13163 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13164 0x00000000, 0xffffffff },
13165 { RCVBDI_STD_THRESH, TG3_FL_5705,
13166 0x00000000, 0x000003ff },
13167 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13168 0x00000000, 0xffffffff },
13169
13170 /* Host Coalescing Control Registers. */
13171 { HOSTCC_MODE, TG3_FL_NOT_5705,
13172 0x00000000, 0x00000004 },
13173 { HOSTCC_MODE, TG3_FL_5705,
13174 0x00000000, 0x000000f6 },
13175 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13176 0x00000000, 0xffffffff },
13177 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13178 0x00000000, 0x000003ff },
13179 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13180 0x00000000, 0xffffffff },
13181 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13182 0x00000000, 0x000003ff },
13183 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13184 0x00000000, 0xffffffff },
13185 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13186 0x00000000, 0x000000ff },
13187 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13188 0x00000000, 0xffffffff },
13189 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13190 0x00000000, 0x000000ff },
13191 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13192 0x00000000, 0xffffffff },
13193 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13194 0x00000000, 0xffffffff },
13195 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13196 0x00000000, 0xffffffff },
13197 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13198 0x00000000, 0x000000ff },
13199 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13200 0x00000000, 0xffffffff },
13201 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13202 0x00000000, 0x000000ff },
13203 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13204 0x00000000, 0xffffffff },
13205 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13206 0x00000000, 0xffffffff },
13207 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13208 0x00000000, 0xffffffff },
13209 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13210 0x00000000, 0xffffffff },
13211 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13212 0x00000000, 0xffffffff },
13213 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13214 0xffffffff, 0x00000000 },
13215 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13216 0xffffffff, 0x00000000 },
13217
13218 /* Buffer Manager Control Registers. */
13219 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13220 0x00000000, 0x007fff80 },
13221 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13222 0x00000000, 0x007fffff },
13223 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13224 0x00000000, 0x0000003f },
13225 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13226 0x00000000, 0x000001ff },
13227 { BUFMGR_MB_HIGH_WATER, 0x0000,
13228 0x00000000, 0x000001ff },
13229 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13230 0xffffffff, 0x00000000 },
13231 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13232 0xffffffff, 0x00000000 },
13233
13234 /* Mailbox Registers */
13235 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13236 0x00000000, 0x000001ff },
13237 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13238 0x00000000, 0x000001ff },
13239 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13240 0x00000000, 0x000007ff },
13241 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13242 0x00000000, 0x000001ff },
13243
13244 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13245 };
13246
13247 is_5705 = is_5750 = 0;
13248 if (tg3_flag(tp, 5705_PLUS)) {
13249 is_5705 = 1;
13250 if (tg3_flag(tp, 5750_PLUS))
13251 is_5750 = 1;
13252 }
13253
13254 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13255 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13256 continue;
13257
13258 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13259 continue;
13260
13261 if (tg3_flag(tp, IS_5788) &&
13262 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13263 continue;
13264
13265 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13266 continue;
13267
13268 offset = (u32) reg_tbl[i].offset;
13269 read_mask = reg_tbl[i].read_mask;
13270 write_mask = reg_tbl[i].write_mask;
13271
13272 /* Save the original register content */
13273 save_val = tr32(offset);
13274
13275 /* Determine the read-only value. */
13276 read_val = save_val & read_mask;
13277
13278 /* Write zero to the register, then make sure the read-only bits
13279 * are not changed and the read/write bits are all zeros.
13280 */
13281 tw32(offset, 0);
13282
13283 val = tr32(offset);
13284
13285 /* Test the read-only and read/write bits. */
13286 if (((val & read_mask) != read_val) || (val & write_mask))
13287 goto out;
13288
13289 		/* Write ones to all the bits defined by read_mask and
13290 		 * write_mask, then make sure the read-only bits are not
13291 		 * changed and the read/write bits are all ones.
13292 		 */
13293 tw32(offset, read_mask | write_mask);
13294
13295 val = tr32(offset);
13296
13297 /* Test the read-only bits. */
13298 if ((val & read_mask) != read_val)
13299 goto out;
13300
13301 /* Test the read/write bits. */
13302 if ((val & write_mask) != write_mask)
13303 goto out;
13304
13305 tw32(offset, save_val);
13306 }
13307
13308 return 0;
13309
13310 out:
13311 if (netif_msg_hw(tp))
13312 netdev_err(tp->dev,
13313 "Register test failed at offset %x\n", offset);
13314 tw32(offset, save_val);
13315 return -EIO;
13316 }
13317
13318 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13319 {
13320 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13321 int i;
13322 u32 j;
13323
13324 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13325 for (j = 0; j < len; j += 4) {
13326 u32 val;
13327
13328 tg3_write_mem(tp, offset + j, test_pattern[i]);
13329 tg3_read_mem(tp, offset + j, &val);
13330 if (val != test_pattern[i])
13331 return -EIO;
13332 }
13333 }
13334 return 0;
13335 }
13336
13337 static int tg3_test_memory(struct tg3 *tp)
13338 {
13339 static struct mem_entry {
13340 u32 offset;
13341 u32 len;
13342 } mem_tbl_570x[] = {
13343 { 0x00000000, 0x00b50},
13344 { 0x00002000, 0x1c000},
13345 { 0xffffffff, 0x00000}
13346 }, mem_tbl_5705[] = {
13347 { 0x00000100, 0x0000c},
13348 { 0x00000200, 0x00008},
13349 { 0x00004000, 0x00800},
13350 { 0x00006000, 0x01000},
13351 { 0x00008000, 0x02000},
13352 { 0x00010000, 0x0e000},
13353 { 0xffffffff, 0x00000}
13354 }, mem_tbl_5755[] = {
13355 { 0x00000200, 0x00008},
13356 { 0x00004000, 0x00800},
13357 { 0x00006000, 0x00800},
13358 { 0x00008000, 0x02000},
13359 { 0x00010000, 0x0c000},
13360 { 0xffffffff, 0x00000}
13361 }, mem_tbl_5906[] = {
13362 { 0x00000200, 0x00008},
13363 { 0x00004000, 0x00400},
13364 { 0x00006000, 0x00400},
13365 { 0x00008000, 0x01000},
13366 { 0x00010000, 0x01000},
13367 { 0xffffffff, 0x00000}
13368 }, mem_tbl_5717[] = {
13369 { 0x00000200, 0x00008},
13370 { 0x00010000, 0x0a000},
13371 { 0x00020000, 0x13c00},
13372 { 0xffffffff, 0x00000}
13373 }, mem_tbl_57765[] = {
13374 { 0x00000200, 0x00008},
13375 { 0x00004000, 0x00800},
13376 { 0x00006000, 0x09800},
13377 { 0x00010000, 0x0a000},
13378 { 0xffffffff, 0x00000}
13379 };
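	/* The tables above give the on-chip memory windows (offset, length)
	 * that are safe to test for each ASIC family; an offset of
	 * 0xffffffff terminates each table.
	 */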
13380 struct mem_entry *mem_tbl;
13381 int err = 0;
13382 int i;
13383
13384 if (tg3_flag(tp, 5717_PLUS))
13385 mem_tbl = mem_tbl_5717;
13386 else if (tg3_flag(tp, 57765_CLASS) ||
13387 tg3_asic_rev(tp) == ASIC_REV_5762)
13388 mem_tbl = mem_tbl_57765;
13389 else if (tg3_flag(tp, 5755_PLUS))
13390 mem_tbl = mem_tbl_5755;
13391 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13392 mem_tbl = mem_tbl_5906;
13393 else if (tg3_flag(tp, 5705_PLUS))
13394 mem_tbl = mem_tbl_5705;
13395 else
13396 mem_tbl = mem_tbl_570x;
13397
13398 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13399 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13400 if (err)
13401 break;
13402 }
13403
13404 return err;
13405 }
13406
13407 #define TG3_TSO_MSS 500
13408
13409 #define TG3_TSO_IP_HDR_LEN 20
13410 #define TG3_TSO_TCP_HDR_LEN 20
13411 #define TG3_TSO_TCP_OPT_LEN 12
13412
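/* Canned Ethernet type + IPv4 + TCP header used to build the TSO
 * loopback frame: IHL 5, DF set, TTL 64, 10.0.0.1 -> 10.0.0.2, and a
 * 32-byte TCP header (data offset 8) whose trailing 12 bytes are
 * NOP/NOP/timestamp options.  The IP total-length field is left zero
 * here and patched up in tg3_run_loopback().
 */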
13413 static const u8 tg3_tso_header[] = {
13414 0x08, 0x00,
13415 0x45, 0x00, 0x00, 0x00,
13416 0x00, 0x00, 0x40, 0x00,
13417 0x40, 0x06, 0x00, 0x00,
13418 0x0a, 0x00, 0x00, 0x01,
13419 0x0a, 0x00, 0x00, 0x02,
13420 0x0d, 0x00, 0xe0, 0x00,
13421 0x00, 0x00, 0x01, 0x00,
13422 0x00, 0x00, 0x02, 0x00,
13423 0x80, 0x10, 0x10, 0x00,
13424 0x14, 0x09, 0x00, 0x00,
13425 0x01, 0x01, 0x08, 0x0a,
13426 0x11, 0x11, 0x11, 0x11,
13427 0x11, 0x11, 0x11, 0x11,
13428 };
13429
13430 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13431 {
13432 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13433 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13434 u32 budget;
13435 struct sk_buff *skb;
13436 u8 *tx_data, *rx_data;
13437 dma_addr_t map;
13438 int num_pkts, tx_len, rx_len, i, err;
13439 struct tg3_rx_buffer_desc *desc;
13440 struct tg3_napi *tnapi, *rnapi;
13441 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13442
13443 tnapi = &tp->napi[0];
13444 rnapi = &tp->napi[0];
13445 if (tp->irq_cnt > 1) {
13446 if (tg3_flag(tp, ENABLE_RSS))
13447 rnapi = &tp->napi[1];
13448 if (tg3_flag(tp, ENABLE_TSS))
13449 tnapi = &tp->napi[1];
13450 }
13451 coal_now = tnapi->coal_now | rnapi->coal_now;
13452
13453 err = -EIO;
13454
13455 tx_len = pktsz;
13456 skb = netdev_alloc_skb(tp->dev, tx_len);
13457 if (!skb)
13458 return -ENOMEM;
13459
13460 tx_data = skb_put(skb, tx_len);
13461 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13462 memset(tx_data + ETH_ALEN, 0x0, 8);
13463
13464 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13465
13466 if (tso_loopback) {
13467 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13468
13469 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13470 TG3_TSO_TCP_OPT_LEN;
13471
13472 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13473 sizeof(tg3_tso_header));
13474 mss = TG3_TSO_MSS;
13475
13476 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13477 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13478
13479 /* Set the total length field in the IP header */
13480 iph->tot_len = htons((u16)(mss + hdr_len));
13481
13482 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13483 TXD_FLAG_CPU_POST_DMA);
13484
13485 if (tg3_flag(tp, HW_TSO_1) ||
13486 tg3_flag(tp, HW_TSO_2) ||
13487 tg3_flag(tp, HW_TSO_3)) {
13488 struct tcphdr *th;
13489 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13490 th = (struct tcphdr *)&tx_data[val];
13491 th->check = 0;
13492 } else
13493 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13494
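		/* The hardware wants the LSO header length folded into the
		 * descriptor: HW_TSO_3 parts scatter hdr_len across the mss
		 * field and base_flags, HW_TSO_2 parts carry it in the upper
		 * mss bits, and older parts encode only the TCP option
		 * length.  This mirrors the encoding used on the transmit
		 * path.
		 */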
13495 if (tg3_flag(tp, HW_TSO_3)) {
13496 mss |= (hdr_len & 0xc) << 12;
13497 if (hdr_len & 0x10)
13498 base_flags |= 0x00000010;
13499 base_flags |= (hdr_len & 0x3e0) << 5;
13500 } else if (tg3_flag(tp, HW_TSO_2))
13501 mss |= hdr_len << 9;
13502 else if (tg3_flag(tp, HW_TSO_1) ||
13503 tg3_asic_rev(tp) == ASIC_REV_5705) {
13504 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13505 } else {
13506 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13507 }
13508
13509 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13510 } else {
13511 num_pkts = 1;
13512 data_off = ETH_HLEN;
13513
13514 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13515 tx_len > VLAN_ETH_FRAME_LEN)
13516 base_flags |= TXD_FLAG_JMB_PKT;
13517 }
13518
13519 for (i = data_off; i < tx_len; i++)
13520 tx_data[i] = (u8) (i & 0xff);
13521
13522 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13523 if (pci_dma_mapping_error(tp->pdev, map)) {
13524 dev_kfree_skb(skb);
13525 return -EIO;
13526 }
13527
13528 val = tnapi->tx_prod;
13529 tnapi->tx_buffers[val].skb = skb;
13530 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13531
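	/* Kick the host-coalescing engine so the status block is brought
	 * up to date immediately instead of on the next timer tick.
	 */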
13532 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13533 rnapi->coal_now);
13534
13535 udelay(10);
13536
13537 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13538
13539 budget = tg3_tx_avail(tnapi);
13540 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13541 base_flags | TXD_FLAG_END, mss, 0)) {
13542 tnapi->tx_buffers[val].skb = NULL;
13543 dev_kfree_skb(skb);
13544 return -EIO;
13545 }
13546
13547 tnapi->tx_prod++;
13548
13549 /* Sync BD data before updating mailbox */
13550 wmb();
13551
13552 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13553 tr32_mailbox(tnapi->prodmbox);
13554
13555 udelay(10);
13556
13557 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13558 for (i = 0; i < 35; i++) {
13559 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13560 coal_now);
13561
13562 udelay(10);
13563
13564 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13565 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13566 if ((tx_idx == tnapi->tx_prod) &&
13567 (rx_idx == (rx_start_idx + num_pkts)))
13568 break;
13569 }
13570
13571 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13572 dev_kfree_skb(skb);
13573
13574 if (tx_idx != tnapi->tx_prod)
13575 goto out;
13576
13577 if (rx_idx != rx_start_idx + num_pkts)
13578 goto out;
13579
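	/* Walk every completion the test generated, sanity-checking the
	 * error bits, ring selection, length and payload; val tracks the
	 * expected data byte across packet boundaries.
	 */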
13580 val = data_off;
13581 while (rx_idx != rx_start_idx) {
13582 desc = &rnapi->rx_rcb[rx_start_idx++];
13583 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13584 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13585
13586 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13587 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13588 goto out;
13589
13590 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13591 - ETH_FCS_LEN;
13592
13593 if (!tso_loopback) {
13594 if (rx_len != tx_len)
13595 goto out;
13596
13597 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13598 if (opaque_key != RXD_OPAQUE_RING_STD)
13599 goto out;
13600 } else {
13601 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13602 goto out;
13603 }
13604 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13605 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13606 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13607 goto out;
13608 }
13609
13610 if (opaque_key == RXD_OPAQUE_RING_STD) {
13611 rx_data = tpr->rx_std_buffers[desc_idx].data;
13612 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13613 mapping);
13614 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13615 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13616 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13617 mapping);
13618 } else
13619 goto out;
13620
13621 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13622 PCI_DMA_FROMDEVICE);
13623
13624 rx_data += TG3_RX_OFFSET(tp);
13625 for (i = data_off; i < rx_len; i++, val++) {
13626 if (*(rx_data + i) != (u8) (val & 0xff))
13627 goto out;
13628 }
13629 }
13630
13631 err = 0;
13632
13633 /* tg3_free_rings will unmap and free the rx_data */
13634 out:
13635 return err;
13636 }
13637
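/* Each data[] slot below holds a bitmask of whichever standard, jumbo
 * and TSO sub-tests failed for that loopback mode.
 */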
13638 #define TG3_STD_LOOPBACK_FAILED 1
13639 #define TG3_JMB_LOOPBACK_FAILED 2
13640 #define TG3_TSO_LOOPBACK_FAILED 4
13641 #define TG3_LOOPBACK_FAILED \
13642 (TG3_STD_LOOPBACK_FAILED | \
13643 TG3_JMB_LOOPBACK_FAILED | \
13644 TG3_TSO_LOOPBACK_FAILED)
13645
13646 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13647 {
13648 int err = -EIO;
13649 u32 eee_cap;
13650 u32 jmb_pkt_sz = 9000;
13651
13652 if (tp->dma_limit)
13653 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13654
13655 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13656 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13657
13658 if (!netif_running(tp->dev)) {
13659 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13660 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13661 if (do_extlpbk)
13662 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13663 goto done;
13664 }
13665
13666 err = tg3_reset_hw(tp, true);
13667 if (err) {
13668 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13669 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13670 if (do_extlpbk)
13671 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13672 goto done;
13673 }
13674
13675 if (tg3_flag(tp, ENABLE_RSS)) {
13676 int i;
13677
13678 /* Reroute all rx packets to the 1st queue */
13679 for (i = MAC_RSS_INDIR_TBL_0;
13680 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13681 tw32(i, 0x0);
13682 }
13683
13684 	/* HW erratum - MAC loopback fails in some cases on 5780.
13685 	 * Normal traffic and PHY loopback are not affected by the
13686 	 * erratum.  Also, the MAC loopback test is deprecated for
13687 	 * all newer ASIC revisions.
13688 	 */
13689 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13690 !tg3_flag(tp, CPMU_PRESENT)) {
13691 tg3_mac_loopback(tp, true);
13692
13693 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13694 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13695
13696 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13697 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13698 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13699
13700 tg3_mac_loopback(tp, false);
13701 }
13702
13703 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13704 !tg3_flag(tp, USE_PHYLIB)) {
13705 int i;
13706
13707 tg3_phy_lpbk_set(tp, 0, false);
13708
13709 /* Wait for link */
13710 for (i = 0; i < 100; i++) {
13711 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13712 break;
13713 mdelay(1);
13714 }
13715
13716 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13717 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13718 if (tg3_flag(tp, TSO_CAPABLE) &&
13719 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13720 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13721 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13722 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13723 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13724
13725 if (do_extlpbk) {
13726 tg3_phy_lpbk_set(tp, 0, true);
13727
13728 /* All link indications report up, but the hardware
13729 * isn't really ready for about 20 msec. Double it
13730 * to be sure.
13731 */
13732 mdelay(40);
13733
13734 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13735 data[TG3_EXT_LOOPB_TEST] |=
13736 TG3_STD_LOOPBACK_FAILED;
13737 if (tg3_flag(tp, TSO_CAPABLE) &&
13738 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13739 data[TG3_EXT_LOOPB_TEST] |=
13740 TG3_TSO_LOOPBACK_FAILED;
13741 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13742 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13743 data[TG3_EXT_LOOPB_TEST] |=
13744 TG3_JMB_LOOPBACK_FAILED;
13745 }
13746
13747 /* Re-enable gphy autopowerdown. */
13748 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13749 tg3_phy_toggle_apd(tp, true);
13750 }
13751
13752 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13753 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13754
13755 done:
13756 tp->phy_flags |= eee_cap;
13757
13758 return err;
13759 }
13760
13761 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13762 u64 *data)
13763 {
13764 struct tg3 *tp = netdev_priv(dev);
13765 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13766
13767 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13768 if (tg3_power_up(tp)) {
13769 etest->flags |= ETH_TEST_FL_FAILED;
13770 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13771 return;
13772 }
13773 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13774 }
13775
13776 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13777
13778 if (tg3_test_nvram(tp) != 0) {
13779 etest->flags |= ETH_TEST_FL_FAILED;
13780 data[TG3_NVRAM_TEST] = 1;
13781 }
13782 if (!doextlpbk && tg3_test_link(tp)) {
13783 etest->flags |= ETH_TEST_FL_FAILED;
13784 data[TG3_LINK_TEST] = 1;
13785 }
13786 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13787 int err, err2 = 0, irq_sync = 0;
13788
13789 if (netif_running(dev)) {
13790 tg3_phy_stop(tp);
13791 tg3_netif_stop(tp);
13792 irq_sync = 1;
13793 }
13794
13795 tg3_full_lock(tp, irq_sync);
13796 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13797 err = tg3_nvram_lock(tp);
13798 tg3_halt_cpu(tp, RX_CPU_BASE);
13799 if (!tg3_flag(tp, 5705_PLUS))
13800 tg3_halt_cpu(tp, TX_CPU_BASE);
13801 if (!err)
13802 tg3_nvram_unlock(tp);
13803
13804 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13805 tg3_phy_reset(tp);
13806
13807 if (tg3_test_registers(tp) != 0) {
13808 etest->flags |= ETH_TEST_FL_FAILED;
13809 data[TG3_REGISTER_TEST] = 1;
13810 }
13811
13812 if (tg3_test_memory(tp) != 0) {
13813 etest->flags |= ETH_TEST_FL_FAILED;
13814 data[TG3_MEMORY_TEST] = 1;
13815 }
13816
13817 if (doextlpbk)
13818 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13819
13820 if (tg3_test_loopback(tp, data, doextlpbk))
13821 etest->flags |= ETH_TEST_FL_FAILED;
13822
13823 tg3_full_unlock(tp);
13824
13825 if (tg3_test_interrupt(tp) != 0) {
13826 etest->flags |= ETH_TEST_FL_FAILED;
13827 data[TG3_INTERRUPT_TEST] = 1;
13828 }
13829
13830 tg3_full_lock(tp, 0);
13831
13832 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13833 if (netif_running(dev)) {
13834 tg3_flag_set(tp, INIT_COMPLETE);
13835 err2 = tg3_restart_hw(tp, true);
13836 if (!err2)
13837 tg3_netif_start(tp);
13838 }
13839
13840 tg3_full_unlock(tp);
13841
13842 if (irq_sync && !err2)
13843 tg3_phy_start(tp);
13844 }
13845 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13846 tg3_power_down_prepare(tp);
13847
13848 }
13849
13850 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13851 {
13852 struct tg3 *tp = netdev_priv(dev);
13853 struct hwtstamp_config stmpconf;
13854
13855 if (!tg3_flag(tp, PTP_CAPABLE))
13856 return -EOPNOTSUPP;
13857
13858 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13859 return -EFAULT;
13860
13861 if (stmpconf.flags)
13862 return -EINVAL;
13863
13864 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13865 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13866 return -ERANGE;
13867
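	/* Map the requested filter directly onto the RX PTP control bits.
	 * Filters the hardware cannot match exactly are rejected with
	 * -ERANGE rather than being widened to a broader filter.
	 */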
13868 switch (stmpconf.rx_filter) {
13869 case HWTSTAMP_FILTER_NONE:
13870 tp->rxptpctl = 0;
13871 break;
13872 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13873 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13874 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13875 break;
13876 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13877 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13878 TG3_RX_PTP_CTL_SYNC_EVNT;
13879 break;
13880 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13881 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13882 TG3_RX_PTP_CTL_DELAY_REQ;
13883 break;
13884 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13885 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13886 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13887 break;
13888 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13889 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13890 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13891 break;
13892 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13893 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13894 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13895 break;
13896 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13897 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13898 TG3_RX_PTP_CTL_SYNC_EVNT;
13899 break;
13900 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13901 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13902 TG3_RX_PTP_CTL_SYNC_EVNT;
13903 break;
13904 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13905 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13906 TG3_RX_PTP_CTL_SYNC_EVNT;
13907 break;
13908 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13909 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13910 TG3_RX_PTP_CTL_DELAY_REQ;
13911 break;
13912 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13913 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13914 TG3_RX_PTP_CTL_DELAY_REQ;
13915 break;
13916 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13917 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13918 TG3_RX_PTP_CTL_DELAY_REQ;
13919 break;
13920 default:
13921 return -ERANGE;
13922 }
13923
13924 if (netif_running(dev) && tp->rxptpctl)
13925 tw32(TG3_RX_PTP_CTL,
13926 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13927
13928 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13929 tg3_flag_set(tp, TX_TSTAMP_EN);
13930 else
13931 tg3_flag_clear(tp, TX_TSTAMP_EN);
13932
13933 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13934 -EFAULT : 0;
13935 }
13936
13937 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13938 {
13939 struct tg3 *tp = netdev_priv(dev);
13940 struct hwtstamp_config stmpconf;
13941
13942 if (!tg3_flag(tp, PTP_CAPABLE))
13943 return -EOPNOTSUPP;
13944
13945 stmpconf.flags = 0;
13946 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13947 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13948
13949 switch (tp->rxptpctl) {
13950 case 0:
13951 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13952 break;
13953 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13954 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13955 break;
13956 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13957 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13958 break;
13959 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13960 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13961 break;
13962 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13963 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13964 break;
13965 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13966 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13967 break;
13968 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13969 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13970 break;
13971 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13972 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13973 break;
13974 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13975 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13976 break;
13977 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13978 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13979 break;
13980 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13981 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13982 break;
13983 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13984 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13985 break;
13986 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13987 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13988 break;
13989 default:
13990 WARN_ON_ONCE(1);
13991 return -ERANGE;
13992 }
13993
13994 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13995 -EFAULT : 0;
13996 }
13997
13998 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13999 {
14000 struct mii_ioctl_data *data = if_mii(ifr);
14001 struct tg3 *tp = netdev_priv(dev);
14002 int err;
14003
14004 if (tg3_flag(tp, USE_PHYLIB)) {
14005 struct phy_device *phydev;
14006 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14007 return -EAGAIN;
14008 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14009 return phy_mii_ioctl(phydev, ifr, cmd);
14010 }
14011
14012 switch (cmd) {
14013 case SIOCGMIIPHY:
14014 data->phy_id = tp->phy_addr;
14015
14016 /* fallthru */
14017 case SIOCGMIIREG: {
14018 u32 mii_regval;
14019
14020 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14021 break; /* We have no PHY */
14022
14023 if (!netif_running(dev))
14024 return -EAGAIN;
14025
14026 spin_lock_bh(&tp->lock);
14027 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14028 data->reg_num & 0x1f, &mii_regval);
14029 spin_unlock_bh(&tp->lock);
14030
14031 data->val_out = mii_regval;
14032
14033 return err;
14034 }
14035
14036 case SIOCSMIIREG:
14037 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14038 break; /* We have no PHY */
14039
14040 if (!netif_running(dev))
14041 return -EAGAIN;
14042
14043 spin_lock_bh(&tp->lock);
14044 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14045 data->reg_num & 0x1f, data->val_in);
14046 spin_unlock_bh(&tp->lock);
14047
14048 return err;
14049
14050 case SIOCSHWTSTAMP:
14051 return tg3_hwtstamp_set(dev, ifr);
14052
14053 case SIOCGHWTSTAMP:
14054 return tg3_hwtstamp_get(dev, ifr);
14055
14056 default:
14057 /* do nothing */
14058 break;
14059 }
14060 return -EOPNOTSUPP;
14061 }
14062
14063 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14064 {
14065 struct tg3 *tp = netdev_priv(dev);
14066
14067 memcpy(ec, &tp->coal, sizeof(*ec));
14068 return 0;
14069 }
14070
14071 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14072 {
14073 struct tg3 *tp = netdev_priv(dev);
14074 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14075 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14076
14077 if (!tg3_flag(tp, 5705_PLUS)) {
14078 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14079 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14080 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14081 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14082 }
14083
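	/* On 5705+ parts the IRQ-context and statistics limits above stay
	 * zero, so any nonzero request for those parameters fails the
	 * range check below.
	 */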
14084 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14085 (!ec->rx_coalesce_usecs) ||
14086 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14087 (!ec->tx_coalesce_usecs) ||
14088 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14089 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14090 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14091 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14092 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14093 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14094 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14095 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14096 return -EINVAL;
14097
14098 /* Only copy relevant parameters, ignore all others. */
14099 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14100 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14101 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14102 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14103 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14104 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14105 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14106 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14107 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14108
14109 if (netif_running(dev)) {
14110 tg3_full_lock(tp, 0);
14111 __tg3_set_coalesce(tp, &tp->coal);
14112 tg3_full_unlock(tp);
14113 }
14114 return 0;
14115 }
14116
14117 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14118 {
14119 struct tg3 *tp = netdev_priv(dev);
14120
14121 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14122 netdev_warn(tp->dev, "Board does not support EEE!\n");
14123 return -EOPNOTSUPP;
14124 }
14125
14126 if (edata->advertised != tp->eee.advertised) {
14127 netdev_warn(tp->dev,
14128 "Direct manipulation of EEE advertisement is not supported\n");
14129 return -EINVAL;
14130 }
14131
14132 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14133 netdev_warn(tp->dev,
14134 			    "Maximum supported Tx LPI timer is %#x\n",
14135 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14136 return -EINVAL;
14137 }
14138
14139 tp->eee = *edata;
14140
14141 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14142 tg3_warn_mgmt_link_flap(tp);
14143
14144 if (netif_running(tp->dev)) {
14145 tg3_full_lock(tp, 0);
14146 tg3_setup_eee(tp);
14147 tg3_phy_reset(tp);
14148 tg3_full_unlock(tp);
14149 }
14150
14151 return 0;
14152 }
14153
14154 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14155 {
14156 struct tg3 *tp = netdev_priv(dev);
14157
14158 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14159 netdev_warn(tp->dev,
14160 "Board does not support EEE!\n");
14161 return -EOPNOTSUPP;
14162 }
14163
14164 *edata = tp->eee;
14165 return 0;
14166 }
14167
14168 static const struct ethtool_ops tg3_ethtool_ops = {
14169 .get_drvinfo = tg3_get_drvinfo,
14170 .get_regs_len = tg3_get_regs_len,
14171 .get_regs = tg3_get_regs,
14172 .get_wol = tg3_get_wol,
14173 .set_wol = tg3_set_wol,
14174 .get_msglevel = tg3_get_msglevel,
14175 .set_msglevel = tg3_set_msglevel,
14176 .nway_reset = tg3_nway_reset,
14177 .get_link = ethtool_op_get_link,
14178 .get_eeprom_len = tg3_get_eeprom_len,
14179 .get_eeprom = tg3_get_eeprom,
14180 .set_eeprom = tg3_set_eeprom,
14181 .get_ringparam = tg3_get_ringparam,
14182 .set_ringparam = tg3_set_ringparam,
14183 .get_pauseparam = tg3_get_pauseparam,
14184 .set_pauseparam = tg3_set_pauseparam,
14185 .self_test = tg3_self_test,
14186 .get_strings = tg3_get_strings,
14187 .set_phys_id = tg3_set_phys_id,
14188 .get_ethtool_stats = tg3_get_ethtool_stats,
14189 .get_coalesce = tg3_get_coalesce,
14190 .set_coalesce = tg3_set_coalesce,
14191 .get_sset_count = tg3_get_sset_count,
14192 .get_rxnfc = tg3_get_rxnfc,
14193 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14194 .get_rxfh = tg3_get_rxfh,
14195 .set_rxfh = tg3_set_rxfh,
14196 .get_channels = tg3_get_channels,
14197 .set_channels = tg3_set_channels,
14198 .get_ts_info = tg3_get_ts_info,
14199 .get_eee = tg3_get_eee,
14200 .set_eee = tg3_set_eee,
14201 .get_link_ksettings = tg3_get_link_ksettings,
14202 .set_link_ksettings = tg3_set_link_ksettings,
14203 };
14204
14205 static void tg3_get_stats64(struct net_device *dev,
14206 struct rtnl_link_stats64 *stats)
14207 {
14208 struct tg3 *tp = netdev_priv(dev);
14209
14210 spin_lock_bh(&tp->lock);
14211 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14212 *stats = tp->net_stats_prev;
14213 spin_unlock_bh(&tp->lock);
14214 return;
14215 }
14216
14217 tg3_get_nstats(tp, stats);
14218 spin_unlock_bh(&tp->lock);
14219 }
14220
14221 static void tg3_set_rx_mode(struct net_device *dev)
14222 {
14223 struct tg3 *tp = netdev_priv(dev);
14224
14225 if (!netif_running(dev))
14226 return;
14227
14228 tg3_full_lock(tp, 0);
14229 __tg3_set_rx_mode(dev);
14230 tg3_full_unlock(tp);
14231 }
14232
14233 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14234 int new_mtu)
14235 {
14236 dev->mtu = new_mtu;
14237
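	/* 5780-class chips cannot do TSO once jumbo frames are in use, so
	 * TSO capability is toggled along with the MTU; other chips simply
	 * switch the jumbo producer ring on or off.
	 */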
14238 if (new_mtu > ETH_DATA_LEN) {
14239 if (tg3_flag(tp, 5780_CLASS)) {
14240 netdev_update_features(dev);
14241 tg3_flag_clear(tp, TSO_CAPABLE);
14242 } else {
14243 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14244 }
14245 } else {
14246 if (tg3_flag(tp, 5780_CLASS)) {
14247 tg3_flag_set(tp, TSO_CAPABLE);
14248 netdev_update_features(dev);
14249 }
14250 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14251 }
14252 }
14253
14254 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14255 {
14256 struct tg3 *tp = netdev_priv(dev);
14257 int err;
14258 bool reset_phy = false;
14259
14260 if (!netif_running(dev)) {
14261 		/* Nothing to do right now; the new MTU simply takes
14262 		 * effect the next time the device is brought up.
14263 		 */
14264 tg3_set_mtu(dev, tp, new_mtu);
14265 return 0;
14266 }
14267
14268 tg3_phy_stop(tp);
14269
14270 tg3_netif_stop(tp);
14271
14272 tg3_set_mtu(dev, tp, new_mtu);
14273
14274 tg3_full_lock(tp, 1);
14275
14276 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14277
14278 	/* Reset the PHY, otherwise the read DMA engine will be left in
14279 	 * a mode that breaks all DMA requests into 256-byte chunks.
14280 	 */
14281 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14282 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14283 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14284 tg3_asic_rev(tp) == ASIC_REV_5720)
14285 reset_phy = true;
14286
14287 err = tg3_restart_hw(tp, reset_phy);
14288
14289 if (!err)
14290 tg3_netif_start(tp);
14291
14292 tg3_full_unlock(tp);
14293
14294 if (!err)
14295 tg3_phy_start(tp);
14296
14297 return err;
14298 }
14299
14300 static const struct net_device_ops tg3_netdev_ops = {
14301 .ndo_open = tg3_open,
14302 .ndo_stop = tg3_close,
14303 .ndo_start_xmit = tg3_start_xmit,
14304 .ndo_get_stats64 = tg3_get_stats64,
14305 .ndo_validate_addr = eth_validate_addr,
14306 .ndo_set_rx_mode = tg3_set_rx_mode,
14307 .ndo_set_mac_address = tg3_set_mac_addr,
14308 .ndo_do_ioctl = tg3_ioctl,
14309 .ndo_tx_timeout = tg3_tx_timeout,
14310 .ndo_change_mtu = tg3_change_mtu,
14311 .ndo_fix_features = tg3_fix_features,
14312 .ndo_set_features = tg3_set_features,
14313 #ifdef CONFIG_NET_POLL_CONTROLLER
14314 .ndo_poll_controller = tg3_poll_controller,
14315 #endif
14316 };
14317
14318 static void tg3_get_eeprom_size(struct tg3 *tp)
14319 {
14320 u32 cursize, val, magic;
14321
14322 tp->nvram_size = EEPROM_CHIP_SIZE;
14323
14324 if (tg3_nvram_read(tp, 0, &magic) != 0)
14325 return;
14326
14327 if ((magic != TG3_EEPROM_MAGIC) &&
14328 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14329 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14330 return;
14331
14332 /*
14333 * Size the chip by reading offsets at increasing powers of two.
14334 * When we encounter our validation signature, we know the addressing
14335 * has wrapped around, and thus have our chip size.
14336 */
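	 /* e.g. on a 512-byte part the read at offset 0x200 wraps back to
	  * offset 0 and returns the signature again, leaving cursize at
	  * 0x200.
	  */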
14337 cursize = 0x10;
14338
14339 while (cursize < tp->nvram_size) {
14340 if (tg3_nvram_read(tp, cursize, &val) != 0)
14341 return;
14342
14343 if (val == magic)
14344 break;
14345
14346 cursize <<= 1;
14347 }
14348
14349 tp->nvram_size = cursize;
14350 }
14351
14352 static void tg3_get_nvram_size(struct tg3 *tp)
14353 {
14354 u32 val;
14355
14356 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14357 return;
14358
14359 /* Selfboot format */
14360 if (val != TG3_EEPROM_MAGIC) {
14361 tg3_get_eeprom_size(tp);
14362 return;
14363 }
14364
14365 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14366 if (val != 0) {
14367 /* This is confusing. We want to operate on the
14368 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14369 * call will read from NVRAM and byteswap the data
14370 * according to the byteswapping settings for all
14371 * other register accesses. This ensures the data we
14372 * want will always reside in the lower 16-bits.
14373 * However, the data in NVRAM is in LE format, which
14374 * means the data from the NVRAM read will always be
14375 * opposite the endianness of the CPU. The 16-bit
14376 * byteswap then brings the data to CPU endianness.
14377 */
14378 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14379 return;
14380 }
14381 }
14382 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14383 }
14384
14385 static void tg3_get_nvram_info(struct tg3 *tp)
14386 {
14387 u32 nvcfg1;
14388
14389 nvcfg1 = tr32(NVRAM_CFG1);
14390 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14391 tg3_flag_set(tp, FLASH);
14392 } else {
14393 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14394 tw32(NVRAM_CFG1, nvcfg1);
14395 }
14396
14397 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14398 tg3_flag(tp, 5780_CLASS)) {
14399 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14400 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14401 tp->nvram_jedecnum = JEDEC_ATMEL;
14402 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14403 tg3_flag_set(tp, NVRAM_BUFFERED);
14404 break;
14405 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14406 tp->nvram_jedecnum = JEDEC_ATMEL;
14407 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14408 break;
14409 case FLASH_VENDOR_ATMEL_EEPROM:
14410 tp->nvram_jedecnum = JEDEC_ATMEL;
14411 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14412 tg3_flag_set(tp, NVRAM_BUFFERED);
14413 break;
14414 case FLASH_VENDOR_ST:
14415 tp->nvram_jedecnum = JEDEC_ST;
14416 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14417 tg3_flag_set(tp, NVRAM_BUFFERED);
14418 break;
14419 case FLASH_VENDOR_SAIFUN:
14420 tp->nvram_jedecnum = JEDEC_SAIFUN;
14421 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14422 break;
14423 case FLASH_VENDOR_SST_SMALL:
14424 case FLASH_VENDOR_SST_LARGE:
14425 tp->nvram_jedecnum = JEDEC_SST;
14426 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14427 break;
14428 }
14429 } else {
14430 tp->nvram_jedecnum = JEDEC_ATMEL;
14431 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14432 tg3_flag_set(tp, NVRAM_BUFFERED);
14433 }
14434 }
14435
14436 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14437 {
14438 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14439 case FLASH_5752PAGE_SIZE_256:
14440 tp->nvram_pagesize = 256;
14441 break;
14442 case FLASH_5752PAGE_SIZE_512:
14443 tp->nvram_pagesize = 512;
14444 break;
14445 case FLASH_5752PAGE_SIZE_1K:
14446 tp->nvram_pagesize = 1024;
14447 break;
14448 case FLASH_5752PAGE_SIZE_2K:
14449 tp->nvram_pagesize = 2048;
14450 break;
14451 case FLASH_5752PAGE_SIZE_4K:
14452 tp->nvram_pagesize = 4096;
14453 break;
14454 case FLASH_5752PAGE_SIZE_264:
14455 tp->nvram_pagesize = 264;
14456 break;
14457 case FLASH_5752PAGE_SIZE_528:
14458 tp->nvram_pagesize = 528;
14459 break;
14460 }
14461 }
14462
14463 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14464 {
14465 u32 nvcfg1;
14466
14467 nvcfg1 = tr32(NVRAM_CFG1);
14468
14469 /* NVRAM protection for TPM */
14470 if (nvcfg1 & (1 << 27))
14471 tg3_flag_set(tp, PROTECTED_NVRAM);
14472
14473 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14474 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14475 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14476 tp->nvram_jedecnum = JEDEC_ATMEL;
14477 tg3_flag_set(tp, NVRAM_BUFFERED);
14478 break;
14479 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14480 tp->nvram_jedecnum = JEDEC_ATMEL;
14481 tg3_flag_set(tp, NVRAM_BUFFERED);
14482 tg3_flag_set(tp, FLASH);
14483 break;
14484 case FLASH_5752VENDOR_ST_M45PE10:
14485 case FLASH_5752VENDOR_ST_M45PE20:
14486 case FLASH_5752VENDOR_ST_M45PE40:
14487 tp->nvram_jedecnum = JEDEC_ST;
14488 tg3_flag_set(tp, NVRAM_BUFFERED);
14489 tg3_flag_set(tp, FLASH);
14490 break;
14491 }
14492
14493 if (tg3_flag(tp, FLASH)) {
14494 tg3_nvram_get_pagesize(tp, nvcfg1);
14495 } else {
14496 /* For eeprom, set pagesize to maximum eeprom size */
14497 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14498
14499 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14500 tw32(NVRAM_CFG1, nvcfg1);
14501 }
14502 }
14503
14504 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14505 {
14506 u32 nvcfg1, protect = 0;
14507
14508 nvcfg1 = tr32(NVRAM_CFG1);
14509
14510 /* NVRAM protection for TPM */
14511 if (nvcfg1 & (1 << 27)) {
14512 tg3_flag_set(tp, PROTECTED_NVRAM);
14513 protect = 1;
14514 }
14515
14516 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14517 switch (nvcfg1) {
14518 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14519 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14520 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14521 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14522 tp->nvram_jedecnum = JEDEC_ATMEL;
14523 tg3_flag_set(tp, NVRAM_BUFFERED);
14524 tg3_flag_set(tp, FLASH);
14525 tp->nvram_pagesize = 264;
14526 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14527 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14528 tp->nvram_size = (protect ? 0x3e200 :
14529 TG3_NVRAM_SIZE_512KB);
14530 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14531 tp->nvram_size = (protect ? 0x1f200 :
14532 TG3_NVRAM_SIZE_256KB);
14533 else
14534 tp->nvram_size = (protect ? 0x1f200 :
14535 TG3_NVRAM_SIZE_128KB);
14536 break;
14537 case FLASH_5752VENDOR_ST_M45PE10:
14538 case FLASH_5752VENDOR_ST_M45PE20:
14539 case FLASH_5752VENDOR_ST_M45PE40:
14540 tp->nvram_jedecnum = JEDEC_ST;
14541 tg3_flag_set(tp, NVRAM_BUFFERED);
14542 tg3_flag_set(tp, FLASH);
14543 tp->nvram_pagesize = 256;
14544 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14545 tp->nvram_size = (protect ?
14546 TG3_NVRAM_SIZE_64KB :
14547 TG3_NVRAM_SIZE_128KB);
14548 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14549 tp->nvram_size = (protect ?
14550 TG3_NVRAM_SIZE_64KB :
14551 TG3_NVRAM_SIZE_256KB);
14552 else
14553 tp->nvram_size = (protect ?
14554 TG3_NVRAM_SIZE_128KB :
14555 TG3_NVRAM_SIZE_512KB);
14556 break;
14557 }
14558 }
14559
14560 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14561 {
14562 u32 nvcfg1;
14563
14564 nvcfg1 = tr32(NVRAM_CFG1);
14565
14566 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14567 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14568 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14569 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14570 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14571 tp->nvram_jedecnum = JEDEC_ATMEL;
14572 tg3_flag_set(tp, NVRAM_BUFFERED);
14573 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14574
14575 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14576 tw32(NVRAM_CFG1, nvcfg1);
14577 break;
14578 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14579 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14580 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14581 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14582 tp->nvram_jedecnum = JEDEC_ATMEL;
14583 tg3_flag_set(tp, NVRAM_BUFFERED);
14584 tg3_flag_set(tp, FLASH);
14585 tp->nvram_pagesize = 264;
14586 break;
14587 case FLASH_5752VENDOR_ST_M45PE10:
14588 case FLASH_5752VENDOR_ST_M45PE20:
14589 case FLASH_5752VENDOR_ST_M45PE40:
14590 tp->nvram_jedecnum = JEDEC_ST;
14591 tg3_flag_set(tp, NVRAM_BUFFERED);
14592 tg3_flag_set(tp, FLASH);
14593 tp->nvram_pagesize = 256;
14594 break;
14595 }
14596 }
14597
14598 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14599 {
14600 u32 nvcfg1, protect = 0;
14601
14602 nvcfg1 = tr32(NVRAM_CFG1);
14603
14604 /* NVRAM protection for TPM */
14605 if (nvcfg1 & (1 << 27)) {
14606 tg3_flag_set(tp, PROTECTED_NVRAM);
14607 protect = 1;
14608 }
14609
14610 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14611 switch (nvcfg1) {
14612 case FLASH_5761VENDOR_ATMEL_ADB021D:
14613 case FLASH_5761VENDOR_ATMEL_ADB041D:
14614 case FLASH_5761VENDOR_ATMEL_ADB081D:
14615 case FLASH_5761VENDOR_ATMEL_ADB161D:
14616 case FLASH_5761VENDOR_ATMEL_MDB021D:
14617 case FLASH_5761VENDOR_ATMEL_MDB041D:
14618 case FLASH_5761VENDOR_ATMEL_MDB081D:
14619 case FLASH_5761VENDOR_ATMEL_MDB161D:
14620 tp->nvram_jedecnum = JEDEC_ATMEL;
14621 tg3_flag_set(tp, NVRAM_BUFFERED);
14622 tg3_flag_set(tp, FLASH);
14623 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14624 tp->nvram_pagesize = 256;
14625 break;
14626 case FLASH_5761VENDOR_ST_A_M45PE20:
14627 case FLASH_5761VENDOR_ST_A_M45PE40:
14628 case FLASH_5761VENDOR_ST_A_M45PE80:
14629 case FLASH_5761VENDOR_ST_A_M45PE16:
14630 case FLASH_5761VENDOR_ST_M_M45PE20:
14631 case FLASH_5761VENDOR_ST_M_M45PE40:
14632 case FLASH_5761VENDOR_ST_M_M45PE80:
14633 case FLASH_5761VENDOR_ST_M_M45PE16:
14634 tp->nvram_jedecnum = JEDEC_ST;
14635 tg3_flag_set(tp, NVRAM_BUFFERED);
14636 tg3_flag_set(tp, FLASH);
14637 tp->nvram_pagesize = 256;
14638 break;
14639 }
14640
14641 if (protect) {
14642 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14643 } else {
14644 switch (nvcfg1) {
14645 case FLASH_5761VENDOR_ATMEL_ADB161D:
14646 case FLASH_5761VENDOR_ATMEL_MDB161D:
14647 case FLASH_5761VENDOR_ST_A_M45PE16:
14648 case FLASH_5761VENDOR_ST_M_M45PE16:
14649 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14650 break;
14651 case FLASH_5761VENDOR_ATMEL_ADB081D:
14652 case FLASH_5761VENDOR_ATMEL_MDB081D:
14653 case FLASH_5761VENDOR_ST_A_M45PE80:
14654 case FLASH_5761VENDOR_ST_M_M45PE80:
14655 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14656 break;
14657 case FLASH_5761VENDOR_ATMEL_ADB041D:
14658 case FLASH_5761VENDOR_ATMEL_MDB041D:
14659 case FLASH_5761VENDOR_ST_A_M45PE40:
14660 case FLASH_5761VENDOR_ST_M_M45PE40:
14661 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14662 break;
14663 case FLASH_5761VENDOR_ATMEL_ADB021D:
14664 case FLASH_5761VENDOR_ATMEL_MDB021D:
14665 case FLASH_5761VENDOR_ST_A_M45PE20:
14666 case FLASH_5761VENDOR_ST_M_M45PE20:
14667 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14668 break;
14669 }
14670 }
14671 }
14672
14673 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14674 {
14675 tp->nvram_jedecnum = JEDEC_ATMEL;
14676 tg3_flag_set(tp, NVRAM_BUFFERED);
14677 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14678 }
14679
14680 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14681 {
14682 u32 nvcfg1;
14683
14684 nvcfg1 = tr32(NVRAM_CFG1);
14685
14686 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14687 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14688 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14689 tp->nvram_jedecnum = JEDEC_ATMEL;
14690 tg3_flag_set(tp, NVRAM_BUFFERED);
14691 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14692
14693 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14694 tw32(NVRAM_CFG1, nvcfg1);
14695 return;
14696 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14697 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14698 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14699 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14700 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14701 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14702 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14703 tp->nvram_jedecnum = JEDEC_ATMEL;
14704 tg3_flag_set(tp, NVRAM_BUFFERED);
14705 tg3_flag_set(tp, FLASH);
14706
14707 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14708 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14709 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14710 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14711 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14712 break;
14713 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14714 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14715 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14716 break;
14717 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14718 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14719 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14720 break;
14721 }
14722 break;
14723 case FLASH_5752VENDOR_ST_M45PE10:
14724 case FLASH_5752VENDOR_ST_M45PE20:
14725 case FLASH_5752VENDOR_ST_M45PE40:
14726 tp->nvram_jedecnum = JEDEC_ST;
14727 tg3_flag_set(tp, NVRAM_BUFFERED);
14728 tg3_flag_set(tp, FLASH);
14729
14730 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14731 case FLASH_5752VENDOR_ST_M45PE10:
14732 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14733 break;
14734 case FLASH_5752VENDOR_ST_M45PE20:
14735 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14736 break;
14737 case FLASH_5752VENDOR_ST_M45PE40:
14738 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14739 break;
14740 }
14741 break;
14742 default:
14743 tg3_flag_set(tp, NO_NVRAM);
14744 return;
14745 }
14746
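	/* 264- and 528-byte pages are Atmel DataFlash-style geometries
	 * that need the NVRAM address translation; every other page size
	 * is flat-addressed, hence NO_NVRAM_ADDR_TRANS.
	 */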
14747 tg3_nvram_get_pagesize(tp, nvcfg1);
14748 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14749 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14750 }
14751
14752
14753 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14754 {
14755 u32 nvcfg1;
14756
14757 nvcfg1 = tr32(NVRAM_CFG1);
14758
14759 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14760 case FLASH_5717VENDOR_ATMEL_EEPROM:
14761 case FLASH_5717VENDOR_MICRO_EEPROM:
14762 tp->nvram_jedecnum = JEDEC_ATMEL;
14763 tg3_flag_set(tp, NVRAM_BUFFERED);
14764 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14765
14766 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14767 tw32(NVRAM_CFG1, nvcfg1);
14768 return;
14769 case FLASH_5717VENDOR_ATMEL_MDB011D:
14770 case FLASH_5717VENDOR_ATMEL_ADB011B:
14771 case FLASH_5717VENDOR_ATMEL_ADB011D:
14772 case FLASH_5717VENDOR_ATMEL_MDB021D:
14773 case FLASH_5717VENDOR_ATMEL_ADB021B:
14774 case FLASH_5717VENDOR_ATMEL_ADB021D:
14775 case FLASH_5717VENDOR_ATMEL_45USPT:
14776 tp->nvram_jedecnum = JEDEC_ATMEL;
14777 tg3_flag_set(tp, NVRAM_BUFFERED);
14778 tg3_flag_set(tp, FLASH);
14779
14780 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14781 case FLASH_5717VENDOR_ATMEL_MDB021D:
14782 /* Detect size with tg3_nvram_get_size() */
14783 break;
14784 case FLASH_5717VENDOR_ATMEL_ADB021B:
14785 case FLASH_5717VENDOR_ATMEL_ADB021D:
14786 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14787 break;
14788 default:
14789 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14790 break;
14791 }
14792 break;
14793 case FLASH_5717VENDOR_ST_M_M25PE10:
14794 case FLASH_5717VENDOR_ST_A_M25PE10:
14795 case FLASH_5717VENDOR_ST_M_M45PE10:
14796 case FLASH_5717VENDOR_ST_A_M45PE10:
14797 case FLASH_5717VENDOR_ST_M_M25PE20:
14798 case FLASH_5717VENDOR_ST_A_M25PE20:
14799 case FLASH_5717VENDOR_ST_M_M45PE20:
14800 case FLASH_5717VENDOR_ST_A_M45PE20:
14801 case FLASH_5717VENDOR_ST_25USPT:
14802 case FLASH_5717VENDOR_ST_45USPT:
14803 tp->nvram_jedecnum = JEDEC_ST;
14804 tg3_flag_set(tp, NVRAM_BUFFERED);
14805 tg3_flag_set(tp, FLASH);
14806
14807 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14808 case FLASH_5717VENDOR_ST_M_M25PE20:
14809 case FLASH_5717VENDOR_ST_M_M45PE20:
14810 /* Detect size with tg3_nvram_get_size() */
14811 break;
14812 case FLASH_5717VENDOR_ST_A_M25PE20:
14813 case FLASH_5717VENDOR_ST_A_M45PE20:
14814 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14815 break;
14816 default:
14817 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14818 break;
14819 }
14820 break;
14821 default:
14822 tg3_flag_set(tp, NO_NVRAM);
14823 return;
14824 }
14825
14826 tg3_nvram_get_pagesize(tp, nvcfg1);
14827 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14828 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14829 }
14830
14831 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14832 {
14833 u32 nvcfg1, nvmpinstrp, nv_status;
14834
14835 nvcfg1 = tr32(NVRAM_CFG1);
14836 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14837
14838 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14839 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14840 tg3_flag_set(tp, NO_NVRAM);
14841 return;
14842 }
14843
14844 switch (nvmpinstrp) {
14845 case FLASH_5762_MX25L_100:
14846 case FLASH_5762_MX25L_200:
14847 case FLASH_5762_MX25L_400:
14848 case FLASH_5762_MX25L_800:
14849 case FLASH_5762_MX25L_160_320:
14850 tp->nvram_pagesize = 4096;
14851 tp->nvram_jedecnum = JEDEC_MACRONIX;
14852 tg3_flag_set(tp, NVRAM_BUFFERED);
14853 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14854 tg3_flag_set(tp, FLASH);
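			/* The autosense device-ID field apparently encodes
			 * log2 of the flash size in megabytes, so the size
			 * computed below works out to (1 << devid) MB, with
			 * AUTOSENSE_SIZE_IN_MB supplying the MB-to-bytes
			 * shift.
			 */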
14855 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14856 tp->nvram_size =
14857 (1 << (nv_status >> AUTOSENSE_DEVID &
14858 AUTOSENSE_DEVID_MASK)
14859 << AUTOSENSE_SIZE_IN_MB);
14860 return;
14861
14862 case FLASH_5762_EEPROM_HD:
14863 nvmpinstrp = FLASH_5720_EEPROM_HD;
14864 break;
14865 case FLASH_5762_EEPROM_LD:
14866 nvmpinstrp = FLASH_5720_EEPROM_LD;
14867 break;
14868 case FLASH_5720VENDOR_M_ST_M45PE20:
14869 /* This pinstrap supports multiple sizes, so force it
14870 * to read the actual size from location 0xf0.
14871 */
14872 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14873 break;
14874 }
14875 }
14876
14877 switch (nvmpinstrp) {
14878 case FLASH_5720_EEPROM_HD:
14879 case FLASH_5720_EEPROM_LD:
14880 tp->nvram_jedecnum = JEDEC_ATMEL;
14881 tg3_flag_set(tp, NVRAM_BUFFERED);
14882
14883 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14884 tw32(NVRAM_CFG1, nvcfg1);
14885 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14886 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14887 else
14888 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14889 return;
14890 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14891 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14892 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14893 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14894 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14895 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14896 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14897 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14898 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14899 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14900 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14901 case FLASH_5720VENDOR_ATMEL_45USPT:
14902 tp->nvram_jedecnum = JEDEC_ATMEL;
14903 tg3_flag_set(tp, NVRAM_BUFFERED);
14904 tg3_flag_set(tp, FLASH);
14905
14906 switch (nvmpinstrp) {
14907 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14908 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14909 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14910 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14911 break;
14912 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14913 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14914 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14915 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14916 break;
14917 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14918 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14919 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14920 break;
14921 default:
14922 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14923 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14924 break;
14925 }
14926 break;
14927 case FLASH_5720VENDOR_M_ST_M25PE10:
14928 case FLASH_5720VENDOR_M_ST_M45PE10:
14929 case FLASH_5720VENDOR_A_ST_M25PE10:
14930 case FLASH_5720VENDOR_A_ST_M45PE10:
14931 case FLASH_5720VENDOR_M_ST_M25PE20:
14932 case FLASH_5720VENDOR_M_ST_M45PE20:
14933 case FLASH_5720VENDOR_A_ST_M25PE20:
14934 case FLASH_5720VENDOR_A_ST_M45PE20:
14935 case FLASH_5720VENDOR_M_ST_M25PE40:
14936 case FLASH_5720VENDOR_M_ST_M45PE40:
14937 case FLASH_5720VENDOR_A_ST_M25PE40:
14938 case FLASH_5720VENDOR_A_ST_M45PE40:
14939 case FLASH_5720VENDOR_M_ST_M25PE80:
14940 case FLASH_5720VENDOR_M_ST_M45PE80:
14941 case FLASH_5720VENDOR_A_ST_M25PE80:
14942 case FLASH_5720VENDOR_A_ST_M45PE80:
14943 case FLASH_5720VENDOR_ST_25USPT:
14944 case FLASH_5720VENDOR_ST_45USPT:
14945 tp->nvram_jedecnum = JEDEC_ST;
14946 tg3_flag_set(tp, NVRAM_BUFFERED);
14947 tg3_flag_set(tp, FLASH);
14948
14949 switch (nvmpinstrp) {
14950 case FLASH_5720VENDOR_M_ST_M25PE20:
14951 case FLASH_5720VENDOR_M_ST_M45PE20:
14952 case FLASH_5720VENDOR_A_ST_M25PE20:
14953 case FLASH_5720VENDOR_A_ST_M45PE20:
14954 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14955 break;
14956 case FLASH_5720VENDOR_M_ST_M25PE40:
14957 case FLASH_5720VENDOR_M_ST_M45PE40:
14958 case FLASH_5720VENDOR_A_ST_M25PE40:
14959 case FLASH_5720VENDOR_A_ST_M45PE40:
14960 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14961 break;
14962 case FLASH_5720VENDOR_M_ST_M25PE80:
14963 case FLASH_5720VENDOR_M_ST_M45PE80:
14964 case FLASH_5720VENDOR_A_ST_M25PE80:
14965 case FLASH_5720VENDOR_A_ST_M45PE80:
14966 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14967 break;
14968 default:
14969 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14970 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14971 break;
14972 }
14973 break;
14974 default:
14975 tg3_flag_set(tp, NO_NVRAM);
14976 return;
14977 }
14978
14979 tg3_nvram_get_pagesize(tp, nvcfg1);
14980 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14981 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14982
14983 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14984 u32 val;
14985
14986 if (tg3_nvram_read(tp, 0, &val))
14987 return;
14988
14989 if (val != TG3_EEPROM_MAGIC &&
14990 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14991 tg3_flag_set(tp, NO_NVRAM);
14992 }
14993 }
14994
14995 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14996 static void tg3_nvram_init(struct tg3 *tp)
14997 {
14998 if (tg3_flag(tp, IS_SSB_CORE)) {
14999 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
15000 tg3_flag_clear(tp, NVRAM);
15001 tg3_flag_clear(tp, NVRAM_BUFFERED);
15002 tg3_flag_set(tp, NO_NVRAM);
15003 return;
15004 }
15005
15006 tw32_f(GRC_EEPROM_ADDR,
15007 (EEPROM_ADDR_FSM_RESET |
15008 (EEPROM_DEFAULT_CLOCK_PERIOD <<
15009 EEPROM_ADDR_CLKPERD_SHIFT)));
15010
15011 msleep(1);
15012
15013 /* Enable seeprom accesses. */
15014 tw32_f(GRC_LOCAL_CTRL,
15015 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15016 udelay(100);
15017
15018 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15019 tg3_asic_rev(tp) != ASIC_REV_5701) {
15020 tg3_flag_set(tp, NVRAM);
15021
15022 if (tg3_nvram_lock(tp)) {
15023 netdev_warn(tp->dev,
15024 "Cannot get nvram lock, %s failed\n",
15025 __func__);
15026 return;
15027 }
15028 tg3_enable_nvram_access(tp);
15029
15030 tp->nvram_size = 0;
15031
15032 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15033 tg3_get_5752_nvram_info(tp);
15034 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15035 tg3_get_5755_nvram_info(tp);
15036 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15037 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15038 tg3_asic_rev(tp) == ASIC_REV_5785)
15039 tg3_get_5787_nvram_info(tp);
15040 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15041 tg3_get_5761_nvram_info(tp);
15042 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15043 tg3_get_5906_nvram_info(tp);
15044 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15045 tg3_flag(tp, 57765_CLASS))
15046 tg3_get_57780_nvram_info(tp);
15047 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15048 tg3_asic_rev(tp) == ASIC_REV_5719)
15049 tg3_get_5717_nvram_info(tp);
15050 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15051 tg3_asic_rev(tp) == ASIC_REV_5762)
15052 tg3_get_5720_nvram_info(tp);
15053 else
15054 tg3_get_nvram_info(tp);
15055
15056 if (tp->nvram_size == 0)
15057 tg3_get_nvram_size(tp);
15058
15059 tg3_disable_nvram_access(tp);
15060 tg3_nvram_unlock(tp);
15061
15062 } else {
15063 tg3_flag_clear(tp, NVRAM);
15064 tg3_flag_clear(tp, NVRAM_BUFFERED);
15065
15066 tg3_get_eeprom_size(tp);
15067 }
15068 }
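
/* Editor's note: a minimal illustrative sketch (not part of the driver)
 * of the NVRAM access bracket used by tg3_nvram_init() above: take the
 * hardware lock, enable access, do the work, then tear down in reverse
 * order.  The 0x10 offset is an arbitrary example value.
 */
static int __maybe_unused tg3_nvram_bracket_example(struct tg3 *tp, u32 *val)
{
	int err;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;
	tg3_enable_nvram_access(tp);

	err = tg3_nvram_read(tp, 0x10, val);	/* arbitrary example offset */

	tg3_disable_nvram_access(tp);
	tg3_nvram_unlock(tp);
	return err;
}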
15069
15070 struct subsys_tbl_ent {
15071 u16 subsys_vendor, subsys_devid;
15072 u32 phy_id;
15073 };
15074
15075 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15076 /* Broadcom boards. */
15077 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15078 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15079 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15080 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15081 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15082 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15083 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15084 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15085 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15086 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15087 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15088 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15089 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15090 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15091 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15092 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15093 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15094 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15095 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15096 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15097 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15098 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15099
15100 /* 3com boards. */
15101 { TG3PCI_SUBVENDOR_ID_3COM,
15102 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15103 { TG3PCI_SUBVENDOR_ID_3COM,
15104 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15105 { TG3PCI_SUBVENDOR_ID_3COM,
15106 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15107 { TG3PCI_SUBVENDOR_ID_3COM,
15108 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15109 { TG3PCI_SUBVENDOR_ID_3COM,
15110 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15111
15112 /* DELL boards. */
15113 { TG3PCI_SUBVENDOR_ID_DELL,
15114 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15115 { TG3PCI_SUBVENDOR_ID_DELL,
15116 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15117 { TG3PCI_SUBVENDOR_ID_DELL,
15118 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15119 { TG3PCI_SUBVENDOR_ID_DELL,
15120 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15121
15122 /* Compaq boards. */
15123 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15124 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15125 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15126 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15127 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15128 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15129 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15130 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15131 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15132 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15133
15134 /* IBM boards. */
15135 { TG3PCI_SUBVENDOR_ID_IBM,
15136 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15137 };
15138
15139 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15140 {
15141 int i;
15142
15143 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15144 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15145 tp->pdev->subsystem_vendor) &&
15146 (subsys_id_to_phy_id[i].subsys_devid ==
15147 tp->pdev->subsystem_device))
15148 return &subsys_id_to_phy_id[i];
15149 }
15150 return NULL;
15151 }
15152
15153 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15154 {
15155 u32 val;
15156
15157 tp->phy_id = TG3_PHY_ID_INVALID;
15158 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15159
15160 /* Assume an onboard device and WOL capable by default. */
15161 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15162 tg3_flag_set(tp, WOL_CAP);
15163
15164 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15165 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15166 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15167 tg3_flag_set(tp, IS_NIC);
15168 }
15169 val = tr32(VCPU_CFGSHDW);
15170 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15171 tg3_flag_set(tp, ASPM_WORKAROUND);
15172 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15173 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15174 tg3_flag_set(tp, WOL_ENABLE);
15175 device_set_wakeup_enable(&tp->pdev->dev, true);
15176 }
15177 goto done;
15178 }
15179
15180 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15181 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15182 u32 nic_cfg, led_cfg;
15183 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15184 u32 nic_phy_id, ver, eeprom_phy_id;
15185 int eeprom_phy_serdes = 0;
15186
15187 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15188 tp->nic_sram_data_cfg = nic_cfg;
15189
15190 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15191 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15192 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15193 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15194 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15195 (ver > 0) && (ver < 0x100))
15196 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15197
15198 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15199 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15200
15201 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15202 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15203 tg3_asic_rev(tp) == ASIC_REV_5720)
15204 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15205
15206 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15207 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15208 eeprom_phy_serdes = 1;
15209
15210 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15211 if (nic_phy_id != 0) {
15212 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15213 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15214
15215 eeprom_phy_id = (id1 >> 16) << 10;
15216 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15217 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15218 } else
15219 eeprom_phy_id = 0;
15220
15221 tp->phy_id = eeprom_phy_id;
15222 if (eeprom_phy_serdes) {
15223 if (!tg3_flag(tp, 5705_PLUS))
15224 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15225 else
15226 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15227 }
15228
15229 if (tg3_flag(tp, 5750_PLUS))
15230 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15231 SHASTA_EXT_LED_MODE_MASK);
15232 else
15233 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15234
15235 switch (led_cfg) {
15236 default:
15237 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15238 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15239 break;
15240
15241 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15242 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15243 break;
15244
15245 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15246 tp->led_ctrl = LED_CTRL_MODE_MAC;
15247
15248 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15249 * read on some older 5700/5701 bootcode.
15250 */
15251 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15252 tg3_asic_rev(tp) == ASIC_REV_5701)
15253 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15254
15255 break;
15256
15257 case SHASTA_EXT_LED_SHARED:
15258 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15259 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15260 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15261 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15262 LED_CTRL_MODE_PHY_2);
15263
15264 if (tg3_flag(tp, 5717_PLUS) ||
15265 tg3_asic_rev(tp) == ASIC_REV_5762)
15266 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15267 LED_CTRL_BLINK_RATE_MASK;
15268
15269 break;
15270
15271 case SHASTA_EXT_LED_MAC:
15272 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15273 break;
15274
15275 case SHASTA_EXT_LED_COMBO:
15276 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15277 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15278 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15279 LED_CTRL_MODE_PHY_2);
15280 break;
15281
15282 }
15283
15284 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15285 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15286 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15287 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15288
15289 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15290 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15291
15292 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15293 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15294 if ((tp->pdev->subsystem_vendor ==
15295 PCI_VENDOR_ID_ARIMA) &&
15296 (tp->pdev->subsystem_device == 0x205a ||
15297 tp->pdev->subsystem_device == 0x2063))
15298 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15299 } else {
15300 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15301 tg3_flag_set(tp, IS_NIC);
15302 }
15303
15304 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15305 tg3_flag_set(tp, ENABLE_ASF);
15306 if (tg3_flag(tp, 5750_PLUS))
15307 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15308 }
15309
15310 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15311 tg3_flag(tp, 5750_PLUS))
15312 tg3_flag_set(tp, ENABLE_APE);
15313
15314 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15315 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15316 tg3_flag_clear(tp, WOL_CAP);
15317
15318 if (tg3_flag(tp, WOL_CAP) &&
15319 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15320 tg3_flag_set(tp, WOL_ENABLE);
15321 device_set_wakeup_enable(&tp->pdev->dev, true);
15322 }
15323
15324 if (cfg2 & (1 << 17))
15325 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15326
15327 	/* Serdes signal pre-emphasis in register 0x590 is set by
15328 	 * the bootcode if bit 18 is set. */
15329 if (cfg2 & (1 << 18))
15330 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15331
15332 if ((tg3_flag(tp, 57765_PLUS) ||
15333 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15334 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15335 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15336 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15337
15338 if (tg3_flag(tp, PCI_EXPRESS)) {
15339 u32 cfg3;
15340
15341 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15342 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15343 !tg3_flag(tp, 57765_PLUS) &&
15344 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15345 tg3_flag_set(tp, ASPM_WORKAROUND);
15346 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15347 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15348 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15349 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15350 }
15351
15352 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15353 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15354 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15355 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15356 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15357 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15358
15359 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15360 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15361 }
15362 done:
15363 if (tg3_flag(tp, WOL_CAP))
15364 device_set_wakeup_enable(&tp->pdev->dev,
15365 tg3_flag(tp, WOL_ENABLE));
15366 else
15367 device_set_wakeup_capable(&tp->pdev->dev, false);
15368 }
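
/* Editor's note: an illustrative helper (not part of the driver) spelling
 * out the PHY ID packing used in tg3_phy_probe() below and, modulo the
 * SRAM field extraction, in tg3_get_eeprom_hw_cfg() above: the 16 bits of
 * MII_PHYSID1 occupy bits 25:10, the OUI bits of MII_PHYSID2 move up to
 * bits 31:26, and the model/revision bits stay at bits 9:0.
 */
static u32 __maybe_unused tg3_pack_phy_id(u32 id1, u32 id2)
{
	u32 phy_id;

	phy_id  = (id1 & 0xffff) << 10;	/* PHYSID1: OUI high bits */
	phy_id |= (id2 & 0xfc00) << 16;	/* PHYSID2: OUI low bits */
	phy_id |= (id2 & 0x03ff) << 0;	/* PHYSID2: model and revision */
	return phy_id;
}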
15369
15370 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15371 {
15372 int i, err;
15373 u32 val2, off = offset * 8;
15374
15375 err = tg3_nvram_lock(tp);
15376 if (err)
15377 return err;
15378
15379 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15380 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15381 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15382 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15383 udelay(10);
15384
15385 for (i = 0; i < 100; i++) {
15386 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15387 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15388 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15389 break;
15390 }
15391 udelay(10);
15392 }
15393
15394 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15395
15396 tg3_nvram_unlock(tp);
15397 if (val2 & APE_OTP_STATUS_CMD_DONE)
15398 return 0;
15399
15400 return -EBUSY;
15401 }
15402
15403 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15404 {
15405 int i;
15406 u32 val;
15407
15408 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15409 tw32(OTP_CTRL, cmd);
15410
15411 /* Wait for up to 1 ms for command to execute. */
15412 for (i = 0; i < 100; i++) {
15413 val = tr32(OTP_STATUS);
15414 if (val & OTP_STATUS_CMD_DONE)
15415 break;
15416 udelay(10);
15417 }
15418
15419 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15420 }
15421
15422 /* Read the gphy configuration from the OTP region of the chip. The gphy
15423 * configuration is a 32-bit value that straddles the alignment boundary.
15424 * We do two 32-bit reads and then shift and merge the results.
15425 */
15426 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15427 {
15428 u32 bhalf_otp, thalf_otp;
15429
15430 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15431
15432 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15433 return 0;
15434
15435 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15436
15437 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15438 return 0;
15439
15440 thalf_otp = tr32(OTP_READ_DATA);
15441
15442 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15443
15444 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15445 return 0;
15446
15447 bhalf_otp = tr32(OTP_READ_DATA);
15448
15449 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15450 }
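
/* Editor's note: a worked example of the shift-and-merge above, with
 * made-up values.  If thalf_otp = 0xAAAA1234 and bhalf_otp = 0x5678BBBB,
 * the low half of thalf_otp (0x1234) becomes the high 16 bits of the
 * result and the high half of bhalf_otp (0x5678) the low 16 bits,
 * yielding 0x12345678.
 */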
15451
15452 static void tg3_phy_init_link_config(struct tg3 *tp)
15453 {
15454 u32 adv = ADVERTISED_Autoneg;
15455
15456 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15457 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15458 adv |= ADVERTISED_1000baseT_Half;
15459 adv |= ADVERTISED_1000baseT_Full;
15460 }
15461
15462 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15463 adv |= ADVERTISED_100baseT_Half |
15464 ADVERTISED_100baseT_Full |
15465 ADVERTISED_10baseT_Half |
15466 ADVERTISED_10baseT_Full |
15467 ADVERTISED_TP;
15468 else
15469 adv |= ADVERTISED_FIBRE;
15470
15471 tp->link_config.advertising = adv;
15472 tp->link_config.speed = SPEED_UNKNOWN;
15473 tp->link_config.duplex = DUPLEX_UNKNOWN;
15474 tp->link_config.autoneg = AUTONEG_ENABLE;
15475 tp->link_config.active_speed = SPEED_UNKNOWN;
15476 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15477
15478 tp->old_link = -1;
15479 }
15480
15481 static int tg3_phy_probe(struct tg3 *tp)
15482 {
15483 u32 hw_phy_id_1, hw_phy_id_2;
15484 u32 hw_phy_id, hw_phy_id_masked;
15485 int err;
15486
15487 /* flow control autonegotiation is default behavior */
15488 tg3_flag_set(tp, PAUSE_AUTONEG);
15489 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15490
15491 if (tg3_flag(tp, ENABLE_APE)) {
15492 switch (tp->pci_fn) {
15493 case 0:
15494 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15495 break;
15496 case 1:
15497 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15498 break;
15499 case 2:
15500 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15501 break;
15502 case 3:
15503 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15504 break;
15505 }
15506 }
15507
15508 if (!tg3_flag(tp, ENABLE_ASF) &&
15509 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15510 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15511 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15512 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15513
15514 if (tg3_flag(tp, USE_PHYLIB))
15515 return tg3_phy_init(tp);
15516
15517 /* Reading the PHY ID register can conflict with ASF
15518 * firmware access to the PHY hardware.
15519 */
15520 err = 0;
15521 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15522 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15523 } else {
15524 		/* Now read the physical PHY_ID from the chip and verify
15525 		 * that it is sane. If it doesn't look good, we fall back
15526 		 * to the PHY_ID found in the eeprom area and, failing
15527 		 * that, the hard-coded subsystem table.
15528 */
15529 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15530 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15531
15532 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15533 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15534 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15535
15536 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15537 }
15538
15539 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15540 tp->phy_id = hw_phy_id;
15541 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15542 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15543 else
15544 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15545 } else {
15546 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15547 /* Do nothing, phy ID already set up in
15548 * tg3_get_eeprom_hw_cfg().
15549 */
15550 } else {
15551 struct subsys_tbl_ent *p;
15552
15553 /* No eeprom signature? Try the hardcoded
15554 * subsys device table.
15555 */
15556 p = tg3_lookup_by_subsys(tp);
15557 if (p) {
15558 tp->phy_id = p->phy_id;
15559 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15560 				/* So far we have seen the IDs 0xbc050cd0,
15561 				 * 0xbc050f80 and 0xbc050c30 on devices
15562 				 * connected to a BCM4785, and there are
15563 				 * probably more. Just assume that the phy is
15564 				 * supported when it is connected to an SSB core
15565 * for now.
15566 */
15567 return -ENODEV;
15568 }
15569
15570 if (!tp->phy_id ||
15571 tp->phy_id == TG3_PHY_ID_BCM8002)
15572 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15573 }
15574 }
15575
15576 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15577 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15578 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15579 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15580 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15581 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15582 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15583 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15584 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15585 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15586
15587 tp->eee.supported = SUPPORTED_100baseT_Full |
15588 SUPPORTED_1000baseT_Full;
15589 tp->eee.advertised = ADVERTISED_100baseT_Full |
15590 ADVERTISED_1000baseT_Full;
15591 tp->eee.eee_enabled = 1;
15592 tp->eee.tx_lpi_enabled = 1;
15593 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15594 }
15595
15596 tg3_phy_init_link_config(tp);
15597
15598 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15599 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15600 !tg3_flag(tp, ENABLE_APE) &&
15601 !tg3_flag(tp, ENABLE_ASF)) {
15602 u32 bmsr, dummy;
15603
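		/* Editor's note: the BMSR link-status bit is latched low,
		 * so the register is read twice; the second read reflects
		 * the current link state.
		 */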
15604 tg3_readphy(tp, MII_BMSR, &bmsr);
15605 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15606 (bmsr & BMSR_LSTATUS))
15607 goto skip_phy_reset;
15608
15609 err = tg3_phy_reset(tp);
15610 if (err)
15611 return err;
15612
15613 tg3_phy_set_wirespeed(tp);
15614
15615 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15616 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15617 tp->link_config.flowctrl);
15618
15619 tg3_writephy(tp, MII_BMCR,
15620 BMCR_ANENABLE | BMCR_ANRESTART);
15621 }
15622 }
15623
15624 skip_phy_reset:
15625 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15626 err = tg3_init_5401phy_dsp(tp);
15627 if (err)
15628 return err;
15629
15630 err = tg3_init_5401phy_dsp(tp);
15631 }
15632
15633 return err;
15634 }
15635
15636 static void tg3_read_vpd(struct tg3 *tp)
15637 {
15638 u8 *vpd_data;
15639 unsigned int block_end, rosize, len;
15640 u32 vpdlen;
15641 int j, i = 0;
15642
15643 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15644 if (!vpd_data)
15645 goto out_no_vpd;
15646
15647 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15648 if (i < 0)
15649 goto out_not_found;
15650
15651 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15652 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15653 i += PCI_VPD_LRDT_TAG_SIZE;
15654
15655 if (block_end > vpdlen)
15656 goto out_not_found;
15657
15658 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15659 PCI_VPD_RO_KEYWORD_MFR_ID);
15660 if (j > 0) {
15661 len = pci_vpd_info_field_size(&vpd_data[j]);
15662
15663 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15664 if (j + len > block_end || len != 4 ||
15665 memcmp(&vpd_data[j], "1028", 4))
15666 goto partno;
15667
15668 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15669 PCI_VPD_RO_KEYWORD_VENDOR0);
15670 if (j < 0)
15671 goto partno;
15672
15673 len = pci_vpd_info_field_size(&vpd_data[j]);
15674
15675 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15676 if (j + len > block_end)
15677 goto partno;
15678
15679 if (len >= sizeof(tp->fw_ver))
15680 len = sizeof(tp->fw_ver) - 1;
15681 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15682 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15683 &vpd_data[j]);
15684 }
15685
15686 partno:
15687 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15688 PCI_VPD_RO_KEYWORD_PARTNO);
15689 if (i < 0)
15690 goto out_not_found;
15691
15692 len = pci_vpd_info_field_size(&vpd_data[i]);
15693
15694 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15695 if (len > TG3_BPN_SIZE ||
15696 (len + i) > vpdlen)
15697 goto out_not_found;
15698
15699 memcpy(tp->board_part_number, &vpd_data[i], len);
15700
15701 out_not_found:
15702 kfree(vpd_data);
15703 if (tp->board_part_number[0])
15704 return;
15705
15706 out_no_vpd:
15707 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15708 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15709 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15710 strcpy(tp->board_part_number, "BCM5717");
15711 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15712 strcpy(tp->board_part_number, "BCM5718");
15713 else
15714 goto nomatch;
15715 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15716 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15717 strcpy(tp->board_part_number, "BCM57780");
15718 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15719 strcpy(tp->board_part_number, "BCM57760");
15720 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15721 strcpy(tp->board_part_number, "BCM57790");
15722 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15723 strcpy(tp->board_part_number, "BCM57788");
15724 else
15725 goto nomatch;
15726 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15727 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15728 strcpy(tp->board_part_number, "BCM57761");
15729 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15730 strcpy(tp->board_part_number, "BCM57765");
15731 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15732 strcpy(tp->board_part_number, "BCM57781");
15733 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15734 strcpy(tp->board_part_number, "BCM57785");
15735 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15736 strcpy(tp->board_part_number, "BCM57791");
15737 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15738 strcpy(tp->board_part_number, "BCM57795");
15739 else
15740 goto nomatch;
15741 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15742 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15743 strcpy(tp->board_part_number, "BCM57762");
15744 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15745 strcpy(tp->board_part_number, "BCM57766");
15746 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15747 strcpy(tp->board_part_number, "BCM57782");
15748 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15749 strcpy(tp->board_part_number, "BCM57786");
15750 else
15751 goto nomatch;
15752 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15753 strcpy(tp->board_part_number, "BCM95906");
15754 } else {
15755 nomatch:
15756 strcpy(tp->board_part_number, "none");
15757 }
15758 }
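
/* Editor's note: an illustrative sketch (not part of the driver) of the
 * VPD walk performed by tg3_read_vpd() above, here pulling the serial
 * number ("SN") keyword instead of the part number.  It assumes the same
 * VPD helper API used above (pci_vpd_find_tag() with the four-argument
 * signature of this kernel era).
 */
static int __maybe_unused tg3_vpd_read_serial(const u8 *vpd,
					      unsigned int vpdlen,
					      char *buf, size_t buflen)
{
	unsigned int rosize;
	int i, j, len;

	/* Locate the read-only large-resource section. */
	i = pci_vpd_find_tag(vpd, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		return -ENODATA;

	rosize = pci_vpd_lrdt_size(&vpd[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;

	/* Find the "SN" keyword inside that section. */
	j = pci_vpd_find_info_keyword(vpd, i, rosize,
				      PCI_VPD_RO_KEYWORD_SERIALNO);
	if (j < 0)
		return -ENODATA;

	len = pci_vpd_info_field_size(&vpd[j]);
	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > vpdlen || len + 1 > buflen)
		return -EINVAL;

	memcpy(buf, &vpd[j], len);
	buf[len] = '\0';
	return 0;
}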
15759
15760 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15761 {
15762 u32 val;
15763
15764 if (tg3_nvram_read(tp, offset, &val) ||
15765 (val & 0xfc000000) != 0x0c000000 ||
15766 tg3_nvram_read(tp, offset + 4, &val) ||
15767 val != 0)
15768 return 0;
15769
15770 return 1;
15771 }
15772
15773 static void tg3_read_bc_ver(struct tg3 *tp)
15774 {
15775 u32 val, offset, start, ver_offset;
15776 int i, dst_off;
15777 bool newver = false;
15778
15779 if (tg3_nvram_read(tp, 0xc, &offset) ||
15780 tg3_nvram_read(tp, 0x4, &start))
15781 return;
15782
15783 offset = tg3_nvram_logical_addr(tp, offset);
15784
15785 if (tg3_nvram_read(tp, offset, &val))
15786 return;
15787
15788 if ((val & 0xfc000000) == 0x0c000000) {
15789 if (tg3_nvram_read(tp, offset + 4, &val))
15790 return;
15791
15792 if (val == 0)
15793 newver = true;
15794 }
15795
15796 dst_off = strlen(tp->fw_ver);
15797
15798 if (newver) {
15799 if (TG3_VER_SIZE - dst_off < 16 ||
15800 tg3_nvram_read(tp, offset + 8, &ver_offset))
15801 return;
15802
15803 offset = offset + ver_offset - start;
15804 for (i = 0; i < 16; i += 4) {
15805 __be32 v;
15806 if (tg3_nvram_read_be32(tp, offset + i, &v))
15807 return;
15808
15809 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15810 }
15811 } else {
15812 u32 major, minor;
15813
15814 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15815 return;
15816
15817 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15818 TG3_NVM_BCVER_MAJSFT;
15819 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15820 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15821 "v%d.%02d", major, minor);
15822 }
15823 }
15824
15825 static void tg3_read_hwsb_ver(struct tg3 *tp)
15826 {
15827 u32 val, major, minor;
15828
15829 /* Use native endian representation */
15830 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15831 return;
15832
15833 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15834 TG3_NVM_HWSB_CFG1_MAJSFT;
15835 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15836 TG3_NVM_HWSB_CFG1_MINSFT;
15837
15838 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15839 }
15840
15841 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15842 {
15843 u32 offset, major, minor, build;
15844
15845 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15846
15847 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15848 return;
15849
15850 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15851 case TG3_EEPROM_SB_REVISION_0:
15852 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15853 break;
15854 case TG3_EEPROM_SB_REVISION_2:
15855 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15856 break;
15857 case TG3_EEPROM_SB_REVISION_3:
15858 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15859 break;
15860 case TG3_EEPROM_SB_REVISION_4:
15861 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15862 break;
15863 case TG3_EEPROM_SB_REVISION_5:
15864 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15865 break;
15866 case TG3_EEPROM_SB_REVISION_6:
15867 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15868 break;
15869 default:
15870 return;
15871 }
15872
15873 if (tg3_nvram_read(tp, offset, &val))
15874 return;
15875
15876 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15877 TG3_EEPROM_SB_EDH_BLD_SHFT;
15878 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15879 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15880 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15881
15882 if (minor > 99 || build > 26)
15883 return;
15884
15885 offset = strlen(tp->fw_ver);
15886 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15887 " v%d.%02d", major, minor);
15888
15889 if (build > 0) {
15890 offset = strlen(tp->fw_ver);
15891 if (offset < TG3_VER_SIZE - 1)
15892 tp->fw_ver[offset] = 'a' + build - 1;
15893 }
15894 }
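
/* Editor's note: a worked example of the formatting above, with made-up
 * field values.  major = 1, minor = 5, build = 2 appends " v1.05" and
 * then the build letter 'b' ('a' + build - 1), giving "sb v1.05b".
 */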
15895
15896 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15897 {
15898 u32 val, offset, start;
15899 int i, vlen;
15900
15901 for (offset = TG3_NVM_DIR_START;
15902 offset < TG3_NVM_DIR_END;
15903 offset += TG3_NVM_DIRENT_SIZE) {
15904 if (tg3_nvram_read(tp, offset, &val))
15905 return;
15906
15907 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15908 break;
15909 }
15910
15911 if (offset == TG3_NVM_DIR_END)
15912 return;
15913
15914 if (!tg3_flag(tp, 5705_PLUS))
15915 start = 0x08000000;
15916 else if (tg3_nvram_read(tp, offset - 4, &start))
15917 return;
15918
15919 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15920 !tg3_fw_img_is_valid(tp, offset) ||
15921 tg3_nvram_read(tp, offset + 8, &val))
15922 return;
15923
15924 offset += val - start;
15925
15926 vlen = strlen(tp->fw_ver);
15927
15928 tp->fw_ver[vlen++] = ',';
15929 tp->fw_ver[vlen++] = ' ';
15930
15931 for (i = 0; i < 4; i++) {
15932 __be32 v;
15933 if (tg3_nvram_read_be32(tp, offset, &v))
15934 return;
15935
15936 offset += sizeof(v);
15937
15938 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15939 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15940 break;
15941 }
15942
15943 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15944 vlen += sizeof(v);
15945 }
15946 }
15947
15948 static void tg3_probe_ncsi(struct tg3 *tp)
15949 {
15950 u32 apedata;
15951
15952 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15953 if (apedata != APE_SEG_SIG_MAGIC)
15954 return;
15955
15956 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15957 if (!(apedata & APE_FW_STATUS_READY))
15958 return;
15959
15960 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15961 tg3_flag_set(tp, APE_HAS_NCSI);
15962 }
15963
15964 static void tg3_read_dash_ver(struct tg3 *tp)
15965 {
15966 int vlen;
15967 u32 apedata;
15968 char *fwtype;
15969
15970 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15971
15972 if (tg3_flag(tp, APE_HAS_NCSI))
15973 fwtype = "NCSI";
15974 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15975 fwtype = "SMASH";
15976 else
15977 fwtype = "DASH";
15978
15979 vlen = strlen(tp->fw_ver);
15980
15981 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15982 fwtype,
15983 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15984 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15985 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15986 (apedata & APE_FW_VERSION_BLDMSK));
15987 }
15988
15989 static void tg3_read_otp_ver(struct tg3 *tp)
15990 {
15991 u32 val, val2;
15992
15993 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15994 return;
15995
15996 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15997 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15998 TG3_OTP_MAGIC0_VALID(val)) {
15999 u64 val64 = (u64) val << 32 | val2;
16000 u32 ver = 0;
16001 int i, vlen;
16002
16003 for (i = 0; i < 7; i++) {
16004 if ((val64 & 0xff) == 0)
16005 break;
16006 ver = val64 & 0xff;
16007 val64 >>= 8;
16008 }
16009 vlen = strlen(tp->fw_ver);
16010 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16011 }
16012 }
16013
16014 static void tg3_read_fw_ver(struct tg3 *tp)
16015 {
16016 u32 val;
16017 bool vpd_vers = false;
16018
16019 if (tp->fw_ver[0] != 0)
16020 vpd_vers = true;
16021
16022 if (tg3_flag(tp, NO_NVRAM)) {
16023 strcat(tp->fw_ver, "sb");
16024 tg3_read_otp_ver(tp);
16025 return;
16026 }
16027
16028 if (tg3_nvram_read(tp, 0, &val))
16029 return;
16030
16031 if (val == TG3_EEPROM_MAGIC)
16032 tg3_read_bc_ver(tp);
16033 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16034 tg3_read_sb_ver(tp, val);
16035 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16036 tg3_read_hwsb_ver(tp);
16037
16038 if (tg3_flag(tp, ENABLE_ASF)) {
16039 if (tg3_flag(tp, ENABLE_APE)) {
16040 tg3_probe_ncsi(tp);
16041 if (!vpd_vers)
16042 tg3_read_dash_ver(tp);
16043 } else if (!vpd_vers) {
16044 tg3_read_mgmtfw_ver(tp);
16045 }
16046 }
16047
16048 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16049 }
16050
16051 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16052 {
16053 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16054 return TG3_RX_RET_MAX_SIZE_5717;
16055 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16056 return TG3_RX_RET_MAX_SIZE_5700;
16057 else
16058 return TG3_RX_RET_MAX_SIZE_5705;
16059 }
16060
16061 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16062 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16063 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16064 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16065 { },
16066 };
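
/* Editor's note: an illustrative sketch (not part of the driver) of the
 * read-back flush used when one of the bridges above is present (see the
 * MBOX_WRITE_REORDER handling in tg3_get_invariants() below): each
 * mailbox write is followed by a read of the same register so the host
 * bridge cannot reorder it.  tg3_write_flush_reg32() in this file does
 * the equivalent through tp->regs.
 */
static void __maybe_unused tg3_flush_write_example(void __iomem *mbox, u32 val)
{
	writel(val, mbox);
	readl(mbox);	/* read back to force the posted write to complete */
}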
16067
16068 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16069 {
16070 struct pci_dev *peer;
16071 unsigned int func, devnr = tp->pdev->devfn & ~7;
16072
16073 for (func = 0; func < 8; func++) {
16074 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16075 if (peer && peer != tp->pdev)
16076 break;
16077 pci_dev_put(peer);
16078 }
16079 	/* The 5704 can be configured in single-port mode; set peer to
16080 * tp->pdev in that case.
16081 */
16082 if (!peer) {
16083 peer = tp->pdev;
16084 return peer;
16085 }
16086
16087 /*
16088 * We don't need to keep the refcount elevated; there's no way
16089 	 * to remove one half of this device without removing the other.
16090 */
16091 pci_dev_put(peer);
16092
16093 return peer;
16094 }
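
/* Editor's note: a concrete example of the devfn arithmetic above.  A
 * device at 02:04.1 has devfn 0x21 (slot << 3 | function); devfn & ~7
 * clears the function bits, so the loop probes 02:04.0 through 02:04.7
 * looking for the other port of a dual-port 5704.
 */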
16095
16096 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16097 {
16098 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16099 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16100 u32 reg;
16101
16102 /* All devices that use the alternate
16103 * ASIC REV location have a CPMU.
16104 */
16105 tg3_flag_set(tp, CPMU_PRESENT);
16106
16107 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16108 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16109 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16110 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16111 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16112 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16113 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16114 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16115 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16116 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16117 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16118 reg = TG3PCI_GEN2_PRODID_ASICREV;
16119 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16120 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16121 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16122 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16123 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16124 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16125 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16126 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16127 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16128 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16129 reg = TG3PCI_GEN15_PRODID_ASICREV;
16130 else
16131 reg = TG3PCI_PRODID_ASICREV;
16132
16133 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16134 }
16135
16136 /* Wrong chip ID in 5752 A0. This code can be removed later
16137 * as A0 is not in production.
16138 */
16139 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16140 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16141
16142 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16143 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16144
16145 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16146 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16147 tg3_asic_rev(tp) == ASIC_REV_5720)
16148 tg3_flag_set(tp, 5717_PLUS);
16149
16150 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16151 tg3_asic_rev(tp) == ASIC_REV_57766)
16152 tg3_flag_set(tp, 57765_CLASS);
16153
16154 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16155 tg3_asic_rev(tp) == ASIC_REV_5762)
16156 tg3_flag_set(tp, 57765_PLUS);
16157
16158 /* Intentionally exclude ASIC_REV_5906 */
16159 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16160 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16161 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16162 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16163 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16164 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16165 tg3_flag(tp, 57765_PLUS))
16166 tg3_flag_set(tp, 5755_PLUS);
16167
16168 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16169 tg3_asic_rev(tp) == ASIC_REV_5714)
16170 tg3_flag_set(tp, 5780_CLASS);
16171
16172 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16173 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16174 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16175 tg3_flag(tp, 5755_PLUS) ||
16176 tg3_flag(tp, 5780_CLASS))
16177 tg3_flag_set(tp, 5750_PLUS);
16178
16179 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16180 tg3_flag(tp, 5750_PLUS))
16181 tg3_flag_set(tp, 5705_PLUS);
16182 }
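
/* Editor's note: a worked example of the revision decoding used
 * throughout this file, assuming the usual tg3.h helpers (asic rev =
 * pci_chip_rev_id >> 12, chip rev = pci_chip_rev_id >> 8).  A
 * pci_chip_rev_id of 0x4001 decodes to asic rev 0x4 (ASIC_REV_5750)
 * and chip rev 0x40 (CHIPREV_5750_AX).
 */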
16183
16184 static bool tg3_10_100_only_device(struct tg3 *tp,
16185 const struct pci_device_id *ent)
16186 {
16187 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16188
16189 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16190 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16191 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16192 return true;
16193
16194 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16195 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16196 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16197 return true;
16198 } else {
16199 return true;
16200 }
16201 }
16202
16203 return false;
16204 }
16205
16206 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16207 {
16208 u32 misc_ctrl_reg;
16209 u32 pci_state_reg, grc_misc_cfg;
16210 u32 val;
16211 u16 pci_cmd;
16212 int err;
16213
16214 /* Force memory write invalidate off. If we leave it on,
16215 * then on 5700_BX chips we have to enable a workaround.
16216 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16217 	 * to match the cacheline size. The Broadcom driver has this
16218 	 * workaround but turns MWI off at all times, so it never uses
16219 * it. This seems to suggest that the workaround is insufficient.
16220 */
16221 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16222 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16223 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16224
16225 /* Important! -- Make sure register accesses are byteswapped
16226 * correctly. Also, for those chips that require it, make
16227 * sure that indirect register accesses are enabled before
16228 * the first operation.
16229 */
16230 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16231 &misc_ctrl_reg);
16232 tp->misc_host_ctrl |= (misc_ctrl_reg &
16233 MISC_HOST_CTRL_CHIPREV);
16234 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16235 tp->misc_host_ctrl);
16236
16237 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16238
16239 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16240 * we need to disable memory and use config. cycles
16241 * only to access all registers. The 5702/03 chips
16242 * can mistakenly decode the special cycles from the
16243 * ICH chipsets as memory write cycles, causing corruption
16244 * of register and memory space. Only certain ICH bridges
16245 * will drive special cycles with non-zero data during the
16246 * address phase which can fall within the 5703's address
16247 * range. This is not an ICH bug as the PCI spec allows
16248 * non-zero address during special cycles. However, only
16249 * these ICH bridges are known to drive non-zero addresses
16250 * during special cycles.
16251 *
16252 * Since special cycles do not cross PCI bridges, we only
16253 * enable this workaround if the 5703 is on the secondary
16254 * bus of these ICH bridges.
16255 */
16256 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16257 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16258 static struct tg3_dev_id {
16259 u32 vendor;
16260 u32 device;
16261 u32 rev;
16262 } ich_chipsets[] = {
16263 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16264 PCI_ANY_ID },
16265 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16266 PCI_ANY_ID },
16267 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16268 0xa },
16269 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16270 PCI_ANY_ID },
16271 { },
16272 };
16273 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16274 struct pci_dev *bridge = NULL;
16275
16276 while (pci_id->vendor != 0) {
16277 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16278 bridge);
16279 if (!bridge) {
16280 pci_id++;
16281 continue;
16282 }
16283 if (pci_id->rev != PCI_ANY_ID) {
16284 if (bridge->revision > pci_id->rev)
16285 continue;
16286 }
16287 if (bridge->subordinate &&
16288 (bridge->subordinate->number ==
16289 tp->pdev->bus->number)) {
16290 tg3_flag_set(tp, ICH_WORKAROUND);
16291 pci_dev_put(bridge);
16292 break;
16293 }
16294 }
16295 }
16296
16297 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16298 static struct tg3_dev_id {
16299 u32 vendor;
16300 u32 device;
16301 } bridge_chipsets[] = {
16302 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16303 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16304 { },
16305 };
16306 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16307 struct pci_dev *bridge = NULL;
16308
16309 while (pci_id->vendor != 0) {
16310 bridge = pci_get_device(pci_id->vendor,
16311 pci_id->device,
16312 bridge);
16313 if (!bridge) {
16314 pci_id++;
16315 continue;
16316 }
16317 if (bridge->subordinate &&
16318 (bridge->subordinate->number <=
16319 tp->pdev->bus->number) &&
16320 (bridge->subordinate->busn_res.end >=
16321 tp->pdev->bus->number)) {
16322 tg3_flag_set(tp, 5701_DMA_BUG);
16323 pci_dev_put(bridge);
16324 break;
16325 }
16326 }
16327 }
16328
16329 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16330 	 * DMA addresses > 40-bit. This bridge may have additional 57xx
16331 	 * devices behind it, in some 4-port NIC designs for example.
16332 * Any tg3 device found behind the bridge will also need the 40-bit
16333 * DMA workaround.
16334 */
16335 if (tg3_flag(tp, 5780_CLASS)) {
16336 tg3_flag_set(tp, 40BIT_DMA_BUG);
16337 tp->msi_cap = tp->pdev->msi_cap;
16338 } else {
16339 struct pci_dev *bridge = NULL;
16340
16341 do {
16342 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16343 PCI_DEVICE_ID_SERVERWORKS_EPB,
16344 bridge);
16345 if (bridge && bridge->subordinate &&
16346 (bridge->subordinate->number <=
16347 tp->pdev->bus->number) &&
16348 (bridge->subordinate->busn_res.end >=
16349 tp->pdev->bus->number)) {
16350 tg3_flag_set(tp, 40BIT_DMA_BUG);
16351 pci_dev_put(bridge);
16352 break;
16353 }
16354 } while (bridge);
16355 }
16356
16357 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16358 tg3_asic_rev(tp) == ASIC_REV_5714)
16359 tp->pdev_peer = tg3_find_peer(tp);
16360
16361 /* Determine TSO capabilities */
16362 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16363 ; /* Do nothing. HW bug. */
16364 else if (tg3_flag(tp, 57765_PLUS))
16365 tg3_flag_set(tp, HW_TSO_3);
16366 else if (tg3_flag(tp, 5755_PLUS) ||
16367 tg3_asic_rev(tp) == ASIC_REV_5906)
16368 tg3_flag_set(tp, HW_TSO_2);
16369 else if (tg3_flag(tp, 5750_PLUS)) {
16370 tg3_flag_set(tp, HW_TSO_1);
16371 tg3_flag_set(tp, TSO_BUG);
16372 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16373 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16374 tg3_flag_clear(tp, TSO_BUG);
16375 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16376 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16377 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16378 tg3_flag_set(tp, FW_TSO);
16379 tg3_flag_set(tp, TSO_BUG);
16380 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16381 tp->fw_needed = FIRMWARE_TG3TSO5;
16382 else
16383 tp->fw_needed = FIRMWARE_TG3TSO;
16384 }
16385
16386 /* Selectively allow TSO based on operating conditions */
16387 if (tg3_flag(tp, HW_TSO_1) ||
16388 tg3_flag(tp, HW_TSO_2) ||
16389 tg3_flag(tp, HW_TSO_3) ||
16390 tg3_flag(tp, FW_TSO)) {
16391 /* For firmware TSO, assume ASF is disabled.
16392 * We'll disable TSO later if we discover ASF
16393 * is enabled in tg3_get_eeprom_hw_cfg().
16394 */
16395 tg3_flag_set(tp, TSO_CAPABLE);
16396 } else {
16397 tg3_flag_clear(tp, TSO_CAPABLE);
16398 tg3_flag_clear(tp, TSO_BUG);
16399 tp->fw_needed = NULL;
16400 }
16401
16402 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16403 tp->fw_needed = FIRMWARE_TG3;
16404
16405 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16406 tp->fw_needed = FIRMWARE_TG357766;
16407
16408 tp->irq_max = 1;
16409
16410 if (tg3_flag(tp, 5750_PLUS)) {
16411 tg3_flag_set(tp, SUPPORT_MSI);
16412 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16413 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16414 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16415 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16416 tp->pdev_peer == tp->pdev))
16417 tg3_flag_clear(tp, SUPPORT_MSI);
16418
16419 if (tg3_flag(tp, 5755_PLUS) ||
16420 tg3_asic_rev(tp) == ASIC_REV_5906) {
16421 tg3_flag_set(tp, 1SHOT_MSI);
16422 }
16423
16424 if (tg3_flag(tp, 57765_PLUS)) {
16425 tg3_flag_set(tp, SUPPORT_MSIX);
16426 tp->irq_max = TG3_IRQ_MAX_VECS;
16427 }
16428 }
16429
16430 tp->txq_max = 1;
16431 tp->rxq_max = 1;
16432 if (tp->irq_max > 1) {
16433 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16434 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16435
16436 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16437 tg3_asic_rev(tp) == ASIC_REV_5720)
16438 tp->txq_max = tp->irq_max - 1;
16439 }
16440
16441 if (tg3_flag(tp, 5755_PLUS) ||
16442 tg3_asic_rev(tp) == ASIC_REV_5906)
16443 tg3_flag_set(tp, SHORT_DMA_BUG);
16444
16445 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16446 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16447
16448 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16449 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16450 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16451 tg3_asic_rev(tp) == ASIC_REV_5762)
16452 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16453
16454 if (tg3_flag(tp, 57765_PLUS) &&
16455 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16456 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16457
16458 if (!tg3_flag(tp, 5705_PLUS) ||
16459 tg3_flag(tp, 5780_CLASS) ||
16460 tg3_flag(tp, USE_JUMBO_BDFLAG))
16461 tg3_flag_set(tp, JUMBO_CAPABLE);
16462
16463 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16464 &pci_state_reg);
16465
16466 if (pci_is_pcie(tp->pdev)) {
16467 u16 lnkctl;
16468
16469 tg3_flag_set(tp, PCI_EXPRESS);
16470
16471 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16472 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16473 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16474 tg3_flag_clear(tp, HW_TSO_2);
16475 tg3_flag_clear(tp, TSO_CAPABLE);
16476 }
16477 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16478 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16479 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16480 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16481 tg3_flag_set(tp, CLKREQ_BUG);
16482 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16483 tg3_flag_set(tp, L1PLLPD_EN);
16484 }
16485 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16486 /* BCM5785 devices are effectively PCIe devices, and should
16487 * follow PCIe codepaths, but do not have a PCIe capabilities
16488 * section.
16489 */
16490 tg3_flag_set(tp, PCI_EXPRESS);
16491 } else if (!tg3_flag(tp, 5705_PLUS) ||
16492 tg3_flag(tp, 5780_CLASS)) {
16493 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16494 if (!tp->pcix_cap) {
16495 dev_err(&tp->pdev->dev,
16496 "Cannot find PCI-X capability, aborting\n");
16497 return -EIO;
16498 }
16499
16500 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16501 tg3_flag_set(tp, PCIX_MODE);
16502 }
16503
16504 /* If we have an AMD 762 or VIA K8T800 chipset, write
16505 * reordering to the mailbox registers done by the host
16506 	 * controller can cause major trouble. We read back from
16507 * every mailbox register write to force the writes to be
16508 * posted to the chip in order.
16509 */
16510 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16511 !tg3_flag(tp, PCI_EXPRESS))
16512 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16513
16514 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16515 &tp->pci_cacheline_sz);
16516 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16517 &tp->pci_lat_timer);
16518 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16519 tp->pci_lat_timer < 64) {
16520 tp->pci_lat_timer = 64;
16521 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16522 tp->pci_lat_timer);
16523 }
16524
16525 /* Important! -- It is critical that the PCI-X hw workaround
16526 * situation is decided before the first MMIO register access.
16527 */
16528 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16529 /* 5700 BX chips need to have their TX producer index
16530 		 * mailboxes written twice to work around a bug.
16531 */
16532 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16533
16534 /* If we are in PCI-X mode, enable register write workaround.
16535 *
16536 * The workaround is to use indirect register accesses
16537 * for all chip writes not to mailbox registers.
16538 */
16539 if (tg3_flag(tp, PCIX_MODE)) {
16540 u32 pm_reg;
16541
16542 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16543
16544 			/* The chip can have its power management PCI config
16545 * space registers clobbered due to this bug.
16546 * So explicitly force the chip into D0 here.
16547 */
16548 pci_read_config_dword(tp->pdev,
16549 tp->pdev->pm_cap + PCI_PM_CTRL,
16550 &pm_reg);
16551 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16552 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16553 pci_write_config_dword(tp->pdev,
16554 tp->pdev->pm_cap + PCI_PM_CTRL,
16555 pm_reg);
16556
16557 /* Also, force SERR#/PERR# in PCI command. */
16558 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16559 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16560 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16561 }
16562 }
16563
16564 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16565 tg3_flag_set(tp, PCI_HIGH_SPEED);
16566 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16567 tg3_flag_set(tp, PCI_32BIT);
16568
16569 /* Chip-specific fixup from Broadcom driver */
16570 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16571 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16572 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16573 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16574 }
16575
16576 /* Default fast path register access methods */
16577 tp->read32 = tg3_read32;
16578 tp->write32 = tg3_write32;
16579 tp->read32_mbox = tg3_read32;
16580 tp->write32_mbox = tg3_write32;
16581 tp->write32_tx_mbox = tg3_write32;
16582 tp->write32_rx_mbox = tg3_write32;
16583
16584 /* Various workaround register access methods */
16585 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16586 tp->write32 = tg3_write_indirect_reg32;
16587 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16588 (tg3_flag(tp, PCI_EXPRESS) &&
16589 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16590 /*
16591 * Back to back register writes can cause problems on these
16592 * chips, the workaround is to read back all reg writes
16593 * except those to mailbox regs.
16594 *
16595 * See tg3_write_indirect_reg32().
16596 */
16597 tp->write32 = tg3_write_flush_reg32;
16598 }
16599
16600 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16601 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16602 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16603 tp->write32_rx_mbox = tg3_write_flush_reg32;
16604 }
16605
16606 if (tg3_flag(tp, ICH_WORKAROUND)) {
16607 tp->read32 = tg3_read_indirect_reg32;
16608 tp->write32 = tg3_write_indirect_reg32;
16609 tp->read32_mbox = tg3_read_indirect_mbox;
16610 tp->write32_mbox = tg3_write_indirect_mbox;
16611 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16612 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16613
16614 iounmap(tp->regs);
16615 tp->regs = NULL;
16616
16617 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16618 pci_cmd &= ~PCI_COMMAND_MEMORY;
16619 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16620 }
16621 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16622 tp->read32_mbox = tg3_read32_mbox_5906;
16623 tp->write32_mbox = tg3_write32_mbox_5906;
16624 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16625 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16626 }
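
	/* Editor's note: the assignments above form a small per-chip
	 * dispatch table.  Callers always go through tp->read32(),
	 * tp->write32() and the mailbox variants, so each erratum only
	 * swaps in a different accessor here instead of scattering
	 * conditionals across every register access in the driver.
	 */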
16627
16628 if (tp->write32 == tg3_write_indirect_reg32 ||
16629 (tg3_flag(tp, PCIX_MODE) &&
16630 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16631 tg3_asic_rev(tp) == ASIC_REV_5701)))
16632 tg3_flag_set(tp, SRAM_USE_CONFIG);
16633
16634 /* The memory arbiter has to be enabled in order for SRAM accesses
16635 * to succeed. Normally on powerup the tg3 chip firmware will make
16636 * sure it is enabled, but other entities such as system netboot
16637 * code might disable it.
16638 */
16639 val = tr32(MEMARB_MODE);
16640 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16641
16642 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16643 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16644 tg3_flag(tp, 5780_CLASS)) {
16645 if (tg3_flag(tp, PCIX_MODE)) {
16646 pci_read_config_dword(tp->pdev,
16647 tp->pcix_cap + PCI_X_STATUS,
16648 &val);
16649 tp->pci_fn = val & 0x7;
16650 }
16651 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16652 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16653 tg3_asic_rev(tp) == ASIC_REV_5720) {
16654 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16655 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16656 val = tr32(TG3_CPMU_STATUS);
16657
16658 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16659 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16660 else
16661 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16662 TG3_CPMU_STATUS_FSHFT_5719;
16663 }
16664
16665 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16666 tp->write32_tx_mbox = tg3_write_flush_reg32;
16667 tp->write32_rx_mbox = tg3_write_flush_reg32;
16668 }
16669
16670 /* Get eeprom hw config before calling tg3_set_power_state().
16671 * In particular, the TG3_FLAG_IS_NIC flag must be
16672 * determined before calling tg3_set_power_state() so that
16673 * we know whether or not to switch out of Vaux power.
16674 * When the flag is set, it means that GPIO1 is used for eeprom
16675 * write protect and also implies that it is a LOM where GPIOs
16676 * are not used to switch power.
16677 */
16678 tg3_get_eeprom_hw_cfg(tp);
16679
16680 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16681 tg3_flag_clear(tp, TSO_CAPABLE);
16682 tg3_flag_clear(tp, TSO_BUG);
16683 tp->fw_needed = NULL;
16684 }
16685
16686 if (tg3_flag(tp, ENABLE_APE)) {
16687 /* Allow reads and writes to the
16688 * APE register and memory space.
16689 */
16690 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16691 PCISTATE_ALLOW_APE_SHMEM_WR |
16692 PCISTATE_ALLOW_APE_PSPACE_WR;
16693 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16694 pci_state_reg);
16695
16696 tg3_ape_lock_init(tp);
16697 tp->ape_hb_interval =
16698 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16699 }
16700
16701 /* Set up tp->grc_local_ctrl before calling
16702 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16703 * will bring 5700's external PHY out of reset.
16704 * It is also used as eeprom write protect on LOMs.
16705 */
16706 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16707 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16708 tg3_flag(tp, EEPROM_WRITE_PROT))
16709 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16710 GRC_LCLCTRL_GPIO_OUTPUT1);
16711 /* Unused GPIO3 must be driven as output on 5752 because there
16712 * are no pull-up resistors on unused GPIO pins.
16713 */
16714 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16715 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16716
16717 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16718 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16719 tg3_flag(tp, 57765_CLASS))
16720 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16721
16722 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16723 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16724 /* Turn off the debug UART. */
16725 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16726 if (tg3_flag(tp, IS_NIC))
16727 /* Keep VMain power. */
16728 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16729 GRC_LCLCTRL_GPIO_OUTPUT0;
16730 }
16731
16732 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16733 tp->grc_local_ctrl |=
16734 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16735
16736 /* Switch out of Vaux if it is a NIC */
16737 tg3_pwrsrc_switch_to_vmain(tp);
16738
16739 /* Derive initial jumbo mode from MTU assigned in
16740 * ether_setup() via the alloc_etherdev() call
16741 */
16742 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16743 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16744
16745 /* Determine WakeOnLan speed to use. */
16746 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16747 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16748 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16749 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16750 tg3_flag_clear(tp, WOL_SPEED_100MB);
16751 } else {
16752 tg3_flag_set(tp, WOL_SPEED_100MB);
16753 }
16754
16755 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16756 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16757
16758 	/* A few boards don't want the Ethernet@WireSpeed phy feature */
16759 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16760 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16761 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16762 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16763 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16764 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16765 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16766
16767 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16768 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16769 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16770 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16771 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16772
16773 if (tg3_flag(tp, 5705_PLUS) &&
16774 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16775 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16776 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16777 !tg3_flag(tp, 57765_PLUS)) {
16778 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16779 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16780 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16781 tg3_asic_rev(tp) == ASIC_REV_5761) {
16782 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16783 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16784 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16785 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16786 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16787 } else
16788 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16789 }
16790
16791 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16792 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16793 tp->phy_otp = tg3_read_otp_phycfg(tp);
16794 if (tp->phy_otp == 0)
16795 tp->phy_otp = TG3_OTP_DEFAULT;
16796 }
16797
16798 if (tg3_flag(tp, CPMU_PRESENT))
16799 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16800 else
16801 tp->mi_mode = MAC_MI_MODE_BASE;
16802
16803 tp->coalesce_mode = 0;
16804 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16805 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16806 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16807
16808 /* Set these bits to enable statistics workaround. */
16809 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16810 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16811 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16812 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16813 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16814 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16815 }
16816
16817 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16818 tg3_asic_rev(tp) == ASIC_REV_57780)
16819 tg3_flag_set(tp, USE_PHYLIB);
16820
16821 err = tg3_mdio_init(tp);
16822 if (err)
16823 return err;
16824
16825 /* Initialize data/descriptor byte/word swapping. */
16826 val = tr32(GRC_MODE);
16827 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16828 tg3_asic_rev(tp) == ASIC_REV_5762)
16829 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16830 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16831 GRC_MODE_B2HRX_ENABLE |
16832 GRC_MODE_HTX2B_ENABLE |
16833 GRC_MODE_HOST_STACKUP);
16834 else
16835 val &= GRC_MODE_HOST_STACKUP;
16836
16837 tw32(GRC_MODE, val | tp->grc_mode);
16838
16839 tg3_switch_clocks(tp);
16840
16841 /* Clear this out for sanity. */
16842 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16843
16844 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16845 tw32(TG3PCI_REG_BASE_ADDR, 0);
16846
16847 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16848 &pci_state_reg);
16849 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16850 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16851 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16852 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16853 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16854 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16855 void __iomem *sram_base;
16856
16857 /* Write some dummy words into the SRAM status block
16858	 * area and see if it reads back correctly. If the return
16859 * value is bad, force enable the PCIX workaround.
16860 */
16861 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16862
16863 writel(0x00000000, sram_base);
16864 writel(0x00000000, sram_base + 4);
16865 writel(0xffffffff, sram_base + 4);
16866 if (readl(sram_base) != 0x00000000)
16867 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16868 }
16869 }
16870
16871 udelay(50);
16872 tg3_nvram_init(tp);
16873
16874 /* If the device has an NVRAM, no need to load patch firmware */
16875 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16876 !tg3_flag(tp, NO_NVRAM))
16877 tp->fw_needed = NULL;
16878
16879 grc_misc_cfg = tr32(GRC_MISC_CFG);
16880 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16881
16882 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16883 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16884 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16885 tg3_flag_set(tp, IS_5788);
16886
16887 if (!tg3_flag(tp, IS_5788) &&
16888 tg3_asic_rev(tp) != ASIC_REV_5700)
16889 tg3_flag_set(tp, TAGGED_STATUS);
16890 if (tg3_flag(tp, TAGGED_STATUS)) {
16891 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16892 HOSTCC_MODE_CLRTICK_TXBD);
16893
16894 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16895 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16896 tp->misc_host_ctrl);
16897 }
16898
16899 /* Preserve the APE MAC_MODE bits */
16900 if (tg3_flag(tp, ENABLE_APE))
16901 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16902 else
16903 tp->mac_mode = 0;
16904
16905 if (tg3_10_100_only_device(tp, ent))
16906 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16907
16908 err = tg3_phy_probe(tp);
16909 if (err) {
16910 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16911 /* ... but do not return immediately ... */
16912 tg3_mdio_fini(tp);
16913 }
16914
16915 tg3_read_vpd(tp);
16916 tg3_read_fw_ver(tp);
16917
16918 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16919 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16920 } else {
16921 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16922 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16923 else
16924 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16925 }
16926
16927 /* 5700 {AX,BX} chips have a broken status block link
16928 * change bit implementation, so we must use the
16929 * status register in those cases.
16930 */
16931 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16932 tg3_flag_set(tp, USE_LINKCHG_REG);
16933 else
16934 tg3_flag_clear(tp, USE_LINKCHG_REG);
16935
16936	/* The led_ctrl is set during tg3_phy_probe; here we may
16937	 * have to force the link status polling mechanism based
16938 * upon subsystem IDs.
16939 */
16940 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16941 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16942 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16943 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16944 tg3_flag_set(tp, USE_LINKCHG_REG);
16945 }
16946
16947 /* For all SERDES we poll the MAC status register. */
16948 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16949 tg3_flag_set(tp, POLL_SERDES);
16950 else
16951 tg3_flag_clear(tp, POLL_SERDES);
16952
16953 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16954 tg3_flag_set(tp, POLL_CPMU_LINK);
16955
16956 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16957 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16958 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16959 tg3_flag(tp, PCIX_MODE)) {
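		/* The 5701 in PCIX mode apparently cannot DMA safely to
		 * buffers at a 2-byte offset, so drop NET_IP_ALIGN.  On
		 * platforms without cheap unaligned loads, also force every
		 * packet through the copy path so the stack sees an aligned
		 * IP header.
		 */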
16960 tp->rx_offset = NET_SKB_PAD;
16961 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16962 tp->rx_copy_thresh = ~(u16)0;
16963 #endif
16964 }
16965
16966 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16967 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16968 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16969
16970 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16971
16972 /* Increment the rx prod index on the rx std ring by at most
16973	 * 8 for these chips to work around hw errata.
16974 */
16975 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16976 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16977 tg3_asic_rev(tp) == ASIC_REV_5755)
16978 tp->rx_std_max_post = 8;
16979
16980 if (tg3_flag(tp, ASPM_WORKAROUND))
16981 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16982 PCIE_PWR_MGMT_L1_THRESH_MSK;
16983
16984 return err;
16985 }
16986
16987 #ifdef CONFIG_SPARC
16988 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16989 {
16990 struct net_device *dev = tp->dev;
16991 struct pci_dev *pdev = tp->pdev;
16992 struct device_node *dp = pci_device_to_OF_node(pdev);
16993 const unsigned char *addr;
16994 int len;
16995
16996 addr = of_get_property(dp, "local-mac-address", &len);
16997 if (addr && len == ETH_ALEN) {
16998 memcpy(dev->dev_addr, addr, ETH_ALEN);
16999 return 0;
17000 }
17001 return -ENODEV;
17002 }
17003
17004 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
17005 {
17006 struct net_device *dev = tp->dev;
17007
17008 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
17009 return 0;
17010 }
17011 #endif
17012
17013 static int tg3_get_device_address(struct tg3 *tp)
17014 {
17015 struct net_device *dev = tp->dev;
17016 u32 hi, lo, mac_offset;
17017 int addr_ok = 0;
17018 int err;
17019
17020 #ifdef CONFIG_SPARC
17021 if (!tg3_get_macaddr_sparc(tp))
17022 return 0;
17023 #endif
17024
17025 if (tg3_flag(tp, IS_SSB_CORE)) {
17026 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
17027 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
17028 return 0;
17029 }
17030
17031 mac_offset = 0x7c;
17032 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17033 tg3_flag(tp, 5780_CLASS)) {
17034 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17035 mac_offset = 0xcc;
17036 if (tg3_nvram_lock(tp))
17037 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17038 else
17039 tg3_nvram_unlock(tp);
17040 } else if (tg3_flag(tp, 5717_PLUS)) {
17041 if (tp->pci_fn & 1)
17042 mac_offset = 0xcc;
17043 if (tp->pci_fn > 1)
17044 mac_offset += 0x18c;
17045 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17046 mac_offset = 0x10;
17047
17048 /* First try to get it from MAC address mailbox. */
17049 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17050 if ((hi >> 16) == 0x484b) {
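		/* 0x484b is ASCII "HK", apparently written by the bootcode
		 * as a valid-address signature.
		 */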
17051 dev->dev_addr[0] = (hi >> 8) & 0xff;
17052 dev->dev_addr[1] = (hi >> 0) & 0xff;
17053
17054 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17055 dev->dev_addr[2] = (lo >> 24) & 0xff;
17056 dev->dev_addr[3] = (lo >> 16) & 0xff;
17057 dev->dev_addr[4] = (lo >> 8) & 0xff;
17058 dev->dev_addr[5] = (lo >> 0) & 0xff;
17059
17060 /* Some old bootcode may report a 0 MAC address in SRAM */
17061 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17062 }
17063 if (!addr_ok) {
17064 /* Next, try NVRAM. */
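		/* tg3_nvram_read_be32() returns big-endian words, so the
		 * last two bytes of the first word plus all four bytes of
		 * the second form the six-byte MAC address.
		 */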
17065 if (!tg3_flag(tp, NO_NVRAM) &&
17066 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17067 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17068 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17069 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17070 }
17071 /* Finally just fetch it out of the MAC control regs. */
17072 else {
17073 hi = tr32(MAC_ADDR_0_HIGH);
17074 lo = tr32(MAC_ADDR_0_LOW);
17075
17076 dev->dev_addr[5] = lo & 0xff;
17077 dev->dev_addr[4] = (lo >> 8) & 0xff;
17078 dev->dev_addr[3] = (lo >> 16) & 0xff;
17079 dev->dev_addr[2] = (lo >> 24) & 0xff;
17080 dev->dev_addr[1] = hi & 0xff;
17081 dev->dev_addr[0] = (hi >> 8) & 0xff;
17082 }
17083 }
17084
17085 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17086 #ifdef CONFIG_SPARC
17087 if (!tg3_get_default_macaddr_sparc(tp))
17088 return 0;
17089 #endif
17090 return -EINVAL;
17091 }
17092 return 0;
17093 }
17094
17095 #define BOUNDARY_SINGLE_CACHELINE 1
17096 #define BOUNDARY_MULTI_CACHELINE 2
17097
17098 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17099 {
17100 int cacheline_size;
17101 u8 byte;
17102 int goal;
17103
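	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; a value of 0
	 * usually means firmware never programmed it, so assume a
	 * conservative 1024-byte line.
	 */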
17104 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17105 if (byte == 0)
17106 cacheline_size = 1024;
17107 else
17108 cacheline_size = (int) byte * 4;
17109
17110 /* On 5703 and later chips, the boundary bits have no
17111 * effect.
17112 */
17113 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17114 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17115 !tg3_flag(tp, PCI_EXPRESS))
17116 goto out;
17117
17118 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17119 goal = BOUNDARY_MULTI_CACHELINE;
17120 #else
17121 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17122 goal = BOUNDARY_SINGLE_CACHELINE;
17123 #else
17124 goal = 0;
17125 #endif
17126 #endif
17127
17128 if (tg3_flag(tp, 57765_PLUS)) {
17129 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17130 goto out;
17131 }
17132
17133 if (!goal)
17134 goto out;
17135
17136 /* PCI controllers on most RISC systems tend to disconnect
17137 * when a device tries to burst across a cache-line boundary.
17138 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17139 *
17140 * Unfortunately, for PCI-E there are only limited
17141 * write-side controls for this, and thus for reads
17142 * we will still get the disconnects. We'll also waste
17143 * these PCI cycles for both read and write for chips
17144 * other than 5700 and 5701 which do not implement the
17145 * boundary bits.
17146 */
17147 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17148 switch (cacheline_size) {
17149 case 16:
17150 case 32:
17151 case 64:
17152 case 128:
17153 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17154 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17155 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17156 } else {
17157 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17158 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17159 }
17160 break;
17161
17162 case 256:
17163 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17164 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17165 break;
17166
17167 default:
17168 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17169 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17170 break;
17171 }
17172 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17173 switch (cacheline_size) {
17174 case 16:
17175 case 32:
17176 case 64:
17177 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17178 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17179 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17180 break;
17181 }
17182 /* fallthrough */
17183 case 128:
17184 default:
17185 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17186 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17187 break;
17188 }
17189 } else {
17190 switch (cacheline_size) {
17191 case 16:
17192 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17193 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17194 DMA_RWCTRL_WRITE_BNDRY_16);
17195 break;
17196 }
17197 /* fallthrough */
17198 case 32:
17199 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17200 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17201 DMA_RWCTRL_WRITE_BNDRY_32);
17202 break;
17203 }
17204 /* fallthrough */
17205 case 64:
17206 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17207 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17208 DMA_RWCTRL_WRITE_BNDRY_64);
17209 break;
17210 }
17211 /* fallthrough */
17212 case 128:
17213 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17214 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17215 DMA_RWCTRL_WRITE_BNDRY_128);
17216 break;
17217 }
17218 /* fallthrough */
17219 case 256:
17220 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17221 DMA_RWCTRL_WRITE_BNDRY_256);
17222 break;
17223 case 512:
17224 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17225 DMA_RWCTRL_WRITE_BNDRY_512);
17226 break;
17227 case 1024:
17228 default:
17229 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17230 DMA_RWCTRL_WRITE_BNDRY_1024);
17231 break;
17232 }
17233 }
17234
17235 out:
17236 return val;
17237 }
17238
17239 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17240 int size, bool to_device)
17241 {
17242 struct tg3_internal_buffer_desc test_desc;
17243 u32 sram_dma_descs;
17244 int i, ret;
17245
17246 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17247
17248 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17249 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17250 tw32(RDMAC_STATUS, 0);
17251 tw32(WDMAC_STATUS, 0);
17252
17253 tw32(BUFMGR_MODE, 0);
17254 tw32(FTQ_RESET, 0);
17255
17256 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17257 test_desc.addr_lo = buf_dma & 0xffffffff;
17258 test_desc.nic_mbuf = 0x00002100;
17259 test_desc.len = size;
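	/* The descriptor built here is copied word-by-word into NIC SRAM
	 * through the PCI memory window below, then queued on the DMA-high
	 * FIFO.
	 */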
17260
17261 /*
17262	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17263 * the *second* time the tg3 driver was getting loaded after an
17264 * initial scan.
17265 *
17266 * Broadcom tells me:
17267 * ...the DMA engine is connected to the GRC block and a DMA
17268 * reset may affect the GRC block in some unpredictable way...
17269 * The behavior of resets to individual blocks has not been tested.
17270 *
17271 * Broadcom noted the GRC reset will also reset all sub-components.
17272 */
17273 if (to_device) {
17274 test_desc.cqid_sqid = (13 << 8) | 2;
17275
17276 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17277 udelay(40);
17278 } else {
17279 test_desc.cqid_sqid = (16 << 8) | 7;
17280
17281 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17282 udelay(40);
17283 }
17284 test_desc.flags = 0x00000005;
17285
17286 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17287 u32 val;
17288
17289 val = *(((u32 *)&test_desc) + i);
17290 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17291 sram_dma_descs + (i * sizeof(u32)));
17292 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17293 }
17294 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17295
17296 if (to_device)
17297 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17298 else
17299 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17300
17301 ret = -ENODEV;
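	/* Poll up to 40 * 100us for the completion FIFO to hand back the
	 * descriptor address we queued, else time out with -ENODEV.
	 */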
17302 for (i = 0; i < 40; i++) {
17303 u32 val;
17304
17305 if (to_device)
17306 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17307 else
17308 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17309 if ((val & 0xffff) == sram_dma_descs) {
17310 ret = 0;
17311 break;
17312 }
17313
17314 udelay(100);
17315 }
17316
17317 return ret;
17318 }
17319
17320 #define TEST_BUFFER_SIZE 0x2000
17321
17322 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17323 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17324 { },
17325 };
17326
17327 static int tg3_test_dma(struct tg3 *tp)
17328 {
17329 dma_addr_t buf_dma;
17330 u32 *buf, saved_dma_rwctrl;
17331 int ret = 0;
17332
17333 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17334 &buf_dma, GFP_KERNEL);
17335 if (!buf) {
17336 ret = -ENOMEM;
17337 goto out_nofree;
17338 }
17339
17340 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17341 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17342
17343 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17344
17345 if (tg3_flag(tp, 57765_PLUS))
17346 goto out;
17347
17348 if (tg3_flag(tp, PCI_EXPRESS)) {
17349 /* DMA read watermark not used on PCIE */
17350 tp->dma_rwctrl |= 0x00180000;
17351 } else if (!tg3_flag(tp, PCIX_MODE)) {
17352 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17353 tg3_asic_rev(tp) == ASIC_REV_5750)
17354 tp->dma_rwctrl |= 0x003f0000;
17355 else
17356 tp->dma_rwctrl |= 0x003f000f;
17357 } else {
17358 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17359 tg3_asic_rev(tp) == ASIC_REV_5704) {
17360 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17361 u32 read_water = 0x7;
17362
17363 /* If the 5704 is behind the EPB bridge, we can
17364 * do the less restrictive ONE_DMA workaround for
17365 * better performance.
17366 */
17367 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17368 tg3_asic_rev(tp) == ASIC_REV_5704)
17369 tp->dma_rwctrl |= 0x8000;
17370 else if (ccval == 0x6 || ccval == 0x7)
17371 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17372
17373 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17374 read_water = 4;
17375 /* Set bit 23 to enable PCIX hw bug fix */
17376 tp->dma_rwctrl |=
17377 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17378 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17379 (1 << 23);
17380 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17381 /* 5780 always in PCIX mode */
17382 tp->dma_rwctrl |= 0x00144000;
17383 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17384 /* 5714 always in PCIX mode */
17385 tp->dma_rwctrl |= 0x00148000;
17386 } else {
17387 tp->dma_rwctrl |= 0x001b000f;
17388 }
17389 }
17390 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17391 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17392
17393 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17394 tg3_asic_rev(tp) == ASIC_REV_5704)
17395 tp->dma_rwctrl &= 0xfffffff0;
17396
17397 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17398 tg3_asic_rev(tp) == ASIC_REV_5701) {
17399 /* Remove this if it causes problems for some boards. */
17400 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17401
17402 /* On 5700/5701 chips, we need to set this bit.
17403 * Otherwise the chip will issue cacheline transactions
17404 * to streamable DMA memory with not all the byte
17405 * enables turned on. This is an error on several
17406 * RISC PCI controllers, in particular sparc64.
17407 *
17408 * On 5703/5704 chips, this bit has been reassigned
17409 * a different meaning. In particular, it is used
17410 * on those chips to enable a PCI-X workaround.
17411 */
17412 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17413 }
17414
17415 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17416
17417
17418 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17419 tg3_asic_rev(tp) != ASIC_REV_5701)
17420 goto out;
17421
17422 /* It is best to perform DMA test with maximum write burst size
17423 * to expose the 5700/5701 write DMA bug.
17424 */
17425 saved_dma_rwctrl = tp->dma_rwctrl;
17426 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17427 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17428
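	/* Write/read/verify a test pattern; on the first corruption,
	 * tighten the DMA write boundary to 16 bytes and retry.
	 */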
17429 while (1) {
17430 u32 *p = buf, i;
17431
17432 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17433 p[i] = i;
17434
17435 /* Send the buffer to the chip. */
17436 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17437 if (ret) {
17438 dev_err(&tp->pdev->dev,
17439 "%s: Buffer write failed. err = %d\n",
17440 __func__, ret);
17441 break;
17442 }
17443
17444 /* Now read it back. */
17445 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17446 if (ret) {
17447 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17448 "err = %d\n", __func__, ret);
17449 break;
17450 }
17451
17452 /* Verify it. */
17453 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17454 if (p[i] == i)
17455 continue;
17456
17457 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17458 DMA_RWCTRL_WRITE_BNDRY_16) {
17459 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17460 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17461 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17462 break;
17463 } else {
17464 dev_err(&tp->pdev->dev,
17465 "%s: Buffer corrupted on read back! "
17466 "(%d != %d)\n", __func__, p[i], i);
17467 ret = -ENODEV;
17468 goto out;
17469 }
17470 }
17471
17472 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17473 /* Success. */
17474 ret = 0;
17475 break;
17476 }
17477 }
17478 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17479 DMA_RWCTRL_WRITE_BNDRY_16) {
17480 /* DMA test passed without adjusting DMA boundary,
17481 * now look for chipsets that are known to expose the
17482 * DMA bug without failing the test.
17483 */
17484 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17485 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17486 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17487 } else {
17488 /* Safe to use the calculated DMA boundary. */
17489 tp->dma_rwctrl = saved_dma_rwctrl;
17490 }
17491
17492 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17493 }
17494
17495 out:
17496 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17497 out_nofree:
17498 return ret;
17499 }
17500
17501 static void tg3_init_bufmgr_config(struct tg3 *tp)
17502 {
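	/* Pick mbuf and DMA watermarks by chip family; the jumbo values
	 * should only take effect when the jumbo ring is in use.
	 */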
17503 if (tg3_flag(tp, 57765_PLUS)) {
17504 tp->bufmgr_config.mbuf_read_dma_low_water =
17505 DEFAULT_MB_RDMA_LOW_WATER_5705;
17506 tp->bufmgr_config.mbuf_mac_rx_low_water =
17507 DEFAULT_MB_MACRX_LOW_WATER_57765;
17508 tp->bufmgr_config.mbuf_high_water =
17509 DEFAULT_MB_HIGH_WATER_57765;
17510
17511 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17512 DEFAULT_MB_RDMA_LOW_WATER_5705;
17513 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17514 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17515 tp->bufmgr_config.mbuf_high_water_jumbo =
17516 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17517 } else if (tg3_flag(tp, 5705_PLUS)) {
17518 tp->bufmgr_config.mbuf_read_dma_low_water =
17519 DEFAULT_MB_RDMA_LOW_WATER_5705;
17520 tp->bufmgr_config.mbuf_mac_rx_low_water =
17521 DEFAULT_MB_MACRX_LOW_WATER_5705;
17522 tp->bufmgr_config.mbuf_high_water =
17523 DEFAULT_MB_HIGH_WATER_5705;
17524 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17525 tp->bufmgr_config.mbuf_mac_rx_low_water =
17526 DEFAULT_MB_MACRX_LOW_WATER_5906;
17527 tp->bufmgr_config.mbuf_high_water =
17528 DEFAULT_MB_HIGH_WATER_5906;
17529 }
17530
17531 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17532 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17533 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17534 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17535 tp->bufmgr_config.mbuf_high_water_jumbo =
17536 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17537 } else {
17538 tp->bufmgr_config.mbuf_read_dma_low_water =
17539 DEFAULT_MB_RDMA_LOW_WATER;
17540 tp->bufmgr_config.mbuf_mac_rx_low_water =
17541 DEFAULT_MB_MACRX_LOW_WATER;
17542 tp->bufmgr_config.mbuf_high_water =
17543 DEFAULT_MB_HIGH_WATER;
17544
17545 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17546 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17547 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17548 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17549 tp->bufmgr_config.mbuf_high_water_jumbo =
17550 DEFAULT_MB_HIGH_WATER_JUMBO;
17551 }
17552
17553 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17554 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17555 }
17556
17557 static char *tg3_phy_string(struct tg3 *tp)
17558 {
17559 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17560 case TG3_PHY_ID_BCM5400: return "5400";
17561 case TG3_PHY_ID_BCM5401: return "5401";
17562 case TG3_PHY_ID_BCM5411: return "5411";
17563 case TG3_PHY_ID_BCM5701: return "5701";
17564 case TG3_PHY_ID_BCM5703: return "5703";
17565 case TG3_PHY_ID_BCM5704: return "5704";
17566 case TG3_PHY_ID_BCM5705: return "5705";
17567 case TG3_PHY_ID_BCM5750: return "5750";
17568 case TG3_PHY_ID_BCM5752: return "5752";
17569 case TG3_PHY_ID_BCM5714: return "5714";
17570 case TG3_PHY_ID_BCM5780: return "5780";
17571 case TG3_PHY_ID_BCM5755: return "5755";
17572 case TG3_PHY_ID_BCM5787: return "5787";
17573 case TG3_PHY_ID_BCM5784: return "5784";
17574 case TG3_PHY_ID_BCM5756: return "5722/5756";
17575 case TG3_PHY_ID_BCM5906: return "5906";
17576 case TG3_PHY_ID_BCM5761: return "5761";
17577 case TG3_PHY_ID_BCM5718C: return "5718C";
17578 case TG3_PHY_ID_BCM5718S: return "5718S";
17579 case TG3_PHY_ID_BCM57765: return "57765";
17580 case TG3_PHY_ID_BCM5719C: return "5719C";
17581 case TG3_PHY_ID_BCM5720C: return "5720C";
17582 case TG3_PHY_ID_BCM5762: return "5762C";
17583 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17584 case 0: return "serdes";
17585 default: return "unknown";
17586 }
17587 }
17588
17589 static char *tg3_bus_string(struct tg3 *tp, char *str)
17590 {
17591 if (tg3_flag(tp, PCI_EXPRESS)) {
17592 strcpy(str, "PCI Express");
17593 return str;
17594 } else if (tg3_flag(tp, PCIX_MODE)) {
17595 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17596
17597 strcpy(str, "PCIX:");
17598
17599 if ((clock_ctrl == 7) ||
17600 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17601 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17602 strcat(str, "133MHz");
17603 else if (clock_ctrl == 0)
17604 strcat(str, "33MHz");
17605 else if (clock_ctrl == 2)
17606 strcat(str, "50MHz");
17607 else if (clock_ctrl == 4)
17608 strcat(str, "66MHz");
17609 else if (clock_ctrl == 6)
17610 strcat(str, "100MHz");
17611 } else {
17612 strcpy(str, "PCI:");
17613 if (tg3_flag(tp, PCI_HIGH_SPEED))
17614 strcat(str, "66MHz");
17615 else
17616 strcat(str, "33MHz");
17617 }
17618 if (tg3_flag(tp, PCI_32BIT))
17619 strcat(str, ":32-bit");
17620 else
17621 strcat(str, ":64-bit");
17622 return str;
17623 }
17624
17625 static void tg3_init_coal(struct tg3 *tp)
17626 {
17627 struct ethtool_coalesce *ec = &tp->coal;
17628
17629 memset(ec, 0, sizeof(*ec));
17630 ec->cmd = ETHTOOL_GCOALESCE;
17631 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17632 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17633 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17634 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17635 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17636 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17637 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17638 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17639 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17640
17641 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17642 HOSTCC_MODE_CLRTICK_TXBD)) {
17643 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17644 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17645 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17646 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17647 }
17648
17649 if (tg3_flag(tp, 5705_PLUS)) {
17650 ec->rx_coalesce_usecs_irq = 0;
17651 ec->tx_coalesce_usecs_irq = 0;
17652 ec->stats_block_coalesce_usecs = 0;
17653 }
17654 }
17655
17656 static int tg3_init_one(struct pci_dev *pdev,
17657 const struct pci_device_id *ent)
17658 {
17659 struct net_device *dev;
17660 struct tg3 *tp;
17661 int i, err;
17662 u32 sndmbx, rcvmbx, intmbx;
17663 char str[40];
17664 u64 dma_mask, persist_dma_mask;
17665 netdev_features_t features = 0;
17666
17667 printk_once(KERN_INFO "%s\n", version);
17668
17669 err = pci_enable_device(pdev);
17670 if (err) {
17671 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17672 return err;
17673 }
17674
17675 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17676 if (err) {
17677 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17678 goto err_out_disable_pdev;
17679 }
17680
17681 pci_set_master(pdev);
17682
17683 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17684 if (!dev) {
17685 err = -ENOMEM;
17686 goto err_out_free_res;
17687 }
17688
17689 SET_NETDEV_DEV(dev, &pdev->dev);
17690
17691 tp = netdev_priv(dev);
17692 tp->pdev = pdev;
17693 tp->dev = dev;
17694 tp->rx_mode = TG3_DEF_RX_MODE;
17695 tp->tx_mode = TG3_DEF_TX_MODE;
17696 tp->irq_sync = 1;
17697 tp->pcierr_recovery = false;
17698
17699 if (tg3_debug > 0)
17700 tp->msg_enable = tg3_debug;
17701 else
17702 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17703
17704 if (pdev_is_ssb_gige_core(pdev)) {
17705 tg3_flag_set(tp, IS_SSB_CORE);
17706 if (ssb_gige_must_flush_posted_writes(pdev))
17707 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17708 if (ssb_gige_one_dma_at_once(pdev))
17709 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17710 if (ssb_gige_have_roboswitch(pdev)) {
17711 tg3_flag_set(tp, USE_PHYLIB);
17712 tg3_flag_set(tp, ROBOSWITCH);
17713 }
17714 if (ssb_gige_is_rgmii(pdev))
17715 tg3_flag_set(tp, RGMII_MODE);
17716 }
17717
17718 /* The word/byte swap controls here control register access byte
17719 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17720 * setting below.
17721 */
17722 tp->misc_host_ctrl =
17723 MISC_HOST_CTRL_MASK_PCI_INT |
17724 MISC_HOST_CTRL_WORD_SWAP |
17725 MISC_HOST_CTRL_INDIR_ACCESS |
17726 MISC_HOST_CTRL_PCISTATE_RW;
17727
17728 /* The NONFRM (non-frame) byte/word swap controls take effect
17729 * on descriptor entries, anything which isn't packet data.
17730 *
17731 * The StrongARM chips on the board (one for tx, one for rx)
17732 * are running in big-endian mode.
17733 */
17734 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17735 GRC_MODE_WSWAP_NONFRM_DATA);
17736 #ifdef __BIG_ENDIAN
17737 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17738 #endif
17739 spin_lock_init(&tp->lock);
17740 spin_lock_init(&tp->indirect_lock);
17741 INIT_WORK(&tp->reset_task, tg3_reset_task);
17742
17743 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17744 if (!tp->regs) {
17745 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17746 err = -ENOMEM;
17747 goto err_out_free_dev;
17748 }
17749
17750 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17751 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17752 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17753 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17754 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17755 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17756 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17757 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17758 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17759 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17760 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17761 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17762 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17763 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17764 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17765 tg3_flag_set(tp, ENABLE_APE);
17766 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17767 if (!tp->aperegs) {
17768 dev_err(&pdev->dev,
17769 "Cannot map APE registers, aborting\n");
17770 err = -ENOMEM;
17771 goto err_out_iounmap;
17772 }
17773 }
17774
17775 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17776 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17777
17778 dev->ethtool_ops = &tg3_ethtool_ops;
17779 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17780 dev->netdev_ops = &tg3_netdev_ops;
17781 dev->irq = pdev->irq;
17782
17783 err = tg3_get_invariants(tp, ent);
17784 if (err) {
17785 dev_err(&pdev->dev,
17786 "Problem fetching invariants of chip, aborting\n");
17787 goto err_out_apeunmap;
17788 }
17789
17790 /* The EPB bridge inside 5714, 5715, and 5780 and any
17791 * device behind the EPB cannot support DMA addresses > 40-bit.
17792 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17793 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17794 * do DMA address check in tg3_start_xmit().
17795 */
17796 if (tg3_flag(tp, IS_5788))
17797 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17798 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17799 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17800 #ifdef CONFIG_HIGHMEM
17801 dma_mask = DMA_BIT_MASK(64);
17802 #endif
17803 } else
17804 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17805
17806 /* Configure DMA attributes. */
17807 if (dma_mask > DMA_BIT_MASK(32)) {
17808 err = pci_set_dma_mask(pdev, dma_mask);
17809 if (!err) {
17810 features |= NETIF_F_HIGHDMA;
17811 err = pci_set_consistent_dma_mask(pdev,
17812 persist_dma_mask);
17813 if (err < 0) {
17814 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17815 "DMA for consistent allocations\n");
17816 goto err_out_apeunmap;
17817 }
17818 }
17819 }
17820 if (err || dma_mask == DMA_BIT_MASK(32)) {
17821 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17822 if (err) {
17823 dev_err(&pdev->dev,
17824 "No usable DMA configuration, aborting\n");
17825 goto err_out_apeunmap;
17826 }
17827 }
17828
17829 tg3_init_bufmgr_config(tp);
17830
17831 /* 5700 B0 chips do not support checksumming correctly due
17832 * to hardware bugs.
17833 */
17834 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17835 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17836
17837 if (tg3_flag(tp, 5755_PLUS))
17838 features |= NETIF_F_IPV6_CSUM;
17839 }
17840
17841 /* TSO is on by default on chips that support hardware TSO.
17842 * Firmware TSO on older chips gives lower performance, so it
17843 * is off by default, but can be enabled using ethtool.
17844 */
17845 if ((tg3_flag(tp, HW_TSO_1) ||
17846 tg3_flag(tp, HW_TSO_2) ||
17847 tg3_flag(tp, HW_TSO_3)) &&
17848 (features & NETIF_F_IP_CSUM))
17849 features |= NETIF_F_TSO;
17850 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17851 if (features & NETIF_F_IPV6_CSUM)
17852 features |= NETIF_F_TSO6;
17853 if (tg3_flag(tp, HW_TSO_3) ||
17854 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17855 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17856 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17857 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17858 tg3_asic_rev(tp) == ASIC_REV_57780)
17859 features |= NETIF_F_TSO_ECN;
17860 }
17861
17862 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17863 NETIF_F_HW_VLAN_CTAG_RX;
17864 dev->vlan_features |= features;
17865
17866 /*
17867 * Add loopback capability only for a subset of devices that support
17868	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17869 * loopback for the remaining devices.
17870 */
17871 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17872 !tg3_flag(tp, CPMU_PRESENT))
17873 /* Add the loopback capability */
17874 features |= NETIF_F_LOOPBACK;
17875
17876 dev->hw_features |= features;
17877 dev->priv_flags |= IFF_UNICAST_FLT;
17878
17879 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17880 dev->min_mtu = TG3_MIN_MTU;
17881 dev->max_mtu = TG3_MAX_MTU(tp);
17882
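	/* 5705 A1 parts without TSO on a slow PCI bus apparently cannot
	 * keep a full RX ring posted; cap the posted RX BDs at 64.
	 */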
17883 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17884 !tg3_flag(tp, TSO_CAPABLE) &&
17885 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17886 tg3_flag_set(tp, MAX_RXPEND_64);
17887 tp->rx_pending = 63;
17888 }
17889
17890 err = tg3_get_device_address(tp);
17891 if (err) {
17892 dev_err(&pdev->dev,
17893 "Could not obtain valid ethernet address, aborting\n");
17894 goto err_out_apeunmap;
17895 }
17896
17897 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17898 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17899 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17900 for (i = 0; i < tp->irq_max; i++) {
17901 struct tg3_napi *tnapi = &tp->napi[i];
17902
17903 tnapi->tp = tp;
17904 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17905
17906 tnapi->int_mbox = intmbx;
17907 if (i <= 4)
17908 intmbx += 0x8;
17909 else
17910 intmbx += 0x4;
17911
17912 tnapi->consmbox = rcvmbx;
17913 tnapi->prodmbox = sndmbx;
17914
17915 if (i)
17916 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17917 else
17918 tnapi->coal_now = HOSTCC_MODE_NOW;
17919
17920 if (!tg3_flag(tp, SUPPORT_MSIX))
17921 break;
17922
17923 /*
17924 * If we support MSIX, we'll be using RSS. If we're using
17925 * RSS, the first vector only handles link interrupts and the
17926 * remaining vectors handle rx and tx interrupts. Reuse the
17927	 * mailbox values for the next iteration. The values we set up
17928 * above are still useful for the single vectored mode.
17929 */
17930 if (!i)
17931 continue;
17932
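		/* The per-vector receive-return mailboxes are a uniform
		 * 8 bytes apart; the send producer mailboxes alternate
		 * -0x4/+0xc, which appears to match the hardware's
		 * interleaved mailbox layout.
		 */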
17933 rcvmbx += 0x8;
17934
17935 if (sndmbx & 0x4)
17936 sndmbx -= 0x4;
17937 else
17938 sndmbx += 0xc;
17939 }
17940
17941 /*
17942	 * Reset chip in case UNDI or EFI driver did not shut it down.
17943	 * The DMA self test will enable WDMAC and we'll see (spurious)
17944 * pending DMA on the PCI bus at that point.
17945 */
17946 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17947 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17948 tg3_full_lock(tp, 0);
17949 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17950 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17951 tg3_full_unlock(tp);
17952 }
17953
17954 err = tg3_test_dma(tp);
17955 if (err) {
17956 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17957 goto err_out_apeunmap;
17958 }
17959
17960 tg3_init_coal(tp);
17961
17962 pci_set_drvdata(pdev, dev);
17963
17964 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17965 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17966 tg3_asic_rev(tp) == ASIC_REV_5762)
17967 tg3_flag_set(tp, PTP_CAPABLE);
17968
17969 tg3_timer_init(tp);
17970
17971 tg3_carrier_off(tp);
17972
17973 err = register_netdev(dev);
17974 if (err) {
17975 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17976 goto err_out_apeunmap;
17977 }
17978
17979 if (tg3_flag(tp, PTP_CAPABLE)) {
17980 tg3_ptp_init(tp);
17981 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17982 &tp->pdev->dev);
17983 if (IS_ERR(tp->ptp_clock))
17984 tp->ptp_clock = NULL;
17985 }
17986
17987 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17988 tp->board_part_number,
17989 tg3_chip_rev_id(tp),
17990 tg3_bus_string(tp, str),
17991 dev->dev_addr);
17992
17993 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17994 char *ethtype;
17995
17996 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17997 ethtype = "10/100Base-TX";
17998 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17999 ethtype = "1000Base-SX";
18000 else
18001 ethtype = "10/100/1000Base-T";
18002
18003 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
18004 "(WireSpeed[%d], EEE[%d])\n",
18005 tg3_phy_string(tp), ethtype,
18006 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
18007 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
18008 }
18009
18010 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
18011 (dev->features & NETIF_F_RXCSUM) != 0,
18012 tg3_flag(tp, USE_LINKCHG_REG) != 0,
18013 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18014 tg3_flag(tp, ENABLE_ASF) != 0,
18015 tg3_flag(tp, TSO_CAPABLE) != 0);
18016 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18017 tp->dma_rwctrl,
18018 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18019 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18020
18021 pci_save_state(pdev);
18022
18023 return 0;
18024
18025 err_out_apeunmap:
18026 if (tp->aperegs) {
18027 iounmap(tp->aperegs);
18028 tp->aperegs = NULL;
18029 }
18030
18031 err_out_iounmap:
18032 if (tp->regs) {
18033 iounmap(tp->regs);
18034 tp->regs = NULL;
18035 }
18036
18037 err_out_free_dev:
18038 free_netdev(dev);
18039
18040 err_out_free_res:
18041 pci_release_regions(pdev);
18042
18043 err_out_disable_pdev:
18044 if (pci_is_enabled(pdev))
18045 pci_disable_device(pdev);
18046 return err;
18047 }
18048
18049 static void tg3_remove_one(struct pci_dev *pdev)
18050 {
18051 struct net_device *dev = pci_get_drvdata(pdev);
18052
18053 if (dev) {
18054 struct tg3 *tp = netdev_priv(dev);
18055
18056 tg3_ptp_fini(tp);
18057
18058 release_firmware(tp->fw);
18059
18060 tg3_reset_task_cancel(tp);
18061
18062 if (tg3_flag(tp, USE_PHYLIB)) {
18063 tg3_phy_fini(tp);
18064 tg3_mdio_fini(tp);
18065 }
18066
18067 unregister_netdev(dev);
18068 if (tp->aperegs) {
18069 iounmap(tp->aperegs);
18070 tp->aperegs = NULL;
18071 }
18072 if (tp->regs) {
18073 iounmap(tp->regs);
18074 tp->regs = NULL;
18075 }
18076 free_netdev(dev);
18077 pci_release_regions(pdev);
18078 pci_disable_device(pdev);
18079 }
18080 }
18081
18082 #ifdef CONFIG_PM_SLEEP
18083 static int tg3_suspend(struct device *device)
18084 {
18085 struct pci_dev *pdev = to_pci_dev(device);
18086 struct net_device *dev = pci_get_drvdata(pdev);
18087 struct tg3 *tp = netdev_priv(dev);
18088 int err = 0;
18089
18090 rtnl_lock();
18091
18092 if (!netif_running(dev))
18093 goto unlock;
18094
18095 tg3_reset_task_cancel(tp);
18096 tg3_phy_stop(tp);
18097 tg3_netif_stop(tp);
18098
18099 tg3_timer_stop(tp);
18100
18101 tg3_full_lock(tp, 1);
18102 tg3_disable_ints(tp);
18103 tg3_full_unlock(tp);
18104
18105 netif_device_detach(dev);
18106
18107 tg3_full_lock(tp, 0);
18108 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18109 tg3_flag_clear(tp, INIT_COMPLETE);
18110 tg3_full_unlock(tp);
18111
18112 err = tg3_power_down_prepare(tp);
18113 if (err) {
18114 int err2;
18115
18116 tg3_full_lock(tp, 0);
18117
18118 tg3_flag_set(tp, INIT_COMPLETE);
18119 err2 = tg3_restart_hw(tp, true);
18120 if (err2)
18121 goto out;
18122
18123 tg3_timer_start(tp);
18124
18125 netif_device_attach(dev);
18126 tg3_netif_start(tp);
18127
18128 out:
18129 tg3_full_unlock(tp);
18130
18131 if (!err2)
18132 tg3_phy_start(tp);
18133 }
18134
18135 unlock:
18136 rtnl_unlock();
18137 return err;
18138 }
18139
18140 static int tg3_resume(struct device *device)
18141 {
18142 struct pci_dev *pdev = to_pci_dev(device);
18143 struct net_device *dev = pci_get_drvdata(pdev);
18144 struct tg3 *tp = netdev_priv(dev);
18145 int err = 0;
18146
18147 rtnl_lock();
18148
18149 if (!netif_running(dev))
18150 goto unlock;
18151
18152 netif_device_attach(dev);
18153
18154 tg3_full_lock(tp, 0);
18155
18156 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18157
18158 tg3_flag_set(tp, INIT_COMPLETE);
18159 err = tg3_restart_hw(tp,
18160 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18161 if (err)
18162 goto out;
18163
18164 tg3_timer_start(tp);
18165
18166 tg3_netif_start(tp);
18167
18168 out:
18169 tg3_full_unlock(tp);
18170
18171 if (!err)
18172 tg3_phy_start(tp);
18173
18174 unlock:
18175 rtnl_unlock();
18176 return err;
18177 }
18178 #endif /* CONFIG_PM_SLEEP */
18179
18180 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18181
18182 static void tg3_shutdown(struct pci_dev *pdev)
18183 {
18184 struct net_device *dev = pci_get_drvdata(pdev);
18185 struct tg3 *tp = netdev_priv(dev);
18186
18187 rtnl_lock();
18188 netif_device_detach(dev);
18189
18190 if (netif_running(dev))
18191 dev_close(dev);
18192
18193 if (system_state == SYSTEM_POWER_OFF)
18194 tg3_power_down(tp);
18195
18196 rtnl_unlock();
18197 }
18198
18199 /**
18200 * tg3_io_error_detected - called when PCI error is detected
18201 * @pdev: Pointer to PCI device
18202 * @state: The current pci connection state
18203 *
18204 * This function is called after a PCI bus error affecting
18205 * this device has been detected.
18206 */
18207 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18208 pci_channel_state_t state)
18209 {
18210 struct net_device *netdev = pci_get_drvdata(pdev);
18211 struct tg3 *tp = netdev_priv(netdev);
18212 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18213
18214 netdev_info(netdev, "PCI I/O error detected\n");
18215
18216 rtnl_lock();
18217
18218 /* We probably don't have netdev yet */
18219 if (!netdev || !netif_running(netdev))
18220 goto done;
18221
18222	/* Only the frozen state needs recovery; permanent failure is handled below */
18223 if (state == pci_channel_io_frozen)
18224 tp->pcierr_recovery = true;
18225
18226 tg3_phy_stop(tp);
18227
18228 tg3_netif_stop(tp);
18229
18230 tg3_timer_stop(tp);
18231
18232 /* Want to make sure that the reset task doesn't run */
18233 tg3_reset_task_cancel(tp);
18234
18235 netif_device_detach(netdev);
18236
18237 /* Clean up software state, even if MMIO is blocked */
18238 tg3_full_lock(tp, 0);
18239 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18240 tg3_full_unlock(tp);
18241
18242 done:
18243 if (state == pci_channel_io_perm_failure) {
18244 if (netdev) {
18245 tg3_napi_enable(tp);
18246 dev_close(netdev);
18247 }
18248 err = PCI_ERS_RESULT_DISCONNECT;
18249 } else {
18250 pci_disable_device(pdev);
18251 }
18252
18253 rtnl_unlock();
18254
18255 return err;
18256 }
18257
18258 /**
18259 * tg3_io_slot_reset - called after the pci bus has been reset.
18260 * @pdev: Pointer to PCI device
18261 *
18262 * Restart the card from scratch, as if from a cold-boot.
18263	 * At this point, the card has experienced a hard reset,
18264 * followed by fixups by BIOS, and has its config space
18265 * set up identically to what it was at cold boot.
18266 */
18267 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18268 {
18269 struct net_device *netdev = pci_get_drvdata(pdev);
18270 struct tg3 *tp = netdev_priv(netdev);
18271 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18272 int err;
18273
18274 rtnl_lock();
18275
18276 if (pci_enable_device(pdev)) {
18277 dev_err(&pdev->dev,
18278 "Cannot re-enable PCI device after reset.\n");
18279 goto done;
18280 }
18281
18282 pci_set_master(pdev);
18283 pci_restore_state(pdev);
18284 pci_save_state(pdev);
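	/* pci_restore_state() consumes the state saved at probe time, so
	 * re-save it here in case the device needs another reset later.
	 */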
18285
18286 if (!netdev || !netif_running(netdev)) {
18287 rc = PCI_ERS_RESULT_RECOVERED;
18288 goto done;
18289 }
18290
18291 err = tg3_power_up(tp);
18292 if (err)
18293 goto done;
18294
18295 rc = PCI_ERS_RESULT_RECOVERED;
18296
18297 done:
18298 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18299 tg3_napi_enable(tp);
18300 dev_close(netdev);
18301 }
18302 rtnl_unlock();
18303
18304 return rc;
18305 }
18306
18307 /**
18308 * tg3_io_resume - called when traffic can start flowing again.
18309 * @pdev: Pointer to PCI device
18310 *
18311 * This callback is called when the error recovery driver tells
18312	 * us that it's OK to resume normal operation.
18313 */
18314 static void tg3_io_resume(struct pci_dev *pdev)
18315 {
18316 struct net_device *netdev = pci_get_drvdata(pdev);
18317 struct tg3 *tp = netdev_priv(netdev);
18318 int err;
18319
18320 rtnl_lock();
18321
18322 if (!netdev || !netif_running(netdev))
18323 goto done;
18324
18325 tg3_full_lock(tp, 0);
18326 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18327 tg3_flag_set(tp, INIT_COMPLETE);
18328 err = tg3_restart_hw(tp, true);
18329 if (err) {
18330 tg3_full_unlock(tp);
18331 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18332 goto done;
18333 }
18334
18335 netif_device_attach(netdev);
18336
18337 tg3_timer_start(tp);
18338
18339 tg3_netif_start(tp);
18340
18341 tg3_full_unlock(tp);
18342
18343 tg3_phy_start(tp);
18344
18345 done:
18346 tp->pcierr_recovery = false;
18347 rtnl_unlock();
18348 }
18349
18350 static const struct pci_error_handlers tg3_err_handler = {
18351 .error_detected = tg3_io_error_detected,
18352 .slot_reset = tg3_io_slot_reset,
18353 .resume = tg3_io_resume
18354 };
18355
18356 static struct pci_driver tg3_driver = {
18357 .name = DRV_MODULE_NAME,
18358 .id_table = tg3_pci_tbl,
18359 .probe = tg3_init_one,
18360 .remove = tg3_remove_one,
18361 .err_handler = &tg3_err_handler,
18362 .driver.pm = &tg3_pm_ops,
18363 .shutdown = tg3_shutdown,
18364 };
18365
18366 module_pci_driver(tg3_driver);
18367