1 /*****************************************************************************
2  *                                                                           *
3  * File: cxgb2.c                                                             *
4  * $Revision: 1.25 $                                                         *
5  * $Date: 2005/06/22 00:43:25 $                                              *
6  * Description:                                                              *
7  *  Chelsio 10Gb Ethernet Driver.                                            *
8  *                                                                           *
9  * This program is free software; you can redistribute it and/or modify      *
10  * it under the terms of the GNU General Public License, version 2, as       *
11  * published by the Free Software Foundation.                                *
12  *                                                                           *
13  * You should have received a copy of the GNU General Public License along   *
14  * with this program; if not, see <http://www.gnu.org/licenses/>.            *
15  *                                                                           *
16  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
17  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
18  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
19  *                                                                           *
20  * http://www.chelsio.com                                                    *
21  *                                                                           *
22  * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
23  * All rights reserved.                                                      *
24  *                                                                           *
25  * Maintainers: maintainers@chelsio.com                                      *
26  *                                                                           *
27  * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
28  *          Tina Yang               <tainay@chelsio.com>                     *
29  *          Felix Marti             <felix@chelsio.com>                      *
30  *          Scott Bardone           <sbardone@chelsio.com>                   *
31  *          Kurt Ottaway            <kottaway@chelsio.com>                   *
32  *          Frank DiMambro          <frank@chelsio.com>                      *
33  *                                                                           *
34  * History:                                                                  *
35  *                                                                           *
36  ****************************************************************************/
37 
38 #include "common.h"
39 #include <linux/module.h>
40 #include <linux/pci.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/if_vlan.h>
44 #include <linux/mii.h>
45 #include <linux/sockios.h>
46 #include <linux/dma-mapping.h>
47 #include <linux/uaccess.h>
48 
49 #include "cpl5_cmd.h"
50 #include "regs.h"
51 #include "gmac.h"
52 #include "cphy.h"
53 #include "sge.h"
54 #include "tp.h"
55 #include "espi.h"
56 #include "elmer0.h"
57 
58 #include <linux/workqueue.h>
59 
schedule_mac_stats_update(struct adapter * ap,int secs)60 static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
61 {
62 	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
63 }
64 
cancel_mac_stats_update(struct adapter * ap)65 static inline void cancel_mac_stats_update(struct adapter *ap)
66 {
67 	cancel_delayed_work(&ap->stats_update_task);
68 }
69 
70 #define MAX_CMDQ_ENTRIES	16384
71 #define MAX_CMDQ1_ENTRIES	1024
72 #define MAX_RX_BUFFERS		16384
73 #define MAX_RX_JUMBO_BUFFERS	16384
74 #define MAX_TX_BUFFERS_HIGH	16384U
75 #define MAX_TX_BUFFERS_LOW	1536U
76 #define MAX_TX_BUFFERS		1460U
77 #define MIN_FL_ENTRIES		32
78 
79 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
80 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
81 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
82 
83 /*
84  * The EEPROM is actually bigger but only the first few bytes are used so we
85  * only report those.
86  */
87 #define EEPROM_SIZE 32
88 
89 MODULE_DESCRIPTION(DRV_DESCRIPTION);
90 MODULE_AUTHOR("Chelsio Communications");
91 MODULE_LICENSE("GPL");
92 
93 static int dflt_msg_enable = DFLT_MSG_ENABLE;
94 
95 module_param(dflt_msg_enable, int, 0);
96 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
97 
98 #define HCLOCK 0x0
99 #define LCLOCK 0x1
100 
101 /* T1 cards powersave mode */
102 static int t1_clock(struct adapter *adapter, int mode);
103 static int t1powersave = 1;	/* HW default is powersave mode. */
104 
105 module_param(t1powersave, int, 0);
106 MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
107 
108 static int disable_msi = 0;
109 module_param(disable_msi, int, 0);
110 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
111 
112 /*
113  * Setup MAC to receive the types of packets we want.
114  */
t1_set_rxmode(struct net_device * dev)115 static void t1_set_rxmode(struct net_device *dev)
116 {
117 	struct adapter *adapter = dev->ml_priv;
118 	struct cmac *mac = adapter->port[dev->if_port].mac;
119 	struct t1_rx_mode rm;
120 
121 	rm.dev = dev;
122 	mac->ops->set_rx_mode(mac, &rm);
123 }
124 
link_report(struct port_info * p)125 static void link_report(struct port_info *p)
126 {
127 	if (!netif_carrier_ok(p->dev))
128 		netdev_info(p->dev, "link down\n");
129 	else {
130 		const char *s = "10Mbps";
131 
132 		switch (p->link_config.speed) {
133 			case SPEED_10000: s = "10Gbps"; break;
134 			case SPEED_1000:  s = "1000Mbps"; break;
135 			case SPEED_100:   s = "100Mbps"; break;
136 		}
137 
138 		netdev_info(p->dev, "link up, %s, %s-duplex\n",
139 			    s, p->link_config.duplex == DUPLEX_FULL
140 			    ? "full" : "half");
141 	}
142 }
143 
t1_link_negotiated(struct adapter * adapter,int port_id,int link_stat,int speed,int duplex,int pause)144 void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
145 			int speed, int duplex, int pause)
146 {
147 	struct port_info *p = &adapter->port[port_id];
148 
149 	if (link_stat != netif_carrier_ok(p->dev)) {
150 		if (link_stat)
151 			netif_carrier_on(p->dev);
152 		else
153 			netif_carrier_off(p->dev);
154 		link_report(p);
155 
156 		/* multi-ports: inform toe */
157 		if ((speed > 0) && (adapter->params.nports > 1)) {
158 			unsigned int sched_speed = 10;
159 			switch (speed) {
160 			case SPEED_1000:
161 				sched_speed = 1000;
162 				break;
163 			case SPEED_100:
164 				sched_speed = 100;
165 				break;
166 			case SPEED_10:
167 				sched_speed = 10;
168 				break;
169 			}
170 			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
171 		}
172 	}
173 }
174 
/*
 * Bring up one port's datapath: reset the MAC, reprogram its station
 * address and RX filter (presumably cleared by the reset — TODO confirm
 * against the MAC implementations), restart the PHY, and only then
 * enable the MAC in both directions.  The ordering matters: the MAC is
 * fully configured before it is enabled.
 */
static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
186 
enable_hw_csum(struct adapter * adapter)187 static void enable_hw_csum(struct adapter *adapter)
188 {
189 	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
190 		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
191 	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
192 }
193 
194 /*
195  * Things to do upon first use of a card.
196  * This must run with the rtnl lock held.
197  */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	/* One-time hardware initialization on the very first open */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	/* Clear any interrupt state left over from a previous run */
	t1_interrupts_clear(adapter);

	/* Prefer MSI; fall back to a shared legacy IRQ when MSI is
	 * unavailable or disabled via the module parameter.
	 */
	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err) {
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	/* Start the SGE before unmasking interrupts */
	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}
229 
230 /*
231  * Release resources when all the ports have been stopped.
232  */
static void cxgb_down(struct adapter *adapter)
{
	/* Teardown mirrors cxgb_up() in reverse: quiesce the SGE and mask
	 * interrupts before releasing the IRQ and any MSI vector.
	 */
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
	if (adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);
}
241 
/*
 * Open one port.  The first port opened also brings up the adapter-wide
 * resources (cxgb_up) and starts the periodic MAC statistics task.
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->ml_priv;
	int other_ports = adapter->open_device_map & PORT_MASK;

	/* NAPI is enabled before cxgb_up() unmasks interrupts */
	napi_enable(&adapter->napi);
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		napi_disable(&adapter->napi);
		return err;
	}

	__set_bit(dev->if_port, &adapter->open_device_map);
	link_start(&adapter->port[dev->if_port]);
	netif_start_queue(dev);
	/* Only the first opened port arms the stats task; later opens
	 * piggyback on the already-running one.
	 */
	if (!other_ports && adapter->params.stats_update_period)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);

	t1_vlan_mode(adapter, dev->features);
	return 0;
}
264 
/*
 * Close one port: stop its queue and MAC, and when the last port goes
 * down cancel the stats task and release adapter-wide resources.
 */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	napi_disable(&adapter->napi);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_atomic();
		/* Empty critical section: wait out a mac_stats_task() that
		 * is currently inside work_lock, so it has either observed
		 * the cleared port bit or already re-armed the work, then
		 * cancel whatever is pending.
		 */
		spin_lock(&adapter->work_lock);   /* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}
290 
t1_get_stats(struct net_device * dev)291 static struct net_device_stats *t1_get_stats(struct net_device *dev)
292 {
293 	struct adapter *adapter = dev->ml_priv;
294 	struct port_info *p = &adapter->port[dev->if_port];
295 	struct net_device_stats *ns = &dev->stats;
296 	const struct cmac_statistics *pstats;
297 
298 	/* Do a full update of the MAC stats */
299 	pstats = p->mac->ops->statistics_update(p->mac,
300 						MAC_STATS_UPDATE_FULL);
301 
302 	ns->tx_packets = pstats->TxUnicastFramesOK +
303 		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
304 
305 	ns->rx_packets = pstats->RxUnicastFramesOK +
306 		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
307 
308 	ns->tx_bytes = pstats->TxOctetsOK;
309 	ns->rx_bytes = pstats->RxOctetsOK;
310 
311 	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
312 		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
313 	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
314 		pstats->RxFCSErrors + pstats->RxAlignErrors +
315 		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
316 		pstats->RxSymbolErrors + pstats->RxRuntErrors;
317 
318 	ns->multicast  = pstats->RxMulticastFramesOK;
319 	ns->collisions = pstats->TxTotalCollisions;
320 
321 	/* detailed rx_errors */
322 	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
323 		pstats->RxJabberErrors;
324 	ns->rx_over_errors   = 0;
325 	ns->rx_crc_errors    = pstats->RxFCSErrors;
326 	ns->rx_frame_errors  = pstats->RxAlignErrors;
327 	ns->rx_fifo_errors   = 0;
328 	ns->rx_missed_errors = 0;
329 
330 	/* detailed tx_errors */
331 	ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
332 	ns->tx_carrier_errors   = 0;
333 	ns->tx_fifo_errors      = pstats->TxUnderrun;
334 	ns->tx_heartbeat_errors = 0;
335 	ns->tx_window_errors    = pstats->TxLateCollisions;
336 	return ns;
337 }
338 
get_msglevel(struct net_device * dev)339 static u32 get_msglevel(struct net_device *dev)
340 {
341 	struct adapter *adapter = dev->ml_priv;
342 
343 	return adapter->msg_enable;
344 }
345 
/* ethtool: set the driver's message-level bitmap. */
static void set_msglevel(struct net_device *dev, u32 val)
{
	((struct adapter *)dev->ml_priv)->msg_enable = val;
}
352 
/*
 * Names for the ethtool -S statistics.  The entries here must stay in
 * exactly the order in which get_stats() writes the corresponding
 * values; keep the two in sync when adding or removing counters.
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
	/* MAC TX stats */
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",
	"TxJumboFramesOk",
	"TxJumboOctetsOk",

	/* MAC RX stats */
	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",
	"RxJumboFramesOk",
	"RxJumboOctetsOk",

	/* Port stats */
	"RxCsumGood",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",
	"TxNeedHeadroom",

	/* Interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	/* ESPI stats (only written when adapter->espi is present) */
	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};
419 
420 #define T2_REGMAP_SIZE (3 * 1024)
421 
/* ethtool: size in bytes of the buffer that get_regs() fills. */
static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}
426 
get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)427 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
428 {
429 	struct adapter *adapter = dev->ml_priv;
430 
431 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
432 	strlcpy(info->bus_info, pci_name(adapter->pdev),
433 		sizeof(info->bus_info));
434 }
435 
get_sset_count(struct net_device * dev,int sset)436 static int get_sset_count(struct net_device *dev, int sset)
437 {
438 	switch (sset) {
439 	case ETH_SS_STATS:
440 		return ARRAY_SIZE(stats_strings);
441 	default:
442 		return -EOPNOTSUPP;
443 	}
444 }
445 
/* ethtool: copy out the statistics name table for ETH_SS_STATS. */
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset != ETH_SS_STATS)
		return;

	memcpy(data, stats_strings, sizeof(stats_strings));
}
451 
/*
 * ethtool -S statistics.  The values are written in exactly the order of
 * stats_strings[]; keep the two in sync.
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_intr_counts *t;
	struct sge_port_stats ss;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
	t = t1_sge_get_intr_counts(adapter->sge);
	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

	/* MAC TX counters */
	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;
	*data++ = s->TxJumboFramesOK;
	*data++ = s->TxJumboOctetsOK;

	/* MAC RX counters */
	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;
	*data++ = s->RxJumboFramesOK;
	*data++ = s->RxJumboOctetsOK;

	/* SGE per-port stats */
	*data++ = ss.rx_cso_good;
	*data++ = ss.tx_cso;
	*data++ = ss.tx_tso;
	*data++ = ss.vlan_xtract;
	*data++ = ss.vlan_insert;
	*data++ = ss.tx_need_hdrroom;

	/* SGE interrupt counters */
	*data++ = t->rx_drops;
	*data++ = t->pure_rsps;
	*data++ = t->unhandled_irqs;
	*data++ = t->respQ_empty;
	*data++ = t->respQ_overflow;
	*data++ = t->freelistQ_empty;
	*data++ = t->pkt_too_big;
	*data++ = t->pkt_mismatch;
	*data++ = t->cmdQ_full[0];
	*data++ = t->cmdQ_full[1];

	/* NOTE(review): when adapter->espi is absent these final six slots
	 * are left unwritten even though get_sset_count() still reports
	 * them — the espi strings would then show stale buffer contents.
	 */
	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = e->DIP2_parity_err;
		*data++ = e->DIP4_err;
		*data++ = e->rx_drops;
		*data++ = e->tx_drops;
		*data++ = e->rx_ovflw;
		*data++ = e->parity_err;
	}
}
533 
reg_block_dump(struct adapter * ap,void * buf,unsigned int start,unsigned int end)534 static inline void reg_block_dump(struct adapter *ap, void *buf,
535 				  unsigned int start, unsigned int end)
536 {
537 	u32 *p = buf + start;
538 
539 	for ( ; start <= end; start += sizeof(u32))
540 		*p++ = readl(ap->regs + start);
541 }
542 
/*
 * ethtool register dump.  Copies the register blocks of each hardware
 * module into a zeroed T2_REGMAP_SIZE buffer; each register keeps its
 * chip offset inside the buffer (see reg_block_dump()).
 */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->ml_priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
565 
get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * cmd)566 static int get_link_ksettings(struct net_device *dev,
567 			      struct ethtool_link_ksettings *cmd)
568 {
569 	struct adapter *adapter = dev->ml_priv;
570 	struct port_info *p = &adapter->port[dev->if_port];
571 	u32 supported, advertising;
572 
573 	supported = p->link_config.supported;
574 	advertising = p->link_config.advertising;
575 
576 	if (netif_carrier_ok(dev)) {
577 		cmd->base.speed = p->link_config.speed;
578 		cmd->base.duplex = p->link_config.duplex;
579 	} else {
580 		cmd->base.speed = SPEED_UNKNOWN;
581 		cmd->base.duplex = DUPLEX_UNKNOWN;
582 	}
583 
584 	cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
585 	cmd->base.phy_address = p->phy->mdio.prtad;
586 	cmd->base.autoneg = p->link_config.autoneg;
587 
588 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
589 						supported);
590 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
591 						advertising);
592 
593 	return 0;
594 }
595 
speed_duplex_to_caps(int speed,int duplex)596 static int speed_duplex_to_caps(int speed, int duplex)
597 {
598 	int cap = 0;
599 
600 	switch (speed) {
601 	case SPEED_10:
602 		if (duplex == DUPLEX_FULL)
603 			cap = SUPPORTED_10baseT_Full;
604 		else
605 			cap = SUPPORTED_10baseT_Half;
606 		break;
607 	case SPEED_100:
608 		if (duplex == DUPLEX_FULL)
609 			cap = SUPPORTED_100baseT_Full;
610 		else
611 			cap = SUPPORTED_100baseT_Half;
612 		break;
613 	case SPEED_1000:
614 		if (duplex == DUPLEX_FULL)
615 			cap = SUPPORTED_1000baseT_Full;
616 		else
617 			cap = SUPPORTED_1000baseT_Half;
618 		break;
619 	case SPEED_10000:
620 		if (duplex == DUPLEX_FULL)
621 			cap = SUPPORTED_10000baseT_Full;
622 	}
623 	return cap;
624 }
625 
626 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
627 		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
628 		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
629 		      ADVERTISED_10000baseT_Full)
630 
/*
 * ethtool: change the link settings.
 *
 * Ports without autonegotiation support cannot change speed/duplex at
 * all.  With autoneg disabled a single supported speed/duplex pair is
 * forced; forcing 1000 Mbps is rejected (NOTE(review): presumably
 * because 1000BASE-T requires autonegotiation — confirm).  With autoneg
 * enabled the advertised mask is sanitized against the supported modes,
 * and advertising more than one mode falls back to advertising
 * everything supported.
 */
static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;             /* can't change speed/duplex */

	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		u32 speed = cmd->base.speed;
		int cap = speed_duplex_to_caps(speed, cmd->base.duplex);

		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->base.duplex;
		lc->advertising = 0;
	} else {
		advertising &= ADVERTISED_MASK;
		/* more than one bit set: advertise everything we support */
		if (advertising & (advertising - 1))
			advertising = lc->supported;
		advertising &= lc->supported;
		if (!advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->base.autoneg;
	/* Apply immediately only if the interface is up */
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}
670 
get_pauseparam(struct net_device * dev,struct ethtool_pauseparam * epause)671 static void get_pauseparam(struct net_device *dev,
672 			   struct ethtool_pauseparam *epause)
673 {
674 	struct adapter *adapter = dev->ml_priv;
675 	struct port_info *p = &adapter->port[dev->if_port];
676 
677 	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
678 	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
679 	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
680 }
681 
set_pauseparam(struct net_device * dev,struct ethtool_pauseparam * epause)682 static int set_pauseparam(struct net_device *dev,
683 			  struct ethtool_pauseparam *epause)
684 {
685 	struct adapter *adapter = dev->ml_priv;
686 	struct port_info *p = &adapter->port[dev->if_port];
687 	struct link_config *lc = &p->link_config;
688 
689 	if (epause->autoneg == AUTONEG_DISABLE)
690 		lc->requested_fc = 0;
691 	else if (lc->supported & SUPPORTED_Autoneg)
692 		lc->requested_fc = PAUSE_AUTONEG;
693 	else
694 		return -EINVAL;
695 
696 	if (epause->rx_pause)
697 		lc->requested_fc |= PAUSE_RX;
698 	if (epause->tx_pause)
699 		lc->requested_fc |= PAUSE_TX;
700 	if (lc->autoneg == AUTONEG_ENABLE) {
701 		if (netif_running(dev))
702 			t1_link_start(p->phy, p->mac, lc);
703 	} else {
704 		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
705 		if (netif_running(dev))
706 			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
707 							 lc->fc);
708 	}
709 	return 0;
710 }
711 
get_sge_param(struct net_device * dev,struct ethtool_ringparam * e)712 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
713 {
714 	struct adapter *adapter = dev->ml_priv;
715 	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
716 
717 	e->rx_max_pending = MAX_RX_BUFFERS;
718 	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
719 	e->tx_max_pending = MAX_CMDQ_ENTRIES;
720 
721 	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
722 	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
723 	e->tx_pending = adapter->params.sge.cmdQ_size[0];
724 }
725 
set_sge_param(struct net_device * dev,struct ethtool_ringparam * e)726 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
727 {
728 	struct adapter *adapter = dev->ml_priv;
729 	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
730 
731 	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
732 	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
733 	    e->tx_pending > MAX_CMDQ_ENTRIES ||
734 	    e->rx_pending < MIN_FL_ENTRIES ||
735 	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
736 	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
737 		return -EINVAL;
738 
739 	if (adapter->flags & FULL_INIT_DONE)
740 		return -EBUSY;
741 
742 	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
743 	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
744 	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
745 	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
746 		MAX_CMDQ1_ENTRIES : e->tx_pending;
747 	return 0;
748 }
749 
set_coalesce(struct net_device * dev,struct ethtool_coalesce * c)750 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
751 {
752 	struct adapter *adapter = dev->ml_priv;
753 
754 	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
755 	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
756 	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
757 	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
758 	return 0;
759 }
760 
get_coalesce(struct net_device * dev,struct ethtool_coalesce * c)761 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
762 {
763 	struct adapter *adapter = dev->ml_priv;
764 
765 	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
766 	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
767 	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
768 	return 0;
769 }
770 
get_eeprom_len(struct net_device * dev)771 static int get_eeprom_len(struct net_device *dev)
772 {
773 	struct adapter *adapter = dev->ml_priv;
774 
775 	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
776 }
777 
778 #define EEPROM_MAGIC(ap) \
779 	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
780 
/*
 * ethtool: read a byte range from the serial EEPROM.  The EEPROM is read
 * one 32-bit word at a time, so the loop starts at the word-aligned
 * offset at or below e->offset and reads whole words into an aligned
 * bounce buffer before copying out the requested bytes.
 * NOTE(review): relies on e->offset + e->len <= EEPROM_SIZE, presumably
 * enforced by the ethtool core via get_eeprom_len() — confirm.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->ml_priv;

	e->magic = EEPROM_MAGIC(adapter);
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}
794 
/*
 * ethtool operation table.  Coalescing support is limited to the RX
 * parameters declared in supported_coalesce_params; the ethtool core
 * rejects attempts to set anything else.
 */
static const struct ethtool_ops t1_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
				     ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.get_sset_count	   = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};
819 
t1_ioctl(struct net_device * dev,struct ifreq * req,int cmd)820 static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
821 {
822 	struct adapter *adapter = dev->ml_priv;
823 	struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;
824 
825 	return mdio_mii_ioctl(mdio, if_mii(req), cmd);
826 }
827 
t1_change_mtu(struct net_device * dev,int new_mtu)828 static int t1_change_mtu(struct net_device *dev, int new_mtu)
829 {
830 	int ret;
831 	struct adapter *adapter = dev->ml_priv;
832 	struct cmac *mac = adapter->port[dev->if_port].mac;
833 
834 	if (!mac->ops->set_mtu)
835 		return -EOPNOTSUPP;
836 	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
837 		return ret;
838 	dev->mtu = new_mtu;
839 	return 0;
840 }
841 
t1_set_mac_addr(struct net_device * dev,void * p)842 static int t1_set_mac_addr(struct net_device *dev, void *p)
843 {
844 	struct adapter *adapter = dev->ml_priv;
845 	struct cmac *mac = adapter->port[dev->if_port].mac;
846 	struct sockaddr *addr = p;
847 
848 	if (!mac->ops->macaddress_set)
849 		return -EOPNOTSUPP;
850 
851 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
852 	mac->ops->macaddress_set(mac, dev->dev_addr);
853 	return 0;
854 }
855 
static netdev_features_t t1_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * RX and TX VLAN acceleration cannot be toggled independently on
	 * this hardware, so force the TX flag to mirror the RX flag.
	 */
	features &= ~NETIF_F_HW_VLAN_CTAG_TX;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}
870 
static int t1_set_features(struct net_device *dev, netdev_features_t features)
{
	struct adapter *adapter = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;

	/* Only a change of the VLAN RX flag needs hardware reprogramming */
	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		t1_vlan_mode(adapter, features);

	return 0;
}
881 #ifdef CONFIG_NET_POLL_CONTROLLER
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->ml_priv;

	/* Invoke the interrupt handler by hand with local interrupts
	 * disabled, mimicking hard-irq context for netpoll (netconsole,
	 * kgdboe) when real interrupts may not be serviceable.
	 */
	local_irq_save(flags);
	t1_interrupt(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
891 #endif
892 
893 /*
894  * Periodic accumulation of MAC statistics.  This is used only if the MAC
895  * does not have any other way to prevent stats counter overflow.
896  */
static void mac_stats_task(struct work_struct *work)
{
	int i;
	struct adapter *adapter =
		container_of(work, struct adapter, stats_update_task.work);

	/* Fast refresh of each running port's hardware counters */
	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];

		if (netif_running(p->dev))
			p->mac->ops->statistics_update(p->mac,
						       MAC_STATS_UPDATE_FAST);
	}

	/* Schedule the next statistics update if any port is active.
	 * work_lock makes the check-and-rearm atomic with respect to the
	 * empty-critical-section handshake in cxgb_close(), so the task
	 * cannot re-arm itself after the last port has been closed.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	spin_unlock(&adapter->work_lock);
}
918 
/*
 * Processes elmer0 external interrupts in process context.
 *
 * Runs as the work item scheduled by t1_elmer0_ext_intr(), which masked
 * F_PL_INTR_EXT before scheduling us; we unmask it again once the
 * sources have been serviced.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, ext_intr_handler_task);

	/* Service whatever elmer0 source raised the interrupt. */
	t1_elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	/* Clear any latched EXT cause before unmasking it again. */
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}
937 
/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	/* SGE data interrupts remain enabled regardless of the slow mask. */
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}
953 
/*
 * Handle a non-recoverable adapter error: stop SGE DMA and mask all
 * interrupts (only if the adapter completed full initialization), then
 * log that operation is suspended.
 */
void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	pr_alert("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}
963 
/* net_device callbacks shared by all ports of an adapter. */
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open		= cxgb_open,
	.ndo_stop		= cxgb_close,
	.ndo_start_xmit		= t1_start_xmit,
	.ndo_get_stats		= t1_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= t1_set_rxmode,
	.ndo_do_ioctl		= t1_ioctl,
	.ndo_change_mtu		= t1_change_mtu,
	.ndo_set_mac_address	= t1_set_mac_addr,
	.ndo_fix_features	= t1_fix_features,
	.ndo_set_features	= t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= t1_netpoll,
#endif
};
980 
/*
 * PCI probe, called once per adapter.  Enables the PCI device, sets up
 * DMA masking, maps BAR0, allocates one net_device per port (the
 * adapter structure itself is carved out of the first port's netdev
 * private area), initializes the software modules and registers the
 * ports.  Individual port registration failures are tolerated as long
 * as at least one port registers successfully.
 */
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	/* Prefer 64-bit DMA; otherwise fall back to a 32-bit mask. */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;

		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
			pr_err("%s: unable to obtain 64-bit DMA for coherent allocations\n",
			       pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
		pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		/* Only port 0's netdev carries the adapter state. */
		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		/* One-time adapter setup, done while handling port 0. */
		if (!adapter) {
			adapter = netdev_priv(netdev);
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;  /* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				pr_err("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	  /* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task);
			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		/* Per-port netdev setup. */
		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->ml_priv = adapter;
		netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM | NETIF_F_LLTX;

		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
			netdev->features |=
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX;
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				netdev->hw_features |= NETIF_F_TSO;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->netdev_ops = &cxgb_netdev_ops;
		/* Reserve headroom for the CPL header prepended on transmit. */
		netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

		netdev->ethtool_ops = &t1_ethtool_ops;

		/* MTU ceiling depends on the MAC fitted to the board. */
		switch (bi->board) {
		case CHBT_BOARD_CHT110:
		case CHBT_BOARD_N110:
		case CHBT_BOARD_N210:
		case CHBT_BOARD_CHT210:
			netdev->max_mtu = PM3393_MAX_FRAME_SIZE -
					  (ETH_HLEN + ETH_FCS_LEN);
			break;
		case CHBT_BOARD_CHN204:
			netdev->max_mtu = VSC7326_MAX_MTU;
			break;
		default:
			netdev->max_mtu = ETH_DATA_LEN;
			break;
		}
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			pr_warn("%s: cannot register net device %s, skipping\n",
				pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	/* err still holds the last register_netdev() failure here. */
	if (!adapter->registered_device_map) {
		pr_err("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
		adapter->name, bi->desc, adapter->params.chip_revision,
		adapter->params.pci.is_pcix ? "PCIX" : "PCI",
		adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	/* Unwind in reverse: port 0's netdev embeds the adapter, free it last. */
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	return err;
}
1191 
bit_bang(struct adapter * adapter,int bitdata,int nbits)1192 static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1193 {
1194 	int data;
1195 	int i;
1196 	u32 val;
1197 
1198 	enum {
1199 		S_CLOCK = 1 << 3,
1200 		S_DATA = 1 << 4
1201 	};
1202 
1203 	for (i = (nbits - 1); i > -1; i--) {
1204 
1205 		udelay(50);
1206 
1207 		data = ((bitdata >> i) & 0x1);
1208 		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1209 
1210 		if (data)
1211 			val |= S_DATA;
1212 		else
1213 			val &= ~S_DATA;
1214 
1215 		udelay(50);
1216 
1217 		/* Set SCLOCK low */
1218 		val &= ~S_CLOCK;
1219 		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1220 
1221 		udelay(50);
1222 
1223 		/* Write SCLOCK high */
1224 		val |= S_CLOCK;
1225 		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1226 
1227 	}
1228 }
1229 
/*
 * Reprogram the T1B ASIC core and memory clock synthesizers by
 * bit-banging their serial programming interface via the ELMER0 GPO
 * register.  'mode' bit 0 selects HCLOCK (overclock) vs LCLOCK
 * (underclock/powersave); bit 1 set means query only, change nothing.
 * Returns 0 on success, -ENODEV on non-T1B chips, -EALREADY if the
 * ASIC is already running in the requested mode.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	/* Synthesizer divider widths/values and ELMER0 GPO control bits. */
	enum {
		M_CORE_BITS	= 9,
		T_CORE_VAL	= 0,
		T_CORE_BITS	= 2,
		N_CORE_VAL	= 0,
		N_CORE_BITS	= 2,
		M_MEM_BITS	= 9,
		T_MEM_VAL	= 0,
		T_MEM_BITS	= 2,
		N_MEM_VAL	= 0,
		N_MEM_BITS	= 2,
		NP_LOAD		= 1 << 17,
		S_LOAD_MEM	= 1 << 5,
		S_LOAD_CORE	= 1 << 6,
		S_CLOCK		= 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	/* Pick the M divider values for the requested speed. */
	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core: pulse S_LOAD_CORE to latch the new values. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory: pulse S_LOAD_MEM to latch the new values. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}
1339 
/*
 * Software reset via the power-management CSR: write 3 then 0 to the
 * power-state field (presumably D3hot then D0, which resets the chip —
 * confirm against the PCICFG register spec).
 */
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}
1345 
/*
 * PCI remove: tears down everything init_one() set up.  Note that the
 * while loop below deliberately relies on 'i' retaining its final value
 * from the for_each_port() walk, so the netdevs are freed in reverse
 * order; port 0's netdev, which embeds the adapter state, goes last.
 */
static void remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->ml_priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	/* Reset the chip so a subsequent driver load starts clean. */
	t1_sw_reset(pdev);
}
1369 
/* PCI driver glue; module init/exit are generated by module_pci_driver(). */
static struct pci_driver cxgb_pci_driver = {
	.name     = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
};

module_pci_driver(cxgb_pci_driver);
1378