1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/if_vlan.h>
37 #include <linux/skbuff.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <linux/prefetch.h>
42 #include <net/ip6_checksum.h>
43 
44 #include "qlge.h"
45 
46 char qlge_driver_name[] = DRV_NAME;
47 const char qlge_driver_version[] = DRV_VERSION;
48 
49 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50 MODULE_DESCRIPTION(DRV_STRING " ");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION(DRV_VERSION);
53 
54 static const u32 default_msg =
55     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56 /* NETIF_MSG_TIMER |	*/
57     NETIF_MSG_IFDOWN |
58     NETIF_MSG_IFUP |
59     NETIF_MSG_RX_ERR |
60     NETIF_MSG_TX_ERR |
61 /*  NETIF_MSG_TX_QUEUED | */
62 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
63 /* NETIF_MSG_PKTDATA | */
64     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
65 
66 static int debug = -1;	/* defaults above */
67 module_param(debug, int, 0664);
68 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69 
70 #define MSIX_IRQ 0
71 #define MSI_IRQ 1
72 #define LEG_IRQ 2
73 static int qlge_irq_type = MSIX_IRQ;
74 module_param(qlge_irq_type, int, 0664);
75 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76 
77 static int qlge_mpi_coredump;
78 module_param(qlge_mpi_coredump, int, 0);
79 MODULE_PARM_DESC(qlge_mpi_coredump,
80 		"Option to enable MPI firmware dump. "
81 		"Default is OFF - Do Not allocate memory. ");
82 
83 static int qlge_force_coredump;
84 module_param(qlge_force_coredump, int, 0);
85 MODULE_PARM_DESC(qlge_force_coredump,
86 		"Option to allow force of firmware core dump. "
87 		"Default is OFF - Do not allow.");
88 
89 static const struct pci_device_id qlge_pci_tbl[] = {
90 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
91 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
92 	/* required last entry */
93 	{0,}
94 };
95 
96 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
97 
98 static int ql_wol(struct ql_adapter *);
99 static void qlge_set_multicast_list(struct net_device *);
100 static int ql_adapter_down(struct ql_adapter *);
101 static int ql_adapter_up(struct ql_adapter *);
102 
103 /* This hardware semaphore provides exclusive access to
104  * resources shared between the NIC driver, MPI firmware,
105  * FCoE firmware and the FC driver.
106  */
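/* ql_sem_trylock() returns 0 when the semaphore is acquired (the requested
 * SEM_SET bits read back as set), and non-zero when another function
 * already owns it (or -EINVAL for an unknown mask).
 */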
107 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
108 {
109 	u32 sem_bits = 0;
110 
111 	switch (sem_mask) {
112 	case SEM_XGMAC0_MASK:
113 		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
114 		break;
115 	case SEM_XGMAC1_MASK:
116 		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
117 		break;
118 	case SEM_ICB_MASK:
119 		sem_bits = SEM_SET << SEM_ICB_SHIFT;
120 		break;
121 	case SEM_MAC_ADDR_MASK:
122 		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
123 		break;
124 	case SEM_FLASH_MASK:
125 		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
126 		break;
127 	case SEM_PROBE_MASK:
128 		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
129 		break;
130 	case SEM_RT_IDX_MASK:
131 		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
132 		break;
133 	case SEM_PROC_REG_MASK:
134 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
135 		break;
136 	default:
137 		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
138 		return -EINVAL;
139 	}
140 
141 	ql_write32(qdev, SEM, sem_bits | sem_mask);
142 	return !(ql_read32(qdev, SEM) & sem_bits);
143 }
144 
145 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
146 {
147 	unsigned int wait_count = 30;
148 	do {
149 		if (!ql_sem_trylock(qdev, sem_mask))
150 			return 0;
151 		udelay(100);
152 	} while (--wait_count);
153 	return -ETIMEDOUT;
154 }
155 
156 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
157 {
158 	ql_write32(qdev, SEM, sem_mask);
159 	ql_read32(qdev, SEM);	/* flush */
160 }
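
/* A minimal usage sketch of the two helpers above, mirroring how the flash
 * and MAC-address routines later in this file take and release a hardware
 * semaphore (illustrative only):
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...access the serial flash via FLASH_ADDR/FLASH_DATA...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */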
161 
162 /* This function waits for a specific bit to come ready
163  * in a given register.  It is used mostly by the initialization
164  * process, but is also called from process-context paths such as
165  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
166  */
167 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
168 {
169 	u32 temp;
170 	int count = UDELAY_COUNT;
171 
172 	while (count) {
173 		temp = ql_read32(qdev, reg);
174 
175 		/* check for errors */
176 		if (temp & err_bit) {
177 			netif_alert(qdev, probe, qdev->ndev,
178 				    "register 0x%.08x access error, value = 0x%.08x!.\n",
179 				    reg, temp);
180 			return -EIO;
181 		} else if (temp & bit)
182 			return 0;
183 		udelay(UDELAY_DELAY);
184 		count--;
185 	}
186 	netif_alert(qdev, probe, qdev->ndev,
187 		    "Timed out waiting for reg %x to come ready.\n", reg);
188 	return -ETIMEDOUT;
189 }
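
/* The total poll budget above is UDELAY_COUNT iterations of UDELAY_DELAY
 * microseconds each (both defined in qlge.h).
 */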
190 
191 /* The CFG register is used to download TX and RX control blocks
192  * to the chip. This function waits for an operation to complete.
193  */
194 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
195 {
196 	int count = UDELAY_COUNT;
197 	u32 temp;
198 
199 	while (count) {
200 		temp = ql_read32(qdev, CFG);
201 		if (temp & CFG_LE)
202 			return -EIO;
203 		if (!(temp & bit))
204 			return 0;
205 		udelay(UDELAY_DELAY);
206 		count--;
207 	}
208 	return -ETIMEDOUT;
209 }
210 
211 
212 /* Used to issue init control blocks to hw. Maps control block,
213  * sets address, triggers download, waits for completion.
214  */
215 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
216 		 u16 q_id)
217 {
218 	u64 map;
219 	int status = 0;
220 	int direction;
221 	u32 mask;
222 	u32 value;
223 
224 	direction =
225 	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
226 	    PCI_DMA_FROMDEVICE;
227 
228 	map = pci_map_single(qdev->pdev, ptr, size, direction);
229 	if (pci_dma_mapping_error(qdev->pdev, map)) {
230 		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
231 		return -ENOMEM;
232 	}
233 
234 	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
235 	if (status) {
236 		pci_unmap_single(qdev->pdev, map, size, direction);
		return status;
	}
237 
238 	status = ql_wait_cfg(qdev, bit);
239 	if (status) {
240 		netif_err(qdev, ifup, qdev->ndev,
241 			  "Timed out waiting for CFG to come ready.\n");
242 		goto exit;
243 	}
244 
245 	ql_write32(qdev, ICB_L, (u32) map);
246 	ql_write32(qdev, ICB_H, (u32) (map >> 32));
247 
248 	mask = CFG_Q_MASK | (bit << 16);
249 	value = bit | (q_id << CFG_Q_SHIFT);
250 	ql_write32(qdev, CFG, (mask | value));
251 
252 	/*
253 	 * Wait for the bit to clear after signaling hw.
254 	 */
255 	status = ql_wait_cfg(qdev, bit);
256 exit:
257 	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
258 	pci_unmap_single(qdev->pdev, map, size, direction);
259 	return status;
260 }
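
/* The 'bit' argument selects which load operation the CFG register performs
 * (one of the CFG_LRQ/CFG_LR/CFG_LCQ-style load bits tested in the direction
 * calculation above) and 'q_id' selects the target queue.
 */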
261 
262 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
263 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
264 			u32 *value)
265 {
266 	u32 offset = 0;
267 	int status;
268 
269 	switch (type) {
270 	case MAC_ADDR_TYPE_MULTI_MAC:
271 	case MAC_ADDR_TYPE_CAM_MAC:
272 		{
273 			status =
274 			    ql_wait_reg_rdy(qdev,
275 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
276 			if (status)
277 				goto exit;
278 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
279 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
280 				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
281 			status =
282 			    ql_wait_reg_rdy(qdev,
283 				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
284 			if (status)
285 				goto exit;
286 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
287 			status =
288 			    ql_wait_reg_rdy(qdev,
289 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
290 			if (status)
291 				goto exit;
292 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
293 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
294 				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
295 			status =
296 			    ql_wait_reg_rdy(qdev,
297 				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
298 			if (status)
299 				goto exit;
300 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
301 			if (type == MAC_ADDR_TYPE_CAM_MAC) {
302 				status =
303 				    ql_wait_reg_rdy(qdev,
304 					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
305 				if (status)
306 					goto exit;
307 				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
308 					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
309 					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
310 				status =
311 				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
312 						    MAC_ADDR_MR, 0);
313 				if (status)
314 					goto exit;
315 				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
316 			}
317 			break;
318 		}
319 	case MAC_ADDR_TYPE_VLAN:
320 	case MAC_ADDR_TYPE_MULTI_FLTR:
321 	default:
322 		netif_crit(qdev, ifup, qdev->ndev,
323 			   "Address type %d not yet supported.\n", type);
324 		status = -EPERM;
325 	}
326 exit:
327 	return status;
328 }
329 
330 /* Set up a MAC, multicast or VLAN address for the
331  * inbound frame matching.
332  */
333 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
334 			       u16 index)
335 {
336 	u32 offset = 0;
337 	int status = 0;
338 
339 	switch (type) {
340 	case MAC_ADDR_TYPE_MULTI_MAC:
341 		{
342 			u32 upper = (addr[0] << 8) | addr[1];
343 			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
344 					(addr[4] << 8) | (addr[5]);
345 
346 			status =
347 				ql_wait_reg_rdy(qdev,
348 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
349 			if (status)
350 				goto exit;
351 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
352 				(index << MAC_ADDR_IDX_SHIFT) |
353 				type | MAC_ADDR_E);
354 			ql_write32(qdev, MAC_ADDR_DATA, lower);
355 			status =
356 				ql_wait_reg_rdy(qdev,
357 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
358 			if (status)
359 				goto exit;
360 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
361 				(index << MAC_ADDR_IDX_SHIFT) |
362 				type | MAC_ADDR_E);
363 
364 			ql_write32(qdev, MAC_ADDR_DATA, upper);
365 			status =
366 				ql_wait_reg_rdy(qdev,
367 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
368 			if (status)
369 				goto exit;
370 			break;
371 		}
372 	case MAC_ADDR_TYPE_CAM_MAC:
373 		{
374 			u32 cam_output;
375 			u32 upper = (addr[0] << 8) | addr[1];
376 			u32 lower =
377 			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
378 			    (addr[5]);
379 			status =
380 			    ql_wait_reg_rdy(qdev,
381 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
382 			if (status)
383 				goto exit;
384 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
385 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
386 				   type);	/* type */
387 			ql_write32(qdev, MAC_ADDR_DATA, lower);
388 			status =
389 			    ql_wait_reg_rdy(qdev,
390 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
391 			if (status)
392 				goto exit;
393 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
394 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
395 				   type);	/* type */
396 			ql_write32(qdev, MAC_ADDR_DATA, upper);
397 			status =
398 			    ql_wait_reg_rdy(qdev,
399 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
400 			if (status)
401 				goto exit;
402 			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
403 				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
404 				   type);	/* type */
405 			/* This field should also include the queue id
406 			   and possibly the function id.  Right now we hardcode
407 			   the route field to NIC core.
408 			 */
409 			cam_output = (CAM_OUT_ROUTE_NIC |
410 				      (qdev->
411 				       func << CAM_OUT_FUNC_SHIFT) |
412 					(0 << CAM_OUT_CQ_ID_SHIFT));
413 			if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
414 				cam_output |= CAM_OUT_RV;
415 			/* route to NIC core */
416 			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
417 			break;
418 		}
419 	case MAC_ADDR_TYPE_VLAN:
420 		{
421 			u32 enable_bit = *((u32 *) &addr[0]);
422 			/* For VLAN, the addr actually holds a bit that
423 			 * either enables or disables the vlan id we are
424 			 * addressing. It's either MAC_ADDR_E on or off.
425 			 * That's bit-27 we're talking about.
426 			 */
427 			status =
428 			    ql_wait_reg_rdy(qdev,
429 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
430 			if (status)
431 				goto exit;
432 			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
433 				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
434 				   type |	/* type */
435 				   enable_bit);	/* enable/disable */
436 			break;
437 		}
438 	case MAC_ADDR_TYPE_MULTI_FLTR:
439 	default:
440 		netif_crit(qdev, ifup, qdev->ndev,
441 			   "Address type %d not yet supported.\n", type);
442 		status = -EPERM;
443 	}
444 exit:
445 	return status;
446 }
447 
448 /* Set or clear MAC address in hardware. We sometimes
449  * have to clear it to prevent wrong frame routing
450  * especially in a bonding environment.
451  */
452 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
453 {
454 	int status;
455 	char zero_mac_addr[ETH_ALEN];
456 	char *addr;
457 
458 	if (set) {
459 		addr = &qdev->current_mac_addr[0];
460 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
461 			     "Set Mac addr %pM\n", addr);
462 	} else {
463 		eth_zero_addr(zero_mac_addr);
464 		addr = &zero_mac_addr[0];
465 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
466 			     "Clearing MAC address\n");
467 	}
468 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
469 	if (status)
470 		return status;
471 	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
472 			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
473 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
474 	if (status)
475 		netif_err(qdev, ifup, qdev->ndev,
476 			  "Failed to init mac address.\n");
477 	return status;
478 }
479 
480 void ql_link_on(struct ql_adapter *qdev)
481 {
482 	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
483 	netif_carrier_on(qdev->ndev);
484 	ql_set_mac_addr(qdev, 1);
485 }
486 
487 void ql_link_off(struct ql_adapter *qdev)
488 {
489 	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
490 	netif_carrier_off(qdev->ndev);
491 	ql_set_mac_addr(qdev, 0);
492 }
493 
494 /* Get a specific frame routing value from the CAM.
495  * Used for debug and reg dump.
496  */
497 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
498 {
499 	int status = 0;
500 
501 	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
502 	if (status)
503 		goto exit;
504 
505 	ql_write32(qdev, RT_IDX,
506 		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
507 	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
508 	if (status)
509 		goto exit;
510 	*value = ql_read32(qdev, RT_DATA);
511 exit:
512 	return status;
513 }
514 
515 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
516  * to route different frame types to various inbound queues.  We send broadcast/
517  * multicast/error frames to the default queue for slow handling,
518  * and CAM hit/RSS frames to the fast handling queues.
519  */
520 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
521 			      int enable)
522 {
523 	int status = -EINVAL; /* Return error if no mask match. */
524 	u32 value = 0;
525 
526 	switch (mask) {
527 	case RT_IDX_CAM_HIT:
528 		{
529 			value = RT_IDX_DST_CAM_Q |	/* dest */
530 			    RT_IDX_TYPE_NICQ |	/* type */
531 			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
532 			break;
533 		}
534 	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
535 		{
536 			value = RT_IDX_DST_DFLT_Q |	/* dest */
537 			    RT_IDX_TYPE_NICQ |	/* type */
538 			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
539 			break;
540 		}
541 	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
542 		{
543 			value = RT_IDX_DST_DFLT_Q |	/* dest */
544 			    RT_IDX_TYPE_NICQ |	/* type */
545 			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
546 			break;
547 		}
548 	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
549 		{
550 			value = RT_IDX_DST_DFLT_Q | /* dest */
551 				RT_IDX_TYPE_NICQ | /* type */
552 				(RT_IDX_IP_CSUM_ERR_SLOT <<
553 				RT_IDX_IDX_SHIFT); /* index */
554 			break;
555 		}
556 	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
557 		{
558 			value = RT_IDX_DST_DFLT_Q | /* dest */
559 				RT_IDX_TYPE_NICQ | /* type */
560 				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
561 				RT_IDX_IDX_SHIFT); /* index */
562 			break;
563 		}
564 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
565 		{
566 			value = RT_IDX_DST_DFLT_Q |	/* dest */
567 			    RT_IDX_TYPE_NICQ |	/* type */
568 			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
569 			break;
570 		}
571 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
572 		{
573 			value = RT_IDX_DST_DFLT_Q |	/* dest */
574 			    RT_IDX_TYPE_NICQ |	/* type */
575 			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
576 			break;
577 		}
578 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
579 		{
580 			value = RT_IDX_DST_DFLT_Q |	/* dest */
581 			    RT_IDX_TYPE_NICQ |	/* type */
582 			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
583 			break;
584 		}
585 	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
586 		{
587 			value = RT_IDX_DST_RSS |	/* dest */
588 			    RT_IDX_TYPE_NICQ |	/* type */
589 			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
590 			break;
591 		}
592 	case 0:		/* Clear the E-bit on an entry. */
593 		{
594 			value = RT_IDX_DST_DFLT_Q |	/* dest */
595 			    RT_IDX_TYPE_NICQ |	/* type */
596 			    (index << RT_IDX_IDX_SHIFT);/* index */
597 			break;
598 		}
599 	default:
600 		netif_err(qdev, ifup, qdev->ndev,
601 			  "Mask type %d not yet supported.\n", mask);
602 		status = -EPERM;
603 		goto exit;
604 	}
605 
606 	if (value) {
607 		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
608 		if (status)
609 			goto exit;
610 		value |= (enable ? RT_IDX_E : 0);
611 		ql_write32(qdev, RT_IDX, value);
612 		ql_write32(qdev, RT_DATA, enable ? mask : 0);
613 	}
614 exit:
615 	return status;
616 }
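
/* Example (sketch): enabling the broadcast routing entry programs slot
 * RT_IDX_BCAST_SLOT to steer broadcast frames to the default queue, e.g.
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 *
 * which is the pattern used when the routing table is filled in at ifup
 * time elsewhere in this driver.
 */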
617 
618 static void ql_enable_interrupts(struct ql_adapter *qdev)
619 {
620 	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
621 }
622 
623 static void ql_disable_interrupts(struct ql_adapter *qdev)
624 {
625 	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
626 }
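
/* The INTR_EN writes above follow the mask-and-data convention used by
 * several registers on this chip: the upper 16 bits select which of the
 * lower 16 bits take effect.  Hence enable writes (INTR_EN_EI << 16) |
 * INTR_EN_EI while disable writes only the mask half with the data bit
 * clear (see also the STS write in ql_8012_port_initialize()).
 */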
627 
628 /* If we're running with multiple MSI-X vectors then we enable on the fly.
629  * Otherwise, we may have multiple outstanding workers and don't want to
630  * enable until the last one finishes. In this case, the irq_cnt gets
631  * incremented every time we queue a worker and decremented every time
632  * a worker finishes.  Once it hits zero we enable the interrupt.
633  */
634 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
635 {
636 	u32 var = 0;
637 	unsigned long hw_flags = 0;
638 	struct intr_context *ctx = qdev->intr_context + intr;
639 
640 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
641 		/* Always enable if we're MSIX multi interrupts and
642 		 * it's not the default (zeroeth) interrupt.
643 		 */
644 		ql_write32(qdev, INTR_EN,
645 			   ctx->intr_en_mask);
646 		var = ql_read32(qdev, STS);
647 		return var;
648 	}
649 
650 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
651 	if (atomic_dec_and_test(&ctx->irq_cnt)) {
652 		ql_write32(qdev, INTR_EN,
653 			   ctx->intr_en_mask);
654 		var = ql_read32(qdev, STS);
655 	}
656 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
657 	return var;
658 }
659 
660 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
661 {
662 	u32 var = 0;
663 	struct intr_context *ctx;
664 
665 	/* HW disables for us if we're MSIX multi interrupts and
666 	 * it's not the default (zeroeth) interrupt.
667 	 */
668 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
669 		return 0;
670 
671 	ctx = qdev->intr_context + intr;
672 	spin_lock(&qdev->hw_lock);
673 	if (!atomic_read(&ctx->irq_cnt)) {
674 		ql_write32(qdev, INTR_EN,
675 			   ctx->intr_dis_mask);
676 		var = ql_read32(qdev, STS);
677 	}
678 	atomic_inc(&ctx->irq_cnt);
679 	spin_unlock(&qdev->hw_lock);
680 	return var;
681 }
682 
683 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
684 {
685 	int i;
686 	for (i = 0; i < qdev->intr_count; i++) {
687 		/* The enable call does an atomic_dec_and_test
688 		 * and enables only if the result is zero.
689 		 * So we precharge it here.
690 		 */
691 		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
692 			i == 0))
693 			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
694 		ql_enable_completion_interrupt(qdev, i);
695 	}
696 
697 }
698 
699 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
700 {
701 	int status, i;
702 	u16 csum = 0;
703 	__le16 *flash = (__le16 *)&qdev->flash;
704 
705 	status = strncmp((char *)&qdev->flash, str, 4);
706 	if (status) {
707 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
708 		return	status;
709 	}
710 
711 	for (i = 0; i < size; i++)
712 		csum += le16_to_cpu(*flash++);
713 
714 	if (csum)
715 		netif_err(qdev, ifup, qdev->ndev,
716 			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);
717 
718 	return csum;
719 }
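
/* ql_validate_flash() returns 0 only when the signature matches and the
 * 16-bit words of the image sum to zero; otherwise the non-zero checksum
 * residue (or the strncmp() result for a bad signature) is returned.
 */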
720 
721 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
722 {
723 	int status = 0;
724 	/* wait for reg to come ready */
725 	status = ql_wait_reg_rdy(qdev,
726 			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
727 	if (status)
728 		goto exit;
729 	/* set up for reg read */
730 	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
731 	/* wait for reg to come ready */
732 	status = ql_wait_reg_rdy(qdev,
733 			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
734 	if (status)
735 		goto exit;
736 	 /* This data is stored on flash as an array of
737 	 * __le32.  Since ql_read32() returns cpu endian
738 	 * we need to swap it back.
739 	 */
740 	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
741 exit:
742 	return status;
743 }
744 
745 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
746 {
747 	u32 i, size;
748 	int status;
749 	__le32 *p = (__le32 *)&qdev->flash;
750 	u32 offset;
751 	u8 mac_addr[6];
752 
753 	/* Get flash offset for function and adjust
754 	 * for dword access.
755 	 */
756 	if (!qdev->port)
757 		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
758 	else
759 		offset = FUNC1_FLASH_OFFSET / sizeof(u32);
760 
761 	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
762 		return -ETIMEDOUT;
763 
764 	size = sizeof(struct flash_params_8000) / sizeof(u32);
765 	for (i = 0; i < size; i++, p++) {
766 		status = ql_read_flash_word(qdev, i+offset, p);
767 		if (status) {
768 			netif_err(qdev, ifup, qdev->ndev,
769 				  "Error reading flash.\n");
770 			goto exit;
771 		}
772 	}
773 
774 	status = ql_validate_flash(qdev,
775 			sizeof(struct flash_params_8000) / sizeof(u16),
776 			"8000");
777 	if (status) {
778 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
779 		status = -EINVAL;
780 		goto exit;
781 	}
782 
783 	/* Extract either manufacturer or BOFM modified
784 	 * MAC address.
785 	 */
786 	if (qdev->flash.flash_params_8000.data_type1 == 2)
787 		memcpy(mac_addr,
788 			qdev->flash.flash_params_8000.mac_addr1,
789 			qdev->ndev->addr_len);
790 	else
791 		memcpy(mac_addr,
792 			qdev->flash.flash_params_8000.mac_addr,
793 			qdev->ndev->addr_len);
794 
795 	if (!is_valid_ether_addr(mac_addr)) {
796 		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
797 		status = -EINVAL;
798 		goto exit;
799 	}
800 
801 	memcpy(qdev->ndev->dev_addr,
802 		mac_addr,
803 		qdev->ndev->addr_len);
804 
805 exit:
806 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
807 	return status;
808 }
809 
810 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
811 {
812 	int i;
813 	int status;
814 	__le32 *p = (__le32 *)&qdev->flash;
815 	u32 offset = 0;
816 	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
817 
818 	/* Second function's parameters follow the first
819 	 * function's.
820 	 */
821 	if (qdev->port)
822 		offset = size;
823 
824 	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
825 		return -ETIMEDOUT;
826 
827 	for (i = 0; i < size; i++, p++) {
828 		status = ql_read_flash_word(qdev, i+offset, p);
829 		if (status) {
830 			netif_err(qdev, ifup, qdev->ndev,
831 				  "Error reading flash.\n");
832 			goto exit;
833 		}
834 
835 	}
836 
837 	status = ql_validate_flash(qdev,
838 			sizeof(struct flash_params_8012) / sizeof(u16),
839 			"8012");
840 	if (status) {
841 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
842 		status = -EINVAL;
843 		goto exit;
844 	}
845 
846 	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
847 		status = -EINVAL;
848 		goto exit;
849 	}
850 
851 	memcpy(qdev->ndev->dev_addr,
852 		qdev->flash.flash_params_8012.mac_addr,
853 		qdev->ndev->addr_len);
854 
855 exit:
856 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
857 	return status;
858 }
859 
860 /* xgmac registers are located behind the xgmac_addr and xgmac_data
861  * register pair.  Each read/write requires us to wait for the ready
862  * bit before reading/writing the data.
863  */
864 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
865 {
866 	int status;
867 	/* wait for reg to come ready */
868 	status = ql_wait_reg_rdy(qdev,
869 			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
870 	if (status)
871 		return status;
872 	/* write the data to the data reg */
873 	ql_write32(qdev, XGMAC_DATA, data);
874 	/* trigger the write */
875 	ql_write32(qdev, XGMAC_ADDR, reg);
876 	return status;
877 }
878 
879 /* xgmac registers are located behind the xgmac_addr and xgmac_data
880  * register pair.  Each read/write requires us to wait for the ready
881  * bit before reading/writing the data.
882  */
883 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
884 {
885 	int status = 0;
886 	/* wait for reg to come ready */
887 	status = ql_wait_reg_rdy(qdev,
888 			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
889 	if (status)
890 		goto exit;
891 	/* set up for reg read */
892 	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
893 	/* wait for reg to come ready */
894 	status = ql_wait_reg_rdy(qdev,
895 			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
896 	if (status)
897 		goto exit;
898 	/* get the data */
899 	*data = ql_read32(qdev, XGMAC_DATA);
900 exit:
901 	return status;
902 }
903 
904 /* This is used for reading the 64-bit statistics regs. */
905 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
906 {
907 	int status = 0;
908 	u32 hi = 0;
909 	u32 lo = 0;
910 
911 	status = ql_read_xgmac_reg(qdev, reg, &lo);
912 	if (status)
913 		goto exit;
914 
915 	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
916 	if (status)
917 		goto exit;
918 
919 	*data = (u64) lo | ((u64) hi << 32);
920 
921 exit:
922 	return status;
923 }
924 
925 static int ql_8000_port_initialize(struct ql_adapter *qdev)
926 {
927 	int status;
928 	/*
929 	 * Get MPI firmware version for driver banner
930 	 * and ethtool info.
931 	 */
932 	status = ql_mb_about_fw(qdev);
933 	if (status)
934 		goto exit;
935 	status = ql_mb_get_fw_state(qdev);
936 	if (status)
937 		goto exit;
938 	/* Wake up a worker to get/set the TX/RX frame sizes. */
939 	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
940 exit:
941 	return status;
942 }
943 
944 /* Take the MAC Core out of reset.
945  * Enable statistics counting.
946  * Take the transmitter/receiver out of reset.
947  * This functionality may be done in the MPI firmware at a
948  * later date.
949  */
950 static int ql_8012_port_initialize(struct ql_adapter *qdev)
951 {
952 	int status = 0;
953 	u32 data;
954 
955 	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
956 		/* Another function has the semaphore, so
957 		 * wait for the port init bit to come ready.
958 		 */
959 		netif_info(qdev, link, qdev->ndev,
960 			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
961 		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
962 		if (status) {
963 			netif_crit(qdev, link, qdev->ndev,
964 				   "Port initialize timed out.\n");
965 		}
966 		return status;
967 	}
968 
969 	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
970 	/* Set the core reset. */
971 	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
972 	if (status)
973 		goto end;
974 	data |= GLOBAL_CFG_RESET;
975 	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
976 	if (status)
977 		goto end;
978 
979 	/* Clear the core reset and turn on jumbo for receiver. */
980 	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
981 	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
982 	data |= GLOBAL_CFG_TX_STAT_EN;
983 	data |= GLOBAL_CFG_RX_STAT_EN;
984 	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
985 	if (status)
986 		goto end;
987 
988 	/* Enable the transmitter and clear its reset. */
989 	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
990 	if (status)
991 		goto end;
992 	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
993 	data |= TX_CFG_EN;	/* Enable the transmitter. */
994 	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
995 	if (status)
996 		goto end;
997 
998 	/* Enable the receiver and clear its reset. */
999 	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1000 	if (status)
1001 		goto end;
1002 	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
1003 	data |= RX_CFG_EN;	/* Enable the receiver. */
1004 	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1005 	if (status)
1006 		goto end;
1007 
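	/* 0x2580 is 9600 decimal - presumably the largest (jumbo) frame size
	 * being programmed into the TX/RX parameter registers below.
	 */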
1008 	/* Turn on jumbo. */
1009 	status =
1010 	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1011 	if (status)
1012 		goto end;
1013 	status =
1014 	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1015 	if (status)
1016 		goto end;
1017 
1018 	/* Signal to the world that the port is enabled.        */
1019 	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1020 end:
1021 	ql_sem_unlock(qdev, qdev->xg_sem_mask);
1022 	return status;
1023 }
1024 
1025 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1026 {
1027 	return PAGE_SIZE << qdev->lbq_buf_order;
1028 }
1029 
1030 /* Get the next large buffer. */
1031 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1032 {
1033 	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1034 	rx_ring->lbq_curr_idx++;
1035 	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1036 		rx_ring->lbq_curr_idx = 0;
1037 	rx_ring->lbq_free_cnt++;
1038 	return lbq_desc;
1039 }
1040 
1041 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1042 		struct rx_ring *rx_ring)
1043 {
1044 	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1045 
1046 	pci_dma_sync_single_for_cpu(qdev->pdev,
1047 					dma_unmap_addr(lbq_desc, mapaddr),
1048 				    rx_ring->lbq_buf_size,
1049 					PCI_DMA_FROMDEVICE);
1050 
1051 	/* If it's the last chunk of our master page then
1052 	 * we unmap it.
1053 	 */
1054 	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1055 					== ql_lbq_block_size(qdev))
1056 		pci_unmap_page(qdev->pdev,
1057 				lbq_desc->p.pg_chunk.map,
1058 				ql_lbq_block_size(qdev),
1059 				PCI_DMA_FROMDEVICE);
1060 	return lbq_desc;
1061 }
1062 
1063 /* Get the next small buffer. */
1064 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1065 {
1066 	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1067 	rx_ring->sbq_curr_idx++;
1068 	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1069 		rx_ring->sbq_curr_idx = 0;
1070 	rx_ring->sbq_free_cnt++;
1071 	return sbq_desc;
1072 }
1073 
1074 /* Update an rx ring index. */
1075 static void ql_update_cq(struct rx_ring *rx_ring)
1076 {
1077 	rx_ring->cnsmr_idx++;
1078 	rx_ring->curr_entry++;
1079 	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1080 		rx_ring->cnsmr_idx = 0;
1081 		rx_ring->curr_entry = rx_ring->cq_base;
1082 	}
1083 }
1084 
1085 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1086 {
1087 	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1088 }
1089 
1090 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1091 						struct bq_desc *lbq_desc)
1092 {
1093 	if (!rx_ring->pg_chunk.page) {
1094 		u64 map;
1095 		rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC,
1096 						qdev->lbq_buf_order);
1097 		if (unlikely(!rx_ring->pg_chunk.page)) {
1098 			netif_err(qdev, drv, qdev->ndev,
1099 				  "page allocation failed.\n");
1100 			return -ENOMEM;
1101 		}
1102 		rx_ring->pg_chunk.offset = 0;
1103 		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1104 					0, ql_lbq_block_size(qdev),
1105 					PCI_DMA_FROMDEVICE);
1106 		if (pci_dma_mapping_error(qdev->pdev, map)) {
1107 			__free_pages(rx_ring->pg_chunk.page,
1108 					qdev->lbq_buf_order);
1109 			rx_ring->pg_chunk.page = NULL;
1110 			netif_err(qdev, drv, qdev->ndev,
1111 				  "PCI mapping failed.\n");
1112 			return -ENOMEM;
1113 		}
1114 		rx_ring->pg_chunk.map = map;
1115 		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1116 	}
1117 
1118 	/* Copy the current master pg_chunk info
1119 	 * to the current descriptor.
1120 	 */
1121 	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1122 
1123 	/* Adjust the master page chunk for next
1124 	 * buffer get.
1125 	 */
1126 	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1127 	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1128 		rx_ring->pg_chunk.page = NULL;
1129 		lbq_desc->p.pg_chunk.last_flag = 1;
1130 	} else {
1131 		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1132 		get_page(rx_ring->pg_chunk.page);
1133 		lbq_desc->p.pg_chunk.last_flag = 0;
1134 	}
1135 	return 0;
1136 }
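
/* Each master page of order lbq_buf_order is carved into lbq_buf_size
 * chunks.  get_page() above takes an extra reference for every chunk
 * except the last one handed out, so the page is only freed once the
 * final consumer drops its reference.
 */
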
1137 /* Process (refill) a large buffer queue. */
1138 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1139 {
1140 	u32 clean_idx = rx_ring->lbq_clean_idx;
1141 	u32 start_idx = clean_idx;
1142 	struct bq_desc *lbq_desc;
1143 	u64 map;
1144 	int i;
1145 
1146 	while (rx_ring->lbq_free_cnt > 32) {
1147 		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1148 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1149 				     "lbq: try cleaning clean_idx = %d.\n",
1150 				     clean_idx);
1151 			lbq_desc = &rx_ring->lbq[clean_idx];
1152 			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1153 				rx_ring->lbq_clean_idx = clean_idx;
1154 				netif_err(qdev, ifup, qdev->ndev,
1155 						"Could not get a page chunk, i=%d, clean_idx =%d .\n",
1156 						i, clean_idx);
1157 				return;
1158 			}
1159 
1160 			map = lbq_desc->p.pg_chunk.map +
1161 				lbq_desc->p.pg_chunk.offset;
1162 			dma_unmap_addr_set(lbq_desc, mapaddr, map);
1163 			dma_unmap_len_set(lbq_desc, maplen,
1164 					rx_ring->lbq_buf_size);
1165 			*lbq_desc->addr = cpu_to_le64(map);
1166 
1167 			pci_dma_sync_single_for_device(qdev->pdev, map,
1168 						rx_ring->lbq_buf_size,
1169 						PCI_DMA_FROMDEVICE);
1170 			clean_idx++;
1171 			if (clean_idx == rx_ring->lbq_len)
1172 				clean_idx = 0;
1173 		}
1174 
1175 		rx_ring->lbq_clean_idx = clean_idx;
1176 		rx_ring->lbq_prod_idx += 16;
1177 		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1178 			rx_ring->lbq_prod_idx = 0;
1179 		rx_ring->lbq_free_cnt -= 16;
1180 	}
1181 
1182 	if (start_idx != clean_idx) {
1183 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1184 			     "lbq: updating prod idx = %d.\n",
1185 			     rx_ring->lbq_prod_idx);
1186 		ql_write_db_reg(rx_ring->lbq_prod_idx,
1187 				rx_ring->lbq_prod_idx_db_reg);
1188 	}
1189 }
1190 
1191 /* Process (refill) a small buffer queue. */
1192 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1193 {
1194 	u32 clean_idx = rx_ring->sbq_clean_idx;
1195 	u32 start_idx = clean_idx;
1196 	struct bq_desc *sbq_desc;
1197 	u64 map;
1198 	int i;
1199 
1200 	while (rx_ring->sbq_free_cnt > 16) {
1201 		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1202 			sbq_desc = &rx_ring->sbq[clean_idx];
1203 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1204 				     "sbq: try cleaning clean_idx = %d.\n",
1205 				     clean_idx);
1206 			if (sbq_desc->p.skb == NULL) {
1207 				netif_printk(qdev, rx_status, KERN_DEBUG,
1208 					     qdev->ndev,
1209 					     "sbq: getting new skb for index %d.\n",
1210 					     sbq_desc->index);
1211 				sbq_desc->p.skb =
1212 				    netdev_alloc_skb(qdev->ndev,
1213 						     SMALL_BUFFER_SIZE);
1214 				if (sbq_desc->p.skb == NULL) {
1215 					rx_ring->sbq_clean_idx = clean_idx;
1216 					return;
1217 				}
1218 				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1219 				map = pci_map_single(qdev->pdev,
1220 						     sbq_desc->p.skb->data,
1221 						     rx_ring->sbq_buf_size,
1222 						     PCI_DMA_FROMDEVICE);
1223 				if (pci_dma_mapping_error(qdev->pdev, map)) {
1224 					netif_err(qdev, ifup, qdev->ndev,
1225 						  "PCI mapping failed.\n");
1226 					rx_ring->sbq_clean_idx = clean_idx;
1227 					dev_kfree_skb_any(sbq_desc->p.skb);
1228 					sbq_desc->p.skb = NULL;
1229 					return;
1230 				}
1231 				dma_unmap_addr_set(sbq_desc, mapaddr, map);
1232 				dma_unmap_len_set(sbq_desc, maplen,
1233 						  rx_ring->sbq_buf_size);
1234 				*sbq_desc->addr = cpu_to_le64(map);
1235 			}
1236 
1237 			clean_idx++;
1238 			if (clean_idx == rx_ring->sbq_len)
1239 				clean_idx = 0;
1240 		}
1241 		rx_ring->sbq_clean_idx = clean_idx;
1242 		rx_ring->sbq_prod_idx += 16;
1243 		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1244 			rx_ring->sbq_prod_idx = 0;
1245 		rx_ring->sbq_free_cnt -= 16;
1246 	}
1247 
1248 	if (start_idx != clean_idx) {
1249 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1250 			     "sbq: updating prod idx = %d.\n",
1251 			     rx_ring->sbq_prod_idx);
1252 		ql_write_db_reg(rx_ring->sbq_prod_idx,
1253 				rx_ring->sbq_prod_idx_db_reg);
1254 	}
1255 }
1256 
1257 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1258 				    struct rx_ring *rx_ring)
1259 {
1260 	ql_update_sbq(qdev, rx_ring);
1261 	ql_update_lbq(qdev, rx_ring);
1262 }
1263 
1264 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1265  * fails at some stage, or from the interrupt when a tx completes.
1266  */
1267 static void ql_unmap_send(struct ql_adapter *qdev,
1268 			  struct tx_ring_desc *tx_ring_desc, int mapped)
1269 {
1270 	int i;
1271 	for (i = 0; i < mapped; i++) {
1272 		if (i == 0 || (i == 7 && mapped > 7)) {
1273 			/*
1274 			 * Unmap the skb->data area, or the
1275 			 * external sglist (AKA the Outbound
1276 			 * Address List (OAL)).
1277 			 * If it's the zeroeth element, then it's
1278 			 * the skb->data area.  If it's the 7th
1279 			 * element and there are more than 6 frags,
1280 			 * then it's an OAL.
1281 			 */
1282 			if (i == 7) {
1283 				netif_printk(qdev, tx_done, KERN_DEBUG,
1284 					     qdev->ndev,
1285 					     "unmapping OAL area.\n");
1286 			}
1287 			pci_unmap_single(qdev->pdev,
1288 					 dma_unmap_addr(&tx_ring_desc->map[i],
1289 							mapaddr),
1290 					 dma_unmap_len(&tx_ring_desc->map[i],
1291 						       maplen),
1292 					 PCI_DMA_TODEVICE);
1293 		} else {
1294 			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1295 				     "unmapping frag %d.\n", i);
1296 			pci_unmap_page(qdev->pdev,
1297 				       dma_unmap_addr(&tx_ring_desc->map[i],
1298 						      mapaddr),
1299 				       dma_unmap_len(&tx_ring_desc->map[i],
1300 						     maplen), PCI_DMA_TODEVICE);
1301 		}
1302 	}
1303 
1304 }
1305 
1306 /* Map the buffers for this transmit.  This will return
1307  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1308  */
1309 static int ql_map_send(struct ql_adapter *qdev,
1310 		       struct ob_mac_iocb_req *mac_iocb_ptr,
1311 		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1312 {
1313 	int len = skb_headlen(skb);
1314 	dma_addr_t map;
1315 	int frag_idx, err, map_idx = 0;
1316 	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1317 	int frag_cnt = skb_shinfo(skb)->nr_frags;
1318 
1319 	if (frag_cnt) {
1320 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1321 			     "frag_cnt = %d.\n", frag_cnt);
1322 	}
1323 	/*
1324 	 * Map the skb buffer first.
1325 	 */
1326 	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1327 
1328 	err = pci_dma_mapping_error(qdev->pdev, map);
1329 	if (err) {
1330 		netif_err(qdev, tx_queued, qdev->ndev,
1331 			  "PCI mapping failed with error: %d\n", err);
1332 
1333 		return NETDEV_TX_BUSY;
1334 	}
1335 
1336 	tbd->len = cpu_to_le32(len);
1337 	tbd->addr = cpu_to_le64(map);
1338 	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1339 	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1340 	map_idx++;
1341 
1342 	/*
1343 	 * This loop fills the remainder of the 8 address descriptors
1344 	 * in the IOCB.  If there are more than 7 fragments, then the
1345 	 * eighth address desc will point to an external list (OAL).
1346 	 * When this happens, the remainder of the frags will be stored
1347 	 * in this list.
1348 	 */
1349 	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1350 		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1351 		tbd++;
1352 		if (frag_idx == 6 && frag_cnt > 7) {
1353 			/* Let's tack on an sglist.
1354 			 * Our control block will now
1355 			 * look like this:
1356 			 * iocb->seg[0] = skb->data
1357 			 * iocb->seg[1] = frag[0]
1358 			 * iocb->seg[2] = frag[1]
1359 			 * iocb->seg[3] = frag[2]
1360 			 * iocb->seg[4] = frag[3]
1361 			 * iocb->seg[5] = frag[4]
1362 			 * iocb->seg[6] = frag[5]
1363 			 * iocb->seg[7] = ptr to OAL (external sglist)
1364 			 * oal->seg[0] = frag[6]
1365 			 * oal->seg[1] = frag[7]
1366 			 * oal->seg[2] = frag[8]
1367 			 * oal->seg[3] = frag[9]
1368 			 * oal->seg[4] = frag[10]
1369 			 *      etc...
1370 			 */
1371 			/* Tack on the OAL in the eighth segment of IOCB. */
1372 			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1373 					     sizeof(struct oal),
1374 					     PCI_DMA_TODEVICE);
1375 			err = pci_dma_mapping_error(qdev->pdev, map);
1376 			if (err) {
1377 				netif_err(qdev, tx_queued, qdev->ndev,
1378 					  "PCI mapping outbound address list with error: %d\n",
1379 					  err);
1380 				goto map_error;
1381 			}
1382 
1383 			tbd->addr = cpu_to_le64(map);
1384 			/*
1385 			 * The length is the number of fragments
1386 			 * that remain to be mapped times the length
1387 			 * of our sglist (OAL).
1388 			 */
1389 			tbd->len =
1390 			    cpu_to_le32((sizeof(struct tx_buf_desc) *
1391 					 (frag_cnt - frag_idx)) | TX_DESC_C);
1392 			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1393 					   map);
1394 			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1395 					  sizeof(struct oal));
1396 			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1397 			map_idx++;
1398 		}
1399 
1400 		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1401 				       DMA_TO_DEVICE);
1402 
1403 		err = dma_mapping_error(&qdev->pdev->dev, map);
1404 		if (err) {
1405 			netif_err(qdev, tx_queued, qdev->ndev,
1406 				  "PCI mapping frags failed with error: %d.\n",
1407 				  err);
1408 			goto map_error;
1409 		}
1410 
1411 		tbd->addr = cpu_to_le64(map);
1412 		tbd->len = cpu_to_le32(skb_frag_size(frag));
1413 		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1414 		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1415 				  skb_frag_size(frag));
1416 
1417 	}
1418 	/* Save the number of segments we've mapped. */
1419 	tx_ring_desc->map_cnt = map_idx;
1420 	/* Terminate the last segment. */
1421 	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1422 	return NETDEV_TX_OK;
1423 
1424 map_error:
1425 	/*
1426 	 * If the first frag mapping failed, then map_idx will be 1,
1427 	 * which unmaps only the skb->data area.  Otherwise
1428 	 * we pass in the number of mappings that succeeded
1429 	 * so they can be unmapped.
1430 	 */
1431 	ql_unmap_send(qdev, tx_ring_desc, map_idx);
1432 	return NETDEV_TX_BUSY;
1433 }
1434 
1435 /* Categorizing receive firmware frame errors */
1436 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1437 				 struct rx_ring *rx_ring)
1438 {
1439 	struct nic_stats *stats = &qdev->nic_stats;
1440 
1441 	stats->rx_err_count++;
1442 	rx_ring->rx_errors++;
1443 
1444 	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1445 	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1446 		stats->rx_code_err++;
1447 		break;
1448 	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1449 		stats->rx_oversize_err++;
1450 		break;
1451 	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1452 		stats->rx_undersize_err++;
1453 		break;
1454 	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1455 		stats->rx_preamble_err++;
1456 		break;
1457 	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1458 		stats->rx_frame_len_err++;
1459 		break;
1460 	case IB_MAC_IOCB_RSP_ERR_CRC:
1461 		stats->rx_crc_err++;
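		/* Fall through - the default case below only breaks. */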
1462 	default:
1463 		break;
1464 	}
1465 }
1466 
1467 /**
1468  * ql_update_mac_hdr_len - helper routine to update the mac header length
1469  * based on vlan tags if present
1470  */
1471 static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1472 				  struct ib_mac_iocb_rsp *ib_mac_rsp,
1473 				  void *page, size_t *len)
1474 {
1475 	u16 *tags;
1476 
1477 	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1478 		return;
1479 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1480 		tags = (u16 *)page;
1481 		/* Look for stacked vlan tags in ethertype field */
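		/* tags[6] is the u16 at byte offset 12 (the outer
		 * ethertype/TPID); tags[8] is at byte offset 16, which
		 * holds the inner TPID when an outer 802.1Q tag is present.
		 */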
1482 		if (tags[6] == ETH_P_8021Q &&
1483 		    tags[8] == ETH_P_8021Q)
1484 			*len += 2 * VLAN_HLEN;
1485 		else
1486 			*len += VLAN_HLEN;
1487 	}
1488 }
1489 
1490 /* Process an inbound completion from an rx ring. */
1491 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1492 					struct rx_ring *rx_ring,
1493 					struct ib_mac_iocb_rsp *ib_mac_rsp,
1494 					u32 length,
1495 					u16 vlan_id)
1496 {
1497 	struct sk_buff *skb;
1498 	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1499 	struct napi_struct *napi = &rx_ring->napi;
1500 
1501 	/* Frame error, so drop the packet. */
1502 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1503 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1504 		put_page(lbq_desc->p.pg_chunk.page);
1505 		return;
1506 	}
1507 	napi->dev = qdev->ndev;
1508 
1509 	skb = napi_get_frags(napi);
1510 	if (!skb) {
1511 		netif_err(qdev, drv, qdev->ndev,
1512 			  "Couldn't get an skb, exiting.\n");
1513 		rx_ring->rx_dropped++;
1514 		put_page(lbq_desc->p.pg_chunk.page);
1515 		return;
1516 	}
1517 	prefetch(lbq_desc->p.pg_chunk.va);
1518 	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1519 			     lbq_desc->p.pg_chunk.page,
1520 			     lbq_desc->p.pg_chunk.offset,
1521 			     length);
1522 
1523 	skb->len += length;
1524 	skb->data_len += length;
1525 	skb->truesize += length;
1526 	skb_shinfo(skb)->nr_frags++;
1527 
1528 	rx_ring->rx_packets++;
1529 	rx_ring->rx_bytes += length;
1530 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1531 	skb_record_rx_queue(skb, rx_ring->cq_id);
1532 	if (vlan_id != 0xffff)
1533 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1534 	napi_gro_frags(napi);
1535 }
1536 
1537 /* Process an inbound completion from an rx ring. */
1538 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1539 					struct rx_ring *rx_ring,
1540 					struct ib_mac_iocb_rsp *ib_mac_rsp,
1541 					u32 length,
1542 					u16 vlan_id)
1543 {
1544 	struct net_device *ndev = qdev->ndev;
1545 	struct sk_buff *skb = NULL;
1546 	void *addr;
1547 	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1548 	struct napi_struct *napi = &rx_ring->napi;
1549 	size_t hlen = ETH_HLEN;
1550 
1551 	skb = netdev_alloc_skb(ndev, length);
1552 	if (!skb) {
1553 		rx_ring->rx_dropped++;
1554 		put_page(lbq_desc->p.pg_chunk.page);
1555 		return;
1556 	}
1557 
1558 	addr = lbq_desc->p.pg_chunk.va;
1559 	prefetch(addr);
1560 
1561 	/* Frame error, so drop the packet. */
1562 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1563 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1564 		goto err_out;
1565 	}
1566 
1567 	/* Update the MAC header length*/
1568 	ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1569 
1570 	/* The max framesize filter on this chip is set higher than
1571 	 * MTU since FCoE uses 2k frames.
1572 	 */
1573 	if (skb->len > ndev->mtu + hlen) {
1574 		netif_err(qdev, drv, qdev->ndev,
1575 			  "Segment too large, dropping.\n");
1576 		rx_ring->rx_dropped++;
1577 		goto err_out;
1578 	}
1579 	skb_put_data(skb, addr, hlen);
1580 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1581 		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1582 		     length);
1583 	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1584 				lbq_desc->p.pg_chunk.offset + hlen,
1585 				length - hlen);
1586 	skb->len += length - hlen;
1587 	skb->data_len += length - hlen;
1588 	skb->truesize += length - hlen;
1589 
1590 	rx_ring->rx_packets++;
1591 	rx_ring->rx_bytes += skb->len;
1592 	skb->protocol = eth_type_trans(skb, ndev);
1593 	skb_checksum_none_assert(skb);
1594 
1595 	if ((ndev->features & NETIF_F_RXCSUM) &&
1596 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1597 		/* TCP frame. */
1598 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1599 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1600 				     "TCP checksum done!\n");
1601 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1602 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1603 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1604 			/* Unfragmented ipv4 UDP frame. */
1605 			struct iphdr *iph =
1606 				(struct iphdr *)((u8 *)addr + hlen);
1607 			if (!(iph->frag_off &
1608 				htons(IP_MF|IP_OFFSET))) {
1609 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1610 				netif_printk(qdev, rx_status, KERN_DEBUG,
1611 					     qdev->ndev,
1612 					     "UDP checksum done!\n");
1613 			}
1614 		}
1615 	}
1616 
1617 	skb_record_rx_queue(skb, rx_ring->cq_id);
1618 	if (vlan_id != 0xffff)
1619 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1620 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1621 		napi_gro_receive(napi, skb);
1622 	else
1623 		netif_receive_skb(skb);
1624 	return;
1625 err_out:
1626 	dev_kfree_skb_any(skb);
1627 	put_page(lbq_desc->p.pg_chunk.page);
1628 }
1629 
1630 /* Process an inbound completion from an rx ring. */
1631 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1632 					struct rx_ring *rx_ring,
1633 					struct ib_mac_iocb_rsp *ib_mac_rsp,
1634 					u32 length,
1635 					u16 vlan_id)
1636 {
1637 	struct net_device *ndev = qdev->ndev;
1638 	struct sk_buff *skb = NULL;
1639 	struct sk_buff *new_skb = NULL;
1640 	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1641 
1642 	skb = sbq_desc->p.skb;
1643 	/* Allocate new_skb and copy */
1644 	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1645 	if (new_skb == NULL) {
1646 		rx_ring->rx_dropped++;
1647 		return;
1648 	}
1649 	skb_reserve(new_skb, NET_IP_ALIGN);
1650 
1651 	pci_dma_sync_single_for_cpu(qdev->pdev,
1652 				    dma_unmap_addr(sbq_desc, mapaddr),
1653 				    dma_unmap_len(sbq_desc, maplen),
1654 				    PCI_DMA_FROMDEVICE);
1655 
1656 	skb_put_data(new_skb, skb->data, length);
1657 
1658 	pci_dma_sync_single_for_device(qdev->pdev,
1659 				       dma_unmap_addr(sbq_desc, mapaddr),
1660 				       dma_unmap_len(sbq_desc, maplen),
1661 				       PCI_DMA_FROMDEVICE);
1662 	skb = new_skb;
1663 
1664 	/* Frame error, so drop the packet. */
1665 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1666 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1667 		dev_kfree_skb_any(skb);
1668 		return;
1669 	}
1670 
1671 	/* loopback self test for ethtool */
1672 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1673 		ql_check_lb_frame(qdev, skb);
1674 		dev_kfree_skb_any(skb);
1675 		return;
1676 	}
1677 
1678 	/* The max framesize filter on this chip is set higher than
1679 	 * MTU since FCoE uses 2k frames.
1680 	 */
1681 	if (skb->len > ndev->mtu + ETH_HLEN) {
1682 		dev_kfree_skb_any(skb);
1683 		rx_ring->rx_dropped++;
1684 		return;
1685 	}
1686 
1687 	prefetch(skb->data);
1688 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1689 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1690 			     "%s Multicast.\n",
1691 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1692 			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1693 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1694 			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1695 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1696 			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1697 	}
1698 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1699 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1700 			     "Promiscuous Packet.\n");
1701 
1702 	rx_ring->rx_packets++;
1703 	rx_ring->rx_bytes += skb->len;
1704 	skb->protocol = eth_type_trans(skb, ndev);
1705 	skb_checksum_none_assert(skb);
1706 
1707 	/* If rx checksum is on, and there are no
1708 	 * csum or frame errors.
1709 	 */
1710 	if ((ndev->features & NETIF_F_RXCSUM) &&
1711 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1712 		/* TCP frame. */
1713 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1714 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1715 				     "TCP checksum done!\n");
1716 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1717 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1718 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1719 			/* Unfragmented ipv4 UDP frame. */
1720 			struct iphdr *iph = (struct iphdr *) skb->data;
1721 			if (!(iph->frag_off &
1722 				htons(IP_MF|IP_OFFSET))) {
1723 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1724 				netif_printk(qdev, rx_status, KERN_DEBUG,
1725 					     qdev->ndev,
1726 					     "UDP checksum done!\n");
1727 			}
1728 		}
1729 	}
1730 
1731 	skb_record_rx_queue(skb, rx_ring->cq_id);
1732 	if (vlan_id != 0xffff)
1733 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1734 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1735 		napi_gro_receive(&rx_ring->napi, skb);
1736 	else
1737 		netif_receive_skb(skb);
1738 }
1739 
1740 static void ql_realign_skb(struct sk_buff *skb, int len)
1741 {
1742 	void *temp_addr = skb->data;
1743 
1744 	/* Undo the skb_reserve(skb,32) we did before
1745 	 * giving it to the hardware, and realign data on
1746 	 * a 2-byte boundary.
1747 	 */
1748 	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1749 	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1750 	memmove(skb->data, temp_addr, len);
1751 }
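
/* For illustration, assuming QLGE_SB_PAD is 32 (per the comment above) and
 * NET_IP_ALIGN is 2, the realignment moves data and tail back by 30 bytes:
 *
 *	before:  head  [32 byte pad][frame .........]
 *	after:   head  [ 2 byte pad][frame .........]
 *
 * which leaves the 14 byte Ethernet header at offset 2, so the IP header
 * that follows it ends up 4-byte aligned.
 */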
1752 
1753 /*
1754  * This function builds an skb for the given inbound
1755  * completion.  It will be rewritten for readability in the near
1756  * future, but for now it works well.
1757  */
1758 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1759 				       struct rx_ring *rx_ring,
1760 				       struct ib_mac_iocb_rsp *ib_mac_rsp)
1761 {
1762 	struct bq_desc *lbq_desc;
1763 	struct bq_desc *sbq_desc;
1764 	struct sk_buff *skb = NULL;
1765 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1766 	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1767 	size_t hlen = ETH_HLEN;
1768 
1769 	/*
1770 	 * Handle the header buffer if present.
1771 	 */
1772 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1773 	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1774 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1775 			     "Header of %d bytes in small buffer.\n", hdr_len);
1776 		/*
1777 		 * Headers fit nicely into a small buffer.
1778 		 */
1779 		sbq_desc = ql_get_curr_sbuf(rx_ring);
1780 		pci_unmap_single(qdev->pdev,
1781 				dma_unmap_addr(sbq_desc, mapaddr),
1782 				dma_unmap_len(sbq_desc, maplen),
1783 				PCI_DMA_FROMDEVICE);
1784 		skb = sbq_desc->p.skb;
1785 		ql_realign_skb(skb, hdr_len);
1786 		skb_put(skb, hdr_len);
1787 		sbq_desc->p.skb = NULL;
1788 	}
1789 
1790 	/*
1791 	 * Handle the data buffer(s).
1792 	 */
1793 	if (unlikely(!length)) {	/* Is there data too? */
1794 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1795 			     "No Data buffer in this packet.\n");
1796 		return skb;
1797 	}
1798 
1799 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1800 		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1801 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1802 				     "Headers in small, data of %d bytes in small, combine them.\n",
1803 				     length);
1804 			/*
1805 			 * Data is less than small buffer size so it's
1806 			 * stuffed in a small buffer.
1807 			 * For this case we append the data
1808 			 * from the "data" small buffer to the "header" small
1809 			 * buffer.
1810 			 */
1811 			sbq_desc = ql_get_curr_sbuf(rx_ring);
1812 			pci_dma_sync_single_for_cpu(qdev->pdev,
1813 						    dma_unmap_addr
1814 						    (sbq_desc, mapaddr),
1815 						    dma_unmap_len
1816 						    (sbq_desc, maplen),
1817 						    PCI_DMA_FROMDEVICE);
1818 			skb_put_data(skb, sbq_desc->p.skb->data, length);
1819 			pci_dma_sync_single_for_device(qdev->pdev,
1820 						       dma_unmap_addr
1821 						       (sbq_desc,
1822 							mapaddr),
1823 						       dma_unmap_len
1824 						       (sbq_desc,
1825 							maplen),
1826 						       PCI_DMA_FROMDEVICE);
1827 		} else {
1828 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1829 				     "%d bytes in a single small buffer.\n",
1830 				     length);
1831 			sbq_desc = ql_get_curr_sbuf(rx_ring);
1832 			skb = sbq_desc->p.skb;
1833 			ql_realign_skb(skb, length);
1834 			skb_put(skb, length);
1835 			pci_unmap_single(qdev->pdev,
1836 					 dma_unmap_addr(sbq_desc,
1837 							mapaddr),
1838 					 dma_unmap_len(sbq_desc,
1839 						       maplen),
1840 					 PCI_DMA_FROMDEVICE);
1841 			sbq_desc->p.skb = NULL;
1842 		}
1843 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1844 		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1845 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1846 				     "Header in small, %d bytes in large. Chain large to small!\n",
1847 				     length);
1848 			/*
1849 			 * The data is in a single large buffer.  We
1850 			 * chain it to the header buffer's skb and let
1851 			 * it rip.
1852 			 */
1853 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1854 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1855 				     "Chaining page at offset = %d, for %d bytes  to skb.\n",
1856 				     lbq_desc->p.pg_chunk.offset, length);
1857 			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1858 						lbq_desc->p.pg_chunk.offset,
1859 						length);
1860 			skb->len += length;
1861 			skb->data_len += length;
1862 			skb->truesize += length;
1863 		} else {
1864 			/*
1865 			 * The headers and data are in a single large buffer. We
1866 			 * copy it to a new skb and let it go. This can happen with
1867 			 * jumbo mtu on a non-TCP/UDP frame.
1868 			 */
1869 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1870 			skb = netdev_alloc_skb(qdev->ndev, length);
1871 			if (skb == NULL) {
1872 				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1873 					     "No skb available, drop the packet.\n");
1874 				return NULL;
1875 			}
1876 			pci_unmap_page(qdev->pdev,
1877 				       dma_unmap_addr(lbq_desc,
1878 						      mapaddr),
1879 				       dma_unmap_len(lbq_desc, maplen),
1880 				       PCI_DMA_FROMDEVICE);
1881 			skb_reserve(skb, NET_IP_ALIGN);
1882 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1883 				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1884 				     length);
1885 			skb_fill_page_desc(skb, 0,
1886 						lbq_desc->p.pg_chunk.page,
1887 						lbq_desc->p.pg_chunk.offset,
1888 						length);
1889 			skb->len += length;
1890 			skb->data_len += length;
1891 			skb->truesize += length;
1892 			ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1893 					      lbq_desc->p.pg_chunk.va,
1894 					      &hlen);
1895 			__pskb_pull_tail(skb, hlen);
1896 		}
1897 	} else {
1898 		/*
1899 		 * The data is in a chain of large buffers
1900 		 * pointed to by a small buffer.  We loop
1901 		 * thru and chain them to our small header
1902 		 * buffer's skb.
1903 		 * frags:  There are 18 max frags and our small
1904 		 *         buffer will hold 32 of them. The thing is,
1905 		 *         we'll use 3 max for our 9000 byte jumbo
1906 		 *         frames.  If the MTU goes up we could
1907 		 *          eventually be in trouble.
1908 		 */
1909 		int size, i = 0;
1910 		sbq_desc = ql_get_curr_sbuf(rx_ring);
1911 		pci_unmap_single(qdev->pdev,
1912 				 dma_unmap_addr(sbq_desc, mapaddr),
1913 				 dma_unmap_len(sbq_desc, maplen),
1914 				 PCI_DMA_FROMDEVICE);
1915 		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1916 			/*
1917 			 * This is a non-TCP/UDP IP frame, so
1918 			 * the headers aren't split into a small
1919 			 * buffer.  We have to use the small buffer
1920 			 * that contains our sg list as our skb to
1921 			 * send upstairs. Copy the sg list here to
1922 			 * a local buffer and use it to find the
1923 			 * pages to chain.
1924 			 */
1925 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1926 				     "%d bytes of headers & data in chain of large.\n",
1927 				     length);
1928 			skb = sbq_desc->p.skb;
1929 			sbq_desc->p.skb = NULL;
1930 			skb_reserve(skb, NET_IP_ALIGN);
1931 		}
1932 		do {
1933 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1934 			size = (length < rx_ring->lbq_buf_size) ? length :
1935 				rx_ring->lbq_buf_size;
1936 
1937 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938 				     "Adding page %d to skb for %d bytes.\n",
1939 				     i, size);
1940 			skb_fill_page_desc(skb, i,
1941 						lbq_desc->p.pg_chunk.page,
1942 						lbq_desc->p.pg_chunk.offset,
1943 						size);
1944 			skb->len += size;
1945 			skb->data_len += size;
1946 			skb->truesize += size;
1947 			length -= size;
1948 			i++;
1949 		} while (length > 0);
1950 		ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1951 				      &hlen);
1952 		__pskb_pull_tail(skb, hlen);
1953 	}
1954 	return skb;
1955 }
1956 
1957 /* Process an inbound completion from an rx ring. */
1958 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1959 				   struct rx_ring *rx_ring,
1960 				   struct ib_mac_iocb_rsp *ib_mac_rsp,
1961 				   u16 vlan_id)
1962 {
1963 	struct net_device *ndev = qdev->ndev;
1964 	struct sk_buff *skb = NULL;
1965 
1966 	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1967 
1968 	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1969 	if (unlikely(!skb)) {
1970 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1971 			     "No skb available, drop packet.\n");
1972 		rx_ring->rx_dropped++;
1973 		return;
1974 	}
1975 
1976 	/* Frame error, so drop the packet. */
1977 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1978 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1979 		dev_kfree_skb_any(skb);
1980 		return;
1981 	}
1982 
1983 	/* The max framesize filter on this chip is set higher than
1984 	 * MTU since FCoE uses 2k frames.
1985 	 */
1986 	if (skb->len > ndev->mtu + ETH_HLEN) {
1987 		dev_kfree_skb_any(skb);
1988 		rx_ring->rx_dropped++;
1989 		return;
1990 	}
1991 
1992 	/* loopback self test for ethtool */
1993 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1994 		ql_check_lb_frame(qdev, skb);
1995 		dev_kfree_skb_any(skb);
1996 		return;
1997 	}
1998 
1999 	prefetch(skb->data);
2000 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
2001 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
2002 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2003 			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
2004 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2005 			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
2006 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2007 			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
2008 		rx_ring->rx_multicast++;
2009 	}
2010 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
2011 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2012 			     "Promiscuous Packet.\n");
2013 	}
2014 
2015 	skb->protocol = eth_type_trans(skb, ndev);
2016 	skb_checksum_none_assert(skb);
2017 
2018 	/* If rx checksum is on, and there are no
2019 	 * csum or frame errors.
2020 	 */
2021 	if ((ndev->features & NETIF_F_RXCSUM) &&
2022 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2023 		/* TCP frame. */
2024 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2025 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2026 				     "TCP checksum done!\n");
2027 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2028 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2029 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2030 		/* Unfragmented ipv4 UDP frame. */
2031 			struct iphdr *iph = (struct iphdr *) skb->data;
2032 			if (!(iph->frag_off &
2033 				htons(IP_MF|IP_OFFSET))) {
2034 				skb->ip_summed = CHECKSUM_UNNECESSARY;
2035 				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2036 					     "TCP checksum done!\n");
2037 			}
2038 		}
2039 	}
2040 
2041 	rx_ring->rx_packets++;
2042 	rx_ring->rx_bytes += skb->len;
2043 	skb_record_rx_queue(skb, rx_ring->cq_id);
2044 	if (vlan_id != 0xffff)
2045 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2046 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2047 		napi_gro_receive(&rx_ring->napi, skb);
2048 	else
2049 		netif_receive_skb(skb);
2050 }
2051 
2052 /* Process an inbound completion from an rx ring. */
2053 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2054 					struct rx_ring *rx_ring,
2055 					struct ib_mac_iocb_rsp *ib_mac_rsp)
2056 {
2057 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2058 	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2059 			(qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
2060 			((le16_to_cpu(ib_mac_rsp->vlan_id) &
2061 			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2062 
2063 	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2064 
2065 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2066 		/* The data and headers are split into
2067 		 * separate buffers.
2068 		 */
2069 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2070 						vlan_id);
2071 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2072 		/* The data fit in a single small buffer.
2073 		 * Allocate a new skb, copy the data and
2074 		 * return the buffer to the free pool.
2075 		 */
2076 		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2077 						length, vlan_id);
2078 	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2079 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2080 		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2081 		/* TCP packet in a page chunk that's been checksummed.
2082 		 * Tack it on to our GRO skb and let it go.
2083 		 */
2084 		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2085 						length, vlan_id);
2086 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2087 		/* Non-TCP packet in a page chunk. Allocate an
2088 		 * skb, tack it on frags, and send it up.
2089 		 */
2090 		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2091 						length, vlan_id);
2092 	} else {
2093 		/* Non-TCP/UDP large frames that span multiple buffers
2094 		 * can be processed correctly by the split frame logic.
2095 		 */
2096 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2097 						vlan_id);
2098 	}
2099 
2100 	return (unsigned long)length;
2101 }
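
/* The vlan_id handed to the helpers above is either the tag masked out of
 * ib_mac_rsp->vlan_id (when IB_MAC_IOCB_RSP_V is set and CTAG_RX offload is
 * enabled) or the 0xffff sentinel, which the receive paths treat as "no tag
 * to put on the skb", e.g.:
 *
 *	if (vlan_id != 0xffff)
 *		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
 */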
2102 
2103 /* Process an outbound completion from an rx ring. */
2104 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2105 				   struct ob_mac_iocb_rsp *mac_rsp)
2106 {
2107 	struct tx_ring *tx_ring;
2108 	struct tx_ring_desc *tx_ring_desc;
2109 
2110 	QL_DUMP_OB_MAC_RSP(mac_rsp);
2111 	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2112 	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2113 	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2114 	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2115 	tx_ring->tx_packets++;
2116 	dev_kfree_skb(tx_ring_desc->skb);
2117 	tx_ring_desc->skb = NULL;
2118 
2119 	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2120 					OB_MAC_IOCB_RSP_S |
2121 					OB_MAC_IOCB_RSP_L |
2122 					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2123 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2124 			netif_warn(qdev, tx_done, qdev->ndev,
2125 				   "Total descriptor length did not match transfer length.\n");
2126 		}
2127 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2128 			netif_warn(qdev, tx_done, qdev->ndev,
2129 				   "Frame too short to be valid, not sent.\n");
2130 		}
2131 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2132 			netif_warn(qdev, tx_done, qdev->ndev,
2133 				   "Frame too long, but sent anyway.\n");
2134 		}
2135 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2136 			netif_warn(qdev, tx_done, qdev->ndev,
2137 				   "PCI backplane error. Frame not sent.\n");
2138 		}
2139 	}
2140 	atomic_inc(&tx_ring->tx_count);
2141 }
2142 
2143 /* Fire up a handler to reset the MPI processor. */
2144 void ql_queue_fw_error(struct ql_adapter *qdev)
2145 {
2146 	ql_link_off(qdev);
2147 	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2148 }
2149 
2150 void ql_queue_asic_error(struct ql_adapter *qdev)
2151 {
2152 	ql_link_off(qdev);
2153 	ql_disable_interrupts(qdev);
2154 	/* Clear adapter up bit to signal the recovery
2155 	 * process that it shouldn't kill the reset worker
2156 	 * thread
2157 	 */
2158 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
2159 	/* Set asic recovery bit to tell the reset process that we are
2160 	 * in fatal error recovery rather than a normal close
2161 	 */
2162 	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2163 	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2164 }
2165 
2166 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2167 				    struct ib_ae_iocb_rsp *ib_ae_rsp)
2168 {
2169 	switch (ib_ae_rsp->event) {
2170 	case MGMT_ERR_EVENT:
2171 		netif_err(qdev, rx_err, qdev->ndev,
2172 			  "Management Processor Fatal Error.\n");
2173 		ql_queue_fw_error(qdev);
2174 		return;
2175 
2176 	case CAM_LOOKUP_ERR_EVENT:
2177 		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2178 		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2179 		ql_queue_asic_error(qdev);
2180 		return;
2181 
2182 	case SOFT_ECC_ERROR_EVENT:
2183 		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2184 		ql_queue_asic_error(qdev);
2185 		break;
2186 
2187 	case PCI_ERR_ANON_BUF_RD:
2188 		netdev_err(qdev->ndev, "PCI error occurred when reading "
2189 					"anonymous buffers from rx_ring %d.\n",
2190 					ib_ae_rsp->q_id);
2191 		ql_queue_asic_error(qdev);
2192 		break;
2193 
2194 	default:
2195 		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2196 			  ib_ae_rsp->event);
2197 		ql_queue_asic_error(qdev);
2198 		break;
2199 	}
2200 }
2201 
2202 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2203 {
2204 	struct ql_adapter *qdev = rx_ring->qdev;
2205 	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2206 	struct ob_mac_iocb_rsp *net_rsp = NULL;
2207 	int count = 0;
2208 
2209 	struct tx_ring *tx_ring;
2210 	/* While there are entries in the completion queue. */
2211 	while (prod != rx_ring->cnsmr_idx) {
2212 
2213 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2214 			     "cq_id = %d, prod = %d, cnsmr = %d\n",
2215 			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2216 
2217 		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2218 		rmb();
2219 		switch (net_rsp->opcode) {
2220 
2221 		case OPCODE_OB_MAC_TSO_IOCB:
2222 		case OPCODE_OB_MAC_IOCB:
2223 			ql_process_mac_tx_intr(qdev, net_rsp);
2224 			break;
2225 		default:
2226 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2227 				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2228 				     net_rsp->opcode);
2229 		}
2230 		count++;
2231 		ql_update_cq(rx_ring);
2232 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2233 	}
2234 	if (!net_rsp)
2235 		return 0;
2236 	ql_write_cq_idx(rx_ring);
2237 	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2238 	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2239 		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2240 			/*
2241 			 * The queue got stopped because the tx_ring was full.
2242 			 * Wake it up, because it's now at least 25% empty.
2243 			 */
2244 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2245 	}
2246 
2247 	return count;
2248 }
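
/* The wake threshold above is a quarter of the work queue: tx_count is the
 * number of free tx descriptors (it starts at wq_len and qlge_send()
 * decrements it per frame), so, assuming for example wq_len == 128, a queue
 * that was stopped for lack of slots is only woken once completions have
 * freed more than 32 entries, which avoids bouncing the queue on every
 * single completion.
 */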
2249 
2250 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2251 {
2252 	struct ql_adapter *qdev = rx_ring->qdev;
2253 	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2254 	struct ql_net_rsp_iocb *net_rsp;
2255 	int count = 0;
2256 
2257 	/* While there are entries in the completion queue. */
2258 	while (prod != rx_ring->cnsmr_idx) {
2259 
2260 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2261 			     "cq_id = %d, prod = %d, cnsmr = %d\n",
2262 			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2263 
2264 		net_rsp = rx_ring->curr_entry;
2265 		rmb();
2266 		switch (net_rsp->opcode) {
2267 		case OPCODE_IB_MAC_IOCB:
2268 			ql_process_mac_rx_intr(qdev, rx_ring,
2269 					       (struct ib_mac_iocb_rsp *)
2270 					       net_rsp);
2271 			break;
2272 
2273 		case OPCODE_IB_AE_IOCB:
2274 			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2275 						net_rsp);
2276 			break;
2277 		default:
2278 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2279 				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2280 				     net_rsp->opcode);
2281 			break;
2282 		}
2283 		count++;
2284 		ql_update_cq(rx_ring);
2285 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2286 		if (count == budget)
2287 			break;
2288 	}
2289 	ql_update_buffer_queues(qdev, rx_ring);
2290 	ql_write_cq_idx(rx_ring);
2291 	return count;
2292 }
2293 
2294 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2295 {
2296 	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2297 	struct ql_adapter *qdev = rx_ring->qdev;
2298 	struct rx_ring *trx_ring;
2299 	int i, work_done = 0;
2300 	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2301 
2302 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2303 		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2304 
2305 	/* Service the TX rings first.  They start
2306 	 * right after the RSS rings. */
2307 	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2308 		trx_ring = &qdev->rx_ring[i];
2309 		/* If this TX completion ring belongs to this vector and
2310 		 * it's not empty then service it.
2311 		 */
2312 		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2313 			(ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2314 					trx_ring->cnsmr_idx)) {
2315 			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2316 				     "%s: Servicing TX completion ring %d.\n",
2317 				     __func__, trx_ring->cq_id);
2318 			ql_clean_outbound_rx_ring(trx_ring);
2319 		}
2320 	}
2321 
2322 	/*
2323 	 * Now service the RSS ring if it's active.
2324 	 */
2325 	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2326 					rx_ring->cnsmr_idx) {
2327 		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2328 			     "%s: Servicing RX completion ring %d.\n",
2329 			     __func__, rx_ring->cq_id);
2330 		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2331 	}
2332 
2333 	if (work_done < budget) {
2334 		napi_complete_done(napi, work_done);
2335 		ql_enable_completion_interrupt(qdev, rx_ring->irq);
2336 	}
2337 	return work_done;
2338 }
2339 
2340 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2341 {
2342 	struct ql_adapter *qdev = netdev_priv(ndev);
2343 
2344 	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2345 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2346 				 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2347 	} else {
2348 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2349 	}
2350 }
2351 
2352 /**
2353  * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2354  * based on the requested features, to enable/disable hardware vlan accel
2355  */
2356 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2357 					netdev_features_t features)
2358 {
2359 	struct ql_adapter *qdev = netdev_priv(ndev);
2360 	int status = 0;
2361 	bool need_restart = netif_running(ndev);
2362 
2363 	if (need_restart) {
2364 		status = ql_adapter_down(qdev);
2365 		if (status) {
2366 			netif_err(qdev, link, qdev->ndev,
2367 				  "Failed to bring down the adapter\n");
2368 			return status;
2369 		}
2370 	}
2371 
2372 	/* update the features with the requested change */
2373 	ndev->features = features;
2374 
2375 	if (need_restart) {
2376 		status = ql_adapter_up(qdev);
2377 		if (status) {
2378 			netif_err(qdev, link, qdev->ndev,
2379 				  "Failed to bring up the adapter\n");
2380 			return status;
2381 		}
2382 	}
2383 
2384 	return status;
2385 }
2386 
2387 static int qlge_set_features(struct net_device *ndev,
2388 	netdev_features_t features)
2389 {
2390 	netdev_features_t changed = ndev->features ^ features;
2391 	int err;
2392 
2393 	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2394 		/* Update the behavior of vlan accel in the adapter */
2395 		err = qlge_update_hw_vlan_features(ndev, features);
2396 		if (err)
2397 			return err;
2398 
2399 		qlge_vlan_mode(ndev, features);
2400 	}
2401 
2402 	return 0;
2403 }
2404 
2405 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2406 {
2407 	u32 enable_bit = MAC_ADDR_E;
2408 	int err;
2409 
2410 	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2411 				  MAC_ADDR_TYPE_VLAN, vid);
2412 	if (err)
2413 		netif_err(qdev, ifup, qdev->ndev,
2414 			  "Failed to init vlan address.\n");
2415 	return err;
2416 }
2417 
2418 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2419 {
2420 	struct ql_adapter *qdev = netdev_priv(ndev);
2421 	int status;
2422 	int err;
2423 
2424 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2425 	if (status)
2426 		return status;
2427 
2428 	err = __qlge_vlan_rx_add_vid(qdev, vid);
2429 	set_bit(vid, qdev->active_vlans);
2430 
2431 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2432 
2433 	return err;
2434 }
2435 
2436 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2437 {
2438 	u32 enable_bit = 0;
2439 	int err;
2440 
2441 	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2442 				  MAC_ADDR_TYPE_VLAN, vid);
2443 	if (err)
2444 		netif_err(qdev, ifup, qdev->ndev,
2445 			  "Failed to clear vlan address.\n");
2446 	return err;
2447 }
2448 
2449 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2450 {
2451 	struct ql_adapter *qdev = netdev_priv(ndev);
2452 	int status;
2453 	int err;
2454 
2455 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2456 	if (status)
2457 		return status;
2458 
2459 	err = __qlge_vlan_rx_kill_vid(qdev, vid);
2460 	clear_bit(vid, qdev->active_vlans);
2461 
2462 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2463 
2464 	return err;
2465 }
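
/* Both VLAN filter paths program the same CAM entry type: adding a VID
 * writes a MAC_ADDR_TYPE_VLAN entry with the enable bit (MAC_ADDR_E) set,
 * and killing it writes the entry back with the enable bit clear.  Both run
 * under the SEM_MAC_ADDR_MASK hardware semaphore, and active_vlans simply
 * mirrors the set of VIDs so qlge_restore_vlan() below can replay them
 * after the adapter has been reset.
 */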
2466 
2467 static void qlge_restore_vlan(struct ql_adapter *qdev)
2468 {
2469 	int status;
2470 	u16 vid;
2471 
2472 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2473 	if (status)
2474 		return;
2475 
2476 	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2477 		__qlge_vlan_rx_add_vid(qdev, vid);
2478 
2479 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2480 }
2481 
2482 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2483 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2484 {
2485 	struct rx_ring *rx_ring = dev_id;
2486 	napi_schedule(&rx_ring->napi);
2487 	return IRQ_HANDLED;
2488 }
2489 
2490 /* This handles a fatal error, MPI activity, and the default
2491  * rx_ring in an MSI-X multiple vector environment.
2492  * In an MSI/Legacy environment it also processes the rest
2493  * of the rx_rings.
2494  */
2495 static irqreturn_t qlge_isr(int irq, void *dev_id)
2496 {
2497 	struct rx_ring *rx_ring = dev_id;
2498 	struct ql_adapter *qdev = rx_ring->qdev;
2499 	struct intr_context *intr_context = &qdev->intr_context[0];
2500 	u32 var;
2501 	int work_done = 0;
2502 
2503 	spin_lock(&qdev->hw_lock);
2504 	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2505 		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2506 			     "Shared Interrupt, Not ours!\n");
2507 		spin_unlock(&qdev->hw_lock);
2508 		return IRQ_NONE;
2509 	}
2510 	spin_unlock(&qdev->hw_lock);
2511 
2512 	var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2513 
2514 	/*
2515 	 * Check for fatal error.
2516 	 */
2517 	if (var & STS_FE) {
2518 		ql_queue_asic_error(qdev);
2519 		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2520 		var = ql_read32(qdev, ERR_STS);
2521 		netdev_err(qdev->ndev, "Resetting chip. "
2522 					"Error Status Register = 0x%x\n", var);
2523 		return IRQ_HANDLED;
2524 	}
2525 
2526 	/*
2527 	 * Check MPI processor activity.
2528 	 */
2529 	if ((var & STS_PI) &&
2530 		(ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2531 		/*
2532 		 * We've got an async event or mailbox completion.
2533 		 * Handle it and clear the source of the interrupt.
2534 		 */
2535 		netif_err(qdev, intr, qdev->ndev,
2536 			  "Got MPI processor interrupt.\n");
2537 		ql_disable_completion_interrupt(qdev, intr_context->intr);
2538 		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2539 		queue_delayed_work_on(smp_processor_id(),
2540 				qdev->workqueue, &qdev->mpi_work, 0);
2541 		work_done++;
2542 	}
2543 
2544 	/*
2545 	 * Get the bit-mask that shows the active queues for this
2546 	 * pass.  Compare it to the queues that this irq services
2547 	 * and call napi if there's a match.
2548 	 */
2549 	var = ql_read32(qdev, ISR1);
2550 	if (var & intr_context->irq_mask) {
2551 		netif_info(qdev, intr, qdev->ndev,
2552 			   "Waking handler for rx_ring[0].\n");
2553 		ql_disable_completion_interrupt(qdev, intr_context->intr);
2554 		napi_schedule(&rx_ring->napi);
2555 		work_done++;
2556 	}
2557 	ql_enable_completion_interrupt(qdev, intr_context->intr);
2558 	return work_done ? IRQ_HANDLED : IRQ_NONE;
2559 }
2560 
2561 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2562 {
2563 
2564 	if (skb_is_gso(skb)) {
2565 		int err;
2566 		__be16 l3_proto = vlan_get_protocol(skb);
2567 
2568 		err = skb_cow_head(skb, 0);
2569 		if (err < 0)
2570 			return err;
2571 
2572 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2573 		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2574 		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2575 		mac_iocb_ptr->total_hdrs_len =
2576 		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2577 		mac_iocb_ptr->net_trans_offset =
2578 		    cpu_to_le16(skb_network_offset(skb) |
2579 				skb_transport_offset(skb)
2580 				<< OB_MAC_TRANSPORT_HDR_SHIFT);
2581 		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2582 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2583 		if (likely(l3_proto == htons(ETH_P_IP))) {
2584 			struct iphdr *iph = ip_hdr(skb);
2585 			iph->check = 0;
2586 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2587 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2588 								 iph->daddr, 0,
2589 								 IPPROTO_TCP,
2590 								 0);
2591 		} else if (l3_proto == htons(ETH_P_IPV6)) {
2592 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2593 			tcp_hdr(skb)->check =
2594 			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2595 					     &ipv6_hdr(skb)->daddr,
2596 					     0, IPPROTO_TCP, 0);
2597 		}
2598 		return 1;
2599 	}
2600 	return 0;
2601 }
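
/* For TSO the stack hands us a GSO skb and the code above seeds the TCP
 * checksum with just the pseudo-header sum (length 0, no payload) so the
 * hardware can finish the per-segment checksums, e.g. for IPv4:
 *
 *	tcp_hdr(skb)->check =
 *		~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0);
 *
 * mss comes from skb_shinfo(skb)->gso_size, and total_hdrs_len is the full
 * L2+L3+L4 header length, which presumably is what the chip replicates in
 * front of each segment it produces.
 */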
2602 
2603 static void ql_hw_csum_setup(struct sk_buff *skb,
2604 			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2605 {
2606 	int len;
2607 	struct iphdr *iph = ip_hdr(skb);
2608 	__sum16 *check;
2609 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2610 	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2611 	mac_iocb_ptr->net_trans_offset =
2612 		cpu_to_le16(skb_network_offset(skb) |
2613 		skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2614 
2615 	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2616 	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2617 	if (likely(iph->protocol == IPPROTO_TCP)) {
2618 		check = &(tcp_hdr(skb)->check);
2619 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2620 		mac_iocb_ptr->total_hdrs_len =
2621 		    cpu_to_le16(skb_transport_offset(skb) +
2622 				(tcp_hdr(skb)->doff << 2));
2623 	} else {
2624 		check = &(udp_hdr(skb)->check);
2625 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2626 		mac_iocb_ptr->total_hdrs_len =
2627 		    cpu_to_le16(skb_transport_offset(skb) +
2628 				sizeof(struct udphdr));
2629 	}
2630 	*check = ~csum_tcpudp_magic(iph->saddr,
2631 				    iph->daddr, len, iph->protocol, 0);
2632 }
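
/* net_trans_offset packs both header offsets into one 16-bit field: the
 * network (L3) offset in the low bits and the transport (L4) offset shifted
 * up by OB_MAC_TRANSPORT_HDR_SHIFT.  As a rough example, for an untagged
 * Ethernet + IPv4 (20 byte header) + TCP frame:
 *
 *	skb_network_offset(skb)   == 14
 *	skb_transport_offset(skb) == 34
 *
 * so the field becomes 14 | (34 << OB_MAC_TRANSPORT_HDR_SHIFT).  The seeded
 * checksum again covers only the pseudo-header, this time with the real
 * TCP/UDP length instead of 0.
 */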
2633 
2634 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2635 {
2636 	struct tx_ring_desc *tx_ring_desc;
2637 	struct ob_mac_iocb_req *mac_iocb_ptr;
2638 	struct ql_adapter *qdev = netdev_priv(ndev);
2639 	int tso;
2640 	struct tx_ring *tx_ring;
2641 	u32 tx_ring_idx = (u32) skb->queue_mapping;
2642 
2643 	tx_ring = &qdev->tx_ring[tx_ring_idx];
2644 
2645 	if (skb_padto(skb, ETH_ZLEN))
2646 		return NETDEV_TX_OK;
2647 
2648 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2649 		netif_info(qdev, tx_queued, qdev->ndev,
2650 			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2651 			   __func__, tx_ring_idx);
2652 		netif_stop_subqueue(ndev, tx_ring->wq_id);
2653 		tx_ring->tx_errors++;
2654 		return NETDEV_TX_BUSY;
2655 	}
2656 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2657 	mac_iocb_ptr = tx_ring_desc->queue_entry;
2658 	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2659 
2660 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2661 	mac_iocb_ptr->tid = tx_ring_desc->index;
2662 	/* We use the upper 32-bits to store the tx queue for this IO.
2663 	 * When we get the completion we can use it to establish the context.
2664 	 */
2665 	mac_iocb_ptr->txq_idx = tx_ring_idx;
2666 	tx_ring_desc->skb = skb;
2667 
2668 	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2669 
2670 	if (skb_vlan_tag_present(skb)) {
2671 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2672 			     "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2673 		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2674 		mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2675 	}
2676 	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2677 	if (tso < 0) {
2678 		dev_kfree_skb_any(skb);
2679 		return NETDEV_TX_OK;
2680 	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2681 		ql_hw_csum_setup(skb,
2682 				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2683 	}
2684 	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2685 			NETDEV_TX_OK) {
2686 		netif_err(qdev, tx_queued, qdev->ndev,
2687 			  "Could not map the segments.\n");
2688 		tx_ring->tx_errors++;
2689 		return NETDEV_TX_BUSY;
2690 	}
2691 	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2692 	tx_ring->prod_idx++;
2693 	if (tx_ring->prod_idx == tx_ring->wq_len)
2694 		tx_ring->prod_idx = 0;
2695 	wmb();
2696 
2697 	ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2698 	mmiowb();
2699 	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2700 		     "tx queued, slot %d, len %d\n",
2701 		     tx_ring->prod_idx, skb->len);
2702 
2703 	atomic_dec(&tx_ring->tx_count);
2704 
2705 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2706 		netif_stop_subqueue(ndev, tx_ring->wq_id);
2707 		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2708 			/*
2709 			 * The queue got stopped because the tx_ring was full.
2710 			 * Wake it up, because it's now at least 25% empty.
2711 			 */
2712 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2713 	}
2714 	return NETDEV_TX_OK;
2715 }
2716 
2717 
2718 static void ql_free_shadow_space(struct ql_adapter *qdev)
2719 {
2720 	if (qdev->rx_ring_shadow_reg_area) {
2721 		pci_free_consistent(qdev->pdev,
2722 				    PAGE_SIZE,
2723 				    qdev->rx_ring_shadow_reg_area,
2724 				    qdev->rx_ring_shadow_reg_dma);
2725 		qdev->rx_ring_shadow_reg_area = NULL;
2726 	}
2727 	if (qdev->tx_ring_shadow_reg_area) {
2728 		pci_free_consistent(qdev->pdev,
2729 				    PAGE_SIZE,
2730 				    qdev->tx_ring_shadow_reg_area,
2731 				    qdev->tx_ring_shadow_reg_dma);
2732 		qdev->tx_ring_shadow_reg_area = NULL;
2733 	}
2734 }
2735 
2736 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2737 {
2738 	qdev->rx_ring_shadow_reg_area =
2739 		pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2740 				      &qdev->rx_ring_shadow_reg_dma);
2741 	if (qdev->rx_ring_shadow_reg_area == NULL) {
2742 		netif_err(qdev, ifup, qdev->ndev,
2743 			  "Allocation of RX shadow space failed.\n");
2744 		return -ENOMEM;
2745 	}
2746 
2747 	qdev->tx_ring_shadow_reg_area =
2748 		pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2749 				      &qdev->tx_ring_shadow_reg_dma);
2750 	if (qdev->tx_ring_shadow_reg_area == NULL) {
2751 		netif_err(qdev, ifup, qdev->ndev,
2752 			  "Allocation of TX shadow space failed.\n");
2753 		goto err_wqp_sh_area;
2754 	}
2755 	return 0;
2756 
2757 err_wqp_sh_area:
2758 	pci_free_consistent(qdev->pdev,
2759 			    PAGE_SIZE,
2760 			    qdev->rx_ring_shadow_reg_area,
2761 			    qdev->rx_ring_shadow_reg_dma);
2762 	return -ENOMEM;
2763 }
2764 
2765 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2766 {
2767 	struct tx_ring_desc *tx_ring_desc;
2768 	int i;
2769 	struct ob_mac_iocb_req *mac_iocb_ptr;
2770 
2771 	mac_iocb_ptr = tx_ring->wq_base;
2772 	tx_ring_desc = tx_ring->q;
2773 	for (i = 0; i < tx_ring->wq_len; i++) {
2774 		tx_ring_desc->index = i;
2775 		tx_ring_desc->skb = NULL;
2776 		tx_ring_desc->queue_entry = mac_iocb_ptr;
2777 		mac_iocb_ptr++;
2778 		tx_ring_desc++;
2779 	}
2780 	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2781 }
2782 
2783 static void ql_free_tx_resources(struct ql_adapter *qdev,
2784 				 struct tx_ring *tx_ring)
2785 {
2786 	if (tx_ring->wq_base) {
2787 		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2788 				    tx_ring->wq_base, tx_ring->wq_base_dma);
2789 		tx_ring->wq_base = NULL;
2790 	}
2791 	kfree(tx_ring->q);
2792 	tx_ring->q = NULL;
2793 }
2794 
2795 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2796 				 struct tx_ring *tx_ring)
2797 {
2798 	tx_ring->wq_base =
2799 	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2800 				 &tx_ring->wq_base_dma);
2801 
2802 	if ((tx_ring->wq_base == NULL) ||
2803 	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2804 		goto pci_alloc_err;
2805 
2806 	tx_ring->q =
2807 	    kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2808 			  GFP_KERNEL);
2809 	if (tx_ring->q == NULL)
2810 		goto err;
2811 
2812 	return 0;
2813 err:
2814 	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2815 			    tx_ring->wq_base, tx_ring->wq_base_dma);
2816 	tx_ring->wq_base = NULL;
2817 pci_alloc_err:
2818 	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2819 	return -ENOMEM;
2820 }
2821 
2822 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2823 {
2824 	struct bq_desc *lbq_desc;
2825 
2826 	uint32_t  curr_idx, clean_idx;
2827 
2828 	curr_idx = rx_ring->lbq_curr_idx;
2829 	clean_idx = rx_ring->lbq_clean_idx;
2830 	while (curr_idx != clean_idx) {
2831 		lbq_desc = &rx_ring->lbq[curr_idx];
2832 
2833 		if (lbq_desc->p.pg_chunk.last_flag) {
2834 			pci_unmap_page(qdev->pdev,
2835 				lbq_desc->p.pg_chunk.map,
2836 				ql_lbq_block_size(qdev),
2837 				       PCI_DMA_FROMDEVICE);
2838 			lbq_desc->p.pg_chunk.last_flag = 0;
2839 		}
2840 
2841 		put_page(lbq_desc->p.pg_chunk.page);
2842 		lbq_desc->p.pg_chunk.page = NULL;
2843 
2844 		if (++curr_idx == rx_ring->lbq_len)
2845 			curr_idx = 0;
2846 
2847 	}
2848 	if (rx_ring->pg_chunk.page) {
2849 		pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2850 			ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2851 		put_page(rx_ring->pg_chunk.page);
2852 		rx_ring->pg_chunk.page = NULL;
2853 	}
2854 }
2855 
2856 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2857 {
2858 	int i;
2859 	struct bq_desc *sbq_desc;
2860 
2861 	for (i = 0; i < rx_ring->sbq_len; i++) {
2862 		sbq_desc = &rx_ring->sbq[i];
2863 		if (sbq_desc == NULL) {
2864 			netif_err(qdev, ifup, qdev->ndev,
2865 				  "sbq_desc %d is NULL.\n", i);
2866 			return;
2867 		}
2868 		if (sbq_desc->p.skb) {
2869 			pci_unmap_single(qdev->pdev,
2870 					 dma_unmap_addr(sbq_desc, mapaddr),
2871 					 dma_unmap_len(sbq_desc, maplen),
2872 					 PCI_DMA_FROMDEVICE);
2873 			dev_kfree_skb(sbq_desc->p.skb);
2874 			sbq_desc->p.skb = NULL;
2875 		}
2876 	}
2877 }
2878 
2879 /* Free all large and small rx buffers associated
2880  * with the completion queues for this device.
2881  */
2882 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2883 {
2884 	int i;
2885 	struct rx_ring *rx_ring;
2886 
2887 	for (i = 0; i < qdev->rx_ring_count; i++) {
2888 		rx_ring = &qdev->rx_ring[i];
2889 		if (rx_ring->lbq)
2890 			ql_free_lbq_buffers(qdev, rx_ring);
2891 		if (rx_ring->sbq)
2892 			ql_free_sbq_buffers(qdev, rx_ring);
2893 	}
2894 }
2895 
2896 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2897 {
2898 	struct rx_ring *rx_ring;
2899 	int i;
2900 
2901 	for (i = 0; i < qdev->rx_ring_count; i++) {
2902 		rx_ring = &qdev->rx_ring[i];
2903 		if (rx_ring->type != TX_Q)
2904 			ql_update_buffer_queues(qdev, rx_ring);
2905 	}
2906 }
2907 
2908 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2909 				struct rx_ring *rx_ring)
2910 {
2911 	int i;
2912 	struct bq_desc *lbq_desc;
2913 	__le64 *bq = rx_ring->lbq_base;
2914 
2915 	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2916 	for (i = 0; i < rx_ring->lbq_len; i++) {
2917 		lbq_desc = &rx_ring->lbq[i];
2918 		memset(lbq_desc, 0, sizeof(*lbq_desc));
2919 		lbq_desc->index = i;
2920 		lbq_desc->addr = bq;
2921 		bq++;
2922 	}
2923 }
2924 
2925 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2926 				struct rx_ring *rx_ring)
2927 {
2928 	int i;
2929 	struct bq_desc *sbq_desc;
2930 	__le64 *bq = rx_ring->sbq_base;
2931 
2932 	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2933 	for (i = 0; i < rx_ring->sbq_len; i++) {
2934 		sbq_desc = &rx_ring->sbq[i];
2935 		memset(sbq_desc, 0, sizeof(*sbq_desc));
2936 		sbq_desc->index = i;
2937 		sbq_desc->addr = bq;
2938 		bq++;
2939 	}
2940 }
2941 
2942 static void ql_free_rx_resources(struct ql_adapter *qdev,
2943 				 struct rx_ring *rx_ring)
2944 {
2945 	/* Free the small buffer queue. */
2946 	if (rx_ring->sbq_base) {
2947 		pci_free_consistent(qdev->pdev,
2948 				    rx_ring->sbq_size,
2949 				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
2950 		rx_ring->sbq_base = NULL;
2951 	}
2952 
2953 	/* Free the small buffer queue control blocks. */
2954 	kfree(rx_ring->sbq);
2955 	rx_ring->sbq = NULL;
2956 
2957 	/* Free the large buffer queue. */
2958 	if (rx_ring->lbq_base) {
2959 		pci_free_consistent(qdev->pdev,
2960 				    rx_ring->lbq_size,
2961 				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
2962 		rx_ring->lbq_base = NULL;
2963 	}
2964 
2965 	/* Free the large buffer queue control blocks. */
2966 	kfree(rx_ring->lbq);
2967 	rx_ring->lbq = NULL;
2968 
2969 	/* Free the rx queue. */
2970 	if (rx_ring->cq_base) {
2971 		pci_free_consistent(qdev->pdev,
2972 				    rx_ring->cq_size,
2973 				    rx_ring->cq_base, rx_ring->cq_base_dma);
2974 		rx_ring->cq_base = NULL;
2975 	}
2976 }
2977 
2978 /* Allocate queues and buffers for this completion queue based
2979  * on the values in the parameter structure. */
2980 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2981 				 struct rx_ring *rx_ring)
2982 {
2983 
2984 	/*
2985 	 * Allocate the completion queue for this rx_ring.
2986 	 */
2987 	rx_ring->cq_base =
2988 	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2989 				 &rx_ring->cq_base_dma);
2990 
2991 	if (rx_ring->cq_base == NULL) {
2992 		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2993 		return -ENOMEM;
2994 	}
2995 
2996 	if (rx_ring->sbq_len) {
2997 		/*
2998 		 * Allocate small buffer queue.
2999 		 */
3000 		rx_ring->sbq_base =
3001 		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
3002 					 &rx_ring->sbq_base_dma);
3003 
3004 		if (rx_ring->sbq_base == NULL) {
3005 			netif_err(qdev, ifup, qdev->ndev,
3006 				  "Small buffer queue allocation failed.\n");
3007 			goto err_mem;
3008 		}
3009 
3010 		/*
3011 		 * Allocate small buffer queue control blocks.
3012 		 */
3013 		rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
3014 					     sizeof(struct bq_desc),
3015 					     GFP_KERNEL);
3016 		if (rx_ring->sbq == NULL)
3017 			goto err_mem;
3018 
3019 		ql_init_sbq_ring(qdev, rx_ring);
3020 	}
3021 
3022 	if (rx_ring->lbq_len) {
3023 		/*
3024 		 * Allocate large buffer queue.
3025 		 */
3026 		rx_ring->lbq_base =
3027 		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
3028 					 &rx_ring->lbq_base_dma);
3029 
3030 		if (rx_ring->lbq_base == NULL) {
3031 			netif_err(qdev, ifup, qdev->ndev,
3032 				  "Large buffer queue allocation failed.\n");
3033 			goto err_mem;
3034 		}
3035 		/*
3036 		 * Allocate large buffer queue control blocks.
3037 		 */
3038 		rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
3039 					     sizeof(struct bq_desc),
3040 					     GFP_KERNEL);
3041 		if (rx_ring->lbq == NULL)
3042 			goto err_mem;
3043 
3044 		ql_init_lbq_ring(qdev, rx_ring);
3045 	}
3046 
3047 	return 0;
3048 
3049 err_mem:
3050 	ql_free_rx_resources(qdev, rx_ring);
3051 	return -ENOMEM;
3052 }
3053 
3054 static void ql_tx_ring_clean(struct ql_adapter *qdev)
3055 {
3056 	struct tx_ring *tx_ring;
3057 	struct tx_ring_desc *tx_ring_desc;
3058 	int i, j;
3059 
3060 	/*
3061 	 * Loop through all queues and free
3062 	 * any resources.
3063 	 */
3064 	for (j = 0; j < qdev->tx_ring_count; j++) {
3065 		tx_ring = &qdev->tx_ring[j];
3066 		for (i = 0; i < tx_ring->wq_len; i++) {
3067 			tx_ring_desc = &tx_ring->q[i];
3068 			if (tx_ring_desc && tx_ring_desc->skb) {
3069 				netif_err(qdev, ifdown, qdev->ndev,
3070 					  "Freeing lost SKB %p, from queue %d, index %d.\n",
3071 					  tx_ring_desc->skb, j,
3072 					  tx_ring_desc->index);
3073 				ql_unmap_send(qdev, tx_ring_desc,
3074 					      tx_ring_desc->map_cnt);
3075 				dev_kfree_skb(tx_ring_desc->skb);
3076 				tx_ring_desc->skb = NULL;
3077 			}
3078 		}
3079 	}
3080 }
3081 
3082 static void ql_free_mem_resources(struct ql_adapter *qdev)
3083 {
3084 	int i;
3085 
3086 	for (i = 0; i < qdev->tx_ring_count; i++)
3087 		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3088 	for (i = 0; i < qdev->rx_ring_count; i++)
3089 		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3090 	ql_free_shadow_space(qdev);
3091 }
3092 
3093 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3094 {
3095 	int i;
3096 
3097 	/* Allocate space for our shadow registers and such. */
3098 	if (ql_alloc_shadow_space(qdev))
3099 		return -ENOMEM;
3100 
3101 	for (i = 0; i < qdev->rx_ring_count; i++) {
3102 		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3103 			netif_err(qdev, ifup, qdev->ndev,
3104 				  "RX resource allocation failed.\n");
3105 			goto err_mem;
3106 		}
3107 	}
3108 	/* Allocate tx queue resources */
3109 	for (i = 0; i < qdev->tx_ring_count; i++) {
3110 		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3111 			netif_err(qdev, ifup, qdev->ndev,
3112 				  "TX resource allocation failed.\n");
3113 			goto err_mem;
3114 		}
3115 	}
3116 	return 0;
3117 
3118 err_mem:
3119 	ql_free_mem_resources(qdev);
3120 	return -ENOMEM;
3121 }
3122 
3123 /* Set up the rx ring control block and pass it to the chip.
3124  * The control block is defined as
3125  * "Completion Queue Initialization Control Block", or cqicb.
3126  */
3127 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3128 {
3129 	struct cqicb *cqicb = &rx_ring->cqicb;
3130 	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3131 		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3132 	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3133 		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3134 	void __iomem *doorbell_area =
3135 	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3136 	int err = 0;
3137 	u16 bq_len;
3138 	u64 tmp;
3139 	__le64 *base_indirect_ptr;
3140 	int page_entries;
3141 
3142 	/* Set up the shadow registers for this ring. */
3143 	rx_ring->prod_idx_sh_reg = shadow_reg;
3144 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3145 	*rx_ring->prod_idx_sh_reg = 0;
3146 	shadow_reg += sizeof(u64);
3147 	shadow_reg_dma += sizeof(u64);
3148 	rx_ring->lbq_base_indirect = shadow_reg;
3149 	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3150 	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3151 	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3152 	rx_ring->sbq_base_indirect = shadow_reg;
3153 	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3154 
3155 	/* PCI doorbell mem area + 0x00 for consumer index register */
3156 	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3157 	rx_ring->cnsmr_idx = 0;
3158 	rx_ring->curr_entry = rx_ring->cq_base;
3159 
3160 	/* PCI doorbell mem area + 0x04 for valid register */
3161 	rx_ring->valid_db_reg = doorbell_area + 0x04;
3162 
3163 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
3164 	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3165 
3166 	/* PCI doorbell mem area + 0x1c */
3167 	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3168 
3169 	memset((void *)cqicb, 0, sizeof(struct cqicb));
3170 	cqicb->msix_vect = rx_ring->irq;
3171 
3172 	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3173 	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3174 
3175 	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3176 
3177 	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3178 
3179 	/*
3180 	 * Set up the control block load flags.
3181 	 */
3182 	cqicb->flags = FLAGS_LC |	/* Load queue base address */
3183 	    FLAGS_LV |		/* Load MSI-X vector */
3184 	    FLAGS_LI;		/* Load irq delay values */
3185 	if (rx_ring->lbq_len) {
3186 		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
3187 		tmp = (u64)rx_ring->lbq_base_dma;
3188 		base_indirect_ptr = rx_ring->lbq_base_indirect;
3189 		page_entries = 0;
3190 		do {
3191 			*base_indirect_ptr = cpu_to_le64(tmp);
3192 			tmp += DB_PAGE_SIZE;
3193 			base_indirect_ptr++;
3194 			page_entries++;
3195 		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3196 		cqicb->lbq_addr =
3197 		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3198 		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3199 			(u16) rx_ring->lbq_buf_size;
3200 		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3201 		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3202 			(u16) rx_ring->lbq_len;
3203 		cqicb->lbq_len = cpu_to_le16(bq_len);
3204 		rx_ring->lbq_prod_idx = 0;
3205 		rx_ring->lbq_curr_idx = 0;
3206 		rx_ring->lbq_clean_idx = 0;
3207 		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3208 	}
3209 	if (rx_ring->sbq_len) {
3210 		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
3211 		tmp = (u64)rx_ring->sbq_base_dma;
3212 		base_indirect_ptr = rx_ring->sbq_base_indirect;
3213 		page_entries = 0;
3214 		do {
3215 			*base_indirect_ptr = cpu_to_le64(tmp);
3216 			tmp += DB_PAGE_SIZE;
3217 			base_indirect_ptr++;
3218 			page_entries++;
3219 		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3220 		cqicb->sbq_addr =
3221 		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3222 		cqicb->sbq_buf_size =
3223 		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3224 		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3225 			(u16) rx_ring->sbq_len;
3226 		cqicb->sbq_len = cpu_to_le16(bq_len);
3227 		rx_ring->sbq_prod_idx = 0;
3228 		rx_ring->sbq_curr_idx = 0;
3229 		rx_ring->sbq_clean_idx = 0;
3230 		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3231 	}
3232 	switch (rx_ring->type) {
3233 	case TX_Q:
3234 		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3235 		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3236 		break;
3237 	case RX_Q:
3238 		/* Inbound completion handling rx_rings run in
3239 		 * separate NAPI contexts.
3240 		 */
3241 		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3242 			       64);
3243 		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3244 		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3245 		break;
3246 	default:
3247 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3248 			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
3249 	}
3250 	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3251 			   CFG_LCQ, rx_ring->cq_id);
3252 	if (err) {
3253 		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3254 		return err;
3255 	}
3256 	return err;
3257 }
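
/* Two quirks of the CQICB encoding above are worth an example: the length
 * fields are 16 bits wide, so a value of exactly 65536 is apparently
 * encoded as 0 (hence the "== 65536 ? 0 :" tests), and the lbq/sbq base
 * addresses are not programmed directly but through the indirect lists
 * built in the shadow area, one entry per DB_PAGE_SIZE chunk of the queue,
 * roughly:
 *
 *	for (page_entries = 0, tmp = base_dma;
 *	     page_entries < MAX_DB_PAGES_PER_BQ(len);
 *	     page_entries++, tmp += DB_PAGE_SIZE)
 *		*base_indirect_ptr++ = cpu_to_le64(tmp);
 */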
3258 
3259 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3260 {
3261 	struct wqicb *wqicb = (struct wqicb *)tx_ring;
3262 	void __iomem *doorbell_area =
3263 	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3264 	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3265 	    (tx_ring->wq_id * sizeof(u64));
3266 	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3267 	    (tx_ring->wq_id * sizeof(u64));
3268 	int err = 0;
3269 
3270 	/*
3271 	 * Assign doorbell registers for this tx_ring.
3272 	 */
3273 	/* TX PCI doorbell mem area for tx producer index */
3274 	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3275 	tx_ring->prod_idx = 0;
3276 	/* TX PCI doorbell mem area + 0x04 */
3277 	tx_ring->valid_db_reg = doorbell_area + 0x04;
3278 
3279 	/*
3280 	 * Assign shadow registers for this tx_ring.
3281 	 */
3282 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3283 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3284 
3285 	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3286 	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3287 				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3288 	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3289 	wqicb->rid = 0;
3290 	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3291 
3292 	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3293 
3294 	ql_init_tx_ring(qdev, tx_ring);
3295 
3296 	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3297 			   (u16) tx_ring->wq_id);
3298 	if (err) {
3299 		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3300 		return err;
3301 	}
3302 	return err;
3303 }
3304 
3305 static void ql_disable_msix(struct ql_adapter *qdev)
3306 {
3307 	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3308 		pci_disable_msix(qdev->pdev);
3309 		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3310 		kfree(qdev->msi_x_entry);
3311 		qdev->msi_x_entry = NULL;
3312 	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3313 		pci_disable_msi(qdev->pdev);
3314 		clear_bit(QL_MSI_ENABLED, &qdev->flags);
3315 	}
3316 }
3317 
3318 /* We start by trying to get the number of vectors
3319  * stored in qdev->intr_count. If we don't get that
3320  * many then we reduce the count and try again.
3321  */
3322 static void ql_enable_msix(struct ql_adapter *qdev)
3323 {
3324 	int i, err;
3325 
3326 	/* Get the MSIX vectors. */
3327 	if (qlge_irq_type == MSIX_IRQ) {
3328 		/* Try to alloc space for the msix struct,
3329 		 * if it fails then go to MSI/legacy.
3330 		 */
3331 		qdev->msi_x_entry = kcalloc(qdev->intr_count,
3332 					    sizeof(struct msix_entry),
3333 					    GFP_KERNEL);
3334 		if (!qdev->msi_x_entry) {
3335 			qlge_irq_type = MSI_IRQ;
3336 			goto msi;
3337 		}
3338 
3339 		for (i = 0; i < qdev->intr_count; i++)
3340 			qdev->msi_x_entry[i].entry = i;
3341 
3342 		err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3343 					    1, qdev->intr_count);
3344 		if (err < 0) {
3345 			kfree(qdev->msi_x_entry);
3346 			qdev->msi_x_entry = NULL;
3347 			netif_warn(qdev, ifup, qdev->ndev,
3348 				   "MSI-X Enable failed, trying MSI.\n");
3349 			qlge_irq_type = MSI_IRQ;
3350 		} else {
3351 			qdev->intr_count = err;
3352 			set_bit(QL_MSIX_ENABLED, &qdev->flags);
3353 			netif_info(qdev, ifup, qdev->ndev,
3354 				   "MSI-X Enabled, got %d vectors.\n",
3355 				   qdev->intr_count);
3356 			return;
3357 		}
3358 	}
3359 msi:
3360 	qdev->intr_count = 1;
3361 	if (qlge_irq_type == MSI_IRQ) {
3362 		if (!pci_enable_msi(qdev->pdev)) {
3363 			set_bit(QL_MSI_ENABLED, &qdev->flags);
3364 			netif_info(qdev, ifup, qdev->ndev,
3365 				   "Running with MSI interrupts.\n");
3366 			return;
3367 		}
3368 	}
3369 	qlge_irq_type = LEG_IRQ;
3370 	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3371 		     "Running with legacy interrupts.\n");
3372 }
3373 
3374 /* Each vector services 1 RSS ring and 1 or more
3375  * TX completion rings.  This function loops through
3376  * the TX completion rings and assigns the vector that
3377  * will service it.  An example would be if there are
3378  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3379  * This would mean that vector 0 would service RSS ring 0
3380  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3381  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3382  */
3383 static void ql_set_tx_vect(struct ql_adapter *qdev)
3384 {
3385 	int i, j, vect;
3386 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3387 
3388 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3389 		/* Assign irq vectors to the TX completion rx_rings. */
3390 		for (vect = 0, j = 0, i = qdev->rss_ring_count;
3391 					 i < qdev->rx_ring_count; i++) {
3392 			if (j == tx_rings_per_vector) {
3393 				vect++;
3394 				j = 0;
3395 			}
3396 			qdev->rx_ring[i].irq = vect;
3397 			j++;
3398 		}
3399 	} else {
3400 		/* For single vector all rings have an irq
3401 		 * of zero.
3402 		 */
3403 		for (i = 0; i < qdev->rx_ring_count; i++)
3404 			qdev->rx_ring[i].irq = 0;
3405 	}
3406 }
3407 
3408 /* Set the interrupt mask for this vector.  Each vector
3409  * will service 1 RSS ring and 1 or more TX completion
3410  * rings.  This function sets up a bit mask per vector
3411  * that indicates which rings it services.
3412  */
3413 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3414 {
3415 	int j, vect = ctx->intr;
3416 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3417 
3418 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3419 		/* Add the RSS ring serviced by this vector
3420 		 * to the mask.
3421 		 */
3422 		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3423 		/* Add the TX ring(s) serviced by this vector
3424 		 * to the mask. */
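		/* Example (matching ql_set_tx_vect() above): with 2 vectors
		 * and 8 TX completion rings, tx_rings_per_vector is 4, so
		 * vector 1's mask covers the cq_ids of
		 * rx_ring[rss_ring_count + 4] through
		 * rx_ring[rss_ring_count + 7].
		 */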
3425 		for (j = 0; j < tx_rings_per_vector; j++) {
3426 			ctx->irq_mask |=
3427 			(1 << qdev->rx_ring[qdev->rss_ring_count +
3428 			(vect * tx_rings_per_vector) + j].cq_id);
3429 		}
3430 	} else {
3431 		/* For a single vector we simply set the bit for
3432 		 * each queue's completion queue ID in the mask.
3433 		 */
3434 		for (j = 0; j < qdev->rx_ring_count; j++)
3435 			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3436 	}
3437 }
3438 
3439 /*
3440  * Here we build the intr_context structures based on
3441  * our rx_ring count and intr vector count.
3442  * The intr_context structure is used to hook each vector
3443  * to possibly different handlers.
3444  */
3445 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3446 {
3447 	int i = 0;
3448 	struct intr_context *intr_context = &qdev->intr_context[0];
3449 
3450 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3451 		/* Each rx_ring has its
3452 		 * own intr_context since we have separate
3453 		 * vectors for each queue.
3454 		 */
3455 		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3456 			qdev->rx_ring[i].irq = i;
3457 			intr_context->intr = i;
3458 			intr_context->qdev = qdev;
3459 			/* Set up this vector's bit-mask that indicates
3460 			 * which queues it services.
3461 			 */
3462 			ql_set_irq_mask(qdev, intr_context);
3463 			/*
3464 			 * We set up each vector's enable/disable/read bits so
3465 			 * there are no bit/mask calculations in the critical path.
3466 			 */
3467 			intr_context->intr_en_mask =
3468 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3469 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3470 			    | i;
3471 			intr_context->intr_dis_mask =
3472 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3473 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3474 			    INTR_EN_IHD | i;
3475 			intr_context->intr_read_mask =
3476 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3477 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3478 			    i;
3479 			if (i == 0) {
3480 				/* The first vector/queue handles
3481 				 * broadcast/multicast, fatal errors,
3482 				 * and firmware events.  This is in addition
3483 				 * to normal inbound NAPI processing.
3484 				 */
3485 				intr_context->handler = qlge_isr;
3486 				sprintf(intr_context->name, "%s-rx-%d",
3487 					qdev->ndev->name, i);
3488 			} else {
3489 				/*
3490 				 * Inbound queues handle unicast frames only.
3491 				 */
3492 				intr_context->handler = qlge_msix_rx_isr;
3493 				sprintf(intr_context->name, "%s-rx-%d",
3494 					qdev->ndev->name, i);
3495 			}
3496 		}
3497 	} else {
3498 		/*
3499 		 * All rx_rings use the same intr_context since
3500 		 * there is only one vector.
3501 		 */
3502 		intr_context->intr = 0;
3503 		intr_context->qdev = qdev;
3504 		/*
3505 		 * We set up each vector's enable/disable/read bits so
3506 		 * there are no bit/mask calculations in the critical path.
3507 		 */
3508 		intr_context->intr_en_mask =
3509 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3510 		intr_context->intr_dis_mask =
3511 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3512 		    INTR_EN_TYPE_DISABLE;
3513 		intr_context->intr_read_mask =
3514 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3515 		/*
3516 		 * Single interrupt means one handler for all rings.
3517 		 */
3518 		intr_context->handler = qlge_isr;
3519 		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3520 		/* Set up this vector's bit-mask that indicates
3521 		 * which queues it services. In this case there is
3522 		 * a single vector so it will service all RSS and
3523 		 * TX completion rings.
3524 		 */
3525 		ql_set_irq_mask(qdev, intr_context);
3526 	}
3527 	/* Tell the TX completion rings which MSI-X vector
3528 	 * they will be using.
3529 	 */
3530 	ql_set_tx_vect(qdev);
3531 }
3532 
3533 static void ql_free_irq(struct ql_adapter *qdev)
3534 {
3535 	int i;
3536 	struct intr_context *intr_context = &qdev->intr_context[0];
3537 
3538 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3539 		if (intr_context->hooked) {
3540 			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3541 				free_irq(qdev->msi_x_entry[i].vector,
3542 					 &qdev->rx_ring[i]);
3543 			} else {
3544 				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3545 			}
3546 		}
3547 	}
3548 	ql_disable_msix(qdev);
3549 }
3550 
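/* Hook an interrupt handler for each vector.  With MSI-X every rx_ring
 * gets its own vector and handler; with MSI or legacy interrupts a single
 * handler (qlge_isr) is hooked to rx_ring[0] and services all rings.
 */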
3551 static int ql_request_irq(struct ql_adapter *qdev)
3552 {
3553 	int i;
3554 	int status = 0;
3555 	struct pci_dev *pdev = qdev->pdev;
3556 	struct intr_context *intr_context = &qdev->intr_context[0];
3557 
3558 	ql_resolve_queues_to_irqs(qdev);
3559 
3560 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3561 		atomic_set(&intr_context->irq_cnt, 0);
3562 		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3563 			status = request_irq(qdev->msi_x_entry[i].vector,
3564 					     intr_context->handler,
3565 					     0,
3566 					     intr_context->name,
3567 					     &qdev->rx_ring[i]);
3568 			if (status) {
3569 				netif_err(qdev, ifup, qdev->ndev,
3570 					  "Failed request for MSIX interrupt %d.\n",
3571 					  i);
3572 				goto err_irq;
3573 			}
3574 		} else {
3575 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3576 				     "trying msi or legacy interrupts.\n");
3577 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3578 				     "%s: irq = %d.\n", __func__, pdev->irq);
3579 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3580 				     "%s: context->name = %s.\n", __func__,
3581 				     intr_context->name);
3582 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3583 				     "%s: dev_id = 0x%p.\n", __func__,
3584 				     &qdev->rx_ring[0]);
3585 			status =
3586 			    request_irq(pdev->irq, qlge_isr,
3587 					test_bit(QL_MSI_ENABLED,
3588 						 &qdev->
3589 						 flags) ? 0 : IRQF_SHARED,
3590 					intr_context->name, &qdev->rx_ring[0]);
3591 			if (status)
3592 				goto err_irq;
3593 
3594 			netif_err(qdev, ifup, qdev->ndev,
3595 				  "Hooked intr %d, queue type %s, with name %s.\n",
3596 				  i,
3597 				  qdev->rx_ring[0].type == DEFAULT_Q ?
3598 				  "DEFAULT_Q" :
3599 				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3600 				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3601 				  intr_context->name);
3602 		}
3603 		intr_context->hooked = 1;
3604 	}
3605 	return status;
3606 err_irq:
3607 	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3608 	ql_free_irq(qdev);
3609 	return status;
3610 }
3611 
3612 static int ql_start_rss(struct ql_adapter *qdev)
3613 {
3614 	static const u8 init_hash_seed[] = {
3615 		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3616 		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3617 		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3618 		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3619 		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3620 	};
3621 	struct ricb *ricb = &qdev->ricb;
3622 	int status = 0;
3623 	int i;
3624 	u8 *hash_id = (u8 *) ricb->hash_cq_id;
3625 
3626 	memset((void *)ricb, 0, sizeof(*ricb));
3627 
3628 	ricb->base_cq = RSS_L4K;
3629 	ricb->flags =
3630 		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3631 	ricb->mask = cpu_to_le16((u16)(0x3ff));
3632 
3633 	/*
3634 	 * Fill out the Indirection Table.
3635 	 */
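	/* Each of the 1024 entries maps a hash bucket (hash & 0x3ff, per
	 * the mask above) to an RSS completion queue.  The AND with
	 * (rss_ring_count - 1) spreads buckets across the RSS rings; note
	 * the spread is only even when the ring count is a power of two.
	 */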
3636 	for (i = 0; i < 1024; i++)
3637 		hash_id[i] = (i & (qdev->rss_ring_count - 1));
3638 
3639 	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3640 	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3641 
3642 	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3643 	if (status) {
3644 		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3645 		return status;
3646 	}
3647 	return status;
3648 }
3649 
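/* Zero all 16 routing index registers under the routing semaphore. */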
3650 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3651 {
3652 	int i, status = 0;
3653 
3654 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3655 	if (status)
3656 		return status;
3657 	/* Clear all the entries in the routing table. */
3658 	for (i = 0; i < 16; i++) {
3659 		status = ql_set_routing_reg(qdev, i, 0, 0);
3660 		if (status) {
3661 			netif_err(qdev, ifup, qdev->ndev,
3662 				  "Failed to init routing register for CAM packets.\n");
3663 			break;
3664 		}
3665 	}
3666 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3667 	return status;
3668 }
3669 
3670 /* Initialize the frame-to-queue routing. */
3671 static int ql_route_initialize(struct ql_adapter *qdev)
3672 {
3673 	int status = 0;
3674 
3675 	/* Clear all the entries in the routing table. */
3676 	status = ql_clear_routing_entries(qdev);
3677 	if (status)
3678 		return status;
3679 
3680 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3681 	if (status)
3682 		return status;
3683 
3684 	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3685 						RT_IDX_IP_CSUM_ERR, 1);
3686 	if (status) {
3687 		netif_err(qdev, ifup, qdev->ndev,
3688 			"Failed to init routing register "
3689 			"for IP CSUM error packets.\n");
3690 		goto exit;
3691 	}
3692 	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3693 						RT_IDX_TU_CSUM_ERR, 1);
3694 	if (status) {
3695 		netif_err(qdev, ifup, qdev->ndev,
3696 			"Failed to init routing register "
3697 			"for TCP/UDP CSUM error packets.\n");
3698 		goto exit;
3699 	}
3700 	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3701 	if (status) {
3702 		netif_err(qdev, ifup, qdev->ndev,
3703 			  "Failed to init routing register for broadcast packets.\n");
3704 		goto exit;
3705 	}
3706 	/* If we have more than one inbound queue, then turn on RSS in the
3707 	 * routing block.
3708 	 */
3709 	if (qdev->rss_ring_count > 1) {
3710 		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3711 					RT_IDX_RSS_MATCH, 1);
3712 		if (status) {
3713 			netif_err(qdev, ifup, qdev->ndev,
3714 				  "Failed to init routing register for MATCH RSS packets.\n");
3715 			goto exit;
3716 		}
3717 	}
3718 
3719 	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3720 				    RT_IDX_CAM_HIT, 1);
3721 	if (status)
3722 		netif_err(qdev, ifup, qdev->ndev,
3723 			  "Failed to init routing register for CAM packets.\n");
3724 exit:
3725 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3726 	return status;
3727 }
3728 
3729 int ql_cam_route_initialize(struct ql_adapter *qdev)
3730 {
3731 	int status, set;
3732 
3733 	/* Check if the link is up and use that to
3734 	 * determine whether we are setting or clearing
3735 	 * the MAC address in the CAM.
3736 	 */
3737 	set = ql_read32(qdev, STS);
3738 	set &= qdev->port_link_up;
3739 	status = ql_set_mac_addr(qdev, set);
3740 	if (status) {
3741 		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3742 		return status;
3743 	}
3744 
3745 	status = ql_route_initialize(qdev);
3746 	if (status)
3747 		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3748 
3749 	return status;
3750 }
3751 
3752 static int ql_adapter_initialize(struct ql_adapter *qdev)
3753 {
3754 	u32 value, mask;
3755 	int i;
3756 	int status = 0;
3757 
3758 	/*
3759 	 * Set up the System register to halt on errors.
3760 	 */
3761 	value = SYS_EFE | SYS_FAE;
3762 	mask = value << 16;
3763 	ql_write32(qdev, SYS, mask | value);
3764 
3765 	/* Set the default queue, and VLAN behavior. */
3766 	value = NIC_RCV_CFG_DFQ;
3767 	mask = NIC_RCV_CFG_DFQ_MASK;
3768 	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3769 		value |= NIC_RCV_CFG_RV;
3770 		mask |= (NIC_RCV_CFG_RV << 16);
3771 	}
3772 	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3773 
3774 	/* Set the MPI interrupt to enabled. */
3775 	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3776 
3777 	/* Enable the function, set pagesize, enable error checking. */
3778 	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3779 	    FSC_EC | FSC_VM_PAGE_4K;
3780 	value |= SPLT_SETTING;
3781 
3782 	/* Set/clear header splitting. */
3783 	mask = FSC_VM_PAGESIZE_MASK |
3784 	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3785 	ql_write32(qdev, FSC, mask | value);
3786 
3787 	ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3788 
3789 	/* Set RX packet routing to use the port/pci function on which
3790 	 * the packet arrived, in addition to the usual frame routing.
3791 	 * This is helpful when bonding, where both interfaces can have
3792 	 * the same MAC address.
3793 	 */
3794 	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3795 	/* Reroute all packets to our Interface.
3796 	 * They may have been routed to MPI firmware
3797 	 * due to WOL.
3798 	 */
3799 	value = ql_read32(qdev, MGMT_RCV_CFG);
3800 	value &= ~MGMT_RCV_CFG_RM;
3801 	mask = 0xffff0000;
3802 
3803 	/* Sticky reg needs clearing due to WOL. */
3804 	ql_write32(qdev, MGMT_RCV_CFG, mask);
3805 	ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3806 
3807 	/* WOL is enabled by default on Mezz cards */
3808 	if (qdev->pdev->subsystem_device == 0x0068 ||
3809 			qdev->pdev->subsystem_device == 0x0180)
3810 		qdev->wol = WAKE_MAGIC;
3811 
3812 	/* Start up the rx queues. */
3813 	for (i = 0; i < qdev->rx_ring_count; i++) {
3814 		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3815 		if (status) {
3816 			netif_err(qdev, ifup, qdev->ndev,
3817 				  "Failed to start rx ring[%d].\n", i);
3818 			return status;
3819 		}
3820 	}
3821 
3822 	/* If there is more than one inbound completion queue
3823 	 * then download a RICB to configure RSS.
3824 	 */
3825 	if (qdev->rss_ring_count > 1) {
3826 		status = ql_start_rss(qdev);
3827 		if (status) {
3828 			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3829 			return status;
3830 		}
3831 	}
3832 
3833 	/* Start up the tx queues. */
3834 	for (i = 0; i < qdev->tx_ring_count; i++) {
3835 		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3836 		if (status) {
3837 			netif_err(qdev, ifup, qdev->ndev,
3838 				  "Failed to start tx ring[%d].\n", i);
3839 			return status;
3840 		}
3841 	}
3842 
3843 	/* Initialize the port and set the max framesize. */
3844 	status = qdev->nic_ops->port_initialize(qdev);
3845 	if (status)
3846 		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3847 
3848 	/* Set up the MAC address and frame routing filter. */
3849 	status = ql_cam_route_initialize(qdev);
3850 	if (status) {
3851 		netif_err(qdev, ifup, qdev->ndev,
3852 			  "Failed to init CAM/Routing tables.\n");
3853 		return status;
3854 	}
3855 
3856 	/* Start NAPI for the RSS queues. */
3857 	for (i = 0; i < qdev->rss_ring_count; i++)
3858 		napi_enable(&qdev->rx_ring[i].napi);
3859 
3860 	return status;
3861 }
3862 
3863 /* Issue soft reset to chip. */
3864 static int ql_adapter_reset(struct ql_adapter *qdev)
3865 {
3866 	u32 value;
3867 	int status = 0;
3868 	unsigned long end_jiffies;
3869 
3870 	/* Clear all the entries in the routing table. */
3871 	status = ql_clear_routing_entries(qdev);
3872 	if (status) {
3873 		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3874 		return status;
3875 	}
3876 
3877 	/* If the ASIC recovery bit is set, skip the mailbox commands and
3878 	 * just clear the bit; otherwise this is a normal reset.
3879 	 */
3880 	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3881 		/* Stop management traffic. */
3882 		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3883 
3884 		/* Wait for the NIC and MGMNT FIFOs to empty. */
3885 		ql_wait_fifo_empty(qdev);
3886 	} else
3887 		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3888 
3889 	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3890 
3891 	end_jiffies = jiffies + usecs_to_jiffies(30);
3892 	do {
3893 		value = ql_read32(qdev, RST_FO);
3894 		if ((value & RST_FO_FR) == 0)
3895 			break;
3896 		cpu_relax();
3897 	} while (time_before(jiffies, end_jiffies));
3898 
3899 	if (value & RST_FO_FR) {
3900 		netif_err(qdev, ifdown, qdev->ndev,
3901 			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
3902 		status = -ETIMEDOUT;
3903 	}
3904 
3905 	/* Resume management traffic. */
3906 	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3907 	return status;
3908 }
3909 
3910 static void ql_display_dev_info(struct net_device *ndev)
3911 {
3912 	struct ql_adapter *qdev = netdev_priv(ndev);
3913 
3914 	netif_info(qdev, probe, qdev->ndev,
3915 		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3916 		   "XG Roll = %d, XG Rev = %d.\n",
3917 		   qdev->func,
3918 		   qdev->port,
3919 		   qdev->chip_rev_id & 0x0000000f,
3920 		   qdev->chip_rev_id >> 4 & 0x0000000f,
3921 		   qdev->chip_rev_id >> 8 & 0x0000000f,
3922 		   qdev->chip_rev_id >> 12 & 0x0000000f);
3923 	netif_info(qdev, probe, qdev->ndev,
3924 		   "MAC address %pM\n", ndev->dev_addr);
3925 }
3926 
3927 static int ql_wol(struct ql_adapter *qdev)
3928 {
3929 	int status = 0;
3930 	u32 wol = MB_WOL_DISABLE;
3931 
3932 	/* The CAM is still intact after a reset, but if we
3933 	 * are doing WOL, then we may need to program the
3934 	 * routing regs. We would also need to issue the mailbox
3935 	 * commands to instruct the MPI what to do per the ethtool
3936 	 * settings.
3937 	 */
3938 
3939 	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3940 			WAKE_MCAST | WAKE_BCAST)) {
3941 		netif_err(qdev, ifdown, qdev->ndev,
3942 			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3943 			  qdev->wol);
3944 		return -EINVAL;
3945 	}
3946 
3947 	if (qdev->wol & WAKE_MAGIC) {
3948 		status = ql_mb_wol_set_magic(qdev, 1);
3949 		if (status) {
3950 			netif_err(qdev, ifdown, qdev->ndev,
3951 				  "Failed to set magic packet on %s.\n",
3952 				  qdev->ndev->name);
3953 			return status;
3954 		} else
3955 			netif_info(qdev, drv, qdev->ndev,
3956 				   "Enabled magic packet successfully on %s.\n",
3957 				   qdev->ndev->name);
3958 
3959 		wol |= MB_WOL_MAGIC_PKT;
3960 	}
3961 
3962 	if (qdev->wol) {
3963 		wol |= MB_WOL_MODE_ON;
3964 		status = ql_mb_wol_mode(qdev, wol);
3965 		netif_err(qdev, drv, qdev->ndev,
3966 			  "WOL %s (wol code 0x%x) on %s\n",
3967 			  (status == 0) ? "Successfully set" : "Failed",
3968 			  wol, qdev->ndev->name);
3969 	}
3970 
3971 	return status;
3972 }
3973 
3974 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3975 {
3976 
3977 	/* Don't kill the reset worker thread if we
3978 	 * are in the process of recovery.
3979 	 */
3980 	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3981 		cancel_delayed_work_sync(&qdev->asic_reset_work);
3982 	cancel_delayed_work_sync(&qdev->mpi_reset_work);
3983 	cancel_delayed_work_sync(&qdev->mpi_work);
3984 	cancel_delayed_work_sync(&qdev->mpi_idc_work);
3985 	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3986 	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3987 }
3988 
3989 static int ql_adapter_down(struct ql_adapter *qdev)
3990 {
3991 	int i, status = 0;
3992 
3993 	ql_link_off(qdev);
3994 
3995 	ql_cancel_all_work_sync(qdev);
3996 
3997 	for (i = 0; i < qdev->rss_ring_count; i++)
3998 		napi_disable(&qdev->rx_ring[i].napi);
3999 
4000 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
4001 
4002 	ql_disable_interrupts(qdev);
4003 
4004 	ql_tx_ring_clean(qdev);
4005 
4006 	/* Call netif_napi_del() from a common point.
4007 	 */
4008 	for (i = 0; i < qdev->rss_ring_count; i++)
4009 		netif_napi_del(&qdev->rx_ring[i].napi);
4010 
4011 	status = ql_adapter_reset(qdev);
4012 	if (status)
4013 		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
4014 			  qdev->func);
4015 	ql_free_rx_buffers(qdev);
4016 
4017 	return status;
4018 }
4019 
4020 static int ql_adapter_up(struct ql_adapter *qdev)
4021 {
4022 	int err = 0;
4023 
4024 	err = ql_adapter_initialize(qdev);
4025 	if (err) {
4026 		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
4027 		goto err_init;
4028 	}
4029 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4030 	ql_alloc_rx_buffers(qdev);
4031 	/* If the port is initialized and the
4032 	 * link is up, then turn on the carrier.
4033 	 */
4034 	if ((ql_read32(qdev, STS) & qdev->port_init) &&
4035 			(ql_read32(qdev, STS) & qdev->port_link_up))
4036 		ql_link_on(qdev);
4037 	/* Restore rx mode. */
4038 	clear_bit(QL_ALLMULTI, &qdev->flags);
4039 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
4040 	qlge_set_multicast_list(qdev->ndev);
4041 
4042 	/* Restore vlan setting. */
4043 	qlge_restore_vlan(qdev);
4044 
4045 	ql_enable_interrupts(qdev);
4046 	ql_enable_all_completion_interrupts(qdev);
4047 	netif_tx_start_all_queues(qdev->ndev);
4048 
4049 	return 0;
4050 err_init:
4051 	ql_adapter_reset(qdev);
4052 	return err;
4053 }
4054 
4055 static void ql_release_adapter_resources(struct ql_adapter *qdev)
4056 {
4057 	ql_free_mem_resources(qdev);
4058 	ql_free_irq(qdev);
4059 }
4060 
4061 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4062 {
4063 	int status = 0;
4064 
4065 	if (ql_alloc_mem_resources(qdev)) {
4066 		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4067 		return -ENOMEM;
4068 	}
4069 	status = ql_request_irq(qdev);
4070 	return status;
4071 }
4072 
4073 static int qlge_close(struct net_device *ndev)
4074 {
4075 	struct ql_adapter *qdev = netdev_priv(ndev);
4076 
4077 	/* If we hit the pci_channel_io_perm_failure
4078 	 * condition, then we have already
4079 	 * brought the adapter down.
4080 	 */
4081 	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4082 		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4083 		clear_bit(QL_EEH_FATAL, &qdev->flags);
4084 		return 0;
4085 	}
4086 
4087 	/*
4088 	 * Wait for device to recover from a reset.
4089 	 * (Rarely happens, but possible.)
4090 	 */
4091 	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4092 		msleep(1);
4093 	ql_adapter_down(qdev);
4094 	ql_release_adapter_resources(qdev);
4095 	return 0;
4096 }
4097 
4098 static int ql_configure_rings(struct ql_adapter *qdev)
4099 {
4100 	int i;
4101 	struct rx_ring *rx_ring;
4102 	struct tx_ring *tx_ring;
4103 	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4104 	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4105 		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4106 
4107 	qdev->lbq_buf_order = get_order(lbq_buf_len);
4108 
4109 	/* In a perfect world we have one RSS ring for each CPU
4110 	 * and each has its own vector.  To do that we ask for
4111 	 * cpu_cnt vectors.  ql_enable_msix() will adjust the
4112 	 * vector count to what we actually get.  We then
4113 	 * allocate an RSS ring for each.
4114 	 * Essentially, we are doing min(cpu_count, msix_vector_count).
4115 	 */
4116 	qdev->intr_count = cpu_cnt;
4117 	ql_enable_msix(qdev);
4118 	/* Adjust the RSS ring count to the actual vector count. */
4119 	qdev->rss_ring_count = qdev->intr_count;
4120 	qdev->tx_ring_count = cpu_cnt;
4121 	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
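	/* Example: with 8 online CPUs and 8 MSI-X vectors granted, this
	 * yields 8 RSS rings plus 8 outbound completion rings (16 rx_rings
	 * total) and 8 tx_rings.
	 */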
4122 
4123 	for (i = 0; i < qdev->tx_ring_count; i++) {
4124 		tx_ring = &qdev->tx_ring[i];
4125 		memset((void *)tx_ring, 0, sizeof(*tx_ring));
4126 		tx_ring->qdev = qdev;
4127 		tx_ring->wq_id = i;
4128 		tx_ring->wq_len = qdev->tx_ring_size;
4129 		tx_ring->wq_size =
4130 		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4131 
4132 		/*
4133 		 * The completion queue IDs for the tx rings start
4134 		 * immediately after the rss rings.
4135 		 */
4136 		tx_ring->cq_id = qdev->rss_ring_count + i;
4137 	}
4138 
4139 	for (i = 0; i < qdev->rx_ring_count; i++) {
4140 		rx_ring = &qdev->rx_ring[i];
4141 		memset((void *)rx_ring, 0, sizeof(*rx_ring));
4142 		rx_ring->qdev = qdev;
4143 		rx_ring->cq_id = i;
4144 		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
4145 		if (i < qdev->rss_ring_count) {
4146 			/*
4147 			 * Inbound (RSS) queues.
4148 			 */
4149 			rx_ring->cq_len = qdev->rx_ring_size;
4150 			rx_ring->cq_size =
4151 			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4152 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4153 			rx_ring->lbq_size =
4154 			    rx_ring->lbq_len * sizeof(__le64);
4155 			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4156 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4157 			rx_ring->sbq_size =
4158 			    rx_ring->sbq_len * sizeof(__le64);
4159 			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4160 			rx_ring->type = RX_Q;
4161 		} else {
4162 			/*
4163 			 * Outbound queue handles outbound completions only.
4164 			 */
4165 			/* outbound cq is same size as tx_ring it services. */
4166 			rx_ring->cq_len = qdev->tx_ring_size;
4167 			rx_ring->cq_size =
4168 			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4169 			rx_ring->lbq_len = 0;
4170 			rx_ring->lbq_size = 0;
4171 			rx_ring->lbq_buf_size = 0;
4172 			rx_ring->sbq_len = 0;
4173 			rx_ring->sbq_size = 0;
4174 			rx_ring->sbq_buf_size = 0;
4175 			rx_ring->type = TX_Q;
4176 		}
4177 	}
4178 	return 0;
4179 }
4180 
4181 static int qlge_open(struct net_device *ndev)
4182 {
4183 	int err = 0;
4184 	struct ql_adapter *qdev = netdev_priv(ndev);
4185 
4186 	err = ql_adapter_reset(qdev);
4187 	if (err)
4188 		return err;
4189 
4190 	err = ql_configure_rings(qdev);
4191 	if (err)
4192 		return err;
4193 
4194 	err = ql_get_adapter_resources(qdev);
4195 	if (err)
4196 		goto error_up;
4197 
4198 	err = ql_adapter_up(qdev);
4199 	if (err)
4200 		goto error_up;
4201 
4202 	return err;
4203 
4204 error_up:
4205 	ql_release_adapter_resources(qdev);
4206 	return err;
4207 }
4208 
4209 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4210 {
4211 	struct rx_ring *rx_ring;
4212 	int i, status;
4213 	u32 lbq_buf_len;
4214 
4215 	/* Wait for an outstanding reset to complete. */
4216 	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4217 		int i = 4;
4218 
4219 		while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4220 			netif_err(qdev, ifup, qdev->ndev,
4221 				  "Waiting for adapter UP...\n");
4222 			ssleep(1);
4223 		}
4224 
4225 		if (!i) {
4226 			netif_err(qdev, ifup, qdev->ndev,
4227 				  "Timed out waiting for adapter UP\n");
4228 			return -ETIMEDOUT;
4229 		}
4230 	}
4231 
4232 	status = ql_adapter_down(qdev);
4233 	if (status)
4234 		goto error;
4235 
4236 	/* Get the new rx buffer size. */
4237 	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4238 		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4239 	qdev->lbq_buf_order = get_order(lbq_buf_len);
4240 
4241 	for (i = 0; i < qdev->rss_ring_count; i++) {
4242 		rx_ring = &qdev->rx_ring[i];
4243 		/* Set the new size. */
4244 		rx_ring->lbq_buf_size = lbq_buf_len;
4245 	}
4246 
4247 	status = ql_adapter_up(qdev);
4248 	if (status)
4249 		goto error;
4250 
4251 	return status;
4252 error:
4253 	netif_alert(qdev, ifup, qdev->ndev,
4254 		    "Driver up/down cycle failed, closing device.\n");
4255 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4256 	dev_close(qdev->ndev);
4257 	return status;
4258 }
4259 
4260 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4261 {
4262 	struct ql_adapter *qdev = netdev_priv(ndev);
4263 	int status;
4264 
4265 	if (ndev->mtu == 1500 && new_mtu == 9000) {
4266 		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4267 	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
4268 		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4269 	} else
4270 		return -EINVAL;
4271 
4272 	queue_delayed_work(qdev->workqueue,
4273 			&qdev->mpi_port_cfg_work, 3*HZ);
4274 
4275 	ndev->mtu = new_mtu;
4276 
4277 	if (!netif_running(qdev->ndev)) {
4278 		return 0;
4279 	}
4280 
4281 	status = ql_change_rx_buffers(qdev);
4282 	if (status) {
4283 		netif_err(qdev, ifup, qdev->ndev,
4284 			  "Changing MTU failed.\n");
4285 	}
4286 
4287 	return status;
4288 }
4289 
4290 static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
4292 {
4293 	struct ql_adapter *qdev = netdev_priv(ndev);
4294 	struct rx_ring *rx_ring = &qdev->rx_ring[0];
4295 	struct tx_ring *tx_ring = &qdev->tx_ring[0];
4296 	unsigned long pkts, mcast, dropped, errors, bytes;
4297 	int i;
4298 
4299 	/* Get RX stats. */
4300 	pkts = mcast = dropped = errors = bytes = 0;
4301 	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4302 		pkts += rx_ring->rx_packets;
4303 		bytes += rx_ring->rx_bytes;
4304 		dropped += rx_ring->rx_dropped;
4305 		errors += rx_ring->rx_errors;
4306 		mcast += rx_ring->rx_multicast;
4307 	}
4308 	ndev->stats.rx_packets = pkts;
4309 	ndev->stats.rx_bytes = bytes;
4310 	ndev->stats.rx_dropped = dropped;
4311 	ndev->stats.rx_errors = errors;
4312 	ndev->stats.multicast = mcast;
4313 
4314 	/* Get TX stats. */
4315 	pkts = errors = bytes = 0;
4316 	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4317 		pkts += tx_ring->tx_packets;
4318 		bytes += tx_ring->tx_bytes;
4319 		errors += tx_ring->tx_errors;
4320 	}
4321 	ndev->stats.tx_packets = pkts;
4322 	ndev->stats.tx_bytes = bytes;
4323 	ndev->stats.tx_errors = errors;
4324 	return &ndev->stats;
4325 }
4326 
4327 static void qlge_set_multicast_list(struct net_device *ndev)
4328 {
4329 	struct ql_adapter *qdev = netdev_priv(ndev);
4330 	struct netdev_hw_addr *ha;
4331 	int i, status;
4332 
4333 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4334 	if (status)
4335 		return;
4336 	/*
4337 	 * Set or clear promiscuous mode if a
4338 	 * transition is taking place.
4339 	 */
4340 	if (ndev->flags & IFF_PROMISC) {
4341 		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4342 			if (ql_set_routing_reg
4343 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4344 				netif_err(qdev, hw, qdev->ndev,
4345 					  "Failed to set promiscuous mode.\n");
4346 			} else {
4347 				set_bit(QL_PROMISCUOUS, &qdev->flags);
4348 			}
4349 		}
4350 	} else {
4351 		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4352 			if (ql_set_routing_reg
4353 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4354 				netif_err(qdev, hw, qdev->ndev,
4355 					  "Failed to clear promiscuous mode.\n");
4356 			} else {
4357 				clear_bit(QL_PROMISCUOUS, &qdev->flags);
4358 			}
4359 		}
4360 	}
4361 
4362 	/*
4363 	 * Set or clear all multicast mode if a
4364 	 * transition is taking place.
4365 	 */
4366 	if ((ndev->flags & IFF_ALLMULTI) ||
4367 	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4368 		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4369 			if (ql_set_routing_reg
4370 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4371 				netif_err(qdev, hw, qdev->ndev,
4372 					  "Failed to set all-multi mode.\n");
4373 			} else {
4374 				set_bit(QL_ALLMULTI, &qdev->flags);
4375 			}
4376 		}
4377 	} else {
4378 		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4379 			if (ql_set_routing_reg
4380 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4381 				netif_err(qdev, hw, qdev->ndev,
4382 					  "Failed to clear all-multi mode.\n");
4383 			} else {
4384 				clear_bit(QL_ALLMULTI, &qdev->flags);
4385 			}
4386 		}
4387 	}
4388 
4389 	if (!netdev_mc_empty(ndev)) {
4390 		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4391 		if (status)
4392 			goto exit;
4393 		i = 0;
4394 		netdev_for_each_mc_addr(ha, ndev) {
4395 			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4396 						MAC_ADDR_TYPE_MULTI_MAC, i)) {
4397 				netif_err(qdev, hw, qdev->ndev,
4398 					  "Failed to load multicast address.\n");
4399 				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4400 				goto exit;
4401 			}
4402 			i++;
4403 		}
4404 		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4405 		if (ql_set_routing_reg
4406 		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4407 			netif_err(qdev, hw, qdev->ndev,
4408 				  "Failed to set multicast match mode.\n");
4409 		} else {
4410 			set_bit(QL_ALLMULTI, &qdev->flags);
4411 		}
4412 	}
4413 exit:
4414 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4415 }
4416 
4417 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4418 {
4419 	struct ql_adapter *qdev = netdev_priv(ndev);
4420 	struct sockaddr *addr = p;
4421 	int status;
4422 
4423 	if (!is_valid_ether_addr(addr->sa_data))
4424 		return -EADDRNOTAVAIL;
4425 	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4426 	/* Update local copy of current mac address. */
4427 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4428 
4429 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4430 	if (status)
4431 		return status;
4432 	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4433 			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4434 	if (status)
4435 		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4436 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4437 	return status;
4438 }
4439 
4440 static void qlge_tx_timeout(struct net_device *ndev)
4441 {
4442 	struct ql_adapter *qdev = netdev_priv(ndev);
4443 	ql_queue_asic_error(qdev);
4444 }
4445 
4446 static void ql_asic_reset_work(struct work_struct *work)
4447 {
4448 	struct ql_adapter *qdev =
4449 	    container_of(work, struct ql_adapter, asic_reset_work.work);
4450 	int status;
4451 	rtnl_lock();
4452 	status = ql_adapter_down(qdev);
4453 	if (status)
4454 		goto error;
4455 
4456 	status = ql_adapter_up(qdev);
4457 	if (status)
4458 		goto error;
4459 
4460 	/* Restore rx mode. */
4461 	clear_bit(QL_ALLMULTI, &qdev->flags);
4462 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
4463 	qlge_set_multicast_list(qdev->ndev);
4464 
4465 	rtnl_unlock();
4466 	return;
4467 error:
4468 	netif_alert(qdev, ifup, qdev->ndev,
4469 		    "Driver up/down cycle failed, closing device\n");
4470 
4471 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4472 	dev_close(qdev->ndev);
4473 	rtnl_unlock();
4474 }
4475 
4476 static const struct nic_operations qla8012_nic_ops = {
4477 	.get_flash		= ql_get_8012_flash_params,
4478 	.port_initialize	= ql_8012_port_initialize,
4479 };
4480 
4481 static const struct nic_operations qla8000_nic_ops = {
4482 	.get_flash		= ql_get_8000_flash_params,
4483 	.port_initialize	= ql_8000_port_initialize,
4484 };
4485 
4486 /* Find the pcie function number for the other NIC
4487  * on this chip.  Since both NIC functions share a
4488  * common firmware we have the lowest enabled function
4489  * do any common work.  Examples would be resetting
4490  * after a fatal firmware error, or doing a firmware
4491  * coredump.
4492  */
4493 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4494 {
4495 	int status = 0;
4496 	u32 temp;
4497 	u32 nic_func1, nic_func2;
4498 
4499 	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4500 			&temp);
4501 	if (status)
4502 		return status;
4503 
4504 	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4505 			MPI_TEST_NIC_FUNC_MASK);
4506 	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4507 			MPI_TEST_NIC_FUNC_MASK);
4508 
4509 	if (qdev->func == nic_func1)
4510 		qdev->alt_func = nic_func2;
4511 	else if (qdev->func == nic_func2)
4512 		qdev->alt_func = nic_func1;
4513 	else
4514 		status = -EIO;
4515 
4516 	return status;
4517 }
4518 
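/* Derive per-function settings from the STS function ID: the port
 * number, XGMAC semaphore mask, link/init status bits, mailbox
 * addresses and the nic_ops table for this device ID.
 */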
4519 static int ql_get_board_info(struct ql_adapter *qdev)
4520 {
4521 	int status;
4522 	qdev->func =
4523 	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4524 	if (qdev->func > 3)
4525 		return -EIO;
4526 
4527 	status = ql_get_alt_pcie_func(qdev);
4528 	if (status)
4529 		return status;
4530 
4531 	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4532 	if (qdev->port) {
4533 		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4534 		qdev->port_link_up = STS_PL1;
4535 		qdev->port_init = STS_PI1;
4536 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4537 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4538 	} else {
4539 		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4540 		qdev->port_link_up = STS_PL0;
4541 		qdev->port_init = STS_PI0;
4542 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4543 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4544 	}
4545 	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4546 	qdev->device_id = qdev->pdev->device;
4547 	if (qdev->device_id == QLGE_DEVICE_ID_8012)
4548 		qdev->nic_ops = &qla8012_nic_ops;
4549 	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4550 		qdev->nic_ops = &qla8000_nic_ops;
4551 	return status;
4552 }
4553 
4554 static void ql_release_all(struct pci_dev *pdev)
4555 {
4556 	struct net_device *ndev = pci_get_drvdata(pdev);
4557 	struct ql_adapter *qdev = netdev_priv(ndev);
4558 
4559 	if (qdev->workqueue) {
4560 		destroy_workqueue(qdev->workqueue);
4561 		qdev->workqueue = NULL;
4562 	}
4563 
4564 	if (qdev->reg_base)
4565 		iounmap(qdev->reg_base);
4566 	if (qdev->doorbell_area)
4567 		iounmap(qdev->doorbell_area);
4568 	vfree(qdev->mpi_coredump);
4569 	pci_release_regions(pdev);
4570 }
4571 
4572 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4573 			  int cards_found)
4574 {
4575 	struct ql_adapter *qdev = netdev_priv(ndev);
4576 	int err = 0;
4577 
4578 	memset((void *)qdev, 0, sizeof(*qdev));
4579 	err = pci_enable_device(pdev);
4580 	if (err) {
4581 		dev_err(&pdev->dev, "PCI device enable failed.\n");
4582 		return err;
4583 	}
4584 
4585 	qdev->ndev = ndev;
4586 	qdev->pdev = pdev;
4587 	pci_set_drvdata(pdev, ndev);
4588 
4589 	/* Set PCIe read request size */
4590 	err = pcie_set_readrq(pdev, 4096);
4591 	if (err) {
4592 		dev_err(&pdev->dev, "Set readrq failed.\n");
4593 		goto err_out1;
4594 	}
4595 
4596 	err = pci_request_regions(pdev, DRV_NAME);
4597 	if (err) {
4598 		dev_err(&pdev->dev, "PCI region request failed.\n");
4599 		goto err_out1;
4600 	}
4601 
4602 	pci_set_master(pdev);
4603 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4604 		set_bit(QL_DMA64, &qdev->flags);
4605 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4606 	} else {
4607 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4608 		if (!err)
4609 		       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4610 	}
4611 
4612 	if (err) {
4613 		dev_err(&pdev->dev, "No usable DMA configuration.\n");
4614 		goto err_out2;
4615 	}
4616 
4617 	/* Set PCIe reset type for EEH to fundamental. */
4618 	pdev->needs_freset = 1;
4619 	pci_save_state(pdev);
4620 	qdev->reg_base =
4621 	    ioremap_nocache(pci_resource_start(pdev, 1),
4622 			    pci_resource_len(pdev, 1));
4623 	if (!qdev->reg_base) {
4624 		dev_err(&pdev->dev, "Register mapping failed.\n");
4625 		err = -ENOMEM;
4626 		goto err_out2;
4627 	}
4628 
4629 	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4630 	qdev->doorbell_area =
4631 	    ioremap_nocache(pci_resource_start(pdev, 3),
4632 			    pci_resource_len(pdev, 3));
4633 	if (!qdev->doorbell_area) {
4634 		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4635 		err = -ENOMEM;
4636 		goto err_out2;
4637 	}
4638 
4639 	err = ql_get_board_info(qdev);
4640 	if (err) {
4641 		dev_err(&pdev->dev, "Register access failed.\n");
4642 		err = -EIO;
4643 		goto err_out2;
4644 	}
4645 	qdev->msg_enable = netif_msg_init(debug, default_msg);
4646 	spin_lock_init(&qdev->hw_lock);
4647 	spin_lock_init(&qdev->stats_lock);
4648 
4649 	if (qlge_mpi_coredump) {
4650 		qdev->mpi_coredump =
4651 			vmalloc(sizeof(struct ql_mpi_coredump));
4652 		if (qdev->mpi_coredump == NULL) {
4653 			err = -ENOMEM;
4654 			goto err_out2;
4655 		}
4656 		if (qlge_force_coredump)
4657 			set_bit(QL_FRC_COREDUMP, &qdev->flags);
4658 	}
4659 	/* make sure the EEPROM is good */
4660 	err = qdev->nic_ops->get_flash(qdev);
4661 	if (err) {
4662 		dev_err(&pdev->dev, "Invalid FLASH.\n");
4663 		goto err_out2;
4664 	}
4665 
4666 	/* Keep local copy of current mac address. */
4667 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4668 
4669 	/* Set up the default ring sizes. */
4670 	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4671 	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4672 
4673 	/* Set up the coalescing parameters. */
4674 	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4675 	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4676 	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4677 	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4678 
4679 	/*
4680 	 * Set up the operating parameters.
4681 	 */
4682 	qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4683 						  ndev->name);
4684 	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4685 	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4686 	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4687 	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4688 	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4689 	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4690 	init_completion(&qdev->ide_completion);
4691 	mutex_init(&qdev->mpi_mutex);
4692 
4693 	if (!cards_found) {
4694 		dev_info(&pdev->dev, "%s\n", DRV_STRING);
4695 		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4696 			 DRV_NAME, DRV_VERSION);
4697 	}
4698 	return 0;
4699 err_out2:
4700 	ql_release_all(pdev);
4701 err_out1:
4702 	pci_disable_device(pdev);
4703 	return err;
4704 }
4705 
4706 static const struct net_device_ops qlge_netdev_ops = {
4707 	.ndo_open		= qlge_open,
4708 	.ndo_stop		= qlge_close,
4709 	.ndo_start_xmit		= qlge_send,
4710 	.ndo_change_mtu		= qlge_change_mtu,
4711 	.ndo_get_stats		= qlge_get_stats,
4712 	.ndo_set_rx_mode	= qlge_set_multicast_list,
4713 	.ndo_set_mac_address	= qlge_set_mac_address,
4714 	.ndo_validate_addr	= eth_validate_addr,
4715 	.ndo_tx_timeout		= qlge_tx_timeout,
4716 	.ndo_set_features	= qlge_set_features,
4717 	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
4718 	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
4719 };
4720 
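/* Deferrable timer that periodically reads STS so a dead PCI bus is
 * noticed and EEH recovery can run; it re-arms itself every 5 seconds
 * unless the channel is already offline.
 */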
4721 static void ql_timer(struct timer_list *t)
4722 {
4723 	struct ql_adapter *qdev = from_timer(qdev, t, timer);
4724 	u32 var = 0;
4725 
4726 	var = ql_read32(qdev, STS);
4727 	if (pci_channel_offline(qdev->pdev)) {
4728 		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4729 		return;
4730 	}
4731 
4732 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4733 }
4734 
4735 static int qlge_probe(struct pci_dev *pdev,
4736 		      const struct pci_device_id *pci_entry)
4737 {
4738 	struct net_device *ndev = NULL;
4739 	struct ql_adapter *qdev = NULL;
4740 	static int cards_found;
4741 	int err = 0;
4742 
4743 	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4744 			min(MAX_CPUS, netif_get_num_default_rss_queues()));
4745 	if (!ndev)
4746 		return -ENOMEM;
4747 
4748 	err = ql_init_device(pdev, ndev, cards_found);
4749 	if (err < 0) {
4750 		free_netdev(ndev);
4751 		return err;
4752 	}
4753 
4754 	qdev = netdev_priv(ndev);
4755 	SET_NETDEV_DEV(ndev, &pdev->dev);
4756 	ndev->hw_features = NETIF_F_SG |
4757 			    NETIF_F_IP_CSUM |
4758 			    NETIF_F_TSO |
4759 			    NETIF_F_TSO_ECN |
4760 			    NETIF_F_HW_VLAN_CTAG_TX |
4761 			    NETIF_F_HW_VLAN_CTAG_RX |
4762 			    NETIF_F_HW_VLAN_CTAG_FILTER |
4763 			    NETIF_F_RXCSUM;
4764 	ndev->features = ndev->hw_features;
4765 	ndev->vlan_features = ndev->hw_features;
4766 	/* vlan gets same features (except vlan filter) */
4767 	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4768 				 NETIF_F_HW_VLAN_CTAG_TX |
4769 				 NETIF_F_HW_VLAN_CTAG_RX);
4770 
4771 	if (test_bit(QL_DMA64, &qdev->flags))
4772 		ndev->features |= NETIF_F_HIGHDMA;
4773 
4774 	/*
4775 	 * Set up net_device structure.
4776 	 */
4777 	ndev->tx_queue_len = qdev->tx_ring_size;
4778 	ndev->irq = pdev->irq;
4779 
4780 	ndev->netdev_ops = &qlge_netdev_ops;
4781 	ndev->ethtool_ops = &qlge_ethtool_ops;
4782 	ndev->watchdog_timeo = 10 * HZ;
4783 
4784 	/* MTU range: this driver only supports 1500 or 9000, so this only
4785 	 * filters out values above or below, and we'll rely on
4786 	 * qlge_change_mtu to make sure only 1500 or 9000 are allowed
4787 	 */
4788 	ndev->min_mtu = ETH_DATA_LEN;
4789 	ndev->max_mtu = 9000;
4790 
4791 	err = register_netdev(ndev);
4792 	if (err) {
4793 		dev_err(&pdev->dev, "net device registration failed.\n");
4794 		ql_release_all(pdev);
4795 		pci_disable_device(pdev);
4796 		free_netdev(ndev);
4797 		return err;
4798 	}
4799 	/* Start up the timer to trigger EEH if
4800 	 * the bus goes dead
4801 	 */
4802 	timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
4803 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4804 	ql_link_off(qdev);
4805 	ql_display_dev_info(ndev);
4806 	atomic_set(&qdev->lb_count, 0);
4807 	cards_found++;
4808 	return 0;
4809 }
4810 
4811 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4812 {
4813 	return qlge_send(skb, ndev);
4814 }
4815 
4816 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4817 {
4818 	return ql_clean_inbound_rx_ring(rx_ring, budget);
4819 }
4820 
4821 static void qlge_remove(struct pci_dev *pdev)
4822 {
4823 	struct net_device *ndev = pci_get_drvdata(pdev);
4824 	struct ql_adapter *qdev = netdev_priv(ndev);
4825 	del_timer_sync(&qdev->timer);
4826 	ql_cancel_all_work_sync(qdev);
4827 	unregister_netdev(ndev);
4828 	ql_release_all(pdev);
4829 	pci_disable_device(pdev);
4830 	free_netdev(ndev);
4831 }
4832 
4833 /* Clean up resources without touching hardware. */
4834 static void ql_eeh_close(struct net_device *ndev)
4835 {
4836 	int i;
4837 	struct ql_adapter *qdev = netdev_priv(ndev);
4838 
4839 	if (netif_carrier_ok(ndev)) {
4840 		netif_carrier_off(ndev);
4841 		netif_stop_queue(ndev);
4842 	}
4843 
4844 	/* Cancel all outstanding driver work. */
4845 	ql_cancel_all_work_sync(qdev);
4846 
4847 	for (i = 0; i < qdev->rss_ring_count; i++)
4848 		netif_napi_del(&qdev->rx_ring[i].napi);
4849 
4850 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
4851 	ql_tx_ring_clean(qdev);
4852 	ql_free_rx_buffers(qdev);
4853 	ql_release_adapter_resources(qdev);
4854 }
4855 
4856 /*
4857  * This callback is called by the PCI subsystem whenever
4858  * a PCI bus error is detected.
4859  */
4860 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4861 					       enum pci_channel_state state)
4862 {
4863 	struct net_device *ndev = pci_get_drvdata(pdev);
4864 	struct ql_adapter *qdev = netdev_priv(ndev);
4865 
4866 	switch (state) {
4867 	case pci_channel_io_normal:
4868 		return PCI_ERS_RESULT_CAN_RECOVER;
4869 	case pci_channel_io_frozen:
4870 		netif_device_detach(ndev);
4871 		del_timer_sync(&qdev->timer);
4872 		if (netif_running(ndev))
4873 			ql_eeh_close(ndev);
4874 		pci_disable_device(pdev);
4875 		return PCI_ERS_RESULT_NEED_RESET;
4876 	case pci_channel_io_perm_failure:
4877 		dev_err(&pdev->dev,
4878 			"%s: pci_channel_io_perm_failure.\n", __func__);
4879 		del_timer_sync(&qdev->timer);
4880 		ql_eeh_close(ndev);
4881 		set_bit(QL_EEH_FATAL, &qdev->flags);
4882 		return PCI_ERS_RESULT_DISCONNECT;
4883 	}
4884 
4885 	/* Request a slot reset. */
4886 	return PCI_ERS_RESULT_NEED_RESET;
4887 }
4888 
4889 /*
4890  * This callback is called after the PCI bus has been reset.
4891  * Basically, this tries to restart the card from scratch.
4892  * This is a shortened version of the device probe/discovery code,
4893  * it resembles the first-half of the () routine.
4894  */
4895 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4896 {
4897 	struct net_device *ndev = pci_get_drvdata(pdev);
4898 	struct ql_adapter *qdev = netdev_priv(ndev);
4899 
4900 	pdev->error_state = pci_channel_io_normal;
4901 
4902 	pci_restore_state(pdev);
4903 	if (pci_enable_device(pdev)) {
4904 		netif_err(qdev, ifup, qdev->ndev,
4905 			  "Cannot re-enable PCI device after reset.\n");
4906 		return PCI_ERS_RESULT_DISCONNECT;
4907 	}
4908 	pci_set_master(pdev);
4909 
4910 	if (ql_adapter_reset(qdev)) {
4911 		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4912 		set_bit(QL_EEH_FATAL, &qdev->flags);
4913 		return PCI_ERS_RESULT_DISCONNECT;
4914 	}
4915 
4916 	return PCI_ERS_RESULT_RECOVERED;
4917 }
4918 
4919 static void qlge_io_resume(struct pci_dev *pdev)
4920 {
4921 	struct net_device *ndev = pci_get_drvdata(pdev);
4922 	struct ql_adapter *qdev = netdev_priv(ndev);
4923 	int err = 0;
4924 
4925 	if (netif_running(ndev)) {
4926 		err = qlge_open(ndev);
4927 		if (err) {
4928 			netif_err(qdev, ifup, qdev->ndev,
4929 				  "Device initialization failed after reset.\n");
4930 			return;
4931 		}
4932 	} else {
4933 		netif_err(qdev, ifup, qdev->ndev,
4934 			  "Device was not running prior to EEH.\n");
4935 	}
4936 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4937 	netif_device_attach(ndev);
4938 }
4939 
4940 static const struct pci_error_handlers qlge_err_handler = {
4941 	.error_detected = qlge_io_error_detected,
4942 	.slot_reset = qlge_io_slot_reset,
4943 	.resume = qlge_io_resume,
4944 };
4945 
4946 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4947 {
4948 	struct net_device *ndev = pci_get_drvdata(pdev);
4949 	struct ql_adapter *qdev = netdev_priv(ndev);
4950 	int err;
4951 
4952 	netif_device_detach(ndev);
4953 	del_timer_sync(&qdev->timer);
4954 
4955 	if (netif_running(ndev)) {
4956 		err = ql_adapter_down(qdev);
4957 		if (err)
4958 			return err;
4959 	}
4960 
4961 	ql_wol(qdev);
4962 	err = pci_save_state(pdev);
4963 	if (err)
4964 		return err;
4965 
4966 	pci_disable_device(pdev);
4967 
4968 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
4969 
4970 	return 0;
4971 }
4972 
4973 #ifdef CONFIG_PM
4974 static int qlge_resume(struct pci_dev *pdev)
4975 {
4976 	struct net_device *ndev = pci_get_drvdata(pdev);
4977 	struct ql_adapter *qdev = netdev_priv(ndev);
4978 	int err;
4979 
4980 	pci_set_power_state(pdev, PCI_D0);
4981 	pci_restore_state(pdev);
4982 	err = pci_enable_device(pdev);
4983 	if (err) {
4984 		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4985 		return err;
4986 	}
4987 	pci_set_master(pdev);
4988 
4989 	pci_enable_wake(pdev, PCI_D3hot, 0);
4990 	pci_enable_wake(pdev, PCI_D3cold, 0);
4991 
4992 	if (netif_running(ndev)) {
4993 		err = ql_adapter_up(qdev);
4994 		if (err)
4995 			return err;
4996 	}
4997 
4998 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4999 	netif_device_attach(ndev);
5000 
5001 	return 0;
5002 }
5003 #endif /* CONFIG_PM */
5004 
5005 static void qlge_shutdown(struct pci_dev *pdev)
5006 {
5007 	qlge_suspend(pdev, PMSG_SUSPEND);
5008 }
5009 
5010 static struct pci_driver qlge_driver = {
5011 	.name = DRV_NAME,
5012 	.id_table = qlge_pci_tbl,
5013 	.probe = qlge_probe,
5014 	.remove = qlge_remove,
5015 #ifdef CONFIG_PM
5016 	.suspend = qlge_suspend,
5017 	.resume = qlge_resume,
5018 #endif
5019 	.shutdown = qlge_shutdown,
5020 	.err_handler = &qlge_err_handler
5021 };
5022 
5023 module_pci_driver(qlge_driver);
5024