1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c)  2018 Intel Corporation */
3 
4 #include <linux/module.h>
5 #include <linux/types.h>
6 #include <linux/if_vlan.h>
7 #include <linux/aer.h>
8 #include <linux/tcp.h>
9 #include <linux/udp.h>
10 #include <linux/ip.h>
11 #include <linux/pm_runtime.h>
12 #include <net/pkt_sched.h>
13 
14 #include <net/ipv6.h>
15 
16 #include "igc.h"
17 #include "igc_hw.h"
18 #include "igc_tsn.h"
19 
20 #define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"
21 
22 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
23 
24 static int debug = -1;
25 
26 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
27 MODULE_DESCRIPTION(DRV_SUMMARY);
28 MODULE_LICENSE("GPL v2");
29 module_param(debug, int, 0);
30 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
31 
32 char igc_driver_name[] = "igc";
33 static const char igc_driver_string[] = DRV_SUMMARY;
34 static const char igc_copyright[] =
35 	"Copyright(c) 2018 Intel Corporation.";
36 
37 static const struct igc_info *igc_info_tbl[] = {
38 	[board_base] = &igc_base_info,
39 };
40 
41 static const struct pci_device_id igc_pci_tbl[] = {
42 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
43 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
44 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
45 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
46 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
47 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
48 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
49 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
50 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
51 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
52 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
53 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
54 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
55 	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
56 	/* required last entry */
57 	{0, }
58 };
59 
60 MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
61 
62 enum latency_range {
63 	lowest_latency = 0,
64 	low_latency = 1,
65 	bulk_latency = 2,
66 	latency_invalid = 255
67 };
68 
69 void igc_reset(struct igc_adapter *adapter)
70 {
71 	struct net_device *dev = adapter->netdev;
72 	struct igc_hw *hw = &adapter->hw;
73 	struct igc_fc_info *fc = &hw->fc;
74 	u32 pba, hwm;
75 
76 	/* Repartition PBA for greater than 9k MTU if required */
77 	pba = IGC_PBA_34K;
78 
79 	/* flow control settings
80 	 * The high water mark must be low enough to fit one full frame
81 	 * after transmitting the pause frame.  As such we must have enough
82 	 * space to allow for us to complete our current transmit and then
83 	 * receive the frame that is in progress from the link partner.
84 	 * Set it to:
85 	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
86 	 */
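	/* Note: pba appears to be expressed in KB here, so the << 10 below
	 * converts it to bytes before subtracting the frame sizes (assumption
	 * based on the IGC_PBA_34K naming).
	 */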
87 	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
88 
89 	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
90 	fc->low_water = fc->high_water - 16;
91 	fc->pause_time = 0xFFFF;
92 	fc->send_xon = 1;
93 	fc->current_mode = fc->requested_mode;
94 
95 	hw->mac.ops.reset_hw(hw);
96 
97 	if (hw->mac.ops.init_hw(hw))
98 		netdev_err(dev, "Error on hardware initialization\n");
99 
100 	/* Re-establish EEE setting */
101 	igc_set_eee_i225(hw, true, true, true);
102 
103 	if (!netif_running(adapter->netdev))
104 		igc_power_down_phy_copper_base(&adapter->hw);
105 
106 	/* Re-enable PTP, where applicable. */
107 	igc_ptp_reset(adapter);
108 
109 	/* Re-enable TSN offloading, where applicable. */
110 	igc_tsn_offload_apply(adapter);
111 
112 	igc_get_phy_info(hw);
113 }
114 
115 /**
116  * igc_power_up_link - Power up the phy link
117  * @adapter: address of board private structure
118  */
119 static void igc_power_up_link(struct igc_adapter *adapter)
120 {
121 	igc_reset_phy(&adapter->hw);
122 
123 	igc_power_up_phy_copper(&adapter->hw);
124 
125 	igc_setup_link(&adapter->hw);
126 }
127 
128 /**
129  * igc_release_hw_control - release control of the h/w to f/w
130  * @adapter: address of board private structure
131  *
132  * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
133  * For ASF and Pass Through versions of f/w this means that the
134  * driver is no longer loaded.
135  */
136 static void igc_release_hw_control(struct igc_adapter *adapter)
137 {
138 	struct igc_hw *hw = &adapter->hw;
139 	u32 ctrl_ext;
140 
141 	/* Let firmware take over control of h/w */
142 	ctrl_ext = rd32(IGC_CTRL_EXT);
143 	wr32(IGC_CTRL_EXT,
144 	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
145 }
146 
147 /**
148  * igc_get_hw_control - get control of the h/w from f/w
149  * @adapter: address of board private structure
150  *
151  * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
152  * For ASF and Pass Through versions of f/w this means that
153  * the driver is loaded.
154  */
155 static void igc_get_hw_control(struct igc_adapter *adapter)
156 {
157 	struct igc_hw *hw = &adapter->hw;
158 	u32 ctrl_ext;
159 
160 	/* Let firmware know the driver has taken over */
161 	ctrl_ext = rd32(IGC_CTRL_EXT);
162 	wr32(IGC_CTRL_EXT,
163 	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
164 }
165 
166 /**
167  * igc_clean_tx_ring - Free Tx Buffers
168  * @tx_ring: ring to be cleaned
169  */
170 static void igc_clean_tx_ring(struct igc_ring *tx_ring)
171 {
172 	u16 i = tx_ring->next_to_clean;
173 	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
174 
175 	while (i != tx_ring->next_to_use) {
176 		union igc_adv_tx_desc *eop_desc, *tx_desc;
177 
178 		/* Free all the Tx ring sk_buffs */
179 		dev_kfree_skb_any(tx_buffer->skb);
180 
181 		/* unmap skb header data */
182 		dma_unmap_single(tx_ring->dev,
183 				 dma_unmap_addr(tx_buffer, dma),
184 				 dma_unmap_len(tx_buffer, len),
185 				 DMA_TO_DEVICE);
186 
187 		/* check for eop_desc to determine the end of the packet */
188 		eop_desc = tx_buffer->next_to_watch;
189 		tx_desc = IGC_TX_DESC(tx_ring, i);
190 
191 		/* unmap remaining buffers */
192 		while (tx_desc != eop_desc) {
193 			tx_buffer++;
194 			tx_desc++;
195 			i++;
196 			if (unlikely(i == tx_ring->count)) {
197 				i = 0;
198 				tx_buffer = tx_ring->tx_buffer_info;
199 				tx_desc = IGC_TX_DESC(tx_ring, 0);
200 			}
201 
202 			/* unmap any remaining paged data */
203 			if (dma_unmap_len(tx_buffer, len))
204 				dma_unmap_page(tx_ring->dev,
205 					       dma_unmap_addr(tx_buffer, dma),
206 					       dma_unmap_len(tx_buffer, len),
207 					       DMA_TO_DEVICE);
208 		}
209 
210 		/* move us one more past the eop_desc for start of next pkt */
211 		tx_buffer++;
212 		i++;
213 		if (unlikely(i == tx_ring->count)) {
214 			i = 0;
215 			tx_buffer = tx_ring->tx_buffer_info;
216 		}
217 	}
218 
219 	/* reset BQL for queue */
220 	netdev_tx_reset_queue(txring_txq(tx_ring));
221 
222 	/* reset next_to_use and next_to_clean */
223 	tx_ring->next_to_use = 0;
224 	tx_ring->next_to_clean = 0;
225 }
226 
227 /**
228  * igc_free_tx_resources - Free Tx Resources per Queue
229  * @tx_ring: Tx descriptor ring for a specific queue
230  *
231  * Free all transmit software resources
232  */
233 void igc_free_tx_resources(struct igc_ring *tx_ring)
234 {
235 	igc_clean_tx_ring(tx_ring);
236 
237 	vfree(tx_ring->tx_buffer_info);
238 	tx_ring->tx_buffer_info = NULL;
239 
240 	/* if not set, then don't free */
241 	if (!tx_ring->desc)
242 		return;
243 
244 	dma_free_coherent(tx_ring->dev, tx_ring->size,
245 			  tx_ring->desc, tx_ring->dma);
246 
247 	tx_ring->desc = NULL;
248 }
249 
250 /**
251  * igc_free_all_tx_resources - Free Tx Resources for All Queues
252  * @adapter: board private structure
253  *
254  * Free all transmit software resources
255  */
256 static void igc_free_all_tx_resources(struct igc_adapter *adapter)
257 {
258 	int i;
259 
260 	for (i = 0; i < adapter->num_tx_queues; i++)
261 		igc_free_tx_resources(adapter->tx_ring[i]);
262 }
263 
264 /**
265  * igc_clean_all_tx_rings - Free Tx Buffers for all queues
266  * @adapter: board private structure
267  */
268 static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
269 {
270 	int i;
271 
272 	for (i = 0; i < adapter->num_tx_queues; i++)
273 		if (adapter->tx_ring[i])
274 			igc_clean_tx_ring(adapter->tx_ring[i]);
275 }
276 
277 /**
278  * igc_setup_tx_resources - allocate Tx resources (Descriptors)
279  * @tx_ring: tx descriptor ring (for a specific queue) to setup
280  *
281  * Return 0 on success, negative on failure
282  */
283 int igc_setup_tx_resources(struct igc_ring *tx_ring)
284 {
285 	struct net_device *ndev = tx_ring->netdev;
286 	struct device *dev = tx_ring->dev;
287 	int size = 0;
288 
289 	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
290 	tx_ring->tx_buffer_info = vzalloc(size);
291 	if (!tx_ring->tx_buffer_info)
292 		goto err;
293 
294 	/* round up to nearest 4K */
295 	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
296 	tx_ring->size = ALIGN(tx_ring->size, 4096);
297 
298 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
299 					   &tx_ring->dma, GFP_KERNEL);
300 
301 	if (!tx_ring->desc)
302 		goto err;
303 
304 	tx_ring->next_to_use = 0;
305 	tx_ring->next_to_clean = 0;
306 
307 	return 0;
308 
309 err:
310 	vfree(tx_ring->tx_buffer_info);
311 	netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
312 	return -ENOMEM;
313 }
314 
315 /**
316  * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
317  * @adapter: board private structure
318  *
319  * Return 0 on success, negative on failure
320  */
321 static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
322 {
323 	struct net_device *dev = adapter->netdev;
324 	int i, err = 0;
325 
326 	for (i = 0; i < adapter->num_tx_queues; i++) {
327 		err = igc_setup_tx_resources(adapter->tx_ring[i]);
328 		if (err) {
329 			netdev_err(dev, "Error on Tx queue %u setup\n", i);
330 			for (i--; i >= 0; i--)
331 				igc_free_tx_resources(adapter->tx_ring[i]);
332 			break;
333 		}
334 	}
335 
336 	return err;
337 }
338 
339 /**
340  * igc_clean_rx_ring - Free Rx Buffers per Queue
341  * @rx_ring: ring to free buffers from
342  */
343 static void igc_clean_rx_ring(struct igc_ring *rx_ring)
344 {
345 	u16 i = rx_ring->next_to_clean;
346 
347 	dev_kfree_skb(rx_ring->skb);
348 	rx_ring->skb = NULL;
349 
350 	/* Free all the Rx ring sk_buffs */
351 	while (i != rx_ring->next_to_alloc) {
352 		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
353 
354 		/* Invalidate cache lines that may have been written to by
355 		 * device so that we avoid corrupting memory.
356 		 */
357 		dma_sync_single_range_for_cpu(rx_ring->dev,
358 					      buffer_info->dma,
359 					      buffer_info->page_offset,
360 					      igc_rx_bufsz(rx_ring),
361 					      DMA_FROM_DEVICE);
362 
363 		/* free resources associated with mapping */
364 		dma_unmap_page_attrs(rx_ring->dev,
365 				     buffer_info->dma,
366 				     igc_rx_pg_size(rx_ring),
367 				     DMA_FROM_DEVICE,
368 				     IGC_RX_DMA_ATTR);
369 		__page_frag_cache_drain(buffer_info->page,
370 					buffer_info->pagecnt_bias);
371 
372 		i++;
373 		if (i == rx_ring->count)
374 			i = 0;
375 	}
376 
377 	rx_ring->next_to_alloc = 0;
378 	rx_ring->next_to_clean = 0;
379 	rx_ring->next_to_use = 0;
380 }
381 
382 /**
383  * igc_clean_all_rx_rings - Free Rx Buffers for all queues
384  * @adapter: board private structure
385  */
386 static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
387 {
388 	int i;
389 
390 	for (i = 0; i < adapter->num_rx_queues; i++)
391 		if (adapter->rx_ring[i])
392 			igc_clean_rx_ring(adapter->rx_ring[i]);
393 }
394 
395 /**
396  * igc_free_rx_resources - Free Rx Resources
397  * @rx_ring: ring to clean the resources from
398  *
399  * Free all receive software resources
400  */
401 void igc_free_rx_resources(struct igc_ring *rx_ring)
402 {
403 	igc_clean_rx_ring(rx_ring);
404 
405 	vfree(rx_ring->rx_buffer_info);
406 	rx_ring->rx_buffer_info = NULL;
407 
408 	/* if not set, then don't free */
409 	if (!rx_ring->desc)
410 		return;
411 
412 	dma_free_coherent(rx_ring->dev, rx_ring->size,
413 			  rx_ring->desc, rx_ring->dma);
414 
415 	rx_ring->desc = NULL;
416 }
417 
418 /**
419  * igc_free_all_rx_resources - Free Rx Resources for All Queues
420  * @adapter: board private structure
421  *
422  * Free all receive software resources
423  */
424 static void igc_free_all_rx_resources(struct igc_adapter *adapter)
425 {
426 	int i;
427 
428 	for (i = 0; i < adapter->num_rx_queues; i++)
429 		igc_free_rx_resources(adapter->rx_ring[i]);
430 }
431 
432 /**
433  * igc_setup_rx_resources - allocate Rx resources (Descriptors)
434  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
435  *
436  * Returns 0 on success, negative on failure
437  */
438 int igc_setup_rx_resources(struct igc_ring *rx_ring)
439 {
440 	struct net_device *ndev = rx_ring->netdev;
441 	struct device *dev = rx_ring->dev;
442 	int size, desc_len;
443 
444 	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
445 	rx_ring->rx_buffer_info = vzalloc(size);
446 	if (!rx_ring->rx_buffer_info)
447 		goto err;
448 
449 	desc_len = sizeof(union igc_adv_rx_desc);
450 
451 	/* Round up to nearest 4K */
452 	rx_ring->size = rx_ring->count * desc_len;
453 	rx_ring->size = ALIGN(rx_ring->size, 4096);
454 
455 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
456 					   &rx_ring->dma, GFP_KERNEL);
457 
458 	if (!rx_ring->desc)
459 		goto err;
460 
461 	rx_ring->next_to_alloc = 0;
462 	rx_ring->next_to_clean = 0;
463 	rx_ring->next_to_use = 0;
464 
465 	return 0;
466 
467 err:
468 	vfree(rx_ring->rx_buffer_info);
469 	rx_ring->rx_buffer_info = NULL;
470 	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
471 	return -ENOMEM;
472 }
473 
474 /**
475  * igc_setup_all_rx_resources - wrapper to allocate Rx resources
476  *                                (Descriptors) for all queues
477  * @adapter: board private structure
478  *
479  * Return 0 on success, negative on failure
480  */
481 static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
482 {
483 	struct net_device *dev = adapter->netdev;
484 	int i, err = 0;
485 
486 	for (i = 0; i < adapter->num_rx_queues; i++) {
487 		err = igc_setup_rx_resources(adapter->rx_ring[i]);
488 		if (err) {
489 			netdev_err(dev, "Error on Rx queue %u setup\n", i);
490 			for (i--; i >= 0; i--)
491 				igc_free_rx_resources(adapter->rx_ring[i]);
492 			break;
493 		}
494 	}
495 
496 	return err;
497 }
498 
499 /**
500  * igc_configure_rx_ring - Configure a receive ring after Reset
501  * @adapter: board private structure
502  * @ring: receive ring to be configured
503  *
504  * Configure the Rx unit of the MAC after a reset.
505  */
506 static void igc_configure_rx_ring(struct igc_adapter *adapter,
507 				  struct igc_ring *ring)
508 {
509 	struct igc_hw *hw = &adapter->hw;
510 	union igc_adv_rx_desc *rx_desc;
511 	int reg_idx = ring->reg_idx;
512 	u32 srrctl = 0, rxdctl = 0;
513 	u64 rdba = ring->dma;
514 
515 	/* disable the queue */
516 	wr32(IGC_RXDCTL(reg_idx), 0);
517 
518 	/* Set DMA base address registers */
519 	wr32(IGC_RDBAL(reg_idx),
520 	     rdba & 0x00000000ffffffffULL);
521 	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
522 	wr32(IGC_RDLEN(reg_idx),
523 	     ring->count * sizeof(union igc_adv_rx_desc));
524 
525 	/* initialize head and tail */
526 	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
527 	wr32(IGC_RDH(reg_idx), 0);
528 	writel(0, ring->tail);
529 
530 	/* reset next_to_use/next_to_clean to place SW in sync with hardware */
531 	ring->next_to_clean = 0;
532 	ring->next_to_use = 0;
533 
534 	/* set descriptor configuration */
535 	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
536 	if (ring_uses_large_buffer(ring))
537 		srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
538 	else
539 		srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
540 	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
541 
542 	wr32(IGC_SRRCTL(reg_idx), srrctl);
543 
544 	rxdctl |= IGC_RX_PTHRESH;
545 	rxdctl |= IGC_RX_HTHRESH << 8;
546 	rxdctl |= IGC_RX_WTHRESH << 16;
547 
548 	/* initialize rx_buffer_info */
549 	memset(ring->rx_buffer_info, 0,
550 	       sizeof(struct igc_rx_buffer) * ring->count);
551 
552 	/* initialize Rx descriptor 0 */
553 	rx_desc = IGC_RX_DESC(ring, 0);
554 	rx_desc->wb.upper.length = 0;
555 
556 	/* enable receive descriptor fetching */
557 	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
558 
559 	wr32(IGC_RXDCTL(reg_idx), rxdctl);
560 }
561 
562 /**
563  * igc_configure_rx - Configure receive Unit after Reset
564  * @adapter: board private structure
565  *
566  * Configure the Rx unit of the MAC after a reset.
567  */
568 static void igc_configure_rx(struct igc_adapter *adapter)
569 {
570 	int i;
571 
572 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
573 	 * the Base and Length of the Rx Descriptor Ring
574 	 */
575 	for (i = 0; i < adapter->num_rx_queues; i++)
576 		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
577 }
578 
579 /**
580  * igc_configure_tx_ring - Configure transmit ring after Reset
581  * @adapter: board private structure
582  * @ring: tx ring to configure
583  *
584  * Configure a transmit ring after a reset.
585  */
586 static void igc_configure_tx_ring(struct igc_adapter *adapter,
587 				  struct igc_ring *ring)
588 {
589 	struct igc_hw *hw = &adapter->hw;
590 	int reg_idx = ring->reg_idx;
591 	u64 tdba = ring->dma;
592 	u32 txdctl = 0;
593 
594 	/* disable the queue */
595 	wr32(IGC_TXDCTL(reg_idx), 0);
596 	wrfl();
597 	mdelay(10);
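	/* The flush and delay above presumably let any in-flight descriptor
	 * fetches finish before the ring registers are reprogrammed.
	 */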
598 
599 	wr32(IGC_TDLEN(reg_idx),
600 	     ring->count * sizeof(union igc_adv_tx_desc));
601 	wr32(IGC_TDBAL(reg_idx),
602 	     tdba & 0x00000000ffffffffULL);
603 	wr32(IGC_TDBAH(reg_idx), tdba >> 32);
604 
605 	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
606 	wr32(IGC_TDH(reg_idx), 0);
607 	writel(0, ring->tail);
608 
609 	txdctl |= IGC_TX_PTHRESH;
610 	txdctl |= IGC_TX_HTHRESH << 8;
611 	txdctl |= IGC_TX_WTHRESH << 16;
612 
613 	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
614 	wr32(IGC_TXDCTL(reg_idx), txdctl);
615 }
616 
617 /**
618  * igc_configure_tx - Configure transmit Unit after Reset
619  * @adapter: board private structure
620  *
621  * Configure the Tx unit of the MAC after a reset.
622  */
623 static void igc_configure_tx(struct igc_adapter *adapter)
624 {
625 	int i;
626 
627 	for (i = 0; i < adapter->num_tx_queues; i++)
628 		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
629 }
630 
631 /**
632  * igc_setup_mrqc - configure the multiple receive queue control registers
633  * @adapter: Board private structure
634  */
635 static void igc_setup_mrqc(struct igc_adapter *adapter)
636 {
637 	struct igc_hw *hw = &adapter->hw;
638 	u32 j, num_rx_queues;
639 	u32 mrqc, rxcsum;
640 	u32 rss_key[10];
641 
642 	netdev_rss_key_fill(rss_key, sizeof(rss_key));
643 	for (j = 0; j < 10; j++)
644 		wr32(IGC_RSSRK(j), rss_key[j]);
645 
646 	num_rx_queues = adapter->rss_queues;
647 
648 	if (adapter->rss_indir_tbl_init != num_rx_queues) {
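		/* Spread the indirection table entries evenly across the
		 * enabled RSS queues.
		 */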
649 		for (j = 0; j < IGC_RETA_SIZE; j++)
650 			adapter->rss_indir_tbl[j] =
651 			(j * num_rx_queues) / IGC_RETA_SIZE;
652 		adapter->rss_indir_tbl_init = num_rx_queues;
653 	}
654 	igc_write_rss_indir_tbl(adapter);
655 
656 	/* Disable raw packet checksumming so that RSS hash is placed in
657 	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
658 	 * offloads as they are enabled by default
659 	 */
660 	rxcsum = rd32(IGC_RXCSUM);
661 	rxcsum |= IGC_RXCSUM_PCSD;
662 
663 	/* Enable Receive Checksum Offload for SCTP */
664 	rxcsum |= IGC_RXCSUM_CRCOFL;
665 
666 	/* Don't need to set TUOFL or IPOFL, they default to 1 */
667 	wr32(IGC_RXCSUM, rxcsum);
668 
669 	/* Generate RSS hash based on packet types, TCP/UDP
670 	 * port numbers and/or IPv4/v6 src and dst addresses
671 	 */
672 	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
673 	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
674 	       IGC_MRQC_RSS_FIELD_IPV6 |
675 	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
676 	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
677 
678 	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
679 		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
680 	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
681 		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
682 
683 	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;
684 
685 	wr32(IGC_MRQC, mrqc);
686 }
687 
688 /**
689  * igc_setup_rctl - configure the receive control registers
690  * @adapter: Board private structure
691  */
692 static void igc_setup_rctl(struct igc_adapter *adapter)
693 {
694 	struct igc_hw *hw = &adapter->hw;
695 	u32 rctl;
696 
697 	rctl = rd32(IGC_RCTL);
698 
699 	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
700 	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);
701 
702 	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
703 		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
704 
705 	/* enable stripping of CRC. Newer features require
706 	 * that the HW strips the CRC.
707 	 */
708 	rctl |= IGC_RCTL_SECRC;
709 
710 	/* disable store bad packets and clear size bits. */
711 	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);
712 
713 	/* enable LPE to allow for reception of jumbo frames */
714 	rctl |= IGC_RCTL_LPE;
715 
716 	/* disable queue 0 to prevent tail write w/o re-config */
717 	wr32(IGC_RXDCTL(0), 0);
718 
719 	/* This is useful for sniffing bad packets. */
720 	if (adapter->netdev->features & NETIF_F_RXALL) {
721 		/* UPE and MPE will be handled by normal PROMISC logic
722 		 * in set_rx_mode
723 		 */
724 		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
725 			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
726 			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
727 
728 		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
729 			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
730 	}
731 
732 	wr32(IGC_RCTL, rctl);
733 }
734 
735 /**
736  * igc_setup_tctl - configure the transmit control registers
737  * @adapter: Board private structure
738  */
739 static void igc_setup_tctl(struct igc_adapter *adapter)
740 {
741 	struct igc_hw *hw = &adapter->hw;
742 	u32 tctl;
743 
744 	/* disable queue 0, which could be enabled by default */
745 	wr32(IGC_TXDCTL(0), 0);
746 
747 	/* Program the Transmit Control Register */
748 	tctl = rd32(IGC_TCTL);
749 	tctl &= ~IGC_TCTL_CT;
750 	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
751 		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);
752 
753 	/* Enable transmits */
754 	tctl |= IGC_TCTL_EN;
755 
756 	wr32(IGC_TCTL, tctl);
757 }
758 
759 /**
760  * igc_set_mac_filter_hw() - Set MAC address filter in hardware
761  * @adapter: Pointer to adapter where the filter should be set
762  * @index: Filter index
763  * @type: MAC address filter type (source or destination)
764  * @addr: MAC address
765  * @queue: If non-negative, queue assignment feature is enabled and frames
766  *         matching the filter are enqueued onto 'queue'. Otherwise, queue
767  *         assignment is disabled.
768  */
769 static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
770 				  enum igc_mac_filter_type type,
771 				  const u8 *addr, int queue)
772 {
773 	struct net_device *dev = adapter->netdev;
774 	struct igc_hw *hw = &adapter->hw;
775 	u32 ral, rah;
776 
777 	if (WARN_ON(index >= hw->mac.rar_entry_count))
778 		return;
779 
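	/* RAL holds the low 32 bits of the MAC address and RAH the remaining
	 * 16 bits plus control flags (per the usual Intel RAL/RAH layout).
	 */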
780 	ral = le32_to_cpup((__le32 *)(addr));
781 	rah = le16_to_cpup((__le16 *)(addr + 4));
782 
783 	if (type == IGC_MAC_FILTER_TYPE_SRC) {
784 		rah &= ~IGC_RAH_ASEL_MASK;
785 		rah |= IGC_RAH_ASEL_SRC_ADDR;
786 	}
787 
788 	if (queue >= 0) {
789 		rah &= ~IGC_RAH_QSEL_MASK;
790 		rah |= (queue << IGC_RAH_QSEL_SHIFT);
791 		rah |= IGC_RAH_QSEL_ENABLE;
792 	}
793 
794 	rah |= IGC_RAH_AV;
795 
796 	wr32(IGC_RAL(index), ral);
797 	wr32(IGC_RAH(index), rah);
798 
799 	netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
800 }
801 
802 /**
803  * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
804  * @adapter: Pointer to adapter where the filter should be cleared
805  * @index: Filter index
806  */
807 static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
808 {
809 	struct net_device *dev = adapter->netdev;
810 	struct igc_hw *hw = &adapter->hw;
811 
812 	if (WARN_ON(index >= hw->mac.rar_entry_count))
813 		return;
814 
815 	wr32(IGC_RAL(index), 0);
816 	wr32(IGC_RAH(index), 0);
817 
818 	netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
819 }
820 
821 /* Set default MAC address for the PF in the first RAR entry */
822 static void igc_set_default_mac_filter(struct igc_adapter *adapter)
823 {
824 	struct net_device *dev = adapter->netdev;
825 	u8 *addr = adapter->hw.mac.addr;
826 
827 	netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);
828 
829 	igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
830 }
831 
832 /**
833  * igc_set_mac - Change the Ethernet Address of the NIC
834  * @netdev: network interface device structure
835  * @p: pointer to an address structure
836  *
837  * Returns 0 on success, negative on failure
838  */
839 static int igc_set_mac(struct net_device *netdev, void *p)
840 {
841 	struct igc_adapter *adapter = netdev_priv(netdev);
842 	struct igc_hw *hw = &adapter->hw;
843 	struct sockaddr *addr = p;
844 
845 	if (!is_valid_ether_addr(addr->sa_data))
846 		return -EADDRNOTAVAIL;
847 
848 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
849 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
850 
851 	/* set the correct pool for the new PF MAC address in entry 0 */
852 	igc_set_default_mac_filter(adapter);
853 
854 	return 0;
855 }
856 
857 /**
858  *  igc_write_mc_addr_list - write multicast addresses to MTA
859  *  @netdev: network interface device structure
860  *
861  *  Writes multicast address list to the MTA hash table.
862  *  Returns: -ENOMEM on failure
863  *           0 on no addresses written
864  *           X on writing X addresses to MTA
865  **/
866 static int igc_write_mc_addr_list(struct net_device *netdev)
867 {
868 	struct igc_adapter *adapter = netdev_priv(netdev);
869 	struct igc_hw *hw = &adapter->hw;
870 	struct netdev_hw_addr *ha;
871 	u8  *mta_list;
872 	int i;
873 
874 	if (netdev_mc_empty(netdev)) {
875 		/* nothing to program, so clear mc list */
876 		igc_update_mc_addr_list(hw, NULL, 0);
877 		return 0;
878 	}
879 
880 	mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
881 	if (!mta_list)
882 		return -ENOMEM;
883 
884 	/* The shared function expects a packed array of only addresses. */
885 	i = 0;
886 	netdev_for_each_mc_addr(ha, netdev)
887 		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
888 
889 	igc_update_mc_addr_list(hw, mta_list, i);
890 	kfree(mta_list);
891 
892 	return netdev_mc_count(netdev);
893 }
894 
895 static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
896 {
897 	ktime_t cycle_time = adapter->cycle_time;
898 	ktime_t base_time = adapter->base_time;
899 	u32 launchtime;
900 
901 	/* FIXME: when using ETF together with taprio, we may have a
902 	 * case where 'delta' is larger than the cycle_time, this may
903 	 * cause problems if we don't read the current value of
904 	 * IGC_BASET, as the value written into the launchtime
905 	 * descriptor field may be misinterpreted.
906 	 */
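	/* The launch time is programmed as an offset within the current cycle:
	 * (txtime - base_time) mod cycle_time.
	 */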
907 	div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
908 
909 	return cpu_to_le32(launchtime);
910 }
911 
912 static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
913 			    struct igc_tx_buffer *first,
914 			    u32 vlan_macip_lens, u32 type_tucmd,
915 			    u32 mss_l4len_idx)
916 {
917 	struct igc_adv_tx_context_desc *context_desc;
918 	u16 i = tx_ring->next_to_use;
919 
920 	context_desc = IGC_TX_CTXTDESC(tx_ring, i);
921 
922 	i++;
923 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
924 
925 	/* set bits to identify this as an advanced context descriptor */
926 	type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
927 
928 	/* For i225, context index must be unique per ring. */
929 	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
930 		mss_l4len_idx |= tx_ring->reg_idx << 4;
931 
932 	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
933 	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
934 	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
935 
936 	/* We assume there is always a valid Tx time available. Invalid times
937 	 * should have been handled by the upper layers.
938 	 */
939 	if (tx_ring->launchtime_enable) {
940 		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
941 		ktime_t txtime = first->skb->tstamp;
942 
943 		first->skb->tstamp = ktime_set(0, 0);
944 		context_desc->launch_time = igc_tx_launchtime(adapter,
945 							      txtime);
946 	} else {
947 		context_desc->launch_time = 0;
948 	}
949 }
950 
951 static inline bool igc_ipv6_csum_is_sctp(struct sk_buff *skb)
952 {
953 	unsigned int offset = 0;
954 
955 	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
956 
957 	return offset == skb_checksum_start_offset(skb);
958 }
959 
960 static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
961 {
962 	struct sk_buff *skb = first->skb;
963 	u32 vlan_macip_lens = 0;
964 	u32 type_tucmd = 0;
965 
966 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
967 csum_failed:
968 		if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
969 		    !tx_ring->launchtime_enable)
970 			return;
971 		goto no_csum;
972 	}
973 
974 	switch (skb->csum_offset) {
975 	case offsetof(struct tcphdr, check):
976 		type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
977 		fallthrough;
978 	case offsetof(struct udphdr, check):
979 		break;
980 	case offsetof(struct sctphdr, checksum):
981 		/* validate that this is actually an SCTP request */
982 		if ((first->protocol == htons(ETH_P_IP) &&
983 		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
984 		    (first->protocol == htons(ETH_P_IPV6) &&
985 		     igc_ipv6_csum_is_sctp(skb))) {
986 			type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
987 			break;
988 		}
989 		fallthrough;
990 	default:
991 		skb_checksum_help(skb);
992 		goto csum_failed;
993 	}
994 
995 	/* update TX checksum flag */
996 	first->tx_flags |= IGC_TX_FLAGS_CSUM;
997 	vlan_macip_lens = skb_checksum_start_offset(skb) -
998 			  skb_network_offset(skb);
999 no_csum:
1000 	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
1001 	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
1002 
1003 	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
1004 }
1005 
1006 static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1007 {
1008 	struct net_device *netdev = tx_ring->netdev;
1009 
1010 	netif_stop_subqueue(netdev, tx_ring->queue_index);
1011 
1012 	/* memory barrier: make the queue stop visible before rechecking for space */
1013 	smp_mb();
1014 
1015 	/* We need to check again in case another CPU has just
1016 	 * made room available.
1017 	 */
1018 	if (igc_desc_unused(tx_ring) < size)
1019 		return -EBUSY;
1020 
1021 	/* A reprieve! */
1022 	netif_wake_subqueue(netdev, tx_ring->queue_index);
1023 
1024 	u64_stats_update_begin(&tx_ring->tx_syncp2);
1025 	tx_ring->tx_stats.restart_queue2++;
1026 	u64_stats_update_end(&tx_ring->tx_syncp2);
1027 
1028 	return 0;
1029 }
1030 
1031 static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1032 {
1033 	if (igc_desc_unused(tx_ring) >= size)
1034 		return 0;
1035 	return __igc_maybe_stop_tx(tx_ring, size);
1036 }
1037 
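/* Translate a flag bit in _input into the corresponding _result bit without
 * branching: scale up when _result is the larger constant, scale down otherwise.
 */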
1038 #define IGC_SET_FLAG(_input, _flag, _result) \
1039 	(((_flag) <= (_result)) ?				\
1040 	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) :	\
1041 	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
1042 
1043 static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
1044 {
1045 	/* set type for advanced descriptor with frame checksum insertion */
1046 	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
1047 		       IGC_ADVTXD_DCMD_DEXT |
1048 		       IGC_ADVTXD_DCMD_IFCS;
1049 
1050 	/* set segmentation bits for TSO */
1051 	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
1052 				 (IGC_ADVTXD_DCMD_TSE));
1053 
1054 	/* set timestamp bit if present */
1055 	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
1056 				 (IGC_ADVTXD_MAC_TSTAMP));
1057 
1058 	return cmd_type;
1059 }
1060 
1061 static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
1062 				 union igc_adv_tx_desc *tx_desc,
1063 				 u32 tx_flags, unsigned int paylen)
1064 {
1065 	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
1066 
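	/* As with IGC_SET_FLAG, the multiply/divide below maps a tx_flags bit
	 * onto the matching POPTS bit without branching.
	 */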
1067 	/* insert L4 checksum */
1068 	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
1069 			  ((IGC_TXD_POPTS_TXSM << 8) /
1070 			  IGC_TX_FLAGS_CSUM);
1071 
1072 	/* insert IPv4 checksum */
1073 	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
1074 			  (((IGC_TXD_POPTS_IXSM << 8)) /
1075 			  IGC_TX_FLAGS_IPV4);
1076 
1077 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
1078 }
1079 
1080 static int igc_tx_map(struct igc_ring *tx_ring,
1081 		      struct igc_tx_buffer *first,
1082 		      const u8 hdr_len)
1083 {
1084 	struct sk_buff *skb = first->skb;
1085 	struct igc_tx_buffer *tx_buffer;
1086 	union igc_adv_tx_desc *tx_desc;
1087 	u32 tx_flags = first->tx_flags;
1088 	skb_frag_t *frag;
1089 	u16 i = tx_ring->next_to_use;
1090 	unsigned int data_len, size;
1091 	dma_addr_t dma;
1092 	u32 cmd_type = igc_tx_cmd_type(skb, tx_flags);
1093 
1094 	tx_desc = IGC_TX_DESC(tx_ring, i);
1095 
1096 	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
1097 
1098 	size = skb_headlen(skb);
1099 	data_len = skb->data_len;
1100 
1101 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1102 
1103 	tx_buffer = first;
1104 
1105 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1106 		if (dma_mapping_error(tx_ring->dev, dma))
1107 			goto dma_error;
1108 
1109 		/* record length, and DMA address */
1110 		dma_unmap_len_set(tx_buffer, len, size);
1111 		dma_unmap_addr_set(tx_buffer, dma, dma);
1112 
1113 		tx_desc->read.buffer_addr = cpu_to_le64(dma);
1114 
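		/* The low length bits of cmd_type are still zero here, so the
		 * XORs below effectively OR in each buffer's length.
		 */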
1115 		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
1116 			tx_desc->read.cmd_type_len =
1117 				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);
1118 
1119 			i++;
1120 			tx_desc++;
1121 			if (i == tx_ring->count) {
1122 				tx_desc = IGC_TX_DESC(tx_ring, 0);
1123 				i = 0;
1124 			}
1125 			tx_desc->read.olinfo_status = 0;
1126 
1127 			dma += IGC_MAX_DATA_PER_TXD;
1128 			size -= IGC_MAX_DATA_PER_TXD;
1129 
1130 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
1131 		}
1132 
1133 		if (likely(!data_len))
1134 			break;
1135 
1136 		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
1137 
1138 		i++;
1139 		tx_desc++;
1140 		if (i == tx_ring->count) {
1141 			tx_desc = IGC_TX_DESC(tx_ring, 0);
1142 			i = 0;
1143 		}
1144 		tx_desc->read.olinfo_status = 0;
1145 
1146 		size = skb_frag_size(frag);
1147 		data_len -= size;
1148 
1149 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
1150 				       size, DMA_TO_DEVICE);
1151 
1152 		tx_buffer = &tx_ring->tx_buffer_info[i];
1153 	}
1154 
1155 	/* write last descriptor with RS and EOP bits */
1156 	cmd_type |= size | IGC_TXD_DCMD;
1157 	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1158 
1159 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1160 
1161 	/* set the timestamp */
1162 	first->time_stamp = jiffies;
1163 
1164 	skb_tx_timestamp(skb);
1165 
1166 	/* Force memory writes to complete before letting h/w know there
1167 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
1168 	 * memory model archs, such as IA-64).
1169 	 *
1170 	 * We also need this memory barrier to make certain all of the
1171 	 * status bits have been updated before next_to_watch is written.
1172 	 */
1173 	wmb();
1174 
1175 	/* set next_to_watch value indicating a packet is present */
1176 	first->next_to_watch = tx_desc;
1177 
1178 	i++;
1179 	if (i == tx_ring->count)
1180 		i = 0;
1181 
1182 	tx_ring->next_to_use = i;
1183 
1184 	/* Make sure there is space in the ring for the next send. */
1185 	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
1186 
1187 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
1188 		writel(i, tx_ring->tail);
1189 	}
1190 
1191 	return 0;
1192 dma_error:
1193 	netdev_err(tx_ring->netdev, "TX DMA map failed\n");
1194 	tx_buffer = &tx_ring->tx_buffer_info[i];
1195 
1196 	/* clear dma mappings for failed tx_buffer_info map */
1197 	while (tx_buffer != first) {
1198 		if (dma_unmap_len(tx_buffer, len))
1199 			dma_unmap_page(tx_ring->dev,
1200 				       dma_unmap_addr(tx_buffer, dma),
1201 				       dma_unmap_len(tx_buffer, len),
1202 				       DMA_TO_DEVICE);
1203 		dma_unmap_len_set(tx_buffer, len, 0);
1204 
1205 		if (i-- == 0)
1206 			i += tx_ring->count;
1207 		tx_buffer = &tx_ring->tx_buffer_info[i];
1208 	}
1209 
1210 	if (dma_unmap_len(tx_buffer, len))
1211 		dma_unmap_single(tx_ring->dev,
1212 				 dma_unmap_addr(tx_buffer, dma),
1213 				 dma_unmap_len(tx_buffer, len),
1214 				 DMA_TO_DEVICE);
1215 	dma_unmap_len_set(tx_buffer, len, 0);
1216 
1217 	dev_kfree_skb_any(tx_buffer->skb);
1218 	tx_buffer->skb = NULL;
1219 
1220 	tx_ring->next_to_use = i;
1221 
1222 	return -1;
1223 }
1224 
1225 static int igc_tso(struct igc_ring *tx_ring,
1226 		   struct igc_tx_buffer *first,
1227 		   u8 *hdr_len)
1228 {
1229 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
1230 	struct sk_buff *skb = first->skb;
1231 	union {
1232 		struct iphdr *v4;
1233 		struct ipv6hdr *v6;
1234 		unsigned char *hdr;
1235 	} ip;
1236 	union {
1237 		struct tcphdr *tcp;
1238 		struct udphdr *udp;
1239 		unsigned char *hdr;
1240 	} l4;
1241 	u32 paylen, l4_offset;
1242 	int err;
1243 
1244 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1245 		return 0;
1246 
1247 	if (!skb_is_gso(skb))
1248 		return 0;
1249 
1250 	err = skb_cow_head(skb, 0);
1251 	if (err < 0)
1252 		return err;
1253 
1254 	ip.hdr = skb_network_header(skb);
1255 	l4.hdr = skb_checksum_start(skb);
1256 
1257 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1258 	type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
1259 
1260 	/* initialize outer IP header fields */
1261 	if (ip.v4->version == 4) {
1262 		unsigned char *csum_start = skb_checksum_start(skb);
1263 		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
1264 
1265 		/* IP header will have to cancel out any data that
1266 		 * is not a part of the outer IP header
1267 		 */
1268 		ip.v4->check = csum_fold(csum_partial(trans_start,
1269 						      csum_start - trans_start,
1270 						      0));
1271 		type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;
1272 
1273 		ip.v4->tot_len = 0;
1274 		first->tx_flags |= IGC_TX_FLAGS_TSO |
1275 				   IGC_TX_FLAGS_CSUM |
1276 				   IGC_TX_FLAGS_IPV4;
1277 	} else {
1278 		ip.v6->payload_len = 0;
1279 		first->tx_flags |= IGC_TX_FLAGS_TSO |
1280 				   IGC_TX_FLAGS_CSUM;
1281 	}
1282 
1283 	/* determine offset of inner transport header */
1284 	l4_offset = l4.hdr - skb->data;
1285 
1286 	/* remove payload length from inner checksum */
1287 	paylen = skb->len - l4_offset;
1288 	if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
1289 		/* compute length of segmentation header */
1290 		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
1291 		csum_replace_by_diff(&l4.tcp->check,
1292 				     (__force __wsum)htonl(paylen));
1293 	} else {
1294 		/* compute length of segmentation header */
1295 		*hdr_len = sizeof(*l4.udp) + l4_offset;
1296 		csum_replace_by_diff(&l4.udp->check,
1297 				     (__force __wsum)htonl(paylen));
1298 	}
1299 
1300 	/* update gso size and bytecount with header size */
1301 	first->gso_segs = skb_shinfo(skb)->gso_segs;
1302 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
1303 
1304 	/* MSS L4LEN IDX */
1305 	mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
1306 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;
1307 
1308 	/* VLAN MACLEN IPLEN */
1309 	vlan_macip_lens = l4.hdr - ip.hdr;
1310 	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
1311 	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
1312 
1313 	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
1314 			type_tucmd, mss_l4len_idx);
1315 
1316 	return 1;
1317 }
1318 
1319 static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
1320 				       struct igc_ring *tx_ring)
1321 {
1322 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
1323 	__be16 protocol = vlan_get_protocol(skb);
1324 	struct igc_tx_buffer *first;
1325 	u32 tx_flags = 0;
1326 	unsigned short f;
1327 	u8 hdr_len = 0;
1328 	int tso = 0;
1329 
1330 	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
1331 	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
1332 	 *	+ 2 desc gap to keep tail from touching head,
1333 	 *	+ 1 desc for context descriptor,
1334 	 * otherwise try next time
1335 	 */
1336 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1337 		count += TXD_USE_COUNT(skb_frag_size(
1338 						&skb_shinfo(skb)->frags[f]));
1339 
1340 	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
1341 		/* this is a hard error */
1342 		return NETDEV_TX_BUSY;
1343 	}
1344 
1345 	/* record the location of the first descriptor for this packet */
1346 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
1347 	first->skb = skb;
1348 	first->bytecount = skb->len;
1349 	first->gso_segs = 1;
1350 
1351 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
1352 		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
1353 
1354 		/* FIXME: add support for retrieving timestamps from
1355 		 * the other timer registers before skipping the
1356 		 * timestamping request.
1357 		 */
1358 		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
1359 		    !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
1360 					   &adapter->state)) {
1361 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1362 			tx_flags |= IGC_TX_FLAGS_TSTAMP;
1363 
1364 			adapter->ptp_tx_skb = skb_get(skb);
1365 			adapter->ptp_tx_start = jiffies;
1366 		} else {
1367 			adapter->tx_hwtstamp_skipped++;
1368 		}
1369 	}
1370 
1371 	/* record initial flags and protocol */
1372 	first->tx_flags = tx_flags;
1373 	first->protocol = protocol;
1374 
1375 	tso = igc_tso(tx_ring, first, &hdr_len);
1376 	if (tso < 0)
1377 		goto out_drop;
1378 	else if (!tso)
1379 		igc_tx_csum(tx_ring, first);
1380 
1381 	igc_tx_map(tx_ring, first, hdr_len);
1382 
1383 	return NETDEV_TX_OK;
1384 
1385 out_drop:
1386 	dev_kfree_skb_any(first->skb);
1387 	first->skb = NULL;
1388 
1389 	return NETDEV_TX_OK;
1390 }
1391 
1392 static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
1393 						    struct sk_buff *skb)
1394 {
1395 	unsigned int r_idx = skb->queue_mapping;
1396 
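	/* The queue index chosen by the stack can exceed the number of Tx
	 * queues the driver allocated, so wrap it back into range.
	 */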
1397 	if (r_idx >= adapter->num_tx_queues)
1398 		r_idx = r_idx % adapter->num_tx_queues;
1399 
1400 	return adapter->tx_ring[r_idx];
1401 }
1402 
1403 static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
1404 				  struct net_device *netdev)
1405 {
1406 	struct igc_adapter *adapter = netdev_priv(netdev);
1407 
1408 	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
1409 	 * in order to meet this minimum size requirement.
1410 	 */
1411 	if (skb->len < 17) {
1412 		if (skb_padto(skb, 17))
1413 			return NETDEV_TX_OK;
1414 		skb->len = 17;
1415 	}
1416 
1417 	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
1418 }
1419 
1420 static void igc_rx_checksum(struct igc_ring *ring,
1421 			    union igc_adv_rx_desc *rx_desc,
1422 			    struct sk_buff *skb)
1423 {
1424 	skb_checksum_none_assert(skb);
1425 
1426 	/* Ignore Checksum bit is set */
1427 	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
1428 		return;
1429 
1430 	/* Rx checksum disabled via ethtool */
1431 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
1432 		return;
1433 
1434 	/* TCP/UDP checksum error bit is set */
1435 	if (igc_test_staterr(rx_desc,
1436 			     IGC_RXDEXT_STATERR_L4E |
1437 			     IGC_RXDEXT_STATERR_IPE)) {
1438 		/* work around errata with sctp packets where the TCPE aka
1439 		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
1440 		 * packets (aka let the stack check the crc32c)
1441 		 */
1442 		if (!(skb->len == 60 &&
1443 		      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
1444 			u64_stats_update_begin(&ring->rx_syncp);
1445 			ring->rx_stats.csum_err++;
1446 			u64_stats_update_end(&ring->rx_syncp);
1447 		}
1448 		/* let the stack verify checksum errors */
1449 		return;
1450 	}
1451 	/* It must be a TCP or UDP packet with a valid checksum */
1452 	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
1453 				      IGC_RXD_STAT_UDPCS))
1454 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1455 
1456 	netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
1457 		   le32_to_cpu(rx_desc->wb.upper.status_error));
1458 }
1459 
1460 static inline void igc_rx_hash(struct igc_ring *ring,
1461 			       union igc_adv_rx_desc *rx_desc,
1462 			       struct sk_buff *skb)
1463 {
1464 	if (ring->netdev->features & NETIF_F_RXHASH)
1465 		skb_set_hash(skb,
1466 			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1467 			     PKT_HASH_TYPE_L3);
1468 }
1469 
1470 /**
1471  * igc_process_skb_fields - Populate skb header fields from Rx descriptor
1472  * @rx_ring: rx descriptor ring packet is being transacted on
1473  * @rx_desc: pointer to the EOP Rx descriptor
1474  * @skb: pointer to current skb being populated
1475  *
1476  * This function checks the ring, descriptor, and packet information in order
1477  * to populate the hash, checksum, VLAN, protocol, and other fields within the
1478  * skb.
1479  */
1480 static void igc_process_skb_fields(struct igc_ring *rx_ring,
1481 				   union igc_adv_rx_desc *rx_desc,
1482 				   struct sk_buff *skb)
1483 {
1484 	igc_rx_hash(rx_ring, rx_desc, skb);
1485 
1486 	igc_rx_checksum(rx_ring, rx_desc, skb);
1487 
1488 	skb_record_rx_queue(skb, rx_ring->queue_index);
1489 
1490 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1491 }
1492 
1493 static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
1494 					       const unsigned int size)
1495 {
1496 	struct igc_rx_buffer *rx_buffer;
1497 
1498 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
1499 	prefetchw(rx_buffer->page);
1500 
1501 	/* we are reusing so sync this buffer for CPU use */
1502 	dma_sync_single_range_for_cpu(rx_ring->dev,
1503 				      rx_buffer->dma,
1504 				      rx_buffer->page_offset,
1505 				      size,
1506 				      DMA_FROM_DEVICE);
1507 
1508 	rx_buffer->pagecnt_bias--;
1509 
1510 	return rx_buffer;
1511 }
1512 
1513 /**
1514  * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
1515  * @rx_ring: rx descriptor ring to transact packets on
1516  * @rx_buffer: buffer containing page to add
1517  * @skb: sk_buff to place the data into
1518  * @size: size of buffer to be added
1519  *
1520  * This function will add the data contained in rx_buffer->page to the skb.
1521  */
1522 static void igc_add_rx_frag(struct igc_ring *rx_ring,
1523 			    struct igc_rx_buffer *rx_buffer,
1524 			    struct sk_buff *skb,
1525 			    unsigned int size)
1526 {
1527 #if (PAGE_SIZE < 8192)
1528 	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
1529 
1530 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1531 			rx_buffer->page_offset, size, truesize);
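	/* Flip to the other half of the page so the buffer can be reused. */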
1532 	rx_buffer->page_offset ^= truesize;
1533 #else
1534 	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
1535 				SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1536 				SKB_DATA_ALIGN(size);
1537 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1538 			rx_buffer->page_offset, size, truesize);
1539 	rx_buffer->page_offset += truesize;
1540 #endif
1541 }
1542 
1543 static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
1544 				     struct igc_rx_buffer *rx_buffer,
1545 				     union igc_adv_rx_desc *rx_desc,
1546 				     unsigned int size)
1547 {
1548 	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1549 #if (PAGE_SIZE < 8192)
1550 	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
1551 #else
1552 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1553 				SKB_DATA_ALIGN(IGC_SKB_PAD + size);
1554 #endif
1555 	struct sk_buff *skb;
1556 
1557 	/* prefetch first cache line of first page */
1558 	net_prefetch(va);
1559 
1560 	/* build an skb around the page buffer */
1561 	skb = build_skb(va - IGC_SKB_PAD, truesize);
1562 	if (unlikely(!skb))
1563 		return NULL;
1564 
1565 	/* update pointers within the skb to store the data */
1566 	skb_reserve(skb, IGC_SKB_PAD);
1567 	__skb_put(skb, size);
1568 
1569 	/* update buffer offset */
1570 #if (PAGE_SIZE < 8192)
1571 	rx_buffer->page_offset ^= truesize;
1572 #else
1573 	rx_buffer->page_offset += truesize;
1574 #endif
1575 
1576 	return skb;
1577 }
1578 
1579 static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
1580 					 struct igc_rx_buffer *rx_buffer,
1581 					 union igc_adv_rx_desc *rx_desc,
1582 					 unsigned int size)
1583 {
1584 	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1585 #if (PAGE_SIZE < 8192)
1586 	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
1587 #else
1588 	unsigned int truesize = SKB_DATA_ALIGN(size);
1589 #endif
1590 	unsigned int headlen;
1591 	struct sk_buff *skb;
1592 
1593 	/* prefetch first cache line of first page */
1594 	net_prefetch(va);
1595 
1596 	/* allocate a skb to store the frags */
1597 	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
1598 	if (unlikely(!skb))
1599 		return NULL;
1600 
1601 	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))) {
1602 		igc_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
1603 		va += IGC_TS_HDR_LEN;
1604 		size -= IGC_TS_HDR_LEN;
1605 	}
1606 
1607 	/* Determine available headroom for copy */
1608 	headlen = size;
1609 	if (headlen > IGC_RX_HDR_LEN)
1610 		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
1611 
1612 	/* align pull length to size of long to optimize memcpy performance */
1613 	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
1614 
1615 	/* update all of the pointers */
1616 	size -= headlen;
1617 	if (size) {
1618 		skb_add_rx_frag(skb, 0, rx_buffer->page,
1619 				(va + headlen) - page_address(rx_buffer->page),
1620 				size, truesize);
1621 #if (PAGE_SIZE < 8192)
1622 		rx_buffer->page_offset ^= truesize;
1623 #else
1624 		rx_buffer->page_offset += truesize;
1625 #endif
1626 	} else {
1627 		rx_buffer->pagecnt_bias++;
1628 	}
1629 
1630 	return skb;
1631 }
1632 
1633 /**
1634  * igc_reuse_rx_page - page flip buffer and store it back on the ring
1635  * @rx_ring: rx descriptor ring to store buffers on
1636  * @old_buff: donor buffer to have page reused
1637  *
1638  * Synchronizes page for reuse by the adapter
1639  */
1640 static void igc_reuse_rx_page(struct igc_ring *rx_ring,
1641 			      struct igc_rx_buffer *old_buff)
1642 {
1643 	u16 nta = rx_ring->next_to_alloc;
1644 	struct igc_rx_buffer *new_buff;
1645 
1646 	new_buff = &rx_ring->rx_buffer_info[nta];
1647 
1648 	/* update, and store next to alloc */
1649 	nta++;
1650 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1651 
1652 	/* Transfer page from old buffer to new buffer.
1653 	 * Move each member individually to avoid possible store
1654 	 * forwarding stalls.
1655 	 */
1656 	new_buff->dma		= old_buff->dma;
1657 	new_buff->page		= old_buff->page;
1658 	new_buff->page_offset	= old_buff->page_offset;
1659 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
1660 }
1661 
1662 static inline bool igc_page_is_reserved(struct page *page)
1663 {
1664 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1665 }
1666 
1667 static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
1668 {
1669 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1670 	struct page *page = rx_buffer->page;
1671 
1672 	/* avoid re-using remote pages */
1673 	if (unlikely(igc_page_is_reserved(page)))
1674 		return false;
1675 
1676 #if (PAGE_SIZE < 8192)
1677 	/* if we are only owner of page we can reuse it */
1678 	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
1679 		return false;
1680 #else
1681 #define IGC_LAST_OFFSET \
1682 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
1683 
1684 	if (rx_buffer->page_offset > IGC_LAST_OFFSET)
1685 		return false;
1686 #endif
1687 
1688 	/* If we have drained the page fragment pool we need to update
1689 	 * the pagecnt_bias and page count so that we fully restock the
1690 	 * number of references the driver holds.
1691 	 */
1692 	if (unlikely(!pagecnt_bias)) {
1693 		page_ref_add(page, USHRT_MAX);
1694 		rx_buffer->pagecnt_bias = USHRT_MAX;
1695 	}
1696 
1697 	return true;
1698 }
1699 
1700 /**
1701  * igc_is_non_eop - process handling of non-EOP buffers
1702  * @rx_ring: Rx ring being processed
1703  * @rx_desc: Rx descriptor for current buffer
1704  *
1705  * This function updates next to clean.  If the buffer is an EOP buffer
1706  * this function exits returning false, otherwise it will place the
1707  * sk_buff in the next buffer to be chained and return true indicating
1708  * that this is in fact a non-EOP buffer.
1709  */
1710 static bool igc_is_non_eop(struct igc_ring *rx_ring,
1711 			   union igc_adv_rx_desc *rx_desc)
1712 {
1713 	u32 ntc = rx_ring->next_to_clean + 1;
1714 
1715 	/* fetch, update, and store next to clean */
1716 	ntc = (ntc < rx_ring->count) ? ntc : 0;
1717 	rx_ring->next_to_clean = ntc;
1718 
1719 	prefetch(IGC_RX_DESC(rx_ring, ntc));
1720 
1721 	if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
1722 		return false;
1723 
1724 	return true;
1725 }
1726 
1727 /**
1728  * igc_cleanup_headers - Correct corrupted or empty headers
1729  * @rx_ring: rx descriptor ring packet is being transacted on
1730  * @rx_desc: pointer to the EOP Rx descriptor
1731  * @skb: pointer to current skb being fixed
1732  *
1733  * Address the case where we are pulling data in on pages only
1734  * and as such no data is present in the skb header.
1735  *
1736  * In addition if skb is not at least 60 bytes we need to pad it so that
1737  * it is large enough to qualify as a valid Ethernet frame.
1738  *
1739  * Returns true if an error was encountered and skb was freed.
1740  */
1741 static bool igc_cleanup_headers(struct igc_ring *rx_ring,
1742 				union igc_adv_rx_desc *rx_desc,
1743 				struct sk_buff *skb)
1744 {
1745 	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
1746 		struct net_device *netdev = rx_ring->netdev;
1747 
1748 		if (!(netdev->features & NETIF_F_RXALL)) {
1749 			dev_kfree_skb_any(skb);
1750 			return true;
1751 		}
1752 	}
1753 
1754 	/* if eth_skb_pad returns an error the skb was freed */
1755 	if (eth_skb_pad(skb))
1756 		return true;
1757 
1758 	return false;
1759 }
1760 
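/**
 * igc_put_rx_buffer - recycle or release an Rx buffer after use
 * @rx_ring: rx descriptor ring the buffer belongs to
 * @rx_buffer: buffer that was just consumed
 *
 * Hands the page back to the ring for reuse when possible; otherwise
 * unmaps it and drops the references held by the driver.
 */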
1761 static void igc_put_rx_buffer(struct igc_ring *rx_ring,
1762 			      struct igc_rx_buffer *rx_buffer)
1763 {
1764 	if (igc_can_reuse_rx_page(rx_buffer)) {
1765 		/* hand second half of page back to the ring */
1766 		igc_reuse_rx_page(rx_ring, rx_buffer);
1767 	} else {
1768 		/* We are not reusing the buffer so unmap it and free
1769 		 * any references we are holding to it
1770 		 */
1771 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
1772 				     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1773 				     IGC_RX_DMA_ATTR);
1774 		__page_frag_cache_drain(rx_buffer->page,
1775 					rx_buffer->pagecnt_bias);
1776 	}
1777 
1778 	/* clear contents of rx_buffer */
1779 	rx_buffer->page = NULL;
1780 }
1781 
1782 static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
1783 {
1784 	return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
1785 }
1786 
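/**
 * igc_alloc_mapped_page - ensure an Rx buffer has a DMA-mapped page
 * @rx_ring: rx descriptor ring to allocate for
 * @bi: buffer info to populate
 *
 * Returns true if the buffer already had a page or a new one was
 * allocated and mapped successfully; false on allocation or mapping
 * failure, which is accounted in rx_stats.alloc_failed.
 */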
1787 static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
1788 				  struct igc_rx_buffer *bi)
1789 {
1790 	struct page *page = bi->page;
1791 	dma_addr_t dma;
1792 
1793 	/* since we are recycling buffers we should seldom need to alloc */
1794 	if (likely(page))
1795 		return true;
1796 
1797 	/* alloc new page for storage */
1798 	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
1799 	if (unlikely(!page)) {
1800 		rx_ring->rx_stats.alloc_failed++;
1801 		return false;
1802 	}
1803 
1804 	/* map page for use */
1805 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1806 				 igc_rx_pg_size(rx_ring),
1807 				 DMA_FROM_DEVICE,
1808 				 IGC_RX_DMA_ATTR);
1809 
1810 	/* if mapping failed free memory back to system since
1811 	 * there isn't much point in holding memory we can't use
1812 	 */
1813 	if (dma_mapping_error(rx_ring->dev, dma)) {
1814 		__free_page(page);
1815 
1816 		rx_ring->rx_stats.alloc_failed++;
1817 		return false;
1818 	}
1819 
1820 	bi->dma = dma;
1821 	bi->page = page;
1822 	bi->page_offset = igc_rx_offset(rx_ring);
1823 	bi->pagecnt_bias = 1;
1824 
1825 	return true;
1826 }
1827 
1828 /**
1829  * igc_alloc_rx_buffers - Replace used receive buffers; packet split
1830  * @rx_ring: rx descriptor ring
1831  * @cleaned_count: number of buffers to replace
1832  */
1833 static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
1834 {
1835 	union igc_adv_rx_desc *rx_desc;
1836 	u16 i = rx_ring->next_to_use;
1837 	struct igc_rx_buffer *bi;
1838 	u16 bufsz;
1839 
1840 	/* nothing to do */
1841 	if (!cleaned_count)
1842 		return;
1843 
1844 	rx_desc = IGC_RX_DESC(rx_ring, i);
1845 	bi = &rx_ring->rx_buffer_info[i];
1846 	i -= rx_ring->count;
1847 
1848 	bufsz = igc_rx_bufsz(rx_ring);
1849 
1850 	do {
1851 		if (!igc_alloc_mapped_page(rx_ring, bi))
1852 			break;
1853 
1854 		/* sync the buffer for use by the device */
1855 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1856 						 bi->page_offset, bufsz,
1857 						 DMA_FROM_DEVICE);
1858 
1859 		/* Refresh the desc even if buffer_addrs didn't change
1860 		 * because each write-back erases this info.
1861 		 */
1862 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1863 
1864 		rx_desc++;
1865 		bi++;
1866 		i++;
1867 		if (unlikely(!i)) {
1868 			rx_desc = IGC_RX_DESC(rx_ring, 0);
1869 			bi = rx_ring->rx_buffer_info;
1870 			i -= rx_ring->count;
1871 		}
1872 
1873 		/* clear the length for the next_to_use descriptor */
1874 		rx_desc->wb.upper.length = 0;
1875 
1876 		cleaned_count--;
1877 	} while (cleaned_count);
1878 
1879 	i += rx_ring->count;
1880 
1881 	if (rx_ring->next_to_use != i) {
1882 		/* record the next descriptor to use */
1883 		rx_ring->next_to_use = i;
1884 
1885 		/* update next to alloc since we have filled the ring */
1886 		rx_ring->next_to_alloc = i;
1887 
1888 		/* Force memory writes to complete before letting h/w
1889 		 * know there are new descriptors to fetch.  (Only
1890 		 * applicable for weak-ordered memory model archs,
1891 		 * such as IA-64).
1892 		 */
1893 		wmb();
1894 		writel(i, rx_ring->tail);
1895 	}
1896 }
1897 
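/**
 * igc_clean_rx_irq - clean completed descriptors from an Rx ring
 * @q_vector: pointer to q_vector containing the Rx ring
 * @budget: maximum number of packets to process
 *
 * Builds skbs from written-back descriptors, hands completed frames to
 * the stack via GRO and refills Rx buffers along the way.
 *
 * Returns the number of packets processed.
 */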
1898 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
1899 {
1900 	unsigned int total_bytes = 0, total_packets = 0;
1901 	struct igc_ring *rx_ring = q_vector->rx.ring;
1902 	struct sk_buff *skb = rx_ring->skb;
1903 	u16 cleaned_count = igc_desc_unused(rx_ring);
1904 
1905 	while (likely(total_packets < budget)) {
1906 		union igc_adv_rx_desc *rx_desc;
1907 		struct igc_rx_buffer *rx_buffer;
1908 		unsigned int size;
1909 
1910 		/* return some buffers to hardware, one at a time is too slow */
1911 		if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
1912 			igc_alloc_rx_buffers(rx_ring, cleaned_count);
1913 			cleaned_count = 0;
1914 		}
1915 
1916 		rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
1917 		size = le16_to_cpu(rx_desc->wb.upper.length);
1918 		if (!size)
1919 			break;
1920 
1921 		/* This memory barrier is needed to keep us from reading
1922 		 * any other fields out of the rx_desc until we know the
1923 		 * descriptor has been written back
1924 		 */
1925 		dma_rmb();
1926 
1927 		rx_buffer = igc_get_rx_buffer(rx_ring, size);
1928 
1929 		/* retrieve a buffer from the ring */
1930 		if (skb)
1931 			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
1932 		else if (ring_uses_build_skb(rx_ring))
1933 			skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
1934 		else
1935 			skb = igc_construct_skb(rx_ring, rx_buffer,
1936 						rx_desc, size);
1937 
1938 		/* exit if we failed to retrieve a buffer */
1939 		if (!skb) {
1940 			rx_ring->rx_stats.alloc_failed++;
1941 			rx_buffer->pagecnt_bias++;
1942 			break;
1943 		}
1944 
1945 		igc_put_rx_buffer(rx_ring, rx_buffer);
1946 		cleaned_count++;
1947 
1948 		/* fetch next buffer in frame if non-eop */
1949 		if (igc_is_non_eop(rx_ring, rx_desc))
1950 			continue;
1951 
1952 		/* verify the packet layout is correct */
1953 		if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
1954 			skb = NULL;
1955 			continue;
1956 		}
1957 
1958 		/* probably a little skewed due to removing CRC */
1959 		total_bytes += skb->len;
1960 
1961 		/* populate checksum, VLAN, and protocol */
1962 		igc_process_skb_fields(rx_ring, rx_desc, skb);
1963 
1964 		napi_gro_receive(&q_vector->napi, skb);
1965 
1966 		/* reset skb pointer */
1967 		skb = NULL;
1968 
1969 		/* update budget accounting */
1970 		total_packets++;
1971 	}
1972 
1973 	/* place incomplete frames back on ring for completion */
1974 	rx_ring->skb = skb;
1975 
1976 	u64_stats_update_begin(&rx_ring->rx_syncp);
1977 	rx_ring->rx_stats.packets += total_packets;
1978 	rx_ring->rx_stats.bytes += total_bytes;
1979 	u64_stats_update_end(&rx_ring->rx_syncp);
1980 	q_vector->rx.total_packets += total_packets;
1981 	q_vector->rx.total_bytes += total_bytes;
1982 
1983 	if (cleaned_count)
1984 		igc_alloc_rx_buffers(rx_ring, cleaned_count);
1985 
1986 	return total_packets;
1987 }
1988 
1989 /**
1990  * igc_clean_tx_irq - Reclaim resources after transmit completes
1991  * @q_vector: pointer to q_vector containing needed info
1992  * @napi_budget: Used to determine if we are in netpoll
1993  *
1994  * returns true if ring is completely cleaned
1995  */
1996 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
1997 {
1998 	struct igc_adapter *adapter = q_vector->adapter;
1999 	unsigned int total_bytes = 0, total_packets = 0;
2000 	unsigned int budget = q_vector->tx.work_limit;
2001 	struct igc_ring *tx_ring = q_vector->tx.ring;
2002 	unsigned int i = tx_ring->next_to_clean;
2003 	struct igc_tx_buffer *tx_buffer;
2004 	union igc_adv_tx_desc *tx_desc;
2005 
2006 	if (test_bit(__IGC_DOWN, &adapter->state))
2007 		return true;
2008 
2009 	tx_buffer = &tx_ring->tx_buffer_info[i];
2010 	tx_desc = IGC_TX_DESC(tx_ring, i);
2011 	i -= tx_ring->count;
2012 
2013 	do {
2014 		union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
2015 
2016 		/* if next_to_watch is not set then there is no work pending */
2017 		if (!eop_desc)
2018 			break;
2019 
2020 		/* prevent any other reads prior to eop_desc */
2021 		smp_rmb();
2022 
2023 		/* if DD is not set pending work has not been completed */
2024 		if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
2025 			break;
2026 
2027 		/* clear next_to_watch to prevent false hangs */
2028 		tx_buffer->next_to_watch = NULL;
2029 
2030 		/* update the statistics for this packet */
2031 		total_bytes += tx_buffer->bytecount;
2032 		total_packets += tx_buffer->gso_segs;
2033 
2034 		/* free the skb */
2035 		napi_consume_skb(tx_buffer->skb, napi_budget);
2036 
2037 		/* unmap skb header data */
2038 		dma_unmap_single(tx_ring->dev,
2039 				 dma_unmap_addr(tx_buffer, dma),
2040 				 dma_unmap_len(tx_buffer, len),
2041 				 DMA_TO_DEVICE);
2042 
2043 		/* clear tx_buffer data */
2044 		dma_unmap_len_set(tx_buffer, len, 0);
2045 
2046 		/* clear last DMA location and unmap remaining buffers */
2047 		while (tx_desc != eop_desc) {
2048 			tx_buffer++;
2049 			tx_desc++;
2050 			i++;
2051 			if (unlikely(!i)) {
2052 				i -= tx_ring->count;
2053 				tx_buffer = tx_ring->tx_buffer_info;
2054 				tx_desc = IGC_TX_DESC(tx_ring, 0);
2055 			}
2056 
2057 			/* unmap any remaining paged data */
2058 			if (dma_unmap_len(tx_buffer, len)) {
2059 				dma_unmap_page(tx_ring->dev,
2060 					       dma_unmap_addr(tx_buffer, dma),
2061 					       dma_unmap_len(tx_buffer, len),
2062 					       DMA_TO_DEVICE);
2063 				dma_unmap_len_set(tx_buffer, len, 0);
2064 			}
2065 		}
2066 
2067 		/* move us one more past the eop_desc for start of next pkt */
2068 		tx_buffer++;
2069 		tx_desc++;
2070 		i++;
2071 		if (unlikely(!i)) {
2072 			i -= tx_ring->count;
2073 			tx_buffer = tx_ring->tx_buffer_info;
2074 			tx_desc = IGC_TX_DESC(tx_ring, 0);
2075 		}
2076 
2077 		/* issue prefetch for next Tx descriptor */
2078 		prefetch(tx_desc);
2079 
2080 		/* update budget accounting */
2081 		budget--;
2082 	} while (likely(budget));
2083 
2084 	netdev_tx_completed_queue(txring_txq(tx_ring),
2085 				  total_packets, total_bytes);
2086 
2087 	i += tx_ring->count;
2088 	tx_ring->next_to_clean = i;
2089 	u64_stats_update_begin(&tx_ring->tx_syncp);
2090 	tx_ring->tx_stats.bytes += total_bytes;
2091 	tx_ring->tx_stats.packets += total_packets;
2092 	u64_stats_update_end(&tx_ring->tx_syncp);
2093 	q_vector->tx.total_bytes += total_bytes;
2094 	q_vector->tx.total_packets += total_packets;
2095 
2096 	if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
2097 		struct igc_hw *hw = &adapter->hw;
2098 
2099 		/* Detect a transmit hang in hardware; this serializes the
2100 		 * check with the clearing of time_stamp and the movement of i
2101 		 */
2102 		clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
2103 		if (tx_buffer->next_to_watch &&
2104 		    time_after(jiffies, tx_buffer->time_stamp +
2105 		    (adapter->tx_timeout_factor * HZ)) &&
2106 		    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
2107 			/* detected Tx unit hang */
2108 			netdev_err(tx_ring->netdev,
2109 				   "Detected Tx Unit Hang\n"
2110 				   "  Tx Queue             <%d>\n"
2111 				   "  TDH                  <%x>\n"
2112 				   "  TDT                  <%x>\n"
2113 				   "  next_to_use          <%x>\n"
2114 				   "  next_to_clean        <%x>\n"
2115 				   "buffer_info[next_to_clean]\n"
2116 				   "  time_stamp           <%lx>\n"
2117 				   "  next_to_watch        <%p>\n"
2118 				   "  jiffies              <%lx>\n"
2119 				   "  desc.status          <%x>\n",
2120 				   tx_ring->queue_index,
2121 				   rd32(IGC_TDH(tx_ring->reg_idx)),
2122 				   readl(tx_ring->tail),
2123 				   tx_ring->next_to_use,
2124 				   tx_ring->next_to_clean,
2125 				   tx_buffer->time_stamp,
2126 				   tx_buffer->next_to_watch,
2127 				   jiffies,
2128 				   tx_buffer->next_to_watch->wb.status);
2129 			netif_stop_subqueue(tx_ring->netdev,
2130 					    tx_ring->queue_index);
2131 
2132 			/* we are about to reset, no point in enabling stuff */
2133 			return true;
2134 		}
2135 	}
2136 
2137 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
2138 	if (unlikely(total_packets &&
2139 		     netif_carrier_ok(tx_ring->netdev) &&
2140 		     igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
2141 		/* Make sure that anybody stopping the queue after this
2142 		 * sees the new next_to_clean.
2143 		 */
2144 		smp_mb();
2145 		if (__netif_subqueue_stopped(tx_ring->netdev,
2146 					     tx_ring->queue_index) &&
2147 		    !(test_bit(__IGC_DOWN, &adapter->state))) {
2148 			netif_wake_subqueue(tx_ring->netdev,
2149 					    tx_ring->queue_index);
2150 
2151 			u64_stats_update_begin(&tx_ring->tx_syncp);
2152 			tx_ring->tx_stats.restart_queue++;
2153 			u64_stats_update_end(&tx_ring->tx_syncp);
2154 		}
2155 	}
2156 
2157 	return !!budget;
2158 }
2159 
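/**
 * igc_find_mac_filter() - Find MAC address filter in hardware
 * @adapter: Pointer to adapter
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 *
 * Scan the RAL/RAH registers for an enabled entry matching @type and @addr.
 *
 * Return: Index of the matching entry, or -1 if none is found.
 */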
2160 static int igc_find_mac_filter(struct igc_adapter *adapter,
2161 			       enum igc_mac_filter_type type, const u8 *addr)
2162 {
2163 	struct igc_hw *hw = &adapter->hw;
2164 	int max_entries = hw->mac.rar_entry_count;
2165 	u32 ral, rah;
2166 	int i;
2167 
2168 	for (i = 0; i < max_entries; i++) {
2169 		ral = rd32(IGC_RAL(i));
2170 		rah = rd32(IGC_RAH(i));
2171 
2172 		if (!(rah & IGC_RAH_AV))
2173 			continue;
2174 		if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
2175 			continue;
2176 		if ((rah & IGC_RAH_RAH_MASK) !=
2177 		    le16_to_cpup((__le16 *)(addr + 4)))
2178 			continue;
2179 		if (ral != le32_to_cpup((__le32 *)(addr)))
2180 			continue;
2181 
2182 		return i;
2183 	}
2184 
2185 	return -1;
2186 }
2187 
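/**
 * igc_get_avail_mac_filter_slot() - Get first unused MAC filter slot
 * @adapter: Pointer to adapter
 *
 * Return: Index of the first RAL/RAH entry with the Address Valid bit
 * cleared, or -1 if all entries are in use.
 */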
2188 static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
2189 {
2190 	struct igc_hw *hw = &adapter->hw;
2191 	int max_entries = hw->mac.rar_entry_count;
2192 	u32 rah;
2193 	int i;
2194 
2195 	for (i = 0; i < max_entries; i++) {
2196 		rah = rd32(IGC_RAH(i));
2197 
2198 		if (!(rah & IGC_RAH_AV))
2199 			return i;
2200 	}
2201 
2202 	return -1;
2203 }
2204 
2205 /**
2206  * igc_add_mac_filter() - Add MAC address filter
2207  * @adapter: Pointer to adapter where the filter should be added
2208  * @type: MAC address filter type (source or destination)
2209  * @addr: MAC address
2210  * @queue: If non-negative, queue assignment feature is enabled and frames
2211  *         matching the filter are enqueued onto 'queue'. Otherwise, queue
2212  *         assignment is disabled.
2213  *
2214  * Return: 0 in case of success, negative errno code otherwise.
2215  */
2216 static int igc_add_mac_filter(struct igc_adapter *adapter,
2217 			      enum igc_mac_filter_type type, const u8 *addr,
2218 			      int queue)
2219 {
2220 	struct net_device *dev = adapter->netdev;
2221 	int index;
2222 
2223 	index = igc_find_mac_filter(adapter, type, addr);
2224 	if (index >= 0)
2225 		goto update_filter;
2226 
2227 	index = igc_get_avail_mac_filter_slot(adapter);
2228 	if (index < 0)
2229 		return -ENOSPC;
2230 
2231 	netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
2232 		   index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2233 		   addr, queue);
2234 
2235 update_filter:
2236 	igc_set_mac_filter_hw(adapter, index, type, addr, queue);
2237 	return 0;
2238 }
2239 
2240 /**
2241  * igc_del_mac_filter() - Delete MAC address filter
2242  * @adapter: Pointer to adapter where the filter should be deleted from
2243  * @type: MAC address filter type (source or destination)
2244  * @addr: MAC address
2245  */
2246 static void igc_del_mac_filter(struct igc_adapter *adapter,
2247 			       enum igc_mac_filter_type type, const u8 *addr)
2248 {
2249 	struct net_device *dev = adapter->netdev;
2250 	int index;
2251 
2252 	index = igc_find_mac_filter(adapter, type, addr);
2253 	if (index < 0)
2254 		return;
2255 
2256 	if (index == 0) {
2257 		/* If this is the default filter, we don't actually delete it.
2258 		 * We just reset to its default value i.e. disable queue
2259 		 * assignment.
2260 		 */
2261 		netdev_dbg(dev, "Disable default MAC filter queue assignment\n");
2262 
2263 		igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
2264 	} else {
2265 		netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
2266 			   index,
2267 			   type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2268 			   addr);
2269 
2270 		igc_clear_mac_filter_hw(adapter, index);
2271 	}
2272 }
2273 
2274 /**
2275  * igc_add_vlan_prio_filter() - Add VLAN priority filter
2276  * @adapter: Pointer to adapter where the filter should be added
2277  * @prio: VLAN priority value
2278  * @queue: Queue number which matching frames are assigned to
2279  *
2280  * Return: 0 in case of success, negative errno code otherwise.
2281  */
2282 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
2283 				    int queue)
2284 {
2285 	struct net_device *dev = adapter->netdev;
2286 	struct igc_hw *hw = &adapter->hw;
2287 	u32 vlanpqf;
2288 
2289 	vlanpqf = rd32(IGC_VLANPQF);
2290 
2291 	if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
2292 		netdev_dbg(dev, "VLAN priority filter already in use\n");
2293 		return -EEXIST;
2294 	}
2295 
2296 	vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
2297 	vlanpqf |= IGC_VLANPQF_VALID(prio);
2298 
2299 	wr32(IGC_VLANPQF, vlanpqf);
2300 
2301 	netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
2302 		   prio, queue);
2303 	return 0;
2304 }
2305 
2306 /**
2307  * igc_del_vlan_prio_filter() - Delete VLAN priority filter
2308  * @adapter: Pointer to adapter where the filter should be deleted from
2309  * @prio: VLAN priority value
2310  */
2311 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
2312 {
2313 	struct igc_hw *hw = &adapter->hw;
2314 	u32 vlanpqf;
2315 
2316 	vlanpqf = rd32(IGC_VLANPQF);
2317 
2318 	vlanpqf &= ~IGC_VLANPQF_VALID(prio);
2319 	vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
2320 
2321 	wr32(IGC_VLANPQF, vlanpqf);
2322 
2323 	netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
2324 		   prio);
2325 }
2326 
2327 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
2328 {
2329 	struct igc_hw *hw = &adapter->hw;
2330 	int i;
2331 
2332 	for (i = 0; i < MAX_ETYPE_FILTER; i++) {
2333 		u32 etqf = rd32(IGC_ETQF(i));
2334 
2335 		if (!(etqf & IGC_ETQF_FILTER_ENABLE))
2336 			return i;
2337 	}
2338 
2339 	return -1;
2340 }
2341 
2342 /**
2343  * igc_add_etype_filter() - Add ethertype filter
2344  * @adapter: Pointer to adapter where the filter should be added
2345  * @etype: Ethertype value
2346  * @queue: If non-negative, queue assignment feature is enabled and frames
2347  *         matching the filter are enqueued onto 'queue'. Otherwise, queue
2348  *         assignment is disabled.
2349  *
2350  * Return: 0 in case of success, negative errno code otherwise.
2351  */
2352 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
2353 				int queue)
2354 {
2355 	struct igc_hw *hw = &adapter->hw;
2356 	int index;
2357 	u32 etqf;
2358 
2359 	index = igc_get_avail_etype_filter_slot(adapter);
2360 	if (index < 0)
2361 		return -ENOSPC;
2362 
2363 	etqf = rd32(IGC_ETQF(index));
2364 
2365 	etqf &= ~IGC_ETQF_ETYPE_MASK;
2366 	etqf |= etype;
2367 
2368 	if (queue >= 0) {
2369 		etqf &= ~IGC_ETQF_QUEUE_MASK;
2370 		etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
2371 		etqf |= IGC_ETQF_QUEUE_ENABLE;
2372 	}
2373 
2374 	etqf |= IGC_ETQF_FILTER_ENABLE;
2375 
2376 	wr32(IGC_ETQF(index), etqf);
2377 
2378 	netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
2379 		   etype, queue);
2380 	return 0;
2381 }
2382 
2383 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
2384 {
2385 	struct igc_hw *hw = &adapter->hw;
2386 	int i;
2387 
2388 	for (i = 0; i < MAX_ETYPE_FILTER; i++) {
2389 		u32 etqf = rd32(IGC_ETQF(i));
2390 
2391 		if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
2392 			return i;
2393 	}
2394 
2395 	return -1;
2396 }
2397 
2398 /**
2399  * igc_del_etype_filter() - Delete ethertype filter
2400  * @adapter: Pointer to adapter where the filter should be deleted from
2401  * @etype: Ethertype value
2402  */
2403 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
2404 {
2405 	struct igc_hw *hw = &adapter->hw;
2406 	int index;
2407 
2408 	index = igc_find_etype_filter(adapter, etype);
2409 	if (index < 0)
2410 		return;
2411 
2412 	wr32(IGC_ETQF(index), 0);
2413 
2414 	netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
2415 		   etype);
2416 }
2417 
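/* Program every hardware filter (ethertype, source/destination MAC and
 * VLAN priority) requested by @rule.  A minimal usage sketch, mirroring
 * how igc_add_nfc_rule() consumes it below:
 *
 *	err = igc_enable_nfc_rule(adapter, rule);
 *	if (err)
 *		return err;
 *
 * Note that filters programmed before a failure are not rolled back here.
 */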
2418 static int igc_enable_nfc_rule(struct igc_adapter *adapter,
2419 			       const struct igc_nfc_rule *rule)
2420 {
2421 	int err;
2422 
2423 	if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
2424 		err = igc_add_etype_filter(adapter, rule->filter.etype,
2425 					   rule->action);
2426 		if (err)
2427 			return err;
2428 	}
2429 
2430 	if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
2431 		err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
2432 					 rule->filter.src_addr, rule->action);
2433 		if (err)
2434 			return err;
2435 	}
2436 
2437 	if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
2438 		err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
2439 					 rule->filter.dst_addr, rule->action);
2440 		if (err)
2441 			return err;
2442 	}
2443 
2444 	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
2445 		int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
2446 			   VLAN_PRIO_SHIFT;
2447 
2448 		err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
2449 		if (err)
2450 			return err;
2451 	}
2452 
2453 	return 0;
2454 }
2455 
2456 static void igc_disable_nfc_rule(struct igc_adapter *adapter,
2457 				 const struct igc_nfc_rule *rule)
2458 {
2459 	if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
2460 		igc_del_etype_filter(adapter, rule->filter.etype);
2461 
2462 	if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
2463 		int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
2464 			   VLAN_PRIO_SHIFT;
2465 
2466 		igc_del_vlan_prio_filter(adapter, prio);
2467 	}
2468 
2469 	if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
2470 		igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
2471 				   rule->filter.src_addr);
2472 
2473 	if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
2474 		igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
2475 				   rule->filter.dst_addr);
2476 }
2477 
2478 /**
2479  * igc_get_nfc_rule() - Get NFC rule
2480  * @adapter: Pointer to adapter
2481  * @location: Rule location
2482  *
2483  * Context: Expects adapter->nfc_rule_lock to be held by caller.
2484  *
2485  * Return: Pointer to NFC rule at @location. If not found, NULL.
2486  */
2487 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
2488 				      u32 location)
2489 {
2490 	struct igc_nfc_rule *rule;
2491 
2492 	list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
2493 		if (rule->location == location)
2494 			return rule;
2495 		if (rule->location > location)
2496 			break;
2497 	}
2498 
2499 	return NULL;
2500 }
2501 
2502 /**
2503  * igc_del_nfc_rule() - Delete NFC rule
2504  * @adapter: Pointer to adapter
2505  * @rule: Pointer to rule to be deleted
2506  *
2507  * Disable NFC rule in hardware and delete it from adapter.
2508  *
2509  * Context: Expects adapter->nfc_rule_lock to be held by caller.
2510  */
2511 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
2512 {
2513 	igc_disable_nfc_rule(adapter, rule);
2514 
2515 	list_del(&rule->list);
2516 	adapter->nfc_rule_count--;
2517 
2518 	kfree(rule);
2519 }
2520 
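/* Disable and free every NFC rule on the adapter.  Unlike
 * igc_del_nfc_rule(), which expects the caller to hold nfc_rule_lock,
 * this helper takes the lock itself.
 */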
2521 static void igc_flush_nfc_rules(struct igc_adapter *adapter)
2522 {
2523 	struct igc_nfc_rule *rule, *tmp;
2524 
2525 	mutex_lock(&adapter->nfc_rule_lock);
2526 
2527 	list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
2528 		igc_del_nfc_rule(adapter, rule);
2529 
2530 	mutex_unlock(&adapter->nfc_rule_lock);
2531 }
2532 
2533 /**
2534  * igc_add_nfc_rule() - Add NFC rule
2535  * @adapter: Pointer to adapter
2536  * @rule: Pointer to rule to be added
2537  *
2538  * Enable NFC rule in hardware and add it to adapter.
2539  *
2540  * Context: Expects adapter->nfc_rule_lock to be held by caller.
2541  *
2542  * Return: 0 on success, negative errno on failure.
2543  */
2544 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
2545 {
2546 	struct igc_nfc_rule *pred, *cur;
2547 	int err;
2548 
2549 	err = igc_enable_nfc_rule(adapter, rule);
2550 	if (err)
2551 		return err;
2552 
2553 	pred = NULL;
2554 	list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
2555 		if (cur->location >= rule->location)
2556 			break;
2557 		pred = cur;
2558 	}
2559 
2560 	list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
2561 	adapter->nfc_rule_count++;
2562 	return 0;
2563 }
2564 
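/* Re-program all NFC rules into hardware; called from igc_configure()
 * so that filters survive a device reconfiguration.  The list is walked
 * in reverse, which appears intended to leave lower-location (higher
 * precedence) rules programmed last.
 */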
2565 static void igc_restore_nfc_rules(struct igc_adapter *adapter)
2566 {
2567 	struct igc_nfc_rule *rule;
2568 
2569 	mutex_lock(&adapter->nfc_rule_lock);
2570 
2571 	list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
2572 		igc_enable_nfc_rule(adapter, rule);
2573 
2574 	mutex_unlock(&adapter->nfc_rule_lock);
2575 }
2576 
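/* __dev_uc_sync()/__dev_uc_unsync() callbacks: secondary unicast
 * addresses are mapped to destination MAC filters with queue assignment
 * disabled (queue == -1).
 */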
2577 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
2578 {
2579 	struct igc_adapter *adapter = netdev_priv(netdev);
2580 
2581 	return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
2582 }
2583 
2584 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
2585 {
2586 	struct igc_adapter *adapter = netdev_priv(netdev);
2587 
2588 	igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
2589 	return 0;
2590 }
2591 
2592 /**
2593  * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2594  * @netdev: network interface device structure
2595  *
2596  * The set_rx_mode entry point is called whenever the unicast or multicast
2597  * address lists or the network interface flags are updated.  This routine is
2598  * responsible for configuring the hardware for proper unicast, multicast,
2599  * promiscuous mode, and all-multi behavior.
2600  */
2601 static void igc_set_rx_mode(struct net_device *netdev)
2602 {
2603 	struct igc_adapter *adapter = netdev_priv(netdev);
2604 	struct igc_hw *hw = &adapter->hw;
2605 	u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
2606 	int count;
2607 
2608 	/* Check for Promiscuous and All Multicast modes */
2609 	if (netdev->flags & IFF_PROMISC) {
2610 		rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
2611 	} else {
2612 		if (netdev->flags & IFF_ALLMULTI) {
2613 			rctl |= IGC_RCTL_MPE;
2614 		} else {
2615 			/* Write addresses to the MTA, if the attempt fails
2616 			 * then we should just turn on promiscuous mode so
2617 			 * that we can at least receive multicast traffic
2618 			 */
2619 			count = igc_write_mc_addr_list(netdev);
2620 			if (count < 0)
2621 				rctl |= IGC_RCTL_MPE;
2622 		}
2623 	}
2624 
2625 	/* Write addresses to available RAR registers, if there is not
2626 	 * sufficient space to store all the addresses then enable
2627 	 * unicast promiscuous mode
2628 	 */
2629 	if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
2630 		rctl |= IGC_RCTL_UPE;
2631 
2632 	/* update state of unicast and multicast */
2633 	rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
2634 	wr32(IGC_RCTL, rctl);
2635 
2636 #if (PAGE_SIZE < 8192)
2637 	if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
2638 		rlpml = IGC_MAX_FRAME_BUILD_SKB;
2639 #endif
2640 	wr32(IGC_RLPML, rlpml);
2641 }
2642 
2643 /**
2644  * igc_configure - configure the hardware for RX and TX
2645  * @adapter: private board structure
2646  */
2647 static void igc_configure(struct igc_adapter *adapter)
2648 {
2649 	struct net_device *netdev = adapter->netdev;
2650 	int i = 0;
2651 
2652 	igc_get_hw_control(adapter);
2653 	igc_set_rx_mode(netdev);
2654 
2655 	igc_setup_tctl(adapter);
2656 	igc_setup_mrqc(adapter);
2657 	igc_setup_rctl(adapter);
2658 
2659 	igc_set_default_mac_filter(adapter);
2660 	igc_restore_nfc_rules(adapter);
2661 
2662 	igc_configure_tx(adapter);
2663 	igc_configure_rx(adapter);
2664 
2665 	igc_rx_fifo_flush_base(&adapter->hw);
2666 
2667 	/* call igc_desc_unused which always leaves
2668 	 * at least 1 descriptor unused to make sure
2669 	 * next_to_use != next_to_clean
2670 	 */
2671 	for (i = 0; i < adapter->num_rx_queues; i++) {
2672 		struct igc_ring *ring = adapter->rx_ring[i];
2673 
2674 		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
2675 	}
2676 }
2677 
2678 /**
2679  * igc_write_ivar - configure ivar for given MSI-X vector
2680  * @hw: pointer to the HW structure
2681  * @msix_vector: vector number we are allocating to a given ring
2682  * @index: row index of IVAR register to write within IVAR table
2683  * @offset: column offset in IVAR, should be a multiple of 8
2684  *
2685  * The IVAR table consists of 2 columns,
2686  * each containing a cause allocation for an Rx and Tx ring, and a
2687  * variable number of rows depending on the number of queues supported.
2688  */
2689 static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
2690 			   int index, int offset)
2691 {
2692 	u32 ivar = array_rd32(IGC_IVAR0, index);
2693 
2694 	/* clear any bits that are currently set */
2695 	ivar &= ~((u32)0xFF << offset);
2696 
2697 	/* write vector and valid bit */
2698 	ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
2699 
2700 	array_wr32(IGC_IVAR0, index, ivar);
2701 }
2702 
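/* Map the Rx and/or Tx ring owned by @q_vector to @msix_vector via the
 * IVAR table.  Each IVAR register covers two queues, hence the
 * "queue >> 1" row index and the 4-bit column shift, with Tx causes
 * offset by 8 bits from the Rx causes.
 */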
2703 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
2704 {
2705 	struct igc_adapter *adapter = q_vector->adapter;
2706 	struct igc_hw *hw = &adapter->hw;
2707 	int rx_queue = IGC_N0_QUEUE;
2708 	int tx_queue = IGC_N0_QUEUE;
2709 
2710 	if (q_vector->rx.ring)
2711 		rx_queue = q_vector->rx.ring->reg_idx;
2712 	if (q_vector->tx.ring)
2713 		tx_queue = q_vector->tx.ring->reg_idx;
2714 
2715 	switch (hw->mac.type) {
2716 	case igc_i225:
2717 		if (rx_queue > IGC_N0_QUEUE)
2718 			igc_write_ivar(hw, msix_vector,
2719 				       rx_queue >> 1,
2720 				       (rx_queue & 0x1) << 4);
2721 		if (tx_queue > IGC_N0_QUEUE)
2722 			igc_write_ivar(hw, msix_vector,
2723 				       tx_queue >> 1,
2724 				       ((tx_queue & 0x1) << 4) + 8);
2725 		q_vector->eims_value = BIT(msix_vector);
2726 		break;
2727 	default:
2728 		WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
2729 		break;
2730 	}
2731 
2732 	/* add q_vector eims value to global eims_enable_mask */
2733 	adapter->eims_enable_mask |= q_vector->eims_value;
2734 
2735 	/* configure q_vector to set itr on first interrupt */
2736 	q_vector->set_itr = 1;
2737 }
2738 
2739 /**
2740  * igc_configure_msix - Configure MSI-X hardware
2741  * @adapter: Pointer to adapter structure
2742  *
2743  * igc_configure_msix sets up the hardware to properly
2744  * generate MSI-X interrupts.
2745  */
2746 static void igc_configure_msix(struct igc_adapter *adapter)
2747 {
2748 	struct igc_hw *hw = &adapter->hw;
2749 	int i, vector = 0;
2750 	u32 tmp;
2751 
2752 	adapter->eims_enable_mask = 0;
2753 
2754 	/* set vector for other causes, i.e. link changes */
2755 	switch (hw->mac.type) {
2756 	case igc_i225:
2757 		/* Turn on MSI-X capability first, or our settings
2758 		 * won't stick.  And it will take days to debug.
2759 		 */
2760 		wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
2761 		     IGC_GPIE_PBA | IGC_GPIE_EIAME |
2762 		     IGC_GPIE_NSICR);
2763 
2764 		/* enable msix_other interrupt */
2765 		adapter->eims_other = BIT(vector);
2766 		tmp = (vector++ | IGC_IVAR_VALID) << 8;
2767 
2768 		wr32(IGC_IVAR_MISC, tmp);
2769 		break;
2770 	default:
2771 		/* do nothing, since nothing else supports MSI-X */
2772 		break;
2773 	} /* switch (hw->mac.type) */
2774 
2775 	adapter->eims_enable_mask |= adapter->eims_other;
2776 
2777 	for (i = 0; i < adapter->num_q_vectors; i++)
2778 		igc_assign_vector(adapter->q_vector[i], vector++);
2779 
2780 	wrfl();
2781 }
2782 
2783 /**
2784  * igc_irq_enable - Enable default interrupt generation settings
2785  * @adapter: board private structure
2786  */
2787 static void igc_irq_enable(struct igc_adapter *adapter)
2788 {
2789 	struct igc_hw *hw = &adapter->hw;
2790 
2791 	if (adapter->msix_entries) {
2792 		u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
2793 		u32 regval = rd32(IGC_EIAC);
2794 
2795 		wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
2796 		regval = rd32(IGC_EIAM);
2797 		wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
2798 		wr32(IGC_EIMS, adapter->eims_enable_mask);
2799 		wr32(IGC_IMS, ims);
2800 	} else {
2801 		wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
2802 		wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
2803 	}
2804 }
2805 
2806 /**
2807  * igc_irq_disable - Mask off interrupt generation on the NIC
2808  * @adapter: board private structure
2809  */
2810 static void igc_irq_disable(struct igc_adapter *adapter)
2811 {
2812 	struct igc_hw *hw = &adapter->hw;
2813 
2814 	if (adapter->msix_entries) {
2815 		u32 regval = rd32(IGC_EIAM);
2816 
2817 		wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
2818 		wr32(IGC_EIMC, adapter->eims_enable_mask);
2819 		regval = rd32(IGC_EIAC);
2820 		wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
2821 	}
2822 
2823 	wr32(IGC_IAM, 0);
2824 	wr32(IGC_IMC, ~0);
2825 	wrfl();
2826 
2827 	if (adapter->msix_entries) {
2828 		int vector = 0, i;
2829 
2830 		synchronize_irq(adapter->msix_entries[vector++].vector);
2831 
2832 		for (i = 0; i < adapter->num_q_vectors; i++)
2833 			synchronize_irq(adapter->msix_entries[vector++].vector);
2834 	} else {
2835 		synchronize_irq(adapter->pdev->irq);
2836 	}
2837 }
2838 
2839 void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
2840 			      const u32 max_rss_queues)
2841 {
2842 	/* Determine if we need to pair queues: if rss_queues > half of
2843 	 * max_rss_queues, pair the queues in order to conserve interrupts
2844 	 * due to limited supply.
2845 	 */
2846 	if (adapter->rss_queues > (max_rss_queues / 2))
2847 		adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
2848 	else
2849 		adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
2850 }
2851 
2852 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
2853 {
2854 	return IGC_MAX_RX_QUEUES;
2855 }
2856 
2857 static void igc_init_queue_configuration(struct igc_adapter *adapter)
2858 {
2859 	u32 max_rss_queues;
2860 
2861 	max_rss_queues = igc_get_max_rss_queues(adapter);
2862 	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
2863 
2864 	igc_set_flag_queue_pairs(adapter, max_rss_queues);
2865 }
2866 
2867 /**
2868  * igc_reset_q_vector - Reset config for interrupt vector
2869  * @adapter: board private structure to initialize
2870  * @v_idx: Index of vector to be reset
2871  *
2872  * If NAPI is enabled it will delete any references to the
2873  * NAPI struct. This is preparation for igc_free_q_vector.
2874  */
2875 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
2876 {
2877 	struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
2878 
2879 	/* if we're coming from igc_set_interrupt_capability, the vectors are
2880 	 * not yet allocated
2881 	 */
2882 	if (!q_vector)
2883 		return;
2884 
2885 	if (q_vector->tx.ring)
2886 		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
2887 
2888 	if (q_vector->rx.ring)
2889 		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
2890 
2891 	netif_napi_del(&q_vector->napi);
2892 }
2893 
2894 /**
2895  * igc_free_q_vector - Free memory allocated for specific interrupt vector
2896  * @adapter: board private structure to initialize
2897  * @v_idx: Index of vector to be freed
2898  *
2899  * This function frees the memory allocated to the q_vector.
2900  */
2901 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
2902 {
2903 	struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
2904 
2905 	adapter->q_vector[v_idx] = NULL;
2906 
2907 	/* igc_get_stats64() might access the rings on this vector,
2908 	 * we must wait a grace period before freeing it.
2909 	 */
2910 	if (q_vector)
2911 		kfree_rcu(q_vector, rcu);
2912 }
2913 
2914 /**
2915  * igc_free_q_vectors - Free memory allocated for interrupt vectors
2916  * @adapter: board private structure to initialize
2917  *
2918  * This function frees the memory allocated to the q_vectors.  In addition if
2919  * NAPI is enabled it will delete any references to the NAPI struct prior
2920  * to freeing the q_vector.
2921  */
2922 static void igc_free_q_vectors(struct igc_adapter *adapter)
2923 {
2924 	int v_idx = adapter->num_q_vectors;
2925 
2926 	adapter->num_tx_queues = 0;
2927 	adapter->num_rx_queues = 0;
2928 	adapter->num_q_vectors = 0;
2929 
2930 	while (v_idx--) {
2931 		igc_reset_q_vector(adapter, v_idx);
2932 		igc_free_q_vector(adapter, v_idx);
2933 	}
2934 }
2935 
2936 /**
2937  * igc_update_itr - update the dynamic ITR value based on statistics
2938  * @q_vector: pointer to q_vector
2939  * @ring_container: ring info to update the itr for
2940  *
2941  * Stores a new ITR value based on packets and byte
2942  * counts during the last interrupt.  The advantage of per interrupt
2943  * computation is faster updates and more accurate ITR for the current
2944  * traffic pattern.  Constants in this function were computed
2945  * based on theoretical maximum wire speed and thresholds were set based
2946  * on testing data as well as attempting to minimize response time
2947  * while increasing bulk throughput.
2948  * NOTE: These calculations are only valid when operating in a single-
2949  * queue environment.
2950  */
2951 static void igc_update_itr(struct igc_q_vector *q_vector,
2952 			   struct igc_ring_container *ring_container)
2953 {
2954 	unsigned int packets = ring_container->total_packets;
2955 	unsigned int bytes = ring_container->total_bytes;
2956 	u8 itrval = ring_container->itr;
2957 
2958 	/* no packets, exit with status unchanged */
2959 	if (packets == 0)
2960 		return;
2961 
2962 	switch (itrval) {
2963 	case lowest_latency:
2964 		/* handle TSO and jumbo frames */
2965 		if (bytes / packets > 8000)
2966 			itrval = bulk_latency;
2967 		else if ((packets < 5) && (bytes > 512))
2968 			itrval = low_latency;
2969 		break;
2970 	case low_latency:  /* 50 usec aka 20000 ints/s */
2971 		if (bytes > 10000) {
2972 			/* this if handles the TSO accounting */
2973 			if (bytes / packets > 8000)
2974 				itrval = bulk_latency;
2975 			else if ((packets < 10) || ((bytes / packets) > 1200))
2976 				itrval = bulk_latency;
2977 			else if (packets > 35)
2978 				itrval = lowest_latency;
2979 		} else if (bytes / packets > 2000) {
2980 			itrval = bulk_latency;
2981 		} else if (packets <= 2 && bytes < 512) {
2982 			itrval = lowest_latency;
2983 		}
2984 		break;
2985 	case bulk_latency: /* 250 usec aka 4000 ints/s */
2986 		if (bytes > 25000) {
2987 			if (packets > 35)
2988 				itrval = low_latency;
2989 		} else if (bytes < 1500) {
2990 			itrval = low_latency;
2991 		}
2992 		break;
2993 	}
2994 
2995 	/* clear work counters since we have the values we need */
2996 	ring_container->total_bytes = 0;
2997 	ring_container->total_packets = 0;
2998 
2999 	/* write updated itr to ring container */
3000 	ring_container->itr = itrval;
3001 }
3002 
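/* Select a new interrupt throttle rate for a single-queue vector based
 * on the per-ring classification from igc_update_itr().  The value is
 * only latched here; it is written to hardware on the next interrupt.
 */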
3003 static void igc_set_itr(struct igc_q_vector *q_vector)
3004 {
3005 	struct igc_adapter *adapter = q_vector->adapter;
3006 	u32 new_itr = q_vector->itr_val;
3007 	u8 current_itr = 0;
3008 
3009 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3010 	switch (adapter->link_speed) {
3011 	case SPEED_10:
3012 	case SPEED_100:
3013 		current_itr = 0;
3014 		new_itr = IGC_4K_ITR;
3015 		goto set_itr_now;
3016 	default:
3017 		break;
3018 	}
3019 
3020 	igc_update_itr(q_vector, &q_vector->tx);
3021 	igc_update_itr(q_vector, &q_vector->rx);
3022 
3023 	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
3024 
3025 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
3026 	if (current_itr == lowest_latency &&
3027 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3028 	    (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3029 		current_itr = low_latency;
3030 
3031 	switch (current_itr) {
3032 	/* counts and packets in update_itr are dependent on these numbers */
3033 	case lowest_latency:
3034 		new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
3035 		break;
3036 	case low_latency:
3037 		new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
3038 		break;
3039 	case bulk_latency:
3040 		new_itr = IGC_4K_ITR;  /* 4,000 ints/sec */
3041 		break;
3042 	default:
3043 		break;
3044 	}
3045 
3046 set_itr_now:
3047 	if (new_itr != q_vector->itr_val) {
3048 		/* this attempts to bias the interrupt rate towards Bulk
3049 		 * by adding intermediate steps when interrupt rate is
3050 		 * increasing
3051 		 */
3052 		new_itr = new_itr > q_vector->itr_val ?
3053 			  max((new_itr * q_vector->itr_val) /
3054 			  (new_itr + (q_vector->itr_val >> 2)),
3055 			  new_itr) : new_itr;
3056 		/* Don't write the value here; it resets the adapter's
3057 		 * internal timer, and causes us to delay far longer than
3058 		 * we should between interrupts.  Instead, we write the ITR
3059 		 * value at the beginning of the next interrupt so the timing
3060 		 * ends up being correct.
3061 		 */
3062 		q_vector->itr_val = new_itr;
3063 		q_vector->set_itr = 1;
3064 	}
3065 }
3066 
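/* Release MSI-X or MSI resources and reset all queue vectors.  Used on
 * teardown and when falling back from MSI-X to MSI.
 */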
3067 static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
3068 {
3069 	int v_idx = adapter->num_q_vectors;
3070 
3071 	if (adapter->msix_entries) {
3072 		pci_disable_msix(adapter->pdev);
3073 		kfree(adapter->msix_entries);
3074 		adapter->msix_entries = NULL;
3075 	} else if (adapter->flags & IGC_FLAG_HAS_MSI) {
3076 		pci_disable_msi(adapter->pdev);
3077 	}
3078 
3079 	while (v_idx--)
3080 		igc_reset_q_vector(adapter, v_idx);
3081 }
3082 
3083 /**
3084  * igc_set_interrupt_capability - set MSI or MSI-X if supported
3085  * @adapter: Pointer to adapter structure
3086  * @msix: boolean value for MSI-X capability
3087  *
3088  * Attempt to configure interrupts using the best available
3089  * capabilities of the hardware and kernel.
3090  */
3091 static void igc_set_interrupt_capability(struct igc_adapter *adapter,
3092 					 bool msix)
3093 {
3094 	int numvecs, i;
3095 	int err;
3096 
3097 	if (!msix)
3098 		goto msi_only;
3099 	adapter->flags |= IGC_FLAG_HAS_MSIX;
3100 
3101 	/* Number of supported queues. */
3102 	adapter->num_rx_queues = adapter->rss_queues;
3103 
3104 	adapter->num_tx_queues = adapter->rss_queues;
3105 
3106 	/* start with one vector for every Rx queue */
3107 	numvecs = adapter->num_rx_queues;
3108 
3109 	/* if Tx handler is separate add 1 for every Tx queue */
3110 	if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
3111 		numvecs += adapter->num_tx_queues;
3112 
3113 	/* store the number of vectors reserved for queues */
3114 	adapter->num_q_vectors = numvecs;
3115 
3116 	/* add 1 vector for link status interrupts */
3117 	numvecs++;
3118 
3119 	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
3120 					GFP_KERNEL);
3121 
3122 	if (!adapter->msix_entries)
3123 		return;
3124 
3125 	/* populate entry values */
3126 	for (i = 0; i < numvecs; i++)
3127 		adapter->msix_entries[i].entry = i;
3128 
3129 	err = pci_enable_msix_range(adapter->pdev,
3130 				    adapter->msix_entries,
3131 				    numvecs,
3132 				    numvecs);
3133 	if (err > 0)
3134 		return;
3135 
3136 	kfree(adapter->msix_entries);
3137 	adapter->msix_entries = NULL;
3138 
3139 	igc_reset_interrupt_capability(adapter);
3140 
3141 msi_only:
3142 	adapter->flags &= ~IGC_FLAG_HAS_MSIX;
3143 
3144 	adapter->rss_queues = 1;
3145 	adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
3146 	adapter->num_rx_queues = 1;
3147 	adapter->num_tx_queues = 1;
3148 	adapter->num_q_vectors = 1;
3149 	if (!pci_enable_msi(adapter->pdev))
3150 		adapter->flags |= IGC_FLAG_HAS_MSI;
3151 }
3152 
3153 /**
3154  * igc_update_ring_itr - update the dynamic ITR value based on packet size
3155  * @q_vector: pointer to q_vector
3156  *
3157  * Stores a new ITR value based strictly on packet size.  This
3158  * algorithm is less sophisticated than that used in igc_update_itr,
3159  * due to the difficulty of synchronizing statistics across multiple
3160  * receive rings.  The divisors and thresholds used by this function
3161  * were determined based on theoretical maximum wire speed and testing
3162  * data, in order to minimize response time while increasing bulk
3163  * throughput.
3164  * NOTE: This function is called only when operating in a multiqueue
3165  * receive environment.
3166  */
3167 static void igc_update_ring_itr(struct igc_q_vector *q_vector)
3168 {
3169 	struct igc_adapter *adapter = q_vector->adapter;
3170 	int new_val = q_vector->itr_val;
3171 	int avg_wire_size = 0;
3172 	unsigned int packets;
3173 
3174 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
3175 	 * ints/sec - ITR timer value of 120 ticks.
3176 	 */
3177 	switch (adapter->link_speed) {
3178 	case SPEED_10:
3179 	case SPEED_100:
3180 		new_val = IGC_4K_ITR;
3181 		goto set_itr_val;
3182 	default:
3183 		break;
3184 	}
3185 
3186 	packets = q_vector->rx.total_packets;
3187 	if (packets)
3188 		avg_wire_size = q_vector->rx.total_bytes / packets;
3189 
3190 	packets = q_vector->tx.total_packets;
3191 	if (packets)
3192 		avg_wire_size = max_t(u32, avg_wire_size,
3193 				      q_vector->tx.total_bytes / packets);
3194 
3195 	/* if avg_wire_size isn't set no work was done */
3196 	if (!avg_wire_size)
3197 		goto clear_counts;
3198 
3199 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
3200 	avg_wire_size += 24;
3201 
3202 	/* Don't starve jumbo frames */
3203 	avg_wire_size = min(avg_wire_size, 3000);
3204 
3205 	/* Give a little boost to mid-size frames */
3206 	if (avg_wire_size > 300 && avg_wire_size < 1200)
3207 		new_val = avg_wire_size / 3;
3208 	else
3209 		new_val = avg_wire_size / 2;
3210 
3211 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
3212 	if (new_val < IGC_20K_ITR &&
3213 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3214 	    (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3215 		new_val = IGC_20K_ITR;
3216 
3217 set_itr_val:
3218 	if (new_val != q_vector->itr_val) {
3219 		q_vector->itr_val = new_val;
3220 		q_vector->set_itr = 1;
3221 	}
3222 clear_counts:
3223 	q_vector->rx.total_bytes = 0;
3224 	q_vector->rx.total_packets = 0;
3225 	q_vector->tx.total_bytes = 0;
3226 	q_vector->tx.total_packets = 0;
3227 }
3228 
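/* Re-arm interrupts for @q_vector at the end of a NAPI poll, refreshing
 * the dynamic ITR first when the ring's ITR setting indicates adaptive
 * moderation.
 */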
3229 static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
3230 {
3231 	struct igc_adapter *adapter = q_vector->adapter;
3232 	struct igc_hw *hw = &adapter->hw;
3233 
3234 	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
3235 	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
3236 		if (adapter->num_q_vectors == 1)
3237 			igc_set_itr(q_vector);
3238 		else
3239 			igc_update_ring_itr(q_vector);
3240 	}
3241 
3242 	if (!test_bit(__IGC_DOWN, &adapter->state)) {
3243 		if (adapter->msix_entries)
3244 			wr32(IGC_EIMS, q_vector->eims_value);
3245 		else
3246 			igc_irq_enable(adapter);
3247 	}
3248 }
3249 
3250 static void igc_add_ring(struct igc_ring *ring,
3251 			 struct igc_ring_container *head)
3252 {
3253 	head->ring = ring;
3254 	head->count++;
3255 }
3256 
3257 /**
3258  * igc_cache_ring_register - Descriptor ring to register mapping
3259  * @adapter: board private structure to initialize
3260  *
3261  * Once we know the feature-set enabled for the device, we'll cache
3262  * the register offset the descriptor ring is assigned to.
3263  */
3264 static void igc_cache_ring_register(struct igc_adapter *adapter)
3265 {
3266 	int i = 0, j = 0;
3267 
3268 	switch (adapter->hw.mac.type) {
3269 	case igc_i225:
3270 	default:
3271 		for (; i < adapter->num_rx_queues; i++)
3272 			adapter->rx_ring[i]->reg_idx = i;
3273 		for (; j < adapter->num_tx_queues; j++)
3274 			adapter->tx_ring[j]->reg_idx = j;
3275 		break;
3276 	}
3277 }
3278 
3279 /**
3280  * igc_poll - NAPI Rx polling callback
3281  * @napi: napi polling structure
3282  * @budget: count of how many packets we should handle
3283  */
3284 static int igc_poll(struct napi_struct *napi, int budget)
3285 {
3286 	struct igc_q_vector *q_vector = container_of(napi,
3287 						     struct igc_q_vector,
3288 						     napi);
3289 	bool clean_complete = true;
3290 	int work_done = 0;
3291 
3292 	if (q_vector->tx.ring)
3293 		clean_complete = igc_clean_tx_irq(q_vector, budget);
3294 
3295 	if (q_vector->rx.ring) {
3296 		int cleaned = igc_clean_rx_irq(q_vector, budget);
3297 
3298 		work_done += cleaned;
3299 		if (cleaned >= budget)
3300 			clean_complete = false;
3301 	}
3302 
3303 	/* If all work not completed, return budget and keep polling */
3304 	if (!clean_complete)
3305 		return budget;
3306 
3307 	/* Exit the polling mode, but don't re-enable interrupts if stack might
3308 	 * poll us due to busy-polling
3309 	 */
3310 	if (likely(napi_complete_done(napi, work_done)))
3311 		igc_ring_irq_enable(q_vector);
3312 
3313 	return min(work_done, budget - 1);
3314 }
3315 
3316 /**
3317  * igc_alloc_q_vector - Allocate memory for a single interrupt vector
3318  * @adapter: board private structure to initialize
3319  * @v_count: q_vectors allocated on adapter, used for ring interleaving
3320  * @v_idx: index of vector in adapter struct
3321  * @txr_count: total number of Tx rings to allocate
3322  * @txr_idx: index of first Tx ring to allocate
3323  * @rxr_count: total number of Rx rings to allocate
3324  * @rxr_idx: index of first Rx ring to allocate
3325  *
3326  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
3327  */
3328 static int igc_alloc_q_vector(struct igc_adapter *adapter,
3329 			      unsigned int v_count, unsigned int v_idx,
3330 			      unsigned int txr_count, unsigned int txr_idx,
3331 			      unsigned int rxr_count, unsigned int rxr_idx)
3332 {
3333 	struct igc_q_vector *q_vector;
3334 	struct igc_ring *ring;
3335 	int ring_count;
3336 
3337 	/* igc only supports 1 Tx and/or 1 Rx queue per vector */
3338 	if (txr_count > 1 || rxr_count > 1)
3339 		return -ENOMEM;
3340 
3341 	ring_count = txr_count + rxr_count;
3342 
3343 	/* allocate q_vector and rings */
3344 	q_vector = adapter->q_vector[v_idx];
3345 	if (!q_vector)
3346 		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
3347 				   GFP_KERNEL);
3348 	else
3349 		memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
3350 	if (!q_vector)
3351 		return -ENOMEM;
3352 
3353 	/* initialize NAPI */
3354 	netif_napi_add(adapter->netdev, &q_vector->napi,
3355 		       igc_poll, 64);
3356 
3357 	/* tie q_vector and adapter together */
3358 	adapter->q_vector[v_idx] = q_vector;
3359 	q_vector->adapter = adapter;
3360 
3361 	/* initialize work limits */
3362 	q_vector->tx.work_limit = adapter->tx_work_limit;
3363 
3364 	/* initialize ITR configuration */
3365 	q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
3366 	q_vector->itr_val = IGC_START_ITR;
3367 
3368 	/* initialize pointer to rings */
3369 	ring = q_vector->ring;
3370 
3371 	/* initialize ITR */
3372 	if (rxr_count) {
3373 		/* rx or rx/tx vector */
3374 		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
3375 			q_vector->itr_val = adapter->rx_itr_setting;
3376 	} else {
3377 		/* tx only vector */
3378 		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
3379 			q_vector->itr_val = adapter->tx_itr_setting;
3380 	}
3381 
3382 	if (txr_count) {
3383 		/* assign generic ring traits */
3384 		ring->dev = &adapter->pdev->dev;
3385 		ring->netdev = adapter->netdev;
3386 
3387 		/* configure backlink on ring */
3388 		ring->q_vector = q_vector;
3389 
3390 		/* update q_vector Tx values */
3391 		igc_add_ring(ring, &q_vector->tx);
3392 
3393 		/* apply Tx specific ring traits */
3394 		ring->count = adapter->tx_ring_count;
3395 		ring->queue_index = txr_idx;
3396 
3397 		/* assign ring to adapter */
3398 		adapter->tx_ring[txr_idx] = ring;
3399 
3400 		/* push pointer to next ring */
3401 		ring++;
3402 	}
3403 
3404 	if (rxr_count) {
3405 		/* assign generic ring traits */
3406 		ring->dev = &adapter->pdev->dev;
3407 		ring->netdev = adapter->netdev;
3408 
3409 		/* configure backlink on ring */
3410 		ring->q_vector = q_vector;
3411 
3412 		/* update q_vector Rx values */
3413 		igc_add_ring(ring, &q_vector->rx);
3414 
3415 		/* apply Rx specific ring traits */
3416 		ring->count = adapter->rx_ring_count;
3417 		ring->queue_index = rxr_idx;
3418 
3419 		/* assign ring to adapter */
3420 		adapter->rx_ring[rxr_idx] = ring;
3421 	}
3422 
3423 	return 0;
3424 }
3425 
3426 /**
3427  * igc_alloc_q_vectors - Allocate memory for interrupt vectors
3428  * @adapter: board private structure to initialize
3429  *
3430  * We allocate one q_vector per queue interrupt.  If allocation fails we
3431  * return -ENOMEM.
3432  */
3433 static int igc_alloc_q_vectors(struct igc_adapter *adapter)
3434 {
3435 	int rxr_remaining = adapter->num_rx_queues;
3436 	int txr_remaining = adapter->num_tx_queues;
3437 	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
3438 	int q_vectors = adapter->num_q_vectors;
3439 	int err;
3440 
3441 	if (q_vectors >= (rxr_remaining + txr_remaining)) {
3442 		for (; rxr_remaining; v_idx++) {
3443 			err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
3444 						 0, 0, 1, rxr_idx);
3445 
3446 			if (err)
3447 				goto err_out;
3448 
3449 			/* update counts and index */
3450 			rxr_remaining--;
3451 			rxr_idx++;
3452 		}
3453 	}
3454 
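	/* Spread the remaining Tx/Rx queues evenly across the remaining
	 * vectors; DIV_ROUND_UP biases the earlier vectors when the
	 * counts do not divide evenly.
	 */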
3455 	for (; v_idx < q_vectors; v_idx++) {
3456 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
3457 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
3458 
3459 		err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
3460 					 tqpv, txr_idx, rqpv, rxr_idx);
3461 
3462 		if (err)
3463 			goto err_out;
3464 
3465 		/* update counts and index */
3466 		rxr_remaining -= rqpv;
3467 		txr_remaining -= tqpv;
3468 		rxr_idx++;
3469 		txr_idx++;
3470 	}
3471 
3472 	return 0;
3473 
3474 err_out:
3475 	adapter->num_tx_queues = 0;
3476 	adapter->num_rx_queues = 0;
3477 	adapter->num_q_vectors = 0;
3478 
3479 	while (v_idx--)
3480 		igc_free_q_vector(adapter, v_idx);
3481 
3482 	return -ENOMEM;
3483 }
3484 
3485 /**
3486  * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
3487  * @adapter: Pointer to adapter structure
3488  * @msix: boolean for MSI-X capability
3489  *
3490  * This function initializes the interrupts and allocates all of the queues.
3491  */
3492 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
3493 {
3494 	struct net_device *dev = adapter->netdev;
3495 	int err = 0;
3496 
3497 	igc_set_interrupt_capability(adapter, msix);
3498 
3499 	err = igc_alloc_q_vectors(adapter);
3500 	if (err) {
3501 		netdev_err(dev, "Unable to allocate memory for vectors\n");
3502 		goto err_alloc_q_vectors;
3503 	}
3504 
3505 	igc_cache_ring_register(adapter);
3506 
3507 	return 0;
3508 
3509 err_alloc_q_vectors:
3510 	igc_reset_interrupt_capability(adapter);
3511 	return err;
3512 }
3513 
3514 /**
3515  * igc_sw_init - Initialize general software structures (struct igc_adapter)
3516  * @adapter: board private structure to initialize
3517  *
3518  * igc_sw_init initializes the Adapter private data structure.
3519  * Fields are initialized based on PCI device information and
3520  * OS network device settings (MTU size).
3521  */
3522 static int igc_sw_init(struct igc_adapter *adapter)
3523 {
3524 	struct net_device *netdev = adapter->netdev;
3525 	struct pci_dev *pdev = adapter->pdev;
3526 	struct igc_hw *hw = &adapter->hw;
3527 
3528 	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3529 
3530 	/* set default ring sizes */
3531 	adapter->tx_ring_count = IGC_DEFAULT_TXD;
3532 	adapter->rx_ring_count = IGC_DEFAULT_RXD;
3533 
3534 	/* set default ITR values */
3535 	adapter->rx_itr_setting = IGC_DEFAULT_ITR;
3536 	adapter->tx_itr_setting = IGC_DEFAULT_ITR;
3537 
3538 	/* set default work limits */
3539 	adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
3540 
3541 	/* set the default max frame size based on the current MTU */
3542 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
3543 				VLAN_HLEN;
3544 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3545 
3546 	mutex_init(&adapter->nfc_rule_lock);
3547 	INIT_LIST_HEAD(&adapter->nfc_rule_list);
3548 	adapter->nfc_rule_count = 0;
3549 
3550 	spin_lock_init(&adapter->stats64_lock);
3551 	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
3552 	adapter->flags |= IGC_FLAG_HAS_MSIX;
3553 
3554 	igc_init_queue_configuration(adapter);
3555 
3556 	/* This call may decrease the number of queues */
3557 	if (igc_init_interrupt_scheme(adapter, true)) {
3558 		netdev_err(netdev, "Unable to allocate memory for queues\n");
3559 		return -ENOMEM;
3560 	}
3561 
3562 	/* Explicitly disable IRQ since the NIC can be in any state. */
3563 	igc_irq_disable(adapter);
3564 
3565 	set_bit(__IGC_DOWN, &adapter->state);
3566 
3567 	return 0;
3568 }
3569 
3570 /**
3571  * igc_up - Open the interface and prepare it to handle traffic
3572  * @adapter: board private structure
3573  */
3574 void igc_up(struct igc_adapter *adapter)
3575 {
3576 	struct igc_hw *hw = &adapter->hw;
3577 	int i = 0;
3578 
3579 	/* hardware has been reset, we need to reload some things */
3580 	igc_configure(adapter);
3581 
3582 	clear_bit(__IGC_DOWN, &adapter->state);
3583 
3584 	for (i = 0; i < adapter->num_q_vectors; i++)
3585 		napi_enable(&adapter->q_vector[i]->napi);
3586 
3587 	if (adapter->msix_entries)
3588 		igc_configure_msix(adapter);
3589 	else
3590 		igc_assign_vector(adapter->q_vector[0], 0);
3591 
3592 	/* Clear any pending interrupts. */
3593 	rd32(IGC_ICR);
3594 	igc_irq_enable(adapter);
3595 
3596 	netif_tx_start_all_queues(adapter->netdev);
3597 
3598 	/* start the watchdog. */
3599 	hw->mac.get_link_status = 1;
3600 	schedule_work(&adapter->watchdog_task);
3601 }
3602 
3603 /**
3604  * igc_update_stats - Update the board statistics counters
3605  * @adapter: board private structure
3606  */
3607 void igc_update_stats(struct igc_adapter *adapter)
3608 {
3609 	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
3610 	struct pci_dev *pdev = adapter->pdev;
3611 	struct igc_hw *hw = &adapter->hw;
3612 	u64 _bytes, _packets;
3613 	u64 bytes, packets;
3614 	unsigned int start;
3615 	u32 mpc;
3616 	int i;
3617 
3618 	/* Prevent stats update while adapter is being reset, or if the pci
3619 	 * connection is down.
3620 	 */
3621 	if (adapter->link_speed == 0)
3622 		return;
3623 	if (pci_channel_offline(pdev))
3624 		return;
3625 
3626 	packets = 0;
3627 	bytes = 0;
3628 
3629 	rcu_read_lock();
3630 	for (i = 0; i < adapter->num_rx_queues; i++) {
3631 		struct igc_ring *ring = adapter->rx_ring[i];
3632 		u32 rqdpc = rd32(IGC_RQDPC(i));
3633 
3634 		if (hw->mac.type >= igc_i225)
3635 			wr32(IGC_RQDPC(i), 0);
3636 
3637 		if (rqdpc) {
3638 			ring->rx_stats.drops += rqdpc;
3639 			net_stats->rx_fifo_errors += rqdpc;
3640 		}
3641 
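		/* u64_stats fetch/retry gives a consistent snapshot of the
		 * 64-bit byte/packet counters, even on 32-bit systems where
		 * the writer side cannot update them atomically.
		 */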
3642 		do {
3643 			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
3644 			_bytes = ring->rx_stats.bytes;
3645 			_packets = ring->rx_stats.packets;
3646 		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
3647 		bytes += _bytes;
3648 		packets += _packets;
3649 	}
3650 
3651 	net_stats->rx_bytes = bytes;
3652 	net_stats->rx_packets = packets;
3653 
3654 	packets = 0;
3655 	bytes = 0;
3656 	for (i = 0; i < adapter->num_tx_queues; i++) {
3657 		struct igc_ring *ring = adapter->tx_ring[i];
3658 
3659 		do {
3660 			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
3661 			_bytes = ring->tx_stats.bytes;
3662 			_packets = ring->tx_stats.packets;
3663 		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
3664 		bytes += _bytes;
3665 		packets += _packets;
3666 	}
3667 	net_stats->tx_bytes = bytes;
3668 	net_stats->tx_packets = packets;
3669 	rcu_read_unlock();
3670 
3671 	/* read stats registers */
3672 	adapter->stats.crcerrs += rd32(IGC_CRCERRS);
3673 	adapter->stats.gprc += rd32(IGC_GPRC);
3674 	adapter->stats.gorc += rd32(IGC_GORCL);
3675 	rd32(IGC_GORCH); /* clear GORCL */
3676 	adapter->stats.bprc += rd32(IGC_BPRC);
3677 	adapter->stats.mprc += rd32(IGC_MPRC);
3678 	adapter->stats.roc += rd32(IGC_ROC);
3679 
3680 	adapter->stats.prc64 += rd32(IGC_PRC64);
3681 	adapter->stats.prc127 += rd32(IGC_PRC127);
3682 	adapter->stats.prc255 += rd32(IGC_PRC255);
3683 	adapter->stats.prc511 += rd32(IGC_PRC511);
3684 	adapter->stats.prc1023 += rd32(IGC_PRC1023);
3685 	adapter->stats.prc1522 += rd32(IGC_PRC1522);
3686 	adapter->stats.tlpic += rd32(IGC_TLPIC);
3687 	adapter->stats.rlpic += rd32(IGC_RLPIC);
3688 
3689 	mpc = rd32(IGC_MPC);
3690 	adapter->stats.mpc += mpc;
3691 	net_stats->rx_fifo_errors += mpc;
3692 	adapter->stats.scc += rd32(IGC_SCC);
3693 	adapter->stats.ecol += rd32(IGC_ECOL);
3694 	adapter->stats.mcc += rd32(IGC_MCC);
3695 	adapter->stats.latecol += rd32(IGC_LATECOL);
3696 	adapter->stats.dc += rd32(IGC_DC);
3697 	adapter->stats.rlec += rd32(IGC_RLEC);
3698 	adapter->stats.xonrxc += rd32(IGC_XONRXC);
3699 	adapter->stats.xontxc += rd32(IGC_XONTXC);
3700 	adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
3701 	adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
3702 	adapter->stats.fcruc += rd32(IGC_FCRUC);
3703 	adapter->stats.gptc += rd32(IGC_GPTC);
3704 	adapter->stats.gotc += rd32(IGC_GOTCL);
3705 	rd32(IGC_GOTCH); /* clear GOTCL */
3706 	adapter->stats.rnbc += rd32(IGC_RNBC);
3707 	adapter->stats.ruc += rd32(IGC_RUC);
3708 	adapter->stats.rfc += rd32(IGC_RFC);
3709 	adapter->stats.rjc += rd32(IGC_RJC);
3710 	adapter->stats.tor += rd32(IGC_TORH);
3711 	adapter->stats.tot += rd32(IGC_TOTH);
3712 	adapter->stats.tpr += rd32(IGC_TPR);
3713 
3714 	adapter->stats.ptc64 += rd32(IGC_PTC64);
3715 	adapter->stats.ptc127 += rd32(IGC_PTC127);
3716 	adapter->stats.ptc255 += rd32(IGC_PTC255);
3717 	adapter->stats.ptc511 += rd32(IGC_PTC511);
3718 	adapter->stats.ptc1023 += rd32(IGC_PTC1023);
3719 	adapter->stats.ptc1522 += rd32(IGC_PTC1522);
3720 
3721 	adapter->stats.mptc += rd32(IGC_MPTC);
3722 	adapter->stats.bptc += rd32(IGC_BPTC);
3723 
3724 	adapter->stats.tpt += rd32(IGC_TPT);
3725 	adapter->stats.colc += rd32(IGC_COLC);
3726 	adapter->stats.colc += rd32(IGC_RERC);
3727 
3728 	adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
3729 
3730 	adapter->stats.tsctc += rd32(IGC_TSCTC);
3731 
3732 	adapter->stats.iac += rd32(IGC_IAC);
3733 
3734 	/* Fill out the OS statistics structure */
3735 	net_stats->multicast = adapter->stats.mprc;
3736 	net_stats->collisions = adapter->stats.colc;
3737 
3738 	/* Rx Errors */
3739 
3740 	/* RLEC on some newer hardware can be incorrect so build
3741 	 * our own version based on RUC and ROC
3742 	 */
3743 	net_stats->rx_errors = adapter->stats.rxerrc +
3744 		adapter->stats.crcerrs + adapter->stats.algnerrc +
3745 		adapter->stats.ruc + adapter->stats.roc +
3746 		adapter->stats.cexterr;
3747 	net_stats->rx_length_errors = adapter->stats.ruc +
3748 				      adapter->stats.roc;
3749 	net_stats->rx_crc_errors = adapter->stats.crcerrs;
3750 	net_stats->rx_frame_errors = adapter->stats.algnerrc;
3751 	net_stats->rx_missed_errors = adapter->stats.mpc;
3752 
3753 	/* Tx Errors */
3754 	net_stats->tx_errors = adapter->stats.ecol +
3755 			       adapter->stats.latecol;
3756 	net_stats->tx_aborted_errors = adapter->stats.ecol;
3757 	net_stats->tx_window_errors = adapter->stats.latecol;
3758 	net_stats->tx_carrier_errors = adapter->stats.tncrs;
3759 
3760 	/* Tx Dropped needs to be maintained elsewhere */
3761 
3762 	/* Management Stats */
3763 	adapter->stats.mgptc += rd32(IGC_MGTPTC);
3764 	adapter->stats.mgprc += rd32(IGC_MGTPRC);
3765 	adapter->stats.mgpdc += rd32(IGC_MGTPDC);
3766 }
3767 
3768 /**
3769  * igc_down - Close the interface
3770  * @adapter: board private structure
3771  */
3772 void igc_down(struct igc_adapter *adapter)
3773 {
3774 	struct net_device *netdev = adapter->netdev;
3775 	struct igc_hw *hw = &adapter->hw;
3776 	u32 tctl, rctl;
3777 	int i = 0;
3778 
3779 	set_bit(__IGC_DOWN, &adapter->state);
3780 
3781 	igc_ptp_suspend(adapter);
3782 
3783 	/* disable receives in the hardware */
3784 	rctl = rd32(IGC_RCTL);
3785 	wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
3786 	/* flush and sleep below */
3787 
3788 	/* set trans_start so we don't get spurious watchdogs during reset */
3789 	netif_trans_update(netdev);
3790 
3791 	netif_carrier_off(netdev);
3792 	netif_tx_stop_all_queues(netdev);
3793 
3794 	/* disable transmits in the hardware */
3795 	tctl = rd32(IGC_TCTL);
3796 	tctl &= ~IGC_TCTL_EN;
3797 	wr32(IGC_TCTL, tctl);
3798 	/* flush both disables and wait for them to finish */
3799 	wrfl();
3800 	usleep_range(10000, 20000);
3801 
3802 	igc_irq_disable(adapter);
3803 
3804 	adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
3805 
3806 	for (i = 0; i < adapter->num_q_vectors; i++) {
3807 		if (adapter->q_vector[i]) {
3808 			napi_synchronize(&adapter->q_vector[i]->napi);
3809 			napi_disable(&adapter->q_vector[i]->napi);
3810 		}
3811 	}
3812 
3813 	del_timer_sync(&adapter->watchdog_timer);
3814 	del_timer_sync(&adapter->phy_info_timer);
3815 
3816 	/* record the stats before reset */
3817 	spin_lock(&adapter->stats64_lock);
3818 	igc_update_stats(adapter);
3819 	spin_unlock(&adapter->stats64_lock);
3820 
3821 	adapter->link_speed = 0;
3822 	adapter->link_duplex = 0;
3823 
3824 	if (!pci_channel_offline(adapter->pdev))
3825 		igc_reset(adapter);
3826 
3827 	/* clear VLAN promisc flag so VFTA will be updated if necessary */
3828 	adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
3829 
3830 	igc_clean_all_tx_rings(adapter);
3831 	igc_clean_all_rx_rings(adapter);
3832 }
3833 
3834 void igc_reinit_locked(struct igc_adapter *adapter)
3835 {
3836 	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
3837 		usleep_range(1000, 2000);
3838 	igc_down(adapter);
3839 	igc_up(adapter);
3840 	clear_bit(__IGC_RESETTING, &adapter->state);
3841 }
3842 
3843 static void igc_reset_task(struct work_struct *work)
3844 {
3845 	struct igc_adapter *adapter;
3846 
3847 	adapter = container_of(work, struct igc_adapter, reset_task);
3848 
3849 	igc_rings_dump(adapter);
3850 	igc_regs_dump(adapter);
3851 	netdev_err(adapter->netdev, "Reset adapter\n");
3852 	igc_reinit_locked(adapter);
3853 }
3854 
3855 /**
3856  * igc_change_mtu - Change the Maximum Transfer Unit
3857  * @netdev: network interface device structure
3858  * @new_mtu: new MTU value
3859  *
3860  * Returns 0 on success, negative on failure
3861  */
3862 static int igc_change_mtu(struct net_device *netdev, int new_mtu)
3863 {
3864 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
3865 	struct igc_adapter *adapter = netdev_priv(netdev);
3866 
3867 	/* adjust max frame to be at least the size of a standard frame */
3868 	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
3869 		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
3870 
3871 	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
3872 		usleep_range(1000, 2000);
3873 
3874 	/* igc_down has a dependency on max_frame_size */
3875 	adapter->max_frame_size = max_frame;
3876 
3877 	if (netif_running(netdev))
3878 		igc_down(adapter);
3879 
3880 	netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
3881 	netdev->mtu = new_mtu;
3882 
3883 	if (netif_running(netdev))
3884 		igc_up(adapter);
3885 	else
3886 		igc_reset(adapter);
3887 
3888 	clear_bit(__IGC_RESETTING, &adapter->state);
3889 
3890 	return 0;
3891 }
3892 
3893 /**
3894  * igc_get_stats64 - Get System Network Statistics
3895  * @netdev: network interface device structure
3896  * @stats: rtnl_link_stats64 pointer
3897  *
3898  * Copies the device statistics into @stats.
3899  * The statistics are updated here and also from the timer callback.
3900  */
3901 static void igc_get_stats64(struct net_device *netdev,
3902 			    struct rtnl_link_stats64 *stats)
3903 {
3904 	struct igc_adapter *adapter = netdev_priv(netdev);
3905 
3906 	spin_lock(&adapter->stats64_lock);
3907 	if (!test_bit(__IGC_RESETTING, &adapter->state))
3908 		igc_update_stats(adapter);
3909 	memcpy(stats, &adapter->stats64, sizeof(*stats));
3910 	spin_unlock(&adapter->stats64_lock);
3911 }
3912 
3913 static netdev_features_t igc_fix_features(struct net_device *netdev,
3914 					  netdev_features_t features)
3915 {
3916 	/* Since there is no support for separate Rx/Tx vlan accel
3917 	 * enable/disable make sure Tx flag is always in same state as Rx.
3918 	 */
3919 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
3920 		features |= NETIF_F_HW_VLAN_CTAG_TX;
3921 	else
3922 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
3923 
3924 	return features;
3925 }
3926 
3927 static int igc_set_features(struct net_device *netdev,
3928 			    netdev_features_t features)
3929 {
3930 	netdev_features_t changed = netdev->features ^ features;
3931 	struct igc_adapter *adapter = netdev_priv(netdev);
3932 
3933 	/* only RXALL and NTUPLE changes require further handling here */
3934 	if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
3935 		return 0;
3936 
3937 	if (!(features & NETIF_F_NTUPLE))
3938 		igc_flush_nfc_rules(adapter);
3939 
3940 	netdev->features = features;
3941 
3942 	if (netif_running(netdev))
3943 		igc_reinit_locked(adapter);
3944 	else
3945 		igc_reset(adapter);
3946 
3947 	return 1;
3948 }
3949 
3950 static netdev_features_t
3951 igc_features_check(struct sk_buff *skb, struct net_device *dev,
3952 		   netdev_features_t features)
3953 {
3954 	unsigned int network_hdr_len, mac_hdr_len;
3955 
3956 	/* Make certain the headers can be described by a context descriptor */
3957 	mac_hdr_len = skb_network_header(skb) - skb->data;
3958 	if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
3959 		return features & ~(NETIF_F_HW_CSUM |
3960 				    NETIF_F_SCTP_CRC |
3961 				    NETIF_F_HW_VLAN_CTAG_TX |
3962 				    NETIF_F_TSO |
3963 				    NETIF_F_TSO6);
3964 
3965 	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
3966 	if (unlikely(network_hdr_len >  IGC_MAX_NETWORK_HDR_LEN))
3967 		return features & ~(NETIF_F_HW_CSUM |
3968 				    NETIF_F_SCTP_CRC |
3969 				    NETIF_F_TSO |
3970 				    NETIF_F_TSO6);
3971 
3972 	/* We can only support IPv4 TSO in tunnels if we can mangle the
3973 	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
3974 	 */
3975 	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
3976 		features &= ~NETIF_F_TSO;
3977 
3978 	return features;
3979 }
3980 
3981 static void igc_tsync_interrupt(struct igc_adapter *adapter)
3982 {
3983 	struct igc_hw *hw = &adapter->hw;
3984 	u32 tsicr = rd32(IGC_TSICR);
3985 	u32 ack = 0;
3986 
3987 	if (tsicr & IGC_TSICR_TXTS) {
3988 		/* retrieve hardware timestamp */
3989 		schedule_work(&adapter->ptp_tx_work);
3990 		ack |= IGC_TSICR_TXTS;
3991 	}
3992 
3993 	/* acknowledge the interrupts */
3994 	wr32(IGC_TSICR, ack);
3995 }
3996 
3997 /**
3998  * igc_msix_other - msix other interrupt handler
3999  * @irq: interrupt number
4000  * @data: pointer to the igc_adapter structure (IRQ dev_id)
4001  */
4002 static irqreturn_t igc_msix_other(int irq, void *data)
4003 {
4004 	struct igc_adapter *adapter = data;
4005 	struct igc_hw *hw = &adapter->hw;
4006 	u32 icr = rd32(IGC_ICR);
4007 
4008 	/* reading ICR causes bit 31 of EICR to be cleared */
4009 	if (icr & IGC_ICR_DRSTA)
4010 		schedule_work(&adapter->reset_task);
4011 
4012 	if (icr & IGC_ICR_DOUTSYNC) {
4013 		/* HW is reporting DMA is out of sync */
4014 		adapter->stats.doosync++;
4015 	}
4016 
4017 	if (icr & IGC_ICR_LSC) {
4018 		hw->mac.get_link_status = 1;
4019 		/* guard against interrupt when we're going down */
4020 		if (!test_bit(__IGC_DOWN, &adapter->state))
4021 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
4022 	}
4023 
4024 	if (icr & IGC_ICR_TS)
4025 		igc_tsync_interrupt(adapter);
4026 
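	/* re-arm the "other" cause vector by setting its bit in the
	 * extended interrupt mask
	 */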
4027 	wr32(IGC_EIMS, adapter->eims_other);
4028 
4029 	return IRQ_HANDLED;
4030 }
4031 
4032 static void igc_write_itr(struct igc_q_vector *q_vector)
4033 {
4034 	u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
4035 
4036 	if (!q_vector->set_itr)
4037 		return;
4038 
4039 	if (!itr_val)
4040 		itr_val = IGC_ITR_VAL_MASK;
4041 
4042 	itr_val |= IGC_EITR_CNT_IGNR;
4043 
4044 	writel(itr_val, q_vector->itr_register);
4045 	q_vector->set_itr = 0;
4046 }
4047 
4048 static irqreturn_t igc_msix_ring(int irq, void *data)
4049 {
4050 	struct igc_q_vector *q_vector = data;
4051 
4052 	/* Write the ITR value calculated from the previous interrupt. */
4053 	igc_write_itr(q_vector);
4054 
4055 	napi_schedule(&q_vector->napi);
4056 
4057 	return IRQ_HANDLED;
4058 }
4059 
4060 /**
4061  * igc_request_msix - Initialize MSI-X interrupts
4062  * @adapter: Pointer to adapter structure
4063  *
4064  * igc_request_msix requests interrupts from the kernel for the previously
4065  * allocated MSI-X vectors.
4066  */
4067 static int igc_request_msix(struct igc_adapter *adapter)
4068 {
4069 	int i = 0, err = 0, vector = 0, free_vector = 0;
4070 	struct net_device *netdev = adapter->netdev;
4071 
4072 	err = request_irq(adapter->msix_entries[vector].vector,
4073 			  &igc_msix_other, 0, netdev->name, adapter);
4074 	if (err)
4075 		goto err_out;
4076 
4077 	for (i = 0; i < adapter->num_q_vectors; i++) {
4078 		struct igc_q_vector *q_vector = adapter->q_vector[i];
4079 
4080 		vector++;
4081 
4082 		q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
4083 
4084 		if (q_vector->rx.ring && q_vector->tx.ring)
4085 			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
4086 				q_vector->rx.ring->queue_index);
4087 		else if (q_vector->tx.ring)
4088 			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
4089 				q_vector->tx.ring->queue_index);
4090 		else if (q_vector->rx.ring)
4091 			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
4092 				q_vector->rx.ring->queue_index);
4093 		else
4094 			sprintf(q_vector->name, "%s-unused", netdev->name);
4095 
4096 		err = request_irq(adapter->msix_entries[vector].vector,
4097 				  igc_msix_ring, 0, q_vector->name,
4098 				  q_vector);
4099 		if (err)
4100 			goto err_free;
4101 	}
4102 
4103 	igc_configure_msix(adapter);
4104 	return 0;
4105 
4106 err_free:
4107 	/* free already assigned IRQs */
4108 	free_irq(adapter->msix_entries[free_vector++].vector, adapter);
4109 
4110 	vector--;
4111 	for (i = 0; i < vector; i++) {
4112 		free_irq(adapter->msix_entries[free_vector++].vector,
4113 			 adapter->q_vector[i]);
4114 	}
4115 err_out:
4116 	return err;
4117 }
4118 
4119 /**
4120  * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
4121  * @adapter: Pointer to adapter structure
4122  *
4123  * This function resets the device so that it has 0 rx queues, tx queues, and
4124  * MSI-X interrupts allocated.
4125  */
4126 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
4127 {
4128 	igc_free_q_vectors(adapter);
4129 	igc_reset_interrupt_capability(adapter);
4130 }
4131 
4132 /* Need to wait a few seconds after link up to get diagnostic information from
4133  * the phy
4134  */
4135 static void igc_update_phy_info(struct timer_list *t)
4136 {
4137 	struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
4138 
4139 	igc_get_phy_info(&adapter->hw);
4140 }
4141 
4142 /**
4143  * igc_has_link - check shared code for link and determine up/down
4144  * @adapter: pointer to driver private info
4145  */
4146 bool igc_has_link(struct igc_adapter *adapter)
4147 {
4148 	struct igc_hw *hw = &adapter->hw;
4149 	bool link_active = false;
4150 
4151 	/* get_link_status is set on LSC (link status) interrupt or
4152 	 * rx sequence error interrupt.  It stays set until
4153 	 * igc_check_for_link() establishes link, for copper
4154 	 * adapters ONLY
4155 	 */
4156 	switch (hw->phy.media_type) {
4157 	case igc_media_type_copper:
4158 		if (!hw->mac.get_link_status)
4159 			return true;
4160 		hw->mac.ops.check_for_link(hw);
4161 		link_active = !hw->mac.get_link_status;
4162 		break;
4163 	default:
4164 	case igc_media_type_unknown:
4165 		break;
4166 	}
4167 
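	/* With the internal I225 PHY, arm a short link debounce on the
	 * first link-up after carrier-off; the watchdog then ignores
	 * link for about a second (IGC_FLAG_NEED_LINK_UPDATE),
	 * presumably to filter spurious link flaps.
	 */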
4168 	if (hw->mac.type == igc_i225 &&
4169 	    hw->phy.id == I225_I_PHY_ID) {
4170 		if (!netif_carrier_ok(adapter->netdev)) {
4171 			adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
4172 		} else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
4173 			adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
4174 			adapter->link_check_timeout = jiffies;
4175 		}
4176 	}
4177 
4178 	return link_active;
4179 }
4180 
4181 /**
4182  * igc_watchdog - Timer Call-back
4183  * @t: timer for the watchdog
4184  */
4185 static void igc_watchdog(struct timer_list *t)
4186 {
4187 	struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
4188 	/* Do the rest outside of interrupt context */
4189 	schedule_work(&adapter->watchdog_task);
4190 }
4191 
4192 static void igc_watchdog_task(struct work_struct *work)
4193 {
4194 	struct igc_adapter *adapter = container_of(work,
4195 						   struct igc_adapter,
4196 						   watchdog_task);
4197 	struct net_device *netdev = adapter->netdev;
4198 	struct igc_hw *hw = &adapter->hw;
4199 	struct igc_phy_info *phy = &hw->phy;
4200 	u16 phy_data, retry_count = 20;
4201 	u32 link;
4202 	int i;
4203 
4204 	link = igc_has_link(adapter);
4205 
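	/* while the link debounce from igc_has_link() is armed, keep
	 * reporting link down until it has been stable for about a second
	 */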
4206 	if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
4207 		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
4208 			adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
4209 		else
4210 			link = false;
4211 	}
4212 
4213 	if (link) {
4214 		/* Cancel scheduled suspend requests. */
4215 		pm_runtime_resume(netdev->dev.parent);
4216 
4217 		if (!netif_carrier_ok(netdev)) {
4218 			u32 ctrl;
4219 
4220 			hw->mac.ops.get_speed_and_duplex(hw,
4221 							 &adapter->link_speed,
4222 							 &adapter->link_duplex);
4223 
4224 			ctrl = rd32(IGC_CTRL);
4225 			/* Link status message must follow this format */
4226 			netdev_info(netdev,
4227 				    "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4228 				    adapter->link_speed,
4229 				    adapter->link_duplex == FULL_DUPLEX ?
4230 				    "Full" : "Half",
4231 				    (ctrl & IGC_CTRL_TFCE) &&
4232 				    (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
4233 				    (ctrl & IGC_CTRL_RFCE) ?  "RX" :
4234 				    (ctrl & IGC_CTRL_TFCE) ?  "TX" : "None");
4235 
4236 			/* disable EEE if enabled */
4237 			if ((adapter->flags & IGC_FLAG_EEE) &&
4238 			    adapter->link_duplex == HALF_DUPLEX) {
4239 				netdev_info(netdev,
4240 					    "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
4241 				adapter->hw.dev_spec._base.eee_enable = false;
4242 				adapter->flags &= ~IGC_FLAG_EEE;
4243 			}
4244 
4245 			/* check if SmartSpeed worked */
4246 			igc_check_downshift(hw);
4247 			if (phy->speed_downgraded)
4248 				netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
4249 
4250 			/* adjust timeout factor according to speed/duplex */
4251 			adapter->tx_timeout_factor = 1;
4252 			switch (adapter->link_speed) {
4253 			case SPEED_10:
4254 				adapter->tx_timeout_factor = 14;
4255 				break;
4256 			case SPEED_100:
4257 				/* maybe add some timeout factor ? */
4258 				break;
4259 			}
4260 
4261 			if (adapter->link_speed != SPEED_1000)
4262 				goto no_wait;
4263 
4264 			/* wait for Remote receiver status OK */
4265 retry_read_status:
4266 			if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
4267 					      &phy_data)) {
4268 				if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
4269 				    retry_count) {
4270 					msleep(100);
4271 					retry_count--;
4272 					goto retry_read_status;
4273 				} else if (!retry_count) {
4274 					netdev_err(netdev, "exceed max 2 second\n");
4275 				}
4276 			} else {
4277 				netdev_err(netdev, "read 1000Base-T Status Reg\n");
4278 			}
4279 no_wait:
4280 			netif_carrier_on(netdev);
4281 
4282 			/* link state has changed, schedule phy info update */
4283 			if (!test_bit(__IGC_DOWN, &adapter->state))
4284 				mod_timer(&adapter->phy_info_timer,
4285 					  round_jiffies(jiffies + 2 * HZ));
4286 		}
4287 	} else {
4288 		if (netif_carrier_ok(netdev)) {
4289 			adapter->link_speed = 0;
4290 			adapter->link_duplex = 0;
4291 
4292 			/* Link status message must follow this format */
4293 			netdev_info(netdev, "NIC Link is Down\n");
4294 			netif_carrier_off(netdev);
4295 
4296 			/* link state has changed, schedule phy info update */
4297 			if (!test_bit(__IGC_DOWN, &adapter->state))
4298 				mod_timer(&adapter->phy_info_timer,
4299 					  round_jiffies(jiffies + 2 * HZ));
4300 
4301 			/* link is down, time to check for alternate media */
4302 			if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
4303 				if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
4304 					schedule_work(&adapter->reset_task);
4305 					/* return immediately */
4306 					return;
4307 				}
4308 			}
4309 			pm_schedule_suspend(netdev->dev.parent,
4310 					    MSEC_PER_SEC * 5);
4311 
4312 		/* also check for alternate media here */
4313 		} else if (!netif_carrier_ok(netdev) &&
4314 			   (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
4315 			if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
4316 				schedule_work(&adapter->reset_task);
4317 				/* return immediately */
4318 				return;
4319 			}
4320 		}
4321 	}
4322 
4323 	spin_lock(&adapter->stats64_lock);
4324 	igc_update_stats(adapter);
4325 	spin_unlock(&adapter->stats64_lock);
4326 
4327 	for (i = 0; i < adapter->num_tx_queues; i++) {
4328 		struct igc_ring *tx_ring = adapter->tx_ring[i];
4329 
4330 		if (!netif_carrier_ok(netdev)) {
4331 			/* We've lost link, so the controller stops DMA,
4332 			 * but we've got queued Tx work that's never going
4333 			 * to get done, so reset controller to flush Tx.
4334 			 * (Do the reset outside of interrupt context).
4335 			 */
4336 			if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
4337 				adapter->tx_timeout_count++;
4338 				schedule_work(&adapter->reset_task);
4339 				/* return immediately since reset is imminent */
4340 				return;
4341 			}
4342 		}
4343 
4344 		/* Force detection of hung controller every watchdog period */
4345 		set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
4346 	}
4347 
4348 	/* Cause software interrupt to ensure Rx ring is cleaned */
4349 	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
4350 		u32 eics = 0;
4351 
4352 		for (i = 0; i < adapter->num_q_vectors; i++)
4353 			eics |= adapter->q_vector[i]->eims_value;
4354 		wr32(IGC_EICS, eics);
4355 	} else {
4356 		wr32(IGC_ICS, IGC_ICS_RXDMT0);
4357 	}
4358 
4359 	igc_ptp_tx_hang(adapter);
4360 
4361 	/* Reset the timer */
4362 	if (!test_bit(__IGC_DOWN, &adapter->state)) {
4363 		if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
4364 			mod_timer(&adapter->watchdog_timer,
4365 				  round_jiffies(jiffies +  HZ));
4366 		else
4367 			mod_timer(&adapter->watchdog_timer,
4368 				  round_jiffies(jiffies + 2 * HZ));
4369 	}
4370 }
4371 
4372 /**
4373  * igc_intr_msi - Interrupt Handler
4374  * @irq: interrupt number
4375  * @data: pointer to the igc_adapter structure (IRQ dev_id)
4376  */
4377 static irqreturn_t igc_intr_msi(int irq, void *data)
4378 {
4379 	struct igc_adapter *adapter = data;
4380 	struct igc_q_vector *q_vector = adapter->q_vector[0];
4381 	struct igc_hw *hw = &adapter->hw;
4382 	/* read ICR disables interrupts using IAM */
4383 	u32 icr = rd32(IGC_ICR);
4384 
4385 	igc_write_itr(q_vector);
4386 
4387 	if (icr & IGC_ICR_DRSTA)
4388 		schedule_work(&adapter->reset_task);
4389 
4390 	if (icr & IGC_ICR_DOUTSYNC) {
4391 		/* HW is reporting DMA is out of sync */
4392 		adapter->stats.doosync++;
4393 	}
4394 
4395 	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
4396 		hw->mac.get_link_status = 1;
4397 		if (!test_bit(__IGC_DOWN, &adapter->state))
4398 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
4399 	}
4400 
4401 	napi_schedule(&q_vector->napi);
4402 
4403 	return IRQ_HANDLED;
4404 }
4405 
4406 /**
4407  * igc_intr - Legacy Interrupt Handler
4408  * @irq: interrupt number
4409  * @data: pointer to the igc_adapter structure (IRQ dev_id)
4410  */
4411 static irqreturn_t igc_intr(int irq, void *data)
4412 {
4413 	struct igc_adapter *adapter = data;
4414 	struct igc_q_vector *q_vector = adapter->q_vector[0];
4415 	struct igc_hw *hw = &adapter->hw;
4416 	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
4417 	 * need for the IMC write
4418 	 */
4419 	u32 icr = rd32(IGC_ICR);
4420 
4421 	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4422 	 * not set, then the adapter didn't send an interrupt
4423 	 */
4424 	if (!(icr & IGC_ICR_INT_ASSERTED))
4425 		return IRQ_NONE;
4426 
4427 	igc_write_itr(q_vector);
4428 
4429 	if (icr & IGC_ICR_DRSTA)
4430 		schedule_work(&adapter->reset_task);
4431 
4432 	if (icr & IGC_ICR_DOUTSYNC) {
4433 		/* HW is reporting DMA is out of sync */
4434 		adapter->stats.doosync++;
4435 	}
4436 
4437 	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
4438 		hw->mac.get_link_status = 1;
4439 		/* guard against interrupt when we're going down */
4440 		if (!test_bit(__IGC_DOWN, &adapter->state))
4441 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
4442 	}
4443 
4444 	napi_schedule(&q_vector->napi);
4445 
4446 	return IRQ_HANDLED;
4447 }
4448 
4449 static void igc_free_irq(struct igc_adapter *adapter)
4450 {
4451 	if (adapter->msix_entries) {
4452 		int vector = 0, i;
4453 
4454 		free_irq(adapter->msix_entries[vector++].vector, adapter);
4455 
4456 		for (i = 0; i < adapter->num_q_vectors; i++)
4457 			free_irq(adapter->msix_entries[vector++].vector,
4458 				 adapter->q_vector[i]);
4459 	} else {
4460 		free_irq(adapter->pdev->irq, adapter);
4461 	}
4462 }
4463 
4464 /**
4465  * igc_request_irq - initialize interrupts
4466  * @adapter: Pointer to adapter structure
4467  *
4468  * Attempts to configure interrupts using the best available
4469  * capabilities of the hardware and kernel.
4470  */
4471 static int igc_request_irq(struct igc_adapter *adapter)
4472 {
4473 	struct net_device *netdev = adapter->netdev;
4474 	struct pci_dev *pdev = adapter->pdev;
4475 	int err = 0;
4476 
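	/* preferred order: MSI-X per-queue vectors, then single-vector
	 * MSI, then legacy INTx as a last resort
	 */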
4477 	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
4478 		err = igc_request_msix(adapter);
4479 		if (!err)
4480 			goto request_done;
4481 		/* fall back to MSI */
4482 		igc_free_all_tx_resources(adapter);
4483 		igc_free_all_rx_resources(adapter);
4484 
4485 		igc_clear_interrupt_scheme(adapter);
4486 		err = igc_init_interrupt_scheme(adapter, false);
4487 		if (err)
4488 			goto request_done;
4489 		igc_setup_all_tx_resources(adapter);
4490 		igc_setup_all_rx_resources(adapter);
4491 		igc_configure(adapter);
4492 	}
4493 
4494 	igc_assign_vector(adapter->q_vector[0], 0);
4495 
4496 	if (adapter->flags & IGC_FLAG_HAS_MSI) {
4497 		err = request_irq(pdev->irq, &igc_intr_msi, 0,
4498 				  netdev->name, adapter);
4499 		if (!err)
4500 			goto request_done;
4501 
4502 		/* fall back to legacy interrupts */
4503 		igc_reset_interrupt_capability(adapter);
4504 		adapter->flags &= ~IGC_FLAG_HAS_MSI;
4505 	}
4506 
4507 	err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
4508 			  netdev->name, adapter);
4509 
4510 	if (err)
4511 		netdev_err(netdev, "Error %d getting interrupt\n", err);
4512 
4513 request_done:
4514 	return err;
4515 }
4516 
4517 /**
4518  * __igc_open - Called when a network interface is made active
4519  * @netdev: network interface device structure
4520  * @resuming: boolean indicating if the device is resuming
4521  *
4522  * Returns 0 on success, negative value on failure
4523  *
4524  * The open entry point is called when a network interface is made
4525  * active by the system (IFF_UP).  At this point all resources needed
4526  * for transmit and receive operations are allocated, the interrupt
4527  * handler is registered with the OS, the watchdog timer is started,
4528  * and the stack is notified that the interface is ready.
4529  */
4530 static int __igc_open(struct net_device *netdev, bool resuming)
4531 {
4532 	struct igc_adapter *adapter = netdev_priv(netdev);
4533 	struct pci_dev *pdev = adapter->pdev;
4534 	struct igc_hw *hw = &adapter->hw;
4535 	int err = 0;
4536 	int i = 0;
4537 
4538 	/* disallow open during test */
4539 
4540 	if (test_bit(__IGC_TESTING, &adapter->state)) {
4541 		WARN_ON(resuming);
4542 		return -EBUSY;
4543 	}
4544 
4545 	if (!resuming)
4546 		pm_runtime_get_sync(&pdev->dev);
4547 
4548 	netif_carrier_off(netdev);
4549 
4550 	/* allocate transmit descriptors */
4551 	err = igc_setup_all_tx_resources(adapter);
4552 	if (err)
4553 		goto err_setup_tx;
4554 
4555 	/* allocate receive descriptors */
4556 	err = igc_setup_all_rx_resources(adapter);
4557 	if (err)
4558 		goto err_setup_rx;
4559 
4560 	igc_power_up_link(adapter);
4561 
4562 	igc_configure(adapter);
4563 
4564 	err = igc_request_irq(adapter);
4565 	if (err)
4566 		goto err_req_irq;
4567 
4568 	/* Notify the stack of the actual queue counts. */
4569 	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
4570 	if (err)
4571 		goto err_set_queues;
4572 
4573 	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
4574 	if (err)
4575 		goto err_set_queues;
4576 
4577 	clear_bit(__IGC_DOWN, &adapter->state);
4578 
4579 	for (i = 0; i < adapter->num_q_vectors; i++)
4580 		napi_enable(&adapter->q_vector[i]->napi);
4581 
4582 	/* Clear any pending interrupts. */
4583 	rd32(IGC_ICR);
4584 	igc_irq_enable(adapter);
4585 
4586 	if (!resuming)
4587 		pm_runtime_put(&pdev->dev);
4588 
4589 	netif_tx_start_all_queues(netdev);
4590 
4591 	/* start the watchdog. */
4592 	hw->mac.get_link_status = 1;
4593 	schedule_work(&adapter->watchdog_task);
4594 
4595 	return IGC_SUCCESS;
4596 
4597 err_set_queues:
4598 	igc_free_irq(adapter);
4599 err_req_irq:
4600 	igc_release_hw_control(adapter);
4601 	igc_power_down_phy_copper_base(&adapter->hw);
4602 	igc_free_all_rx_resources(adapter);
4603 err_setup_rx:
4604 	igc_free_all_tx_resources(adapter);
4605 err_setup_tx:
4606 	igc_reset(adapter);
4607 	if (!resuming)
4608 		pm_runtime_put(&pdev->dev);
4609 
4610 	return err;
4611 }
4612 
4613 int igc_open(struct net_device *netdev)
4614 {
4615 	return __igc_open(netdev, false);
4616 }
4617 
4618 /**
4619  * __igc_close - Disables a network interface
4620  * @netdev: network interface device structure
4621  * @suspending: boolean indicating the device is suspending
4622  *
4623  * Returns 0, this is not allowed to fail
4624  *
4625  * The close entry point is called when an interface is de-activated
4626  * by the OS.  The hardware is still under the driver's control, but
4627  * needs to be disabled.  A global MAC reset is issued to stop the
4628  * hardware, and all transmit and receive resources are freed.
4629  */
4630 static int __igc_close(struct net_device *netdev, bool suspending)
4631 {
4632 	struct igc_adapter *adapter = netdev_priv(netdev);
4633 	struct pci_dev *pdev = adapter->pdev;
4634 
4635 	WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
4636 
4637 	if (!suspending)
4638 		pm_runtime_get_sync(&pdev->dev);
4639 
4640 	igc_down(adapter);
4641 
4642 	igc_release_hw_control(adapter);
4643 
4644 	igc_free_irq(adapter);
4645 
4646 	igc_free_all_tx_resources(adapter);
4647 	igc_free_all_rx_resources(adapter);
4648 
4649 	if (!suspending)
4650 		pm_runtime_put_sync(&pdev->dev);
4651 
4652 	return 0;
4653 }
4654 
4655 int igc_close(struct net_device *netdev)
4656 {
4657 	if (netif_device_present(netdev) || netdev->dismantle)
4658 		return __igc_close(netdev, false);
4659 	return 0;
4660 }
4661 
4662 /**
4663  * igc_ioctl - Access the hwtstamp interface
4664  * @netdev: network interface device structure
4665  * @ifr: interface request data
4666  * @cmd: ioctl command
4667  **/
4668 static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4669 {
4670 	switch (cmd) {
4671 	case SIOCGHWTSTAMP:
4672 		return igc_ptp_get_ts_config(netdev, ifr);
4673 	case SIOCSHWTSTAMP:
4674 		return igc_ptp_set_ts_config(netdev, ifr);
4675 	default:
4676 		return -EOPNOTSUPP;
4677 	}
4678 }
4679 
4680 static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
4681 				      bool enable)
4682 {
4683 	struct igc_ring *ring;
4684 	int i;
4685 
4686 	if (queue < 0 || queue >= adapter->num_tx_queues)
4687 		return -EINVAL;
4688 
4689 	ring = adapter->tx_ring[queue];
4690 	ring->launchtime_enable = enable;
4691 
4692 	if (adapter->base_time)
4693 		return 0;
4694 
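	/* No Qbv (taprio) schedule is configured: fall back to a default
	 * one second cycle with every queue's window spanning the whole
	 * cycle, so launchtime alone can still be offloaded.
	 */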
4695 	adapter->cycle_time = NSEC_PER_SEC;
4696 
4697 	for (i = 0; i < adapter->num_tx_queues; i++) {
4698 		ring = adapter->tx_ring[i];
4699 		ring->start_time = 0;
4700 		ring->end_time = NSEC_PER_SEC;
4701 	}
4702 
4703 	return 0;
4704 }
4705 
4706 static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
4707 {
4708 	struct timespec64 b;
4709 
4710 	b = ktime_to_timespec64(base_time);
4711 
4712 	return timespec64_compare(now, &b) > 0;
4713 }
4714 
4715 static bool validate_schedule(struct igc_adapter *adapter,
4716 			      const struct tc_taprio_qopt_offload *qopt)
4717 {
4718 	int queue_uses[IGC_MAX_TX_QUEUES] = { };
4719 	struct timespec64 now;
4720 	size_t n;
4721 
4722 	if (qopt->cycle_time_extension)
4723 		return false;
4724 
4725 	igc_ptp_read(adapter, &now);
4726 
4727 	/* If we program the controller's BASET registers with a time
4728 	 * in the future, it will hold all the packets until that
4729 	 * time, causing a lot of TX Hangs, so to avoid that, we
4730 	 * reject schedules that would start in the future.
4731 	 */
4732 	if (!is_base_time_past(qopt->base_time, &now))
4733 		return false;
4734 
4735 	for (n = 0; n < qopt->num_entries; n++) {
4736 		const struct tc_taprio_sched_entry *e;
4737 		int i;
4738 
4739 		e = &qopt->entries[n];
4740 
4741 		/* i225 only supports "global" frame preemption
4742 		 * settings.
4743 		 */
4744 		if (e->command != TC_TAPRIO_CMD_SET_GATES)
4745 			return false;
4746 
4747 		for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
4748 			if (e->gate_mask & BIT(i))
4749 				queue_uses[i]++;
4750 
4751 			if (queue_uses[i] > 1)
4752 				return false;
4753 		}
4754 	}
4755 
4756 	return true;
4757 }
4758 
4759 static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
4760 				     struct tc_etf_qopt_offload *qopt)
4761 {
4762 	struct igc_hw *hw = &adapter->hw;
4763 	int err;
4764 
4765 	if (hw->mac.type != igc_i225)
4766 		return -EOPNOTSUPP;
4767 
4768 	err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
4769 	if (err)
4770 		return err;
4771 
4772 	return igc_tsn_offload_apply(adapter);
4773 }
4774 
4775 static int igc_save_qbv_schedule(struct igc_adapter *adapter,
4776 				 struct tc_taprio_qopt_offload *qopt)
4777 {
4778 	u32 start_time = 0, end_time = 0;
4779 	size_t n;
4780 
4781 	if (!qopt->enable) {
4782 		adapter->base_time = 0;
4783 		return 0;
4784 	}
4785 
4786 	if (adapter->base_time)
4787 		return -EALREADY;
4788 
4789 	if (!validate_schedule(adapter, qopt))
4790 		return -EINVAL;
4791 
4792 	adapter->cycle_time = qopt->cycle_time;
4793 	adapter->base_time = qopt->base_time;
4794 
4795 	/* FIXME: be a little smarter about cases when the gate for a
4796 	 * queue stays open for more than one entry.
4797 	 */
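	/* Each entry extends end_time by its interval, and every queue
	 * whose gate bit is set in that entry gets [start_time, end_time)
	 * as its transmission window.
	 */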
4798 	for (n = 0; n < qopt->num_entries; n++) {
4799 		struct tc_taprio_sched_entry *e = &qopt->entries[n];
4800 		int i;
4801 
4802 		end_time += e->interval;
4803 
4804 		for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
4805 			struct igc_ring *ring = adapter->tx_ring[i];
4806 
4807 			if (!(e->gate_mask & BIT(i)))
4808 				continue;
4809 
4810 			ring->start_time = start_time;
4811 			ring->end_time = end_time;
4812 		}
4813 
4814 		start_time += e->interval;
4815 	}
4816 
4817 	return 0;
4818 }
4819 
4820 static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
4821 					 struct tc_taprio_qopt_offload *qopt)
4822 {
4823 	struct igc_hw *hw = &adapter->hw;
4824 	int err;
4825 
4826 	if (hw->mac.type != igc_i225)
4827 		return -EOPNOTSUPP;
4828 
4829 	err = igc_save_qbv_schedule(adapter, qopt);
4830 	if (err)
4831 		return err;
4832 
4833 	return igc_tsn_offload_apply(adapter);
4834 }
4835 
4836 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
4837 			void *type_data)
4838 {
4839 	struct igc_adapter *adapter = netdev_priv(dev);
4840 
4841 	switch (type) {
4842 	case TC_SETUP_QDISC_TAPRIO:
4843 		return igc_tsn_enable_qbv_scheduling(adapter, type_data);
4844 
4845 	case TC_SETUP_QDISC_ETF:
4846 		return igc_tsn_enable_launchtime(adapter, type_data);
4847 
4848 	default:
4849 		return -EOPNOTSUPP;
4850 	}
4851 }
4852 
4853 static const struct net_device_ops igc_netdev_ops = {
4854 	.ndo_open		= igc_open,
4855 	.ndo_stop		= igc_close,
4856 	.ndo_start_xmit		= igc_xmit_frame,
4857 	.ndo_set_rx_mode	= igc_set_rx_mode,
4858 	.ndo_set_mac_address	= igc_set_mac,
4859 	.ndo_change_mtu		= igc_change_mtu,
4860 	.ndo_get_stats64	= igc_get_stats64,
4861 	.ndo_fix_features	= igc_fix_features,
4862 	.ndo_set_features	= igc_set_features,
4863 	.ndo_features_check	= igc_features_check,
4864 	.ndo_do_ioctl		= igc_ioctl,
4865 	.ndo_setup_tc		= igc_setup_tc,
4866 };
4867 
4868 /* PCIe configuration access */
4869 void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
4870 {
4871 	struct igc_adapter *adapter = hw->back;
4872 
4873 	pci_read_config_word(adapter->pdev, reg, value);
4874 }
4875 
4876 void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
4877 {
4878 	struct igc_adapter *adapter = hw->back;
4879 
4880 	pci_write_config_word(adapter->pdev, reg, *value);
4881 }
4882 
4883 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
4884 {
4885 	struct igc_adapter *adapter = hw->back;
4886 
4887 	if (!pci_is_pcie(adapter->pdev))
4888 		return -IGC_ERR_CONFIG;
4889 
4890 	pcie_capability_read_word(adapter->pdev, reg, value);
4891 
4892 	return IGC_SUCCESS;
4893 }
4894 
4895 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
4896 {
4897 	struct igc_adapter *adapter = hw->back;
4898 
4899 	if (!pci_is_pcie(adapter->pdev))
4900 		return -IGC_ERR_CONFIG;
4901 
4902 	pcie_capability_write_word(adapter->pdev, reg, *value);
4903 
4904 	return IGC_SUCCESS;
4905 }
4906 
4907 u32 igc_rd32(struct igc_hw *hw, u32 reg)
4908 {
4909 	struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
4910 	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
4911 	u32 value = 0;
4912 
4913 	value = readl(&hw_addr[reg]);
4914 
4915 	/* reads should not return all F's */
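	/* all-ones is treated as surprise removal only if register 0 also
	 * reads back as all-ones (or the offending read was register 0)
	 */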
4916 	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
4917 		struct net_device *netdev = igc->netdev;
4918 
4919 		hw->hw_addr = NULL;
4920 		netif_device_detach(netdev);
4921 		netdev_err(netdev, "PCIe link lost, device now detached\n");
4922 		WARN(pci_device_is_present(igc->pdev),
4923 		     "igc: Failed to read reg 0x%x!\n", reg);
4924 	}
4925 
4926 	return value;
4927 }
4928 
4929 int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
4930 {
4931 	struct igc_mac_info *mac = &adapter->hw.mac;
4932 
4933 	mac->autoneg = 0;
4934 
4935 	/* Make sure dplx is at most 1 bit and lsb of speed is not set
4936 	 * for the switch() below to work
4937 	 */
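	/* e.g. SPEED_100 (100) + DUPLEX_FULL (1) = 101; with speeds even
	 * and duplex limited to 0 or 1, the sum uniquely encodes the pair
	 */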
4938 	if ((spd & 1) || (dplx & ~1))
4939 		goto err_inval;
4940 
4941 	switch (spd + dplx) {
4942 	case SPEED_10 + DUPLEX_HALF:
4943 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
4944 		break;
4945 	case SPEED_10 + DUPLEX_FULL:
4946 		mac->forced_speed_duplex = ADVERTISE_10_FULL;
4947 		break;
4948 	case SPEED_100 + DUPLEX_HALF:
4949 		mac->forced_speed_duplex = ADVERTISE_100_HALF;
4950 		break;
4951 	case SPEED_100 + DUPLEX_FULL:
4952 		mac->forced_speed_duplex = ADVERTISE_100_FULL;
4953 		break;
4954 	case SPEED_1000 + DUPLEX_FULL:
4955 		mac->autoneg = 1;
4956 		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
4957 		break;
4958 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
4959 		goto err_inval;
4960 	case SPEED_2500 + DUPLEX_FULL:
4961 		mac->autoneg = 1;
4962 		adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
4963 		break;
4964 	case SPEED_2500 + DUPLEX_HALF: /* not supported */
4965 	default:
4966 		goto err_inval;
4967 	}
4968 
4969 	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
4970 	adapter->hw.phy.mdix = AUTO_ALL_MODES;
4971 
4972 	return 0;
4973 
4974 err_inval:
4975 	netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n");
4976 	return -EINVAL;
4977 }
4978 
4979 /**
4980  * igc_probe - Device Initialization Routine
4981  * @pdev: PCI device information struct
4982  * @ent: entry in igc_pci_tbl
4983  *
4984  * Returns 0 on success, negative on failure
4985  *
4986  * igc_probe initializes an adapter identified by a pci_dev structure.
4987  * The OS initialization, configuring the adapter private structure,
4988  * and a hardware reset occur.
4989  */
4990 static int igc_probe(struct pci_dev *pdev,
4991 		     const struct pci_device_id *ent)
4992 {
4993 	struct igc_adapter *adapter;
4994 	struct net_device *netdev;
4995 	struct igc_hw *hw;
4996 	const struct igc_info *ei = igc_info_tbl[ent->driver_data];
4997 	int err, pci_using_dac;
4998 
4999 	err = pci_enable_device_mem(pdev);
5000 	if (err)
5001 		return err;
5002 
5003 	pci_using_dac = 0;
5004 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5005 	if (!err) {
5006 		pci_using_dac = 1;
5007 	} else {
5008 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
5009 		if (err) {
5010 			dev_err(&pdev->dev,
5011 				"No usable DMA configuration, aborting\n");
5012 			goto err_dma;
5013 		}
5014 	}
5015 
5016 	err = pci_request_mem_regions(pdev, igc_driver_name);
5017 	if (err)
5018 		goto err_pci_reg;
5019 
5020 	pci_enable_pcie_error_reporting(pdev);
5021 
5022 	pci_set_master(pdev);
5023 
5024 	err = -ENOMEM;
5025 	netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
5026 				   IGC_MAX_TX_QUEUES);
5027 
5028 	if (!netdev)
5029 		goto err_alloc_etherdev;
5030 
5031 	SET_NETDEV_DEV(netdev, &pdev->dev);
5032 
5033 	pci_set_drvdata(pdev, netdev);
5034 	adapter = netdev_priv(netdev);
5035 	adapter->netdev = netdev;
5036 	adapter->pdev = pdev;
5037 	hw = &adapter->hw;
5038 	hw->back = adapter;
5039 	adapter->port_num = hw->bus.func;
5040 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
5041 
5042 	err = pci_save_state(pdev);
5043 	if (err)
5044 		goto err_ioremap;
5045 
5046 	err = -EIO;
5047 	adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
5048 				   pci_resource_len(pdev, 0));
5049 	if (!adapter->io_addr)
5050 		goto err_ioremap;
5051 
5052 	/* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
5053 	hw->hw_addr = adapter->io_addr;
5054 
5055 	netdev->netdev_ops = &igc_netdev_ops;
5056 	igc_ethtool_set_ops(netdev);
5057 	netdev->watchdog_timeo = 5 * HZ;
5058 
5059 	netdev->mem_start = pci_resource_start(pdev, 0);
5060 	netdev->mem_end = pci_resource_end(pdev, 0);
5061 
5062 	/* PCI config space info */
5063 	hw->vendor_id = pdev->vendor;
5064 	hw->device_id = pdev->device;
5065 	hw->revision_id = pdev->revision;
5066 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
5067 	hw->subsystem_device_id = pdev->subsystem_device;
5068 
5069 	/* Copy the default MAC and PHY function pointers */
5070 	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
5071 	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
5072 
5073 	/* Initialize skew-specific constants */
5074 	err = ei->get_invariants(hw);
5075 	if (err)
5076 		goto err_sw_init;
5077 
5078 	/* Add supported features to the features list*/
5079 	netdev->features |= NETIF_F_SG;
5080 	netdev->features |= NETIF_F_TSO;
5081 	netdev->features |= NETIF_F_TSO6;
5082 	netdev->features |= NETIF_F_TSO_ECN;
5083 	netdev->features |= NETIF_F_RXCSUM;
5084 	netdev->features |= NETIF_F_HW_CSUM;
5085 	netdev->features |= NETIF_F_SCTP_CRC;
5086 	netdev->features |= NETIF_F_HW_TC;
5087 
5088 #define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
5089 				  NETIF_F_GSO_GRE_CSUM | \
5090 				  NETIF_F_GSO_IPXIP4 | \
5091 				  NETIF_F_GSO_IPXIP6 | \
5092 				  NETIF_F_GSO_UDP_TUNNEL | \
5093 				  NETIF_F_GSO_UDP_TUNNEL_CSUM)
5094 
5095 	netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
5096 	netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
5097 
5098 	/* setup the private structure */
5099 	err = igc_sw_init(adapter);
5100 	if (err)
5101 		goto err_sw_init;
5102 
5103 	/* copy netdev features into list of user selectable features */
5104 	netdev->hw_features |= NETIF_F_NTUPLE;
5105 	netdev->hw_features |= netdev->features;
5106 
5107 	if (pci_using_dac)
5108 		netdev->features |= NETIF_F_HIGHDMA;
5109 
5110 	/* MTU range: 68 - 9216 */
5111 	netdev->min_mtu = ETH_MIN_MTU;
5112 	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
5113 
5114 	/* before reading the NVM, reset the controller to put the device in a
5115 	 * known good starting state
5116 	 */
5117 	hw->mac.ops.reset_hw(hw);
5118 
5119 	if (igc_get_flash_presence_i225(hw)) {
5120 		if (hw->nvm.ops.validate(hw) < 0) {
5121 			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
5122 			err = -EIO;
5123 			goto err_eeprom;
5124 		}
5125 	}
5126 
5127 	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
5128 		/* copy the MAC address out of the NVM */
5129 		if (hw->mac.ops.read_mac_addr(hw))
5130 			dev_err(&pdev->dev, "NVM Read Error\n");
5131 	}
5132 
5133 	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
5134 
5135 	if (!is_valid_ether_addr(netdev->dev_addr)) {
5136 		dev_err(&pdev->dev, "Invalid MAC Address\n");
5137 		err = -EIO;
5138 		goto err_eeprom;
5139 	}
5140 
5141 	/* configure RXPBSIZE and TXPBSIZE */
5142 	wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
5143 	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
5144 
5145 	timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
5146 	timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
5147 
5148 	INIT_WORK(&adapter->reset_task, igc_reset_task);
5149 	INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
5150 
5151 	/* Initialize link properties that are user-changeable */
5152 	adapter->fc_autoneg = true;
5153 	hw->mac.autoneg = true;
5154 	hw->phy.autoneg_advertised = 0xaf;
5155 
5156 	hw->fc.requested_mode = igc_fc_default;
5157 	hw->fc.current_mode = igc_fc_default;
5158 
5159 	/* By default, support wake on port A */
5160 	adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
5161 
5162 	/* initialize the wol settings based on the eeprom settings */
5163 	if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
5164 		adapter->wol |= IGC_WUFC_MAG;
5165 
5166 	device_set_wakeup_enable(&adapter->pdev->dev,
5167 				 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
5168 
5169 	igc_ptp_init(adapter);
5170 
5171 	/* reset the hardware with the new settings */
5172 	igc_reset(adapter);
5173 
5174 	/* let the f/w know that the h/w is now under the control of the
5175 	 * driver.
5176 	 */
5177 	igc_get_hw_control(adapter);
5178 
5179 	strncpy(netdev->name, "eth%d", IFNAMSIZ);
5180 	err = register_netdev(netdev);
5181 	if (err)
5182 		goto err_register;
5183 
5184 	 /* carrier off reporting is important to ethtool even BEFORE open */
5185 	netif_carrier_off(netdev);
5186 
5187 	/* keep a copy of the board-specific info */
5188 	adapter->ei = *ei;
5189 
5190 	/* print pcie link status and MAC address */
5191 	pcie_print_link_status(pdev);
5192 	netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
5193 
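	/* Opt out of the PM core's direct-complete optimization so suspend
	 * and resume always go through the full callbacks.
	 */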
5194 	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
5195 	/* Disable EEE for internal PHY devices */
5196 	hw->dev_spec._base.eee_enable = false;
5197 	adapter->flags &= ~IGC_FLAG_EEE;
5198 	igc_set_eee_i225(hw, false, false, false);
5199 
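	/* Drop the runtime PM usage reference held across probe so the
	 * device can runtime suspend once idle; igc_remove() balances this
	 * with pm_runtime_get_noresume().
	 */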
5200 	pm_runtime_put_noidle(&pdev->dev);
5201 
5202 	return 0;
5203 
5204 err_register:
5205 	igc_release_hw_control(adapter);
5206 err_eeprom:
5207 	if (!igc_check_reset_block(hw))
5208 		igc_reset_phy(hw);
5209 err_sw_init:
5210 	igc_clear_interrupt_scheme(adapter);
5211 	iounmap(adapter->io_addr);
5212 err_ioremap:
5213 	free_netdev(netdev);
5214 err_alloc_etherdev:
5215 	pci_release_mem_regions(pdev);
5216 err_pci_reg:
5217 err_dma:
5218 	pci_disable_device(pdev);
5219 	return err;
5220 }
5221 
5222 /**
5223  * igc_remove - Device Removal Routine
5224  * @pdev: PCI device information struct
5225  *
5226  * igc_remove is called by the PCI subsystem to alert the driver
5227  * that it should release a PCI device.  This could be caused by a
5228  * Hot-Plug event, or because the driver is going to be removed from
5229  * memory.
5230  */
5231 static void igc_remove(struct pci_dev *pdev)
5232 {
5233 	struct net_device *netdev = pci_get_drvdata(pdev);
5234 	struct igc_adapter *adapter = netdev_priv(netdev);
5235 
5236 	pm_runtime_get_noresume(&pdev->dev);
5237 
5238 	igc_flush_nfc_rules(adapter);
5239 
5240 	igc_ptp_stop(adapter);
5241 
5242 	set_bit(__IGC_DOWN, &adapter->state);
5243 
5244 	del_timer_sync(&adapter->watchdog_timer);
5245 	del_timer_sync(&adapter->phy_info_timer);
5246 
5247 	cancel_work_sync(&adapter->reset_task);
5248 	cancel_work_sync(&adapter->watchdog_task);
5249 
5250 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
5251 	 * would have already happened in close and is redundant.
5252 	 */
5253 	igc_release_hw_control(adapter);
5254 	unregister_netdev(netdev);
5255 
5256 	igc_clear_interrupt_scheme(adapter);
5257 	pci_iounmap(pdev, adapter->io_addr);
5258 	pci_release_mem_regions(pdev);
5259 
5260 	free_netdev(netdev);
5261 
5262 	pci_disable_pcie_error_reporting(pdev);
5263 
5264 	pci_disable_device(pdev);
5265 }
5266 
5267 static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
5268 			  bool runtime)
5269 {
5270 	struct net_device *netdev = pci_get_drvdata(pdev);
5271 	struct igc_adapter *adapter = netdev_priv(netdev);
5272 	u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
5273 	struct igc_hw *hw = &adapter->hw;
5274 	u32 ctrl, rctl, status;
5275 	bool wake;
5276 
5277 	rtnl_lock();
5278 	netif_device_detach(netdev);
5279 
5280 	if (netif_running(netdev))
5281 		__igc_close(netdev, true);
5282 
5283 	igc_ptp_suspend(adapter);
5284 
5285 	igc_clear_interrupt_scheme(adapter);
5286 	rtnl_unlock();
5287 
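	/* If the link is already up, drop the link-change wake filter so we
	 * do not wake immediately after suspending.
	 */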
5288 	status = rd32(IGC_STATUS);
5289 	if (status & IGC_STATUS_LU)
5290 		wufc &= ~IGC_WUFC_LNKC;
5291 
5292 	if (wufc) {
5293 		igc_setup_rctl(adapter);
5294 		igc_set_rx_mode(netdev);
5295 
5296 		/* turn on all-multi mode if wake on multicast is enabled */
5297 		if (wufc & IGC_WUFC_MC) {
5298 			rctl = rd32(IGC_RCTL);
5299 			rctl |= IGC_RCTL_MPE;
5300 			wr32(IGC_RCTL, rctl);
5301 		}
5302 
5303 		ctrl = rd32(IGC_CTRL);
5304 		ctrl |= IGC_CTRL_ADVD3WUC;
5305 		wr32(IGC_CTRL, ctrl);
5306 
5307 		/* Allow time for pending master requests to run */
5308 		igc_disable_pcie_master(hw);
5309 
5310 		wr32(IGC_WUC, IGC_WUC_PME_EN);
5311 		wr32(IGC_WUFC, wufc);
5312 	} else {
5313 		wr32(IGC_WUC, 0);
5314 		wr32(IGC_WUFC, 0);
5315 	}
5316 
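	/* Keep the link powered if any wake filter is armed or manageability
	 * still needs it; otherwise power down the PHY to save energy.
	 */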
5317 	wake = wufc || adapter->en_mng_pt;
5318 	if (!wake)
5319 		igc_power_down_phy_copper_base(&adapter->hw);
5320 	else
5321 		igc_power_up_link(adapter);
5322 
5323 	if (enable_wake)
5324 		*enable_wake = wake;
5325 
5326 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
5327 	 * would have already happened in close and is redundant.
5328 	 */
5329 	igc_release_hw_control(adapter);
5330 
5331 	pci_disable_device(pdev);
5332 
5333 	return 0;
5334 }
5335 
5336 #ifdef CONFIG_PM
5337 static int __maybe_unused igc_runtime_suspend(struct device *dev)
5338 {
5339 	return __igc_shutdown(to_pci_dev(dev), NULL, 1);
5340 }
5341 
5342 static void igc_deliver_wake_packet(struct net_device *netdev)
5343 {
5344 	struct igc_adapter *adapter = netdev_priv(netdev);
5345 	struct igc_hw *hw = &adapter->hw;
5346 	struct sk_buff *skb;
5347 	u32 wupl;
5348 
5349 	wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;
5350 
5351 	/* WUPM stores only the first 128 bytes of the wake packet.
5352 	 * Read the packet only if we have the whole thing.
5353 	 */
5354 	if (wupl == 0 || wupl > IGC_WUPM_BYTES)
5355 		return;
5356 
5357 	skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
5358 	if (!skb)
5359 		return;
5360 
5361 	skb_put(skb, wupl);
5362 
5363 	/* Ensure reads are 32-bit aligned */
5364 	wupl = roundup(wupl, 4);
5365 
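	/* The skb was sized to IGC_WUPM_BYTES above, so copying the
	 * rounded-up length cannot overrun the buffer.
	 */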
5366 	memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);
5367 
5368 	skb->protocol = eth_type_trans(skb, netdev);
5369 	netif_rx(skb);
5370 }
5371 
5372 static int __maybe_unused igc_resume(struct device *dev)
5373 {
5374 	struct pci_dev *pdev = to_pci_dev(dev);
5375 	struct net_device *netdev = pci_get_drvdata(pdev);
5376 	struct igc_adapter *adapter = netdev_priv(netdev);
5377 	struct igc_hw *hw = &adapter->hw;
5378 	u32 val;
	int err;
5379 
5380 	pci_set_power_state(pdev, PCI_D0);
5381 	pci_restore_state(pdev);
5382 	pci_save_state(pdev);
5383 
5384 	if (!pci_device_is_present(pdev))
5385 		return -ENODEV;
5386 	err = pci_enable_device_mem(pdev);
5387 	if (err) {
5388 		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
5389 		return err;
5390 	}
5391 	pci_set_master(pdev);
5392 
5393 	pci_enable_wake(pdev, PCI_D3hot, 0);
5394 	pci_enable_wake(pdev, PCI_D3cold, 0);
5395 
5396 	if (igc_init_interrupt_scheme(adapter, true)) {
5397 		netdev_err(netdev, "Unable to allocate memory for queues\n");
5398 		return -ENOMEM;
5399 	}
5400 
5401 	igc_reset(adapter);
5402 
5403 	/* let the f/w know that the h/w is now under the control of the
5404 	 * driver.
5405 	 */
5406 	igc_get_hw_control(adapter);
5407 
5408 	val = rd32(IGC_WUS);
5409 	if (val & WAKE_PKT_WUS)
5410 		igc_deliver_wake_packet(netdev);
5411 
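	/* Wake-up status bits are write-1-to-clear; clear them all */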
5412 	wr32(IGC_WUS, ~0);
5413 
5414 	rtnl_lock();
5415 	if (!err && netif_running(netdev))
5416 		err = __igc_open(netdev, true);
5417 
5418 	if (!err)
5419 		netif_device_attach(netdev);
5420 	rtnl_unlock();
5421 
5422 	return err;
5423 }
5424 
5425 static int __maybe_unused igc_runtime_resume(struct device *dev)
5426 {
5427 	return igc_resume(dev);
5428 }
5429 
5430 static int __maybe_unused igc_suspend(struct device *dev)
5431 {
5432 	return __igc_shutdown(to_pci_dev(dev), NULL, 0);
5433 }
5434 
5435 static int __maybe_unused igc_runtime_idle(struct device *dev)
5436 {
5437 	struct net_device *netdev = dev_get_drvdata(dev);
5438 	struct igc_adapter *adapter = netdev_priv(netdev);
5439 
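	/* Only schedule a runtime suspend attempt when the link is down.
	 * Returning -EBUSY stops the PM core from suspending the device
	 * directly from this idle callback.
	 */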
5440 	if (!igc_has_link(adapter))
5441 		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
5442 
5443 	return -EBUSY;
5444 }
5445 #endif /* CONFIG_PM */
5446 
5447 static void igc_shutdown(struct pci_dev *pdev)
5448 {
5449 	bool wake;
5450 
5451 	__igc_shutdown(pdev, &wake, 0);
5452 
5453 	if (system_state == SYSTEM_POWER_OFF) {
5454 		pci_wake_from_d3(pdev, wake);
5455 		pci_set_power_state(pdev, PCI_D3hot);
5456 	}
5457 }
5458 
5459 /**
5460  *  igc_io_error_detected - called when PCI error is detected
5461  *  @pdev: Pointer to PCI device
5462  *  @state: The current PCI connection state
5463  *
5464  *  This function is called after a PCI bus error affecting
5465  *  this device has been detected.
5466  **/
5467 static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
5468 					      pci_channel_state_t state)
5469 {
5470 	struct net_device *netdev = pci_get_drvdata(pdev);
5471 	struct igc_adapter *adapter = netdev_priv(netdev);
5472 
5473 	netif_device_detach(netdev);
5474 
5475 	if (state == pci_channel_io_perm_failure)
5476 		return PCI_ERS_RESULT_DISCONNECT;
5477 
5478 	if (netif_running(netdev))
5479 		igc_down(adapter);
5480 	pci_disable_device(pdev);
5481 
5482 	/* Request a slot reset. */
5483 	return PCI_ERS_RESULT_NEED_RESET;
5484 }
5485 
5486 /**
5487  *  igc_io_slot_reset - called after the PCI bus has been reset.
5488  *  @pdev: Pointer to PCI device
5489  *
5490  *  Restart the card from scratch, as if from a cold-boot. Implementation
5491  *  resembles the first-half of the igc_resume routine.
5492  **/
5493 static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
5494 {
5495 	struct net_device *netdev = pci_get_drvdata(pdev);
5496 	struct igc_adapter *adapter = netdev_priv(netdev);
5497 	struct igc_hw *hw = &adapter->hw;
5498 	pci_ers_result_t result;
5499 
5500 	if (pci_enable_device_mem(pdev)) {
5501 		netdev_err(netdev, "Could not re-enable PCI device after reset\n");
5502 		result = PCI_ERS_RESULT_DISCONNECT;
5503 	} else {
5504 		pci_set_master(pdev);
5505 		pci_restore_state(pdev);
5506 		pci_save_state(pdev);
5507 
5508 		pci_enable_wake(pdev, PCI_D3hot, 0);
5509 		pci_enable_wake(pdev, PCI_D3cold, 0);
5510 
5511 		/* In case of PCI error, adapter loses its HW address
5512 		 * so we should re-assign it here.
5513 		 */
5514 		hw->hw_addr = adapter->io_addr;
5515 
5516 		igc_reset(adapter);
5517 		wr32(IGC_WUS, ~0);
5518 		result = PCI_ERS_RESULT_RECOVERED;
5519 	}
5520 
5521 	return result;
5522 }
5523 
5524 /**
5525  *  igc_io_resume - called when traffic can start to flow again.
5526  *  @pdev: Pointer to PCI device
5527  *
5528  *  This callback is called when the error recovery driver tells us that
5529  *  it's OK to resume normal operation. Implementation resembles the
5530  *  second-half of the igc_resume routine.
5531  */
5532 static void igc_io_resume(struct pci_dev *pdev)
5533 {
5534 	struct net_device *netdev = pci_get_drvdata(pdev);
5535 	struct igc_adapter *adapter = netdev_priv(netdev);
5536 
5537 	rtnl_lock();
5538 	if (netif_running(netdev)) {
5539 		if (igc_open(netdev)) {
5540 			netdev_err(netdev, "igc_open failed after reset\n");
			rtnl_unlock();
5541 			return;
5542 		}
5543 	}
5544 
5545 	netif_device_attach(netdev);
5546 
5547 	/* let the f/w know that the h/w is now under the control of the
5548 	 * driver.
5549 	 */
5550 	igc_get_hw_control(adapter);
5551 	rtnl_unlock();
5552 }
5553 
5554 static const struct pci_error_handlers igc_err_handler = {
5555 	.error_detected = igc_io_error_detected,
5556 	.slot_reset = igc_io_slot_reset,
5557 	.resume = igc_io_resume,
5558 };
5559 
5560 #ifdef CONFIG_PM
5561 static const struct dev_pm_ops igc_pm_ops = {
5562 	SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
5563 	SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
5564 			   igc_runtime_idle)
5565 };
5566 #endif
5567 
5568 static struct pci_driver igc_driver = {
5569 	.name     = igc_driver_name,
5570 	.id_table = igc_pci_tbl,
5571 	.probe    = igc_probe,
5572 	.remove   = igc_remove,
5573 #ifdef CONFIG_PM
5574 	.driver.pm = &igc_pm_ops,
5575 #endif
5576 	.shutdown = igc_shutdown,
5577 	.err_handler = &igc_err_handler,
5578 };
5579 
5580 /**
5581  * igc_reinit_queues - reinitialize the interrupt scheme and queues
5582  * @adapter: pointer to adapter structure
5583  */
5584 int igc_reinit_queues(struct igc_adapter *adapter)
5585 {
5586 	struct net_device *netdev = adapter->netdev;
5587 	int err = 0;
5588 
5589 	if (netif_running(netdev))
5590 		igc_close(netdev);
5591 
5592 	igc_reset_interrupt_capability(adapter);
5593 
5594 	if (igc_init_interrupt_scheme(adapter, true)) {
5595 		netdev_err(netdev, "Unable to allocate memory for queues\n");
5596 		return -ENOMEM;
5597 	}
5598 
5599 	if (netif_running(netdev))
5600 		err = igc_open(netdev);
5601 
5602 	return err;
5603 }
5604 
5605 /**
5606  * igc_get_hw_dev - return device
5607  * @hw: pointer to hardware structure
5608  *
5609  * used by hardware layer to print debugging information
5610  */
5611 struct net_device *igc_get_hw_dev(struct igc_hw *hw)
5612 {
5613 	struct igc_adapter *adapter = hw->back;
5614 
5615 	return adapter->netdev;
5616 }
5617 
5618 /**
5619  * igc_init_module - Driver Registration Routine
5620  *
5621  * igc_init_module is the first routine called when the driver is
5622  * loaded. All it does is register with the PCI subsystem.
5623  */
5624 static int __init igc_init_module(void)
5625 {
5626 	int ret;
5627 
5628 	pr_info("%s\n", igc_driver_string);
5629 	pr_info("%s\n", igc_copyright);
5630 
5631 	ret = pci_register_driver(&igc_driver);
5632 	return ret;
5633 }
5634 
5635 module_init(igc_init_module);
5636 
5637 /**
5638  * igc_exit_module - Driver Exit Cleanup Routine
5639  *
5640  * igc_exit_module is called just before the driver is removed
5641  * from memory.
5642  */
5643 static void __exit igc_exit_module(void)
5644 {
5645 	pci_unregister_driver(&igc_driver);
5646 }
5647 
5648 module_exit(igc_exit_module);
5649 /* igc_main.c */
5650