Lines Matching +full:disable +full:- +full:eop
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
36 static int debug = -1;
112 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
127 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) in __ew32_prepare()
133 if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in __ew32()
136 writel(val, hw->hw_addr + reg); in __ew32()
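Read together, __ew32_prepare() and __ew32() form one guarded-write pattern for parts where the Manageability Engine (ME) can collide with driver CSR writes on the PCIM2PCI arbiter. A condensed sketch of the combined flow (the helper name is hypothetical; the macros and the E1000_ICH_FWSM_PCIM2PCI_COUNT bound are the driver's own):

	static void guarded_csr_write(struct e1000_hw *hw, unsigned long reg, u32 val)
	{
		s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;	/* bounded spin, never forever */

		/* wait for any in-flight ME-to-PCI cycle to drain */
		while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
			udelay(50);

		writel(val, hw->hw_addr + reg);		/* now safe to hit the CSR */
	}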
140 * e1000_regdump - register printout routine
150 switch (reginfo->ofs) { in e1000_regdump()
164 pr_info("%-15s %08x\n", in e1000_regdump()
165 reginfo->name, __er32(hw, reginfo->ofs)); in e1000_regdump()
169 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); in e1000_regdump()
170 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); in e1000_regdump()
179 for (i = 0; i < adapter->rx_ps_pages; i++) { in e1000e_dump_ps_pages()
180 ps_page = &bi->ps_pages[i]; in e1000e_dump_ps_pages()
182 if (ps_page->page) { in e1000e_dump_ps_pages()
185 16, 1, page_address(ps_page->page), in e1000e_dump_ps_pages()
192 * e1000e_dump - Print registers, Tx-ring and Rx-ring
197 struct net_device *netdev = adapter->netdev; in e1000e_dump()
198 struct e1000_hw *hw = &adapter->hw; in e1000e_dump()
200 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000e_dump()
207 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000e_dump()
224 dev_info(&adapter->pdev->dev, "Net device Info\n"); in e1000e_dump()
226 pr_info("%-15s %016lX %016lX\n", netdev->name, in e1000e_dump()
227 netdev->state, dev_trans_start(netdev)); in e1000e_dump()
231 dev_info(&adapter->pdev->dev, "Register Dump\n"); in e1000e_dump()
234 reginfo->name; reginfo++) { in e1000e_dump()
242 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); in e1000e_dump()
243 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); in e1000e_dump()
244 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; in e1000e_dump()
246 0, tx_ring->next_to_use, tx_ring->next_to_clean, in e1000e_dump()
247 (unsigned long long)buffer_info->dma, in e1000e_dump()
248 buffer_info->length, in e1000e_dump()
249 buffer_info->next_to_watch, in e1000e_dump()
250 (unsigned long long)buffer_info->time_stamp); in e1000e_dump()
256 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); in e1000e_dump()
258 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) in e1000e_dump()
 * [legacy/extended Tx descriptor layout diagrams: only the box-border lines matched here; field rows elided]
285 …Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb… in e1000e_dump()
286 …Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb… in e1000e_dump()
287 …Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb… in e1000e_dump()
288 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in e1000e_dump()
291 buffer_info = &tx_ring->buffer_info[i]; in e1000e_dump()
293 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) in e1000e_dump()
295 else if (i == tx_ring->next_to_use) in e1000e_dump()
297 else if (i == tx_ring->next_to_clean) in e1000e_dump()
302 (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' : in e1000e_dump()
303 ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')), in e1000e_dump()
305 (unsigned long long)le64_to_cpu(u0->a), in e1000e_dump()
306 (unsigned long long)le64_to_cpu(u0->b), in e1000e_dump()
307 (unsigned long long)buffer_info->dma, in e1000e_dump()
308 buffer_info->length, buffer_info->next_to_watch, in e1000e_dump()
309 (unsigned long long)buffer_info->time_stamp, in e1000e_dump()
310 buffer_info->skb, next_desc); in e1000e_dump()
312 if (netif_msg_pktdata(adapter) && buffer_info->skb) in e1000e_dump()
314 16, 1, buffer_info->skb->data, in e1000e_dump()
315 buffer_info->skb->len, true); in e1000e_dump()
320 dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); in e1000e_dump()
323 0, rx_ring->next_to_use, rx_ring->next_to_clean); in e1000e_dump()
329 dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); in e1000e_dump()
330 switch (adapter->rx_ps_pages) { in e1000e_dump()
 * [packet-split Rx descriptor (Read) layout diagram: only border lines matched; field rows elided]
346 …0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt… in e1000e_dump()
347 /* [Extended] Receive Descriptor (Write-Back) Format in e1000e_dump()
 * [layout diagram: only border lines matched; field rows elided]
358 …h] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-… in e1000e_dump()
359 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
361 buffer_info = &rx_ring->buffer_info[i]; in e1000e_dump()
365 le32_to_cpu(rx_desc_ps->wb.middle.status_error); in e1000e_dump()
367 if (i == rx_ring->next_to_use) in e1000e_dump()
369 else if (i == rx_ring->next_to_clean) in e1000e_dump()
376 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n", in e1000e_dump()
378 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
379 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
380 (unsigned long long)le64_to_cpu(u1->c), in e1000e_dump()
381 (unsigned long long)le64_to_cpu(u1->d), in e1000e_dump()
382 buffer_info->skb, next_desc); in e1000e_dump()
386 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
387 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
388 (unsigned long long)le64_to_cpu(u1->c), in e1000e_dump()
389 (unsigned long long)le64_to_cpu(u1->d), in e1000e_dump()
390 (unsigned long long)buffer_info->dma, in e1000e_dump()
391 buffer_info->skb, next_desc); in e1000e_dump()
 * [extended Rx descriptor (Read) layout diagram: only border lines matched; field rows elided]
409 …pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read… in e1000e_dump()
410 /* Extended Receive Descriptor (Write-Back) Format in e1000e_dump()
 * [layout diagram: only border lines and the 'Rsvd | Reserved | MRQ RSS' row matched; other rows elided]
423 …pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n"… in e1000e_dump()
425 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
428 buffer_info = &rx_ring->buffer_info[i]; in e1000e_dump()
431 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000e_dump()
433 if (i == rx_ring->next_to_use) in e1000e_dump()
435 else if (i == rx_ring->next_to_clean) in e1000e_dump()
442 pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n", in e1000e_dump()
444 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
445 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
446 buffer_info->skb, next_desc); in e1000e_dump()
450 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
451 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
452 (unsigned long long)buffer_info->dma, in e1000e_dump()
453 buffer_info->skb, next_desc); in e1000e_dump()
456 buffer_info->skb) in e1000e_dump()
460 buffer_info->skb->data, in e1000e_dump()
461 adapter->rx_buffer_len, in e1000e_dump()
469 * e1000_desc_unused - calculate if we have unused descriptors
474 if (ring->next_to_clean > ring->next_to_use) in e1000_desc_unused()
475 return ring->next_to_clean - ring->next_to_use - 1; in e1000_desc_unused()
477 return ring->count + ring->next_to_clean - ring->next_to_use - 1; in e1000_desc_unused()
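The ring deliberately keeps one slot empty so that next_to_clean == next_to_use can only mean "empty", never "full"; hence the trailing -1 in both branches. Worked example with count = 256: next_to_clean = 10 and next_to_use = 250 gives 256 + 10 - 250 - 1 = 15 unused descriptors, while next_to_clean = 250 and next_to_use = 10 gives 250 - 10 - 1 = 239.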
481 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
501 spin_lock_irqsave(&adapter->systim_lock, flags); in e1000e_systim_to_hwtstamp()
502 ns = timecounter_cyc2time(&adapter->tc, systim); in e1000e_systim_to_hwtstamp()
503 spin_unlock_irqrestore(&adapter->systim_lock, flags); in e1000e_systim_to_hwtstamp()
506 hwtstamps->hwtstamp = ns_to_ktime(ns); in e1000e_systim_to_hwtstamp()
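A sketch of how the Rx path feeds this helper: the latched SYSTIM value is read from the RXSTMPL/RXSTMPH registers and converted straight into the skb's shared timestamp area (this mirrors the e1000e_rx_hwtstamp() body, most of which the line match elides):

	u64 rxstmp = (u64)er32(RXSTMPL);

	rxstmp |= (u64)er32(RXSTMPH) << 32;
	e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);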
510 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
522 struct e1000_hw *hw = &adapter->hw; in e1000e_rx_hwtstamp()
525 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) || in e1000e_rx_hwtstamp()
541 adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP; in e1000e_rx_hwtstamp()
545 * e1000_receive_skb - helper function to handle Rx indications
560 skb->protocol = eth_type_trans(skb, netdev); in e1000_receive_skb()
565 napi_gro_receive(&adapter->napi, skb); in e1000_receive_skb()
569 * e1000_rx_checksum - Receive Checksum Offload
583 if (!(adapter->netdev->features & NETIF_F_RXCSUM)) in e1000_rx_checksum()
593 adapter->hw_csum_err++; in e1000_rx_checksum()
602 skb->ip_summed = CHECKSUM_UNNECESSARY; in e1000_rx_checksum()
603 adapter->hw_csum_good++; in e1000_rx_checksum()
608 struct e1000_adapter *adapter = rx_ring->adapter; in e1000e_update_rdt_wa()
609 struct e1000_hw *hw = &adapter->hw; in e1000e_update_rdt_wa()
612 writel(i, rx_ring->tail); in e1000e_update_rdt_wa()
614 if (unlikely(i != readl(rx_ring->tail))) { in e1000e_update_rdt_wa()
618 e_err("ME firmware caused invalid RDT - resetting\n"); in e1000e_update_rdt_wa()
619 schedule_work(&adapter->reset_task); in e1000e_update_rdt_wa()
625 struct e1000_adapter *adapter = tx_ring->adapter; in e1000e_update_tdt_wa()
626 struct e1000_hw *hw = &adapter->hw; in e1000e_update_tdt_wa()
629 writel(i, tx_ring->tail); in e1000e_update_tdt_wa()
631 if (unlikely(i != readl(tx_ring->tail))) { in e1000e_update_tdt_wa()
635 e_err("ME firmware caused invalid TDT - resetting\n"); in e1000e_update_tdt_wa()
636 schedule_work(&adapter->reset_task); in e1000e_update_tdt_wa()
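Both tail-update workarounds follow the same write-then-verify shape: write the tail under the arbiter guard, read it back, and if the ME firmware clobbered the value, disable the affected ring and schedule a full reset. A condensed sketch of the Rx variant, assembled from the matched lines above:

	__ew32_prepare(hw);
	writel(i, rx_ring->tail);
	if (unlikely(i != readl(rx_ring->tail))) {
		u32 rctl = er32(RCTL);

		ew32(RCTL, rctl & ~E1000_RCTL_EN);	/* stop receives first */
		e_err("ME firmware caused invalid RDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}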
641 * e1000_alloc_rx_buffers - Replace used receive buffers
649 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_alloc_rx_buffers()
650 struct net_device *netdev = adapter->netdev; in e1000_alloc_rx_buffers()
651 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_rx_buffers()
656 unsigned int bufsz = adapter->rx_buffer_len; in e1000_alloc_rx_buffers()
658 i = rx_ring->next_to_use; in e1000_alloc_rx_buffers()
659 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers()
661 while (cleaned_count--) { in e1000_alloc_rx_buffers()
662 skb = buffer_info->skb; in e1000_alloc_rx_buffers()
671 adapter->alloc_rx_buff_failed++; in e1000_alloc_rx_buffers()
675 buffer_info->skb = skb; in e1000_alloc_rx_buffers()
677 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, in e1000_alloc_rx_buffers()
678 adapter->rx_buffer_len, in e1000_alloc_rx_buffers()
680 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in e1000_alloc_rx_buffers()
681 dev_err(&pdev->dev, "Rx DMA map failed\n"); in e1000_alloc_rx_buffers()
682 adapter->rx_dma_failed++; in e1000_alloc_rx_buffers()
687 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); in e1000_alloc_rx_buffers()
689 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { in e1000_alloc_rx_buffers()
690			/* Force memory writes to complete before letting h/w in e1000_alloc_rx_buffers()
691			 * know there are new descriptors to fetch. (Only in e1000_alloc_rx_buffers()
692			 * applicable for weak-ordered memory model archs, in e1000_alloc_rx_buffers()
693			 * such as IA-64). in e1000_alloc_rx_buffers()
694			 */ in e1000_alloc_rx_buffers()
695			wmb(); in e1000_alloc_rx_buffers()
696 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_alloc_rx_buffers()
699 writel(i, rx_ring->tail); in e1000_alloc_rx_buffers()
702 if (i == rx_ring->count) in e1000_alloc_rx_buffers()
704 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers()
707 rx_ring->next_to_use = i; in e1000_alloc_rx_buffers()
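E1000_RX_BUFFER_WRITE is 16 (a power of two), so !(i & (E1000_RX_BUFFER_WRITE - 1)) fires on every 16th descriptor: the wmb() barrier and the tail write are batched rather than issued per buffer. Worked example: i = 16 gives 16 & 15 == 0, so the tail is bumped; i = 17 through 31 skip it.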
711 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
719 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_alloc_rx_buffers_ps()
720 struct net_device *netdev = adapter->netdev; in e1000_alloc_rx_buffers_ps()
721 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_rx_buffers_ps()
728 i = rx_ring->next_to_use; in e1000_alloc_rx_buffers_ps()
729 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers_ps()
731 while (cleaned_count--) { in e1000_alloc_rx_buffers_ps()
735 ps_page = &buffer_info->ps_pages[j]; in e1000_alloc_rx_buffers_ps()
736 if (j >= adapter->rx_ps_pages) { in e1000_alloc_rx_buffers_ps()
738 rx_desc->read.buffer_addr[j + 1] = in e1000_alloc_rx_buffers_ps()
742 if (!ps_page->page) { in e1000_alloc_rx_buffers_ps()
743 ps_page->page = alloc_page(gfp); in e1000_alloc_rx_buffers_ps()
744 if (!ps_page->page) { in e1000_alloc_rx_buffers_ps()
745 adapter->alloc_rx_buff_failed++; in e1000_alloc_rx_buffers_ps()
748 ps_page->dma = dma_map_page(&pdev->dev, in e1000_alloc_rx_buffers_ps()
749 ps_page->page, in e1000_alloc_rx_buffers_ps()
752 if (dma_mapping_error(&pdev->dev, in e1000_alloc_rx_buffers_ps()
753 ps_page->dma)) { in e1000_alloc_rx_buffers_ps()
754 dev_err(&adapter->pdev->dev, in e1000_alloc_rx_buffers_ps()
756 adapter->rx_dma_failed++; in e1000_alloc_rx_buffers_ps()
760				/* Refresh the desc even if buffer_addrs in e1000_alloc_rx_buffers_ps()
761				 * didn't change because each write-back in e1000_alloc_rx_buffers_ps()
762				 * erases this info. in e1000_alloc_rx_buffers_ps()
763				 */ in e1000_alloc_rx_buffers_ps()
764 rx_desc->read.buffer_addr[j + 1] = in e1000_alloc_rx_buffers_ps()
765 cpu_to_le64(ps_page->dma); in e1000_alloc_rx_buffers_ps()
768 skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0, in e1000_alloc_rx_buffers_ps()
772 adapter->alloc_rx_buff_failed++; in e1000_alloc_rx_buffers_ps()
776 buffer_info->skb = skb; in e1000_alloc_rx_buffers_ps()
777 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, in e1000_alloc_rx_buffers_ps()
778 adapter->rx_ps_bsize0, in e1000_alloc_rx_buffers_ps()
780 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in e1000_alloc_rx_buffers_ps()
781 dev_err(&pdev->dev, "Rx DMA map failed\n"); in e1000_alloc_rx_buffers_ps()
782 adapter->rx_dma_failed++; in e1000_alloc_rx_buffers_ps()
785 buffer_info->skb = NULL; in e1000_alloc_rx_buffers_ps()
789 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); in e1000_alloc_rx_buffers_ps()
791 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { in e1000_alloc_rx_buffers_ps()
792			/* Force memory writes to complete before letting h/w in e1000_alloc_rx_buffers_ps()
793			 * know there are new descriptors to fetch. (Only in e1000_alloc_rx_buffers_ps()
794			 * applicable for weak-ordered memory model archs, in e1000_alloc_rx_buffers_ps()
795			 * such as IA-64). in e1000_alloc_rx_buffers_ps()
796			 */ in e1000_alloc_rx_buffers_ps()
797			wmb(); in e1000_alloc_rx_buffers_ps()
798 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_alloc_rx_buffers_ps()
801 writel(i << 1, rx_ring->tail); in e1000_alloc_rx_buffers_ps()
805 if (i == rx_ring->count) in e1000_alloc_rx_buffers_ps()
807 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers_ps()
811 rx_ring->next_to_use = i; in e1000_alloc_rx_buffers_ps()
815 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
824 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_alloc_jumbo_rx_buffers()
825 struct net_device *netdev = adapter->netdev; in e1000_alloc_jumbo_rx_buffers()
826 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_jumbo_rx_buffers()
831 unsigned int bufsz = 256 - 16; /* for skb_reserve */ in e1000_alloc_jumbo_rx_buffers()
833 i = rx_ring->next_to_use; in e1000_alloc_jumbo_rx_buffers()
834 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_jumbo_rx_buffers()
836 while (cleaned_count--) { in e1000_alloc_jumbo_rx_buffers()
837 skb = buffer_info->skb; in e1000_alloc_jumbo_rx_buffers()
846 adapter->alloc_rx_buff_failed++; in e1000_alloc_jumbo_rx_buffers()
850 buffer_info->skb = skb; in e1000_alloc_jumbo_rx_buffers()
853 if (!buffer_info->page) { in e1000_alloc_jumbo_rx_buffers()
854 buffer_info->page = alloc_page(gfp); in e1000_alloc_jumbo_rx_buffers()
855 if (unlikely(!buffer_info->page)) { in e1000_alloc_jumbo_rx_buffers()
856 adapter->alloc_rx_buff_failed++; in e1000_alloc_jumbo_rx_buffers()
861 if (!buffer_info->dma) { in e1000_alloc_jumbo_rx_buffers()
862 buffer_info->dma = dma_map_page(&pdev->dev, in e1000_alloc_jumbo_rx_buffers()
863 buffer_info->page, 0, in e1000_alloc_jumbo_rx_buffers()
866 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in e1000_alloc_jumbo_rx_buffers()
867 adapter->alloc_rx_buff_failed++; in e1000_alloc_jumbo_rx_buffers()
873 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); in e1000_alloc_jumbo_rx_buffers()
875 if (unlikely(++i == rx_ring->count)) in e1000_alloc_jumbo_rx_buffers()
877 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_jumbo_rx_buffers()
880 if (likely(rx_ring->next_to_use != i)) { in e1000_alloc_jumbo_rx_buffers()
881 rx_ring->next_to_use = i; in e1000_alloc_jumbo_rx_buffers()
882 if (unlikely(i-- == 0)) in e1000_alloc_jumbo_rx_buffers()
883 i = (rx_ring->count - 1); in e1000_alloc_jumbo_rx_buffers()
885		/* Force memory writes to complete before letting h/w in e1000_alloc_jumbo_rx_buffers()
886		 * know there are new descriptors to fetch. (Only in e1000_alloc_jumbo_rx_buffers()
887		 * applicable for weak-ordered memory model archs, in e1000_alloc_jumbo_rx_buffers()
888		 * such as IA-64). in e1000_alloc_jumbo_rx_buffers()
889		 */ in e1000_alloc_jumbo_rx_buffers()
890		wmb(); in e1000_alloc_jumbo_rx_buffers()
891 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_alloc_jumbo_rx_buffers()
894 writel(i, rx_ring->tail); in e1000_alloc_jumbo_rx_buffers()
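Note the one-behind convention here: unlike the other allocators, the jumbo path programs RDT with the index of the last descriptor actually filled, not with next_to_use. Worked example with count = 256: if the loop wrapped so that i == 0, the driver stores next_to_use = 0 but writes 255 to the tail register.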
901 if (netdev->features & NETIF_F_RXHASH) in e1000_rx_hash()
906 * e1000_clean_rx_irq - Send received data up the network stack
917 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_rx_irq()
918 struct net_device *netdev = adapter->netdev; in e1000_clean_rx_irq()
919 struct pci_dev *pdev = adapter->pdev; in e1000_clean_rx_irq()
920 struct e1000_hw *hw = &adapter->hw; in e1000_clean_rx_irq()
929 i = rx_ring->next_to_clean; in e1000_clean_rx_irq()
931 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_rx_irq()
932 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq()
942 skb = buffer_info->skb; in e1000_clean_rx_irq()
943 buffer_info->skb = NULL; in e1000_clean_rx_irq()
945 prefetch(skb->data - NET_IP_ALIGN); in e1000_clean_rx_irq()
948 if (i == rx_ring->count) in e1000_clean_rx_irq()
953 next_buffer = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq()
957 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_irq()
958 adapter->rx_buffer_len, DMA_FROM_DEVICE); in e1000_clean_rx_irq()
959 buffer_info->dma = 0; in e1000_clean_rx_irq()
961 length = le16_to_cpu(rx_desc->wb.upper.length); in e1000_clean_rx_irq()
963		/* !EOP means multiple descriptors were used to store a single in e1000_clean_rx_irq()
964		 * packet, if that's the case we need to toss it. In fact, we in e1000_clean_rx_irq()
965		 * need to toss every packet with the EOP bit clear and the in e1000_clean_rx_irq()
966		 * next frame that _does_ have the EOP bit set, as it is by in e1000_clean_rx_irq()
967		 * definition only a frame fragment in e1000_clean_rx_irq()
968		 */ in e1000_clean_rx_irq()
969		if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) in e1000_clean_rx_irq()
970 adapter->flags2 |= FLAG2_IS_DISCARDING; in e1000_clean_rx_irq()
972 if (adapter->flags2 & FLAG2_IS_DISCARDING) { in e1000_clean_rx_irq()
976 buffer_info->skb = skb; in e1000_clean_rx_irq()
978 adapter->flags2 &= ~FLAG2_IS_DISCARDING; in e1000_clean_rx_irq()
983 !(netdev->features & NETIF_F_RXALL))) { in e1000_clean_rx_irq()
985 buffer_info->skb = skb; in e1000_clean_rx_irq()
990 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { in e1000_clean_rx_irq()
995 if (netdev->features & NETIF_F_RXFCS) in e1000_clean_rx_irq()
996 total_rx_bytes -= 4; in e1000_clean_rx_irq()
998 length -= 4; in e1000_clean_rx_irq()
1010 napi_alloc_skb(&adapter->napi, length); in e1000_clean_rx_irq()
1013 -NET_IP_ALIGN, in e1000_clean_rx_irq()
1014 (skb->data - in e1000_clean_rx_irq()
1019 buffer_info->skb = skb; in e1000_clean_rx_irq()
1030 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); in e1000_clean_rx_irq()
1033 rx_desc->wb.upper.vlan); in e1000_clean_rx_irq()
1036 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); in e1000_clean_rx_irq()
1040 adapter->alloc_rx_buf(rx_ring, cleaned_count, in e1000_clean_rx_irq()
1049 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_rx_irq()
1051 rx_ring->next_to_clean = i; in e1000_clean_rx_irq()
1055 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); in e1000_clean_rx_irq()
1057 adapter->total_rx_bytes += total_rx_bytes; in e1000_clean_rx_irq()
1058 adapter->total_rx_packets += total_rx_packets; in e1000_clean_rx_irq()
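The napi_alloc_skb() fragment above is the copybreak optimization: packets shorter than the copybreak module parameter (256 bytes by default) are copied into a freshly allocated small skb so the original full-size buffer can be recycled in place. The elided branch reads, lightly condensed:

	if (length < copybreak) {
		struct sk_buff *new_skb = napi_alloc_skb(&adapter->napi, length);

		if (new_skb) {
			skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
						       skb->data - NET_IP_ALIGN,
						       length + NET_IP_ALIGN);
			/* keep the large buffer for reuse */
			buffer_info->skb = skb;
			skb = new_skb;
		}
		/* else just continue with the old one */
	}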
1066 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_put_txbuf()
1068 if (buffer_info->dma) { in e1000_put_txbuf()
1069 if (buffer_info->mapped_as_page) in e1000_put_txbuf()
1070 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, in e1000_put_txbuf()
1071 buffer_info->length, DMA_TO_DEVICE); in e1000_put_txbuf()
1073 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, in e1000_put_txbuf()
1074 buffer_info->length, DMA_TO_DEVICE); in e1000_put_txbuf()
1075 buffer_info->dma = 0; in e1000_put_txbuf()
1077 if (buffer_info->skb) { in e1000_put_txbuf()
1079 dev_kfree_skb_any(buffer_info->skb); in e1000_put_txbuf()
1081 dev_consume_skb_any(buffer_info->skb); in e1000_put_txbuf()
1082 buffer_info->skb = NULL; in e1000_put_txbuf()
1084 buffer_info->time_stamp = 0; in e1000_put_txbuf()
1092 struct net_device *netdev = adapter->netdev; in e1000_print_hw_hang()
1093 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_print_hw_hang()
1094 unsigned int i = tx_ring->next_to_clean; in e1000_print_hw_hang()
1095 unsigned int eop = tx_ring->buffer_info[i].next_to_watch; in e1000_print_hw_hang() local
1096 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); in e1000_print_hw_hang()
1097 struct e1000_hw *hw = &adapter->hw; in e1000_print_hw_hang()
1101 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_print_hw_hang()
1104 if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { in e1000_print_hw_hang()
1105		/* May be blocked on write-back; flush pending descriptor in e1000_print_hw_hang()
1106		 * writebacks to memory and detect again in e1000_print_hw_hang()
1107		 */ in e1000_print_hw_hang()
1108 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000_print_hw_hang()
1114 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000_print_hw_hang()
1117 adapter->tx_hang_recheck = true; in e1000_print_hw_hang()
1120 adapter->tx_hang_recheck = false; in e1000_print_hw_hang()
1134 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); in e1000_print_hw_hang()
1149 "PHY 1000BASE-T Status <%x>\n" in e1000_print_hw_hang()
1152 readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, in e1000_print_hw_hang()
1153 tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, in e1000_print_hw_hang()
1154 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), in e1000_print_hw_hang()
1160 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) in e1000_print_hw_hang()
1165 * e1000e_tx_hwtstamp_work - check for Tx time stamp
1176 struct e1000_hw *hw = &adapter->hw; in e1000e_tx_hwtstamp_work()
1179 struct sk_buff *skb = adapter->tx_hwtstamp_skb; in e1000e_tx_hwtstamp_work()
1191 adapter->tx_hwtstamp_skb = NULL; in e1000e_tx_hwtstamp_work()
1196 } else if (time_after(jiffies, adapter->tx_hwtstamp_start in e1000e_tx_hwtstamp_work()
1197 + adapter->tx_timeout_factor * HZ)) { in e1000e_tx_hwtstamp_work()
1198 dev_kfree_skb_any(adapter->tx_hwtstamp_skb); in e1000e_tx_hwtstamp_work()
1199 adapter->tx_hwtstamp_skb = NULL; in e1000e_tx_hwtstamp_work()
1200 adapter->tx_hwtstamp_timeouts++; in e1000e_tx_hwtstamp_work()
1204 schedule_work(&adapter->tx_hwtstamp_work); in e1000e_tx_hwtstamp_work()
1209 * e1000_clean_tx_irq - Reclaim resources after transmit completes
1217 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_clean_tx_irq()
1218 struct net_device *netdev = adapter->netdev; in e1000_clean_tx_irq()
1219 struct e1000_hw *hw = &adapter->hw; in e1000_clean_tx_irq()
1222 unsigned int i, eop; in e1000_clean_tx_irq() local
1227 i = tx_ring->next_to_clean; in e1000_clean_tx_irq()
1228 eop = tx_ring->buffer_info[i].next_to_watch; in e1000_clean_tx_irq()
1229 eop_desc = E1000_TX_DESC(*tx_ring, eop); in e1000_clean_tx_irq()
1231 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && in e1000_clean_tx_irq()
1232 (count < tx_ring->count)) { in e1000_clean_tx_irq()
1238 buffer_info = &tx_ring->buffer_info[i]; in e1000_clean_tx_irq()
1239 cleaned = (i == eop); in e1000_clean_tx_irq()
1242 total_tx_packets += buffer_info->segs; in e1000_clean_tx_irq()
1243 total_tx_bytes += buffer_info->bytecount; in e1000_clean_tx_irq()
1244 if (buffer_info->skb) { in e1000_clean_tx_irq()
1245 bytes_compl += buffer_info->skb->len; in e1000_clean_tx_irq()
1251 tx_desc->upper.data = 0; in e1000_clean_tx_irq()
1254 if (i == tx_ring->count) in e1000_clean_tx_irq()
1258 if (i == tx_ring->next_to_use) in e1000_clean_tx_irq()
1260 eop = tx_ring->buffer_info[i].next_to_watch; in e1000_clean_tx_irq()
1261 eop_desc = E1000_TX_DESC(*tx_ring, eop); in e1000_clean_tx_irq()
1264 tx_ring->next_to_clean = i; in e1000_clean_tx_irq()
1277 !(test_bit(__E1000_DOWN, &adapter->state))) { in e1000_clean_tx_irq()
1279 ++adapter->restart_queue; in e1000_clean_tx_irq()
1283 if (adapter->detect_tx_hung) { in e1000_clean_tx_irq()
1287 adapter->detect_tx_hung = false; in e1000_clean_tx_irq()
1288 if (tx_ring->buffer_info[i].time_stamp && in e1000_clean_tx_irq()
1289 time_after(jiffies, tx_ring->buffer_info[i].time_stamp in e1000_clean_tx_irq()
1290 + (adapter->tx_timeout_factor * HZ)) && in e1000_clean_tx_irq()
1292 schedule_work(&adapter->print_hang_task); in e1000_clean_tx_irq()
1294 adapter->tx_hang_recheck = false; in e1000_clean_tx_irq()
1296 adapter->total_tx_bytes += total_tx_bytes; in e1000_clean_tx_irq()
1297 adapter->total_tx_packets += total_tx_packets; in e1000_clean_tx_irq()
1298 return count < tx_ring->count; in e1000_clean_tx_irq()
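next_to_watch ties a packet's first buffer to its last descriptor. Worked example: an skb occupying descriptors 5 through 9 records next_to_watch = 9 in buffer_info[5]; the cleaner polls only descriptor 9 for E1000_TXD_STAT_DD and, once it is set, frees buffers 5 through 9 in a single pass, with cleaned = (i == eop) marking the final step.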
1302 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
1313 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_rx_irq_ps()
1314 struct e1000_hw *hw = &adapter->hw; in e1000_clean_rx_irq_ps()
1316 struct net_device *netdev = adapter->netdev; in e1000_clean_rx_irq_ps()
1317 struct pci_dev *pdev = adapter->pdev; in e1000_clean_rx_irq_ps()
1327 i = rx_ring->next_to_clean; in e1000_clean_rx_irq_ps()
1329 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); in e1000_clean_rx_irq_ps()
1330 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq_ps()
1336 skb = buffer_info->skb; in e1000_clean_rx_irq_ps()
1340 prefetch(skb->data - NET_IP_ALIGN); in e1000_clean_rx_irq_ps()
1343 if (i == rx_ring->count) in e1000_clean_rx_irq_ps()
1348 next_buffer = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq_ps()
1352 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_irq_ps()
1353 adapter->rx_ps_bsize0, DMA_FROM_DEVICE); in e1000_clean_rx_irq_ps()
1354 buffer_info->dma = 0; in e1000_clean_rx_irq_ps()
1356 /* see !EOP comment in other Rx routine */ in e1000_clean_rx_irq_ps()
1358 adapter->flags2 |= FLAG2_IS_DISCARDING; in e1000_clean_rx_irq_ps()
1360 if (adapter->flags2 & FLAG2_IS_DISCARDING) { in e1000_clean_rx_irq_ps()
1364 adapter->flags2 &= ~FLAG2_IS_DISCARDING; in e1000_clean_rx_irq_ps()
1369 !(netdev->features & NETIF_F_RXALL))) { in e1000_clean_rx_irq_ps()
1374 length = le16_to_cpu(rx_desc->wb.middle.length0); in e1000_clean_rx_irq_ps()
1389 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); in e1000_clean_rx_irq_ps()
1396 ((length + l1) <= adapter->rx_ps_bsize0)) { in e1000_clean_rx_irq_ps()
1397 ps_page = &buffer_info->ps_pages[0]; in e1000_clean_rx_irq_ps()
1399 dma_sync_single_for_cpu(&pdev->dev, in e1000_clean_rx_irq_ps()
1400 ps_page->dma, in e1000_clean_rx_irq_ps()
1404 page_address(ps_page->page), l1); in e1000_clean_rx_irq_ps()
1405 dma_sync_single_for_device(&pdev->dev, in e1000_clean_rx_irq_ps()
1406 ps_page->dma, in e1000_clean_rx_irq_ps()
1411 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { in e1000_clean_rx_irq_ps()
1412 if (!(netdev->features & NETIF_F_RXFCS)) in e1000_clean_rx_irq_ps()
1413 l1 -= 4; in e1000_clean_rx_irq_ps()
1422 length = le16_to_cpu(rx_desc->wb.upper.length[j]); in e1000_clean_rx_irq_ps()
1426 ps_page = &buffer_info->ps_pages[j]; in e1000_clean_rx_irq_ps()
1427 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, in e1000_clean_rx_irq_ps()
1429 ps_page->dma = 0; in e1000_clean_rx_irq_ps()
1430 skb_fill_page_desc(skb, j, ps_page->page, 0, length); in e1000_clean_rx_irq_ps()
1431 ps_page->page = NULL; in e1000_clean_rx_irq_ps()
1432 skb->len += length; in e1000_clean_rx_irq_ps()
1433 skb->data_len += length; in e1000_clean_rx_irq_ps()
1434 skb->truesize += PAGE_SIZE; in e1000_clean_rx_irq_ps()
1440 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { in e1000_clean_rx_irq_ps()
1441 if (!(netdev->features & NETIF_F_RXFCS)) in e1000_clean_rx_irq_ps()
1442 pskb_trim(skb, skb->len - 4); in e1000_clean_rx_irq_ps()
1446 total_rx_bytes += skb->len; in e1000_clean_rx_irq_ps()
1451 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); in e1000_clean_rx_irq_ps()
1453 if (rx_desc->wb.upper.header_status & in e1000_clean_rx_irq_ps()
1455 adapter->rx_hdr_split++; in e1000_clean_rx_irq_ps()
1458 rx_desc->wb.middle.vlan); in e1000_clean_rx_irq_ps()
1461 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); in e1000_clean_rx_irq_ps()
1462 buffer_info->skb = NULL; in e1000_clean_rx_irq_ps()
1466 adapter->alloc_rx_buf(rx_ring, cleaned_count, in e1000_clean_rx_irq_ps()
1475 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); in e1000_clean_rx_irq_ps()
1477 rx_ring->next_to_clean = i; in e1000_clean_rx_irq_ps()
1481 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); in e1000_clean_rx_irq_ps()
1483 adapter->total_rx_bytes += total_rx_bytes; in e1000_clean_rx_irq_ps()
1484 adapter->total_rx_packets += total_rx_packets; in e1000_clean_rx_irq_ps()
1491 bi->page = NULL; in e1000_consume_page()
1492 skb->len += length; in e1000_consume_page()
1493 skb->data_len += length; in e1000_consume_page()
1494 skb->truesize += PAGE_SIZE; in e1000_consume_page()
1498 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
1509 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_jumbo_rx_irq()
1510 struct net_device *netdev = adapter->netdev; in e1000_clean_jumbo_rx_irq()
1511 struct pci_dev *pdev = adapter->pdev; in e1000_clean_jumbo_rx_irq()
1521 i = rx_ring->next_to_clean; in e1000_clean_jumbo_rx_irq()
1523 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_jumbo_rx_irq()
1524 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_jumbo_rx_irq()
1534 skb = buffer_info->skb; in e1000_clean_jumbo_rx_irq()
1535 buffer_info->skb = NULL; in e1000_clean_jumbo_rx_irq()
1538 if (i == rx_ring->count) in e1000_clean_jumbo_rx_irq()
1543 next_buffer = &rx_ring->buffer_info[i]; in e1000_clean_jumbo_rx_irq()
1547 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, in e1000_clean_jumbo_rx_irq()
1549 buffer_info->dma = 0; in e1000_clean_jumbo_rx_irq()
1551 length = le16_to_cpu(rx_desc->wb.upper.length); in e1000_clean_jumbo_rx_irq()
1553		/* the errors field is only valid for DD + EOP descriptors */ in e1000_clean_jumbo_rx_irq()
1556 !(netdev->features & NETIF_F_RXALL)))) { in e1000_clean_jumbo_rx_irq()
1558 buffer_info->skb = skb; in e1000_clean_jumbo_rx_irq()
1560 if (rx_ring->rx_skb_top) in e1000_clean_jumbo_rx_irq()
1561 dev_kfree_skb_irq(rx_ring->rx_skb_top); in e1000_clean_jumbo_rx_irq()
1562 rx_ring->rx_skb_top = NULL; in e1000_clean_jumbo_rx_irq()
1565 #define rxtop (rx_ring->rx_skb_top) in e1000_clean_jumbo_rx_irq()
1571 skb_fill_page_desc(rxtop, 0, buffer_info->page, in e1000_clean_jumbo_rx_irq()
1576 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
1577 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1579 /* re-use the skb, only consumed the page */ in e1000_clean_jumbo_rx_irq()
1580 buffer_info->skb = skb; in e1000_clean_jumbo_rx_irq()
1588 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
1589 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1591				/* re-use the current skb, we only consumed the in e1000_clean_jumbo_rx_irq()
1592				 * page in e1000_clean_jumbo_rx_irq()
1593				 */ in e1000_clean_jumbo_rx_irq()
1594 buffer_info->skb = skb; in e1000_clean_jumbo_rx_irq()
1599				/* no chain, got EOP, this buf is the packet in e1000_clean_jumbo_rx_irq()
1600				 * copybreak to save the put_page/alloc_page in e1000_clean_jumbo_rx_irq()
1601				 */ in e1000_clean_jumbo_rx_irq()
1605 page_address(buffer_info->page), in e1000_clean_jumbo_rx_irq()
1607 /* re-use the page, so don't erase in e1000_clean_jumbo_rx_irq()
1608					 * buffer_info->page in e1000_clean_jumbo_rx_irq()
1609					 */ in e1000_clean_jumbo_rx_irq()
1613 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1624 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); in e1000_clean_jumbo_rx_irq()
1627 total_rx_bytes += skb->len; in e1000_clean_jumbo_rx_irq()
1630 /* eth type trans needs skb->data to point to something */ in e1000_clean_jumbo_rx_irq()
1638 rx_desc->wb.upper.vlan); in e1000_clean_jumbo_rx_irq()
1641 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); in e1000_clean_jumbo_rx_irq()
1645 adapter->alloc_rx_buf(rx_ring, cleaned_count, in e1000_clean_jumbo_rx_irq()
1654 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_jumbo_rx_irq()
1656 rx_ring->next_to_clean = i; in e1000_clean_jumbo_rx_irq()
1660 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); in e1000_clean_jumbo_rx_irq()
1662 adapter->total_rx_bytes += total_rx_bytes; in e1000_clean_jumbo_rx_irq()
1663 adapter->total_rx_packets += total_rx_packets; in e1000_clean_jumbo_rx_irq()
1668 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1673 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_rx_ring()
1676 struct pci_dev *pdev = adapter->pdev; in e1000_clean_rx_ring()
1680 for (i = 0; i < rx_ring->count; i++) { in e1000_clean_rx_ring()
1681 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_rx_ring()
1682 if (buffer_info->dma) { in e1000_clean_rx_ring()
1683 if (adapter->clean_rx == e1000_clean_rx_irq) in e1000_clean_rx_ring()
1684 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_ring()
1685 adapter->rx_buffer_len, in e1000_clean_rx_ring()
1687 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) in e1000_clean_rx_ring()
1688 dma_unmap_page(&pdev->dev, buffer_info->dma, in e1000_clean_rx_ring()
1690 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) in e1000_clean_rx_ring()
1691 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_ring()
1692 adapter->rx_ps_bsize0, in e1000_clean_rx_ring()
1694 buffer_info->dma = 0; in e1000_clean_rx_ring()
1697 if (buffer_info->page) { in e1000_clean_rx_ring()
1698 put_page(buffer_info->page); in e1000_clean_rx_ring()
1699 buffer_info->page = NULL; in e1000_clean_rx_ring()
1702 if (buffer_info->skb) { in e1000_clean_rx_ring()
1703 dev_kfree_skb(buffer_info->skb); in e1000_clean_rx_ring()
1704 buffer_info->skb = NULL; in e1000_clean_rx_ring()
1708 ps_page = &buffer_info->ps_pages[j]; in e1000_clean_rx_ring()
1709 if (!ps_page->page) in e1000_clean_rx_ring()
1711 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, in e1000_clean_rx_ring()
1713 ps_page->dma = 0; in e1000_clean_rx_ring()
1714 put_page(ps_page->page); in e1000_clean_rx_ring()
1715 ps_page->page = NULL; in e1000_clean_rx_ring()
1720 if (rx_ring->rx_skb_top) { in e1000_clean_rx_ring()
1721 dev_kfree_skb(rx_ring->rx_skb_top); in e1000_clean_rx_ring()
1722 rx_ring->rx_skb_top = NULL; in e1000_clean_rx_ring()
1726 memset(rx_ring->desc, 0, rx_ring->size); in e1000_clean_rx_ring()
1728 rx_ring->next_to_clean = 0; in e1000_clean_rx_ring()
1729 rx_ring->next_to_use = 0; in e1000_clean_rx_ring()
1730 adapter->flags2 &= ~FLAG2_IS_DISCARDING; in e1000_clean_rx_ring()
1739 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000e_downshift_workaround()
1742 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); in e1000e_downshift_workaround()
1746 * e1000_intr_msi - Interrupt Handler
1754 struct e1000_hw *hw = &adapter->hw; in e1000_intr_msi()
1759 hw->mac.get_link_status = true; in e1000_intr_msi()
1760		/* ICH8 workaround-- Call gig speed drop workaround on cable in e1000_intr_msi()
1761		 * disconnect (LSC) before accessing any PHY registers in e1000_intr_msi()
1762		 */ in e1000_intr_msi()
1763 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && in e1000_intr_msi()
1765 schedule_work(&adapter->downshift_task); in e1000_intr_msi()
1767 /* 80003ES2LAN workaround-- For packet buffer work-around on in e1000_intr_msi()
1768		 * link down event; disable receives here in the ISR and reset in e1000_intr_msi()
1769		 * adapter in watchdog in e1000_intr_msi()
1770		 */ in e1000_intr_msi()
1771		if (netif_carrier_ok(netdev) && in e1000_intr_msi()
1772 adapter->flags & FLAG_RX_NEEDS_RESTART) { in e1000_intr_msi()
1773 /* disable receives */ in e1000_intr_msi()
1777 adapter->flags |= FLAG_RESTART_NOW; in e1000_intr_msi()
1780 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr_msi()
1781 mod_timer(&adapter->watchdog_timer, jiffies + 1); in e1000_intr_msi()
1785 if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { in e1000_intr_msi()
1788 adapter->corr_errors += in e1000_intr_msi()
1790 adapter->uncorr_errors += in e1000_intr_msi()
1795 schedule_work(&adapter->reset_task); in e1000_intr_msi()
1801 if (napi_schedule_prep(&adapter->napi)) { in e1000_intr_msi()
1802 adapter->total_tx_bytes = 0; in e1000_intr_msi()
1803 adapter->total_tx_packets = 0; in e1000_intr_msi()
1804 adapter->total_rx_bytes = 0; in e1000_intr_msi()
1805 adapter->total_rx_packets = 0; in e1000_intr_msi()
1806 __napi_schedule(&adapter->napi); in e1000_intr_msi()
1813 * e1000_intr - Interrupt Handler
1821 struct e1000_hw *hw = &adapter->hw; in e1000_intr()
1824 if (!icr || test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr()
1827	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is in e1000_intr()
1828	 * not set, then the adapter didn't send an interrupt in e1000_intr()
1829	 */ in e1000_intr()
1833	/* Interrupt Auto-Mask...upon reading ICR, in e1000_intr()
1834	 * interrupts are masked. No need for the in e1000_intr()
1835	 * IMC write in e1000_intr()
1836	 */ in e1000_intr()
1839 hw->mac.get_link_status = true; in e1000_intr()
1840		/* ICH8 workaround-- Call gig speed drop workaround on cable in e1000_intr()
1841		 * disconnect (LSC) before accessing any PHY registers in e1000_intr()
1842		 */ in e1000_intr()
1843 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && in e1000_intr()
1845 schedule_work(&adapter->downshift_task); in e1000_intr()
1847 /* 80003ES2LAN workaround-- in e1000_intr()
1848 * For packet buffer work-around on link down event; in e1000_intr()
1849		 * disable receives here in the ISR and in e1000_intr()
1850		 * reset adapter in watchdog in e1000_intr()
1851		 */ in e1000_intr()
1852		if (netif_carrier_ok(netdev) && in e1000_intr()
1853 (adapter->flags & FLAG_RX_NEEDS_RESTART)) { in e1000_intr()
1854 /* disable receives */ in e1000_intr()
1857 adapter->flags |= FLAG_RESTART_NOW; in e1000_intr()
1860 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr()
1861 mod_timer(&adapter->watchdog_timer, jiffies + 1); in e1000_intr()
1865 if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { in e1000_intr()
1868 adapter->corr_errors += in e1000_intr()
1870 adapter->uncorr_errors += in e1000_intr()
1875 schedule_work(&adapter->reset_task); in e1000_intr()
1881 if (napi_schedule_prep(&adapter->napi)) { in e1000_intr()
1882 adapter->total_tx_bytes = 0; in e1000_intr()
1883 adapter->total_tx_packets = 0; in e1000_intr()
1884 adapter->total_rx_bytes = 0; in e1000_intr()
1885 adapter->total_rx_packets = 0; in e1000_intr()
1886 __napi_schedule(&adapter->napi); in e1000_intr()
1896 struct e1000_hw *hw = &adapter->hw; in e1000_msix_other()
1899 if (icr & adapter->eiac_mask) in e1000_msix_other()
1900 ew32(ICS, (icr & adapter->eiac_mask)); in e1000_msix_other()
1903 hw->mac.get_link_status = true; in e1000_msix_other()
1905 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_msix_other()
1906 mod_timer(&adapter->watchdog_timer, jiffies + 1); in e1000_msix_other()
1909 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_msix_other()
1919 struct e1000_hw *hw = &adapter->hw; in e1000_intr_msix_tx()
1920 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_intr_msix_tx()
1922 adapter->total_tx_bytes = 0; in e1000_intr_msix_tx()
1923 adapter->total_tx_packets = 0; in e1000_intr_msix_tx()
1927 ew32(ICS, tx_ring->ims_val); in e1000_intr_msix_tx()
1929 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr_msix_tx()
1930 ew32(IMS, adapter->tx_ring->ims_val); in e1000_intr_msix_tx()
1939 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_intr_msix_rx()
1944 if (rx_ring->set_itr) { in e1000_intr_msix_rx()
1945 u32 itr = rx_ring->itr_val ? in e1000_intr_msix_rx()
1946 1000000000 / (rx_ring->itr_val * 256) : 0; in e1000_intr_msix_rx()
1948 writel(itr, rx_ring->itr_register); in e1000_intr_msix_rx()
1949 rx_ring->set_itr = 0; in e1000_intr_msix_rx()
1952 if (napi_schedule_prep(&adapter->napi)) { in e1000_intr_msix_rx()
1953 adapter->total_rx_bytes = 0; in e1000_intr_msix_rx()
1954 adapter->total_rx_packets = 0; in e1000_intr_msix_rx()
1955 __napi_schedule(&adapter->napi); in e1000_intr_msix_rx()
1961 * e1000_configure_msix - Configure MSI-X hardware
1965 * generate MSI-X interrupts.
1969 struct e1000_hw *hw = &adapter->hw; in e1000_configure_msix()
1970 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_configure_msix()
1971 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_configure_msix()
1975 adapter->eiac_mask = 0; in e1000_configure_msix()
1977 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ in e1000_configure_msix()
1978 if (hw->mac.type == e1000_82574) { in e1000_configure_msix()
1986 rx_ring->ims_val = E1000_IMS_RXQ0; in e1000_configure_msix()
1987 adapter->eiac_mask |= rx_ring->ims_val; in e1000_configure_msix()
1988 if (rx_ring->itr_val) in e1000_configure_msix()
1989 writel(1000000000 / (rx_ring->itr_val * 256), in e1000_configure_msix()
1990 rx_ring->itr_register); in e1000_configure_msix()
1992 writel(1, rx_ring->itr_register); in e1000_configure_msix()
1996 tx_ring->ims_val = E1000_IMS_TXQ0; in e1000_configure_msix()
1998 if (tx_ring->itr_val) in e1000_configure_msix()
1999 writel(1000000000 / (tx_ring->itr_val * 256), in e1000_configure_msix()
2000 tx_ring->itr_register); in e1000_configure_msix()
2002 writel(1, tx_ring->itr_register); in e1000_configure_msix()
2003 adapter->eiac_mask |= tx_ring->ims_val; in e1000_configure_msix()
2009 if (rx_ring->itr_val) in e1000_configure_msix()
2010 writel(1000000000 / (rx_ring->itr_val * 256), in e1000_configure_msix()
2011 hw->hw_addr + E1000_EITR_82574(vector)); in e1000_configure_msix()
2013 writel(1, hw->hw_addr + E1000_EITR_82574(vector)); in e1000_configure_msix()
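The conversion factor is worth spelling out: itr_val is expressed in interrupts per second, while the 82574 EITR registers count the inter-interrupt gap in 256 ns units, hence 1000000000 / (itr_val * 256). For itr_val = 20000 that is 10^9 / 5120000, approximately 195 units or about 50 us between interrupts; when itr_val is 0 the driver writes 1, the smallest non-zero gap, instead.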
2020 /* enable MSI-X PBA support */ in e1000_configure_msix()
2029 if (adapter->msix_entries) { in e1000e_reset_interrupt_capability()
2030 pci_disable_msix(adapter->pdev); in e1000e_reset_interrupt_capability()
2031 kfree(adapter->msix_entries); in e1000e_reset_interrupt_capability()
2032 adapter->msix_entries = NULL; in e1000e_reset_interrupt_capability()
2033 } else if (adapter->flags & FLAG_MSI_ENABLED) { in e1000e_reset_interrupt_capability()
2034 pci_disable_msi(adapter->pdev); in e1000e_reset_interrupt_capability()
2035 adapter->flags &= ~FLAG_MSI_ENABLED; in e1000e_reset_interrupt_capability()
2040 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
2051 switch (adapter->int_mode) { in e1000e_set_interrupt_capability()
2053 if (adapter->flags & FLAG_HAS_MSIX) { in e1000e_set_interrupt_capability()
2054 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ in e1000e_set_interrupt_capability()
2055 adapter->msix_entries = kcalloc(adapter->num_vectors, in e1000e_set_interrupt_capability()
2059			if (adapter->msix_entries) { in e1000e_set_interrupt_capability()
2060				struct e1000_adapter *a = adapter; in e1000e_set_interrupt_capability()
2062 for (i = 0; i < adapter->num_vectors; i++) in e1000e_set_interrupt_capability()
2063 adapter->msix_entries[i].entry = i; in e1000e_set_interrupt_capability()
2065 err = pci_enable_msix_range(a->pdev, in e1000e_set_interrupt_capability()
2066 a->msix_entries, in e1000e_set_interrupt_capability()
2067 a->num_vectors, in e1000e_set_interrupt_capability()
2068 a->num_vectors); in e1000e_set_interrupt_capability()
2072 /* MSI-X failed, so fall through and try MSI */ in e1000e_set_interrupt_capability()
2073 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n"); in e1000e_set_interrupt_capability()
2076 adapter->int_mode = E1000E_INT_MODE_MSI; in e1000e_set_interrupt_capability()
2079 if (!pci_enable_msi(adapter->pdev)) { in e1000e_set_interrupt_capability()
2080 adapter->flags |= FLAG_MSI_ENABLED; in e1000e_set_interrupt_capability()
2082 adapter->int_mode = E1000E_INT_MODE_LEGACY; in e1000e_set_interrupt_capability()
2092 adapter->num_vectors = 1; in e1000e_set_interrupt_capability()
2096 * e1000_request_msix - Initialize MSI-X interrupts
2099 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
2104 struct net_device *netdev = adapter->netdev; in e1000_request_msix()
2107 if (strlen(netdev->name) < (IFNAMSIZ - 5)) in e1000_request_msix()
2108 snprintf(adapter->rx_ring->name, in e1000_request_msix()
2109 sizeof(adapter->rx_ring->name) - 1, in e1000_request_msix()
2110 "%.14s-rx-0", netdev->name); in e1000_request_msix()
2112 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); in e1000_request_msix()
2113 err = request_irq(adapter->msix_entries[vector].vector, in e1000_request_msix()
2114 e1000_intr_msix_rx, 0, adapter->rx_ring->name, in e1000_request_msix()
2118 adapter->rx_ring->itr_register = adapter->hw.hw_addr + in e1000_request_msix()
2120 adapter->rx_ring->itr_val = adapter->itr; in e1000_request_msix()
2123 if (strlen(netdev->name) < (IFNAMSIZ - 5)) in e1000_request_msix()
2124 snprintf(adapter->tx_ring->name, in e1000_request_msix()
2125 sizeof(adapter->tx_ring->name) - 1, in e1000_request_msix()
2126 "%.14s-tx-0", netdev->name); in e1000_request_msix()
2128 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); in e1000_request_msix()
2129 err = request_irq(adapter->msix_entries[vector].vector, in e1000_request_msix()
2130 e1000_intr_msix_tx, 0, adapter->tx_ring->name, in e1000_request_msix()
2134 adapter->tx_ring->itr_register = adapter->hw.hw_addr + in e1000_request_msix()
2136 adapter->tx_ring->itr_val = adapter->itr; in e1000_request_msix()
2139 err = request_irq(adapter->msix_entries[vector].vector, in e1000_request_msix()
2140 e1000_msix_other, 0, netdev->name, netdev); in e1000_request_msix()
2150 * e1000_request_irq - initialize interrupts
2158 struct net_device *netdev = adapter->netdev; in e1000_request_irq()
2161 if (adapter->msix_entries) { in e1000_request_irq()
2167 adapter->int_mode = E1000E_INT_MODE_MSI; in e1000_request_irq()
2170 if (adapter->flags & FLAG_MSI_ENABLED) { in e1000_request_irq()
2171 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, in e1000_request_irq()
2172 netdev->name, netdev); in e1000_request_irq()
2178 adapter->int_mode = E1000E_INT_MODE_LEGACY; in e1000_request_irq()
2181 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, in e1000_request_irq()
2182 netdev->name, netdev); in e1000_request_irq()
2191 struct net_device *netdev = adapter->netdev; in e1000_free_irq()
2193 if (adapter->msix_entries) { in e1000_free_irq()
2196 free_irq(adapter->msix_entries[vector].vector, netdev); in e1000_free_irq()
2199 free_irq(adapter->msix_entries[vector].vector, netdev); in e1000_free_irq()
2203 free_irq(adapter->msix_entries[vector].vector, netdev); in e1000_free_irq()
2207 free_irq(adapter->pdev->irq, netdev); in e1000_free_irq()
2211 * e1000_irq_disable - Mask off interrupt generation on the NIC
2216 struct e1000_hw *hw = &adapter->hw; in e1000_irq_disable()
2219 if (adapter->msix_entries) in e1000_irq_disable()
2223 if (adapter->msix_entries) { in e1000_irq_disable()
2226 for (i = 0; i < adapter->num_vectors; i++) in e1000_irq_disable()
2227 synchronize_irq(adapter->msix_entries[i].vector); in e1000_irq_disable()
2229 synchronize_irq(adapter->pdev->irq); in e1000_irq_disable()
2234 * e1000_irq_enable - Enable default interrupt generation settings
2239 struct e1000_hw *hw = &adapter->hw; in e1000_irq_enable()
2241 if (adapter->msix_entries) { in e1000_irq_enable()
2242 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); in e1000_irq_enable()
2243 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | in e1000_irq_enable()
2245 } else if (hw->mac.type >= e1000_pch_lpt) { in e1000_irq_enable()
2254 * e1000e_get_hw_control - get control of the h/w from f/w
2264 struct e1000_hw *hw = &adapter->hw; in e1000e_get_hw_control()
2269 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { in e1000e_get_hw_control()
2272 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { in e1000e_get_hw_control()
2279 * e1000e_release_hw_control - release control of the h/w to f/w
2290 struct e1000_hw *hw = &adapter->hw; in e1000e_release_hw_control()
2295 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { in e1000e_release_hw_control()
2298 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { in e1000e_release_hw_control()
2305 * e1000_alloc_ring_dma - allocate memory for a ring structure
2312 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_ring_dma()
2314 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, in e1000_alloc_ring_dma()
2316 if (!ring->desc) in e1000_alloc_ring_dma()
2317 return -ENOMEM; in e1000_alloc_ring_dma()
2323 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2330 struct e1000_adapter *adapter = tx_ring->adapter; in e1000e_setup_tx_resources()
2331 int err = -ENOMEM, size; in e1000e_setup_tx_resources()
2333 size = sizeof(struct e1000_buffer) * tx_ring->count; in e1000e_setup_tx_resources()
2334 tx_ring->buffer_info = vzalloc(size); in e1000e_setup_tx_resources()
2335 if (!tx_ring->buffer_info) in e1000e_setup_tx_resources()
2339 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); in e1000e_setup_tx_resources()
2340 tx_ring->size = ALIGN(tx_ring->size, 4096); in e1000e_setup_tx_resources()
2346 tx_ring->next_to_use = 0; in e1000e_setup_tx_resources()
2347 tx_ring->next_to_clean = 0; in e1000e_setup_tx_resources()
2351 vfree(tx_ring->buffer_info); in e1000e_setup_tx_resources()
2357 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2364 struct e1000_adapter *adapter = rx_ring->adapter; in e1000e_setup_rx_resources()
2366 int i, size, desc_len, err = -ENOMEM; in e1000e_setup_rx_resources()
2368 size = sizeof(struct e1000_buffer) * rx_ring->count; in e1000e_setup_rx_resources()
2369 rx_ring->buffer_info = vzalloc(size); in e1000e_setup_rx_resources()
2370 if (!rx_ring->buffer_info) in e1000e_setup_rx_resources()
2373 for (i = 0; i < rx_ring->count; i++) { in e1000e_setup_rx_resources()
2374 buffer_info = &rx_ring->buffer_info[i]; in e1000e_setup_rx_resources()
2375 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, in e1000e_setup_rx_resources()
2378 if (!buffer_info->ps_pages) in e1000e_setup_rx_resources()
2385 rx_ring->size = rx_ring->count * desc_len; in e1000e_setup_rx_resources()
2386 rx_ring->size = ALIGN(rx_ring->size, 4096); in e1000e_setup_rx_resources()
2392 rx_ring->next_to_clean = 0; in e1000e_setup_rx_resources()
2393 rx_ring->next_to_use = 0; in e1000e_setup_rx_resources()
2394 rx_ring->rx_skb_top = NULL; in e1000e_setup_rx_resources()
2399 for (i = 0; i < rx_ring->count; i++) { in e1000e_setup_rx_resources()
2400 buffer_info = &rx_ring->buffer_info[i]; in e1000e_setup_rx_resources()
2401 kfree(buffer_info->ps_pages); in e1000e_setup_rx_resources()
2404 vfree(rx_ring->buffer_info); in e1000e_setup_rx_resources()
2410 * e1000_clean_tx_ring - Free Tx Buffers
2415 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_clean_tx_ring()
2420 for (i = 0; i < tx_ring->count; i++) { in e1000_clean_tx_ring()
2421 buffer_info = &tx_ring->buffer_info[i]; in e1000_clean_tx_ring()
2425 netdev_reset_queue(adapter->netdev); in e1000_clean_tx_ring()
2426 size = sizeof(struct e1000_buffer) * tx_ring->count; in e1000_clean_tx_ring()
2427 memset(tx_ring->buffer_info, 0, size); in e1000_clean_tx_ring()
2429 memset(tx_ring->desc, 0, tx_ring->size); in e1000_clean_tx_ring()
2431 tx_ring->next_to_use = 0; in e1000_clean_tx_ring()
2432 tx_ring->next_to_clean = 0; in e1000_clean_tx_ring()
2436 * e1000e_free_tx_resources - Free Tx Resources per Queue
2443 struct e1000_adapter *adapter = tx_ring->adapter; in e1000e_free_tx_resources()
2444 struct pci_dev *pdev = adapter->pdev; in e1000e_free_tx_resources()
2448 vfree(tx_ring->buffer_info); in e1000e_free_tx_resources()
2449 tx_ring->buffer_info = NULL; in e1000e_free_tx_resources()
2451 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, in e1000e_free_tx_resources()
2452 tx_ring->dma); in e1000e_free_tx_resources()
2453 tx_ring->desc = NULL; in e1000e_free_tx_resources()
2457 * e1000e_free_rx_resources - Free Rx Resources
2464 struct e1000_adapter *adapter = rx_ring->adapter; in e1000e_free_rx_resources()
2465 struct pci_dev *pdev = adapter->pdev; in e1000e_free_rx_resources()
2470 for (i = 0; i < rx_ring->count; i++) in e1000e_free_rx_resources()
2471 kfree(rx_ring->buffer_info[i].ps_pages); in e1000e_free_rx_resources()
2473 vfree(rx_ring->buffer_info); in e1000e_free_rx_resources()
2474 rx_ring->buffer_info = NULL; in e1000e_free_rx_resources()
2476 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, in e1000e_free_rx_resources()
2477 rx_ring->dma); in e1000e_free_rx_resources()
2478 rx_ring->desc = NULL; in e1000e_free_rx_resources()
2482 * e1000_update_itr - update the dynamic ITR value based on statistics
2483 * @itr_setting: current adapter->itr
2542 u32 new_itr = adapter->itr; in e1000_set_itr()
2544 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ in e1000_set_itr()
2545 if (adapter->link_speed != SPEED_1000) { in e1000_set_itr()
2550 if (adapter->flags2 & FLAG2_DISABLE_AIM) { in e1000_set_itr()
2555 adapter->tx_itr = e1000_update_itr(adapter->tx_itr, in e1000_set_itr()
2556 adapter->total_tx_packets, in e1000_set_itr()
2557 adapter->total_tx_bytes); in e1000_set_itr()
2559 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) in e1000_set_itr()
2560 adapter->tx_itr = low_latency; in e1000_set_itr()
2562 adapter->rx_itr = e1000_update_itr(adapter->rx_itr, in e1000_set_itr()
2563 adapter->total_rx_packets, in e1000_set_itr()
2564 adapter->total_rx_bytes); in e1000_set_itr()
2566 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) in e1000_set_itr()
2567 adapter->rx_itr = low_latency; in e1000_set_itr()
2569 current_itr = max(adapter->rx_itr, adapter->tx_itr); in e1000_set_itr()
2587 if (new_itr != adapter->itr) { in e1000_set_itr()
2592 new_itr = new_itr > adapter->itr ? in e1000_set_itr()
2593 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; in e1000_set_itr()
2594 adapter->itr = new_itr; in e1000_set_itr()
2595 adapter->rx_ring->itr_val = new_itr; in e1000_set_itr()
2596 if (adapter->msix_entries) in e1000_set_itr()
2597 adapter->rx_ring->set_itr = 1; in e1000_set_itr()
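Increases in the target rate are deliberately damped by min(adapter->itr + (new_itr >> 2), new_itr), while decreases take effect immediately. Worked example: moving from adapter->itr = 4000 toward a computed 20000 steps through 9000, 14000, 19000 and then 20000 on successive passes, so the interrupt rate ramps up gradually rather than jumping.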
2604 * e1000e_write_itr - write the ITR value to the appropriate registers
2608 * e1000e_write_itr determines if the adapter is in MSI-X mode
2614 struct e1000_hw *hw = &adapter->hw; in e1000e_write_itr()
2617 if (adapter->msix_entries) { in e1000e_write_itr()
2620 for (vector = 0; vector < adapter->num_vectors; vector++) in e1000e_write_itr()
2621 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector)); in e1000e_write_itr()
2628 * e1000_alloc_queues - Allocate memory for all rings
2635 adapter->tx_ring = kzalloc(size, GFP_KERNEL); in e1000_alloc_queues()
2636 if (!adapter->tx_ring) in e1000_alloc_queues()
2638 adapter->tx_ring->count = adapter->tx_ring_count; in e1000_alloc_queues()
2639 adapter->tx_ring->adapter = adapter; in e1000_alloc_queues()
2641 adapter->rx_ring = kzalloc(size, GFP_KERNEL); in e1000_alloc_queues()
2642 if (!adapter->rx_ring) in e1000_alloc_queues()
2644 adapter->rx_ring->count = adapter->rx_ring_count; in e1000_alloc_queues()
2645 adapter->rx_ring->adapter = adapter; in e1000_alloc_queues()
2650 kfree(adapter->rx_ring); in e1000_alloc_queues()
2651 kfree(adapter->tx_ring); in e1000_alloc_queues()
2652 return -ENOMEM; in e1000_alloc_queues()
2656 * e1000e_poll - NAPI Rx polling callback
2664 struct e1000_hw *hw = &adapter->hw; in e1000e_poll()
2665 struct net_device *poll_dev = adapter->netdev; in e1000e_poll()
2670 if (!adapter->msix_entries || in e1000e_poll()
2671 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) in e1000e_poll()
2672 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); in e1000e_poll()
2674 adapter->clean_rx(adapter->rx_ring, &work_done, budget); in e1000e_poll()
2679 /* Exit the polling mode, but don't re-enable interrupts if stack might in e1000e_poll()
2680 * poll us due to busy-polling in e1000e_poll()
2683 if (adapter->itr_setting & 3) in e1000e_poll()
2685 if (!test_bit(__E1000_DOWN, &adapter->state)) { in e1000e_poll()
2686 if (adapter->msix_entries) in e1000e_poll()
2687 ew32(IMS, adapter->rx_ring->ims_val); in e1000e_poll()
2700 struct e1000_hw *hw = &adapter->hw; in e1000_vlan_rx_add_vid()
2704 if ((adapter->hw.mng_cookie.status & in e1000_vlan_rx_add_vid()
2706 (vid == adapter->mng_vlan_id)) in e1000_vlan_rx_add_vid()
2710 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000_vlan_rx_add_vid()
2714 hw->mac.ops.write_vfta(hw, index, vfta); in e1000_vlan_rx_add_vid()
2717 set_bit(vid, adapter->active_vlans); in e1000_vlan_rx_add_vid()
2726 struct e1000_hw *hw = &adapter->hw; in e1000_vlan_rx_kill_vid()
2729 if ((adapter->hw.mng_cookie.status & in e1000_vlan_rx_kill_vid()
2731 (vid == adapter->mng_vlan_id)) { in e1000_vlan_rx_kill_vid()
2738 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000_vlan_rx_kill_vid()
2742 hw->mac.ops.write_vfta(hw, index, vfta); in e1000_vlan_rx_kill_vid()
2745 clear_bit(vid, adapter->active_vlans); in e1000_vlan_rx_kill_vid()
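Both the add and kill paths funnel into write_vfta() with an index and mask derived from the VLAN ID (the derivation lines are elided here). The VLAN Filter Table Array covers all 4096 VIDs with 128 32-bit registers: VID bits [11:5] pick the register, bits [4:0] pick the bit. A standalone illustration of that indexing:

#include <stdint.h>
#include <stdio.h>

static uint32_t vfta[128];	/* software model of the hardware table */

/* Set or clear the filter bit for one VLAN ID. */
static void vfta_set(uint16_t vid, int on)
{
	uint32_t index = (vid >> 5) & 0x7F;
	uint32_t bit = 1u << (vid & 0x1F);

	if (on)
		vfta[index] |= bit;
	else
		vfta[index] &= ~bit;
}

int main(void)
{
	vfta_set(100, 1);
	printf("vid 100 -> vfta[%d] bit %d\n", 100 >> 5, 100 & 0x1F);
	return 0;
}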
2751 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2756 struct net_device *netdev = adapter->netdev; in e1000e_vlan_filter_disable()
2757 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_filter_disable()
2760 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000e_vlan_filter_disable()
2761 /* disable VLAN receive filtering */ in e1000e_vlan_filter_disable()
2766 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { in e1000e_vlan_filter_disable()
2768 adapter->mng_vlan_id); in e1000e_vlan_filter_disable()
2769 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; in e1000e_vlan_filter_disable()
2775 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2780 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_filter_enable()
2783 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000e_vlan_filter_enable()
2793 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2798 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_strip_disable()
2801 /* disable VLAN tag insert/strip */ in e1000e_vlan_strip_disable()
2808 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2813 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_strip_enable()
2824 struct net_device *netdev = adapter->netdev; in e1000_update_mng_vlan()
2825 u16 vid = adapter->hw.mng_cookie.vlan_id; in e1000_update_mng_vlan()
2826 u16 old_vid = adapter->mng_vlan_id; in e1000_update_mng_vlan()
2828 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { in e1000_update_mng_vlan()
2830 adapter->mng_vlan_id = vid; in e1000_update_mng_vlan()
2841 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); in e1000_restore_vlan()
2843 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in e1000_restore_vlan()
2844 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); in e1000_restore_vlan()
2849 struct e1000_hw *hw = &adapter->hw; in e1000_init_manageability_pt()
2852 if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) in e1000_init_manageability_pt()
2864 switch (hw->mac.type) { in e1000_init_manageability_pt()
2870 /* Check if IPMI pass-through decision filter already exists; in e1000_init_manageability_pt()
2901 e_warn("Unable to create IPMI pass-through filter\n"); in e1000_init_manageability_pt()
2910 * e1000_configure_tx - Configure Transmit Unit after Reset
2917 struct e1000_hw *hw = &adapter->hw; in e1000_configure_tx()
2918 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_configure_tx()
2923 tdba = tx_ring->dma; in e1000_configure_tx()
2924 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); in e1000_configure_tx()
2930 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); in e1000_configure_tx()
2931 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); in e1000_configure_tx()
2933 writel(0, tx_ring->head); in e1000_configure_tx()
2934 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_configure_tx()
2937 writel(0, tx_ring->tail); in e1000_configure_tx()
2940 ew32(TIDV, adapter->tx_int_delay); in e1000_configure_tx()
2942 ew32(TADV, adapter->tx_abs_int_delay); in e1000_configure_tx()
2944 if (adapter->flags2 & FLAG2_DMA_BURST) { in e1000_configure_tx()
2970 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { in e1000_configure_tx()
2981 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { in e1000_configure_tx()
2990 /* Setup Transmit Descriptor Settings for eop descriptor */ in e1000_configure_tx()
2991 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; in e1000_configure_tx()
2994 if (adapter->tx_int_delay) in e1000_configure_tx()
2995 adapter->txd_cmd |= E1000_TXD_CMD_IDE; in e1000_configure_tx()
2998 adapter->txd_cmd |= E1000_TXD_CMD_RS; in e1000_configure_tx()
3002 hw->mac.ops.config_collision_dist(hw); in e1000_configure_tx()
3005 if (hw->mac.type == e1000_pch_spt) { in e1000_configure_tx()
3024 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
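PAGE_USE_COUNT is a ceiling division: the number of whole pages needed to hold S bytes, rounding up whenever S is not page-aligned. An equivalent userspace check (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))

int main(void)
{
	printf("%lu\n", PAGE_USE_COUNT(9000UL));	/* jumbo MTU: 3 pages */
	printf("%lu\n", PAGE_USE_COUNT(4096UL));	/* exactly 1 page */
	return 0;
}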
3027 * e1000_setup_rctl - configure the receive control registers
3032 struct e1000_hw *hw = &adapter->hw; in e1000_setup_rctl()
3036 /* Workaround Si errata on PCHx - configure jumbo frame flow. in e1000_setup_rctl()
3040 if (hw->mac.type >= e1000_pch2lan) { in e1000_setup_rctl()
3043 if (adapter->netdev->mtu > ETH_DATA_LEN) in e1000_setup_rctl()
3049 e_dbg("failed to enable|disable jumbo frame workaround mode\n"); in e1000_setup_rctl()
3057 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); in e1000_setup_rctl()
3063 if (adapter->netdev->mtu <= ETH_DATA_LEN) in e1000_setup_rctl()
3072 if (adapter->flags2 & FLAG2_CRC_STRIPPING) in e1000_setup_rctl()
3075 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ in e1000_setup_rctl()
3076 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { in e1000_setup_rctl()
3095 switch (adapter->rx_buffer_len) { in e1000_setup_rctl()
3117 /* 82571 and greater support packet-split where the protocol in e1000_setup_rctl()
3118 * header is placed in skb->data and the packet data is in e1000_setup_rctl()
3119 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. in e1000_setup_rctl()
3120 * In the case of a non-split, skb->data is linearly filled, in e1000_setup_rctl()
3121 * followed by the page buffers. Therefore, skb->data is in e1000_setup_rctl()
3131 pages = PAGE_USE_COUNT(adapter->netdev->mtu); in e1000_setup_rctl()
3133 adapter->rx_ps_pages = pages; in e1000_setup_rctl()
3135 adapter->rx_ps_pages = 0; in e1000_setup_rctl()
3137 if (adapter->rx_ps_pages) { in e1000_setup_rctl()
3143 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; in e1000_setup_rctl()
3145 switch (adapter->rx_ps_pages) { in e1000_setup_rctl()
3161 if (adapter->netdev->features & NETIF_F_RXALL) { in e1000_setup_rctl()
3169 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ in e1000_setup_rctl()
3179 adapter->flags &= ~FLAG_RESTART_NOW; in e1000_setup_rctl()
3183 * e1000_configure_rx - Configure Receive Unit after Reset
3190 struct e1000_hw *hw = &adapter->hw; in e1000_configure_rx()
3191 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_configure_rx()
3195 if (adapter->rx_ps_pages) { in e1000_configure_rx()
3197 rdlen = rx_ring->count * in e1000_configure_rx()
3199 adapter->clean_rx = e1000_clean_rx_irq_ps; in e1000_configure_rx()
3200 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; in e1000_configure_rx()
3201 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { in e1000_configure_rx()
3202 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); in e1000_configure_rx()
3203 adapter->clean_rx = e1000_clean_jumbo_rx_irq; in e1000_configure_rx()
3204 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; in e1000_configure_rx()
3206 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); in e1000_configure_rx()
3207 adapter->clean_rx = e1000_clean_rx_irq; in e1000_configure_rx()
3208 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; in e1000_configure_rx()
3211 /* disable receives while setting up the descriptors */ in e1000_configure_rx()
3213 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) in e1000_configure_rx()
3218 if (adapter->flags2 & FLAG2_DMA_BURST) { in e1000_configure_rx()
3232 ew32(RDTR, adapter->rx_int_delay); in e1000_configure_rx()
3235 ew32(RADV, adapter->rx_abs_int_delay); in e1000_configure_rx()
3236 if ((adapter->itr_setting != 0) && (adapter->itr != 0)) in e1000_configure_rx()
3237 e1000e_write_itr(adapter, adapter->itr); in e1000_configure_rx()
3240 /* Auto-Mask interrupts upon ICR access */ in e1000_configure_rx()
3249 rdba = rx_ring->dma; in e1000_configure_rx()
3255 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); in e1000_configure_rx()
3256 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); in e1000_configure_rx()
3258 writel(0, rx_ring->head); in e1000_configure_rx()
3259 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_configure_rx()
3262 writel(0, rx_ring->tail); in e1000_configure_rx()
3266 if (adapter->netdev->features & NETIF_F_RXCSUM) in e1000_configure_rx()
3272 /* With jumbo frames, excessive C-state transition latencies result in e1000_configure_rx()
3275 if (adapter->netdev->mtu > ETH_DATA_LEN) { in e1000_configure_rx()
3277 ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 - in e1000_configure_rx()
3278 adapter->max_frame_size) * 8 / 1000; in e1000_configure_rx()
3280 if (adapter->flags & FLAG_IS_ICH) { in e1000_configure_rx()
3286 dev_info(&adapter->pdev->dev, in e1000_configure_rx()
3287 "Some CPU C-states have been disabled in order to enable jumbo frames\n"); in e1000_configure_rx()
3288 cpu_latency_qos_update_request(&adapter->pm_qos_req, lat); in e1000_configure_rx()
3290 cpu_latency_qos_update_request(&adapter->pm_qos_req, in e1000_configure_rx()
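The latency bound computed above is the time the spare Rx FIFO space takes to fill at 1 Gb/s: spare bytes times 8 gives bits, and dividing bits by 1000 gives microseconds at 10^9 bits per second. Any C-state with a longer exit latency risks dropping packets while the CPU wakes. A worked version with illustrative sizes:

#include <stdio.h>

int main(void)
{
	unsigned int rxa_kb = 32;	/* Rx packet-buffer allocation, KB */
	unsigned int max_frame = 9018;	/* jumbo frame size, bytes */
	unsigned int lat_us;

	/* bytes -> bits, then bits / 1000 == microseconds at 1 Gb/s */
	lat_us = (rxa_kb * 1024 - max_frame) * 8 / 1000;
	printf("allowed C-state exit latency: %u us\n", lat_us);	/* 190 */
	return 0;
}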
3299 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3303 * Returns: -ENOMEM on failure
3310 struct e1000_hw *hw = &adapter->hw; in e1000e_write_mc_addr_list()
3317 hw->mac.ops.update_mc_addr_list(hw, NULL, 0); in e1000e_write_mc_addr_list()
3323 return -ENOMEM; in e1000e_write_mc_addr_list()
3328 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); in e1000e_write_mc_addr_list()
3330 hw->mac.ops.update_mc_addr_list(hw, mta_list, i); in e1000e_write_mc_addr_list()
3337 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3341 * Returns: -ENOMEM on failure/insufficient address space
3348 struct e1000_hw *hw = &adapter->hw; in e1000e_write_uc_addr_list()
3352 rar_entries = hw->mac.ops.rar_get_count(hw); in e1000e_write_uc_addr_list()
3355 rar_entries--; in e1000e_write_uc_addr_list()
3358 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) in e1000e_write_uc_addr_list()
3359 rar_entries--; in e1000e_write_uc_addr_list()
3363 return -ENOMEM; in e1000e_write_uc_addr_list()
3376 ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); in e1000e_write_uc_addr_list()
3378 return -ENOMEM; in e1000e_write_uc_addr_list()
3384 for (; rar_entries > 0; rar_entries--) { in e1000e_write_uc_addr_list()
3394 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
3400 * promiscuous mode, and all-multi behavior.
3405 struct e1000_hw *hw = &adapter->hw; in e1000e_set_rx_mode()
3408 if (pm_runtime_suspended(netdev->dev.parent)) in e1000e_set_rx_mode()
3417 if (netdev->flags & IFF_PROMISC) { in e1000e_set_rx_mode()
3424 if (netdev->flags & IFF_ALLMULTI) { in e1000e_set_rx_mode()
3447 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in e1000e_set_rx_mode()
3455 struct e1000_hw *hw = &adapter->hw; in e1000e_setup_rss_hash()
3468 /* Disable raw packet checksumming so that RSS hash is placed in in e1000e_setup_rss_hash()
3486 * e1000e_get_base_timinca - get default SYSTIM time increment attributes
3495 struct e1000_hw *hw = &adapter->hw; in e1000e_get_base_timinca()
3501 if ((hw->mac.type >= e1000_pch_lpt) && in e1000e_get_base_timinca()
3512 switch (hw->mac.type) { in e1000e_get_base_timinca()
3518 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; in e1000e_get_base_timinca()
3526 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; in e1000e_get_base_timinca()
3532 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3540 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3554 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3560 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3569 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3572 return -EINVAL; in e1000e_get_base_timinca()
3582 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
3587 * disable it when requested, although it shouldn't cause any overhead
3600 struct e1000_hw *hw = &adapter->hw; in e1000e_config_hwtstamp()
3609 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) in e1000e_config_hwtstamp()
3610 return -EINVAL; in e1000e_config_hwtstamp()
3612 switch (config->tx_type) { in e1000e_config_hwtstamp()
3619 return -ERANGE; in e1000e_config_hwtstamp()
3622 switch (config->rx_filter) { in e1000e_config_hwtstamp()
3674 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; in e1000e_config_hwtstamp()
3680 * Delay Request messages but not both so fall-through to in e1000e_config_hwtstamp()
3689 config->rx_filter = HWTSTAMP_FILTER_ALL; in e1000e_config_hwtstamp()
3692 return -ERANGE; in e1000e_config_hwtstamp()
3695 adapter->hwtstamp_config = *config; in e1000e_config_hwtstamp()
3697 /* enable/disable Tx h/w time stamping */ in e1000e_config_hwtstamp()
3705 return -EAGAIN; in e1000e_config_hwtstamp()
3708 /* enable/disable Rx h/w time stamping */ in e1000e_config_hwtstamp()
3718 return -EAGAIN; in e1000e_config_hwtstamp()
3745 * e1000_configure - configure the hardware for Rx and Tx
3750 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_configure()
3752 e1000e_set_rx_mode(adapter->netdev); in e1000_configure()
3759 if (adapter->netdev->features & NETIF_F_RXHASH) in e1000_configure()
3763 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); in e1000_configure()
3767 * e1000e_power_up_phy - restore link in case the phy was powered down
3776 if (adapter->hw.phy.ops.power_up) in e1000e_power_up_phy()
3777 adapter->hw.phy.ops.power_up(&adapter->hw); in e1000e_power_up_phy()
3779 adapter->hw.mac.ops.setup_link(&adapter->hw); in e1000e_power_up_phy()
3783 * e1000_power_down_phy - Power down the PHY
3791 if (adapter->hw.phy.ops.power_down) in e1000_power_down_phy()
3792 adapter->hw.phy.ops.power_down(&adapter->hw); in e1000_power_down_phy()
3796 * e1000_flush_tx_ring - remove all descriptors from the tx_ring
3806 struct e1000_hw *hw = &adapter->hw; in e1000_flush_tx_ring()
3807 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_flush_tx_ring()
3815 BUG_ON(tdt != tx_ring->next_to_use); in e1000_flush_tx_ring()
3816 tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use); in e1000_flush_tx_ring()
3817 tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma); in e1000_flush_tx_ring()
3819 tx_desc->lower.data = cpu_to_le32(txd_lower | size); in e1000_flush_tx_ring()
3820 tx_desc->upper.data = 0; in e1000_flush_tx_ring()
3823 tx_ring->next_to_use++; in e1000_flush_tx_ring()
3824 if (tx_ring->next_to_use == tx_ring->count) in e1000_flush_tx_ring()
3825 tx_ring->next_to_use = 0; in e1000_flush_tx_ring()
3826 ew32(TDT(0), tx_ring->next_to_use); in e1000_flush_tx_ring()
3831 * e1000_flush_rx_ring - remove all descriptors from the rx_ring
3834 * Mark all descriptors in the RX ring as consumed and disable the rx ring
3839 struct e1000_hw *hw = &adapter->hw; in e1000_flush_rx_ring()
3864 * e1000_flush_desc_rings - remove all descriptors from the descriptor rings
3879 struct e1000_hw *hw = &adapter->hw; in e1000_flush_desc_rings()
3881 /* First, disable MULR fix in FEXTNVM11 */ in e1000_flush_desc_rings()
3887 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, in e1000_flush_desc_rings()
3893 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, in e1000_flush_desc_rings()
3900 * e1000e_systim_reset - reset the timesync registers after a hardware reset
3910 struct ptp_clock_info *info = &adapter->ptp_clock_info; in e1000e_systim_reset()
3911 struct e1000_hw *hw = &adapter->hw; in e1000e_systim_reset()
3916 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) in e1000e_systim_reset()
3919 if (info->adjfine) { in e1000e_systim_reset()
3921 ret_val = info->adjfine(info, adapter->ptp_delta); in e1000e_systim_reset()
3930 dev_warn(&adapter->pdev->dev, in e1000e_systim_reset()
3937 spin_lock_irqsave(&adapter->systim_lock, flags); in e1000e_systim_reset()
3938 timecounter_init(&adapter->tc, &adapter->cc, in e1000e_systim_reset()
3940 spin_unlock_irqrestore(&adapter->systim_lock, flags); in e1000e_systim_reset()
3943 e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); in e1000e_systim_reset()
3947 * e1000e_reset - bring the hardware into a known good state
3951 * require a configuration cycle of the hardware - those cannot be
3957 struct e1000_mac_info *mac = &adapter->hw.mac; in e1000e_reset()
3958 struct e1000_fc_info *fc = &adapter->hw.fc; in e1000e_reset()
3959 struct e1000_hw *hw = &adapter->hw; in e1000e_reset()
3961 u32 pba = adapter->pba; in e1000e_reset()
3967 if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) { in e1000e_reset()
3983 min_tx_space = (adapter->max_frame_size + in e1000e_reset()
3984 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2; in e1000e_reset()
3988 min_rx_space = adapter->max_frame_size; in e1000e_reset()
3997 ((min_tx_space - tx_space) < pba)) { in e1000e_reset()
3998 pba -= min_tx_space - tx_space; in e1000e_reset()
4015 * - 90% of the Rx FIFO size, and in e1000e_reset()
4016 * - the full Rx FIFO size minus one full frame in e1000e_reset()
4018 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) in e1000e_reset()
4019 fc->pause_time = 0xFFFF; in e1000e_reset()
4021 fc->pause_time = E1000_FC_PAUSE_TIME; in e1000e_reset()
4022 fc->send_xon = true; in e1000e_reset()
4023 fc->current_mode = fc->requested_mode; in e1000e_reset()
4025 switch (hw->mac.type) { in e1000e_reset()
4028 if (adapter->netdev->mtu > ETH_DATA_LEN) { in e1000e_reset()
4031 fc->high_water = 0x2800; in e1000e_reset()
4032 fc->low_water = fc->high_water - 8; in e1000e_reset()
4038 ((pba << 10) - adapter->max_frame_size)); in e1000e_reset()
4040 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ in e1000e_reset()
4041 fc->low_water = fc->high_water - 8; in e1000e_reset()
4047 if (adapter->netdev->mtu > ETH_DATA_LEN) { in e1000e_reset()
4048 fc->high_water = 0x3500; in e1000e_reset()
4049 fc->low_water = 0x1500; in e1000e_reset()
4051 fc->high_water = 0x5000; in e1000e_reset()
4052 fc->low_water = 0x3000; in e1000e_reset()
4054 fc->refresh_time = 0x1000; in e1000e_reset()
4066 fc->refresh_time = 0xFFFF; in e1000e_reset()
4067 fc->pause_time = 0xFFFF; in e1000e_reset()
4069 if (adapter->netdev->mtu <= ETH_DATA_LEN) { in e1000e_reset()
4070 fc->high_water = 0x05C20; in e1000e_reset()
4071 fc->low_water = 0x05048; in e1000e_reset()
4077 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; in e1000e_reset()
4078 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; in e1000e_reset()
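In the default case above, XOFF pause frames start when the Rx FIFO passes 90% occupancy and XON resumes traffic once it drops below 80% (the driver additionally masks the values to the registers' 8-byte granularity, omitted here). A worked example with an illustrative buffer allocation:

#include <stdio.h>

int main(void)
{
	unsigned int pba_kb = 20;	/* illustrative Rx allocation, KB */
	unsigned int bytes = pba_kb << 10;

	printf("high_water (XOFF at) = %u bytes\n", bytes * 9 / 10);
	printf("low_water  (XON at)  = %u bytes\n", bytes * 8 / 10);
	return 0;
}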
4087 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, in e1000e_reset()
4090 /* Disable Adaptive Interrupt Moderation if 2 full packets cannot in e1000e_reset()
4093 if (adapter->itr_setting & 0x3) { in e1000e_reset()
4094 if ((adapter->max_frame_size * 2) > (pba << 10)) { in e1000e_reset()
4095 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { in e1000e_reset()
4096 dev_info(&adapter->pdev->dev, in e1000e_reset()
4098 adapter->flags2 |= FLAG2_DISABLE_AIM; in e1000e_reset()
4101 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { in e1000e_reset()
4102 dev_info(&adapter->pdev->dev, in e1000e_reset()
4104 adapter->flags2 &= ~FLAG2_DISABLE_AIM; in e1000e_reset()
4105 adapter->itr = 20000; in e1000e_reset()
4106 e1000e_write_itr(adapter, adapter->itr); in e1000e_reset()
4110 if (hw->mac.type >= e1000_pch_spt) in e1000e_reset()
4113 mac->ops.reset_hw(hw); in e1000e_reset()
4118 if (adapter->flags & FLAG_HAS_AMT) in e1000e_reset()
4123 if (mac->ops.init_hw(hw)) in e1000e_reset()
4137 if (adapter->flags2 & FLAG2_HAS_EEE) { in e1000e_reset()
4141 switch (hw->phy.type) { in e1000e_reset()
4149 dev_err(&adapter->pdev->dev, in e1000e_reset()
4154 ret_val = hw->phy.ops.acquire(hw); in e1000e_reset()
4156 dev_err(&adapter->pdev->dev, in e1000e_reset()
4157 "EEE advertisement - unable to acquire PHY\n"); in e1000e_reset()
4162 hw->dev_spec.ich8lan.eee_disable ? in e1000e_reset()
4163 0 : adapter->eee_advert); in e1000e_reset()
4165 hw->phy.ops.release(hw); in e1000e_reset()
4168 if (!netif_running(adapter->netdev) && in e1000e_reset()
4169 !test_bit(__E1000_TESTING, &adapter->state)) in e1000e_reset()
4174 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && in e1000e_reset()
4175 !(adapter->flags & FLAG_SMART_POWER_DOWN)) { in e1000e_reset()
4185 if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) { in e1000e_reset()
4202 * e1000e_trigger_lsc - trigger an LSC interrupt
4209 struct e1000_hw *hw = &adapter->hw; in e1000e_trigger_lsc()
4211 if (adapter->msix_entries) in e1000e_trigger_lsc()
4222 clear_bit(__E1000_DOWN, &adapter->state); in e1000e_up()
4224 if (adapter->msix_entries) in e1000e_up()
4235 struct e1000_hw *hw = &adapter->hw; in e1000e_flush_descriptors()
4237 if (!(adapter->flags2 & FLAG2_DMA_BURST)) in e1000e_flush_descriptors()
4241 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000e_flush_descriptors()
4242 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); in e1000e_flush_descriptors()
4250 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000e_flush_descriptors()
4251 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); in e1000e_flush_descriptors()
4260 * e1000e_down - quiesce the device and optionally reset the hardware
4266 struct net_device *netdev = adapter->netdev; in e1000e_down()
4267 struct e1000_hw *hw = &adapter->hw; in e1000e_down()
4273 set_bit(__E1000_DOWN, &adapter->state); in e1000e_down()
4277 /* disable receives in the hardware */ in e1000e_down()
4279 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) in e1000e_down()
4285 /* disable transmits in the hardware */ in e1000e_down()
4296 napi_synchronize(&adapter->napi); in e1000e_down()
4298 del_timer_sync(&adapter->watchdog_timer); in e1000e_down()
4299 del_timer_sync(&adapter->phy_info_timer); in e1000e_down()
4301 spin_lock(&adapter->stats64_lock); in e1000e_down()
4303 spin_unlock(&adapter->stats64_lock); in e1000e_down()
4307 adapter->link_speed = 0; in e1000e_down()
4308 adapter->link_duplex = 0; in e1000e_down()
4310 /* Disable Si errata workaround on PCHx for jumbo frame flow */ in e1000e_down()
4311 if ((hw->mac.type >= e1000_pch2lan) && in e1000e_down()
4312 (adapter->netdev->mtu > ETH_DATA_LEN) && in e1000e_down()
4314 e_dbg("failed to disable jumbo frame workaround mode\n"); in e1000e_down()
4316 if (!pci_channel_offline(adapter->pdev)) { in e1000e_down()
4319 else if (hw->mac.type >= e1000_pch_spt) in e1000e_down()
4322 e1000_clean_tx_ring(adapter->tx_ring); in e1000e_down()
4323 e1000_clean_rx_ring(adapter->rx_ring); in e1000e_down()
4329 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) in e1000e_reinit_locked()
4333 clear_bit(__E1000_RESETTING, &adapter->state); in e1000e_reinit_locked()
4337 * e1000e_sanitize_systim - sanitize raw cycle counter reads
4363 time_delta = systim_next - systim; in e1000e_sanitize_systim()
4378 * e1000e_read_systim - read SYSTIM register
4386 struct e1000_hw *hw = &adapter->hw; in e1000e_read_systim()
4393 * to fix that we test for overflow and if true, we re-read systime. in e1000e_read_systim()
4400 if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) { in e1000e_read_systim()
4415 if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) in e1000e_read_systim()
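The overflow test above guards a classic torn read: SYSTIM is read as two 32-bit halves, and a carry between the two reads leaves the (high, low) pair inconsistent. When the low half is close enough to wrap within one increment, it is re-read, and if it did wrap, the high half is refreshed too. A self-contained model of that guard (the increment mask value below is an assumption):

#include <stdint.h>
#include <stdio.h>

static uint64_t hw = 0xfffffffc0ULL;	/* counter near a low-half wrap */

static uint32_t read_low(void)  { hw += 32; return (uint32_t)hw; }
static uint32_t read_high(void) { return (uint32_t)(hw >> 32); }

static uint64_t read_systim(uint32_t max_inc)
{
	uint32_t lo = read_low();
	uint32_t hi = read_high();

	if (lo >= UINT32_MAX - max_inc) {	/* wrap between reads possible */
		uint32_t lo2 = read_low();

		if (lo > lo2) {		/* low wrapped, so hi was stale */
			hi = read_high();
			lo = lo2;
		}
	}
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("systim = 0x%llx\n",
	       (unsigned long long)read_systim(0xffffff));
	return 0;
}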
4422 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
4434 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
4443 struct net_device *netdev = adapter->netdev; in e1000_sw_init()
4445 adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; in e1000_sw_init()
4446 adapter->rx_ps_bsize0 = 128; in e1000_sw_init()
4447 adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; in e1000_sw_init()
4448 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in e1000_sw_init()
4449 adapter->tx_ring_count = E1000_DEFAULT_TXD; in e1000_sw_init()
4450 adapter->rx_ring_count = E1000_DEFAULT_RXD; in e1000_sw_init()
4452 spin_lock_init(&adapter->stats64_lock); in e1000_sw_init()
4457 return -ENOMEM; in e1000_sw_init()
4460 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { in e1000_sw_init()
4461 adapter->cc.read = e1000e_cyclecounter_read; in e1000_sw_init()
4462 adapter->cc.mask = CYCLECOUNTER_MASK(64); in e1000_sw_init()
4463 adapter->cc.mult = 1; in e1000_sw_init()
4466 spin_lock_init(&adapter->systim_lock); in e1000_sw_init()
4467 INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work); in e1000_sw_init()
4470 /* Explicitly disable IRQ since the NIC can be in any state. */ in e1000_sw_init()
4473 set_bit(__E1000_DOWN, &adapter->state); in e1000_sw_init()
4478 * e1000_intr_msi_test - Interrupt Handler
4486 struct e1000_hw *hw = &adapter->hw; in e1000_intr_msi_test()
4491 adapter->flags &= ~FLAG_MSI_TEST_FAILED; in e1000_intr_msi_test()
4502 * e1000_test_msi_interrupt - Returns 0 for successful test
4509 struct net_device *netdev = adapter->netdev; in e1000_test_msi_interrupt()
4510 struct e1000_hw *hw = &adapter->hw; in e1000_test_msi_interrupt()
4513 /* poll_enable hasn't been called yet, so don't need disable */ in e1000_test_msi_interrupt()
4524 adapter->flags |= FLAG_MSI_TEST_FAILED; in e1000_test_msi_interrupt()
4526 err = pci_enable_msi(adapter->pdev); in e1000_test_msi_interrupt()
4530 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, in e1000_test_msi_interrupt()
4531 netdev->name, netdev); in e1000_test_msi_interrupt()
4533 pci_disable_msi(adapter->pdev); in e1000_test_msi_interrupt()
4553 if (adapter->flags & FLAG_MSI_TEST_FAILED) { in e1000_test_msi_interrupt()
4554 adapter->int_mode = E1000E_INT_MODE_LEGACY; in e1000_test_msi_interrupt()
4560 free_irq(adapter->pdev->irq, netdev); in e1000_test_msi_interrupt()
4561 pci_disable_msi(adapter->pdev); in e1000_test_msi_interrupt()
4569 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
4579 if (!(adapter->flags & FLAG_MSI_ENABLED)) in e1000_test_msi()
4582 /* disable SERR in case the MSI write causes a master abort */ in e1000_test_msi()
4583 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); in e1000_test_msi()
4585 pci_write_config_word(adapter->pdev, PCI_COMMAND, in e1000_test_msi()
4590 /* re-enable SERR */ in e1000_test_msi()
4592 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); in e1000_test_msi()
4594 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); in e1000_test_msi()
4601 * e1000e_open - Called when a network interface is made active
4615 struct e1000_hw *hw = &adapter->hw; in e1000e_open()
4616 struct pci_dev *pdev = adapter->pdev; in e1000e_open()
4620 if (test_bit(__E1000_TESTING, &adapter->state)) in e1000e_open()
4621 return -EBUSY; in e1000e_open()
4623 pm_runtime_get_sync(&pdev->dev); in e1000e_open()
4629 err = e1000e_setup_tx_resources(adapter->tx_ring); in e1000e_open()
4634 err = e1000e_setup_rx_resources(adapter->rx_ring); in e1000e_open()
4641 if (adapter->flags & FLAG_HAS_AMT) { in e1000e_open()
4648 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; in e1000e_open()
4649 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) in e1000e_open()
4653 cpu_latency_qos_add_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE); in e1000e_open()
4670 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { in e1000e_open()
4679 clear_bit(__E1000_DOWN, &adapter->state); in e1000e_open()
4681 napi_enable(&adapter->napi); in e1000e_open()
4685 adapter->tx_hang_recheck = false; in e1000e_open()
4687 hw->mac.get_link_status = true; in e1000e_open()
4688 pm_runtime_put(&pdev->dev); in e1000e_open()
4695 cpu_latency_qos_remove_request(&adapter->pm_qos_req); in e1000e_open()
4698 e1000e_free_rx_resources(adapter->rx_ring); in e1000e_open()
4700 e1000e_free_tx_resources(adapter->tx_ring); in e1000e_open()
4703 pm_runtime_put_sync(&pdev->dev); in e1000e_open()
4709 * e1000e_close - Disables a network interface
4714 * The close entry point is called when an interface is de-activated
4722 struct pci_dev *pdev = adapter->pdev; in e1000e_close()
4725 while (test_bit(__E1000_RESETTING, &adapter->state) && count--) in e1000e_close()
4728 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); in e1000e_close()
4730 pm_runtime_get_sync(&pdev->dev); in e1000e_close()
4740 napi_disable(&adapter->napi); in e1000e_close()
4742 e1000e_free_tx_resources(adapter->tx_ring); in e1000e_close()
4743 e1000e_free_rx_resources(adapter->rx_ring); in e1000e_close()
4748 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) in e1000e_close()
4750 adapter->mng_vlan_id); in e1000e_close()
4755 if ((adapter->flags & FLAG_HAS_AMT) && in e1000e_close()
4756 !test_bit(__E1000_TESTING, &adapter->state)) in e1000e_close()
4759 cpu_latency_qos_remove_request(&adapter->pm_qos_req); in e1000e_close()
4761 pm_runtime_put_sync(&pdev->dev); in e1000e_close()
4767 * e1000_set_mac - Change the Ethernet Address of the NIC
4776 struct e1000_hw *hw = &adapter->hw; in e1000_set_mac()
4779 if (!is_valid_ether_addr(addr->sa_data)) in e1000_set_mac()
4780 return -EADDRNOTAVAIL; in e1000_set_mac()
4782 eth_hw_addr_set(netdev, addr->sa_data); in e1000_set_mac()
4783 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); in e1000_set_mac()
4785 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); in e1000_set_mac()
4787 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { in e1000_set_mac()
4789 e1000e_set_laa_state_82571(&adapter->hw, 1); in e1000_set_mac()
4798 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, in e1000_set_mac()
4799 adapter->hw.mac.rar_entry_count - 1); in e1000_set_mac()
4806 * e1000e_update_phy_task - work thread to update phy
4818 struct e1000_hw *hw = &adapter->hw; in e1000e_update_phy_task()
4820 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000e_update_phy_task()
4826 if (hw->phy.type >= e1000_phy_82579) in e1000e_update_phy_task()
4831 * e1000_update_phy_info - timer call-back to update PHY info
4841 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_update_phy_info()
4844 schedule_work(&adapter->update_phy_task); in e1000_update_phy_info()
4848 * e1000e_update_phy_stats - Update the PHY statistics counters
4851 * Read/clear the upper 16-bit PHY registers and read/accumulate lower
4855 struct e1000_hw *hw = &adapter->hw; in e1000e_update_phy_stats()
4859 ret_val = hw->phy.ops.acquire(hw); in e1000e_update_phy_stats()
4866 hw->phy.addr = 1; in e1000e_update_phy_stats()
4872 ret_val = hw->phy.ops.set_page(hw, in e1000e_update_phy_stats()
4879 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); in e1000e_update_phy_stats()
4880 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); in e1000e_update_phy_stats()
4882 adapter->stats.scc += phy_data; in e1000e_update_phy_stats()
4885 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); in e1000e_update_phy_stats()
4886 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); in e1000e_update_phy_stats()
4888 adapter->stats.ecol += phy_data; in e1000e_update_phy_stats()
4891 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); in e1000e_update_phy_stats()
4892 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); in e1000e_update_phy_stats()
4894 adapter->stats.mcc += phy_data; in e1000e_update_phy_stats()
4897 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); in e1000e_update_phy_stats()
4898 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); in e1000e_update_phy_stats()
4900 adapter->stats.latecol += phy_data; in e1000e_update_phy_stats()
4902 /* Collision Count - also used for adaptive IFS */ in e1000e_update_phy_stats()
4903 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); in e1000e_update_phy_stats()
4904 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); in e1000e_update_phy_stats()
4906 hw->mac.collision_delta = phy_data; in e1000e_update_phy_stats()
4909 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); in e1000e_update_phy_stats()
4910 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); in e1000e_update_phy_stats()
4912 adapter->stats.dc += phy_data; in e1000e_update_phy_stats()
4915 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); in e1000e_update_phy_stats()
4916 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); in e1000e_update_phy_stats()
4918 adapter->stats.tncrs += phy_data; in e1000e_update_phy_stats()
4921 hw->phy.ops.release(hw); in e1000e_update_phy_stats()
4925 * e1000e_update_stats - Update the board statistics counters
4930 struct net_device *netdev = adapter->netdev; in e1000e_update_stats()
4931 struct e1000_hw *hw = &adapter->hw; in e1000e_update_stats()
4932 struct pci_dev *pdev = adapter->pdev; in e1000e_update_stats()
4937 if (adapter->link_speed == 0) in e1000e_update_stats()
4942 adapter->stats.crcerrs += er32(CRCERRS); in e1000e_update_stats()
4943 adapter->stats.gprc += er32(GPRC); in e1000e_update_stats()
4944 adapter->stats.gorc += er32(GORCL); in e1000e_update_stats()
4946 adapter->stats.bprc += er32(BPRC); in e1000e_update_stats()
4947 adapter->stats.mprc += er32(MPRC); in e1000e_update_stats()
4948 adapter->stats.roc += er32(ROC); in e1000e_update_stats()
4950 adapter->stats.mpc += er32(MPC); in e1000e_update_stats()
4952 /* Half-duplex statistics */ in e1000e_update_stats()
4953 if (adapter->link_duplex == HALF_DUPLEX) { in e1000e_update_stats()
4954 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { in e1000e_update_stats()
4957 adapter->stats.scc += er32(SCC); in e1000e_update_stats()
4958 adapter->stats.ecol += er32(ECOL); in e1000e_update_stats()
4959 adapter->stats.mcc += er32(MCC); in e1000e_update_stats()
4960 adapter->stats.latecol += er32(LATECOL); in e1000e_update_stats()
4961 adapter->stats.dc += er32(DC); in e1000e_update_stats()
4963 hw->mac.collision_delta = er32(COLC); in e1000e_update_stats()
4965 if ((hw->mac.type != e1000_82574) && in e1000e_update_stats()
4966 (hw->mac.type != e1000_82583)) in e1000e_update_stats()
4967 adapter->stats.tncrs += er32(TNCRS); in e1000e_update_stats()
4969 adapter->stats.colc += hw->mac.collision_delta; in e1000e_update_stats()
4972 adapter->stats.xonrxc += er32(XONRXC); in e1000e_update_stats()
4973 adapter->stats.xontxc += er32(XONTXC); in e1000e_update_stats()
4974 adapter->stats.xoffrxc += er32(XOFFRXC); in e1000e_update_stats()
4975 adapter->stats.xofftxc += er32(XOFFTXC); in e1000e_update_stats()
4976 adapter->stats.gptc += er32(GPTC); in e1000e_update_stats()
4977 adapter->stats.gotc += er32(GOTCL); in e1000e_update_stats()
4979 adapter->stats.rnbc += er32(RNBC); in e1000e_update_stats()
4980 adapter->stats.ruc += er32(RUC); in e1000e_update_stats()
4982 adapter->stats.mptc += er32(MPTC); in e1000e_update_stats()
4983 adapter->stats.bptc += er32(BPTC); in e1000e_update_stats()
4987 hw->mac.tx_packet_delta = er32(TPT); in e1000e_update_stats()
4988 adapter->stats.tpt += hw->mac.tx_packet_delta; in e1000e_update_stats()
4990 adapter->stats.algnerrc += er32(ALGNERRC); in e1000e_update_stats()
4991 adapter->stats.rxerrc += er32(RXERRC); in e1000e_update_stats()
4992 adapter->stats.cexterr += er32(CEXTERR); in e1000e_update_stats()
4993 adapter->stats.tsctc += er32(TSCTC); in e1000e_update_stats()
4994 adapter->stats.tsctfc += er32(TSCTFC); in e1000e_update_stats()
4997 netdev->stats.multicast = adapter->stats.mprc; in e1000e_update_stats()
4998 netdev->stats.collisions = adapter->stats.colc; in e1000e_update_stats()
5005 netdev->stats.rx_errors = adapter->stats.rxerrc + in e1000e_update_stats()
5006 adapter->stats.crcerrs + adapter->stats.algnerrc + in e1000e_update_stats()
5007 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; in e1000e_update_stats()
5008 netdev->stats.rx_length_errors = adapter->stats.ruc + in e1000e_update_stats()
5009 adapter->stats.roc; in e1000e_update_stats()
5010 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; in e1000e_update_stats()
5011 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; in e1000e_update_stats()
5012 netdev->stats.rx_missed_errors = adapter->stats.mpc; in e1000e_update_stats()
5015 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol; in e1000e_update_stats()
5016 netdev->stats.tx_aborted_errors = adapter->stats.ecol; in e1000e_update_stats()
5017 netdev->stats.tx_window_errors = adapter->stats.latecol; in e1000e_update_stats()
5018 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; in e1000e_update_stats()
5023 adapter->stats.mgptc += er32(MGTPTC); in e1000e_update_stats()
5024 adapter->stats.mgprc += er32(MGTPRC); in e1000e_update_stats()
5025 adapter->stats.mgpdc += er32(MGTPDC); in e1000e_update_stats()
5028 if (hw->mac.type >= e1000_pch_lpt) { in e1000e_update_stats()
5031 adapter->corr_errors += in e1000e_update_stats()
5033 adapter->uncorr_errors += in e1000e_update_stats()
5040 * e1000_phy_read_status - Update the PHY register status snapshot
5045 struct e1000_hw *hw = &adapter->hw; in e1000_phy_read_status()
5046 struct e1000_phy_regs *phy = &adapter->phy_regs; in e1000_phy_read_status()
5048 if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) && in e1000_phy_read_status()
5050 (adapter->hw.phy.media_type == e1000_media_type_copper)) { in e1000_phy_read_status()
5053 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); in e1000_phy_read_status()
5054 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); in e1000_phy_read_status()
5055 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); in e1000_phy_read_status()
5056 ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa); in e1000_phy_read_status()
5057 ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion); in e1000_phy_read_status()
5058 ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000); in e1000_phy_read_status()
5059 ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000); in e1000_phy_read_status()
5060 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); in e1000_phy_read_status()
5065 * Set values to typical power-on defaults in e1000_phy_read_status()
5067 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); in e1000_phy_read_status()
5068 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | in e1000_phy_read_status()
5071 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | in e1000_phy_read_status()
5073 phy->lpa = 0; in e1000_phy_read_status()
5074 phy->expansion = EXPANSION_ENABLENPAGE; in e1000_phy_read_status()
5075 phy->ctrl1000 = ADVERTISE_1000FULL; in e1000_phy_read_status()
5076 phy->stat1000 = 0; in e1000_phy_read_status()
5077 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); in e1000_phy_read_status()
5083 struct e1000_hw *hw = &adapter->hw; in e1000_print_link_info()
5087 netdev_info(adapter->netdev, in e1000_print_link_info()
5089 adapter->link_speed, in e1000_print_link_info()
5090 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", in e1000_print_link_info()
5098 struct e1000_hw *hw = &adapter->hw; in e1000e_has_link()
5107 switch (hw->phy.media_type) { in e1000e_has_link()
5109 if (hw->mac.get_link_status) { in e1000e_has_link()
5110 ret_val = hw->mac.ops.check_for_link(hw); in e1000e_has_link()
5111 link_active = !hw->mac.get_link_status; in e1000e_has_link()
5117 ret_val = hw->mac.ops.check_for_link(hw); in e1000e_has_link()
5121 ret_val = hw->mac.ops.check_for_link(hw); in e1000e_has_link()
5122 link_active = hw->mac.serdes_has_link; in e1000e_has_link()
5129 if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && in e1000e_has_link()
5141 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && in e1000e_enable_receives()
5142 (adapter->flags & FLAG_RESTART_NOW)) { in e1000e_enable_receives()
5143 struct e1000_hw *hw = &adapter->hw; in e1000e_enable_receives()
5147 adapter->flags &= ~FLAG_RESTART_NOW; in e1000e_enable_receives()
5153 struct e1000_hw *hw = &adapter->hw; in e1000e_check_82574_phy_workaround()
5159 adapter->phy_hang_count++; in e1000e_check_82574_phy_workaround()
5161 adapter->phy_hang_count = 0; in e1000e_check_82574_phy_workaround()
5163 if (adapter->phy_hang_count > 1) { in e1000e_check_82574_phy_workaround()
5164 adapter->phy_hang_count = 0; in e1000e_check_82574_phy_workaround()
5165 e_dbg("PHY appears hung - resetting\n"); in e1000e_check_82574_phy_workaround()
5166 schedule_work(&adapter->reset_task); in e1000e_check_82574_phy_workaround()
5171 * e1000_watchdog - Timer Call-back
5179 schedule_work(&adapter->watchdog_task); in e1000_watchdog()
5189 struct net_device *netdev = adapter->netdev; in e1000_watchdog_task()
5190 struct e1000_mac_info *mac = &adapter->hw.mac; in e1000_watchdog_task()
5191 struct e1000_phy_info *phy = &adapter->hw.phy; in e1000_watchdog_task()
5192 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_watchdog_task()
5194 struct e1000_hw *hw = &adapter->hw; in e1000_watchdog_task()
5197 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5203 pm_runtime_resume(netdev->dev.parent); in e1000_watchdog_task()
5210 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) in e1000_watchdog_task()
5218 pm_runtime_resume(netdev->dev.parent); in e1000_watchdog_task()
5233 e1000_phy_hw_reset(&adapter->hw); in e1000_watchdog_task()
5239 mac->ops.get_link_up_info(&adapter->hw, in e1000_watchdog_task()
5240 &adapter->link_speed, in e1000_watchdog_task()
5241 &adapter->link_duplex); in e1000_watchdog_task()
5246 if (phy->speed_downgraded) in e1000_watchdog_task()
5253 if ((hw->phy.type == e1000_phy_igp_3 || in e1000_watchdog_task()
5254 hw->phy.type == e1000_phy_bm) && in e1000_watchdog_task()
5255 hw->mac.autoneg && in e1000_watchdog_task()
5256 (adapter->link_speed == SPEED_10 || in e1000_watchdog_task()
5257 adapter->link_speed == SPEED_100) && in e1000_watchdog_task()
5258 (adapter->link_duplex == HALF_DUPLEX)) { in e1000_watchdog_task()
5268 adapter->tx_timeout_factor = 1; in e1000_watchdog_task()
5269 switch (adapter->link_speed) { in e1000_watchdog_task()
5272 adapter->tx_timeout_factor = 16; in e1000_watchdog_task()
5276 adapter->tx_timeout_factor = 10; in e1000_watchdog_task()
5280 /* workaround: re-program speed mode bit after in e1000_watchdog_task()
5281 * link-up event in e1000_watchdog_task()
5283 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && in e1000_watchdog_task()
5299 /* Perform any post-link-up configuration before in e1000_watchdog_task()
5302 if (phy->ops.cfg_on_link_up) in e1000_watchdog_task()
5303 phy->ops.cfg_on_link_up(hw); in e1000_watchdog_task()
5308 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5309 mod_timer(&adapter->phy_info_timer, in e1000_watchdog_task()
5314 adapter->link_speed = 0; in e1000_watchdog_task()
5315 adapter->link_duplex = 0; in e1000_watchdog_task()
5320 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5321 mod_timer(&adapter->phy_info_timer, in e1000_watchdog_task()
5324 /* 8000ES2LAN requires a Rx packet buffer work-around in e1000_watchdog_task()
5328 if (adapter->flags & FLAG_RX_NEEDS_RESTART) in e1000_watchdog_task()
5329 adapter->flags |= FLAG_RESTART_NOW; in e1000_watchdog_task()
5331 pm_schedule_suspend(netdev->dev.parent, in e1000_watchdog_task()
5337 spin_lock(&adapter->stats64_lock); in e1000_watchdog_task()
5340 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; in e1000_watchdog_task()
5341 adapter->tpt_old = adapter->stats.tpt; in e1000_watchdog_task()
5342 mac->collision_delta = adapter->stats.colc - adapter->colc_old; in e1000_watchdog_task()
5343 adapter->colc_old = adapter->stats.colc; in e1000_watchdog_task()
5345 adapter->gorc = adapter->stats.gorc - adapter->gorc_old; in e1000_watchdog_task()
5346 adapter->gorc_old = adapter->stats.gorc; in e1000_watchdog_task()
5347 adapter->gotc = adapter->stats.gotc - adapter->gotc_old; in e1000_watchdog_task()
5348 adapter->gotc_old = adapter->stats.gotc; in e1000_watchdog_task()
5349 spin_unlock(&adapter->stats64_lock); in e1000_watchdog_task()
5356 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) in e1000_watchdog_task()
5357 adapter->flags |= FLAG_RESTART_NOW; in e1000_watchdog_task()
5360 if (adapter->flags & FLAG_RESTART_NOW) { in e1000_watchdog_task()
5361 schedule_work(&adapter->reset_task); in e1000_watchdog_task()
5366 e1000e_update_adaptive(&adapter->hw); in e1000_watchdog_task()
5369 if (adapter->itr_setting == 4) { in e1000_watchdog_task()
5372 * everyone else is between 2000-8000. in e1000_watchdog_task()
5374 u32 goc = (adapter->gotc + adapter->gorc) / 10000; in e1000_watchdog_task()
5375 u32 dif = (adapter->gotc > adapter->gorc ? in e1000_watchdog_task()
5376 adapter->gotc - adapter->gorc : in e1000_watchdog_task()
5377 adapter->gorc - adapter->gotc) / 10000; in e1000_watchdog_task()
5384 if (adapter->msix_entries) in e1000_watchdog_task()
5385 ew32(ICS, adapter->rx_ring->ims_val); in e1000_watchdog_task()
5393 adapter->detect_tx_hung = true; in e1000_watchdog_task()
5399 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0); in e1000_watchdog_task()
5401 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) in e1000_watchdog_task()
5405 if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { in e1000_watchdog_task()
5406 if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) && in e1000_watchdog_task()
5409 adapter->rx_hwtstamp_cleared++; in e1000_watchdog_task()
5411 adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP; in e1000_watchdog_task()
5416 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5417 mod_timer(&adapter->watchdog_timer, in e1000_watchdog_task()
5449 mss = skb_shinfo(skb)->gso_size; in e1000_tso()
5452 iph->tot_len = 0; in e1000_tso()
5453 iph->check = 0; in e1000_tso()
5454 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, in e1000_tso()
5457 ipcse = skb_transport_offset(skb) - 1; in e1000_tso()
5463 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; in e1000_tso()
5465 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; in e1000_tso()
5468 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); in e1000_tso()
5470 i = tx_ring->next_to_use; in e1000_tso()
5472 buffer_info = &tx_ring->buffer_info[i]; in e1000_tso()
5474 context_desc->lower_setup.ip_fields.ipcss = ipcss; in e1000_tso()
5475 context_desc->lower_setup.ip_fields.ipcso = ipcso; in e1000_tso()
5476 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); in e1000_tso()
5477 context_desc->upper_setup.tcp_fields.tucss = tucss; in e1000_tso()
5478 context_desc->upper_setup.tcp_fields.tucso = tucso; in e1000_tso()
5479 context_desc->upper_setup.tcp_fields.tucse = 0; in e1000_tso()
5480 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); in e1000_tso()
5481 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; in e1000_tso()
5482 context_desc->cmd_and_length = cpu_to_le32(cmd_length); in e1000_tso()
5484 buffer_info->time_stamp = jiffies; in e1000_tso()
5485 buffer_info->next_to_watch = i; in e1000_tso()
5488 if (i == tx_ring->count) in e1000_tso()
5490 tx_ring->next_to_use = i; in e1000_tso()
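The tcp_hdr(skb)->check seeding above stores the complement of csum_tcpudp_magic() computed with a zero length: that is, the un-inverted one's-complement pseudo-header sum with the length field deliberately omitted, because the hardware re-adds each TSO segment's own length and finalizes the checksum per segment. A minimal, self-contained illustration of that partial sum (byte-order handling simplified to host order):

#include <stdint.h>
#include <stdio.h>

/* Folded one's-complement sum of the TCP pseudo-header without the
 * length field: the partial sum seeded into tcp->check for TSO. */
static uint16_t tso_seed(uint32_t saddr, uint32_t daddr)
{
	uint32_t sum = (saddr >> 16) + (saddr & 0xffff) +
		       (daddr >> 16) + (daddr & 0xffff) +
		       6;	/* IPPROTO_TCP */

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
	return (uint16_t)sum;
}

int main(void)
{
	/* 192.0.2.1 -> 192.0.2.2 as host-order words, for simplicity */
	printf("seed = 0x%04x\n", tso_seed(0xc0000201u, 0xc0000202u));
	return 0;
}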
5498 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_tx_csum()
5505 if (skb->ip_summed != CHECKSUM_PARTIAL) in e1000_tx_csum()
5510 if (ip_hdr(skb)->protocol == IPPROTO_TCP) in e1000_tx_csum()
5515 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) in e1000_tx_csum()
5527 i = tx_ring->next_to_use; in e1000_tx_csum()
5528 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_csum()
5531 context_desc->lower_setup.ip_config = 0; in e1000_tx_csum()
5532 context_desc->upper_setup.tcp_fields.tucss = css; in e1000_tx_csum()
5533 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; in e1000_tx_csum()
5534 context_desc->upper_setup.tcp_fields.tucse = 0; in e1000_tx_csum()
5535 context_desc->tcp_seg_setup.data = 0; in e1000_tx_csum()
5536 context_desc->cmd_and_length = cpu_to_le32(cmd_len); in e1000_tx_csum()
5538 buffer_info->time_stamp = jiffies; in e1000_tx_csum()
5539 buffer_info->next_to_watch = i; in e1000_tx_csum()
5542 if (i == tx_ring->count) in e1000_tx_csum()
5544 tx_ring->next_to_use = i; in e1000_tx_csum()
5553 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_tx_map()
5554 struct pci_dev *pdev = adapter->pdev; in e1000_tx_map()
5560 i = tx_ring->next_to_use; in e1000_tx_map()
5563 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
5566 buffer_info->length = size; in e1000_tx_map()
5567 buffer_info->time_stamp = jiffies; in e1000_tx_map()
5568 buffer_info->next_to_watch = i; in e1000_tx_map()
5569 buffer_info->dma = dma_map_single(&pdev->dev, in e1000_tx_map()
5570 skb->data + offset, in e1000_tx_map()
5572 buffer_info->mapped_as_page = false; in e1000_tx_map()
5573 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) in e1000_tx_map()
5576 len -= size; in e1000_tx_map()
5582 if (i == tx_ring->count) in e1000_tx_map()
5588 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in e1000_tx_map()
5595 if (i == tx_ring->count) in e1000_tx_map()
5598 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
5601 buffer_info->length = size; in e1000_tx_map()
5602 buffer_info->time_stamp = jiffies; in e1000_tx_map()
5603 buffer_info->next_to_watch = i; in e1000_tx_map()
5604 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, in e1000_tx_map()
5607 buffer_info->mapped_as_page = true; in e1000_tx_map()
5608 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) in e1000_tx_map()
5611 len -= size; in e1000_tx_map()
5617 segs = skb_shinfo(skb)->gso_segs ? : 1; in e1000_tx_map()
5619 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; in e1000_tx_map()
5621 tx_ring->buffer_info[i].skb = skb; in e1000_tx_map()
5622 tx_ring->buffer_info[i].segs = segs; in e1000_tx_map()
5623 tx_ring->buffer_info[i].bytecount = bytecount; in e1000_tx_map()
5624 tx_ring->buffer_info[first].next_to_watch = i; in e1000_tx_map()
5629 dev_err(&pdev->dev, "Tx DMA map failed\n"); in e1000_tx_map()
5630 buffer_info->dma = 0; in e1000_tx_map()
5632 count--; in e1000_tx_map()
5634 while (count--) { in e1000_tx_map()
5636 i += tx_ring->count; in e1000_tx_map()
5637 i--; in e1000_tx_map()
5638 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
5647 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_tx_queue()
5680 i = tx_ring->next_to_use; in e1000_tx_queue()
5683 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_queue()
5685 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); in e1000_tx_queue()
5686 tx_desc->lower.data = cpu_to_le32(txd_lower | in e1000_tx_queue()
5687 buffer_info->length); in e1000_tx_queue()
5688 tx_desc->upper.data = cpu_to_le32(txd_upper); in e1000_tx_queue()
5691 if (i == tx_ring->count) in e1000_tx_queue()
5693 } while (--count > 0); in e1000_tx_queue()
5695 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); in e1000_tx_queue()
5697 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ in e1000_tx_queue()
5699 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); in e1000_tx_queue()
5703 * applicable for weak-ordered memory model archs, in e1000_tx_queue()
5704 * such as IA-64). in e1000_tx_queue()
5708 tx_ring->next_to_use = i; in e1000_tx_queue()
5715 struct e1000_hw *hw = &adapter->hw; in e1000_transfer_dhcp_info()
5719 !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && in e1000_transfer_dhcp_info()
5720 (adapter->hw.mng_cookie.status & in e1000_transfer_dhcp_info()
5724 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) in e1000_transfer_dhcp_info()
5727 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP)) in e1000_transfer_dhcp_info()
5731 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); in e1000_transfer_dhcp_info()
5734 if (ip->protocol != IPPROTO_UDP) in e1000_transfer_dhcp_info()
5737 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); in e1000_transfer_dhcp_info()
5738 if (ntohs(udp->dest) != 67) in e1000_transfer_dhcp_info()
5741 offset = (u8 *)udp + 8 - skb->data; in e1000_transfer_dhcp_info()
5742 length = skb->len - offset; in e1000_transfer_dhcp_info()
5751 struct e1000_adapter *adapter = tx_ring->adapter; in __e1000_maybe_stop_tx()
5753 netif_stop_queue(adapter->netdev); in __e1000_maybe_stop_tx()
5764 return -EBUSY; in __e1000_maybe_stop_tx()
5767 netif_start_queue(adapter->netdev); in __e1000_maybe_stop_tx()
5768 ++adapter->restart_queue; in __e1000_maybe_stop_tx()
5774 BUG_ON(size > tx_ring->count); in e1000_maybe_stop_tx()
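__e1000_maybe_stop_tx() above uses the stop-then-recheck idiom: stop the queue first, then test the free-descriptor count again behind a memory barrier, so a cleanup path that freed descriptors in the window between the two checks cannot leave the queue stopped forever. A userspace analogue using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int unused_desc = 2;
static atomic_bool stopped;

/* Stop first, fence, re-check: mirrors the barrier pairing between the
 * transmit and cleanup paths in the driver. Illustrative only. */
static int maybe_stop_tx(int needed)
{
	if (atomic_load(&unused_desc) >= needed)
		return 0;

	atomic_store(&stopped, 1);
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load(&unused_desc) < needed)
		return -1;	/* still full, stay stopped (-EBUSY) */

	atomic_store(&stopped, 0);	/* space appeared between checks */
	return 0;
}

int main(void)
{
	printf("stop? %d\n", maybe_stop_tx(4));
	return 0;
}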
5785 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_xmit_frame()
5796 if (test_bit(__E1000_DOWN, &adapter->state)) { in e1000_xmit_frame()
5801 if (skb->len <= 0) { in e1000_xmit_frame()
5812 mss = skb_shinfo(skb)->gso_size; in e1000_xmit_frame()
5816 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data in e1000_xmit_frame()
5818 * frags into skb->data in e1000_xmit_frame()
5821 /* we do this workaround for ES2LAN, but it is unnecessary, in e1000_xmit_frame()
5824 if (skb->data_len && (hdr_len == len)) { in e1000_xmit_frame()
5827 pull_size = min_t(unsigned int, 4, skb->data_len); in e1000_xmit_frame()
5838 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) in e1000_xmit_frame()
5842 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); in e1000_xmit_frame()
5844 nr_frags = skb_shinfo(skb)->nr_frags; in e1000_xmit_frame()
5846 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), in e1000_xmit_frame()
5847 adapter->tx_fifo_limit); in e1000_xmit_frame()
5849 if (adapter->hw.mac.tx_pkt_filtering) in e1000_xmit_frame()
5864 first = tx_ring->next_to_use; in e1000_xmit_frame()
5884 if (unlikely(skb->no_fcs)) in e1000_xmit_frame()
5888 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, in e1000_xmit_frame()
5891 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in e1000_xmit_frame()
5892 (adapter->flags & FLAG_HAS_HW_TIMESTAMP)) { in e1000_xmit_frame()
5893 if (!adapter->tx_hwtstamp_skb) { in e1000_xmit_frame()
5894 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in e1000_xmit_frame()
5896 adapter->tx_hwtstamp_skb = skb_get(skb); in e1000_xmit_frame()
5897 adapter->tx_hwtstamp_start = jiffies; in e1000_xmit_frame()
5898 schedule_work(&adapter->tx_hwtstamp_work); in e1000_xmit_frame()
5900 adapter->tx_hwtstamp_skipped++; in e1000_xmit_frame()
5906 netdev_sent_queue(netdev, skb->len); in e1000_xmit_frame()
5912 adapter->tx_fifo_limit) + 4)); in e1000_xmit_frame()
5916 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_xmit_frame()
5918 tx_ring->next_to_use); in e1000_xmit_frame()
5920 writel(tx_ring->next_to_use, tx_ring->tail); in e1000_xmit_frame()
5924 tx_ring->buffer_info[first].time_stamp = 0; in e1000_xmit_frame()
5925 tx_ring->next_to_use = first; in e1000_xmit_frame()
5932 * e1000_tx_timeout - Respond to a Tx Hang
5941 adapter->tx_timeout_count++; in e1000_tx_timeout()
5942 schedule_work(&adapter->reset_task); in e1000_tx_timeout()
5952 if (test_bit(__E1000_DOWN, &adapter->state)) { in e1000_reset_task()
5957 if (!(adapter->flags & FLAG_RESTART_NOW)) { in e1000_reset_task()
5966 * e1000e_get_stats64 - Get System Network Statistics
5977 spin_lock(&adapter->stats64_lock); in e1000e_get_stats64()
5980 stats->rx_bytes = adapter->stats.gorc; in e1000e_get_stats64()
5981 stats->rx_packets = adapter->stats.gprc; in e1000e_get_stats64()
5982 stats->tx_bytes = adapter->stats.gotc; in e1000e_get_stats64()
5983 stats->tx_packets = adapter->stats.gptc; in e1000e_get_stats64()
5984 stats->multicast = adapter->stats.mprc; in e1000e_get_stats64()
5985 stats->collisions = adapter->stats.colc; in e1000e_get_stats64()
5992 stats->rx_errors = adapter->stats.rxerrc + in e1000e_get_stats64()
5993 adapter->stats.crcerrs + adapter->stats.algnerrc + in e1000e_get_stats64()
5994 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; in e1000e_get_stats64()
5995 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; in e1000e_get_stats64()
5996 stats->rx_crc_errors = adapter->stats.crcerrs; in e1000e_get_stats64()
5997 stats->rx_frame_errors = adapter->stats.algnerrc; in e1000e_get_stats64()
5998 stats->rx_missed_errors = adapter->stats.mpc; in e1000e_get_stats64()
6001 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; in e1000e_get_stats64()
6002 stats->tx_aborted_errors = adapter->stats.ecol; in e1000e_get_stats64()
6003 stats->tx_window_errors = adapter->stats.latecol; in e1000e_get_stats64()
6004 stats->tx_carrier_errors = adapter->stats.tncrs; in e1000e_get_stats64()
6008 spin_unlock(&adapter->stats64_lock); in e1000e_get_stats64()
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 */
	/* Jumbo frame support */
	if ((max_frame > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) &&
	    !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
		e_err("Jumbo Frames not supported.\n");
		return -EINVAL;
	}

	/* Jumbo frame workaround on 82579 and newer requires CRC stripping */
	if ((adapter->hw.mac.type >= e1000_pch2lan) &&
	    !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
	    (new_mtu > ETH_DATA_LEN)) {
		e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
		return -EINVAL;
	}

	/* Serialize against any concurrent reconfiguration */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		usleep_range(1000, 1100);
	/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
	adapter->max_frame_size = max_frame;

	netdev_dbg(netdev, "changing MTU from %d to %d\n",
		   netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	pm_runtime_get_sync(netdev->dev.parent);
	/* ... */
	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more; this pushes us to allocate from the next
	 * larger slab size, i.e. RXBUFFER_2048 --> size-4096 slab.
	 */
	if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
		adapter->rx_buffer_len = 4096;

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
	/* ... */
	pm_runtime_put_sync(netdev->dev.parent);

	clear_bit(__E1000_RESETTING, &adapter->state);
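/* The test_and_set_bit()/clear_bit() pair above is the driver's ad-hoc
 * mutex for reconfiguration. A minimal, hedged sketch of the same pattern
 * in isolation; the state word and bit name are illustrative.
 */
#include <linux/bitops.h>
#include <linux/delay.h>

#define MY_RESETTING_BIT	0	/* illustrative state bit */

static unsigned long my_state;		/* illustrative driver state word */

static void sketch_reconfigure(void)
{
	/* test_and_set_bit() is atomic, so exactly one caller wins;
	 * everyone else politely sleeps and retries.
	 */
	while (test_and_set_bit(MY_RESETTING_BIT, &my_state))
		usleep_range(1000, 1100);

	/* ... perform the down/reset/up sequence here ... */

	clear_bit(MY_RESETTING_BIT, &my_state);
}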
/* e1000_mii_ioctl() */
	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		/* Report the cached PHY registers, keyed by the MII
		 * register number (low five bits of reg_num).
		 */
		switch (data->reg_num & 0x1F) {
		case MII_BMCR:
			data->val_out = adapter->phy_regs.bmcr;
			break;
		case MII_BMSR:
			data->val_out = adapter->phy_regs.bmsr;
			break;
		case MII_PHYSID1:
			data->val_out = (adapter->hw.phy.id >> 16);
			break;
		case MII_PHYSID2:
			data->val_out = (adapter->hw.phy.id & 0xFFFF);
			break;
		case MII_ADVERTISE:
			data->val_out = adapter->phy_regs.advertise;
			break;
		case MII_LPA:
			data->val_out = adapter->phy_regs.lpa;
			break;
		case MII_EXPANSION:
			data->val_out = adapter->phy_regs.expansion;
			break;
		case MII_CTRL1000:
			data->val_out = adapter->phy_regs.ctrl1000;
			break;
		case MII_STAT1000:
			data->val_out = adapter->phy_regs.stat1000;
			break;
		case MII_ESTATUS:
			data->val_out = adapter->phy_regs.estatus;
			break;
		default:
			return -EIO;
		}
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
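/* Hedged userspace sketch (not part of the driver): reading the PHY's BMSR
 * through the SIOCGMIIPHY/SIOCGMIIREG path handled above. "eth0" is a
 * placeholder interface name.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return 1;

	mii->reg_num = MII_BMSR;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return 1;

	printf("BMSR = 0x%04x (link %s)\n", mii->val_out,
	       (mii->val_out & BMSR_LSTATUS) ? "up" : "down");
	return 0;
}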
/**
 * e1000e_hwtstamp_set - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it.
 */
	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;
	/* ... */
	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;

/* e1000e_hwtstamp_get() */
	return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
			    sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
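/* Hedged userspace sketch (not part of the driver): requesting hardware TX
 * timestamps through the SIOCSHWTSTAMP path handled by e1000e_hwtstamp_set()
 * above, then enabling delivery with SO_TIMESTAMPING. "eth0" is a
 * placeholder interface name.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_tx_hwtstamp(int sock)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_NONE,
	};
	struct ifreq ifr;
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
		return -1;

	/* Ask the stack to loop completed TX timestamps back to us. */
	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}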
/* e1000_ioctl() */
	default:
		return -EOPNOTSUPP;
/* e1000_init_phy_wakeup() */
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	retval = hw->phy.ops.acquire(hw);
	/* ... */
	/* copy MAC MTA to PHY MTA - only needed for pchlan */
	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		/* ... */
		hw->phy.ops.write_reg_page(hw, BM_MTA(i), /* ... */);
		hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, /* ... */);
	}
	/* ... */
	hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
	/* ... */
	hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
	/* ... */
	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);
	/* ... */
	hw->phy.ops.release(hw);
/* e1000e_flush_lpic() */
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	pm_runtime_get_sync(netdev->dev.parent);

	ret_val = hw->phy.ops.acquire(hw);
	/* ... */
	hw->phy.ops.release(hw);
	/* ... */
	pm_runtime_put_sync(netdev->dev.parent);
/* e1000e_s0ix_entry_flow() */
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	if (/* elided condition && */
	    hw->mac.type >= e1000_pch_adp) {
		/* ... */
		/* Disable the periodic inband message,
		 * ...
		 */
		/* ... */
	}
	/* ...
	 * page769_20[7] - PHY PLL stop
	 * page769_20[8] - PHY go to the electrical idle
	 * page769_20[9] - PHY serdes disable
	 * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1
	 */
	/* ... */
	/* Disable disconnected cable conditioning for Power Gating */
	/* ... */
	/* Disable the time synchronization clock */
	/* ... */
/* e1000e_s0ix_exit_flow() */
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	if (/* elided condition && */
	    hw->mac.type >= e1000_pch_adp) {
		/* ... */
	}
	/* ... */
	/* Disable the Dynamic Power Gating in the MAC */
	/* ... */
	/* Disable mPHY power gating for any link and speed */
	/* ... */
	/* Disable K1 off */
	/* ... */
	/* Disable Ungate PGCB clock */
	/* ... */
	/* Cancel disable disconnected cable conditioning
	 * ...
	 */
	/* Disable the Dynamic Clock Gating in the DMA and MAC */
	/* ... */
	/* Disable Dynamic Power Gating */
	/* ... */
/* e1000e_pm_freeze() */
	/* Wait for any pending reset to finish before freezing */
	while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
		usleep_range(10000, 11000);

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
	/* ... */
	e1000e_disable_pcie_master(&adapter->hw);
/* __e1000_shutdown() */
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	else if (device_may_wakeup(&pdev->dev))
		wufc = adapter->wol;
	/* ... */
	/* turn on all-multi mode if wake on multicast is enabled */
	/* ... */
	if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
		ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
	/* ... */
	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		/* keep the laser running in D3 */
		/* ... */
	}
	/* ... */
	if (adapter->flags & FLAG_IS_ICH)
		e1000_suspend_workarounds_ich8lan(&adapter->hw);

	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
		/* enable wakeup by the PHY */
		/* ... */
	}
	/* ... */
	if (adapter->hw.phy.type == e1000_phy_igp_3) {
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
	} else if (hw->mac.type >= e1000_pch_lpt) {
		/* ... */
	}

	if ((hw->phy.type >= e1000_phy_i217) &&
	    adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) {
		/* ... */
		retval = hw->phy.ops.acquire(hw);
		/* ... */
		if (adapter->eee_advert &
		    hw->dev_spec.ich8lan.eee_lp_ability &
		    I82579_EEE_100_SUPPORTED)
			/* ... */
		if (adapter->eee_advert &
		    hw->dev_spec.ich8lan.eee_lp_ability &
		    I82579_EEE_1000_SUPPORTED)
			/* ... */
		hw->phy.ops.release(hw);
	}
	/* ... */
	/* The pci-e switch on some quad port adapters will report a
	 * correctable error when the MAC transitions from D0 to D3. To
	 * prevent this we need to mask off the correctable errors on the
	 * downstream port of the pci-e switch.
	 */
	if (adapter->flags & FLAG_IS_QUAD_PORT) {
		struct pci_dev *us_dev = pdev->bus->self;
		/* ... */
	}
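/* Hedged sketch (illustrative, not from this file): how ethtool WAKE_* bits
 * typically map onto the WUFC wake-up filter bits that __e1000_shutdown()
 * programs. The E1000_WUFC_* names follow the e1000e register headers;
 * treat the exact mapping as an assumption.
 */
#include <linux/ethtool.h>

static u32 sketch_wol_to_wufc(u32 wolopts)
{
	u32 wufc = 0;

	if (wolopts & WAKE_UCAST)
		wufc |= E1000_WUFC_EX;	/* directed (exact) match */
	if (wolopts & WAKE_MCAST)
		wufc |= E1000_WUFC_MC;	/* multicast */
	if (wolopts & WAKE_BCAST)
		wufc |= E1000_WUFC_BC;	/* broadcast */
	if (wolopts & WAKE_MAGIC)
		wufc |= E1000_WUFC_MAG;	/* magic packet */

	return wufc;
}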
/**
 * __e1000e_disable_aspm - Disable ASPM states
 * @pdev: pointer to PCI device struct
 * @state: bit-mask of ASPM states to disable
 * @locked: indication if this context holds pci_bus_sem locked
 *
 * Some devices *must* have certain ASPM states disabled per hardware errata.
 */
	struct pci_dev *parent = pdev->bus->self;
	/* ... */
	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
		 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
		 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
	/* ... */
	/* Double-check ASPM control. If not disabled by the above, the
	 * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
	 * not enabled); override by writing PCI config space directly.
	 */
	/* ... */
	/* Both device and parent should have the same ASPM setting.
	 * Disable ASPM in downstream component first and then upstream.
	 */
/**
 * e1000e_disable_aspm - Disable ASPM states
 * @pdev: pointer to PCI device struct
 * @state: bit-mask of ASPM states to disable
 *
 * This function acquires the pci_bus_sem!
 */
/* ... */
/**
 * e1000e_disable_aspm_locked - Disable ASPM states
 * @pdev: pointer to PCI device struct
 * @state: bit-mask of ASPM states to disable
 *
 * This function must be called with pci_bus_sem acquired!
 */
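/* Hedged sketch: the generic kernel API that __e1000e_disable_aspm() builds
 * on. pci_disable_link_state() asks the PCIe core to keep the given ASPM
 * states off; the driver falls back to direct config-space writes only when
 * the core refuses (e.g. the BIOS retains ASPM control).
 */
#include <linux/pci.h>

static void sketch_disable_aspm(struct pci_dev *pdev)
{
	/* Keep both L0s and L1 disabled on this device's link. */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				     PCIE_LINK_STATE_L1);
}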
/* __e1000_resume() */
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);
	/* ... */
	if (hw->mac.type >= e1000_pch2lan)
		e1000_resume_workarounds_pchlan(&adapter->hw);
	/* ... */
	/* report the system wakeup cause from S3/S4 */
	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
		u16 phy_data;

		e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
		if (phy_data)
			e_info("PHY Wakeup cause - %s\n", /* ... */);
		e1e_wphy(&adapter->hw, BM_WUS, ~0);
	} else {
		u32 wus = er32(WUS);

		if (wus)
			e_info("MAC Wakeup cause - %s\n", /* ... */);
		ew32(WUS, ~0);
	}
	/* ... */
	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);
/* e1000e_pm_suspend() */
	if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
		e1000e_s0ix_entry_flow(adapter);
	/* ... */
/* e1000e_pm_resume() */
	if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
		e1000e_s0ix_exit_flow(adapter);
/* e1000e_pm_runtime_idle() */
	eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability;
	/* ... */
	adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp;
	/* ... */
	return -EBUSY;
/* e1000e_pm_runtime_resume() */
	pdev->pme_poll = true;
	/* ... */
	if (netdev->flags & IFF_UP)
		e1000e_up(adapter);
/* e1000e_pm_runtime_suspend() */
	if (netdev->flags & IFF_UP) {
		int count = E1000_CHECK_RESET_COUNT;

		while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
			usleep_range(10000, 11000);

		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
		/* ... */
	}
	/* ... */
	return -EBUSY;
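/* Hedged sketch: the runtime-PM reference pattern the driver relies on.
 * Paths that touch the hardware while the device may be runtime-suspended
 * bracket the access with pm_runtime_get_sync()/pm_runtime_put_sync(), as
 * e1000_change_mtu() and e1000e_flush_lpic() do above. The function name
 * is illustrative.
 */
#include <linux/pm_runtime.h>

static void sketch_touch_hw(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* resume the device if suspended */

	/* ... MMIO/PHY access is safe here ... */

	pm_runtime_put_sync(dev);	/* drop reference; may re-suspend */
}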
/* e1000_shutdown() */
	e1000e_pm_freeze(&pdev->dev);
/* e1000_intr_msix() - netpoll helper: kick each MSI-X vector in turn */
	if (adapter->msix_entries) {
		/* ... Rx vector ... */
		msix_irq = adapter->msix_entries[vector].vector;
		/* ... Tx vector ... */
		msix_irq = adapter->msix_entries[vector].vector;
		/* ... other/link vector ... */
		msix_irq = adapter->msix_entries[vector].vector;
	}
/* e1000_netpoll() - Polling 'interrupt': used by things like netconsole to
 * send skbs without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		e1000_intr_msix(adapter->pdev->irq, netdev);
		break;
	case E1000E_INT_MODE_MSI:
		if (disable_hardirq(adapter->pdev->irq))
			e1000_intr_msi(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	default:	/* legacy INTx */
		if (disable_hardirq(adapter->pdev->irq))
			e1000_intr(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	}
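/* Hedged sketch: how a netpoll handler like the one above is wired into the
 * driver's net_device_ops (guarded by CONFIG_NET_POLL_CONTROLLER in-tree).
 * The table below is illustrative and far from the driver's full ops list.
 */
#ifdef CONFIG_NET_POLL_CONTROLLER
static const struct net_device_ops sketch_netdev_ops = {
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats64	= e1000e_get_stats64,
	.ndo_poll_controller	= e1000_netpoll,
};
#endif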
/**
 * e1000_io_error_detected - called when PCI error is detected
 */
	e1000e_pm_freeze(&pdev->dev);
	/* ... */
/**
 * e1000_io_slot_reset - called after the PCI bus has been reset
 *
 * Restart the card from scratch, as if from a cold boot. Implementation
 * resembles the first half of the e1000e_pm_resume routine.
 */
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	/* ... */
	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pdev->state_saved = true;
		/* ... */
	}
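/* Hedged sketch: the AER callbacks above are registered through a
 * pci_error_handlers table on the PCI driver. The field names are the real
 * struct pci_error_handlers members; the table name is illustrative.
 */
static const struct pci_error_handlers sketch_err_handler = {
	.error_detected	= e1000_io_error_detected,
	.slot_reset	= e1000_io_slot_reset,
	.resume		= e1000_io_resume,
};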
/**
 * e1000_io_resume - called when traffic can start flowing again
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation. Implementation resembles the
 * second half of the e1000e_pm_resume routine.
 */
	e1000e_pm_thaw(&pdev->dev);
	/* ... */
	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);
/* e1000_print_device_info() */
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	/* ... */
	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		"Width x1"),
	       netdev->dev_addr);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	/* ... */
	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
	       hw->mac.type, hw->phy.type, pba_str);
/* e1000_eeprom_checks() */
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	if (hw->mac.type != e1000_82573)
		return;
	/* ... */
	dev_warn(&adapter->pdev->dev,
		 "Warning: detected DSPD enabled in EEPROM\n");
/* e1000_fix_features() */
	struct e1000_hw *hw = &adapter->hw;
	/* ... */
	/* Jumbo frame workaround on 82579 and newer requires CRC stripping */
	if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_RXFCS;

	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable, make sure the Tx flag is always in the same
	 * state as the Rx flag.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
/* e1000_set_features() */
	netdev_features_t changed = features ^ netdev->features;

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
		adapter->flags |= FLAG_TSO_FORCE;
	/* ... */
	if (changed & NETIF_F_RXFCS) {
		if (features & NETIF_F_RXFCS) {
			adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		} else {
			/* Go back to the default, which may still mean
			 * stripping is disabled at the adapter level.
			 */
			if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
				adapter->flags2 |= FLAG2_CRC_STRIPPING;
			else
				adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		}
	}
	/* ... */
	netdev->features = features;
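/* Worked micro-example of the changed-bit computation above: XOR of the old
 * and requested feature masks leaves exactly the toggled bits set. The
 * helper name is illustrative.
 */
#include <linux/netdev_features.h>

static bool sketch_rxfcs_toggled(netdev_features_t old,
				 netdev_features_t requested)
{
	netdev_features_t changed = old ^ requested;

	return !!(changed & NETIF_F_RXFCS);
}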
/**
 * e1000_probe - Device Initialization Routine
 */
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	/* ... */
	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	/* ... */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"No usable DMA configuration, aborting\n");
		goto err_dma;
	}
	/* ... */
	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	netdev->irq = pdev->irq;
	/* ... */
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->max_hw_frame_size = ei->max_hw_frame_size;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) &&
	    (hw->mac.type < e1000_pch_spt)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* Set default EEE advertisement */
	if (adapter->flags2 & FLAG2_HAS_EEE)
		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
	/* construct the net_device struct */
	netdev->netdev_ops = &e1000e_netdev_ops;
	e1000e_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000e_poll);
	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;
	/* ... */
	/* setup adapter struct */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;
	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM) &&
	    (hw->mac.type < e1000_pch_spt))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");
	/* Set initial default active device features */
	netdev->features = (NETIF_F_SG |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_CSUM);

	/* disable TSO for pcie and 10/100 speeds to avoid
	 * ...
	 */
	if (!(adapter->flags & FLAG_TSO_FORCE)) {
		switch (adapter->link_speed) {
		case SPEED_10:
		case SPEED_100:
			netdev->features &= ~NETIF_F_TSO;
			netdev->features &= ~NETIF_F_TSO6;
			break;
		case SPEED_1000:
			netdev->features |= NETIF_F_TSO;
			netdev->features |= NETIF_F_TSO6;
			break;
		default:
			break;
		}
		if (hw->mac.type == e1000_pch_spt) {
			netdev->features &= ~NETIF_F_TSO;
			netdev->features &= ~NETIF_F_TSO6;
		}
	}

	/* Set user-changeable features (subset of all device features) */
	netdev->hw_features = netdev->features;
	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= (NETIF_F_SG |
				  NETIF_F_TSO |
				  NETIF_F_TSO6 |
				  NETIF_F_HW_CSUM);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->features |= NETIF_F_HIGHDMA;
	netdev->vlan_features |= NETIF_F_HIGHDMA;
	/* MTU range: 68 - max_hw_frame_size */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = adapter->max_hw_frame_size -
			  (VLAN_ETH_HLEN + ETH_FCS_LEN);

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/* before reading the NVM, reset the controller to a known state */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/* the NVM checksum may fail on the first attempt; retry a few times */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}
	/* ... */
	/* copy the MAC address */
	if (e1000e_read_mac_addr(&adapter->hw))
		dev_err(&pdev->dev,
			"NVM Read Error while reading MAC address\n");

	eth_hw_addr_set(netdev, adapter->hw.mac.addr);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
			netdev->dev_addr);
		err = -EIO;
		goto err_eeprom;
	}
	timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = true;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;
	/* Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* ... */
		if ((hw->mac.type > e1000_ich10lan) &&
		    (eeprom_data & E1000_WUC_PHY_WAKE))
			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			ret_val = e1000_read_nvm(&adapter->hw,
						 NVM_INIT_CONTROL3_PORT_B,
						 1, &eeprom_data);
		else
			ret_val = e1000_read_nvm(&adapter->hw,
						 NVM_INIT_CONTROL3_PORT_A,
						 1, &eeprom_data);
	}
	/* ... */
	else if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* some boards simply do not support wake on LAN on any port */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the WoL settings based on the EEPROM settings */
	adapter->wol = adapter->eeprom_wol;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
	    (hw->mac.ops.check_mng_mode(hw)))
		device_wakeup_enable(&pdev->dev);

	/* save off EEPROM version number */
	ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
	if (ret_val)
		adapter->eeprom_vers = 0;
	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up. For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	if (hw->mac.type >= e1000_pch_cnp)
		adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;

	strscpy(netdev->name, "eth%d", sizeof(netdev->name));
	err = register_netdev(netdev);
	if (err)
		goto err_register;
	/* ... */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE);
	/* ... */
	pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_release_hw_control(adapter);
err_eeprom:
	if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
	/* ... */
	if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt))
		iounmap(adapter->hw.flash_address);
	/* ... */
	iounmap(adapter->hw.hw_addr);
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
	/* The timers may be rescheduled, so explicitly disable them
	 * from being rescheduled.
	 */
	set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->downshift_task);
	cancel_work_sync(&adapter->update_phy_task);
	cancel_work_sync(&adapter->print_hang_task);

	if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
		cancel_work_sync(&adapter->tx_hwtstamp_work);
		if (adapter->tx_hwtstamp_skb) {
			dev_consume_skb_any(adapter->tx_hwtstamp_skb);
			adapter->tx_hwtstamp_skb = NULL;
		}
	}
	/* ... */
	pm_runtime_get_noresume(&pdev->dev);
	/* ... */
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if ((adapter->hw.flash_address) &&
	    (adapter->hw.mac.type < e1000_pch_spt))
		iounmap(adapter->hw.flash_address);
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is loaded.
 */
	pr_info("Intel(R) PRO/1000 Network Driver\n");
	pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");

	return pci_register_driver(&e1000_driver);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed from memory.
 */
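/* Hedged sketch of the e1000_driver table registered above: probe, remove,
 * and shutdown plus the AER callbacks. The field names are the real
 * struct pci_driver members; the id_table name and the elided PM wiring
 * are assumptions.
 */
static struct pci_driver sketch_e1000_driver = {
	.name		= "e1000e",
	.id_table	= e1000_pci_tbl,	/* assumed PCI id table name */
	.probe		= e1000_probe,
	.remove		= e1000_remove,
	.shutdown	= e1000_shutdown,
	.err_handler	= &sketch_err_handler,	/* sketched earlier */
};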