
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
35 static int debug = -1;
109 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
124 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) in __ew32_prepare()
130 if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in __ew32()
133 writel(val, hw->hw_addr + reg); in __ew32()
137 * e1000_regdump - register printout routine
147 switch (reginfo->ofs) { in e1000_regdump()
161 pr_info("%-15s %08x\n", in e1000_regdump()
162 reginfo->name, __er32(hw, reginfo->ofs)); in e1000_regdump()
166 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); in e1000_regdump()
167 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); in e1000_regdump()
176 for (i = 0; i < adapter->rx_ps_pages; i++) { in e1000e_dump_ps_pages()
177 ps_page = &bi->ps_pages[i]; in e1000e_dump_ps_pages()
179 if (ps_page->page) { in e1000e_dump_ps_pages()
182 16, 1, page_address(ps_page->page), in e1000e_dump_ps_pages()
189 * e1000e_dump - Print registers, Tx-ring and Rx-ring
194 struct net_device *netdev = adapter->netdev; in e1000e_dump()
195 struct e1000_hw *hw = &adapter->hw; in e1000e_dump()
197 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000e_dump()
204 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000e_dump()
221 dev_info(&adapter->pdev->dev, "Net device Info\n"); in e1000e_dump()
223 pr_info("%-15s %016lX %016lX\n", netdev->name, in e1000e_dump()
224 netdev->state, dev_trans_start(netdev)); in e1000e_dump()
228 dev_info(&adapter->pdev->dev, "Register Dump\n"); in e1000e_dump()
231 reginfo->name; reginfo++) { in e1000e_dump()
239 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); in e1000e_dump()
240 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); in e1000e_dump()
241 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; in e1000e_dump()
243 0, tx_ring->next_to_use, tx_ring->next_to_clean, in e1000e_dump()
244 (unsigned long long)buffer_info->dma, in e1000e_dump()
245 buffer_info->length, in e1000e_dump()
246 buffer_info->next_to_watch, in e1000e_dump()
247 (unsigned long long)buffer_info->time_stamp); in e1000e_dump()
253 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); in e1000e_dump()
255 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) in e1000e_dump()
258 * +--------------------------------------------------------------+ in e1000e_dump()
260 * +--------------------------------------------------------------+ in e1000e_dump()
262 * +--------------------------------------------------------------+ in e1000e_dump()
267 * +----------------------------------------------------------------+ in e1000e_dump()
269 * +----------------------------------------------------------------+ in e1000e_dump()
271 * +----------------------------------------------------------------+ in e1000e_dump()
275 * +----------------------------------------------------------------+ in e1000e_dump()
277 * +----------------------------------------------------------------+ in e1000e_dump()
279 * +----------------------------------------------------------------+ in e1000e_dump()
282 pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n"); in e1000e_dump()
283 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n"); in e1000e_dump()
284 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n"); in e1000e_dump()
285 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in e1000e_dump()
288 buffer_info = &tx_ring->buffer_info[i]; in e1000e_dump()
290 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) in e1000e_dump()
292 else if (i == tx_ring->next_to_use) in e1000e_dump()
294 else if (i == tx_ring->next_to_clean) in e1000e_dump()
299 (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' : in e1000e_dump()
300 ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')), in e1000e_dump()
302 (unsigned long long)le64_to_cpu(u0->a), in e1000e_dump()
303 (unsigned long long)le64_to_cpu(u0->b), in e1000e_dump()
304 (unsigned long long)buffer_info->dma, in e1000e_dump()
305 buffer_info->length, buffer_info->next_to_watch, in e1000e_dump()
306 (unsigned long long)buffer_info->time_stamp, in e1000e_dump()
307 buffer_info->skb, next_desc); in e1000e_dump()
309 if (netif_msg_pktdata(adapter) && buffer_info->skb) in e1000e_dump()
311 16, 1, buffer_info->skb->data, in e1000e_dump()
312 buffer_info->skb->len, true); in e1000e_dump()
317 dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); in e1000e_dump()
320 0, rx_ring->next_to_use, rx_ring->next_to_clean); in e1000e_dump()
326 dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); in e1000e_dump()
327 switch (adapter->rx_ps_pages) { in e1000e_dump()
333 * +-----------------------------------------------------+ in e1000e_dump()
335 * +-----------------------------------------------------+ in e1000e_dump()
337 * +-----------------------------------------------------+ in e1000e_dump()
339 * +-----------------------------------------------------+ in e1000e_dump()
341 * +-----------------------------------------------------+ in e1000e_dump()
343 …0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt… in e1000e_dump()
344 /* [Extended] Receive Descriptor (Write-Back) Format in e1000e_dump()
347 * +------------------------------------------------------+ in e1000e_dump()
350 * +------------------------------------------------------+ in e1000e_dump()
352 * +------------------------------------------------------+ in e1000e_dump()
355 …nfo("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ----------… in e1000e_dump()
356 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
358 buffer_info = &rx_ring->buffer_info[i]; in e1000e_dump()
362 le32_to_cpu(rx_desc_ps->wb.middle.status_error); in e1000e_dump()
364 if (i == rx_ring->next_to_use) in e1000e_dump()
366 else if (i == rx_ring->next_to_clean) in e1000e_dump()
373 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n", in e1000e_dump()
375 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
376 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
377 (unsigned long long)le64_to_cpu(u1->c), in e1000e_dump()
378 (unsigned long long)le64_to_cpu(u1->d), in e1000e_dump()
379 buffer_info->skb, next_desc); in e1000e_dump()
383 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
384 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
385 (unsigned long long)le64_to_cpu(u1->c), in e1000e_dump()
386 (unsigned long long)le64_to_cpu(u1->d), in e1000e_dump()
387 (unsigned long long)buffer_info->dma, in e1000e_dump()
388 buffer_info->skb, next_desc); in e1000e_dump()
400 * +-----------------------------------------------------+ in e1000e_dump()
402 * +-----------------------------------------------------+ in e1000e_dump()
404 * +-----------------------------------------------------+ in e1000e_dump()
406 …pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read… in e1000e_dump()
407 /* Extended Receive Descriptor (Write-Back) Format in e1000e_dump()
410 * +------------------------------------------------------+ in e1000e_dump()
412 * 0 +-------------------+ Rsvd | Reserved | MRQ RSS | in e1000e_dump()
415 * +------------------------------------------------------+ in e1000e_dump()
417 * +------------------------------------------------------+ in e1000e_dump()
420 …pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n"… in e1000e_dump()
422 for (i = 0; i < rx_ring->count; i++) { in e1000e_dump()
425 buffer_info = &rx_ring->buffer_info[i]; in e1000e_dump()
428 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000e_dump()
430 if (i == rx_ring->next_to_use) in e1000e_dump()
432 else if (i == rx_ring->next_to_clean) in e1000e_dump()
439 pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n", in e1000e_dump()
441 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
442 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
443 buffer_info->skb, next_desc); in e1000e_dump()
447 (unsigned long long)le64_to_cpu(u1->a), in e1000e_dump()
448 (unsigned long long)le64_to_cpu(u1->b), in e1000e_dump()
449 (unsigned long long)buffer_info->dma, in e1000e_dump()
450 buffer_info->skb, next_desc); in e1000e_dump()
453 buffer_info->skb) in e1000e_dump()
457 buffer_info->skb->data, in e1000e_dump()
458 adapter->rx_buffer_len, in e1000e_dump()
466 * e1000_desc_unused - calculate if we have unused descriptors
471 if (ring->next_to_clean > ring->next_to_use) in e1000_desc_unused()
472 return ring->next_to_clean - ring->next_to_use - 1; in e1000_desc_unused()
474 return ring->count + ring->next_to_clean - ring->next_to_use - 1; in e1000_desc_unused()
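
e1000_desc_unused() is plain circular-ring arithmetic: one slot is always left unused so that next_to_use == next_to_clean can unambiguously mean "empty". A minimal standalone sketch of the same math (hypothetical helper, not driver code):

        static unsigned int ring_unused(unsigned int count,
                                        unsigned int next_to_clean,
                                        unsigned int next_to_use)
        {
                /* clean pointer ahead of use pointer: free space is the gap */
                if (next_to_clean > next_to_use)
                        return next_to_clean - next_to_use - 1;
                /* otherwise the free region wraps around the end of the ring */
                return count + next_to_clean - next_to_use - 1;
        }

        /* e.g. count = 256, next_to_clean = 10, next_to_use = 200:
         *      256 + 10 - 200 - 1 = 65 descriptors still free
         */
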
478 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
498 spin_lock_irqsave(&adapter->systim_lock, flags); in e1000e_systim_to_hwtstamp()
499 ns = timecounter_cyc2time(&adapter->tc, systim); in e1000e_systim_to_hwtstamp()
500 spin_unlock_irqrestore(&adapter->systim_lock, flags); in e1000e_systim_to_hwtstamp()
503 hwtstamps->hwtstamp = ns_to_ktime(ns); in e1000e_systim_to_hwtstamp()
507 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
519 struct e1000_hw *hw = &adapter->hw; in e1000e_rx_hwtstamp()
522 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) || in e1000e_rx_hwtstamp()
538 adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP; in e1000e_rx_hwtstamp()
542 * e1000_receive_skb - helper function to handle Rx indications
557 skb->protocol = eth_type_trans(skb, netdev); in e1000_receive_skb()
562 napi_gro_receive(&adapter->napi, skb); in e1000_receive_skb()
566 * e1000_rx_checksum - Receive Checksum Offload
580 if (!(adapter->netdev->features & NETIF_F_RXCSUM)) in e1000_rx_checksum()
590 adapter->hw_csum_err++; in e1000_rx_checksum()
599 skb->ip_summed = CHECKSUM_UNNECESSARY; in e1000_rx_checksum()
600 adapter->hw_csum_good++; in e1000_rx_checksum()
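
e1000_rx_checksum() implements the usual Rx checksum-offload contract: a good hardware TCP/UDP checksum marks the skb CHECKSUM_UNNECESSARY so the stack skips software verification, while a reported error bumps hw_csum_err and leaves the default CHECKSUM_NONE for the stack to verify. A condensed sketch of that flow (descriptor bit names from the e1000 Rx descriptor; the early returns for NETIF_F_RXCSUM off and the ignore-checksum bit are elided):

        static void rx_csum_sketch(struct e1000_adapter *adapter,
                                   u32 status_err, struct sk_buff *skb)
        {
                u16 status = (u16)status_err;           /* low bits: status  */
                u8 errors = (u8)(status_err >> 24);     /* high byte: errors */

                skb_checksum_none_assert(skb);  /* default: stack verifies */

                if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
                        adapter->hw_csum_err++; /* hw saw a bad checksum */
                        return;
                }
                if (status & E1000_RXD_STAT_TCPCS) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY; /* hw verified */
                        adapter->hw_csum_good++;
                }
        }
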
605 struct e1000_adapter *adapter = rx_ring->adapter; in e1000e_update_rdt_wa()
606 struct e1000_hw *hw = &adapter->hw; in e1000e_update_rdt_wa()
609 writel(i, rx_ring->tail); in e1000e_update_rdt_wa()
611 if (unlikely(i != readl(rx_ring->tail))) { in e1000e_update_rdt_wa()
615 e_err("ME firmware caused invalid RDT - resetting\n"); in e1000e_update_rdt_wa()
616 schedule_work(&adapter->reset_task); in e1000e_update_rdt_wa()
622 struct e1000_adapter *adapter = tx_ring->adapter; in e1000e_update_tdt_wa()
623 struct e1000_hw *hw = &adapter->hw; in e1000e_update_tdt_wa()
626 writel(i, tx_ring->tail); in e1000e_update_tdt_wa()
628 if (unlikely(i != readl(tx_ring->tail))) { in e1000e_update_tdt_wa()
632 e_err("ME firmware caused invalid TDT - resetting\n"); in e1000e_update_tdt_wa()
633 schedule_work(&adapter->reset_task); in e1000e_update_tdt_wa()
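
e1000e_update_rdt_wa() and e1000e_update_tdt_wa() wrap the same defensive pattern for parts where Manageability Engine (ME) firmware can intercept MMIO cycles: write the tail register, read it back, and schedule a full reset if the value did not stick. A minimal sketch of the pattern (the hw-specific __ew32_prepare() arbitration and queue-disable steps are elided; names hypothetical):

        static void write_tail_checked(void __iomem *tail, u32 i,
                                       struct work_struct *reset_task)
        {
                writel(i, tail);
                if (unlikely(i != readl(tail))) {
                        /* ME firmware swallowed the write: hardware and
                         * driver now disagree on the ring index, and only
                         * a full reset restores a consistent state.
                         */
                        schedule_work(reset_task);
                }
        }
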
638 * e1000_alloc_rx_buffers - Replace used receive buffers
646 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_alloc_rx_buffers()
647 struct net_device *netdev = adapter->netdev; in e1000_alloc_rx_buffers()
648 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_rx_buffers()
653 unsigned int bufsz = adapter->rx_buffer_len; in e1000_alloc_rx_buffers()
655 i = rx_ring->next_to_use; in e1000_alloc_rx_buffers()
656 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers()
658 while (cleaned_count--) { in e1000_alloc_rx_buffers()
659 skb = buffer_info->skb; in e1000_alloc_rx_buffers()
668 adapter->alloc_rx_buff_failed++; in e1000_alloc_rx_buffers()
672 buffer_info->skb = skb; in e1000_alloc_rx_buffers()
674 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, in e1000_alloc_rx_buffers()
675 adapter->rx_buffer_len, in e1000_alloc_rx_buffers()
677 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in e1000_alloc_rx_buffers()
678 dev_err(&pdev->dev, "Rx DMA map failed\n"); in e1000_alloc_rx_buffers()
679 adapter->rx_dma_failed++; in e1000_alloc_rx_buffers()
684 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); in e1000_alloc_rx_buffers()
686 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { in e1000_alloc_rx_buffers()
689 * applicable for weak-ordered memory model archs, in e1000_alloc_rx_buffers()
690 * such as IA-64). in e1000_alloc_rx_buffers()
693 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_alloc_rx_buffers()
696 writel(i, rx_ring->tail); in e1000_alloc_rx_buffers()
699 if (i == rx_ring->count) in e1000_alloc_rx_buffers()
701 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers()
704 rx_ring->next_to_use = i; in e1000_alloc_rx_buffers()
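
Two details of the refill loop above are worth spelling out. First, the tail register is only touched every E1000_RX_BUFFER_WRITE descriptors: with the usual value of 16, !(i & (E1000_RX_BUFFER_WRITE - 1)) holds at i = 0, 16, 32, ..., so MMIO writes are batched to one per 16 refilled buffers. Second, the comment at 689-690 accompanies a write barrier in the full source, ordering the descriptor write before the tail write. In outline (a sketch of the ordering already present above, not new driver logic):

        rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); /* 1. fill */
        wmb();                    /* 2. descriptor reaches memory first      */
        writel(i, rx_ring->tail); /* 3. only now may hardware DMA-fetch it   */

Without the wmb(), a weakly ordered CPU (the comment names IA-64) could make the tail update visible before the descriptor contents, letting the NIC fetch a stale descriptor.
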
708 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
716 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_alloc_rx_buffers_ps()
717 struct net_device *netdev = adapter->netdev; in e1000_alloc_rx_buffers_ps()
718 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_rx_buffers_ps()
725 i = rx_ring->next_to_use; in e1000_alloc_rx_buffers_ps()
726 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers_ps()
728 while (cleaned_count--) { in e1000_alloc_rx_buffers_ps()
732 ps_page = &buffer_info->ps_pages[j]; in e1000_alloc_rx_buffers_ps()
733 if (j >= adapter->rx_ps_pages) { in e1000_alloc_rx_buffers_ps()
735 rx_desc->read.buffer_addr[j + 1] = in e1000_alloc_rx_buffers_ps()
739 if (!ps_page->page) { in e1000_alloc_rx_buffers_ps()
740 ps_page->page = alloc_page(gfp); in e1000_alloc_rx_buffers_ps()
741 if (!ps_page->page) { in e1000_alloc_rx_buffers_ps()
742 adapter->alloc_rx_buff_failed++; in e1000_alloc_rx_buffers_ps()
745 ps_page->dma = dma_map_page(&pdev->dev, in e1000_alloc_rx_buffers_ps()
746 ps_page->page, in e1000_alloc_rx_buffers_ps()
749 if (dma_mapping_error(&pdev->dev, in e1000_alloc_rx_buffers_ps()
750 ps_page->dma)) { in e1000_alloc_rx_buffers_ps()
751 dev_err(&adapter->pdev->dev, in e1000_alloc_rx_buffers_ps()
753 adapter->rx_dma_failed++; in e1000_alloc_rx_buffers_ps()
758 * didn't change because each write-back in e1000_alloc_rx_buffers_ps()
761 rx_desc->read.buffer_addr[j + 1] = in e1000_alloc_rx_buffers_ps()
762 cpu_to_le64(ps_page->dma); in e1000_alloc_rx_buffers_ps()
765 skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0, in e1000_alloc_rx_buffers_ps()
769 adapter->alloc_rx_buff_failed++; in e1000_alloc_rx_buffers_ps()
773 buffer_info->skb = skb; in e1000_alloc_rx_buffers_ps()
774 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, in e1000_alloc_rx_buffers_ps()
775 adapter->rx_ps_bsize0, in e1000_alloc_rx_buffers_ps()
777 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in e1000_alloc_rx_buffers_ps()
778 dev_err(&pdev->dev, "Rx DMA map failed\n"); in e1000_alloc_rx_buffers_ps()
779 adapter->rx_dma_failed++; in e1000_alloc_rx_buffers_ps()
782 buffer_info->skb = NULL; in e1000_alloc_rx_buffers_ps()
786 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); in e1000_alloc_rx_buffers_ps()
788 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { in e1000_alloc_rx_buffers_ps()
791 * applicable for weak-ordered memory model archs, in e1000_alloc_rx_buffers_ps()
792 * such as IA-64). in e1000_alloc_rx_buffers_ps()
795 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_alloc_rx_buffers_ps()
798 writel(i << 1, rx_ring->tail); in e1000_alloc_rx_buffers_ps()
802 if (i == rx_ring->count) in e1000_alloc_rx_buffers_ps()
804 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_rx_buffers_ps()
808 rx_ring->next_to_use = i; in e1000_alloc_rx_buffers_ps()
812 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
821 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_alloc_jumbo_rx_buffers()
822 struct net_device *netdev = adapter->netdev; in e1000_alloc_jumbo_rx_buffers()
823 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_jumbo_rx_buffers()
828 unsigned int bufsz = 256 - 16; /* for skb_reserve */ in e1000_alloc_jumbo_rx_buffers()
830 i = rx_ring->next_to_use; in e1000_alloc_jumbo_rx_buffers()
831 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_jumbo_rx_buffers()
833 while (cleaned_count--) { in e1000_alloc_jumbo_rx_buffers()
834 skb = buffer_info->skb; in e1000_alloc_jumbo_rx_buffers()
843 adapter->alloc_rx_buff_failed++; in e1000_alloc_jumbo_rx_buffers()
847 buffer_info->skb = skb; in e1000_alloc_jumbo_rx_buffers()
850 if (!buffer_info->page) { in e1000_alloc_jumbo_rx_buffers()
851 buffer_info->page = alloc_page(gfp); in e1000_alloc_jumbo_rx_buffers()
852 if (unlikely(!buffer_info->page)) { in e1000_alloc_jumbo_rx_buffers()
853 adapter->alloc_rx_buff_failed++; in e1000_alloc_jumbo_rx_buffers()
858 if (!buffer_info->dma) { in e1000_alloc_jumbo_rx_buffers()
859 buffer_info->dma = dma_map_page(&pdev->dev, in e1000_alloc_jumbo_rx_buffers()
860 buffer_info->page, 0, in e1000_alloc_jumbo_rx_buffers()
863 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { in e1000_alloc_jumbo_rx_buffers()
864 adapter->alloc_rx_buff_failed++; in e1000_alloc_jumbo_rx_buffers()
870 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); in e1000_alloc_jumbo_rx_buffers()
872 if (unlikely(++i == rx_ring->count)) in e1000_alloc_jumbo_rx_buffers()
874 buffer_info = &rx_ring->buffer_info[i]; in e1000_alloc_jumbo_rx_buffers()
877 if (likely(rx_ring->next_to_use != i)) { in e1000_alloc_jumbo_rx_buffers()
878 rx_ring->next_to_use = i; in e1000_alloc_jumbo_rx_buffers()
879 if (unlikely(i-- == 0)) in e1000_alloc_jumbo_rx_buffers()
880 i = (rx_ring->count - 1); in e1000_alloc_jumbo_rx_buffers()
884 * applicable for weak-ordered memory model archs, in e1000_alloc_jumbo_rx_buffers()
885 * such as IA-64). in e1000_alloc_jumbo_rx_buffers()
888 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_alloc_jumbo_rx_buffers()
891 writel(i, rx_ring->tail); in e1000_alloc_jumbo_rx_buffers()
898 if (netdev->features & NETIF_F_RXHASH) in e1000_rx_hash()
903 * e1000_clean_rx_irq - Send received data up the network stack
914 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_rx_irq()
915 struct net_device *netdev = adapter->netdev; in e1000_clean_rx_irq()
916 struct pci_dev *pdev = adapter->pdev; in e1000_clean_rx_irq()
917 struct e1000_hw *hw = &adapter->hw; in e1000_clean_rx_irq()
926 i = rx_ring->next_to_clean; in e1000_clean_rx_irq()
928 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_rx_irq()
929 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq()
939 skb = buffer_info->skb; in e1000_clean_rx_irq()
940 buffer_info->skb = NULL; in e1000_clean_rx_irq()
942 prefetch(skb->data - NET_IP_ALIGN); in e1000_clean_rx_irq()
945 if (i == rx_ring->count) in e1000_clean_rx_irq()
950 next_buffer = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq()
954 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_irq()
955 adapter->rx_buffer_len, DMA_FROM_DEVICE); in e1000_clean_rx_irq()
956 buffer_info->dma = 0; in e1000_clean_rx_irq()
958 length = le16_to_cpu(rx_desc->wb.upper.length); in e1000_clean_rx_irq()
967 adapter->flags2 |= FLAG2_IS_DISCARDING; in e1000_clean_rx_irq()
969 if (adapter->flags2 & FLAG2_IS_DISCARDING) { in e1000_clean_rx_irq()
973 buffer_info->skb = skb; in e1000_clean_rx_irq()
975 adapter->flags2 &= ~FLAG2_IS_DISCARDING; in e1000_clean_rx_irq()
980 !(netdev->features & NETIF_F_RXALL))) { in e1000_clean_rx_irq()
982 buffer_info->skb = skb; in e1000_clean_rx_irq()
987 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { in e1000_clean_rx_irq()
992 if (netdev->features & NETIF_F_RXFCS) in e1000_clean_rx_irq()
993 total_rx_bytes -= 4; in e1000_clean_rx_irq()
995 length -= 4; in e1000_clean_rx_irq()
1007 napi_alloc_skb(&adapter->napi, length); in e1000_clean_rx_irq()
1010 -NET_IP_ALIGN, in e1000_clean_rx_irq()
1011 (skb->data - in e1000_clean_rx_irq()
1016 buffer_info->skb = skb; in e1000_clean_rx_irq()
1027 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); in e1000_clean_rx_irq()
1030 rx_desc->wb.upper.vlan); in e1000_clean_rx_irq()
1033 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); in e1000_clean_rx_irq()
1037 adapter->alloc_rx_buf(rx_ring, cleaned_count, in e1000_clean_rx_irq()
1046 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_rx_irq()
1048 rx_ring->next_to_clean = i; in e1000_clean_rx_irq()
1052 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); in e1000_clean_rx_irq()
1054 adapter->total_rx_bytes += total_rx_bytes; in e1000_clean_rx_irq()
1055 adapter->total_rx_packets += total_rx_packets; in e1000_clean_rx_irq()
1063 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_put_txbuf()
1065 if (buffer_info->dma) { in e1000_put_txbuf()
1066 if (buffer_info->mapped_as_page) in e1000_put_txbuf()
1067 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, in e1000_put_txbuf()
1068 buffer_info->length, DMA_TO_DEVICE); in e1000_put_txbuf()
1070 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, in e1000_put_txbuf()
1071 buffer_info->length, DMA_TO_DEVICE); in e1000_put_txbuf()
1072 buffer_info->dma = 0; in e1000_put_txbuf()
1074 if (buffer_info->skb) { in e1000_put_txbuf()
1076 dev_kfree_skb_any(buffer_info->skb); in e1000_put_txbuf()
1078 dev_consume_skb_any(buffer_info->skb); in e1000_put_txbuf()
1079 buffer_info->skb = NULL; in e1000_put_txbuf()
1081 buffer_info->time_stamp = 0; in e1000_put_txbuf()
1089 struct net_device *netdev = adapter->netdev; in e1000_print_hw_hang()
1090 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_print_hw_hang()
1091 unsigned int i = tx_ring->next_to_clean; in e1000_print_hw_hang()
1092 unsigned int eop = tx_ring->buffer_info[i].next_to_watch; in e1000_print_hw_hang()
1094 struct e1000_hw *hw = &adapter->hw; in e1000_print_hw_hang()
1098 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_print_hw_hang()
1101 if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { in e1000_print_hw_hang()
1102 /* May be blocked on write-back; flush and detect again in e1000_print_hw_hang()
1105 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000_print_hw_hang()
1111 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000_print_hw_hang()
1114 adapter->tx_hang_recheck = true; in e1000_print_hw_hang()
1117 adapter->tx_hang_recheck = false; in e1000_print_hw_hang()
1131 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); in e1000_print_hw_hang()
1146 "PHY 1000BASE-T Status <%x>\n" in e1000_print_hw_hang()
1149 readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, in e1000_print_hw_hang()
1150 tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, in e1000_print_hw_hang()
1151 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), in e1000_print_hw_hang()
1157 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) in e1000_print_hw_hang()
1162 * e1000e_tx_hwtstamp_work - check for Tx time stamp
1173 struct e1000_hw *hw = &adapter->hw; in e1000e_tx_hwtstamp_work()
1176 struct sk_buff *skb = adapter->tx_hwtstamp_skb; in e1000e_tx_hwtstamp_work()
1188 adapter->tx_hwtstamp_skb = NULL; in e1000e_tx_hwtstamp_work()
1193 } else if (time_after(jiffies, adapter->tx_hwtstamp_start in e1000e_tx_hwtstamp_work()
1194 + adapter->tx_timeout_factor * HZ)) { in e1000e_tx_hwtstamp_work()
1195 dev_kfree_skb_any(adapter->tx_hwtstamp_skb); in e1000e_tx_hwtstamp_work()
1196 adapter->tx_hwtstamp_skb = NULL; in e1000e_tx_hwtstamp_work()
1197 adapter->tx_hwtstamp_timeouts++; in e1000e_tx_hwtstamp_work()
1201 schedule_work(&adapter->tx_hwtstamp_work); in e1000e_tx_hwtstamp_work()
1206 * e1000_clean_tx_irq - Reclaim resources after transmit completes
1214 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_clean_tx_irq()
1215 struct net_device *netdev = adapter->netdev; in e1000_clean_tx_irq()
1216 struct e1000_hw *hw = &adapter->hw; in e1000_clean_tx_irq()
1224 i = tx_ring->next_to_clean; in e1000_clean_tx_irq()
1225 eop = tx_ring->buffer_info[i].next_to_watch; in e1000_clean_tx_irq()
1228 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && in e1000_clean_tx_irq()
1229 (count < tx_ring->count)) { in e1000_clean_tx_irq()
1235 buffer_info = &tx_ring->buffer_info[i]; in e1000_clean_tx_irq()
1239 total_tx_packets += buffer_info->segs; in e1000_clean_tx_irq()
1240 total_tx_bytes += buffer_info->bytecount; in e1000_clean_tx_irq()
1241 if (buffer_info->skb) { in e1000_clean_tx_irq()
1242 bytes_compl += buffer_info->skb->len; in e1000_clean_tx_irq()
1248 tx_desc->upper.data = 0; in e1000_clean_tx_irq()
1251 if (i == tx_ring->count) in e1000_clean_tx_irq()
1255 if (i == tx_ring->next_to_use) in e1000_clean_tx_irq()
1257 eop = tx_ring->buffer_info[i].next_to_watch; in e1000_clean_tx_irq()
1261 tx_ring->next_to_clean = i; in e1000_clean_tx_irq()
1274 !(test_bit(__E1000_DOWN, &adapter->state))) { in e1000_clean_tx_irq()
1276 ++adapter->restart_queue; in e1000_clean_tx_irq()
1280 if (adapter->detect_tx_hung) { in e1000_clean_tx_irq()
1284 adapter->detect_tx_hung = false; in e1000_clean_tx_irq()
1285 if (tx_ring->buffer_info[i].time_stamp && in e1000_clean_tx_irq()
1286 time_after(jiffies, tx_ring->buffer_info[i].time_stamp in e1000_clean_tx_irq()
1287 + (adapter->tx_timeout_factor * HZ)) && in e1000_clean_tx_irq()
1289 schedule_work(&adapter->print_hang_task); in e1000_clean_tx_irq()
1291 adapter->tx_hang_recheck = false; in e1000_clean_tx_irq()
1293 adapter->total_tx_bytes += total_tx_bytes; in e1000_clean_tx_irq()
1294 adapter->total_tx_packets += total_tx_packets; in e1000_clean_tx_irq()
1295 return count < tx_ring->count; in e1000_clean_tx_irq()
1299 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
1310 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_rx_irq_ps()
1311 struct e1000_hw *hw = &adapter->hw; in e1000_clean_rx_irq_ps()
1313 struct net_device *netdev = adapter->netdev; in e1000_clean_rx_irq_ps()
1314 struct pci_dev *pdev = adapter->pdev; in e1000_clean_rx_irq_ps()
1324 i = rx_ring->next_to_clean; in e1000_clean_rx_irq_ps()
1326 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); in e1000_clean_rx_irq_ps()
1327 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq_ps()
1333 skb = buffer_info->skb; in e1000_clean_rx_irq_ps()
1337 prefetch(skb->data - NET_IP_ALIGN); in e1000_clean_rx_irq_ps()
1340 if (i == rx_ring->count) in e1000_clean_rx_irq_ps()
1345 next_buffer = &rx_ring->buffer_info[i]; in e1000_clean_rx_irq_ps()
1349 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_irq_ps()
1350 adapter->rx_ps_bsize0, DMA_FROM_DEVICE); in e1000_clean_rx_irq_ps()
1351 buffer_info->dma = 0; in e1000_clean_rx_irq_ps()
1355 adapter->flags2 |= FLAG2_IS_DISCARDING; in e1000_clean_rx_irq_ps()
1357 if (adapter->flags2 & FLAG2_IS_DISCARDING) { in e1000_clean_rx_irq_ps()
1361 adapter->flags2 &= ~FLAG2_IS_DISCARDING; in e1000_clean_rx_irq_ps()
1366 !(netdev->features & NETIF_F_RXALL))) { in e1000_clean_rx_irq_ps()
1371 length = le16_to_cpu(rx_desc->wb.middle.length0); in e1000_clean_rx_irq_ps()
1386 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); in e1000_clean_rx_irq_ps()
1394 ((length + l1) <= adapter->rx_ps_bsize0)) { in e1000_clean_rx_irq_ps()
1397 ps_page = &buffer_info->ps_pages[0]; in e1000_clean_rx_irq_ps()
1403 dma_sync_single_for_cpu(&pdev->dev, in e1000_clean_rx_irq_ps()
1404 ps_page->dma, in e1000_clean_rx_irq_ps()
1407 vaddr = kmap_atomic(ps_page->page); in e1000_clean_rx_irq_ps()
1410 dma_sync_single_for_device(&pdev->dev, in e1000_clean_rx_irq_ps()
1411 ps_page->dma, in e1000_clean_rx_irq_ps()
1416 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { in e1000_clean_rx_irq_ps()
1417 if (!(netdev->features & NETIF_F_RXFCS)) in e1000_clean_rx_irq_ps()
1418 l1 -= 4; in e1000_clean_rx_irq_ps()
1427 length = le16_to_cpu(rx_desc->wb.upper.length[j]); in e1000_clean_rx_irq_ps()
1431 ps_page = &buffer_info->ps_pages[j]; in e1000_clean_rx_irq_ps()
1432 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, in e1000_clean_rx_irq_ps()
1434 ps_page->dma = 0; in e1000_clean_rx_irq_ps()
1435 skb_fill_page_desc(skb, j, ps_page->page, 0, length); in e1000_clean_rx_irq_ps()
1436 ps_page->page = NULL; in e1000_clean_rx_irq_ps()
1437 skb->len += length; in e1000_clean_rx_irq_ps()
1438 skb->data_len += length; in e1000_clean_rx_irq_ps()
1439 skb->truesize += PAGE_SIZE; in e1000_clean_rx_irq_ps()
1445 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { in e1000_clean_rx_irq_ps()
1446 if (!(netdev->features & NETIF_F_RXFCS)) in e1000_clean_rx_irq_ps()
1447 pskb_trim(skb, skb->len - 4); in e1000_clean_rx_irq_ps()
1451 total_rx_bytes += skb->len; in e1000_clean_rx_irq_ps()
1456 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); in e1000_clean_rx_irq_ps()
1458 if (rx_desc->wb.upper.header_status & in e1000_clean_rx_irq_ps()
1460 adapter->rx_hdr_split++; in e1000_clean_rx_irq_ps()
1463 rx_desc->wb.middle.vlan); in e1000_clean_rx_irq_ps()
1466 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); in e1000_clean_rx_irq_ps()
1467 buffer_info->skb = NULL; in e1000_clean_rx_irq_ps()
1471 adapter->alloc_rx_buf(rx_ring, cleaned_count, in e1000_clean_rx_irq_ps()
1480 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); in e1000_clean_rx_irq_ps()
1482 rx_ring->next_to_clean = i; in e1000_clean_rx_irq_ps()
1486 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); in e1000_clean_rx_irq_ps()
1488 adapter->total_rx_bytes += total_rx_bytes; in e1000_clean_rx_irq_ps()
1489 adapter->total_rx_packets += total_rx_packets; in e1000_clean_rx_irq_ps()
1496 bi->page = NULL; in e1000_consume_page()
1497 skb->len += length; in e1000_consume_page()
1498 skb->data_len += length; in e1000_consume_page()
1499 skb->truesize += PAGE_SIZE; in e1000_consume_page()
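
e1000_consume_page() centralizes the bookkeeping when a receive page is attached to an skb as a fragment: len and data_len grow by the bytes actually received, while truesize grows by a full PAGE_SIZE because the skb now owns the whole page. For example, a 1500-byte frame landing in a 4096-byte page adds 1500 to skb->len and skb->data_len but 4096 to skb->truesize, and it is the truesize figure that socket memory accounting charges.
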
1503 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
1514 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_jumbo_rx_irq()
1515 struct net_device *netdev = adapter->netdev; in e1000_clean_jumbo_rx_irq()
1516 struct pci_dev *pdev = adapter->pdev; in e1000_clean_jumbo_rx_irq()
1526 i = rx_ring->next_to_clean; in e1000_clean_jumbo_rx_irq()
1528 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_jumbo_rx_irq()
1529 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_jumbo_rx_irq()
1539 skb = buffer_info->skb; in e1000_clean_jumbo_rx_irq()
1540 buffer_info->skb = NULL; in e1000_clean_jumbo_rx_irq()
1543 if (i == rx_ring->count) in e1000_clean_jumbo_rx_irq()
1548 next_buffer = &rx_ring->buffer_info[i]; in e1000_clean_jumbo_rx_irq()
1552 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, in e1000_clean_jumbo_rx_irq()
1554 buffer_info->dma = 0; in e1000_clean_jumbo_rx_irq()
1556 length = le16_to_cpu(rx_desc->wb.upper.length); in e1000_clean_jumbo_rx_irq()
1561 !(netdev->features & NETIF_F_RXALL)))) { in e1000_clean_jumbo_rx_irq()
1563 buffer_info->skb = skb; in e1000_clean_jumbo_rx_irq()
1565 if (rx_ring->rx_skb_top) in e1000_clean_jumbo_rx_irq()
1566 dev_kfree_skb_irq(rx_ring->rx_skb_top); in e1000_clean_jumbo_rx_irq()
1567 rx_ring->rx_skb_top = NULL; in e1000_clean_jumbo_rx_irq()
1570 #define rxtop (rx_ring->rx_skb_top) in e1000_clean_jumbo_rx_irq()
1576 skb_fill_page_desc(rxtop, 0, buffer_info->page, in e1000_clean_jumbo_rx_irq()
1581 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
1582 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1584 /* re-use the skb, only consumed the page */ in e1000_clean_jumbo_rx_irq()
1585 buffer_info->skb = skb; in e1000_clean_jumbo_rx_irq()
1593 skb_fill_page_desc(rxtop, shinfo->nr_frags, in e1000_clean_jumbo_rx_irq()
1594 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1596 /* re-use the current skb, we only consumed the in e1000_clean_jumbo_rx_irq()
1599 buffer_info->skb = skb; in e1000_clean_jumbo_rx_irq()
1610 vaddr = kmap_atomic(buffer_info->page); in e1000_clean_jumbo_rx_irq()
1614 /* re-use the page, so don't erase in e1000_clean_jumbo_rx_irq()
1615 * buffer_info->page in e1000_clean_jumbo_rx_irq()
1620 buffer_info->page, 0, in e1000_clean_jumbo_rx_irq()
1631 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); in e1000_clean_jumbo_rx_irq()
1634 total_rx_bytes += skb->len; in e1000_clean_jumbo_rx_irq()
1637 /* eth type trans needs skb->data to point to something */ in e1000_clean_jumbo_rx_irq()
1645 rx_desc->wb.upper.vlan); in e1000_clean_jumbo_rx_irq()
1648 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); in e1000_clean_jumbo_rx_irq()
1652 adapter->alloc_rx_buf(rx_ring, cleaned_count, in e1000_clean_jumbo_rx_irq()
1661 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); in e1000_clean_jumbo_rx_irq()
1663 rx_ring->next_to_clean = i; in e1000_clean_jumbo_rx_irq()
1667 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); in e1000_clean_jumbo_rx_irq()
1669 adapter->total_rx_bytes += total_rx_bytes; in e1000_clean_jumbo_rx_irq()
1670 adapter->total_rx_packets += total_rx_packets; in e1000_clean_jumbo_rx_irq()
1675 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1680 struct e1000_adapter *adapter = rx_ring->adapter; in e1000_clean_rx_ring()
1683 struct pci_dev *pdev = adapter->pdev; in e1000_clean_rx_ring()
1687 for (i = 0; i < rx_ring->count; i++) { in e1000_clean_rx_ring()
1688 buffer_info = &rx_ring->buffer_info[i]; in e1000_clean_rx_ring()
1689 if (buffer_info->dma) { in e1000_clean_rx_ring()
1690 if (adapter->clean_rx == e1000_clean_rx_irq) in e1000_clean_rx_ring()
1691 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_ring()
1692 adapter->rx_buffer_len, in e1000_clean_rx_ring()
1694 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) in e1000_clean_rx_ring()
1695 dma_unmap_page(&pdev->dev, buffer_info->dma, in e1000_clean_rx_ring()
1697 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) in e1000_clean_rx_ring()
1698 dma_unmap_single(&pdev->dev, buffer_info->dma, in e1000_clean_rx_ring()
1699 adapter->rx_ps_bsize0, in e1000_clean_rx_ring()
1701 buffer_info->dma = 0; in e1000_clean_rx_ring()
1704 if (buffer_info->page) { in e1000_clean_rx_ring()
1705 put_page(buffer_info->page); in e1000_clean_rx_ring()
1706 buffer_info->page = NULL; in e1000_clean_rx_ring()
1709 if (buffer_info->skb) { in e1000_clean_rx_ring()
1710 dev_kfree_skb(buffer_info->skb); in e1000_clean_rx_ring()
1711 buffer_info->skb = NULL; in e1000_clean_rx_ring()
1715 ps_page = &buffer_info->ps_pages[j]; in e1000_clean_rx_ring()
1716 if (!ps_page->page) in e1000_clean_rx_ring()
1718 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, in e1000_clean_rx_ring()
1720 ps_page->dma = 0; in e1000_clean_rx_ring()
1721 put_page(ps_page->page); in e1000_clean_rx_ring()
1722 ps_page->page = NULL; in e1000_clean_rx_ring()
1727 if (rx_ring->rx_skb_top) { in e1000_clean_rx_ring()
1728 dev_kfree_skb(rx_ring->rx_skb_top); in e1000_clean_rx_ring()
1729 rx_ring->rx_skb_top = NULL; in e1000_clean_rx_ring()
1733 memset(rx_ring->desc, 0, rx_ring->size); in e1000_clean_rx_ring()
1735 rx_ring->next_to_clean = 0; in e1000_clean_rx_ring()
1736 rx_ring->next_to_use = 0; in e1000_clean_rx_ring()
1737 adapter->flags2 &= ~FLAG2_IS_DISCARDING; in e1000_clean_rx_ring()
1746 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000e_downshift_workaround()
1749 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); in e1000e_downshift_workaround()
1753 * e1000_intr_msi - Interrupt Handler
1761 struct e1000_hw *hw = &adapter->hw; in e1000_intr_msi()
1766 hw->mac.get_link_status = true; in e1000_intr_msi()
1767 /* ICH8 workaround-- Call gig speed drop workaround on cable in e1000_intr_msi()
1770 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && in e1000_intr_msi()
1772 schedule_work(&adapter->downshift_task); in e1000_intr_msi()
1774 /* 80003ES2LAN workaround-- For packet buffer work-around on in e1000_intr_msi()
1779 adapter->flags & FLAG_RX_NEEDS_RESTART) { in e1000_intr_msi()
1784 adapter->flags |= FLAG_RESTART_NOW; in e1000_intr_msi()
1787 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr_msi()
1788 mod_timer(&adapter->watchdog_timer, jiffies + 1); in e1000_intr_msi()
1792 if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { in e1000_intr_msi()
1795 adapter->corr_errors += in e1000_intr_msi()
1797 adapter->uncorr_errors += in e1000_intr_msi()
1802 schedule_work(&adapter->reset_task); in e1000_intr_msi()
1808 if (napi_schedule_prep(&adapter->napi)) { in e1000_intr_msi()
1809 adapter->total_tx_bytes = 0; in e1000_intr_msi()
1810 adapter->total_tx_packets = 0; in e1000_intr_msi()
1811 adapter->total_rx_bytes = 0; in e1000_intr_msi()
1812 adapter->total_rx_packets = 0; in e1000_intr_msi()
1813 __napi_schedule(&adapter->napi); in e1000_intr_msi()
1820 * e1000_intr - Interrupt Handler
1828 struct e1000_hw *hw = &adapter->hw; in e1000_intr()
1831 if (!icr || test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr()
1834 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is in e1000_intr()
1840 /* Interrupt Auto-Mask...upon reading ICR, in e1000_intr()
1846 hw->mac.get_link_status = true; in e1000_intr()
1847 /* ICH8 workaround-- Call gig speed drop workaround on cable in e1000_intr()
1850 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && in e1000_intr()
1852 schedule_work(&adapter->downshift_task); in e1000_intr()
1854 /* 80003ES2LAN workaround-- in e1000_intr()
1855 * For packet buffer work-around on link down event; in e1000_intr()
1860 (adapter->flags & FLAG_RX_NEEDS_RESTART)) { in e1000_intr()
1864 adapter->flags |= FLAG_RESTART_NOW; in e1000_intr()
1867 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr()
1868 mod_timer(&adapter->watchdog_timer, jiffies + 1); in e1000_intr()
1872 if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) { in e1000_intr()
1875 adapter->corr_errors += in e1000_intr()
1877 adapter->uncorr_errors += in e1000_intr()
1882 schedule_work(&adapter->reset_task); in e1000_intr()
1888 if (napi_schedule_prep(&adapter->napi)) { in e1000_intr()
1889 adapter->total_tx_bytes = 0; in e1000_intr()
1890 adapter->total_tx_packets = 0; in e1000_intr()
1891 adapter->total_rx_bytes = 0; in e1000_intr()
1892 adapter->total_rx_packets = 0; in e1000_intr()
1893 __napi_schedule(&adapter->napi); in e1000_intr()
1903 struct e1000_hw *hw = &adapter->hw; in e1000_msix_other()
1906 if (icr & adapter->eiac_mask) in e1000_msix_other()
1907 ew32(ICS, (icr & adapter->eiac_mask)); in e1000_msix_other()
1910 hw->mac.get_link_status = true; in e1000_msix_other()
1912 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_msix_other()
1913 mod_timer(&adapter->watchdog_timer, jiffies + 1); in e1000_msix_other()
1916 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_msix_other()
1926 struct e1000_hw *hw = &adapter->hw; in e1000_intr_msix_tx()
1927 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_intr_msix_tx()
1929 adapter->total_tx_bytes = 0; in e1000_intr_msix_tx()
1930 adapter->total_tx_packets = 0; in e1000_intr_msix_tx()
1934 ew32(ICS, tx_ring->ims_val); in e1000_intr_msix_tx()
1936 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_intr_msix_tx()
1937 ew32(IMS, adapter->tx_ring->ims_val); in e1000_intr_msix_tx()
1946 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_intr_msix_rx()
1951 if (rx_ring->set_itr) { in e1000_intr_msix_rx()
1952 u32 itr = rx_ring->itr_val ? in e1000_intr_msix_rx()
1953 1000000000 / (rx_ring->itr_val * 256) : 0; in e1000_intr_msix_rx()
1955 writel(itr, rx_ring->itr_register); in e1000_intr_msix_rx()
1956 rx_ring->set_itr = 0; in e1000_intr_msix_rx()
1959 if (napi_schedule_prep(&adapter->napi)) { in e1000_intr_msix_rx()
1960 adapter->total_rx_bytes = 0; in e1000_intr_msix_rx()
1961 adapter->total_rx_packets = 0; in e1000_intr_msix_rx()
1962 __napi_schedule(&adapter->napi); in e1000_intr_msix_rx()
1968 * e1000_configure_msix - Configure MSI-X hardware
1972 * generate MSI-X interrupts.
1976 struct e1000_hw *hw = &adapter->hw; in e1000_configure_msix()
1977 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_configure_msix()
1978 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_configure_msix()
1982 adapter->eiac_mask = 0; in e1000_configure_msix()
1984 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ in e1000_configure_msix()
1985 if (hw->mac.type == e1000_82574) { in e1000_configure_msix()
1993 rx_ring->ims_val = E1000_IMS_RXQ0; in e1000_configure_msix()
1994 adapter->eiac_mask |= rx_ring->ims_val; in e1000_configure_msix()
1995 if (rx_ring->itr_val) in e1000_configure_msix()
1996 writel(1000000000 / (rx_ring->itr_val * 256), in e1000_configure_msix()
1997 rx_ring->itr_register); in e1000_configure_msix()
1999 writel(1, rx_ring->itr_register); in e1000_configure_msix()
2003 tx_ring->ims_val = E1000_IMS_TXQ0; in e1000_configure_msix()
2005 if (tx_ring->itr_val) in e1000_configure_msix()
2006 writel(1000000000 / (tx_ring->itr_val * 256), in e1000_configure_msix()
2007 tx_ring->itr_register); in e1000_configure_msix()
2009 writel(1, tx_ring->itr_register); in e1000_configure_msix()
2010 adapter->eiac_mask |= tx_ring->ims_val; in e1000_configure_msix()
2016 if (rx_ring->itr_val) in e1000_configure_msix()
2017 writel(1000000000 / (rx_ring->itr_val * 256), in e1000_configure_msix()
2018 hw->hw_addr + E1000_EITR_82574(vector)); in e1000_configure_msix()
2020 writel(1, hw->hw_addr + E1000_EITR_82574(vector)); in e1000_configure_msix()
2027 /* enable MSI-X PBA support */ in e1000_configure_msix()
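
The recurring expression 1000000000 / (itr_val * 256) converts a target interrupt rate into the 82574's EITR units: itr_val is a rate in interrupts per second, 10^9 / itr_val is the desired gap between interrupts in nanoseconds, and dividing by 256 expresses that gap in the register's 256 ns ticks (the fallback write of 1 keeps throttling effectively off when no rate is configured). A worked example, assuming itr_val = 20000:

        /* desired gap:  1000000000 ns / 20000    = 50000 ns
         * register:     50000 ns / 256 ns/tick  ~= 195 ticks
         * one step:     1000000000 / (20000 * 256) = 195
         */
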
2036 if (adapter->msix_entries) { in e1000e_reset_interrupt_capability()
2037 pci_disable_msix(adapter->pdev); in e1000e_reset_interrupt_capability()
2038 kfree(adapter->msix_entries); in e1000e_reset_interrupt_capability()
2039 adapter->msix_entries = NULL; in e1000e_reset_interrupt_capability()
2040 } else if (adapter->flags & FLAG_MSI_ENABLED) { in e1000e_reset_interrupt_capability()
2041 pci_disable_msi(adapter->pdev); in e1000e_reset_interrupt_capability()
2042 adapter->flags &= ~FLAG_MSI_ENABLED; in e1000e_reset_interrupt_capability()
2047 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
2058 switch (adapter->int_mode) { in e1000e_set_interrupt_capability()
2060 if (adapter->flags & FLAG_HAS_MSIX) { in e1000e_set_interrupt_capability()
2061 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ in e1000e_set_interrupt_capability()
2062 adapter->msix_entries = kcalloc(adapter->num_vectors, in e1000e_set_interrupt_capability()
2066 if (adapter->msix_entries) { in e1000e_set_interrupt_capability()
2069 for (i = 0; i < adapter->num_vectors; i++) in e1000e_set_interrupt_capability()
2070 adapter->msix_entries[i].entry = i; in e1000e_set_interrupt_capability()
2072 err = pci_enable_msix_range(a->pdev, in e1000e_set_interrupt_capability()
2073 a->msix_entries, in e1000e_set_interrupt_capability()
2074 a->num_vectors, in e1000e_set_interrupt_capability()
2075 a->num_vectors); in e1000e_set_interrupt_capability()
2079 /* MSI-X failed, so fall through and try MSI */ in e1000e_set_interrupt_capability()
2080 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n"); in e1000e_set_interrupt_capability()
2083 adapter->int_mode = E1000E_INT_MODE_MSI; in e1000e_set_interrupt_capability()
2086 if (!pci_enable_msi(adapter->pdev)) { in e1000e_set_interrupt_capability()
2087 adapter->flags |= FLAG_MSI_ENABLED; in e1000e_set_interrupt_capability()
2089 adapter->int_mode = E1000E_INT_MODE_LEGACY; in e1000e_set_interrupt_capability()
2099 adapter->num_vectors = 1; in e1000e_set_interrupt_capability()
2103 * e1000_request_msix - Initialize MSI-X interrupts
2106 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
2111 struct net_device *netdev = adapter->netdev; in e1000_request_msix()
2114 if (strlen(netdev->name) < (IFNAMSIZ - 5)) in e1000_request_msix()
2115 snprintf(adapter->rx_ring->name, in e1000_request_msix()
2116 sizeof(adapter->rx_ring->name) - 1, in e1000_request_msix()
2117 "%.14s-rx-0", netdev->name); in e1000_request_msix()
2119 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); in e1000_request_msix()
2120 err = request_irq(adapter->msix_entries[vector].vector, in e1000_request_msix()
2121 e1000_intr_msix_rx, 0, adapter->rx_ring->name, in e1000_request_msix()
2125 adapter->rx_ring->itr_register = adapter->hw.hw_addr + in e1000_request_msix()
2127 adapter->rx_ring->itr_val = adapter->itr; in e1000_request_msix()
2130 if (strlen(netdev->name) < (IFNAMSIZ - 5)) in e1000_request_msix()
2131 snprintf(adapter->tx_ring->name, in e1000_request_msix()
2132 sizeof(adapter->tx_ring->name) - 1, in e1000_request_msix()
2133 "%.14s-tx-0", netdev->name); in e1000_request_msix()
2135 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); in e1000_request_msix()
2136 err = request_irq(adapter->msix_entries[vector].vector, in e1000_request_msix()
2137 e1000_intr_msix_tx, 0, adapter->tx_ring->name, in e1000_request_msix()
2141 adapter->tx_ring->itr_register = adapter->hw.hw_addr + in e1000_request_msix()
2143 adapter->tx_ring->itr_val = adapter->itr; in e1000_request_msix()
2146 err = request_irq(adapter->msix_entries[vector].vector, in e1000_request_msix()
2147 e1000_msix_other, 0, netdev->name, netdev); in e1000_request_msix()
2157 * e1000_request_irq - initialize interrupts
2165 struct net_device *netdev = adapter->netdev; in e1000_request_irq()
2168 if (adapter->msix_entries) { in e1000_request_irq()
2174 adapter->int_mode = E1000E_INT_MODE_MSI; in e1000_request_irq()
2177 if (adapter->flags & FLAG_MSI_ENABLED) { in e1000_request_irq()
2178 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, in e1000_request_irq()
2179 netdev->name, netdev); in e1000_request_irq()
2185 adapter->int_mode = E1000E_INT_MODE_LEGACY; in e1000_request_irq()
2188 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, in e1000_request_irq()
2189 netdev->name, netdev); in e1000_request_irq()
2198 struct net_device *netdev = adapter->netdev; in e1000_free_irq()
2200 if (adapter->msix_entries) { in e1000_free_irq()
2203 free_irq(adapter->msix_entries[vector].vector, netdev); in e1000_free_irq()
2206 free_irq(adapter->msix_entries[vector].vector, netdev); in e1000_free_irq()
2210 free_irq(adapter->msix_entries[vector].vector, netdev); in e1000_free_irq()
2214 free_irq(adapter->pdev->irq, netdev); in e1000_free_irq()
2218 * e1000_irq_disable - Mask off interrupt generation on the NIC
2223 struct e1000_hw *hw = &adapter->hw; in e1000_irq_disable()
2226 if (adapter->msix_entries) in e1000_irq_disable()
2230 if (adapter->msix_entries) { in e1000_irq_disable()
2233 for (i = 0; i < adapter->num_vectors; i++) in e1000_irq_disable()
2234 synchronize_irq(adapter->msix_entries[i].vector); in e1000_irq_disable()
2236 synchronize_irq(adapter->pdev->irq); in e1000_irq_disable()
2241 * e1000_irq_enable - Enable default interrupt generation settings
2246 struct e1000_hw *hw = &adapter->hw; in e1000_irq_enable()
2248 if (adapter->msix_entries) { in e1000_irq_enable()
2249 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); in e1000_irq_enable()
2250 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | in e1000_irq_enable()
2252 } else if (hw->mac.type >= e1000_pch_lpt) { in e1000_irq_enable()
2261 * e1000e_get_hw_control - get control of the h/w from f/w
2271 struct e1000_hw *hw = &adapter->hw; in e1000e_get_hw_control()
2276 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { in e1000e_get_hw_control()
2279 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { in e1000e_get_hw_control()
2286 * e1000e_release_hw_control - release control of the h/w to f/w
2297 struct e1000_hw *hw = &adapter->hw; in e1000e_release_hw_control()
2302 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { in e1000e_release_hw_control()
2305 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { in e1000e_release_hw_control()
2312 * e1000_alloc_ring_dma - allocate memory for a ring structure
2319 struct pci_dev *pdev = adapter->pdev; in e1000_alloc_ring_dma()
2321 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, in e1000_alloc_ring_dma()
2323 if (!ring->desc) in e1000_alloc_ring_dma()
2324 return -ENOMEM; in e1000_alloc_ring_dma()
2330 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2337 struct e1000_adapter *adapter = tx_ring->adapter; in e1000e_setup_tx_resources()
2338 int err = -ENOMEM, size; in e1000e_setup_tx_resources()
2340 size = sizeof(struct e1000_buffer) * tx_ring->count; in e1000e_setup_tx_resources()
2341 tx_ring->buffer_info = vzalloc(size); in e1000e_setup_tx_resources()
2342 if (!tx_ring->buffer_info) in e1000e_setup_tx_resources()
2346 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); in e1000e_setup_tx_resources()
2347 tx_ring->size = ALIGN(tx_ring->size, 4096); in e1000e_setup_tx_resources()
2353 tx_ring->next_to_use = 0; in e1000e_setup_tx_resources()
2354 tx_ring->next_to_clean = 0; in e1000e_setup_tx_resources()
2358 vfree(tx_ring->buffer_info); in e1000e_setup_tx_resources()
2364 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2371 struct e1000_adapter *adapter = rx_ring->adapter; in e1000e_setup_rx_resources()
2373 int i, size, desc_len, err = -ENOMEM; in e1000e_setup_rx_resources()
2375 size = sizeof(struct e1000_buffer) * rx_ring->count; in e1000e_setup_rx_resources()
2376 rx_ring->buffer_info = vzalloc(size); in e1000e_setup_rx_resources()
2377 if (!rx_ring->buffer_info) in e1000e_setup_rx_resources()
2380 for (i = 0; i < rx_ring->count; i++) { in e1000e_setup_rx_resources()
2381 buffer_info = &rx_ring->buffer_info[i]; in e1000e_setup_rx_resources()
2382 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, in e1000e_setup_rx_resources()
2385 if (!buffer_info->ps_pages) in e1000e_setup_rx_resources()
2392 rx_ring->size = rx_ring->count * desc_len; in e1000e_setup_rx_resources()
2393 rx_ring->size = ALIGN(rx_ring->size, 4096); in e1000e_setup_rx_resources()
2399 rx_ring->next_to_clean = 0; in e1000e_setup_rx_resources()
2400 rx_ring->next_to_use = 0; in e1000e_setup_rx_resources()
2401 rx_ring->rx_skb_top = NULL; in e1000e_setup_rx_resources()
2406 for (i = 0; i < rx_ring->count; i++) { in e1000e_setup_rx_resources()
2407 buffer_info = &rx_ring->buffer_info[i]; in e1000e_setup_rx_resources()
2408 kfree(buffer_info->ps_pages); in e1000e_setup_rx_resources()
2411 vfree(rx_ring->buffer_info); in e1000e_setup_rx_resources()
2417 * e1000_clean_tx_ring - Free Tx Buffers
2422 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_clean_tx_ring()
2427 for (i = 0; i < tx_ring->count; i++) { in e1000_clean_tx_ring()
2428 buffer_info = &tx_ring->buffer_info[i]; in e1000_clean_tx_ring()
2432 netdev_reset_queue(adapter->netdev); in e1000_clean_tx_ring()
2433 size = sizeof(struct e1000_buffer) * tx_ring->count; in e1000_clean_tx_ring()
2434 memset(tx_ring->buffer_info, 0, size); in e1000_clean_tx_ring()
2436 memset(tx_ring->desc, 0, tx_ring->size); in e1000_clean_tx_ring()
2438 tx_ring->next_to_use = 0; in e1000_clean_tx_ring()
2439 tx_ring->next_to_clean = 0; in e1000_clean_tx_ring()
2443 * e1000e_free_tx_resources - Free Tx Resources per Queue
2450 struct e1000_adapter *adapter = tx_ring->adapter; in e1000e_free_tx_resources()
2451 struct pci_dev *pdev = adapter->pdev; in e1000e_free_tx_resources()
2455 vfree(tx_ring->buffer_info); in e1000e_free_tx_resources()
2456 tx_ring->buffer_info = NULL; in e1000e_free_tx_resources()
2458 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, in e1000e_free_tx_resources()
2459 tx_ring->dma); in e1000e_free_tx_resources()
2460 tx_ring->desc = NULL; in e1000e_free_tx_resources()
2464 * e1000e_free_rx_resources - Free Rx Resources
2471 struct e1000_adapter *adapter = rx_ring->adapter; in e1000e_free_rx_resources()
2472 struct pci_dev *pdev = adapter->pdev; in e1000e_free_rx_resources()
2477 for (i = 0; i < rx_ring->count; i++) in e1000e_free_rx_resources()
2478 kfree(rx_ring->buffer_info[i].ps_pages); in e1000e_free_rx_resources()
2480 vfree(rx_ring->buffer_info); in e1000e_free_rx_resources()
2481 rx_ring->buffer_info = NULL; in e1000e_free_rx_resources()
2483 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, in e1000e_free_rx_resources()
2484 rx_ring->dma); in e1000e_free_rx_resources()
2485 rx_ring->desc = NULL; in e1000e_free_rx_resources()
2489 * e1000_update_itr - update the dynamic ITR value based on statistics
2490 * @itr_setting: current adapter->itr
2549 u32 new_itr = adapter->itr; in e1000_set_itr()
2551 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ in e1000_set_itr()
2552 if (adapter->link_speed != SPEED_1000) { in e1000_set_itr()
2558 if (adapter->flags2 & FLAG2_DISABLE_AIM) { in e1000_set_itr()
2563 adapter->tx_itr = e1000_update_itr(adapter->tx_itr, in e1000_set_itr()
2564 adapter->total_tx_packets, in e1000_set_itr()
2565 adapter->total_tx_bytes); in e1000_set_itr()
2567 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) in e1000_set_itr()
2568 adapter->tx_itr = low_latency; in e1000_set_itr()
2570 adapter->rx_itr = e1000_update_itr(adapter->rx_itr, in e1000_set_itr()
2571 adapter->total_rx_packets, in e1000_set_itr()
2572 adapter->total_rx_bytes); in e1000_set_itr()
2574 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) in e1000_set_itr()
2575 adapter->rx_itr = low_latency; in e1000_set_itr()
2577 current_itr = max(adapter->rx_itr, adapter->tx_itr); in e1000_set_itr()
2595 if (new_itr != adapter->itr) { in e1000_set_itr()
2600 new_itr = new_itr > adapter->itr ? in e1000_set_itr()
2601 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; in e1000_set_itr()
2602 adapter->itr = new_itr; in e1000_set_itr()
2603 adapter->rx_ring->itr_val = new_itr; in e1000_set_itr()
2604 if (adapter->msix_entries) in e1000_set_itr()
2605 adapter->rx_ring->set_itr = 1; in e1000_set_itr()
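
The update at 2600-2601 damps upward ITR changes while letting downward ones apply immediately: when new_itr exceeds the current adapter->itr, the driver writes only min(adapter->itr + (new_itr >> 2), new_itr), i.e. it climbs by at most a quarter of the new value per adjustment. For example, stepping from itr = 8000 toward a computed 20000 first writes min(8000 + 5000, 20000) = 13000, then 18000, then 20000 on later adjustments with the same target.
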
2612 * e1000e_write_itr - write the ITR value to the appropriate registers
2616 * e1000e_write_itr determines if the adapter is in MSI-X mode
2622 struct e1000_hw *hw = &adapter->hw; in e1000e_write_itr()
2625 if (adapter->msix_entries) { in e1000e_write_itr()
2628 for (vector = 0; vector < adapter->num_vectors; vector++) in e1000e_write_itr()
2629 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector)); in e1000e_write_itr()
2636 * e1000_alloc_queues - Allocate memory for all rings
2643 adapter->tx_ring = kzalloc(size, GFP_KERNEL); in e1000_alloc_queues()
2644 if (!adapter->tx_ring) in e1000_alloc_queues()
2646 adapter->tx_ring->count = adapter->tx_ring_count; in e1000_alloc_queues()
2647 adapter->tx_ring->adapter = adapter; in e1000_alloc_queues()
2649 adapter->rx_ring = kzalloc(size, GFP_KERNEL); in e1000_alloc_queues()
2650 if (!adapter->rx_ring) in e1000_alloc_queues()
2652 adapter->rx_ring->count = adapter->rx_ring_count; in e1000_alloc_queues()
2653 adapter->rx_ring->adapter = adapter; in e1000_alloc_queues()
2658 kfree(adapter->rx_ring); in e1000_alloc_queues()
2659 kfree(adapter->tx_ring); in e1000_alloc_queues()
2660 return -ENOMEM; in e1000_alloc_queues()
2664 * e1000e_poll - NAPI Rx polling callback
2672 struct e1000_hw *hw = &adapter->hw; in e1000e_poll()
2673 struct net_device *poll_dev = adapter->netdev; in e1000e_poll()
2678 if (!adapter->msix_entries || in e1000e_poll()
2679 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) in e1000e_poll()
2680 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); in e1000e_poll()
2682 adapter->clean_rx(adapter->rx_ring, &work_done, budget); in e1000e_poll()
2687 /* Exit the polling mode, but don't re-enable interrupts if stack might in e1000e_poll()
2688 * poll us due to busy-polling in e1000e_poll()
2691 if (adapter->itr_setting & 3) in e1000e_poll()
2693 if (!test_bit(__E1000_DOWN, &adapter->state)) { in e1000e_poll()
2694 if (adapter->msix_entries) in e1000e_poll()
2695 ew32(IMS, adapter->rx_ring->ims_val); in e1000e_poll()
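
e1000e_poll() follows the standard NAPI contract: reclaim Tx work when the vector is shared with Rx, clean Rx against the budget, and only when work_done < budget complete the poll and re-arm the interrupt mask. A generic outline of that contract (driver-specific cleanup elided; a sketch, not a drop-in replacement):

        static int poll_sketch(struct napi_struct *napi, int budget)
        {
                int work_done = 0;

                /* ... reclaim Tx descriptors, then receive up to @budget
                 * packets, incrementing work_done per Rx packet ...
                 */

                if (work_done < budget && napi_complete_done(napi, work_done)) {
                        /* the stack stopped polling us: re-enable the
                         * device interrupt (an IMS write in this driver)
                         */
                }
                return work_done;
        }
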
2708 struct e1000_hw *hw = &adapter->hw; in e1000_vlan_rx_add_vid()
2712 if ((adapter->hw.mng_cookie.status & in e1000_vlan_rx_add_vid()
2714 (vid == adapter->mng_vlan_id)) in e1000_vlan_rx_add_vid()
2718 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000_vlan_rx_add_vid()
2722 hw->mac.ops.write_vfta(hw, index, vfta); in e1000_vlan_rx_add_vid()
2725 set_bit(vid, adapter->active_vlans); in e1000_vlan_rx_add_vid()
2734 struct e1000_hw *hw = &adapter->hw; in e1000_vlan_rx_kill_vid()
2737 if ((adapter->hw.mng_cookie.status & in e1000_vlan_rx_kill_vid()
2739 (vid == adapter->mng_vlan_id)) { in e1000_vlan_rx_kill_vid()
2746 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000_vlan_rx_kill_vid()
2750 hw->mac.ops.write_vfta(hw, index, vfta); in e1000_vlan_rx_kill_vid()
2753 clear_bit(vid, adapter->active_vlans); in e1000_vlan_rx_kill_vid()
2759 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2764 struct net_device *netdev = adapter->netdev; in e1000e_vlan_filter_disable()
2765 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_filter_disable()
2768 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000e_vlan_filter_disable()
2774 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { in e1000e_vlan_filter_disable()
2776 adapter->mng_vlan_id); in e1000e_vlan_filter_disable()
2777 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; in e1000e_vlan_filter_disable()
2783 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2788 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_filter_enable()
2791 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { in e1000e_vlan_filter_enable()
2801 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2806 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_strip_disable()
2816 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2821 struct e1000_hw *hw = &adapter->hw; in e1000e_vlan_strip_enable()
2832 struct net_device *netdev = adapter->netdev; in e1000_update_mng_vlan()
2833 u16 vid = adapter->hw.mng_cookie.vlan_id; in e1000_update_mng_vlan()
2834 u16 old_vid = adapter->mng_vlan_id; in e1000_update_mng_vlan()
2836 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { in e1000_update_mng_vlan()
2838 adapter->mng_vlan_id = vid; in e1000_update_mng_vlan()
2849 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); in e1000_restore_vlan()
2851 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in e1000_restore_vlan()
2852 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); in e1000_restore_vlan()
2857 struct e1000_hw *hw = &adapter->hw; in e1000_init_manageability_pt()
2860 if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) in e1000_init_manageability_pt()
2872 switch (hw->mac.type) { in e1000_init_manageability_pt()
2878 /* Check if IPMI pass-through decision filter already exists; in e1000_init_manageability_pt()
2909 e_warn("Unable to create IPMI pass-through filter\n"); in e1000_init_manageability_pt()
2918 * e1000_configure_tx - Configure Transmit Unit after Reset
2925 struct e1000_hw *hw = &adapter->hw; in e1000_configure_tx()
2926 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_configure_tx()
2931 tdba = tx_ring->dma; in e1000_configure_tx()
2932 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); in e1000_configure_tx()
2938 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); in e1000_configure_tx()
2939 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); in e1000_configure_tx()
2941 writel(0, tx_ring->head); in e1000_configure_tx()
2942 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_configure_tx()
2945 writel(0, tx_ring->tail); in e1000_configure_tx()
2948 ew32(TIDV, adapter->tx_int_delay); in e1000_configure_tx()
2950 ew32(TADV, adapter->tx_abs_int_delay); in e1000_configure_tx()
2952 if (adapter->flags2 & FLAG2_DMA_BURST) { in e1000_configure_tx()
2978 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { in e1000_configure_tx()
2989 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { in e1000_configure_tx()
2999 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; in e1000_configure_tx()
3002 if (adapter->tx_int_delay) in e1000_configure_tx()
3003 adapter->txd_cmd |= E1000_TXD_CMD_IDE; in e1000_configure_tx()
3006 adapter->txd_cmd |= E1000_TXD_CMD_RS; in e1000_configure_tx()
3010 hw->mac.ops.config_collision_dist(hw); in e1000_configure_tx()
3013 if (hw->mac.type == e1000_pch_spt) { in e1000_configure_tx()
3032 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
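Editor's note: the fragment above is the tail of the PAGE_USE_COUNT() helper, which rounds a buffer size up to whole pages (quotient plus one if there is a remainder). A worked stand-alone version, assuming 4 KiB pages:

	#include <stdio.h>

	#define EX_PAGE_SHIFT 12
	#define EX_PAGE_SIZE  (1u << EX_PAGE_SHIFT)
	#define EX_PAGE_USE_COUNT(S) \
		(((S) >> EX_PAGE_SHIFT) + (((S) & (EX_PAGE_SIZE - 1)) ? 1 : 0))

	int main(void)
	{
		/* a 9000-byte jumbo MTU needs ceil(9000 / 4096) = 3 pages */
		printf("%u\n", (unsigned int)EX_PAGE_USE_COUNT(9000));
		return 0;
	}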
3035 * e1000_setup_rctl - configure the receive control registers
3040 struct e1000_hw *hw = &adapter->hw; in e1000_setup_rctl()
3044 /* Workaround Si errata on PCHx - configure jumbo frame flow. in e1000_setup_rctl()
3048 if (hw->mac.type >= e1000_pch2lan) { in e1000_setup_rctl()
3051 if (adapter->netdev->mtu > ETH_DATA_LEN) in e1000_setup_rctl()
3065 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); in e1000_setup_rctl()
3071 if (adapter->netdev->mtu <= ETH_DATA_LEN) in e1000_setup_rctl()
3080 if (adapter->flags2 & FLAG2_CRC_STRIPPING) in e1000_setup_rctl()
3083 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ in e1000_setup_rctl()
3084 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { in e1000_setup_rctl()
3103 switch (adapter->rx_buffer_len) { in e1000_setup_rctl()
3125 /* 82571 and greater support packet-split where the protocol in e1000_setup_rctl()
3126 * header is placed in skb->data and the packet data is in e1000_setup_rctl()
3127 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. in e1000_setup_rctl()
3128 * In the case of a non-split, skb->data is linearly filled, in e1000_setup_rctl()
3129 * followed by the page buffers. Therefore, skb->data is in e1000_setup_rctl()
3139 pages = PAGE_USE_COUNT(adapter->netdev->mtu); in e1000_setup_rctl()
3141 adapter->rx_ps_pages = pages; in e1000_setup_rctl()
3143 adapter->rx_ps_pages = 0; in e1000_setup_rctl()
3145 if (adapter->rx_ps_pages) { in e1000_setup_rctl()
3151 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; in e1000_setup_rctl()
3153 switch (adapter->rx_ps_pages) { in e1000_setup_rctl()
3169 if (adapter->netdev->features & NETIF_F_RXALL) { in e1000_setup_rctl()
3187 adapter->flags &= ~FLAG_RESTART_NOW; in e1000_setup_rctl()
3191 * e1000_configure_rx - Configure Receive Unit after Reset
3198 struct e1000_hw *hw = &adapter->hw; in e1000_configure_rx()
3199 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_configure_rx()
3203 if (adapter->rx_ps_pages) { in e1000_configure_rx()
3205 rdlen = rx_ring->count * in e1000_configure_rx()
3207 adapter->clean_rx = e1000_clean_rx_irq_ps; in e1000_configure_rx()
3208 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; in e1000_configure_rx()
3209 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { in e1000_configure_rx()
3210 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); in e1000_configure_rx()
3211 adapter->clean_rx = e1000_clean_jumbo_rx_irq; in e1000_configure_rx()
3212 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; in e1000_configure_rx()
3214 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); in e1000_configure_rx()
3215 adapter->clean_rx = e1000_clean_rx_irq; in e1000_configure_rx()
3216 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; in e1000_configure_rx()
3221 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) in e1000_configure_rx()
3226 if (adapter->flags2 & FLAG2_DMA_BURST) { in e1000_configure_rx()
3240 ew32(RDTR, adapter->rx_int_delay); in e1000_configure_rx()
3243 ew32(RADV, adapter->rx_abs_int_delay); in e1000_configure_rx()
3244 if ((adapter->itr_setting != 0) && (adapter->itr != 0)) in e1000_configure_rx()
3245 e1000e_write_itr(adapter, adapter->itr); in e1000_configure_rx()
3248 /* Auto-Mask interrupts upon ICR access */ in e1000_configure_rx()
3257 rdba = rx_ring->dma; in e1000_configure_rx()
3263 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); in e1000_configure_rx()
3264 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); in e1000_configure_rx()
3266 writel(0, rx_ring->head); in e1000_configure_rx()
3267 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_configure_rx()
3270 writel(0, rx_ring->tail); in e1000_configure_rx()
3274 if (adapter->netdev->features & NETIF_F_RXCSUM) in e1000_configure_rx()
3280 /* With jumbo frames, excessive C-state transition latencies result in e1000_configure_rx()
3283 if (adapter->netdev->mtu > ETH_DATA_LEN) { in e1000_configure_rx()
3285 ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 - in e1000_configure_rx()
3286 adapter->max_frame_size) * 8 / 1000; in e1000_configure_rx()
3288 if (adapter->flags & FLAG_IS_ICH) { in e1000_configure_rx()
3294 dev_info(&adapter->pdev->dev, in e1000_configure_rx()
3295 "Some CPU C-states have been disabled in order to enable jumbo frames\n"); in e1000_configure_rx()
3296 cpu_latency_qos_update_request(&adapter->pm_qos_req, lat); in e1000_configure_rx()
3298 cpu_latency_qos_update_request(&adapter->pm_qos_req, in e1000_configure_rx()
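Editor's note: the latency budget handed to the QoS request above is simply how long the rest of the Rx FIFO takes to fill at 1 Gbps (1 bit per ns, so bits / 1000 gives microseconds). A worked example with a hypothetical 32 KB Rx allocation read from PBA and a 9022-byte max frame (9000 MTU plus Ethernet/VLAN headers and FCS):

	#include <stdio.h>

	int main(void)
	{
		unsigned int rxa_kb = 32;	/* hypothetical PBA Rx allocation, KB */
		unsigned int max_frame = 9022;	/* 9000 MTU + 18B headers + 4B FCS */
		/* bits of remaining FIFO / 1000 = microseconds at 1 Gbps */
		unsigned int lat_us = (rxa_kb * 1024 - max_frame) * 8 / 1000;

		printf("%u us\n", lat_us);	/* prints 189 */
		return 0;
	}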
3307 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3311 * Returns: -ENOMEM on failure
3318 struct e1000_hw *hw = &adapter->hw; in e1000e_write_mc_addr_list()
3325 hw->mac.ops.update_mc_addr_list(hw, NULL, 0); in e1000e_write_mc_addr_list()
3331 return -ENOMEM; in e1000e_write_mc_addr_list()
3336 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); in e1000e_write_mc_addr_list()
3338 hw->mac.ops.update_mc_addr_list(hw, mta_list, i); in e1000e_write_mc_addr_list()
3345 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3349 * Returns: -ENOMEM on failure/insufficient address space
3356 struct e1000_hw *hw = &adapter->hw; in e1000e_write_uc_addr_list()
3360 rar_entries = hw->mac.ops.rar_get_count(hw); in e1000e_write_uc_addr_list()
3363 rar_entries--; in e1000e_write_uc_addr_list()
3366 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) in e1000e_write_uc_addr_list()
3367 rar_entries--; in e1000e_write_uc_addr_list()
3371 return -ENOMEM; in e1000e_write_uc_addr_list()
3384 ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); in e1000e_write_uc_addr_list()
3386 return -ENOMEM; in e1000e_write_uc_addr_list()
3392 for (; rar_entries > 0; rar_entries--) { in e1000e_write_uc_addr_list()
3402 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
3408 * promiscuous mode, and all-multi behavior.
3413 struct e1000_hw *hw = &adapter->hw; in e1000e_set_rx_mode()
3416 if (pm_runtime_suspended(netdev->dev.parent)) in e1000e_set_rx_mode()
3425 if (netdev->flags & IFF_PROMISC) { in e1000e_set_rx_mode()
3432 if (netdev->flags & IFF_ALLMULTI) { in e1000e_set_rx_mode()
3455 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in e1000e_set_rx_mode()
3463 struct e1000_hw *hw = &adapter->hw; in e1000e_setup_rss_hash()
3494 * e1000e_get_base_timinca - get default SYSTIM time increment attributes
3503 struct e1000_hw *hw = &adapter->hw; in e1000e_get_base_timinca()
3509 if ((hw->mac.type >= e1000_pch_lpt) && in e1000e_get_base_timinca()
3520 switch (hw->mac.type) { in e1000e_get_base_timinca()
3526 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; in e1000e_get_base_timinca()
3534 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHZ; in e1000e_get_base_timinca()
3540 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3548 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3560 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3566 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3575 adapter->cc.shift = shift; in e1000e_get_base_timinca()
3578 return -EINVAL; in e1000e_get_base_timinca()
3588 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
3606 struct e1000_hw *hw = &adapter->hw; in e1000e_config_hwtstamp()
3615 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) in e1000e_config_hwtstamp()
3616 return -EINVAL; in e1000e_config_hwtstamp()
3618 /* flags reserved for future extensions - must be zero */ in e1000e_config_hwtstamp()
3619 if (config->flags) in e1000e_config_hwtstamp()
3620 return -EINVAL; in e1000e_config_hwtstamp()
3622 switch (config->tx_type) { in e1000e_config_hwtstamp()
3629 return -ERANGE; in e1000e_config_hwtstamp()
3632 switch (config->rx_filter) { in e1000e_config_hwtstamp()
3684 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; in e1000e_config_hwtstamp()
3690 * Delay Request messages but not both so fall-through to in e1000e_config_hwtstamp()
3699 config->rx_filter = HWTSTAMP_FILTER_ALL; in e1000e_config_hwtstamp()
3702 return -ERANGE; in e1000e_config_hwtstamp()
3705 adapter->hwtstamp_config = *config; in e1000e_config_hwtstamp()
3715 return -EAGAIN; in e1000e_config_hwtstamp()
3728 return -EAGAIN; in e1000e_config_hwtstamp()
3755 * e1000_configure - configure the hardware for Rx and Tx
3760 struct e1000_ring *rx_ring = adapter->rx_ring; in e1000_configure()
3762 e1000e_set_rx_mode(adapter->netdev); in e1000_configure()
3769 if (adapter->netdev->features & NETIF_F_RXHASH) in e1000_configure()
3773 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); in e1000_configure()
3777 * e1000e_power_up_phy - restore link in case the phy was powered down
3786 if (adapter->hw.phy.ops.power_up) in e1000e_power_up_phy()
3787 adapter->hw.phy.ops.power_up(&adapter->hw); in e1000e_power_up_phy()
3789 adapter->hw.mac.ops.setup_link(&adapter->hw); in e1000e_power_up_phy()
3793 * e1000_power_down_phy - Power down the PHY
3801 if (adapter->hw.phy.ops.power_down) in e1000_power_down_phy()
3802 adapter->hw.phy.ops.power_down(&adapter->hw); in e1000_power_down_phy()
3806 * e1000_flush_tx_ring - remove all descriptors from the tx_ring
3816 struct e1000_hw *hw = &adapter->hw; in e1000_flush_tx_ring()
3817 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_flush_tx_ring()
3825 BUG_ON(tdt != tx_ring->next_to_use); in e1000_flush_tx_ring()
3826 tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use); in e1000_flush_tx_ring()
3827 tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma); in e1000_flush_tx_ring()
3829 tx_desc->lower.data = cpu_to_le32(txd_lower | size); in e1000_flush_tx_ring()
3830 tx_desc->upper.data = 0; in e1000_flush_tx_ring()
3833 tx_ring->next_to_use++; in e1000_flush_tx_ring()
3834 if (tx_ring->next_to_use == tx_ring->count) in e1000_flush_tx_ring()
3835 tx_ring->next_to_use = 0; in e1000_flush_tx_ring()
3836 ew32(TDT(0), tx_ring->next_to_use); in e1000_flush_tx_ring()
3841 * e1000_flush_rx_ring - remove all descriptors from the rx_ring
3849 struct e1000_hw *hw = &adapter->hw; in e1000_flush_rx_ring()
3874 * e1000_flush_desc_rings - remove all descriptors from the descriptor rings
3889 struct e1000_hw *hw = &adapter->hw; in e1000_flush_desc_rings()
3897 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, in e1000_flush_desc_rings()
3903 pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, in e1000_flush_desc_rings()
3910 * e1000e_systim_reset - reset the timesync registers after a hardware reset
3920 struct ptp_clock_info *info = &adapter->ptp_clock_info; in e1000e_systim_reset()
3921 struct e1000_hw *hw = &adapter->hw; in e1000e_systim_reset()
3926 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) in e1000e_systim_reset()
3929 if (info->adjfreq) { in e1000e_systim_reset()
3931 ret_val = info->adjfreq(info, adapter->ptp_delta); in e1000e_systim_reset()
3940 dev_warn(&adapter->pdev->dev, in e1000e_systim_reset()
3947 spin_lock_irqsave(&adapter->systim_lock, flags); in e1000e_systim_reset()
3948 timecounter_init(&adapter->tc, &adapter->cc, in e1000e_systim_reset()
3950 spin_unlock_irqrestore(&adapter->systim_lock, flags); in e1000e_systim_reset()
3953 e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); in e1000e_systim_reset()
3957 * e1000e_reset - bring the hardware into a known good state
3961 * require a configuration cycle of the hardware - those cannot be
3967 struct e1000_mac_info *mac = &adapter->hw.mac; in e1000e_reset()
3968 struct e1000_fc_info *fc = &adapter->hw.fc; in e1000e_reset()
3969 struct e1000_hw *hw = &adapter->hw; in e1000e_reset()
3971 u32 pba = adapter->pba; in e1000e_reset()
3977 if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) { in e1000e_reset()
3993 min_tx_space = (adapter->max_frame_size + in e1000e_reset()
3994 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2; in e1000e_reset()
3998 min_rx_space = adapter->max_frame_size; in e1000e_reset()
4007 ((min_tx_space - tx_space) < pba)) { in e1000e_reset()
4008 pba -= min_tx_space - tx_space; in e1000e_reset()
4025 * - 90% of the Rx FIFO size, and in e1000e_reset()
4026 * - the full Rx FIFO size minus one full frame in e1000e_reset()
4028 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) in e1000e_reset()
4029 fc->pause_time = 0xFFFF; in e1000e_reset()
4031 fc->pause_time = E1000_FC_PAUSE_TIME; in e1000e_reset()
4032 fc->send_xon = true; in e1000e_reset()
4033 fc->current_mode = fc->requested_mode; in e1000e_reset()
4035 switch (hw->mac.type) { in e1000e_reset()
4038 if (adapter->netdev->mtu > ETH_DATA_LEN) { in e1000e_reset()
4041 fc->high_water = 0x2800; in e1000e_reset()
4042 fc->low_water = fc->high_water - 8; in e1000e_reset()
4048 ((pba << 10) - adapter->max_frame_size)); in e1000e_reset()
4050 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ in e1000e_reset()
4051 fc->low_water = fc->high_water - 8; in e1000e_reset()
4057 if (adapter->netdev->mtu > ETH_DATA_LEN) { in e1000e_reset()
4058 fc->high_water = 0x3500; in e1000e_reset()
4059 fc->low_water = 0x1500; in e1000e_reset()
4061 fc->high_water = 0x5000; in e1000e_reset()
4062 fc->low_water = 0x3000; in e1000e_reset()
4064 fc->refresh_time = 0x1000; in e1000e_reset()
4074 fc->refresh_time = 0xFFFF; in e1000e_reset()
4075 fc->pause_time = 0xFFFF; in e1000e_reset()
4077 if (adapter->netdev->mtu <= ETH_DATA_LEN) { in e1000e_reset()
4078 fc->high_water = 0x05C20; in e1000e_reset()
4079 fc->low_water = 0x05048; in e1000e_reset()
4085 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; in e1000e_reset()
4086 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; in e1000e_reset()
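Editor's note: a worked example of the default watermarks just above, assuming a hypothetical 20 KB Rx packet buffer: XOFF is sent once the FIFO passes 90% full and XON once it drains back below 80%, both rounded down to the registers' 8-byte granularity (the ~7u mask stands in for the FCRTH/FCRTL field masks):

	#include <stdio.h>

	int main(void)
	{
		unsigned int pba = 20;					/* Rx buffer, KB */
		unsigned int high = ((pba << 10) * 9 / 10) & ~7u;	/* 18432 */
		unsigned int low  = ((pba << 10) * 8 / 10) & ~7u;	/* 16384 */

		printf("high_water=%u low_water=%u\n", high, low);
		return 0;
	}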
4095 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, in e1000e_reset()
4101 if (adapter->itr_setting & 0x3) { in e1000e_reset()
4102 if ((adapter->max_frame_size * 2) > (pba << 10)) { in e1000e_reset()
4103 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { in e1000e_reset()
4104 dev_info(&adapter->pdev->dev, in e1000e_reset()
4106 adapter->flags2 |= FLAG2_DISABLE_AIM; in e1000e_reset()
4109 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { in e1000e_reset()
4110 dev_info(&adapter->pdev->dev, in e1000e_reset()
4112 adapter->flags2 &= ~FLAG2_DISABLE_AIM; in e1000e_reset()
4113 adapter->itr = 20000; in e1000e_reset()
4114 e1000e_write_itr(adapter, adapter->itr); in e1000e_reset()
4118 if (hw->mac.type >= e1000_pch_spt) in e1000e_reset()
4121 mac->ops.reset_hw(hw); in e1000e_reset()
4126 if (adapter->flags & FLAG_HAS_AMT) in e1000e_reset()
4131 if (mac->ops.init_hw(hw)) in e1000e_reset()
4145 if (adapter->flags2 & FLAG2_HAS_EEE) { in e1000e_reset()
4149 switch (hw->phy.type) { in e1000e_reset()
4157 dev_err(&adapter->pdev->dev, in e1000e_reset()
4162 ret_val = hw->phy.ops.acquire(hw); in e1000e_reset()
4164 dev_err(&adapter->pdev->dev, in e1000e_reset()
4165 "EEE advertisement - unable to acquire PHY\n"); in e1000e_reset()
4170 hw->dev_spec.ich8lan.eee_disable ? in e1000e_reset()
4171 0 : adapter->eee_advert); in e1000e_reset()
4173 hw->phy.ops.release(hw); in e1000e_reset()
4176 if (!netif_running(adapter->netdev) && in e1000e_reset()
4177 !test_bit(__E1000_TESTING, &adapter->state)) in e1000e_reset()
4182 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && in e1000e_reset()
4183 !(adapter->flags & FLAG_SMART_POWER_DOWN)) { in e1000e_reset()
4193 if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) { in e1000e_reset()
4210 * e1000e_trigger_lsc - trigger an LSC interrupt
4217 struct e1000_hw *hw = &adapter->hw; in e1000e_trigger_lsc()
4219 if (adapter->msix_entries) in e1000e_trigger_lsc()
4230 clear_bit(__E1000_DOWN, &adapter->state); in e1000e_up()
4232 if (adapter->msix_entries) in e1000e_up()
4243 struct e1000_hw *hw = &adapter->hw; in e1000e_flush_descriptors()
4245 if (!(adapter->flags2 & FLAG2_DMA_BURST)) in e1000e_flush_descriptors()
4249 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000e_flush_descriptors()
4250 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); in e1000e_flush_descriptors()
4258 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); in e1000e_flush_descriptors()
4259 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); in e1000e_flush_descriptors()
4268 * e1000e_down - quiesce the device and optionally reset the hardware
4274 struct net_device *netdev = adapter->netdev; in e1000e_down()
4275 struct e1000_hw *hw = &adapter->hw; in e1000e_down()
4281 set_bit(__E1000_DOWN, &adapter->state); in e1000e_down()
4287 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) in e1000e_down()
4304 napi_synchronize(&adapter->napi); in e1000e_down()
4306 del_timer_sync(&adapter->watchdog_timer); in e1000e_down()
4307 del_timer_sync(&adapter->phy_info_timer); in e1000e_down()
4309 spin_lock(&adapter->stats64_lock); in e1000e_down()
4311 spin_unlock(&adapter->stats64_lock); in e1000e_down()
4315 adapter->link_speed = 0; in e1000e_down()
4316 adapter->link_duplex = 0; in e1000e_down()
4319 if ((hw->mac.type >= e1000_pch2lan) && in e1000e_down()
4320 (adapter->netdev->mtu > ETH_DATA_LEN) && in e1000e_down()
4324 if (!pci_channel_offline(adapter->pdev)) { in e1000e_down()
4327 else if (hw->mac.type >= e1000_pch_spt) in e1000e_down()
4330 e1000_clean_tx_ring(adapter->tx_ring); in e1000e_down()
4331 e1000_clean_rx_ring(adapter->rx_ring); in e1000e_down()
4337 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) in e1000e_reinit_locked()
4341 clear_bit(__E1000_RESETTING, &adapter->state); in e1000e_reinit_locked()
4345 * e1000e_sanitize_systim - sanitize raw cycle counter reads
4365 /* latch SYSTIMH on read of SYSTIML */ in e1000e_sanitize_systim()
4371 time_delta = systim_next - systim; in e1000e_sanitize_systim()
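Editor's note: a stand-alone model of the re-read loop this fragment belongs to. A sample is accepted only when consecutive reads differ by a small forward step that is a whole multiple of the timer increment, which filters out values torn by the 82574 SYSTIML/SYSTIMH latching erratum; read_counter(), the epsilon bound and the retry limit are stand-ins, not driver API:

	#include <stdint.h>

	extern uint64_t read_counter(void);	/* stand-in for SYSTIML/SYSTIMH */

	uint64_t sanitized_read(uint64_t prev, uint32_t incvalue,
				uint64_t epsilon, unsigned int max_rereads)
	{
		while (max_rereads--) {
			uint64_t next = read_counter();
			uint64_t delta = next - prev;

			prev = next;
			/* plausible sample: small step, aligned to the increment */
			if (delta < epsilon && (!incvalue || delta % incvalue == 0))
				break;
		}
		return prev;
	}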
4386 * e1000e_read_systim - read SYSTIM register
4394 struct e1000_hw *hw = &adapter->hw; in e1000e_read_systim()
4401 * to fix that we test for overflow and if true, we re-read systime. in e1000e_read_systim()
4408 if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) { in e1000e_read_systim()
4423 if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) in e1000e_read_systim()
4430 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
4442 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
4451 struct net_device *netdev = adapter->netdev; in e1000_sw_init()
4453 adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; in e1000_sw_init()
4454 adapter->rx_ps_bsize0 = 128; in e1000_sw_init()
4455 adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; in e1000_sw_init()
4456 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in e1000_sw_init()
4457 adapter->tx_ring_count = E1000_DEFAULT_TXD; in e1000_sw_init()
4458 adapter->rx_ring_count = E1000_DEFAULT_RXD; in e1000_sw_init()
4460 spin_lock_init(&adapter->stats64_lock); in e1000_sw_init()
4465 return -ENOMEM; in e1000_sw_init()
4468 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { in e1000_sw_init()
4469 adapter->cc.read = e1000e_cyclecounter_read; in e1000_sw_init()
4470 adapter->cc.mask = CYCLECOUNTER_MASK(64); in e1000_sw_init()
4471 adapter->cc.mult = 1; in e1000_sw_init()
4474 spin_lock_init(&adapter->systim_lock); in e1000_sw_init()
4475 INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work); in e1000_sw_init()
4481 set_bit(__E1000_DOWN, &adapter->state); in e1000_sw_init()
4486 * e1000_intr_msi_test - Interrupt Handler
4494 struct e1000_hw *hw = &adapter->hw; in e1000_intr_msi_test()
4499 adapter->flags &= ~FLAG_MSI_TEST_FAILED; in e1000_intr_msi_test()
4510 * e1000_test_msi_interrupt - Returns 0 for successful test
4517 struct net_device *netdev = adapter->netdev; in e1000_test_msi_interrupt()
4518 struct e1000_hw *hw = &adapter->hw; in e1000_test_msi_interrupt()
4532 adapter->flags |= FLAG_MSI_TEST_FAILED; in e1000_test_msi_interrupt()
4534 err = pci_enable_msi(adapter->pdev); in e1000_test_msi_interrupt()
4538 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, in e1000_test_msi_interrupt()
4539 netdev->name, netdev); in e1000_test_msi_interrupt()
4541 pci_disable_msi(adapter->pdev); in e1000_test_msi_interrupt()
4561 if (adapter->flags & FLAG_MSI_TEST_FAILED) { in e1000_test_msi_interrupt()
4562 adapter->int_mode = E1000E_INT_MODE_LEGACY; in e1000_test_msi_interrupt()
4568 free_irq(adapter->pdev->irq, netdev); in e1000_test_msi_interrupt()
4569 pci_disable_msi(adapter->pdev); in e1000_test_msi_interrupt()
4577 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
4587 if (!(adapter->flags & FLAG_MSI_ENABLED)) in e1000_test_msi()
4591 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); in e1000_test_msi()
4593 pci_write_config_word(adapter->pdev, PCI_COMMAND, in e1000_test_msi()
4598 /* re-enable SERR */ in e1000_test_msi()
4600 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); in e1000_test_msi()
4602 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); in e1000_test_msi()
4609 * e1000e_open - Called when a network interface is made active
4623 struct e1000_hw *hw = &adapter->hw; in e1000e_open()
4624 struct pci_dev *pdev = adapter->pdev; in e1000e_open()
4628 if (test_bit(__E1000_TESTING, &adapter->state)) in e1000e_open()
4629 return -EBUSY; in e1000e_open()
4631 pm_runtime_get_sync(&pdev->dev); in e1000e_open()
4637 err = e1000e_setup_tx_resources(adapter->tx_ring); in e1000e_open()
4642 err = e1000e_setup_rx_resources(adapter->rx_ring); in e1000e_open()
4649 if (adapter->flags & FLAG_HAS_AMT) { in e1000e_open()
4656 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; in e1000e_open()
4657 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) in e1000e_open()
4661 cpu_latency_qos_add_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE); in e1000e_open()
4678 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { in e1000e_open()
4687 clear_bit(__E1000_DOWN, &adapter->state); in e1000e_open()
4689 napi_enable(&adapter->napi); in e1000e_open()
4693 adapter->tx_hang_recheck = false; in e1000e_open()
4695 hw->mac.get_link_status = true; in e1000e_open()
4696 pm_runtime_put(&pdev->dev); in e1000e_open()
4703 cpu_latency_qos_remove_request(&adapter->pm_qos_req); in e1000e_open()
4706 e1000e_free_rx_resources(adapter->rx_ring); in e1000e_open()
4708 e1000e_free_tx_resources(adapter->tx_ring); in e1000e_open()
4711 pm_runtime_put_sync(&pdev->dev); in e1000e_open()
4717 * e1000e_close - Disables a network interface
4722 * The close entry point is called when an interface is de-activated
4730 struct pci_dev *pdev = adapter->pdev; in e1000e_close()
4733 while (test_bit(__E1000_RESETTING, &adapter->state) && count--) in e1000e_close()
4736 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); in e1000e_close()
4738 pm_runtime_get_sync(&pdev->dev); in e1000e_close()
4748 napi_disable(&adapter->napi); in e1000e_close()
4750 e1000e_free_tx_resources(adapter->tx_ring); in e1000e_close()
4751 e1000e_free_rx_resources(adapter->rx_ring); in e1000e_close()
4756 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) in e1000e_close()
4758 adapter->mng_vlan_id); in e1000e_close()
4763 if ((adapter->flags & FLAG_HAS_AMT) && in e1000e_close()
4764 !test_bit(__E1000_TESTING, &adapter->state)) in e1000e_close()
4767 cpu_latency_qos_remove_request(&adapter->pm_qos_req); in e1000e_close()
4769 pm_runtime_put_sync(&pdev->dev); in e1000e_close()
4775 * e1000_set_mac - Change the Ethernet Address of the NIC
4784 struct e1000_hw *hw = &adapter->hw; in e1000_set_mac()
4787 if (!is_valid_ether_addr(addr->sa_data)) in e1000_set_mac()
4788 return -EADDRNOTAVAIL; in e1000_set_mac()
4790 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); in e1000_set_mac()
4791 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); in e1000_set_mac()
4793 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); in e1000_set_mac()
4795 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { in e1000_set_mac()
4797 e1000e_set_laa_state_82571(&adapter->hw, 1); in e1000_set_mac()
4806 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, in e1000_set_mac()
4807 adapter->hw.mac.rar_entry_count - 1); in e1000_set_mac()
4814 * e1000e_update_phy_task - work thread to update phy
4826 struct e1000_hw *hw = &adapter->hw; in e1000e_update_phy_task()
4828 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000e_update_phy_task()
4834 if (hw->phy.type >= e1000_phy_82579) in e1000e_update_phy_task()
4839 * e1000_update_phy_info - timer call-back to update PHY info
4849 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_update_phy_info()
4852 schedule_work(&adapter->update_phy_task); in e1000_update_phy_info()
4856 * e1000e_update_phy_stats - Update the PHY statistics counters
4859 * Read/clear the upper 16-bit PHY registers and read/accumulate lower
4863 struct e1000_hw *hw = &adapter->hw; in e1000e_update_phy_stats()
4867 ret_val = hw->phy.ops.acquire(hw); in e1000e_update_phy_stats()
4874 hw->phy.addr = 1; in e1000e_update_phy_stats()
4880 ret_val = hw->phy.ops.set_page(hw, in e1000e_update_phy_stats()
4887 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); in e1000e_update_phy_stats()
4888 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); in e1000e_update_phy_stats()
4890 adapter->stats.scc += phy_data; in e1000e_update_phy_stats()
4893 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); in e1000e_update_phy_stats()
4894 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); in e1000e_update_phy_stats()
4896 adapter->stats.ecol += phy_data; in e1000e_update_phy_stats()
4899 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); in e1000e_update_phy_stats()
4900 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); in e1000e_update_phy_stats()
4902 adapter->stats.mcc += phy_data; in e1000e_update_phy_stats()
4905 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); in e1000e_update_phy_stats()
4906 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); in e1000e_update_phy_stats()
4908 adapter->stats.latecol += phy_data; in e1000e_update_phy_stats()
4910 /* Collision Count - also used for adaptive IFS */ in e1000e_update_phy_stats()
4911 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); in e1000e_update_phy_stats()
4912 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); in e1000e_update_phy_stats()
4914 hw->mac.collision_delta = phy_data; in e1000e_update_phy_stats()
4917 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); in e1000e_update_phy_stats()
4918 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); in e1000e_update_phy_stats()
4920 adapter->stats.dc += phy_data; in e1000e_update_phy_stats()
4923 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); in e1000e_update_phy_stats()
4924 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); in e1000e_update_phy_stats()
4926 adapter->stats.tncrs += phy_data; in e1000e_update_phy_stats()
4929 hw->phy.ops.release(hw); in e1000e_update_phy_stats()
4933 * e1000e_update_stats - Update the board statistics counters
4938 struct net_device *netdev = adapter->netdev; in e1000e_update_stats()
4939 struct e1000_hw *hw = &adapter->hw; in e1000e_update_stats()
4940 struct pci_dev *pdev = adapter->pdev; in e1000e_update_stats()
4945 if (adapter->link_speed == 0) in e1000e_update_stats()
4950 adapter->stats.crcerrs += er32(CRCERRS); in e1000e_update_stats()
4951 adapter->stats.gprc += er32(GPRC); in e1000e_update_stats()
4952 adapter->stats.gorc += er32(GORCL); in e1000e_update_stats()
4954 adapter->stats.bprc += er32(BPRC); in e1000e_update_stats()
4955 adapter->stats.mprc += er32(MPRC); in e1000e_update_stats()
4956 adapter->stats.roc += er32(ROC); in e1000e_update_stats()
4958 adapter->stats.mpc += er32(MPC); in e1000e_update_stats()
4960 /* Half-duplex statistics */ in e1000e_update_stats()
4961 if (adapter->link_duplex == HALF_DUPLEX) { in e1000e_update_stats()
4962 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { in e1000e_update_stats()
4965 adapter->stats.scc += er32(SCC); in e1000e_update_stats()
4966 adapter->stats.ecol += er32(ECOL); in e1000e_update_stats()
4967 adapter->stats.mcc += er32(MCC); in e1000e_update_stats()
4968 adapter->stats.latecol += er32(LATECOL); in e1000e_update_stats()
4969 adapter->stats.dc += er32(DC); in e1000e_update_stats()
4971 hw->mac.collision_delta = er32(COLC); in e1000e_update_stats()
4973 if ((hw->mac.type != e1000_82574) && in e1000e_update_stats()
4974 (hw->mac.type != e1000_82583)) in e1000e_update_stats()
4975 adapter->stats.tncrs += er32(TNCRS); in e1000e_update_stats()
4977 adapter->stats.colc += hw->mac.collision_delta; in e1000e_update_stats()
4980 adapter->stats.xonrxc += er32(XONRXC); in e1000e_update_stats()
4981 adapter->stats.xontxc += er32(XONTXC); in e1000e_update_stats()
4982 adapter->stats.xoffrxc += er32(XOFFRXC); in e1000e_update_stats()
4983 adapter->stats.xofftxc += er32(XOFFTXC); in e1000e_update_stats()
4984 adapter->stats.gptc += er32(GPTC); in e1000e_update_stats()
4985 adapter->stats.gotc += er32(GOTCL); in e1000e_update_stats()
4987 adapter->stats.rnbc += er32(RNBC); in e1000e_update_stats()
4988 adapter->stats.ruc += er32(RUC); in e1000e_update_stats()
4990 adapter->stats.mptc += er32(MPTC); in e1000e_update_stats()
4991 adapter->stats.bptc += er32(BPTC); in e1000e_update_stats()
4995 hw->mac.tx_packet_delta = er32(TPT); in e1000e_update_stats()
4996 adapter->stats.tpt += hw->mac.tx_packet_delta; in e1000e_update_stats()
4998 adapter->stats.algnerrc += er32(ALGNERRC); in e1000e_update_stats()
4999 adapter->stats.rxerrc += er32(RXERRC); in e1000e_update_stats()
5000 adapter->stats.cexterr += er32(CEXTERR); in e1000e_update_stats()
5001 adapter->stats.tsctc += er32(TSCTC); in e1000e_update_stats()
5002 adapter->stats.tsctfc += er32(TSCTFC); in e1000e_update_stats()
5005 netdev->stats.multicast = adapter->stats.mprc; in e1000e_update_stats()
5006 netdev->stats.collisions = adapter->stats.colc; in e1000e_update_stats()
5013 netdev->stats.rx_errors = adapter->stats.rxerrc + in e1000e_update_stats()
5014 adapter->stats.crcerrs + adapter->stats.algnerrc + in e1000e_update_stats()
5015 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; in e1000e_update_stats()
5016 netdev->stats.rx_length_errors = adapter->stats.ruc + in e1000e_update_stats()
5017 adapter->stats.roc; in e1000e_update_stats()
5018 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; in e1000e_update_stats()
5019 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; in e1000e_update_stats()
5020 netdev->stats.rx_missed_errors = adapter->stats.mpc; in e1000e_update_stats()
5023 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol; in e1000e_update_stats()
5024 netdev->stats.tx_aborted_errors = adapter->stats.ecol; in e1000e_update_stats()
5025 netdev->stats.tx_window_errors = adapter->stats.latecol; in e1000e_update_stats()
5026 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; in e1000e_update_stats()
5031 adapter->stats.mgptc += er32(MGTPTC); in e1000e_update_stats()
5032 adapter->stats.mgprc += er32(MGTPRC); in e1000e_update_stats()
5033 adapter->stats.mgpdc += er32(MGTPDC); in e1000e_update_stats()
5036 if (hw->mac.type >= e1000_pch_lpt) { in e1000e_update_stats()
5039 adapter->corr_errors += in e1000e_update_stats()
5041 adapter->uncorr_errors += in e1000e_update_stats()
5048 * e1000_phy_read_status - Update the PHY register status snapshot
5053 struct e1000_hw *hw = &adapter->hw; in e1000_phy_read_status()
5054 struct e1000_phy_regs *phy = &adapter->phy_regs; in e1000_phy_read_status()
5056 if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) && in e1000_phy_read_status()
5058 (adapter->hw.phy.media_type == e1000_media_type_copper)) { in e1000_phy_read_status()
5061 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); in e1000_phy_read_status()
5062 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); in e1000_phy_read_status()
5063 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); in e1000_phy_read_status()
5064 ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa); in e1000_phy_read_status()
5065 ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion); in e1000_phy_read_status()
5066 ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000); in e1000_phy_read_status()
5067 ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000); in e1000_phy_read_status()
5068 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); in e1000_phy_read_status()
5073 * Set values to typical power-on defaults in e1000_phy_read_status()
5075 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); in e1000_phy_read_status()
5076 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | in e1000_phy_read_status()
5079 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | in e1000_phy_read_status()
5081 phy->lpa = 0; in e1000_phy_read_status()
5082 phy->expansion = EXPANSION_ENABLENPAGE; in e1000_phy_read_status()
5083 phy->ctrl1000 = ADVERTISE_1000FULL; in e1000_phy_read_status()
5084 phy->stat1000 = 0; in e1000_phy_read_status()
5085 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); in e1000_phy_read_status()
5091 struct e1000_hw *hw = &adapter->hw; in e1000_print_link_info()
5095 netdev_info(adapter->netdev, in e1000_print_link_info()
5097 adapter->link_speed, in e1000_print_link_info()
5098 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", in e1000_print_link_info()
5106 struct e1000_hw *hw = &adapter->hw; in e1000e_has_link()
5115 switch (hw->phy.media_type) { in e1000e_has_link()
5117 if (hw->mac.get_link_status) { in e1000e_has_link()
5118 ret_val = hw->mac.ops.check_for_link(hw); in e1000e_has_link()
5119 link_active = !hw->mac.get_link_status; in e1000e_has_link()
5125 ret_val = hw->mac.ops.check_for_link(hw); in e1000e_has_link()
5129 ret_val = hw->mac.ops.check_for_link(hw); in e1000e_has_link()
5130 link_active = hw->mac.serdes_has_link; in e1000e_has_link()
5137 if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && in e1000e_has_link()
5149 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && in e1000e_enable_receives()
5150 (adapter->flags & FLAG_RESTART_NOW)) { in e1000e_enable_receives()
5151 struct e1000_hw *hw = &adapter->hw; in e1000e_enable_receives()
5155 adapter->flags &= ~FLAG_RESTART_NOW; in e1000e_enable_receives()
5161 struct e1000_hw *hw = &adapter->hw; in e1000e_check_82574_phy_workaround()
5167 adapter->phy_hang_count++; in e1000e_check_82574_phy_workaround()
5169 adapter->phy_hang_count = 0; in e1000e_check_82574_phy_workaround()
5171 if (adapter->phy_hang_count > 1) { in e1000e_check_82574_phy_workaround()
5172 adapter->phy_hang_count = 0; in e1000e_check_82574_phy_workaround()
5173 e_dbg("PHY appears hung - resetting\n"); in e1000e_check_82574_phy_workaround()
5174 schedule_work(&adapter->reset_task); in e1000e_check_82574_phy_workaround()
5179 * e1000_watchdog - Timer Call-back
5187 schedule_work(&adapter->watchdog_task); in e1000_watchdog()
5197 struct net_device *netdev = adapter->netdev; in e1000_watchdog_task()
5198 struct e1000_mac_info *mac = &adapter->hw.mac; in e1000_watchdog_task()
5199 struct e1000_phy_info *phy = &adapter->hw.phy; in e1000_watchdog_task()
5200 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_watchdog_task()
5202 struct e1000_hw *hw = &adapter->hw; in e1000_watchdog_task()
5205 if (test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5211 pm_runtime_resume(netdev->dev.parent); in e1000_watchdog_task()
5218 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) in e1000_watchdog_task()
5226 pm_runtime_resume(netdev->dev.parent); in e1000_watchdog_task()
5241 e1000_phy_hw_reset(&adapter->hw); in e1000_watchdog_task()
5247 mac->ops.get_link_up_info(&adapter->hw, in e1000_watchdog_task()
5248 &adapter->link_speed, in e1000_watchdog_task()
5249 &adapter->link_duplex); in e1000_watchdog_task()
5254 if (phy->speed_downgraded) in e1000_watchdog_task()
5261 if ((hw->phy.type == e1000_phy_igp_3 || in e1000_watchdog_task()
5262 hw->phy.type == e1000_phy_bm) && in e1000_watchdog_task()
5263 hw->mac.autoneg && in e1000_watchdog_task()
5264 (adapter->link_speed == SPEED_10 || in e1000_watchdog_task()
5265 adapter->link_speed == SPEED_100) && in e1000_watchdog_task()
5266 (adapter->link_duplex == HALF_DUPLEX)) { in e1000_watchdog_task()
5276 adapter->tx_timeout_factor = 1; in e1000_watchdog_task()
5277 switch (adapter->link_speed) { in e1000_watchdog_task()
5280 adapter->tx_timeout_factor = 16; in e1000_watchdog_task()
5284 adapter->tx_timeout_factor = 10; in e1000_watchdog_task()
5288 /* workaround: re-program speed mode bit after in e1000_watchdog_task()
5289 * link-up event in e1000_watchdog_task()
5291 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && in e1000_watchdog_task()
5303 if (!(adapter->flags & FLAG_TSO_FORCE)) { in e1000_watchdog_task()
5304 switch (adapter->link_speed) { in e1000_watchdog_task()
5308 netdev->features &= ~NETIF_F_TSO; in e1000_watchdog_task()
5309 netdev->features &= ~NETIF_F_TSO6; in e1000_watchdog_task()
5312 netdev->features |= NETIF_F_TSO; in e1000_watchdog_task()
5313 netdev->features |= NETIF_F_TSO6; in e1000_watchdog_task()
5319 if (hw->mac.type == e1000_pch_spt) { in e1000_watchdog_task()
5320 netdev->features &= ~NETIF_F_TSO; in e1000_watchdog_task()
5321 netdev->features &= ~NETIF_F_TSO6; in e1000_watchdog_task()
5332 /* Perform any post-link-up configuration before in e1000_watchdog_task()
5335 if (phy->ops.cfg_on_link_up) in e1000_watchdog_task()
5336 phy->ops.cfg_on_link_up(hw); in e1000_watchdog_task()
5341 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5342 mod_timer(&adapter->phy_info_timer, in e1000_watchdog_task()
5347 adapter->link_speed = 0; in e1000_watchdog_task()
5348 adapter->link_duplex = 0; in e1000_watchdog_task()
5353 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5354 mod_timer(&adapter->phy_info_timer, in e1000_watchdog_task()
5357 /* 80003ES2LAN requires a Rx packet buffer work-around in e1000_watchdog_task()
5361 if (adapter->flags & FLAG_RX_NEEDS_RESTART) in e1000_watchdog_task()
5362 adapter->flags |= FLAG_RESTART_NOW; in e1000_watchdog_task()
5364 pm_schedule_suspend(netdev->dev.parent, in e1000_watchdog_task()
5370 spin_lock(&adapter->stats64_lock); in e1000_watchdog_task()
5373 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; in e1000_watchdog_task()
5374 adapter->tpt_old = adapter->stats.tpt; in e1000_watchdog_task()
5375 mac->collision_delta = adapter->stats.colc - adapter->colc_old; in e1000_watchdog_task()
5376 adapter->colc_old = adapter->stats.colc; in e1000_watchdog_task()
5378 adapter->gorc = adapter->stats.gorc - adapter->gorc_old; in e1000_watchdog_task()
5379 adapter->gorc_old = adapter->stats.gorc; in e1000_watchdog_task()
5380 adapter->gotc = adapter->stats.gotc - adapter->gotc_old; in e1000_watchdog_task()
5381 adapter->gotc_old = adapter->stats.gotc; in e1000_watchdog_task()
5382 spin_unlock(&adapter->stats64_lock); in e1000_watchdog_task()
5389 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) in e1000_watchdog_task()
5390 adapter->flags |= FLAG_RESTART_NOW; in e1000_watchdog_task()
5393 if (adapter->flags & FLAG_RESTART_NOW) { in e1000_watchdog_task()
5394 schedule_work(&adapter->reset_task); in e1000_watchdog_task()
5399 e1000e_update_adaptive(&adapter->hw); in e1000_watchdog_task()
5402 if (adapter->itr_setting == 4) { in e1000_watchdog_task()
5405 * everyone else is between 2000-8000. in e1000_watchdog_task()
5407 u32 goc = (adapter->gotc + adapter->gorc) / 10000; in e1000_watchdog_task()
5408 u32 dif = (adapter->gotc > adapter->gorc ? in e1000_watchdog_task()
5409 adapter->gotc - adapter->gorc : in e1000_watchdog_task()
5410 adapter->gorc - adapter->gotc) / 10000; in e1000_watchdog_task()
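Editor's note: in the full driver these two inputs feed a simple interpolation (elided here) that, as best we read the source, lands between 2000 and 8000 interrupts/sec depending on how lopsided the Tx/Rx byte counts are. A worked sketch with sample byte counts:

	#include <stdio.h>

	int main(void)
	{
		unsigned int gotc = 50000000;	/* sample Tx bytes this interval */
		unsigned int gorc = 10000000;	/* sample Rx bytes this interval */
		unsigned int goc = (gotc + gorc) / 10000;
		unsigned int dif = (gotc > gorc ? gotc - gorc : gorc - gotc) / 10000;
		/* symmetric traffic -> 2000; pure one-way traffic -> 8000 */
		unsigned int itr = goc ? dif * 6000 / goc + 2000 : 8000;

		printf("itr=%u\n", itr);	/* prints 6000 */
		return 0;
	}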
5417 if (adapter->msix_entries) in e1000_watchdog_task()
5418 ew32(ICS, adapter->rx_ring->ims_val); in e1000_watchdog_task()
5426 adapter->detect_tx_hung = true; in e1000_watchdog_task()
5432 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0); in e1000_watchdog_task()
5434 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) in e1000_watchdog_task()
5438 if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { in e1000_watchdog_task()
5439 if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) && in e1000_watchdog_task()
5442 adapter->rx_hwtstamp_cleared++; in e1000_watchdog_task()
5444 adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP; in e1000_watchdog_task()
5449 if (!test_bit(__E1000_DOWN, &adapter->state)) in e1000_watchdog_task()
5450 mod_timer(&adapter->watchdog_timer, in e1000_watchdog_task()
5482 mss = skb_shinfo(skb)->gso_size; in e1000_tso()
5485 iph->tot_len = 0; in e1000_tso()
5486 iph->check = 0; in e1000_tso()
5487 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, in e1000_tso()
5490 ipcse = skb_transport_offset(skb) - 1; in e1000_tso()
5496 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; in e1000_tso()
5498 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; in e1000_tso()
5501 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); in e1000_tso()
5503 i = tx_ring->next_to_use; in e1000_tso()
5505 buffer_info = &tx_ring->buffer_info[i]; in e1000_tso()
5507 context_desc->lower_setup.ip_fields.ipcss = ipcss; in e1000_tso()
5508 context_desc->lower_setup.ip_fields.ipcso = ipcso; in e1000_tso()
5509 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); in e1000_tso()
5510 context_desc->upper_setup.tcp_fields.tucss = tucss; in e1000_tso()
5511 context_desc->upper_setup.tcp_fields.tucso = tucso; in e1000_tso()
5512 context_desc->upper_setup.tcp_fields.tucse = 0; in e1000_tso()
5513 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); in e1000_tso()
5514 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; in e1000_tso()
5515 context_desc->cmd_and_length = cpu_to_le32(cmd_length); in e1000_tso()
5517 buffer_info->time_stamp = jiffies; in e1000_tso()
5518 buffer_info->next_to_watch = i; in e1000_tso()
5521 if (i == tx_ring->count) in e1000_tso()
5523 tx_ring->next_to_use = i; in e1000_tso()
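Editor's note: the seed stored into tcp_hdr(skb)->check above is the ones'-complement sum of the TCP pseudo-header with the length field left at zero, so the hardware can fold each segment's real length into the checksum as it splits the payload. A stand-alone model of that seed (host byte order, for illustration only; not the kernel's csum helpers):

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t pseudo_hdr_seed(uint32_t saddr, uint32_t daddr, uint8_t proto)
	{
		uint32_t sum = 0;

		sum += (saddr >> 16) + (saddr & 0xffff);
		sum += (daddr >> 16) + (daddr & 0xffff);
		sum += proto;			/* segment length intentionally omitted */

		while (sum >> 16)		/* fold carries back into 16 bits */
			sum = (sum & 0xffff) + (sum >> 16);

		return (uint16_t)sum;		/* not inverted: it is only a seed */
	}

	int main(void)
	{
		/* 192.0.2.1 -> 192.0.2.2, TCP (protocol 6) */
		printf("seed=0x%04x\n", pseudo_hdr_seed(0xc0000201, 0xc0000202, 6));
		return 0;
	}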
5531 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_tx_csum()
5538 if (skb->ip_summed != CHECKSUM_PARTIAL) in e1000_tx_csum()
5543 if (ip_hdr(skb)->protocol == IPPROTO_TCP) in e1000_tx_csum()
5548 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) in e1000_tx_csum()
5560 i = tx_ring->next_to_use; in e1000_tx_csum()
5561 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_csum()
5564 context_desc->lower_setup.ip_config = 0; in e1000_tx_csum()
5565 context_desc->upper_setup.tcp_fields.tucss = css; in e1000_tx_csum()
5566 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; in e1000_tx_csum()
5567 context_desc->upper_setup.tcp_fields.tucse = 0; in e1000_tx_csum()
5568 context_desc->tcp_seg_setup.data = 0; in e1000_tx_csum()
5569 context_desc->cmd_and_length = cpu_to_le32(cmd_len); in e1000_tx_csum()
5571 buffer_info->time_stamp = jiffies; in e1000_tx_csum()
5572 buffer_info->next_to_watch = i; in e1000_tx_csum()
5575 if (i == tx_ring->count) in e1000_tx_csum()
5577 tx_ring->next_to_use = i; in e1000_tx_csum()
5586 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_tx_map()
5587 struct pci_dev *pdev = adapter->pdev; in e1000_tx_map()
5593 i = tx_ring->next_to_use; in e1000_tx_map()
5596 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
5599 buffer_info->length = size; in e1000_tx_map()
5600 buffer_info->time_stamp = jiffies; in e1000_tx_map()
5601 buffer_info->next_to_watch = i; in e1000_tx_map()
5602 buffer_info->dma = dma_map_single(&pdev->dev, in e1000_tx_map()
5603 skb->data + offset, in e1000_tx_map()
5605 buffer_info->mapped_as_page = false; in e1000_tx_map()
5606 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) in e1000_tx_map()
5609 len -= size; in e1000_tx_map()
5615 if (i == tx_ring->count) in e1000_tx_map()
5621 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in e1000_tx_map()
5628 if (i == tx_ring->count) in e1000_tx_map()
5631 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
5634 buffer_info->length = size; in e1000_tx_map()
5635 buffer_info->time_stamp = jiffies; in e1000_tx_map()
5636 buffer_info->next_to_watch = i; in e1000_tx_map()
5637 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, in e1000_tx_map()
5640 buffer_info->mapped_as_page = true; in e1000_tx_map()
5641 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) in e1000_tx_map()
5644 len -= size; in e1000_tx_map()
5650 segs = skb_shinfo(skb)->gso_segs ? : 1; in e1000_tx_map()
5652 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; in e1000_tx_map()
5654 tx_ring->buffer_info[i].skb = skb; in e1000_tx_map()
5655 tx_ring->buffer_info[i].segs = segs; in e1000_tx_map()
5656 tx_ring->buffer_info[i].bytecount = bytecount; in e1000_tx_map()
5657 tx_ring->buffer_info[first].next_to_watch = i; in e1000_tx_map()
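Editor's note: the segs/bytecount pair recorded above is what the Tx cleanup path later feeds into the packet and byte counters, so the replicated headers of a TSO skb are accounted once per segment. A worked check with sample sizes:

	#include <stdio.h>

	int main(void)
	{
		unsigned int headlen = 66;	/* sample Ethernet + IP + TCP header */
		unsigned int payload = 2896;	/* two full 1448-byte segments */
		unsigned int segs = 2;
		unsigned int skb_len = headlen + payload;
		unsigned int bytecount = (segs - 1) * headlen + skb_len;

		/* 3028 == 2 * (66 + 1448): each segment carries its own header */
		printf("%u\n", bytecount);
		return 0;
	}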
5662 dev_err(&pdev->dev, "Tx DMA map failed\n"); in e1000_tx_map()
5663 buffer_info->dma = 0; in e1000_tx_map()
5665 count--; in e1000_tx_map()
5667 while (count--) { in e1000_tx_map()
5669 i += tx_ring->count; in e1000_tx_map()
5670 i--; in e1000_tx_map()
5671 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_map()
5680 struct e1000_adapter *adapter = tx_ring->adapter; in e1000_tx_queue()
5713 i = tx_ring->next_to_use; in e1000_tx_queue()
5716 buffer_info = &tx_ring->buffer_info[i]; in e1000_tx_queue()
5718 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); in e1000_tx_queue()
5719 tx_desc->lower.data = cpu_to_le32(txd_lower | in e1000_tx_queue()
5720 buffer_info->length); in e1000_tx_queue()
5721 tx_desc->upper.data = cpu_to_le32(txd_upper); in e1000_tx_queue()
5724 if (i == tx_ring->count) in e1000_tx_queue()
5726 } while (--count > 0); in e1000_tx_queue()
5728 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); in e1000_tx_queue()
5730 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ in e1000_tx_queue()
5732 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); in e1000_tx_queue()
5736 * applicable for weak-ordered memory model archs, in e1000_tx_queue()
5737 * such as IA-64). in e1000_tx_queue()
5741 tx_ring->next_to_use = i; in e1000_tx_queue()
5748 struct e1000_hw *hw = &adapter->hw; in e1000_transfer_dhcp_info()
5752 !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && in e1000_transfer_dhcp_info()
5753 (adapter->hw.mng_cookie.status & in e1000_transfer_dhcp_info()
5757 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) in e1000_transfer_dhcp_info()
5760 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP)) in e1000_transfer_dhcp_info()
5764 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); in e1000_transfer_dhcp_info()
5767 if (ip->protocol != IPPROTO_UDP) in e1000_transfer_dhcp_info()
5770 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); in e1000_transfer_dhcp_info()
5771 if (ntohs(udp->dest) != 67) in e1000_transfer_dhcp_info()
5774 offset = (u8 *)udp + 8 - skb->data; in e1000_transfer_dhcp_info()
5775 length = skb->len - offset; in e1000_transfer_dhcp_info()
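Editor's note: the literal 8 above is the size of the UDP header, so the slice handed to the manageability firmware starts at the UDP payload (the DHCP message itself) and runs to the end of the skb; port 67 is the BOOTP/DHCP server port being filtered for.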
5784 struct e1000_adapter *adapter = tx_ring->adapter; in __e1000_maybe_stop_tx()
5786 netif_stop_queue(adapter->netdev); in __e1000_maybe_stop_tx()
5797 return -EBUSY; in __e1000_maybe_stop_tx()
5800 netif_start_queue(adapter->netdev); in __e1000_maybe_stop_tx()
5801 ++adapter->restart_queue; in __e1000_maybe_stop_tx()
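Editor's note: the ordering above is the classic stop-then-recheck pattern. The queue is stopped first (followed by a memory barrier, elided in this listing), free descriptors are counted again in case the Tx cleanup path made room in the meantime, and only then is the queue restarted. A stand-alone model of the ring-occupancy check it relies on (fake_ring/fake_unused are illustrative names, not driver API):

	struct fake_ring {
		unsigned int next_to_use;	/* producer index */
		unsigned int next_to_clean;	/* consumer index */
		unsigned int count;		/* ring size */
	};

	/* same arithmetic as e1000_desc_unused(): one slot is deliberately
	 * held back so a full ring is never mistaken for an empty one
	 */
	unsigned int fake_unused(const struct fake_ring *r)
	{
		if (r->next_to_clean > r->next_to_use)
			return r->next_to_clean - r->next_to_use - 1;
		return r->count + r->next_to_clean - r->next_to_use - 1;
	}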
5807 BUG_ON(size > tx_ring->count); in e1000_maybe_stop_tx()
5818 struct e1000_ring *tx_ring = adapter->tx_ring; in e1000_xmit_frame()
5829 if (test_bit(__E1000_DOWN, &adapter->state)) { in e1000_xmit_frame()
5834 if (skb->len <= 0) { in e1000_xmit_frame()
5845 mss = skb_shinfo(skb)->gso_size; in e1000_xmit_frame()
5849 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data in e1000_xmit_frame()
5851 * frags into skb->data in e1000_xmit_frame()
5854 /* we do this workaround for ES2LAN, but it is unnecessary, in e1000_xmit_frame()
5857 if (skb->data_len && (hdr_len == len)) { in e1000_xmit_frame()
5860 pull_size = min_t(unsigned int, 4, skb->data_len); in e1000_xmit_frame()
5871 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) in e1000_xmit_frame()
5875 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); in e1000_xmit_frame()
5877 nr_frags = skb_shinfo(skb)->nr_frags; in e1000_xmit_frame()
5879 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), in e1000_xmit_frame()
5880 adapter->tx_fifo_limit); in e1000_xmit_frame()
5882 if (adapter->hw.mac.tx_pkt_filtering) in e1000_xmit_frame()
5897 first = tx_ring->next_to_use; in e1000_xmit_frame()
5917 if (unlikely(skb->no_fcs)) in e1000_xmit_frame()
5921 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, in e1000_xmit_frame()
5924 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in e1000_xmit_frame()
5925 (adapter->flags & FLAG_HAS_HW_TIMESTAMP)) { in e1000_xmit_frame()
5926 if (!adapter->tx_hwtstamp_skb) { in e1000_xmit_frame()
5927 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in e1000_xmit_frame()
5929 adapter->tx_hwtstamp_skb = skb_get(skb); in e1000_xmit_frame()
5930 adapter->tx_hwtstamp_start = jiffies; in e1000_xmit_frame()
5931 schedule_work(&adapter->tx_hwtstamp_work); in e1000_xmit_frame()
5933 adapter->tx_hwtstamp_skipped++; in e1000_xmit_frame()
5939 netdev_sent_queue(netdev, skb->len); in e1000_xmit_frame()
5945 adapter->tx_fifo_limit) + 2)); in e1000_xmit_frame()
5949 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) in e1000_xmit_frame()
5951 tx_ring->next_to_use); in e1000_xmit_frame()
5953 writel(tx_ring->next_to_use, tx_ring->tail); in e1000_xmit_frame()
5957 tx_ring->buffer_info[first].time_stamp = 0; in e1000_xmit_frame()
5958 tx_ring->next_to_use = first; in e1000_xmit_frame()
5965 * e1000_tx_timeout - Respond to a Tx Hang
5974 adapter->tx_timeout_count++; in e1000_tx_timeout()
5975 schedule_work(&adapter->reset_task); in e1000_tx_timeout()
5985 if (test_bit(__E1000_DOWN, &adapter->state)) { in e1000_reset_task()
5990 if (!(adapter->flags & FLAG_RESTART_NOW)) { in e1000_reset_task()
5999 * e1000e_get_stats64 - Get System Network Statistics
6010 spin_lock(&adapter->stats64_lock); in e1000e_get_stats64()
6013 stats->rx_bytes = adapter->stats.gorc; in e1000e_get_stats64()
6014 stats->rx_packets = adapter->stats.gprc; in e1000e_get_stats64()
6015 stats->tx_bytes = adapter->stats.gotc; in e1000e_get_stats64()
6016 stats->tx_packets = adapter->stats.gptc; in e1000e_get_stats64()
6017 stats->multicast = adapter->stats.mprc; in e1000e_get_stats64()
6018 stats->collisions = adapter->stats.colc; in e1000e_get_stats64()
6025 stats->rx_errors = adapter->stats.rxerrc + in e1000e_get_stats64()
6026 adapter->stats.crcerrs + adapter->stats.algnerrc + in e1000e_get_stats64()
6027 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; in e1000e_get_stats64()
6028 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; in e1000e_get_stats64()
6029 stats->rx_crc_errors = adapter->stats.crcerrs; in e1000e_get_stats64()
6030 stats->rx_frame_errors = adapter->stats.algnerrc; in e1000e_get_stats64()
6031 stats->rx_missed_errors = adapter->stats.mpc; in e1000e_get_stats64()
6034 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; in e1000e_get_stats64()
6035 stats->tx_aborted_errors = adapter->stats.ecol; in e1000e_get_stats64()
6036 stats->tx_window_errors = adapter->stats.latecol; in e1000e_get_stats64()
6037 stats->tx_carrier_errors = adapter->stats.tncrs; in e1000e_get_stats64()
6041 spin_unlock(&adapter->stats64_lock); in e1000e_get_stats64()
6045 * e1000_change_mtu - Change the Maximum Transfer Unit
6058 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { in e1000_change_mtu()
6060 return -EINVAL; in e1000_change_mtu()
6064 if ((adapter->hw.mac.type >= e1000_pch2lan) && in e1000_change_mtu()
6065 !(adapter->flags2 & FLAG2_CRC_STRIPPING) && in e1000_change_mtu()
6068 return -EINVAL; in e1000_change_mtu()
6071 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) in e1000_change_mtu()
6073 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ in e1000_change_mtu()
6074 adapter->max_frame_size = max_frame; in e1000_change_mtu()
6076 netdev->mtu, new_mtu); in e1000_change_mtu()
6077 netdev->mtu = new_mtu; in e1000_change_mtu()
6079 pm_runtime_get_sync(netdev->dev.parent); in e1000_change_mtu()
6087 * i.e. RXBUFFER_2048 --> size-4096 slab in e1000_change_mtu()
6093 adapter->rx_buffer_len = 2048; in e1000_change_mtu()
6095 adapter->rx_buffer_len = 4096; in e1000_change_mtu()
6099 adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; in e1000_change_mtu()
6106 pm_runtime_put_sync(netdev->dev.parent); in e1000_change_mtu()
6108 clear_bit(__E1000_RESETTING, &adapter->state); in e1000_change_mtu()
6119 if (adapter->hw.phy.media_type != e1000_media_type_copper) in e1000_mii_ioctl()
6120 return -EOPNOTSUPP; in e1000_mii_ioctl()
6124 data->phy_id = adapter->hw.phy.addr; in e1000_mii_ioctl()
6129 switch (data->reg_num & 0x1F) { in e1000_mii_ioctl()
6131 data->val_out = adapter->phy_regs.bmcr; in e1000_mii_ioctl()
6134 data->val_out = adapter->phy_regs.bmsr; in e1000_mii_ioctl()
6137 data->val_out = (adapter->hw.phy.id >> 16); in e1000_mii_ioctl()
6140 data->val_out = (adapter->hw.phy.id & 0xFFFF); in e1000_mii_ioctl()
6143 data->val_out = adapter->phy_regs.advertise; in e1000_mii_ioctl()
6146 data->val_out = adapter->phy_regs.lpa; in e1000_mii_ioctl()
6149 data->val_out = adapter->phy_regs.expansion; in e1000_mii_ioctl()
6152 data->val_out = adapter->phy_regs.ctrl1000; in e1000_mii_ioctl()
6155 data->val_out = adapter->phy_regs.stat1000; in e1000_mii_ioctl()
6158 data->val_out = adapter->phy_regs.estatus; in e1000_mii_ioctl()
6161 return -EIO; in e1000_mii_ioctl()
6166 return -EOPNOTSUPP; in e1000_mii_ioctl()
/**
 * e1000e_hwtstamp_set - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request
 *
 * The hardware may grant a more permissive Rx filter than requested, so
 * the (possibly adjusted) config is copied back to user space.
 **/
static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct hwtstamp_config config;
	int ret_val;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	ret_val = e1000e_config_hwtstamp(adapter, &config);
	if (ret_val)
		return ret_val;

	/* ... for PTPv1 filters, flush any stale Rx timestamp ... */

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
			    sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
}
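/* Illustrative sketch (not driver code): enabling hardware timestamping via
 * the SIOCSHWTSTAMP path above (SIOCGHWTSTAMP reads the current config back).
 * Constants come from <linux/net_tstamp.h>; the interface name is an
 * assumption, and a real caller should check which rx_filter was granted.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hwtstamp(int fd, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* on success the driver writes back the filter actually in use */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}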
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return e1000e_hwtstamp_set(netdev, ifr);
	case SIOCGHWTSTAMP:
		return e1000e_hwtstamp_get(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 i, mac_reg, wuc;
	u16 phy_reg;
	int retval;

	retval = hw->phy.ops.acquire(hw);
	if (retval)
		return retval;

	/* copy MAC MTA to PHY MTA - only needed for pchlan */
	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
					   (u16)((mac_reg >> 16) & 0xFFFF));
	}

	/* mirror the MAC's Rx filtering setup into the PHY Rx Control reg */
	hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
	/* ... translate E1000_RCTL_* bits into BM_RCTL_* bits ... */
	hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);

	/* PME enable, plus APME when waking on magic packet or link change */
	wuc = E1000_WUC_PME_EN;
	if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC))
		wuc |= E1000_WUC_APME;

	/* configure and enable PHY wakeup in PHY registers */
	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);

	/* ... activate PHY wakeup ... */

	hw->phy.ops.release(hw);

	return retval;
}
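/* Illustrative sketch (not driver code): requesting magic-packet wake from
 * user space (the equivalent of "ethtool -s eth0 wol g").  The WoL bits
 * stored by this request become the wufc value used at suspend time, which
 * on parts with PHY wakeup is what e1000_init_phy_wakeup() above programs.
 * The interface name is an assumption.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_wol_magic(int fd, const char *ifname)
{
	struct ethtool_wolinfo wol;
	struct ifreq ifr;

	memset(&wol, 0, sizeof(wol));
	wol.cmd = ETHTOOL_SWOL;
	wol.wolopts = WAKE_MAGIC;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&wol;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}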
static void e1000e_flush_lpic(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ret_val;

	pm_runtime_get_sync(netdev->dev.parent);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto fl_out;

	pr_info("EEE TX LPI TIMER: %08X\n",
		er32(LPIC) >> E1000_LPIC_LPIET_SHIFT);

	hw->phy.ops.release(hw);

fl_out:
	pm_runtime_put_sync(netdev->dev.parent);
}
	/* in e1000e_s0ix_entry_flow(): */
	struct e1000_hw *hw = &adapter->hw;
	/* ... among other steps, program the PHY power controls:
	 * page769_20[7] - PHY PLL stop
	 * page769_20[8] - PHY go to the electrical idle
	 * page769_20[9] - PHY serdes disable
	 * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1
	 * ...
	 */

	/* in e1000e_s0ix_exit_flow(): undo the S0ix entry programming */
	struct e1000_hw *hw = &adapter->hw;
	/* in e1000e_pm_freeze(): wait out any in-flight reset before
	 * quiescing the interface
	 */
	while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
		usleep_range(10000, 11000);

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));

	/* ... detach the netdev and bring the interface down ... */

	/* Allow time for pending master requests to run */
	e1000e_disable_pcie_master(&adapter->hw);
static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, wufc;
	int retval = 0;

	/* Runtime suspend should only enable wakeup for link changes */
	if (runtime)
		wufc = E1000_WUFC_LNKC;
	else if (device_may_wakeup(&pdev->dev))
		wufc = adapter->wol;
	else
		wufc = 0;

	if (wufc) {
		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_ADVD3WUC;
		if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
			ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
		ew32(CTRL, ctrl);

		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
		    adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		if (adapter->flags & FLAG_IS_ICH)
			e1000_suspend_workarounds_ich8lan(&adapter->hw);

		if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
			/* enable wakeup by the PHY */
			retval = e1000_init_phy_wakeup(adapter, wufc);
			if (retval)
				return retval;
		} else {
			/* enable wakeup by the MAC */
			ew32(WUFC, wufc);
			ew32(WUC, E1000_WUC_PME_EN);
		}
	}

	if (adapter->hw.phy.type == e1000_phy_igp_3) {
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
	} else if (hw->mac.type >= e1000_pch_lpt) {
		/* ... enable ULP; it does not support wake from unicast,
		 * multicast or broadcast ...
		 */
	}

	/* Ensure the appropriate bits are set in LPI_CTRL for EEE in Sx */
	if ((hw->phy.type >= e1000_phy_i217) &&
	    adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) {
		u16 lpi_ctrl = 0;

		retval = hw->phy.ops.acquire(hw);
		if (!retval) {
			if (adapter->eee_advert &
			    hw->dev_spec.ich8lan.eee_lp_ability &
			    I82579_EEE_100_SUPPORTED)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			if (adapter->eee_advert &
			    hw->dev_spec.ich8lan.eee_lp_ability &
			    I82579_EEE_1000_SUPPORTED)
				lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
			/* ... read-modify-write I82579_LPI_CTRL ... */
			hw->phy.ops.release(hw);
		}
	}

	/* The pci-e switch on some quad port adapters will report a
	 * correctable error when the MAC transitions from D0 to D3.  To
	 * prevent this, mask off the correctable errors on the
	 * downstream port of the pci-e switch.
	 */
	if (adapter->flags & FLAG_IS_QUAD_PORT) {
		struct pci_dev *us_dev = pdev->bus->self;
		/* ... */
	}

	return 0;
}
/**
 * __e1000e_disable_aspm - Disable ASPM states
 * @pdev: pointer to PCI device struct
 * @state: bit-mask of ASPM states to disable
 * @locked: indication if this context holds pci_bus_sem locked
 *
 * Some devices *must* have certain ASPM states disabled per hardware errata.
 **/
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked)
{
	struct pci_dev *parent = pdev->bus->self;
	u16 aspm_dis_mask = 0;

	/* ... translate PCIE_LINK_STATE_* bits into LNKCTL ASPM bits ... */

	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
		 (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
		 (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");

	/* Double-check ASPM control.  If not disabled by the above, the
	 * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
	 * not enabled); override by writing PCI config space directly.
	 */
	/* ... clear the ASPM bits in LNKCTL on the device and its parent ... */
}

/**
 * e1000e_disable_aspm - Disable ASPM states.
 * @pdev: pointer to PCI device struct
 * @state: bit-mask of ASPM states to disable
 *
 * Acquires the pci_bus_sem; some devices *must* have certain ASPM states
 * disabled per hardware errata.
 **/
static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	__e1000e_disable_aspm(pdev, state, 0);
}

/**
 * e1000e_disable_aspm_locked - Disable ASPM states.
 * @pdev: pointer to PCI device struct
 * @state: bit-mask of ASPM states to disable
 *
 * Must be called with pci_bus_sem already held.
 **/
static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
{
	__e1000e_disable_aspm(pdev, state, 1);
}
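/* Minimal sketch (an assumption, not the driver's full routine) of the
 * direct config-space override described above: clearing the ASPM control
 * bits in the PCIe Link Control register.  pcie_capability_clear_word() is
 * the standard kernel helper for read-modify-write of capability registers.
 */
#include <linux/pci.h>

static void aspm_force_off(struct pci_dev *pdev)
{
	/* PCI_EXP_LNKCTL_ASPMC covers both L0s (bit 0) and L1 (bit 1) */
	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_ASPMC);
	if (pdev->bus->self)	/* mirror the change on the upstream port */
		pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC);
}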
static int __e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 aspm_disable_flag = 0;

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	pci_set_master(pdev);

	if (hw->mac.type >= e1000_pch2lan)
		e1000_resume_workarounds_pchlan(&adapter->hw);

	e1000e_power_up_phy(adapter);

	/* report the system wakeup cause from S3/S4 */
	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
		u16 phy_data;

		e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
		if (phy_data)
			e_info("PHY Wakeup cause - %s\n",
			       phy_data & E1000_WUS_EX ? "Unicast Packet" :
			       phy_data & E1000_WUS_MC ? "Multicast Packet" :
			       phy_data & E1000_WUS_BC ? "Broadcast Packet" :
			       phy_data & E1000_WUS_MAG ? "Magic Packet" :
			       phy_data & E1000_WUS_LNKC ?
			       "Link Status Change" : "other");
		e1e_wphy(&adapter->hw, BM_WUS, ~0);
	} else {
		u32 wus = er32(WUS);

		if (wus)
			e_info("MAC Wakeup cause - %s\n",
			       wus & E1000_WUS_EX ? "Unicast Packet" :
			       wus & E1000_WUS_MAG ? "Magic Packet" :
			       wus & E1000_WUS_LNKC ? "Link Status Change" :
			       "other");
		ew32(WUS, ~0);
	}

	e1000e_reset(adapter);

	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	return 0;
}
	/* in e1000e_pm_suspend(): enter S0ix last, after the NIC is down */
	if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
		e1000e_s0ix_entry_flow(adapter);

	/* in e1000e_pm_resume(): leave S0ix first, before the NIC resumes */
	if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
		e1000e_s0ix_exit_flow(adapter);
static int e1000e_pm_runtime_idle(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	u16 eee_lp;

	/* the link check clobbers the EEE link-partner ability value,
	 * so save it and restore it afterwards
	 */
	eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability;

	if (!e1000e_has_link(adapter)) {
		adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp;
		pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
	}

	return -EBUSY;
}
static int e1000e_pm_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = __e1000_resume(pdev);
	if (rc)
		return rc;

	if (netdev->flags & IFF_UP)
		e1000e_up(adapter);

	return rc;
}

static int e1000e_pm_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_UP) {
		int count = E1000_CHECK_RESET_COUNT;

		while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
			usleep_range(10000, 11000);

		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));

		/* Down the device without resetting the hardware */
		e1000e_down(adapter, false);
	}

	if (__e1000_shutdown(pdev, true))
		return -EBUSY;

	return 0;
}
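/* Usage note (assumption: the standard PCI runtime-PM sysfs knobs, not
 * driver code): the runtime suspend/resume paths above only trigger once
 * user space allows runtime PM for the device, e.g.
 *
 *   echo auto > /sys/bus/pci/devices/0000:00:19.0/power/control
 *
 * where 0000:00:19.0 is the NIC's PCI address (a typical LOM slot is used
 * here as an example); writing "on" forbids runtime suspend again.
 */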
static void e1000_shutdown(struct pci_dev *pdev)
{
	e1000e_flush_lpic(pdev);

	e1000e_pm_freeze(&pdev->dev);

	__e1000_shutdown(pdev, false);
}
	/* in e1000_intr_msix() (netpoll helper): invoke each MSI-X vector's
	 * handler in turn with its IRQ temporarily disabled
	 */
	if (adapter->msix_entries) {
		int vector = 0, msix_irq;

		msix_irq = adapter->msix_entries[vector++].vector;
		if (disable_hardirq(msix_irq))
			e1000_intr_msix_rx(msix_irq, netdev);
		enable_irq(msix_irq);

		msix_irq = adapter->msix_entries[vector++].vector;
		if (disable_hardirq(msix_irq))
			e1000_intr_msix_tx(msix_irq, netdev);
		enable_irq(msix_irq);

		msix_irq = adapter->msix_entries[vector].vector;
		if (disable_hardirq(msix_irq))
			e1000_msix_other(msix_irq, netdev);
		enable_irq(msix_irq);
	}
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		e1000_intr_msix(adapter->pdev->irq, netdev);
		break;
	case E1000E_INT_MODE_MSI:
		if (disable_hardirq(adapter->pdev->irq))
			e1000_intr_msi(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	default:	/* E1000E_INT_MODE_LEGACY */
		if (disable_hardirq(adapter->pdev->irq))
			e1000_intr(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	}
}
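/* Usage note (assumption: a typical netconsole setup, not driver code): the
 * polling path above is exercised when netconsole emits log messages with
 * interrupts disabled, e.g. loaded as
 *
 *   modprobe netconsole netconsole=6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55
 *
 * where the local port/IP/interface come first and the remote port/IP/MAC
 * follow; see Documentation/networking/netconsole.rst for the full syntax.
 */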
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * Called after a PCI bus error affecting this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	e1000e_pm_freeze(&pdev->dev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000e_pm_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 aspm_disable_flag = 0;
	pci_ers_result_t result;
	int err;

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm_locked(pdev, aspm_disable_flag);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pdev->state_saved = true;
		pci_restore_state(pdev);
		pci_set_master(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		e1000e_reset(adapter);
		ew32(STATUS, E1000_STATUS_LAN_INIT_DONE);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * Called when the error recovery driver tells us that it is OK to resume
 * normal operation. Implementation resembles the second-half of the
 * e1000e_pm_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000e_pm_thaw(&pdev->dev);

	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);
}
static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u8 pba_str[E1000_PBANUM_LENGTH];

	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		"Width x1"),
	       netdev->dev_addr);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	ret_val = e1000e_read_pba_string_generic(hw, pba_str,
						 E1000_PBANUM_LENGTH);
	if (ret_val)
		strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
	       hw->mac.type, hw->phy.type, pba_str);
}
static void e1000_eeprom_checks(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;
	u16 buf = 0;

	if (hw->mac.type != e1000_82573)
		return;

	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
	le16_to_cpus(&buf);
	if (!ret_val && (!(buf & BIT(0)))) {
		/* Deep Smart Power Down (DSPD) */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected DSPD enabled in EEPROM\n");
	}
}
static netdev_features_t e1000_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Jumbo frames on 82579 and newer require CRC stripping, so FCS
	 * delivery cannot be offered there
	 */
	if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_RXFCS;

	/* There is no separate Rx/Tx vlan-accel control, so keep the Tx
	 * flag in the same state as the Rx flag
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}
static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
		adapter->flags |= FLAG_TSO_FORCE;

	if (changed & NETIF_F_RXFCS) {
		if (features & NETIF_F_RXFCS) {
			adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		} else {
			/* reset to the adapter default, which may still
			 * leave CRC stripping disabled
			 */
			if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
				adapter->flags2 |= FLAG2_CRC_STRIPPING;
			else
				adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		}
	}

	netdev->features = features;

	/* ... reinit or reset so the new features take effect ... */

	return 0;
}
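/* Usage note (assumption: standard ethtool invocations, not driver code):
 * the RXFCS handling above is what toggles FCS delivery when the user runs
 *
 *   ethtool -K eth0 rx-fcs on     # keep the Ethernet CRC on received skbs
 *   ethtool -K eth0 rx-all on     # also deliver frames with a bad CRC
 *
 * Turning rx-fcs off returns CRC stripping to the adapter default.
 */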
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	resource_size_t mmio_start, mmio_len;
	static int cards_found;
	u16 aspm_disable_flag = 0;
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	s32 ret_val = 0;

	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* prefer 64-bit DMA, fall back to a 32-bit mask */
	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}

	/* ... request the memory BARs ... */

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	netdev->irq = pdev->irq;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->max_hw_frame_size = ei->max_hw_frame_size;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	/* map the MAC CSR BAR, and the NVM flash BAR on pre-SPT parts */
	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) &&
	    (hw->mac.type < e1000_pch_spt)) {
		resource_size_t flash_start = pci_resource_start(pdev, 1);
		resource_size_t flash_len = pci_resource_len(pdev, 1);

		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* Set default EEE advertisement */
	if (adapter->flags2 & FLAG2_HAS_EEE)
		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;

	/* construct the net_device struct */
	netdev->netdev_ops = &e1000e_netdev_ops;
	e1000e_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;
	/* setup adapter struct */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;

	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM) &&
	    (hw->mac.type < e1000_pch_spt))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");
	/* Set initial default active device features */
	netdev->features = (NETIF_F_SG |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_CSUM);

	/* Set user-changeable features (subset of all device features) */
	netdev->hw_features = netdev->features;
	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= (NETIF_F_SG |
				  NETIF_F_TSO |
				  NETIF_F_TSO6 |
				  NETIF_F_HW_CSUM);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* MTU range: 68 - max_hw_frame_size */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = adapter->max_hw_frame_size -
			  (VLAN_ETH_HLEN + ETH_FCS_LEN);
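	/* Worked example (illustrative): with VLAN_ETH_HLEN = 18 and
	 * ETH_FCS_LEN = 4, a part whose max_hw_frame_size is 1522 gets
	 * max_mtu = 1522 - (18 + 4) = 1500 (standard Ethernet), while a
	 * jumbo-capable part advertising max_hw_frame_size = 9018 would
	 * get max_mtu = 9018 - 22 = 8996.
	 */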
	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/* before reading the NVM, reset the controller to
	 * put the device in a known good starting state
	 */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/* systems with ASPM and others may see the checksum fail on the
	 * first attempt. Let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	e1000_eeprom_checks(adapter);

	/* copy the MAC address */
	if (e1000e_read_mac_addr(&adapter->hw))
		dev_err(&pdev->dev,
			"NVM Read Error while reading MAC address\n");

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
			netdev->dev_addr);
		err = -EIO;
		goto err_eeprom;
	}

	timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = true;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	/* 0x2f advertises 10/100 half+full duplex plus 1000 full duplex */
	adapter->hw.phy.autoneg_advertised = 0x2f;
	/* Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
		if ((hw->mac.type > e1000_ich10lan) &&
		    (eeprom_data & E1000_WUC_PHY_WAKE))
			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			ret_val = e1000_read_nvm(&adapter->hw,
						 NVM_INIT_CONTROL3_PORT_B,
						 1, &eeprom_data);
		else
			ret_val = e1000_read_nvm(&adapter->hw,
						 NVM_INIT_CONTROL3_PORT_A,
						 1, &eeprom_data);
	}

	/* fetch WoL from EEPROM */
	if (ret_val)
		e_dbg("NVM read error getting WoL initial values: %d\n",
		      ret_val);
	else if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
	    (hw->mac.ops.check_mng_mode(hw)))
		device_wakeup_enable(&pdev->dev);

	/* save off EEPROM version number */
	ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
	if (ret_val) {
		e_dbg("NVM read error getting EEPROM version: %d\n", ret_val);
		adapter->eeprom_vers = 0;
	}

	/* reset the hardware with the new settings */
	e1000e_reset(adapter);

	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	if (hw->mac.type >= e1000_pch_cnp)
		adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;

	strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE);

	if (pci_dev_run_wake(pdev) && hw->mac.type != e1000_pch_cnp)
		pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_release_hw_control(adapter);
err_eeprom:
	if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt))
		iounmap(adapter->hw.flash_address);
	e1000e_reset_interrupt_capability(adapter);
err_flashmap:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	/* ... release the memory BARs ... */
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000e_ptp_remove(adapter);

	/* The timers may be rescheduled, so explicitly disable them
	 * from being rescheduled.
	 */
	set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->downshift_task);
	cancel_work_sync(&adapter->update_phy_task);
	cancel_work_sync(&adapter->print_hang_task);

	if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
		cancel_work_sync(&adapter->tx_hwtstamp_work);
		if (adapter->tx_hwtstamp_skb) {
			dev_consume_skb_any(adapter->tx_hwtstamp_skb);
			adapter->tx_hwtstamp_skb = NULL;
		}
	}

	unregister_netdev(netdev);

	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000e_release_hw_control(adapter);

	e1000e_reset_interrupt_capability(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if ((adapter->hw.flash_address) &&
	    (adapter->hw.mac.type < e1000_pch_spt))
		iounmap(adapter->hw.flash_address);
	pci_release_mem_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	pr_info("Intel(R) PRO/1000 Network Driver\n");
	pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");

	return pci_register_driver(&e1000_driver);
}
module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
module_exit(e1000_exit_module);