Lines Matching +full:reg +full:- +full:spacing
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2006 Intel Corporation. */
26 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
27 * configurations. 8255x supports a 32-bit linear addressing
32 * Memory-mapped mode is used exclusively to access the device's
33 * shared-memory structure, the Control/Status Registers (CSR). All
39 * 8255x is highly MII-compliant and all accesses to the PHY go
41 * driver leverages the mii.c library shared with other MII-compliant
44 * Big- and Little-Endian byte order as well as 32- and 64-bit
45 * archs are supported. Weak-ordered memory and non-cache-coherent
51 * together in a fixed-size ring (CBL) thus forming the flexible mode
52 * memory structure. A TCB marked with the suspend-bit indicates
58 * Non-Tx commands (config, multicast setup, etc) are linked
60 * used for both Tx and non-Tx commands is the Command Block (CB).
79 * protocol headers are u32-aligned. Since the RFD is part of the
87 * packet as end-of-list (EL). After updating the link, we remove EL
89 * previous-to-end RFD.
93 * replacement RFDs cannot be allocated, or the RU goes non-active,
95 * and Rx indication and re-allocation happen in the same context,
96 * therefore no locking is required. A software-generated interrupt
98 * scenario where all Rx resources have been indicated and none re-
104 * supported, but driver will accommodate the extra 4-byte VLAN tag
115 * o several entry points race with dev->close
116 * o check for tx-no-resources/stop Q races with tx clean/wake Q
119 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
120 * - Stratus87247: protect MDI control register manipulations
121 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
122 * - add clean low-level I/O emulation for cards with MII-lacking PHYs
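A conceptual sketch of the flexible-mode command ring (CBL) described in the comment above; the field names are illustrative, and the driver's real struct cb additionally carries software-only bookkeeping (skb, dma_addr, next/prev pointers):

struct example_cb {
	__le16 status;		/* completion/OK bits written back by the CU */
	__le16 command;		/* command code; cb_s (suspend) set on the tail CB */
	__le32 link;		/* bus address of the next CB in the ring */
	/* command-specific area (Tx TBD, config bytes, ...) follows */
};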
138 #include <linux/dma-mapping.h>
154 #define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
285 RU_UNINITIALIZED = -1,
388 * cb_command - Command Block flags
472 /* Important: keep total struct u32-aligned */
542 u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);
610 (void)ioread8(&nic->csr->scb.status); in e100_write_flush()
617 spin_lock_irqsave(&nic->cmd_lock, flags); in e100_enable_irq()
618 iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); in e100_enable_irq()
620 spin_unlock_irqrestore(&nic->cmd_lock, flags); in e100_enable_irq()
627 spin_lock_irqsave(&nic->cmd_lock, flags); in e100_disable_irq()
628 iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); in e100_disable_irq()
630 spin_unlock_irqrestore(&nic->cmd_lock, flags); in e100_disable_irq()
637 iowrite32(selective_reset, &nic->csr->port); in e100_hw_reset()
641 iowrite32(software_reset, &nic->csr->port); in e100_hw_reset()
644 /* Mask off our interrupt line - it's unmasked after reset */ in e100_hw_reset()
650 u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); in e100_self_test()
652 /* Passing the self-test is a pretty good indication in e100_self_test()
655 nic->mem->selftest.signature = 0; in e100_self_test()
656 nic->mem->selftest.result = 0xFFFFFFFF; in e100_self_test()
658 iowrite32(selftest | dma_addr, &nic->csr->port); in e100_self_test()
660 /* Wait 10 msec for self-test to complete */ in e100_self_test()
663 /* Interrupts are enabled after self-test */ in e100_self_test()
666 /* Check results of self-test */ in e100_self_test()
667 if (nic->mem->selftest.result != 0) { in e100_self_test()
668 netif_err(nic, hw, nic->netdev, in e100_self_test()
669 "Self-test failed: result=0x%08X\n", in e100_self_test()
670 nic->mem->selftest.result); in e100_self_test()
671 return -ETIMEDOUT; in e100_self_test()
673 if (nic->mem->selftest.signature == 0) { in e100_self_test()
674 netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); in e100_self_test()
675 return -ETIMEDOUT; in e100_self_test()
688 cmd_addr_data[0] = op_ewen << (addr_len - 2); in e100_eeprom_write()
691 cmd_addr_data[2] = op_ewds << (addr_len - 2); in e100_eeprom_write()
693 /* Bit-bang cmds to write word to eeprom */ in e100_eeprom_write()
697 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_write()
700 for (i = 31; i >= 0; i--) { in e100_eeprom_write()
703 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_write()
706 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_write()
713 iowrite8(0, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_write()
718 /* General technique stolen from the eepro100 driver - very clever */
729 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_read()
732 /* Bit-bang to read word from eeprom */ in e100_eeprom_read()
733 for (i = 31; i >= 0; i--) { in e100_eeprom_read()
735 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_read()
738 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_read()
743 ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); in e100_eeprom_read()
745 *addr_len -= (i - 16); in e100_eeprom_read()
753 iowrite8(0, &nic->csr->eeprom_ctrl_lo); in e100_eeprom_read()
764 /* Try reading with an 8-bit addr len to discover actual addr len */ in e100_eeprom_load()
766 nic->eeprom_wc = 1 << addr_len; in e100_eeprom_load()
768 for (addr = 0; addr < nic->eeprom_wc; addr++) { in e100_eeprom_load()
769 nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); in e100_eeprom_load()
770 if (addr < nic->eeprom_wc - 1) in e100_eeprom_load()
771 checksum += le16_to_cpu(nic->eeprom[addr]); in e100_eeprom_load()
776 if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { in e100_eeprom_load()
777 netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); in e100_eeprom_load()
779 return -EAGAIN; in e100_eeprom_load()
790 /* Try reading with an 8-bit addr len to discover actual addr len */ in e100_eeprom_save()
792 nic->eeprom_wc = 1 << addr_len; in e100_eeprom_save()
794 if (start + count >= nic->eeprom_wc) in e100_eeprom_save()
795 return -EINVAL; in e100_eeprom_save()
798 e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); in e100_eeprom_save()
802 for (addr = 0; addr < nic->eeprom_wc - 1; addr++) in e100_eeprom_save()
803 checksum += le16_to_cpu(nic->eeprom[addr]); in e100_eeprom_save()
804 nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); in e100_eeprom_save()
805 e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, in e100_eeprom_save()
806 nic->eeprom[nic->eeprom_wc - 1]); in e100_eeprom_save()
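Both the load and save paths rely on the same convention: the sum of every EEPROM word, including the final checksum word, must come out to 0xBABA. A small illustrative helper (not part of the driver) that computes the CPU-order value stored in the last word:

static u16 example_eeprom_checksum(const __le16 *eeprom, int word_count)
{
	u16 checksum = 0;
	int i;

	/* Sum the data words; the last word holds the stored checksum. */
	for (i = 0; i < word_count - 1; i++)
		checksum += le16_to_cpu(eeprom[i]);

	/* e100_eeprom_save() stores cpu_to_le16(0xBABA - checksum). */
	return 0xBABA - checksum;
}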
819 spin_lock_irqsave(&nic->cmd_lock, flags); in e100_exec_cmd()
823 if (likely(!ioread8(&nic->csr->scb.cmd_lo))) in e100_exec_cmd()
830 err = -EAGAIN; in e100_exec_cmd()
835 iowrite32(dma_addr, &nic->csr->scb.gen_ptr); in e100_exec_cmd()
836 iowrite8(cmd, &nic->csr->scb.cmd_lo); in e100_exec_cmd()
839 spin_unlock_irqrestore(&nic->cmd_lock, flags); in e100_exec_cmd()
851 spin_lock_irqsave(&nic->cb_lock, flags); in e100_exec_cb()
853 if (unlikely(!nic->cbs_avail)) { in e100_exec_cb()
854 err = -ENOMEM; in e100_exec_cb()
858 cb = nic->cb_to_use; in e100_exec_cb()
859 nic->cb_to_use = cb->next; in e100_exec_cb()
860 nic->cbs_avail--; in e100_exec_cb()
861 cb->skb = skb; in e100_exec_cb()
867 if (unlikely(!nic->cbs_avail)) in e100_exec_cb()
868 err = -ENOSPC; in e100_exec_cb()
872 * set S-bit in current first, then clear S-bit in previous. */ in e100_exec_cb()
873 cb->command |= cpu_to_le16(cb_s); in e100_exec_cb()
875 cb->prev->command &= cpu_to_le16(~cb_s); in e100_exec_cb()
877 while (nic->cb_to_send != nic->cb_to_use) { in e100_exec_cb()
878 if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, in e100_exec_cb()
879 nic->cb_to_send->dma_addr))) { in e100_exec_cb()
885 if (err == -ENOSPC) { in e100_exec_cb()
887 schedule_work(&nic->tx_timeout_task); in e100_exec_cb()
891 nic->cuc_cmd = cuc_resume; in e100_exec_cb()
892 nic->cb_to_send = nic->cb_to_send->next; in e100_exec_cb()
897 spin_unlock_irqrestore(&nic->cb_lock, flags); in e100_exec_cb()
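Non-Tx commands are queued through this same path: the caller passes a prepare callback that fills in the CB body before the CU is kicked, e.g. (mirroring how the driver submits the individual-address setup shown further below):

	/* Queue an IA-setup command; e100_setup_iaaddr() fills in
	 * cb->command and cb->u.iaaddr. */
	e100_exec_cb(nic, NULL, e100_setup_iaaddr);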
902 static int mdio_read(struct net_device *netdev, int addr, int reg) in mdio_read() argument
905 return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); in mdio_read()
908 static void mdio_write(struct net_device *netdev, int addr, int reg, int data) in mdio_write() argument
912 nic->mdio_ctrl(nic, addr, mdi_write, reg, data); in mdio_write()
915 /* the standard mdio_ctrl() function for usual MII-compliant hardware */
916 static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) in mdio_ctrl_hw() argument
926 * manipulation of the MDI control registers is a multi-step in mdio_ctrl_hw()
929 spin_lock_irqsave(&nic->mdio_lock, flags); in mdio_ctrl_hw()
930 for (i = 100; i; --i) { in mdio_ctrl_hw()
931 if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) in mdio_ctrl_hw()
936 netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); in mdio_ctrl_hw()
937 spin_unlock_irqrestore(&nic->mdio_lock, flags); in mdio_ctrl_hw()
940 iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); in mdio_ctrl_hw()
944 if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) in mdio_ctrl_hw()
947 spin_unlock_irqrestore(&nic->mdio_lock, flags); in mdio_ctrl_hw()
948 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in mdio_ctrl_hw()
949 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", in mdio_ctrl_hw()
951 addr, reg, data, data_out); in mdio_ctrl_hw()
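A usage sketch of the MDI helpers above (an assumed helper, not a function in the driver); the BMSR link bit is latched-low, which is why e100_phy_init() below reads it twice:

static bool example_link_up(struct nic *nic)
{
	u16 bmsr;

	/* First read clears a latched link-down event, second read
	 * reports the current link state. */
	bmsr = mdio_read(nic->netdev, nic->mii.phy_id, MII_BMSR);
	bmsr = mdio_read(nic->netdev, nic->mii.phy_id, MII_BMSR);

	return bmsr & BMSR_LSTATUS;
}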
959 u32 reg, in mdio_ctrl_phy_82552_v() argument
962 if ((reg == MII_BMCR) && (dir == mdi_write)) { in mdio_ctrl_phy_82552_v()
964 u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, in mdio_ctrl_phy_82552_v()
977 return mdio_ctrl_hw(nic, addr, dir, reg, data); in mdio_ctrl_phy_82552_v()
980 /* Fully software-emulated mdio_ctrl() function for cards without
981 * MII-compliant PHYs.
989 u32 reg, in mdio_ctrl_phy_mii_emulated() argument
997 switch (reg) { in mdio_ctrl_phy_mii_emulated()
999 /* Auto-negotiation, right? */ in mdio_ctrl_phy_mii_emulated()
1011 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in mdio_ctrl_phy_mii_emulated()
1012 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", in mdio_ctrl_phy_mii_emulated()
1014 addr, reg, data); in mdio_ctrl_phy_mii_emulated()
1018 switch (reg) { in mdio_ctrl_phy_mii_emulated()
1020 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in mdio_ctrl_phy_mii_emulated()
1021 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", in mdio_ctrl_phy_mii_emulated()
1023 addr, reg, data); in mdio_ctrl_phy_mii_emulated()
1033 return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); in e100_phy_supports_mii()
1042 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision; in e100_get_defaults()
1043 if (nic->mac == mac_unknown) in e100_get_defaults()
1044 nic->mac = mac_82557_D100_A; in e100_get_defaults()
1046 nic->params.rfds = rfds; in e100_get_defaults()
1047 nic->params.cbs = cbs; in e100_get_defaults()
1050 nic->tx_threshold = 0xE0; in e100_get_defaults()
1053 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | in e100_get_defaults()
1054 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); in e100_get_defaults()
1057 nic->blank_rfd.command = 0; in e100_get_defaults()
1058 nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); in e100_get_defaults()
1059 nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); in e100_get_defaults()
1062 nic->mii.phy_id_mask = 0x1F; in e100_get_defaults()
1063 nic->mii.reg_num_mask = 0x1F; in e100_get_defaults()
1064 nic->mii.dev = nic->netdev; in e100_get_defaults()
1065 nic->mii.mdio_read = mdio_read; in e100_get_defaults()
1066 nic->mii.mdio_write = mdio_write; in e100_get_defaults()
1071 struct config *config = &cb->u.config; in e100_configure()
1073 struct net_device *netdev = nic->netdev; in e100_configure()
1075 cb->command = cpu_to_le16(cb_config); in e100_configure()
1079 config->byte_count = 0x16; /* bytes in this struct */ in e100_configure()
1080 config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ in e100_configure()
1081 config->direct_rx_dma = 0x1; /* reserved */ in e100_configure()
1082 config->standard_tcb = 0x1; /* 1=standard, 0=extended */ in e100_configure()
1083 config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ in e100_configure()
1084 config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ in e100_configure()
1085 config->tx_underrun_retry = 0x3; /* # of underrun retries */ in e100_configure()
1087 config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ in e100_configure()
1088 config->pad10 = 0x6; in e100_configure()
1089 config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ in e100_configure()
1090 config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ in e100_configure()
1091 config->ifs = 0x6; /* x16 = inter-frame spacing */ in e100_configure()
1092 config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ in e100_configure()
1093 config->pad15_1 = 0x1; in e100_configure()
1094 config->pad15_2 = 0x1; in e100_configure()
1095 config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ in e100_configure()
1096 config->fc_delay_hi = 0x40; /* time delay for fc frame */ in e100_configure()
1097 config->tx_padding = 0x1; /* 1=pad short frames */ in e100_configure()
1098 config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ in e100_configure()
1099 config->pad18 = 0x1; in e100_configure()
1100 config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ in e100_configure()
1101 config->pad20_1 = 0x1F; in e100_configure()
1102 config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ in e100_configure()
1103 config->pad21_1 = 0x5; in e100_configure()
1105 config->adaptive_ifs = nic->adaptive_ifs; in e100_configure()
1106 config->loopback = nic->loopback; in e100_configure()
1108 if (nic->mii.force_media && nic->mii.full_duplex) in e100_configure()
1109 config->full_duplex_force = 0x1; /* 1=force, 0=auto */ in e100_configure()
1111 if (nic->flags & promiscuous || nic->loopback) { in e100_configure()
1112 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ in e100_configure()
1113 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ in e100_configure()
1114 config->promiscuous_mode = 0x1; /* 1=on, 0=off */ in e100_configure()
1117 if (unlikely(netdev->features & NETIF_F_RXFCS)) in e100_configure()
1118 config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ in e100_configure()
1120 if (nic->flags & multicast_all) in e100_configure()
1121 config->multicast_all = 0x1; /* 1=accept, 0=no */ in e100_configure()
1124 if (netif_running(nic->netdev) || !(nic->flags & wol_magic)) in e100_configure()
1125 config->magic_packet_disable = 0x1; /* 1=off, 0=on */ in e100_configure()
1127 if (nic->mac >= mac_82558_D101_A4) { in e100_configure()
1128 config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ in e100_configure()
1129 config->mwi_enable = 0x1; /* 1=enable, 0=disable */ in e100_configure()
1130 config->standard_tcb = 0x0; /* 1=standard, 0=extended */ in e100_configure()
1131 config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ in e100_configure()
1132 if (nic->mac >= mac_82559_D101M) { in e100_configure()
1133 config->tno_intr = 0x1; /* TCO stats enable */ in e100_configure()
1135 if (nic->mac >= mac_82551_10) { in e100_configure()
1136 config->byte_count = 0x20; /* extended bytes */ in e100_configure()
1137 config->rx_d102_mode = 0x1; /* GMRC for TCO */ in e100_configure()
1140 config->standard_stat_counter = 0x0; in e100_configure()
1144 if (netdev->features & NETIF_F_RXALL) { in e100_configure()
1145 config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ in e100_configure()
1146 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ in e100_configure()
1147 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ in e100_configure()
1150 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", in e100_configure()
1152 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", in e100_configure()
1154 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", in e100_configure()
1162 * All CPUSaver parameters are 16-bit literals that are part of a
1167 * INTDELAY - This loads the dead-man timer with its initial value.
1172 * the value should probably stay within the 0x200 - 0x1000 range.
1174 * BUNDLEMAX -
1184 * BUNDLESMALL -
1185 * This contains a bit-mask describing the minimum size frame that
1222 const struct firmware *fw = nic->fw; in e100_request_firmware()
1227 /* do not load u-code for ICH devices */ in e100_request_firmware()
1228 if (nic->flags & ich) in e100_request_firmware()
1236 * "fixes for bugs in the B-step hardware (specifically, bugs in e100_request_firmware()
1244 if (nic->mac == mac_82559_D101M) { in e100_request_firmware()
1246 } else if (nic->mac == mac_82559_D101S) { in e100_request_firmware()
1248 } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { in e100_request_firmware()
1261 err = request_firmware(&fw, fw_name, &nic->pdev->dev); in e100_request_firmware()
1265 netif_err(nic, probe, nic->netdev, in e100_request_firmware()
1270 netif_info(nic, probe, nic->netdev, in e100_request_firmware()
1279 if (fw->size != UCODE_SIZE * 4 + 3) { in e100_request_firmware()
1280 netif_err(nic, probe, nic->netdev, in e100_request_firmware()
1282 fw_name, fw->size); in e100_request_firmware()
1284 return ERR_PTR(-EINVAL); in e100_request_firmware()
1288 timer = fw->data[UCODE_SIZE * 4]; in e100_request_firmware()
1289 bundle = fw->data[UCODE_SIZE * 4 + 1]; in e100_request_firmware()
1290 min_size = fw->data[UCODE_SIZE * 4 + 2]; in e100_request_firmware()
1294 netif_err(nic, probe, nic->netdev, in e100_request_firmware()
1298 return ERR_PTR(-EINVAL); in e100_request_firmware()
1303 nic->fw = fw; in e100_request_firmware()
1315 cb->skb = NULL; in e100_setup_ucode()
1318 memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); in e100_setup_ucode()
1321 timer = fw->data[UCODE_SIZE * 4]; in e100_setup_ucode()
1322 bundle = fw->data[UCODE_SIZE * 4 + 1]; in e100_setup_ucode()
1323 min_size = fw->data[UCODE_SIZE * 4 + 2]; in e100_setup_ucode()
1325 /* Insert user-tunable settings in cb->u.ucode */ in e100_setup_ucode()
1326 cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); in e100_setup_ucode()
1327 cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); in e100_setup_ucode()
1328 cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); in e100_setup_ucode()
1329 cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); in e100_setup_ucode()
1330 cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); in e100_setup_ucode()
1331 cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); in e100_setup_ucode()
1333 cb->command = cpu_to_le16(cb_ucode | cb_el); in e100_setup_ucode()
1341 struct cb *cb = nic->cb_to_clean; in e100_load_ucode_wait()
1349 netif_err(nic, probe, nic->netdev, in e100_load_ucode_wait()
1353 nic->cuc_cmd = cuc_start; in e100_load_ucode_wait()
1360 while (!(cb->status & cpu_to_le16(cb_complete))) { in e100_load_ucode_wait()
1362 if (!--counter) break; in e100_load_ucode_wait()
1366 iowrite8(~0, &nic->csr->scb.stat_ack); in e100_load_ucode_wait()
1369 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { in e100_load_ucode_wait()
1370 netif_err(nic, probe, nic->netdev, "ucode load failed\n"); in e100_load_ucode_wait()
1371 err = -EPERM; in e100_load_ucode_wait()
1380 cb->command = cpu_to_le16(cb_iaaddr); in e100_setup_iaaddr()
1381 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); in e100_setup_iaaddr()
1387 cb->command = cpu_to_le16(cb_dump); in e100_dump()
1388 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + in e100_dump()
1398 phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; in e100_phy_check_without_mii()
1401 case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ in e100_phy_check_without_mii()
1402 case I82503: /* Non-MII PHY; UNTESTED! */ in e100_phy_check_without_mii()
1403 case S80C24: /* Non-MII PHY; tested and working */ in e100_phy_check_without_mii()
1410 netif_info(nic, probe, nic->netdev, in e100_phy_check_without_mii()
1411 "found MII-less i82503 or 80c24 or other PHY\n"); in e100_phy_check_without_mii()
1413 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; in e100_phy_check_without_mii()
1414 nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ in e100_phy_check_without_mii()
1416 /* these might be needed for certain MII-less cards... in e100_phy_check_without_mii()
1417 * nic->flags |= ich; in e100_phy_check_without_mii()
1418 * nic->flags |= ich_10h_workaround; */ in e100_phy_check_without_mii()
1435 struct net_device *netdev = nic->netdev; in e100_phy_init()
1441 nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; in e100_phy_init()
1442 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); in e100_phy_init()
1443 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); in e100_phy_init()
1444 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); in e100_phy_init()
1457 netif_err(nic, hw, nic->netdev, in e100_phy_init()
1459 return -EAGAIN; in e100_phy_init()
1462 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in e100_phy_init()
1463 "phy_addr = %d\n", nic->mii.phy_id); in e100_phy_init()
1466 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); in e100_phy_init()
1467 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); in e100_phy_init()
1468 nic->phy = (u32)id_hi << 16 | (u32)id_lo; in e100_phy_init()
1469 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in e100_phy_init()
1470 "phy ID = 0x%08X\n", nic->phy); in e100_phy_init()
1474 if (addr != nic->mii.phy_id) { in e100_phy_init()
1476 } else if (nic->phy != phy_82552_v) { in e100_phy_init()
1487 if (nic->phy == phy_82552_v) in e100_phy_init()
1488 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, in e100_phy_init()
1493 if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { in e100_phy_init()
1495 cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); in e100_phy_init()
1498 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); in e100_phy_init()
1501 if (nic->phy == phy_82552_v) { in e100_phy_init()
1502 u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); in e100_phy_init()
1505 nic->mdio_ctrl = mdio_ctrl_phy_82552_v; in e100_phy_init()
1507 /* Workaround Si not advertising flow-control during autoneg */ in e100_phy_init()
1509 mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); in e100_phy_init()
1512 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); in e100_phy_init()
1514 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); in e100_phy_init()
1515 } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && in e100_phy_init()
1516 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && in e100_phy_init()
1517 (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { in e100_phy_init()
1518 /* enable/disable MDI/MDI-X auto-switching. */ in e100_phy_init()
1519 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, in e100_phy_init()
1520 nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); in e100_phy_init()
1532 netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); in e100_hw_init()
1549 nic->dma_addr + offsetof(struct mem, stats)))) in e100_hw_init()
1561 struct net_device *netdev = nic->netdev; in e100_multi()
1565 cb->command = cpu_to_le16(cb_multi); in e100_multi()
1566 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); in e100_multi()
1571 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, in e100_multi()
1581 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, in e100_set_multicast_list()
1583 netdev_mc_count(netdev), netdev->flags); in e100_set_multicast_list()
1585 if (netdev->flags & IFF_PROMISC) in e100_set_multicast_list()
1586 nic->flags |= promiscuous; in e100_set_multicast_list()
1588 nic->flags &= ~promiscuous; in e100_set_multicast_list()
1590 if (netdev->flags & IFF_ALLMULTI || in e100_set_multicast_list()
1592 nic->flags |= multicast_all; in e100_set_multicast_list()
1594 nic->flags &= ~multicast_all; in e100_set_multicast_list()
1602 struct net_device *dev = nic->netdev; in e100_update_stats()
1603 struct net_device_stats *ns = &dev->stats; in e100_update_stats()
1604 struct stats *s = &nic->mem->stats; in e100_update_stats()
1605 __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : in e100_update_stats()
1606 (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : in e100_update_stats()
1607 &s->complete; in e100_update_stats()
1615 nic->tx_frames = le32_to_cpu(s->tx_good_frames); in e100_update_stats()
1616 nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); in e100_update_stats()
1617 ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); in e100_update_stats()
1618 ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); in e100_update_stats()
1619 ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); in e100_update_stats()
1620 ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); in e100_update_stats()
1621 ns->collisions += nic->tx_collisions; in e100_update_stats()
1622 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + in e100_update_stats()
1623 le32_to_cpu(s->tx_lost_crs); in e100_update_stats()
1624 nic->rx_short_frame_errors += in e100_update_stats()
1625 le32_to_cpu(s->rx_short_frame_errors); in e100_update_stats()
1626 ns->rx_length_errors = nic->rx_short_frame_errors + in e100_update_stats()
1627 nic->rx_over_length_errors; in e100_update_stats()
1628 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); in e100_update_stats()
1629 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); in e100_update_stats()
1630 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); in e100_update_stats()
1631 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); in e100_update_stats()
1632 ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); in e100_update_stats()
1633 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + in e100_update_stats()
1634 le32_to_cpu(s->rx_alignment_errors) + in e100_update_stats()
1635 le32_to_cpu(s->rx_short_frame_errors) + in e100_update_stats()
1636 le32_to_cpu(s->rx_cdt_errors); in e100_update_stats()
1637 nic->tx_deferred += le32_to_cpu(s->tx_deferred); in e100_update_stats()
1638 nic->tx_single_collisions += in e100_update_stats()
1639 le32_to_cpu(s->tx_single_collisions); in e100_update_stats()
1640 nic->tx_multiple_collisions += in e100_update_stats()
1641 le32_to_cpu(s->tx_multiple_collisions); in e100_update_stats()
1642 if (nic->mac >= mac_82558_D101_A4) { in e100_update_stats()
1643 nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); in e100_update_stats()
1644 nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); in e100_update_stats()
1645 nic->rx_fc_unsupported += in e100_update_stats()
1646 le32_to_cpu(s->fc_rcv_unsupported); in e100_update_stats()
1647 if (nic->mac >= mac_82559_D101M) { in e100_update_stats()
1648 nic->tx_tco_frames += in e100_update_stats()
1649 le16_to_cpu(s->xmt_tco_frames); in e100_update_stats()
1650 nic->rx_tco_frames += in e100_update_stats()
1651 le16_to_cpu(s->rcv_tco_frames); in e100_update_stats()
1658 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, in e100_update_stats()
1664 /* Adjust inter-frame spacing (IFS) between two transmits if in e100_adjust_adaptive_ifs()
1665 * we're getting collisions on a half-duplex connection. */ in e100_adjust_adaptive_ifs()
1668 u32 prev = nic->adaptive_ifs; in e100_adjust_adaptive_ifs()
1671 if ((nic->tx_frames / 32 < nic->tx_collisions) && in e100_adjust_adaptive_ifs()
1672 (nic->tx_frames > min_frames)) { in e100_adjust_adaptive_ifs()
1673 if (nic->adaptive_ifs < 60) in e100_adjust_adaptive_ifs()
1674 nic->adaptive_ifs += 5; in e100_adjust_adaptive_ifs()
1675 } else if (nic->tx_frames < min_frames) { in e100_adjust_adaptive_ifs()
1676 if (nic->adaptive_ifs >= 5) in e100_adjust_adaptive_ifs()
1677 nic->adaptive_ifs -= 5; in e100_adjust_adaptive_ifs()
1679 if (nic->adaptive_ifs != prev) in e100_adjust_adaptive_ifs()
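Worked example of the adjustment rule above (values are illustrative): if the last interval saw tx_frames = 3200 and tx_collisions = 150, then 3200 / 32 = 100 < 150, so adaptive_ifs is raised by 5 (it stops growing once it reaches 60); when fewer than min_frames frames were sent it instead steps back down by 5, and any change is pushed to the hardware by re-issuing the configure command, which copies nic->adaptive_ifs into config->adaptive_ifs as shown in e100_configure() above.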
1690 netif_printk(nic, timer, KERN_DEBUG, nic->netdev, in e100_watchdog()
1695 mii_ethtool_gset(&nic->mii, &cmd); in e100_watchdog()
1698 if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { in e100_watchdog()
1699 netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", in e100_watchdog()
1702 } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { in e100_watchdog()
1703 netdev_info(nic->netdev, "NIC Link is Down\n"); in e100_watchdog()
1706 mii_check_link(&nic->mii); in e100_watchdog()
1710 * Unfortunately we have to use a spinlock to not re-enable interrupts in e100_watchdog()
1713 spin_lock_irq(&nic->cmd_lock); in e100_watchdog()
1714 iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); in e100_watchdog()
1716 spin_unlock_irq(&nic->cmd_lock); in e100_watchdog()
1721 if (nic->mac <= mac_82557_D100_C) in e100_watchdog()
1723 e100_set_multicast_list(nic->netdev); in e100_watchdog()
1725 if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) in e100_watchdog()
1727 nic->flags |= ich_10h_workaround; in e100_watchdog()
1729 nic->flags &= ~ich_10h_workaround; in e100_watchdog()
1731 mod_timer(&nic->watchdog, in e100_watchdog()
1739 cb->command = nic->tx_command; in e100_xmit_prepare()
1741 dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len, in e100_xmit_prepare()
1744 if (dma_mapping_error(&nic->pdev->dev, dma_addr)) in e100_xmit_prepare()
1745 return -ENOMEM; in e100_xmit_prepare()
1751 if (unlikely(skb->no_fcs)) in e100_xmit_prepare()
1752 cb->command |= cpu_to_le16(cb_tx_nc); in e100_xmit_prepare()
1754 cb->command &= ~cpu_to_le16(cb_tx_nc); in e100_xmit_prepare()
1757 if ((nic->cbs_avail & ~15) == nic->cbs_avail) in e100_xmit_prepare()
1758 cb->command |= cpu_to_le16(cb_i); in e100_xmit_prepare()
1759 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); in e100_xmit_prepare()
1760 cb->u.tcb.tcb_byte_count = 0; in e100_xmit_prepare()
1761 cb->u.tcb.threshold = nic->tx_threshold; in e100_xmit_prepare()
1762 cb->u.tcb.tbd_count = 1; in e100_xmit_prepare()
1763 cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); in e100_xmit_prepare()
1764 cb->u.tcb.tbd.size = cpu_to_le16(skb->len); in e100_xmit_prepare()
1775 if (nic->flags & ich_10h_workaround) { in e100_xmit_frame()
1780 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, in e100_xmit_frame()
1788 case -ENOSPC: in e100_xmit_frame()
1790 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, in e100_xmit_frame()
1794 case -ENOMEM: in e100_xmit_frame()
1795 /* This is a hard error - log it. */ in e100_xmit_frame()
1796 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, in e100_xmit_frame()
1807 struct net_device *dev = nic->netdev; in e100_tx_clean()
1811 spin_lock(&nic->cb_lock); in e100_tx_clean()
1814 for (cb = nic->cb_to_clean; in e100_tx_clean()
1815 cb->status & cpu_to_le16(cb_complete); in e100_tx_clean()
1816 cb = nic->cb_to_clean = cb->next) { in e100_tx_clean()
1818 netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, in e100_tx_clean()
1819 "cb[%d]->status = 0x%04X\n", in e100_tx_clean()
1820 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), in e100_tx_clean()
1821 cb->status); in e100_tx_clean()
1823 if (likely(cb->skb != NULL)) { in e100_tx_clean()
1824 dev->stats.tx_packets++; in e100_tx_clean()
1825 dev->stats.tx_bytes += cb->skb->len; in e100_tx_clean()
1827 dma_unmap_single(&nic->pdev->dev, in e100_tx_clean()
1828 le32_to_cpu(cb->u.tcb.tbd.buf_addr), in e100_tx_clean()
1829 le16_to_cpu(cb->u.tcb.tbd.size), in e100_tx_clean()
1831 dev_kfree_skb_any(cb->skb); in e100_tx_clean()
1832 cb->skb = NULL; in e100_tx_clean()
1835 cb->status = 0; in e100_tx_clean()
1836 nic->cbs_avail++; in e100_tx_clean()
1839 spin_unlock(&nic->cb_lock); in e100_tx_clean()
1842 if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) in e100_tx_clean()
1843 netif_wake_queue(nic->netdev); in e100_tx_clean()
1850 if (nic->cbs) { in e100_clean_cbs()
1851 while (nic->cbs_avail != nic->params.cbs.count) { in e100_clean_cbs()
1852 struct cb *cb = nic->cb_to_clean; in e100_clean_cbs()
1853 if (cb->skb) { in e100_clean_cbs()
1854 dma_unmap_single(&nic->pdev->dev, in e100_clean_cbs()
1855 le32_to_cpu(cb->u.tcb.tbd.buf_addr), in e100_clean_cbs()
1856 le16_to_cpu(cb->u.tcb.tbd.size), in e100_clean_cbs()
1858 dev_kfree_skb(cb->skb); in e100_clean_cbs()
1860 nic->cb_to_clean = nic->cb_to_clean->next; in e100_clean_cbs()
1861 nic->cbs_avail++; in e100_clean_cbs()
1863 dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); in e100_clean_cbs()
1864 nic->cbs = NULL; in e100_clean_cbs()
1865 nic->cbs_avail = 0; in e100_clean_cbs()
1867 nic->cuc_cmd = cuc_start; in e100_clean_cbs()
1868 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = in e100_clean_cbs()
1869 nic->cbs; in e100_clean_cbs()
1875 unsigned int i, count = nic->params.cbs.count; in e100_alloc_cbs()
1877 nic->cuc_cmd = cuc_start; in e100_alloc_cbs()
1878 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; in e100_alloc_cbs()
1879 nic->cbs_avail = 0; in e100_alloc_cbs()
1881 nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL, in e100_alloc_cbs()
1882 &nic->cbs_dma_addr); in e100_alloc_cbs()
1883 if (!nic->cbs) in e100_alloc_cbs()
1884 return -ENOMEM; in e100_alloc_cbs()
1886 for (cb = nic->cbs, i = 0; i < count; cb++, i++) { in e100_alloc_cbs()
1887 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; in e100_alloc_cbs()
1888 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; in e100_alloc_cbs()
1890 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); in e100_alloc_cbs()
1891 cb->link = cpu_to_le32(nic->cbs_dma_addr + in e100_alloc_cbs()
1895 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; in e100_alloc_cbs()
1896 nic->cbs_avail = count; in e100_alloc_cbs()
1903 if (!nic->rxs) return; in e100_start_receiver()
1904 if (RU_SUSPENDED != nic->ru_running) return; in e100_start_receiver()
1907 if (!rx) rx = nic->rxs; in e100_start_receiver()
1909 /* (Re)start RU if suspended or idle and RFA is non-NULL */ in e100_start_receiver()
1910 if (rx->skb) { in e100_start_receiver()
1911 e100_exec_cmd(nic, ruc_start, rx->dma_addr); in e100_start_receiver()
1912 nic->ru_running = RU_RUNNING; in e100_start_receiver()
1919 if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) in e100_rx_alloc_skb()
1920 return -ENOMEM; in e100_rx_alloc_skb()
1923 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); in e100_rx_alloc_skb()
1924 rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data, in e100_rx_alloc_skb()
1927 if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) { in e100_rx_alloc_skb()
1928 dev_kfree_skb_any(rx->skb); in e100_rx_alloc_skb()
1929 rx->skb = NULL; in e100_rx_alloc_skb()
1930 rx->dma_addr = 0; in e100_rx_alloc_skb()
1931 return -ENOMEM; in e100_rx_alloc_skb()
1937 if (rx->prev->skb) { in e100_rx_alloc_skb()
1938 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; in e100_rx_alloc_skb()
1939 put_unaligned_le32(rx->dma_addr, &prev_rfd->link); in e100_rx_alloc_skb()
1940 dma_sync_single_for_device(&nic->pdev->dev, in e100_rx_alloc_skb()
1941 rx->prev->dma_addr, in e100_rx_alloc_skb()
1952 struct net_device *dev = nic->netdev; in e100_rx_indicate()
1953 struct sk_buff *skb = rx->skb; in e100_rx_indicate()
1954 struct rfd *rfd = (struct rfd *)skb->data; in e100_rx_indicate()
1959 return -EAGAIN; in e100_rx_indicate()
1962 dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr, in e100_rx_indicate()
1964 rfd_status = le16_to_cpu(rfd->status); in e100_rx_indicate()
1966 netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, in e100_rx_indicate()
1975 * This allows for a fast restart without re-enabling in e100_rx_indicate()
1977 if ((le16_to_cpu(rfd->command) & cb_el) && in e100_rx_indicate()
1978 (RU_RUNNING == nic->ru_running)) in e100_rx_indicate()
1980 if (ioread8(&nic->csr->scb.status) & rus_no_res) in e100_rx_indicate()
1981 nic->ru_running = RU_SUSPENDED; in e100_rx_indicate()
1982 dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, in e100_rx_indicate()
1985 return -ENODATA; in e100_rx_indicate()
1989 if (unlikely(dev->features & NETIF_F_RXFCS)) in e100_rx_indicate()
1991 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; in e100_rx_indicate()
1992 if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) in e100_rx_indicate()
1993 actual_size = RFD_BUF_LEN - sizeof(struct rfd); in e100_rx_indicate()
1996 dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN, in e100_rx_indicate()
2002 * This allows for a fast restart without re-enabling interrupts. in e100_rx_indicate()
2005 if ((le16_to_cpu(rfd->command) & cb_el) && in e100_rx_indicate()
2006 (RU_RUNNING == nic->ru_running)) { in e100_rx_indicate()
2008 if (ioread8(&nic->csr->scb.status) & rus_no_res) in e100_rx_indicate()
2009 nic->ru_running = RU_SUSPENDED; in e100_rx_indicate()
2015 skb->protocol = eth_type_trans(skb, nic->netdev); in e100_rx_indicate()
2020 if (unlikely(dev->features & NETIF_F_RXALL)) { in e100_rx_indicate()
2023 nic->rx_over_length_errors++; in e100_rx_indicate()
2032 nic->rx_over_length_errors++; in e100_rx_indicate()
2036 dev->stats.rx_packets++; in e100_rx_indicate()
2037 dev->stats.rx_bytes += (actual_size - fcs_pad); in e100_rx_indicate()
2043 rx->skb = NULL; in e100_rx_indicate()
2057 for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { in e100_rx_clean()
2060 if (-EAGAIN == err || -ENODATA == err) in e100_rx_clean()
2071 if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) in e100_rx_clean()
2074 old_before_last_rx = nic->rx_to_use->prev->prev; in e100_rx_clean()
2075 old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; in e100_rx_clean()
2078 for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { in e100_rx_clean()
2083 new_before_last_rx = nic->rx_to_use->prev->prev; in e100_rx_clean()
2085 /* Set the el-bit on the buffer that is before the last buffer. in e100_rx_clean()
2090 * When the hardware hits the before last buffer with el-bit in e100_rx_clean()
2095 (struct rfd *)new_before_last_rx->skb->data; in e100_rx_clean()
2096 new_before_last_rfd->size = 0; in e100_rx_clean()
2097 new_before_last_rfd->command |= cpu_to_le16(cb_el); in e100_rx_clean()
2098 dma_sync_single_for_device(&nic->pdev->dev, in e100_rx_clean()
2099 new_before_last_rx->dma_addr, in e100_rx_clean()
2106 old_before_last_rfd->command &= ~cpu_to_le16(cb_el); in e100_rx_clean()
2107 dma_sync_single_for_device(&nic->pdev->dev, in e100_rx_clean()
2108 old_before_last_rx->dma_addr, in e100_rx_clean()
2111 old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN in e100_rx_clean()
2113 dma_sync_single_for_device(&nic->pdev->dev, in e100_rx_clean()
2114 old_before_last_rx->dma_addr, in e100_rx_clean()
2121 iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); in e100_rx_clean()
2122 e100_start_receiver(nic, nic->rx_to_clean); in e100_rx_clean()
2131 unsigned int i, count = nic->params.rfds.count; in e100_rx_clean_list()
2133 nic->ru_running = RU_UNINITIALIZED; in e100_rx_clean_list()
2135 if (nic->rxs) { in e100_rx_clean_list()
2136 for (rx = nic->rxs, i = 0; i < count; rx++, i++) { in e100_rx_clean_list()
2137 if (rx->skb) { in e100_rx_clean_list()
2138 dma_unmap_single(&nic->pdev->dev, in e100_rx_clean_list()
2139 rx->dma_addr, RFD_BUF_LEN, in e100_rx_clean_list()
2141 dev_kfree_skb(rx->skb); in e100_rx_clean_list()
2144 kfree(nic->rxs); in e100_rx_clean_list()
2145 nic->rxs = NULL; in e100_rx_clean_list()
2148 nic->rx_to_use = nic->rx_to_clean = NULL; in e100_rx_clean_list()
2154 unsigned int i, count = nic->params.rfds.count; in e100_rx_alloc_list()
2157 nic->rx_to_use = nic->rx_to_clean = NULL; in e100_rx_alloc_list()
2158 nic->ru_running = RU_UNINITIALIZED; in e100_rx_alloc_list()
2160 if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL))) in e100_rx_alloc_list()
2161 return -ENOMEM; in e100_rx_alloc_list()
2163 for (rx = nic->rxs, i = 0; i < count; rx++, i++) { in e100_rx_alloc_list()
2164 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; in e100_rx_alloc_list()
2165 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; in e100_rx_alloc_list()
2168 return -ENOMEM; in e100_rx_alloc_list()
2171 /* Set the el-bit on the buffer that is before the last buffer. in e100_rx_alloc_list()
2175 * When the hardware hits the before last buffer with el-bit and size in e100_rx_alloc_list()
2178 rx = nic->rxs->prev->prev; in e100_rx_alloc_list()
2179 before_last = (struct rfd *)rx->skb->data; in e100_rx_alloc_list()
2180 before_last->command |= cpu_to_le16(cb_el); in e100_rx_alloc_list()
2181 before_last->size = 0; in e100_rx_alloc_list()
2182 dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr, in e100_rx_alloc_list()
2185 nic->rx_to_use = nic->rx_to_clean = nic->rxs; in e100_rx_alloc_list()
2186 nic->ru_running = RU_SUSPENDED; in e100_rx_alloc_list()
2195 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); in e100_intr()
2197 netif_printk(nic, intr, KERN_DEBUG, nic->netdev, in e100_intr()
2205 iowrite8(stat_ack, &nic->csr->scb.stat_ack); in e100_intr()
2209 nic->ru_running = RU_SUSPENDED; in e100_intr()
2211 if (likely(napi_schedule_prep(&nic->napi))) { in e100_intr()
2213 __napi_schedule(&nic->napi); in e100_intr()
2231 /* only re-enable interrupt if stack agrees polling is really done */ in e100_poll()
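A minimal sketch of the NAPI completion pattern the comment above refers to (assumed shape; the driver's actual e100_poll() may differ in detail):

static int example_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);

	/* Only re-enable the interrupt once the stack agrees polling is
	 * really done; otherwise stay in polled mode. */
	if (work_done < budget && napi_complete_done(napi, work_done))
		e100_enable_irq(nic);

	return work_done;
}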
2244 e100_intr(nic->pdev->irq, netdev); in e100_netpoll()
2255 if (!is_valid_ether_addr(addr->sa_data)) in e100_set_mac_address()
2256 return -EADDRNOTAVAIL; in e100_set_mac_address()
2258 eth_hw_addr_set(netdev, addr->sa_data); in e100_set_mac_address()
2267 return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && in e100_asf()
2268 (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && in e100_asf()
2269 !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && in e100_asf()
2270 ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); in e100_asf()
2283 e100_set_multicast_list(nic->netdev); in e100_up()
2285 mod_timer(&nic->watchdog, jiffies); in e100_up()
2286 if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, in e100_up()
2287 nic->netdev->name, nic->netdev))) in e100_up()
2289 netif_wake_queue(nic->netdev); in e100_up()
2290 napi_enable(&nic->napi); in e100_up()
2297 del_timer_sync(&nic->watchdog); in e100_up()
2308 napi_disable(&nic->napi); in e100_down()
2309 netif_stop_queue(nic->netdev); in e100_down()
2311 free_irq(nic->pdev->irq, nic->netdev); in e100_down()
2312 del_timer_sync(&nic->watchdog); in e100_down()
2313 netif_carrier_off(nic->netdev); in e100_down()
2324 schedule_work(&nic->tx_timeout_task); in e100_tx_timeout()
2330 struct net_device *netdev = nic->netdev; in e100_tx_timeout_task()
2332 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, in e100_tx_timeout_task()
2333 "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); in e100_tx_timeout_task()
2351 * packet compares byte-for-byte to the transmitted packet. */ in e100_loopback_test()
2359 if (nic->flags & ich && loopback_mode == lb_phy) in e100_loopback_test()
2362 nic->loopback = loopback_mode; in e100_loopback_test()
2367 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, in e100_loopback_test()
2372 if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { in e100_loopback_test()
2373 err = -ENOMEM; in e100_loopback_test()
2377 memset(skb->data, 0xFF, ETH_DATA_LEN); in e100_loopback_test()
2378 e100_xmit_frame(skb, nic->netdev); in e100_loopback_test()
2382 dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr, in e100_loopback_test()
2385 if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), in e100_loopback_test()
2386 skb->data, ETH_DATA_LEN)) in e100_loopback_test()
2387 err = -EAGAIN; in e100_loopback_test()
2390 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); in e100_loopback_test()
2391 nic->loopback = lb_none; in e100_loopback_test()
2409 mii_ethtool_get_link_ksettings(&nic->mii, cmd); in e100_get_link_ksettings()
2420 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); in e100_set_link_ksettings()
2421 err = mii_ethtool_set_link_ksettings(&nic->mii, cmd); in e100_set_link_ksettings()
2431 strscpy(info->driver, DRV_NAME, sizeof(info->driver)); in e100_get_drvinfo()
2432 strscpy(info->bus_info, pci_name(nic->pdev), in e100_get_drvinfo()
2433 sizeof(info->bus_info)); in e100_get_drvinfo()
2444 return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); in e100_get_regs_len()
2454 regs->version = (1 << 24) | nic->pdev->revision; in e100_get_regs()
2455 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | in e100_get_regs()
2456 ioread8(&nic->csr->scb.cmd_lo) << 16 | in e100_get_regs()
2457 ioread16(&nic->csr->scb.status); in e100_get_regs()
2463 buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, in e100_get_regs()
2464 E100_PHY_REGS - 1 - i); in e100_get_regs()
2465 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); in e100_get_regs()
2468 memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, in e100_get_regs()
2469 sizeof(nic->mem->dump_buf)); in e100_get_regs()
2475 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; in e100_get_wol()
2476 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0; in e100_get_wol()
2483 if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || in e100_set_wol()
2484 !device_can_wakeup(&nic->pdev->dev)) in e100_set_wol()
2485 return -EOPNOTSUPP; in e100_set_wol()
2487 if (wol->wolopts) in e100_set_wol()
2488 nic->flags |= wol_magic; in e100_set_wol()
2490 nic->flags &= ~wol_magic; in e100_set_wol()
2492 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); in e100_set_wol()
2502 return nic->msg_enable; in e100_get_msglevel()
2508 nic->msg_enable = value; in e100_set_msglevel()
2514 return mii_nway_restart(&nic->mii); in e100_nway_reset()
2520 return mii_link_ok(&nic->mii); in e100_get_link()
2526 return nic->eeprom_wc << 1; in e100_get_eeprom_len()
2535 eeprom->magic = E100_EEPROM_MAGIC; in e100_get_eeprom()
2536 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); in e100_get_eeprom()
2546 if (eeprom->magic != E100_EEPROM_MAGIC) in e100_set_eeprom()
2547 return -EINVAL; in e100_set_eeprom()
2549 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); in e100_set_eeprom()
2551 return e100_eeprom_save(nic, eeprom->offset >> 1, in e100_set_eeprom()
2552 (eeprom->len >> 1) + 1); in e100_set_eeprom()
2561 struct param_range *rfds = &nic->params.rfds; in e100_get_ringparam()
2562 struct param_range *cbs = &nic->params.cbs; in e100_get_ringparam()
2564 ring->rx_max_pending = rfds->max; in e100_get_ringparam()
2565 ring->tx_max_pending = cbs->max; in e100_get_ringparam()
2566 ring->rx_pending = rfds->count; in e100_get_ringparam()
2567 ring->tx_pending = cbs->count; in e100_get_ringparam()
2576 struct param_range *rfds = &nic->params.rfds; in e100_set_ringparam()
2577 struct param_range *cbs = &nic->params.cbs; in e100_set_ringparam()
2579 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) in e100_set_ringparam()
2580 return -EINVAL; in e100_set_ringparam()
2584 rfds->count = max(ring->rx_pending, rfds->min); in e100_set_ringparam()
2585 rfds->count = min(rfds->count, rfds->max); in e100_set_ringparam()
2586 cbs->count = max(ring->tx_pending, cbs->min); in e100_set_ringparam()
2587 cbs->count = min(cbs->count, cbs->max); in e100_set_ringparam()
2588 netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", in e100_set_ringparam()
2589 rfds->count, cbs->count); in e100_set_ringparam()
2613 data[0] = !mii_link_ok(&nic->mii); in e100_diag_test()
2615 if (test->flags & ETH_TEST_FL_OFFLINE) { in e100_diag_test()
2618 mii_ethtool_gset(&nic->mii, &cmd); in e100_diag_test()
2627 mii_ethtool_sset(&nic->mii, &cmd); in e100_diag_test()
2633 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; in e100_diag_test()
2648 u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : in e100_set_phys_id()
2657 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : in e100_set_phys_id()
2658 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; in e100_set_phys_id()
2662 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off; in e100_set_phys_id()
2669 mdio_write(netdev, nic->mii.phy_id, led_reg, leds); in e100_set_phys_id()
2680 /* device-specific stats */
2697 return -EOPNOTSUPP; in e100_get_sset_count()
2708 data[i] = ((unsigned long *)&netdev->stats)[i]; in e100_get_ethtool_stats()
2710 data[i++] = nic->tx_deferred; in e100_get_ethtool_stats()
2711 data[i++] = nic->tx_single_collisions; in e100_get_ethtool_stats()
2712 data[i++] = nic->tx_multiple_collisions; in e100_get_ethtool_stats()
2713 data[i++] = nic->tx_fc_pause; in e100_get_ethtool_stats()
2714 data[i++] = nic->rx_fc_pause; in e100_get_ethtool_stats()
2715 data[i++] = nic->rx_fc_unsupported; in e100_get_ethtool_stats()
2716 data[i++] = nic->tx_tco_frames; in e100_get_ethtool_stats()
2717 data[i++] = nic->rx_tco_frames; in e100_get_ethtool_stats()
2718 data[i++] = nic->rx_short_frame_errors; in e100_get_ethtool_stats()
2719 data[i++] = nic->rx_over_length_errors; in e100_get_ethtool_stats()
2763 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); in e100_do_ioctl()
2768 nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem), in e100_alloc()
2769 &nic->dma_addr, GFP_KERNEL); in e100_alloc()
2770 return nic->mem ? 0 : -ENOMEM; in e100_alloc()
2775 if (nic->mem) { in e100_free()
2776 dma_free_coherent(&nic->pdev->dev, sizeof(struct mem), in e100_free()
2777 nic->mem, nic->dma_addr); in e100_free()
2778 nic->mem = NULL; in e100_free()
2789 netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); in e100_open()
2803 netdev_features_t changed = features ^ netdev->features; in e100_set_features()
2808 netdev->features = features; in e100_set_features()
2835 return -ENOMEM; in e100_probe()
2837 netdev->hw_features |= NETIF_F_RXFCS; in e100_probe()
2838 netdev->priv_flags |= IFF_SUPP_NOFCS; in e100_probe()
2839 netdev->hw_features |= NETIF_F_RXALL; in e100_probe()
2841 netdev->netdev_ops = &e100_netdev_ops; in e100_probe()
2842 netdev->ethtool_ops = &e100_ethtool_ops; in e100_probe()
2843 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; in e100_probe()
2844 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); in e100_probe()
2847 netif_napi_add_weight(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); in e100_probe()
2848 nic->netdev = netdev; in e100_probe()
2849 nic->pdev = pdev; in e100_probe()
2850 nic->msg_enable = (1 << debug) - 1; in e100_probe()
2851 nic->mdio_ctrl = mdio_ctrl_hw; in e100_probe()
2855 netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); in e100_probe()
2860 netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); in e100_probe()
2861 err = -ENODEV; in e100_probe()
2866 netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); in e100_probe()
2870 if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { in e100_probe()
2871 netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); in e100_probe()
2875 SET_NETDEV_DEV(netdev, &pdev->dev); in e100_probe()
2878 netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); in e100_probe()
2880 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr)); in e100_probe()
2881 if (!nic->csr) { in e100_probe()
2882 netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); in e100_probe()
2883 err = -ENOMEM; in e100_probe()
2887 if (ent->driver_data) in e100_probe()
2888 nic->flags |= ich; in e100_probe()
2890 nic->flags &= ~ich; in e100_probe()
2895 if (nic->mac < mac_82558_D101_A4) in e100_probe()
2896 netdev->features |= NETIF_F_VLAN_CHALLENGED; in e100_probe()
2899 spin_lock_init(&nic->cb_lock); in e100_probe()
2900 spin_lock_init(&nic->cmd_lock); in e100_probe()
2901 spin_lock_init(&nic->mdio_lock); in e100_probe()
2904 * funky state and has an interrupt pending - hint: we don't have the in e100_probe()
2910 timer_setup(&nic->watchdog, e100_watchdog, 0); in e100_probe()
2912 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); in e100_probe()
2915 netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); in e100_probe()
2924 eth_hw_addr_set(netdev, (u8 *)nic->eeprom); in e100_probe()
2925 if (!is_valid_ether_addr(netdev->dev_addr)) { in e100_probe()
2927 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); in e100_probe()
2928 err = -EAGAIN; in e100_probe()
2931 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); in e100_probe()
2936 if ((nic->mac >= mac_82558_D101_A4) && in e100_probe()
2937 (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { in e100_probe()
2938 nic->flags |= wol_magic; in e100_probe()
2939 device_set_wakeup_enable(&pdev->dev, true); in e100_probe()
2945 strcpy(netdev->name, "eth%d"); in e100_probe()
2947 netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n"); in e100_probe()
2950 nic->cbs_pool = dma_pool_create(netdev->name, in e100_probe()
2951 &nic->pdev->dev, in e100_probe()
2952 nic->params.cbs.max * sizeof(struct cb), in e100_probe()
2955 if (!nic->cbs_pool) { in e100_probe()
2956 netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); in e100_probe()
2957 err = -ENOMEM; in e100_probe()
2960 netif_info(nic, probe, nic->netdev, in e100_probe()
2963 pdev->irq, netdev->dev_addr); in e100_probe()
2972 pci_iounmap(pdev, nic->csr); in e100_probe()
2990 pci_iounmap(pdev, nic->csr); in e100_remove()
2991 dma_pool_destroy(nic->cbs_pool); in e100_remove()
2999 #define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
3000 #define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
3011 if ((nic->flags & wol_magic) | e100_asf(nic)) { in __e100_shutdown()
3012 /* enable reverse auto-negotiation */ in __e100_shutdown()
3013 if (nic->phy == phy_82552_v) { in __e100_shutdown()
3014 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, in __e100_shutdown()
3017 mdio_write(netdev, nic->mii.phy_id, in __e100_shutdown()
3062 /* disable reverse auto-negotiation */ in e100_resume()
3063 if (nic->phy == phy_82552_v) { in e100_resume()
3064 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, in e100_resume()
3067 mdio_write(netdev, nic->mii.phy_id, in e100_resume()
3088 /* ------------------ PCI Error Recovery infrastructure -------------- */
3090 * e100_io_error_detected - called when PCI error is detected.
3113 * e100_io_slot_reset - called after the pci bus has been reset.
3124 pr_err("Cannot re-enable PCI device after reset\n"); in e100_io_slot_reset()
3130 if (0 != PCI_FUNC(pdev->devfn)) in e100_io_slot_reset()
3139 * e100_io_resume - resume normal operations
3156 mod_timer(&nic->watchdog, jiffies); in e100_io_resume()
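The three callbacks documented above are wired into the PCI core through a struct pci_error_handlers; the driver's registration looks essentially like this (sketch):

static const struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};

The e100 pci_driver then points its .err_handler field at this table.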
3183 if (((1 << debug) - 1) & NETIF_MSG_DRV) { in e100_init_module()