Lines Matching +full:need +full:- +full:phy +full:- +full:for +full:- +full:wake

1 // SPDX-License-Identifier: GPL-2.0-only
3 * drivers/net/ethernet/micrel/ksz884x.c - Micrel KSZ8841/2 PCI Ethernet driver
5 * Copyright (c) 2009-2010 Micrel, Inc.
271 #define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
293 #define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
482 (KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
587 #define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
588 #define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
589 #define STATIC_MAC_TABLE_VALID 00-00080000-00000000
590 #define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
591 #define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
592 #define STATIC_MAC_TABLE_FID 00-03C00000-00000000
606 #define VLAN_TABLE_VID 00-00000000-00000FFF
607 #define VLAN_TABLE_FID 00-00000000-0000F000
608 #define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
609 #define VLAN_TABLE_VALID 00-00000000-00080000
621 #define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
622 #define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
623 #define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
624 #define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
625 #define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
626 #define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
627 #define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
628 #define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
649 #define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
650 #define MIB_COUNTER_VALID 00-00000000-40000000
651 #define MIB_COUNTER_OVERFLOW 00-00000000-80000000
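These xx-xxxxxxxx-xxxxxxxx values document bit layouts rather than usable C constants: each entry spans the indirect-access data registers, with the low word going to KS884X_ACC_DATA_0 and the high word to KS884X_ACC_DATA_4. A minimal sketch of building a static MAC entry per this layout, assuming the driver's sw_w_table_64(hw, table, addr, data_hi, data_lo) signature and TABLE_STATIC_MAC selector (neither shown in this listing):

	/* Illustrative only: the low 48 bits hold the MAC address; bit 19
	 * of the high word is STATIC_MAC_TABLE_VALID (00-00080000-...) and
	 * bit 16 forwards to port 1 (STATIC_MAC_TABLE_FWD_PORTS).
	 */
	static void example_w_sta_entry(struct ksz_hw *hw, u16 addr, const u8 *mac)
	{
		u32 data_lo = ((u32) mac[2] << 24) | ((u32) mac[3] << 16) |
			((u32) mac[4] << 8) | mac[5];
		u32 data_hi = ((u32) mac[0] << 8) | mac[1];

		data_hi |= 0x00080000;	/* valid */
		data_hi |= 0x00010000;	/* forward to port 1 */
		sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo);
	}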
670 #define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i))
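MAC_ADDR_ORDER() mirrors the byte index so the chip's reversed register layout can be walked with a forward loop; worked out for ETH_ALEN == 6:

	/* i:               0  1  2  3  4  5
	 * MAC_ADDR_ORDER:  5  4  3  2  1  0
	 * hence hw_set_addr()/hw_read_addr() later in this listing store
	 * and fetch the MAC address with its byte order reversed.
	 */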
686 #define HW_TO_DEV_PORT(port) (port - 1)
865 * struct ksz_hw_desc - Hardware descriptor data structure
879 * struct ksz_sw_desc - Software descriptor data structure
891 * struct ksz_dma_buf - OS dependent DMA buffer data structure
903 * struct ksz_desc - Descriptor structure
905 * @sw: Cached memory to hold hardware descriptor values for
916 #define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))
919 * struct ksz_desc_info - Descriptor information data structure
926 * @avail: Number of descriptors available for use.
927 * @last: Index for last descriptor released to hardware.
928 * @next: Index for next descriptor available for use.
929 * @mask: Mask for index wrapping.
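Since ksz_check_desc_num() (later in this listing) forces alloc to a power of two, ring indices wrap with this mask instead of a modulo; e.g. with alloc == 64:

	/* mask = alloc - 1 = 63, so advancing an index is simply
	 *	next = (next + 1) & mask;
	 * which walks 0..63 and wraps to 0 (cf. get_rx_pkt()/get_tx_pkt()).
	 */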
959 * struct ksz_mac_table - Static MAC table data structure
981 * struct ksz_vlan_table - VLAN table data structure
1045 * struct ksz_port_mib - Port MIB data structure
1050 * @counter: 64-bit MIB counter value.
1074 * struct ksz_port_cfg - Port configuration data structure
1092 * struct ksz_switch - KSZ8842 switch data structure
1096 * @diffserv: DiffServ priority settings. Possible values from the 6-bit ToS
1098 * @p_802_1p: 802.1P priority settings. Possible values from the 3-bit 802.1p
1100 * @br_addr: Bridge address. Used for STP.
1101 * @other_addr: Other MAC address. Used for multiple network device mode.
1103 * @member: Current port membership. Used for STP.
1123 * struct ksz_port_info - Port information data structure
1127 * @advertised: Advertised auto-negotiation setting. Used to determine link.
1128 * @partner: Auto-negotiation partner setting. Used to determine link.
1156 * struct ksz_hw - KSZ884X hardware data structure
1162 * @dst_ports: Destination ports in switch for transmission.
1163 * @id: Hardware ID. Used for display only.
1173 * @tx_int_cnt: Transmit interrupt count. Used for TX optimization.
1174 * @tx_int_mask: Transmit interrupt mask. Used for TX optimization.
1175 * @tx_size: Transmit data size. Used for TX optimization.
1248 * struct ksz_port - Virtual port data structure
1249 * @duplex: Duplex mode setting. 1 for half duplex, 2 for full
1250 * duplex, and 0 for auto, which normally results in full
1252 * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and
1253 * 0 for auto, which normally results in 100 Mbit.
1254 * @force_link: Force link setting. 0 for auto-negotiation, and 1 for
1256 * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow
1257 * control, and PHY_FLOW_CTRL for flow control.
1258 * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
1259 * Mbit PHY.
1283 * struct ksz_timer_info - Timer information data structure
1286 * @max: Number of times to run timer; -1 for infinity.
1297 * struct ksz_shared_mem - OS dependent shared memory data structure
1313 * struct ksz_counter_info - OS dependent counter information data structure
1325 * struct dev_info - Network device information data structure
1329 * @desc_pool: Physical memory used for descriptor pool.
1333 * @last_skb: Socket buffer allocated for descriptor rx fragments.
1334 * @skb_index: Buffer index for receiving fragments.
1335 * @skb_len: Buffer length for receiving fragments.
1338 * @counter: Used for MIB reading.
1344 * @wol_enable: Wake-on-LAN enable set by ethtool.
1345 * @wol_support: Wake-on-LAN support used by ethtool.
1346 * @pme_wait: Used for KSZ8841 power management.
1380 * struct dev_priv - Network device private data structure
1384 * @proc_sem: Semaphore for proc accessing.
1426 writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS); in hw_ack_intr()
1431 hw->intr_blocked = hw->intr_mask; in hw_dis_intr()
1432 writel(0, hw->io + KS884X_INTERRUPTS_ENABLE); in hw_dis_intr()
1433 hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE); in hw_dis_intr()
1438 hw->intr_set = interrupt; in hw_set_intr()
1439 writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE); in hw_set_intr()
1444 hw->intr_blocked = 0; in hw_ena_intr()
1445 hw_set_intr(hw, hw->intr_mask); in hw_ena_intr()
1450 hw->intr_mask &= ~(bit); in hw_dis_intr_bit()
1457 read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE); in hw_turn_off_intr()
1458 hw->intr_set = read_intr & ~interrupt; in hw_turn_off_intr()
1459 writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE); in hw_turn_off_intr()
1464 * hw_turn_on_intr - turn on specified interrupts
1473 hw->intr_mask |= bit; in hw_turn_on_intr()
1475 if (!hw->intr_blocked) in hw_turn_on_intr()
1476 hw_set_intr(hw, hw->intr_mask); in hw_turn_on_intr()
1483 read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE); in hw_ena_intr_bit()
1484 hw->intr_set = read_intr | interrupt; in hw_ena_intr_bit()
1485 writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE); in hw_ena_intr_bit()
1490 *status = readl(hw->io + KS884X_INTERRUPTS_STATUS); in hw_read_intr()
1491 *status = *status & hw->intr_set; in hw_read_intr()
1501 * hw_block_intr - block hardware interrupts
1513 if (!hw->intr_blocked) { in hw_block_intr()
1515 interrupt = hw->intr_blocked; in hw_block_intr()
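The value returned by hw_block_intr() is the previously blocked mask, letting callers bracket register access; a minimal sketch, assuming the driver's hw_restore_intr() counterpart (not shown in this listing):

	uint saved = hw_block_intr(hw);	/* disable and remember prior state */
	/* ... touch hardware state without racing the interrupt handler ... */
	hw_restore_intr(hw, saved);	/* re-enable only if we disabled */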
1527 desc->phw->ctrl.data = cpu_to_le32(status.data); in reset_desc()
1532 desc->sw.ctrl.tx.hw_owned = 1; in release_desc()
1533 if (desc->sw.buf_size != desc->sw.buf.data) { in release_desc()
1534 desc->sw.buf_size = desc->sw.buf.data; in release_desc()
1535 desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data); in release_desc()
1537 desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data); in release_desc()
1542 *desc = &info->ring[info->last]; in get_rx_pkt()
1543 info->last++; in get_rx_pkt()
1544 info->last &= info->mask; in get_rx_pkt()
1545 info->avail--; in get_rx_pkt()
1546 (*desc)->sw.buf.data &= ~KS_DESC_RX_MASK; in get_rx_pkt()
1551 desc->phw->addr = cpu_to_le32(addr); in set_rx_buf()
1556 desc->sw.buf.rx.buf_size = len; in set_rx_len()
1562 *desc = &info->ring[info->next]; in get_tx_pkt()
1563 info->next++; in get_tx_pkt()
1564 info->next &= info->mask; in get_tx_pkt()
1565 info->avail--; in get_tx_pkt()
1566 (*desc)->sw.buf.data &= ~KS_DESC_TX_MASK; in get_tx_pkt()
1571 desc->phw->addr = cpu_to_le32(addr); in set_tx_buf()
1576 desc->sw.buf.tx.buf_size = len; in set_tx_len()
1586 readw(hw->io + reg); \
1590 * sw_r_table - read 4 bytes of data from switch table
1608 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); in sw_r_table()
1610 *data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET); in sw_r_table()
1616 * sw_w_table_64 - write 8 bytes of data to the switch table
1636 writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET); in sw_w_table_64()
1637 writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET); in sw_w_table_64()
1639 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); in sw_w_table_64()
1646 * sw_w_sta_mac_table - write to the static MAC table
1684 * sw_r_vlan_table - read from the VLAN table
1694 * Return 0 if the entry is valid; otherwise -1.
1709 return -1; in sw_r_vlan_table()
1713 * port_r_mib_cnt - read MIB counter
1734 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); in port_r_mib_cnt()
1737 for (timeout = 100; timeout > 0; timeout--) { in port_r_mib_cnt()
1738 data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET); in port_r_mib_cnt()
1752 * port_r_mib_pkt - read dropped packet counts
1776 writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); in port_r_mib_pkt()
1778 data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET); in port_r_mib_pkt()
1788 data -= cur; in port_r_mib_pkt()
1793 index -= KS_MIB_PACKET_DROPPED_TX - in port_r_mib_pkt()
1799 * port_r_cnt - read MIB counters periodically
1807 * Return non-zero when not all counters have been read.
1811 struct ksz_port_mib *mib = &hw->port_mib[port]; in port_r_cnt()
1813 if (mib->mib_start < PORT_COUNTER_NUM) in port_r_cnt()
1814 while (mib->cnt_ptr < PORT_COUNTER_NUM) { in port_r_cnt()
1815 port_r_mib_cnt(hw, port, mib->cnt_ptr, in port_r_cnt()
1816 &mib->counter[mib->cnt_ptr]); in port_r_cnt()
1817 ++mib->cnt_ptr; in port_r_cnt()
1819 if (hw->mib_cnt > PORT_COUNTER_NUM) in port_r_cnt()
1820 port_r_mib_pkt(hw, port, mib->dropped, in port_r_cnt()
1821 &mib->counter[PORT_COUNTER_NUM]); in port_r_cnt()
1822 mib->cnt_ptr = 0; in port_r_cnt()
1827 * port_init_cnt - initialize MIB counter values
1836 struct ksz_port_mib *mib = &hw->port_mib[port]; in port_init_cnt()
1838 mib->cnt_ptr = 0; in port_init_cnt()
1839 if (mib->mib_start < PORT_COUNTER_NUM) in port_init_cnt()
1841 port_r_mib_cnt(hw, port, mib->cnt_ptr, in port_init_cnt()
1842 &mib->counter[mib->cnt_ptr]); in port_init_cnt()
1843 ++mib->cnt_ptr; in port_init_cnt()
1844 } while (mib->cnt_ptr < PORT_COUNTER_NUM); in port_init_cnt()
1845 if (hw->mib_cnt > PORT_COUNTER_NUM) in port_init_cnt()
1846 port_r_mib_pkt(hw, port, mib->dropped, in port_init_cnt()
1847 &mib->counter[PORT_COUNTER_NUM]); in port_init_cnt()
1848 memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM); in port_init_cnt()
1849 mib->cnt_ptr = 0; in port_init_cnt()
1857 * port_chk - check port register bits
1875 data = readw(hw->io + addr); in port_chk()
1880 * port_cfg - set port register bits
1897 data = readw(hw->io + addr); in port_cfg()
1902 writew(data, hw->io + addr); in port_cfg()
1906 * port_chk_shift - check port bit
1922 data = readw(hw->io + addr); in port_chk_shift()
1928 * port_cfg_shift - set port bit
1943 data = readw(hw->io + addr); in port_cfg_shift()
1949 writew(data, hw->io + addr); in port_cfg_shift()
1953 * port_r8 - read byte from port register
1967 *data = readb(hw->io + addr); in port_r8()
1971 * port_r16 - read word from port register.
1985 *data = readw(hw->io + addr); in port_r16()
1989 * port_w16 - write word to port register.
2003 writew(data, hw->io + addr); in port_w16()
2007 * sw_chk - check switch register bits
2021 data = readw(hw->io + addr); in sw_chk()
2026 * sw_cfg - set switch register bits
2038 data = readw(hw->io + addr); in sw_cfg()
2043 writew(data, hw->io + addr); in sw_cfg()
2067 * sw_cfg_broad_storm - configure broadcast storm threshold
2081 data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET); in sw_cfg_broad_storm()
2084 writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET); in sw_cfg_broad_storm()
2088 * sw_get_broad_storm - get broadcast storm threshold
2099 data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET); in sw_get_broad_storm()
2108 * sw_dis_broad_storm - disable broadcast storm
2120 * sw_ena_broad_storm - enable broadcast storm
2128 sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per); in sw_ena_broad_storm()
2133 * sw_init_broad_storm - initialize broadcast storm
2142 hw->ksz_switch->broad_per = 1; in sw_init_broad_storm()
2143 sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per); in sw_init_broad_storm()
2144 for (port = 0; port < TOTAL_PORT_NUM; port++) in sw_init_broad_storm()
2150 * hw_cfg_broad_storm - configure broadcast storm
2164 hw->ksz_switch->broad_per = percent; in hw_cfg_broad_storm()
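The threshold is expressed as a percentage of traffic; an illustrative call, raising the 1% default programmed by sw_init_broad_storm() above:

	hw_cfg_broad_storm(hw, 5);	/* cap broadcasts at 5% */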
2168 * sw_dis_prio_rate - disable switch priority rate
2180 writel(0, hw->io + addr); in sw_dis_prio_rate()
2184 * sw_init_prio_rate - initialize switch priority rate
2193 struct ksz_switch *sw = hw->ksz_switch; in sw_init_prio_rate()
2195 for (port = 0; port < TOTAL_PORT_NUM; port++) { in sw_init_prio_rate()
2196 for (prio = 0; prio < PRIO_QUEUES; prio++) { in sw_init_prio_rate()
2197 sw->port_cfg[port].rx_rate[prio] = in sw_init_prio_rate()
2198 sw->port_cfg[port].tx_rate[prio] = 0; in sw_init_prio_rate()
2251 if (!(hw->overrides & FAST_AGING)) { in sw_flush_dyn_mac_table()
2337 for (port = 0; port < TOTAL_PORT_NUM; port++) { in sw_init_mirror()
2418 * sw_dis_diffserv - disable switch DiffServ priority
2430 * sw_dis_802_1p - disable switch 802.1p priority
2442 * sw_cfg_replace_null_vid - enable switch null VID replacement
2453 * sw_cfg_replace_vid - enable switch 802.1p priority re-mapping
2458 * This routine enables the 802.1p priority re-mapping function of the switch.
2469 * sw_cfg_port_based - configure switch port based priority
2483 hw->ksz_switch->port_cfg[port].port_prio = prio; in sw_cfg_port_based()
2492 * sw_dis_multi_queue - disable transmit multiple queues
2505 * sw_init_prio - initialize switch priority
2514 struct ksz_switch *sw = hw->ksz_switch; in sw_init_prio()
2520 sw->p_802_1p[0] = 0; in sw_init_prio()
2521 sw->p_802_1p[1] = 0; in sw_init_prio()
2522 sw->p_802_1p[2] = 1; in sw_init_prio()
2523 sw->p_802_1p[3] = 1; in sw_init_prio()
2524 sw->p_802_1p[4] = 2; in sw_init_prio()
2525 sw->p_802_1p[5] = 2; in sw_init_prio()
2526 sw->p_802_1p[6] = 3; in sw_init_prio()
2527 sw->p_802_1p[7] = 3; in sw_init_prio()
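The default map above folds the eight 802.1p tag priorities onto the four transmit queues in pairs:

	/* tag priority: 0 1 2 3 4 5 6 7
	 * tx queue:     0 0 1 1 2 2 3 3
	 */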
2533 for (tos = 0; tos < DIFFSERV_ENTRIES; tos++) in sw_init_prio()
2534 sw->diffserv[tos] = 0; in sw_init_prio()
2537 for (port = 0; port < TOTAL_PORT_NUM; port++) { in sw_init_prio()
2543 sw->port_cfg[port].port_prio = 0; in sw_init_prio()
2544 sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio); in sw_init_prio()
2550 * port_get_def_vid - get port default VID.
2563 *vid = readw(hw->io + addr); in port_get_def_vid()
2567 * sw_init_vlan - initialize switch VLAN
2576 struct ksz_switch *sw = hw->ksz_switch; in sw_init_vlan()
2579 for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) { in sw_init_vlan()
2581 &sw->vlan_table[entry].vid, in sw_init_vlan()
2582 &sw->vlan_table[entry].fid, in sw_init_vlan()
2583 &sw->vlan_table[entry].member); in sw_init_vlan()
2586 for (port = 0; port < TOTAL_PORT_NUM; port++) { in sw_init_vlan()
2587 port_get_def_vid(hw, port, &sw->port_cfg[port].vid); in sw_init_vlan()
2588 sw->port_cfg[port].member = PORT_MASK; in sw_init_vlan()
2593 * sw_cfg_port_base_vlan - configure port-based VLAN membership
2596 * @member: The port-based VLAN membership.
2598 * This routine configures the port-based VLAN membership of the port.
2608 data = readb(hw->io + addr); in sw_cfg_port_base_vlan()
2611 writeb(data, hw->io + addr); in sw_cfg_port_base_vlan()
2613 hw->ksz_switch->port_cfg[port].member = member; in sw_cfg_port_base_vlan()
2617 * sw_get_addr - get the switch MAC address.
2627 for (i = 0; i < 6; i += 2) { in sw_get_addr()
2628 mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i); in sw_get_addr()
2629 mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i); in sw_get_addr()
2634 * sw_set_addr - configure switch MAC address
2644 for (i = 0; i < 6; i += 2) { in sw_set_addr()
2645 writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i); in sw_set_addr()
2646 writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i); in sw_set_addr()
2651 * sw_set_global_ctrl - set switch global control
2661 data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET); in sw_set_global_ctrl()
2663 writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET); in sw_set_global_ctrl()
2665 data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET); in sw_set_global_ctrl()
2674 if (hw->overrides & FAST_AGING) in sw_set_global_ctrl()
2678 writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET); in sw_set_global_ctrl()
2680 data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET); in sw_set_global_ctrl()
2684 writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET); in sw_set_global_ctrl()
2697 * port_set_stp_state - configure port spanning tree state
2716 * No need to turn on transmit because of port direct mode. in port_set_stp_state()
2734 * Need to set up the static MAC table with override to keep receiving BPDU in port_set_stp_state()
2746 hw->ksz_switch->port_cfg[port].stp_state = state; in port_set_stp_state()
2755 * sw_clr_sta_mac_table - clear static MAC table
2765 for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) { in sw_clr_sta_mac_table()
2766 entry = &hw->ksz_switch->mac_table[i]; in sw_clr_sta_mac_table()
2768 entry->mac_addr, entry->ports, in sw_clr_sta_mac_table()
2769 entry->override, 0, in sw_clr_sta_mac_table()
2770 entry->use_fid, entry->fid); in sw_clr_sta_mac_table()
2775 * sw_init_stp - initialize switch spanning tree support
2784 entry = &hw->ksz_switch->mac_table[STP_ENTRY]; in sw_init_stp()
2785 entry->mac_addr[0] = 0x01; in sw_init_stp()
2786 entry->mac_addr[1] = 0x80; in sw_init_stp()
2787 entry->mac_addr[2] = 0xC2; in sw_init_stp()
2788 entry->mac_addr[3] = 0x00; in sw_init_stp()
2789 entry->mac_addr[4] = 0x00; in sw_init_stp()
2790 entry->mac_addr[5] = 0x00; in sw_init_stp()
2791 entry->ports = HOST_MASK; in sw_init_stp()
2792 entry->override = 1; in sw_init_stp()
2793 entry->valid = 1; in sw_init_stp()
2795 entry->mac_addr, entry->ports, in sw_init_stp()
2796 entry->override, entry->valid, in sw_init_stp()
2797 entry->use_fid, entry->fid); in sw_init_stp()
2801 * sw_block_addr - block certain packets from the host port
2811 for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) { in sw_block_addr()
2812 entry = &hw->ksz_switch->mac_table[i]; in sw_block_addr()
2813 entry->valid = 0; in sw_block_addr()
2815 entry->mac_addr, entry->ports, in sw_block_addr()
2816 entry->override, entry->valid, in sw_block_addr()
2817 entry->use_fid, entry->fid); in sw_block_addr()
2821 static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data) in hw_r_phy_ctrl() argument
2823 *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET); in hw_r_phy_ctrl()
2826 static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data) in hw_w_phy_ctrl() argument
2828 writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET); in hw_w_phy_ctrl()
2831 static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data) in hw_r_phy_link_stat() argument
2833 *data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET); in hw_r_phy_link_stat()
2836 static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data) in hw_r_phy_auto_neg() argument
2838 *data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET); in hw_r_phy_auto_neg()
2841 static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data) in hw_w_phy_auto_neg() argument
2843 writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET); in hw_w_phy_auto_neg()
2846 static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data) in hw_r_phy_rem_cap() argument
2848 *data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET); in hw_r_phy_rem_cap()
2851 static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data) in hw_r_phy_crossover() argument
2853 *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET); in hw_r_phy_crossover()
2856 static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data) in hw_w_phy_crossover() argument
2858 writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET); in hw_w_phy_crossover()
2861 static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data) in hw_r_phy_polarity() argument
2863 *data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET); in hw_r_phy_polarity()
2866 static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data) in hw_w_phy_polarity() argument
2868 writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET); in hw_w_phy_polarity()
2871 static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data) in hw_r_phy_link_md() argument
2873 *data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET); in hw_r_phy_link_md()
2876 static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data) in hw_w_phy_link_md() argument
2878 writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET); in hw_w_phy_link_md()
2882 * hw_r_phy - read data from PHY register
2885 * @reg: PHY register to read.
2888 * This routine reads data from the PHY register.
2892 int phy; in hw_r_phy() local
2894 phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg; in hw_r_phy()
2895 *val = readw(hw->io + phy); in hw_r_phy()
2899 * hw_w_phy - write data to PHY register
2902 * @reg: PHY register to write.
2905 * This routine writes data to the PHY register.
2909 int phy; in hw_w_phy() local
2911 phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg; in hw_w_phy()
2912 writew(val, hw->io + phy); in hw_w_phy()
2935 data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET); in drop_gpio()
2937 writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET); in drop_gpio()
2944 data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET); in raise_gpio()
2946 writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET); in raise_gpio()
2953 data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET); in state_gpio()
2970 for (i = 15; i >= 0; i--) { in spi_r()
2986 for (i = 15; i >= 0; i--) { in spi_w()
3002 for (i = 1; i >= 0; i--) { in spi_reg()
3009 for (i = 5; i >= 0; i--) { in spi_reg()
3028 * eeprom_read - read from AT93C46 EEPROM
3051 * eeprom_write - write to AT93C46 EEPROM
3081 } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout); in eeprom_write()
3098 } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout); in eeprom_write()
3116 switch (port->flow_ctrl) { in advertised_flow_ctrl()
3134 rx_cfg = hw->rx_cfg; in set_flow_ctrl()
3135 tx_cfg = hw->tx_cfg; in set_flow_ctrl()
3137 hw->rx_cfg |= DMA_RX_FLOW_ENABLE; in set_flow_ctrl()
3139 hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE; in set_flow_ctrl()
3141 hw->tx_cfg |= DMA_TX_FLOW_ENABLE; in set_flow_ctrl()
3143 hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE; in set_flow_ctrl()
3144 if (hw->enabled) { in set_flow_ctrl()
3145 if (rx_cfg != hw->rx_cfg) in set_flow_ctrl()
3146 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL); in set_flow_ctrl()
3147 if (tx_cfg != hw->tx_cfg) in set_flow_ctrl()
3148 writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL); in set_flow_ctrl()
3158 if (hw->overrides & PAUSE_FLOW_CTRL) in determine_flow_ctrl()
3162 if (port->force_link) in determine_flow_ctrl()
3178 if (!hw->ksz_switch) in determine_flow_ctrl()
3185 if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) && in port_cfg_change()
3186 !(hw->overrides & PAUSE_FLOW_CTRL)) { in port_cfg_change()
3187 u32 cfg = hw->tx_cfg; in port_cfg_change()
3190 if (1 == info->duplex) in port_cfg_change()
3191 hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE; in port_cfg_change()
3192 if (hw->enabled && cfg != hw->tx_cfg) in port_cfg_change()
3193 writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL); in port_cfg_change()
3198 * port_get_link_speed - get current link status
3201 * This routine reads PHY registers to determine the current link status of the
3209 struct ksz_hw *hw = port->hw; in port_get_link_speed()
3220 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { in port_get_link_speed()
3221 info = &hw->port_info[p]; in port_get_link_speed()
3234 if (local == info->advertised && remote == info->partner) in port_get_link_speed()
3237 info->advertised = local; in port_get_link_speed()
3238 info->partner = remote; in port_get_link_speed()
3245 info->tx_rate = 10 * TX_RATE_UNIT; in port_get_link_speed()
3247 info->tx_rate = 100 * TX_RATE_UNIT; in port_get_link_speed()
3249 info->duplex = 1; in port_get_link_speed()
3251 info->duplex = 2; in port_get_link_speed()
3253 if (media_connected != info->state) { in port_get_link_speed()
3259 if (hw->ksz_switch) { in port_get_link_speed()
3261 (1 == info->duplex)); in port_get_link_speed()
3266 info->state = media_connected; in port_get_link_speed()
3268 if (media_disconnected != info->state) { in port_get_link_speed()
3272 hw->port_mib[p].link_down = 1; in port_get_link_speed()
3274 info->state = media_disconnected; in port_get_link_speed()
3276 hw->port_mib[p].state = (u8) info->state; in port_get_link_speed()
3279 if (linked && media_disconnected == port->linked->state) in port_get_link_speed()
3280 port->linked = linked; in port_get_link_speed()
3288 * port_set_link_speed - set port speed
3295 struct ksz_hw *hw = port->hw; in port_set_link_speed()
3302 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { in port_set_link_speed()
3317 if (port->speed || port->duplex) { in port_set_link_speed()
3318 if (10 == port->speed) in port_set_link_speed()
3321 else if (100 == port->speed) in port_set_link_speed()
3324 if (1 == port->duplex) in port_set_link_speed()
3327 else if (2 == port->duplex) in port_set_link_speed()
3339 * port_force_link_speed - force port speed
3346 struct ksz_hw *hw = port->hw; in port_force_link_speed()
3349 int phy; in port_force_link_speed() local
3352 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { in port_force_link_speed()
3353 phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL; in port_force_link_speed()
3354 hw_r_phy_ctrl(hw, phy, &data); in port_force_link_speed()
3358 if (10 == port->speed) in port_force_link_speed()
3360 else if (100 == port->speed) in port_force_link_speed()
3362 if (1 == port->duplex) in port_force_link_speed()
3364 else if (2 == port->duplex) in port_force_link_speed()
3366 hw_w_phy_ctrl(hw, phy, data); in port_force_link_speed()
3372 struct ksz_hw *hw = port->hw; in port_set_power_saving()
3376 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) in port_set_power_saving()
3386 * hw_chk_wol_pme_status - check PMEN pin
3396 struct pci_dev *pdev = hw_priv->pdev; in hw_chk_wol_pme_status()
3399 if (!pdev->pm_cap) in hw_chk_wol_pme_status()
3401 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data); in hw_chk_wol_pme_status()
3406 * hw_clr_wol_pme_status - clear PMEN pin
3414 struct pci_dev *pdev = hw_priv->pdev; in hw_clr_wol_pme_status()
3417 if (!pdev->pm_cap) in hw_clr_wol_pme_status()
3421 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data); in hw_clr_wol_pme_status()
3423 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data); in hw_clr_wol_pme_status()
3427 * hw_cfg_wol_pme - enable or disable Wake-on-LAN
3431 * This routine is used to enable or disable Wake-on-LAN.
3436 struct pci_dev *pdev = hw_priv->pdev; in hw_cfg_wol_pme()
3439 if (!pdev->pm_cap) in hw_cfg_wol_pme()
3441 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data); in hw_cfg_wol_pme()
3447 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data); in hw_cfg_wol_pme()
3451 * hw_cfg_wol - configure Wake-on-LAN features
3456 * This routine is used to enable or disable certain Wake-on-LAN features.
3462 data = readw(hw->io + KS8841_WOL_CTRL_OFFSET); in hw_cfg_wol()
3467 writew(data, hw->io + KS8841_WOL_CTRL_OFFSET); in hw_cfg_wol()
3471 * hw_set_wol_frame - program Wake-on-LAN pattern
3479 * This routine is used to program a Wake-on-LAN pattern.
3498 writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i); in hw_set_wol_frame()
3499 writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i); in hw_set_wol_frame()
3508 --bits; in hw_set_wol_frame()
3511 writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i in hw_set_wol_frame()
3521 bits = mask[len - 1]; in hw_set_wol_frame()
3524 writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len - in hw_set_wol_frame()
3528 writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i); in hw_set_wol_frame()
3532 * hw_add_wol_arp - add ARP pattern
3536 * This routine is used to add an ARP pattern for waking up the host.
3556 * hw_add_wol_bcast - add broadcast pattern
3559 * This routine is used to add a broadcast pattern for waking up the host.
3570 * hw_add_wol_mcast - add multicast pattern
3573 * This routine is used to add a multicast pattern for waking up the host.
3577 * multicast hash table, so not all multicast packets can wake up the host.
3584 memcpy(&pattern[3], &hw->override_addr[3], 3); in hw_add_wol_mcast()
3589 * hw_add_wol_ucast - add unicast pattern
3601 hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr); in hw_add_wol_ucast()
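In hw_set_wol_frame() each mask bit enables one pattern byte, so the single 0x3F mask byte used here covers exactly the six bytes of the MAC address. A hypothetical narrower filter, to illustrate the mask/pattern pairing (frame slot and values are made up, not from the driver):

	static const u8 ex_mask[] = { 0x03 };		/* enable bytes 0-1 only */
	static const u8 ex_pattern[] = { 0xFF, 0xFF };
	hw_set_wol_frame(hw, 2, 1, ex_mask, 2, ex_pattern);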
3605 * hw_enable_wol - enable Wake-on-LAN
3607 * @wol_enable: The Wake-on-LAN settings.
3610 * This routine is used to enable Wake-on-LAN depending on driver settings.
3625 * hw_init - check the driver is correct for the hardware
3628 * This function checks that the hardware is correct for this driver and sets the
3629 * hardware up for proper initialization.
3640 writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET); in hw_init()
3643 data = readw(hw->io + KS884X_CHIP_ID_OFFSET); in hw_init()
3656 hw->features |= SMALL_PACKET_TX_BUG; in hw_init()
3658 hw->features |= HALF_DUPLEX_SIGNAL_BUG; in hw_init()
3664 * hw_reset - reset the hardware
3671 writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET); in hw_reset()
3673 /* Wait for device to reset. */ in hw_reset()
3677 writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET); in hw_reset()
3681 * hw_setup - setup the hardware
3684 * This routine sets up the hardware for proper operation.
3692 data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET); in hw_setup()
3695 writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET); in hw_setup()
3699 hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE | in hw_setup()
3703 hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST | in hw_setup()
3705 hw->rx_cfg |= KS884X_DMA_RX_MULTICAST; in hw_setup()
3708 hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP); in hw_setup()
3710 if (hw->all_multi) in hw_setup()
3711 hw->rx_cfg |= DMA_RX_ALL_MULTICAST; in hw_setup()
3712 if (hw->promiscuous) in hw_setup()
3713 hw->rx_cfg |= DMA_RX_PROMISCUOUS; in hw_setup()
3717 * hw_setup_intr - setup interrupt mask
3720 * This routine sets up the interrupt mask for proper operation.
3724 hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN; in hw_setup_intr()
3731 int alloc = info->alloc; in ksz_check_desc_num()
3748 info->alloc = alloc; in ksz_check_desc_num()
3750 info->mask = info->alloc - 1; in ksz_check_desc_num()
3756 u32 phys = desc_info->ring_phys; in hw_init_desc()
3757 struct ksz_hw_desc *desc = desc_info->ring_virt; in hw_init_desc()
3758 struct ksz_desc *cur = desc_info->ring; in hw_init_desc()
3761 for (i = 0; i < desc_info->alloc; i++) { in hw_init_desc()
3762 cur->phw = desc++; in hw_init_desc()
3763 phys += desc_info->size; in hw_init_desc()
3765 previous->phw->next = cpu_to_le32(phys); in hw_init_desc()
3767 previous->phw->next = cpu_to_le32(desc_info->ring_phys); in hw_init_desc()
3768 previous->sw.buf.rx.end_of_ring = 1; in hw_init_desc()
3769 previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data); in hw_init_desc()
3771 desc_info->avail = desc_info->alloc; in hw_init_desc()
3772 desc_info->last = desc_info->next = 0; in hw_init_desc()
3774 desc_info->cur = desc_info->ring; in hw_init_desc()
3778 * hw_set_desc_base - set descriptor base addresses
3788 writel(tx_addr, hw->io + KS_DMA_TX_ADDR); in hw_set_desc_base()
3789 writel(rx_addr, hw->io + KS_DMA_RX_ADDR); in hw_set_desc_base()
3794 info->cur = info->ring; in hw_reset_pkts()
3795 info->avail = info->alloc; in hw_reset_pkts()
3796 info->last = info->next = 0; in hw_reset_pkts()
3801 writel(DMA_START, hw->io + KS_DMA_RX_START); in hw_resume_rx()
3805 * hw_start_rx - start receiving
3812 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL); in hw_start_rx()
3815 hw->intr_mask |= KS884X_INT_RX_STOPPED; in hw_start_rx()
3817 writel(DMA_START, hw->io + KS_DMA_RX_START); in hw_start_rx()
3819 hw->rx_stop++; in hw_start_rx()
3822 if (0 == hw->rx_stop) in hw_start_rx()
3823 hw->rx_stop = 2; in hw_start_rx()
3827 * hw_stop_rx - stop receiving
3834 hw->rx_stop = 0; in hw_stop_rx()
3836 writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL); in hw_stop_rx()
3840 * hw_start_tx - start transmitting
3847 writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL); in hw_start_tx()
3851 * hw_stop_tx - stop transmitting
3858 writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL); in hw_stop_tx()
3862 * hw_disable - disable hardware
3871 hw->enabled = 0; in hw_disable()
3875 * hw_enable - enable hardware
3884 hw->enabled = 1; in hw_enable()
3888 * hw_alloc_pkt - allocate enough descriptors for transmission
3893 * This function allocates descriptors for transmission.
3895 * Return 0 if not successful; 1 for buffer copy; or number of descriptors.
3900 if (hw->tx_desc_info.avail <= 1) in hw_alloc_pkt()
3903 /* Allocate a descriptor for transmission and mark it current. */ in hw_alloc_pkt()
3904 get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur); in hw_alloc_pkt()
3905 hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1; in hw_alloc_pkt()
3908 ++hw->tx_int_cnt; in hw_alloc_pkt()
3909 hw->tx_size += length; in hw_alloc_pkt()
3912 if (hw->tx_size >= MAX_TX_HELD_SIZE) in hw_alloc_pkt()
3913 hw->tx_int_cnt = hw->tx_int_mask + 1; in hw_alloc_pkt()
3915 if (physical > hw->tx_desc_info.avail) in hw_alloc_pkt()
3918 return hw->tx_desc_info.avail; in hw_alloc_pkt()
3922 * hw_send_pkt - mark packet for transmission
3925 * This routine marks the packet for transmission in the PCI version.
3929 struct ksz_desc *cur = hw->tx_desc_info.cur; in hw_send_pkt()
3931 cur->sw.buf.tx.last_seg = 1; in hw_send_pkt()
3934 if (hw->tx_int_cnt > hw->tx_int_mask) { in hw_send_pkt()
3935 cur->sw.buf.tx.intr = 1; in hw_send_pkt()
3936 hw->tx_int_cnt = 0; in hw_send_pkt()
3937 hw->tx_size = 0; in hw_send_pkt()
3941 cur->sw.buf.tx.dest_port = hw->dst_ports; in hw_send_pkt()
3945 writel(0, hw->io + KS_DMA_TX_START); in hw_send_pkt()
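Putting the two halves together, the transmit path reserves descriptors under the hardware lock, fills them, then kicks DMA; a condensed sketch of what netdev_tx() later in this listing does:

	spin_lock_irq(&hw_priv->hwlock);
	if (hw_alloc_pkt(hw, skb->len, skb_shinfo(skb)->nr_frags + 1))
		send_packet(skb, dev);	/* fills descriptors, ends in hw_send_pkt() */
	spin_unlock_irq(&hw_priv->hwlock);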
3957 * hw_set_addr - set MAC address
3967 for (i = 0; i < ETH_ALEN; i++) in hw_set_addr()
3968 writeb(hw->override_addr[MAC_ADDR_ORDER(i)], in hw_set_addr()
3969 hw->io + KS884X_ADDR_0_OFFSET + i); in hw_set_addr()
3971 sw_set_addr(hw, hw->override_addr); in hw_set_addr()
3975 * hw_read_addr - read MAC address
3984 for (i = 0; i < ETH_ALEN; i++) in hw_read_addr()
3985 hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io + in hw_read_addr()
3988 if (!hw->mac_override) { in hw_read_addr()
3989 memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN); in hw_read_addr()
3990 if (empty_addr(hw->override_addr)) { in hw_read_addr()
3991 memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN); in hw_read_addr()
3992 memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS, in hw_read_addr()
3994 hw->override_addr[5] += hw->id; in hw_read_addr()
4007 for (i = 0; i < 2; i++) { in hw_ena_add_addr()
4013 for (i = 2; i < 6; i++) { in hw_ena_add_addr()
4019 writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO); in hw_ena_add_addr()
4020 writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI); in hw_ena_add_addr()
4027 for (i = 0; i < ADDITIONAL_ENTRIES; i++) { in hw_set_add_addr()
4028 if (empty_addr(hw->address[i])) in hw_set_add_addr()
4029 writel(0, hw->io + ADD_ADDR_INCR * i + in hw_set_add_addr()
4032 hw_ena_add_addr(hw, i, hw->address[i]); in hw_set_add_addr()
4041 if (ether_addr_equal(hw->override_addr, mac_addr)) in hw_add_addr()
4043 for (i = 0; i < hw->addr_list_size; i++) { in hw_add_addr()
4044 if (ether_addr_equal(hw->address[i], mac_addr)) in hw_add_addr()
4046 if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i])) in hw_add_addr()
4050 memcpy(hw->address[j], mac_addr, ETH_ALEN); in hw_add_addr()
4051 hw_ena_add_addr(hw, j, hw->address[j]); in hw_add_addr()
4054 return -1; in hw_add_addr()
4061 for (i = 0; i < hw->addr_list_size; i++) { in hw_del_addr()
4062 if (ether_addr_equal(hw->address[i], mac_addr)) { in hw_del_addr()
4063 eth_zero_addr(hw->address[i]); in hw_del_addr()
4064 writel(0, hw->io + ADD_ADDR_INCR * i + in hw_del_addr()
4069 return -1; in hw_del_addr()
4073 * hw_clr_multicast - clear multicast addresses
4082 for (i = 0; i < HW_MULTICAST_SIZE; i++) { in hw_clr_multicast()
4083 hw->multi_bits[i] = 0; in hw_clr_multicast()
4085 writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i); in hw_clr_multicast()
4090 * hw_set_grp_addr - set multicast addresses
4093 * This routine programs multicast addresses for the hardware to accept those
4103 memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE); in hw_set_grp_addr()
4105 for (i = 0; i < hw->multi_list_size; i++) { in hw_set_grp_addr()
4106 position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f; in hw_set_grp_addr()
4109 hw->multi_bits[index] |= (u8) value; in hw_set_grp_addr()
4112 for (i = 0; i < HW_MULTICAST_SIZE; i++) in hw_set_grp_addr()
4113 writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET + in hw_set_grp_addr()
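The filter is a 64-bit hash: the top six bits of the Ethernet CRC select one bit of the 8-byte table. Worked out for one address (position value chosen for illustration; the index/value lines are elided from this listing):

	/* position = (ether_crc(6, addr) >> 26) & 0x3f;   say position = 42
	 * index    = position >> 3;                       -> byte 5
	 * value    = 1 << (position & 7);                 -> bit 2 (0x04)
	 * hw->multi_bits[5] |= 0x04;
	 */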
4118 * hw_set_multicast - enable or disable all multicast receiving
4126 /* Stop receiving for reconfiguration. */ in hw_set_multicast()
4130 hw->rx_cfg |= DMA_RX_ALL_MULTICAST; in hw_set_multicast()
4132 hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST; in hw_set_multicast()
4134 if (hw->enabled) in hw_set_multicast()
4139 * hw_set_promiscuous - enable or disable promiscuous receiving
4147 /* Stop receiving for reconfiguration. */ in hw_set_promiscuous()
4151 hw->rx_cfg |= DMA_RX_PROMISCUOUS; in hw_set_promiscuous()
4153 hw->rx_cfg &= ~DMA_RX_PROMISCUOUS; in hw_set_promiscuous()
4155 if (hw->enabled) in hw_set_promiscuous()
4160 * sw_enable - enable the switch
4170 for (port = 0; port < SWITCH_PORT_NUM; port++) { in sw_enable()
4171 if (hw->dev_count > 1) { in sw_enable()
4172 /* Set port-based VLAN membership with host port. */ in sw_enable()
4181 if (hw->dev_count > 1) in sw_enable()
4188 writew(enable, hw->io + KS884X_CHIP_ID_OFFSET); in sw_enable()
4192 * sw_setup - setup the switch
4195 * This routine sets up the hardware switch engine for default operation.
4206 for (port = 0; port < SWITCH_PORT_NUM; port++) in sw_setup()
4217 if (hw->features & STP_SUPPORT) in sw_setup()
4221 hw->overrides |= PAUSE_FLOW_CTRL; in sw_setup()
4226 * ksz_start_timer - start kernel timer
4234 info->cnt = 0; in ksz_start_timer()
4235 info->timer.expires = jiffies + time; in ksz_start_timer()
4236 add_timer(&info->timer); in ksz_start_timer()
4239 info->max = -1; in ksz_start_timer()
4243 * ksz_stop_timer - stop kernel timer
4250 if (info->max) { in ksz_stop_timer()
4251 info->max = 0; in ksz_stop_timer()
4252 del_timer_sync(&info->timer); in ksz_stop_timer()
4259 info->max = 0; in ksz_init_timer()
4260 info->period = period; in ksz_init_timer()
4261 timer_setup(&info->timer, function, 0); in ksz_init_timer()
4266 ++info->cnt; in ksz_update_timer()
4267 if (info->max > 0) { in ksz_update_timer()
4268 if (info->cnt < info->max) { in ksz_update_timer()
4269 info->timer.expires = jiffies + info->period; in ksz_update_timer()
4270 add_timer(&info->timer); in ksz_update_timer()
4272 info->max = 0; in ksz_update_timer()
4273 } else if (info->max < 0) { in ksz_update_timer()
4274 info->timer.expires = jiffies + info->period; in ksz_update_timer()
4275 add_timer(&info->timer); in ksz_update_timer()
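A typical timer cycle, using a hypothetical handler name (the driver wires its MIB and link-monitor timers this way):

	/* my_timer_fn is hypothetical; period is in jiffies. */
	ksz_init_timer(&info, 500 * HZ / 1000, my_timer_fn);
	ksz_start_timer(&info, info.period);	/* max = -1: run until stopped */
	/* my_timer_fn() does its work, then calls ksz_update_timer(&info)
	 * to re-arm; ksz_stop_timer(&info) cancels the cycle.
	 */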
4280 * ksz_alloc_soft_desc - allocate software descriptors
4282 * @transmit: Indication that descriptors are for transmit.
4284 * This local function allocates software descriptors for manipulation in
4291 desc_info->ring = kcalloc(desc_info->alloc, sizeof(struct ksz_desc), in ksz_alloc_soft_desc()
4293 if (!desc_info->ring) in ksz_alloc_soft_desc()
4300 * ksz_alloc_desc - allocate hardware descriptors
4303 * This local function allocates hardware descriptors for receiving and
4310 struct ksz_hw *hw = &adapter->hw; in ksz_alloc_desc()
4313 /* Allocate memory for RX & TX descriptors. */ in ksz_alloc_desc()
4314 adapter->desc_pool.alloc_size = in ksz_alloc_desc()
4315 hw->rx_desc_info.size * hw->rx_desc_info.alloc + in ksz_alloc_desc()
4316 hw->tx_desc_info.size * hw->tx_desc_info.alloc + in ksz_alloc_desc()
4319 adapter->desc_pool.alloc_virt = in ksz_alloc_desc()
4320 dma_alloc_coherent(&adapter->pdev->dev, in ksz_alloc_desc()
4321 adapter->desc_pool.alloc_size, in ksz_alloc_desc()
4322 &adapter->desc_pool.dma_addr, GFP_KERNEL); in ksz_alloc_desc()
4323 if (adapter->desc_pool.alloc_virt == NULL) { in ksz_alloc_desc()
4324 adapter->desc_pool.alloc_size = 0; in ksz_alloc_desc()
4329 offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ? in ksz_alloc_desc()
4330 (DESC_ALIGNMENT - in ksz_alloc_desc()
4331 ((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0); in ksz_alloc_desc()
4332 adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset; in ksz_alloc_desc()
4333 adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset; in ksz_alloc_desc()
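The ternary above is the open-coded form of rounding up to the next DESC_ALIGNMENT boundary; the same offset can be written with the kernel's ALIGN() helper:

	offset = ALIGN((ulong) adapter->desc_pool.alloc_virt, DESC_ALIGNMENT) -
		 (ulong) adapter->desc_pool.alloc_virt;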
4336 hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *) in ksz_alloc_desc()
4337 adapter->desc_pool.virt; in ksz_alloc_desc()
4338 hw->rx_desc_info.ring_phys = adapter->desc_pool.phys; in ksz_alloc_desc()
4339 offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size; in ksz_alloc_desc()
4340 hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *) in ksz_alloc_desc()
4341 (adapter->desc_pool.virt + offset); in ksz_alloc_desc()
4342 hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset; in ksz_alloc_desc()
4344 if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0)) in ksz_alloc_desc()
4346 if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1)) in ksz_alloc_desc()
4353 * free_dma_buf - release DMA buffer resources
4363 dma_unmap_single(&adapter->pdev->dev, dma_buf->dma, dma_buf->len, in free_dma_buf()
4365 dev_kfree_skb(dma_buf->skb); in free_dma_buf()
4366 dma_buf->skb = NULL; in free_dma_buf()
4367 dma_buf->dma = 0; in free_dma_buf()
4371 * ksz_init_rx_buffers - initialize receive descriptors
4374 * This routine initializes DMA buffers for receiving.
4381 struct ksz_hw *hw = &adapter->hw; in ksz_init_rx_buffers()
4382 struct ksz_desc_info *info = &hw->rx_desc_info; in ksz_init_rx_buffers()
4384 for (i = 0; i < hw->rx_desc_info.alloc; i++) { in ksz_init_rx_buffers()
4388 if (dma_buf->skb && dma_buf->len != adapter->mtu) in ksz_init_rx_buffers()
4390 dma_buf->len = adapter->mtu; in ksz_init_rx_buffers()
4391 if (!dma_buf->skb) in ksz_init_rx_buffers()
4392 dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC); in ksz_init_rx_buffers()
4393 if (dma_buf->skb && !dma_buf->dma) in ksz_init_rx_buffers()
4394 dma_buf->dma = dma_map_single(&adapter->pdev->dev, in ksz_init_rx_buffers()
4395 skb_tail_pointer(dma_buf->skb), in ksz_init_rx_buffers()
4396 dma_buf->len, in ksz_init_rx_buffers()
4400 set_rx_buf(desc, dma_buf->dma); in ksz_init_rx_buffers()
4401 set_rx_len(desc, dma_buf->len); in ksz_init_rx_buffers()
4407 * ksz_alloc_mem - allocate memory for hardware descriptors
4410 * This function allocates memory for use by hardware descriptors for receiving
4417 struct ksz_hw *hw = &adapter->hw; in ksz_alloc_mem()
4420 hw->rx_desc_info.alloc = NUM_OF_RX_DESC; in ksz_alloc_mem()
4421 hw->tx_desc_info.alloc = NUM_OF_TX_DESC; in ksz_alloc_mem()
4424 hw->tx_int_cnt = 0; in ksz_alloc_mem()
4425 hw->tx_int_mask = NUM_OF_TX_DESC / 4; in ksz_alloc_mem()
4426 if (hw->tx_int_mask > 8) in ksz_alloc_mem()
4427 hw->tx_int_mask = 8; in ksz_alloc_mem()
4428 while (hw->tx_int_mask) { in ksz_alloc_mem()
4429 hw->tx_int_cnt++; in ksz_alloc_mem()
4430 hw->tx_int_mask >>= 1; in ksz_alloc_mem()
4432 if (hw->tx_int_cnt) { in ksz_alloc_mem()
4433 hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1; in ksz_alloc_mem()
4434 hw->tx_int_cnt = 0; in ksz_alloc_mem()
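This block reduces NUM_OF_TX_DESC / 4 (capped at 8) to a power-of-two interrupt mask; worked example assuming NUM_OF_TX_DESC == 64:

	/* 64 / 4 = 16, capped to 8 = 0b1000; the while loop shifts it out
	 * in 4 iterations, so tx_int_mask = (1 << (4 - 1)) - 1 = 7, and
	 * hw_send_pkt() requests a TX-done interrupt every 8th packet.
	 */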
4438 hw->rx_desc_info.size = in ksz_alloc_mem()
4439 (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) / in ksz_alloc_mem()
4441 hw->tx_desc_info.size = in ksz_alloc_mem()
4442 (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) / in ksz_alloc_mem()
4444 if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc)) in ksz_alloc_mem()
4446 ksz_check_desc_num(&hw->rx_desc_info); in ksz_alloc_mem()
4447 ksz_check_desc_num(&hw->tx_desc_info); in ksz_alloc_mem()
4457 * ksz_free_desc - free software and hardware descriptors
4465 struct ksz_hw *hw = &adapter->hw; in ksz_free_desc()
4468 hw->rx_desc_info.ring_virt = NULL; in ksz_free_desc()
4469 hw->tx_desc_info.ring_virt = NULL; in ksz_free_desc()
4470 hw->rx_desc_info.ring_phys = 0; in ksz_free_desc()
4471 hw->tx_desc_info.ring_phys = 0; in ksz_free_desc()
4474 if (adapter->desc_pool.alloc_virt) in ksz_free_desc()
4475 dma_free_coherent(&adapter->pdev->dev, in ksz_free_desc()
4476 adapter->desc_pool.alloc_size, in ksz_free_desc()
4477 adapter->desc_pool.alloc_virt, in ksz_free_desc()
4478 adapter->desc_pool.dma_addr); in ksz_free_desc()
4481 adapter->desc_pool.alloc_size = 0; in ksz_free_desc()
4482 adapter->desc_pool.alloc_virt = NULL; in ksz_free_desc()
4484 kfree(hw->rx_desc_info.ring); in ksz_free_desc()
4485 hw->rx_desc_info.ring = NULL; in ksz_free_desc()
4486 kfree(hw->tx_desc_info.ring); in ksz_free_desc()
4487 hw->tx_desc_info.ring = NULL; in ksz_free_desc()
4491 * ksz_free_buffers - free buffers used in the descriptors
4503 struct ksz_desc *desc = desc_info->ring; in ksz_free_buffers()
4505 for (i = 0; i < desc_info->alloc; i++) { in ksz_free_buffers()
4507 if (dma_buf->skb) in ksz_free_buffers()
4514 * ksz_free_mem - free all resources used by descriptors
4522 ksz_free_buffers(adapter, &adapter->hw.tx_desc_info, DMA_TO_DEVICE); in ksz_free_mem()
4525 ksz_free_buffers(adapter, &adapter->hw.rx_desc_info, DMA_FROM_DEVICE); in ksz_free_mem()
4540 for (i = 0, port = first; i < cnt; i++, port++) { in get_mib_counters()
4541 port_mib = &hw->port_mib[port]; in get_mib_counters()
4542 for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++) in get_mib_counters()
4543 counter[mib] += port_mib->counter[mib]; in get_mib_counters()
4548 * send_packet - send packet
4559 struct dev_info *hw_priv = priv->adapter; in send_packet()
4560 struct ksz_hw *hw = &hw_priv->hw; in send_packet()
4561 struct ksz_desc_info *info = &hw->tx_desc_info; in send_packet()
4564 int last_frag = skb_shinfo(skb)->nr_frags; in send_packet()
4570 if (hw->dev_count > 1) in send_packet()
4571 hw->dst_ports = 1 << priv->port.first_port; in send_packet()
4574 len = skb->len; in send_packet()
4577 first = info->cur; in send_packet()
4585 dma_buf->len = skb_headlen(skb); in send_packet()
4587 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data, in send_packet()
4588 dma_buf->len, DMA_TO_DEVICE); in send_packet()
4589 set_tx_buf(desc, dma_buf->dma); in send_packet()
4590 set_tx_len(desc, dma_buf->len); in send_packet()
4594 this_frag = &skb_shinfo(skb)->frags[frag]; in send_packet()
4600 ++hw->tx_int_cnt; in send_packet()
4603 dma_buf->len = skb_frag_size(this_frag); in send_packet()
4605 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, in send_packet()
4607 dma_buf->len, in send_packet()
4609 set_tx_buf(desc, dma_buf->dma); in send_packet()
4610 set_tx_len(desc, dma_buf->len); in send_packet()
4621 info->cur = desc; in send_packet()
4626 dma_buf->len = len; in send_packet()
4628 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data, in send_packet()
4629 dma_buf->len, DMA_TO_DEVICE); in send_packet()
4630 set_tx_buf(desc, dma_buf->dma); in send_packet()
4631 set_tx_len(desc, dma_buf->len); in send_packet()
4634 if (skb->ip_summed == CHECKSUM_PARTIAL) { in send_packet()
4635 (desc)->sw.buf.tx.csum_gen_tcp = 1; in send_packet()
4636 (desc)->sw.buf.tx.csum_gen_udp = 1; in send_packet()
4643 dma_buf->skb = skb; in send_packet()
4648 dev->stats.tx_packets++; in send_packet()
4649 dev->stats.tx_bytes += len; in send_packet()
4653 * transmit_cleanup - clean up transmit descriptors
4663 struct ksz_hw *hw = &hw_priv->hw; in transmit_cleanup()
4664 struct ksz_desc_info *info = &hw->tx_desc_info; in transmit_cleanup()
4669 spin_lock_irq(&hw_priv->hwlock); in transmit_cleanup()
4670 last = info->last; in transmit_cleanup()
4672 while (info->avail < info->alloc) { in transmit_cleanup()
4674 desc = &info->ring[last]; in transmit_cleanup()
4675 status.data = le32_to_cpu(desc->phw->ctrl.data); in transmit_cleanup()
4684 dma_unmap_single(&hw_priv->pdev->dev, dma_buf->dma, in transmit_cleanup()
4685 dma_buf->len, DMA_TO_DEVICE); in transmit_cleanup()
4688 if (dma_buf->skb) { in transmit_cleanup()
4689 dev = dma_buf->skb->dev; in transmit_cleanup()
4692 dev_kfree_skb_irq(dma_buf->skb); in transmit_cleanup()
4693 dma_buf->skb = NULL; in transmit_cleanup()
4698 last &= info->mask; in transmit_cleanup()
4699 info->avail++; in transmit_cleanup()
4701 info->last = last; in transmit_cleanup()
4702 spin_unlock_irq(&hw_priv->hwlock); in transmit_cleanup()
4710 * tx_done - transmit done processing
4718 struct ksz_hw *hw = &hw_priv->hw; in tx_done()
4723 for (port = 0; port < hw->dev_count; port++) { in tx_done()
4724 struct net_device *dev = hw->port_info[port].pdev; in tx_done()
4733 skb->dev = old->dev; in copy_old_skb()
4734 skb->protocol = old->protocol; in copy_old_skb()
4735 skb->ip_summed = old->ip_summed; in copy_old_skb()
4736 skb->csum = old->csum; in copy_old_skb()
4743 * netdev_tx - send out packet
4754 struct dev_info *hw_priv = priv->adapter; in netdev_tx()
4755 struct ksz_hw *hw = &hw_priv->hw; in netdev_tx()
4760 if (hw->features & SMALL_PACKET_TX_BUG) { in netdev_tx()
4763 if (skb->len <= 48) { in netdev_tx()
4764 if (skb_end_pointer(skb) - skb->data >= 50) { in netdev_tx()
4765 memset(&skb->data[skb->len], 0, 50 - skb->len); in netdev_tx()
4766 skb->len = 50; in netdev_tx()
4771 memcpy(skb->data, org_skb->data, org_skb->len); in netdev_tx()
4772 memset(&skb->data[org_skb->len], 0, in netdev_tx()
4773 50 - org_skb->len); in netdev_tx()
4774 skb->len = 50; in netdev_tx()
4780 spin_lock_irq(&hw_priv->hwlock); in netdev_tx()
4782 num = skb_shinfo(skb)->nr_frags + 1; in netdev_tx()
4783 left = hw_alloc_pkt(hw, skb->len, num); in netdev_tx()
4786 (CHECKSUM_PARTIAL == skb->ip_summed && in netdev_tx()
4787 skb->protocol == htons(ETH_P_IPV6))) { in netdev_tx()
4790 skb = netdev_alloc_skb(dev, org_skb->len); in netdev_tx()
4795 skb_copy_and_csum_dev(org_skb, skb->data); in netdev_tx()
4796 org_skb->ip_summed = CHECKSUM_NONE; in netdev_tx()
4797 skb->len = org_skb->len; in netdev_tx()
4809 spin_unlock_irq(&hw_priv->hwlock); in netdev_tx()
4815 * netdev_tx_timeout - transmit timeout processing
4829 struct dev_info *hw_priv = priv->adapter; in netdev_tx_timeout()
4830 struct ksz_hw *hw = &hw_priv->hw; in netdev_tx_timeout()
4833 if (hw->dev_count > 1) { in netdev_tx_timeout()
4838 if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo)) in netdev_tx_timeout()
4848 hw_reset_pkts(&hw->rx_desc_info); in netdev_tx_timeout()
4849 hw_reset_pkts(&hw->tx_desc_info); in netdev_tx_timeout()
4855 hw->tx_desc_info.ring_phys, in netdev_tx_timeout()
4856 hw->rx_desc_info.ring_phys); in netdev_tx_timeout()
4858 if (hw->all_multi) in netdev_tx_timeout()
4859 hw_set_multicast(hw, hw->all_multi); in netdev_tx_timeout()
4860 else if (hw->multi_list_size) in netdev_tx_timeout()
4863 if (hw->dev_count > 1) { in netdev_tx_timeout()
4865 for (port = 0; port < SWITCH_PORT_NUM; port++) { in netdev_tx_timeout()
4871 port_dev = hw->port_info[port].pdev; in netdev_tx_timeout()
4891 protocol = skb->protocol; in csum_verified()
4895 protocol = iph->tot_len; in csum_verified()
4900 if (iph->protocol == IPPROTO_TCP) in csum_verified()
4901 skb->ip_summed = CHECKSUM_UNNECESSARY; in csum_verified()
4910 struct dev_info *hw_priv = priv->adapter; in rx_proc()
4914 /* Received length includes 4-byte CRC. */ in rx_proc()
4915 packet_len = status.rx.frame_len - 4; in rx_proc()
4918 dma_sync_single_for_cpu(&hw_priv->pdev->dev, dma_buf->dma, in rx_proc()
4922 /* skb->data != skb->head */ in rx_proc()
4925 dev->stats.rx_dropped++; in rx_proc()
4926 return -ENOMEM; in rx_proc()
4930 * Align socket buffer on a 4-byte boundary for better in rx_proc()
4935 skb_put_data(skb, dma_buf->skb->data, packet_len); in rx_proc()
4938 skb->protocol = eth_type_trans(skb, dev); in rx_proc()
4940 if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP)) in rx_proc()
4944 dev->stats.rx_packets++; in rx_proc()
4945 dev->stats.rx_bytes += packet_len; in rx_proc()
4947 /* Notify upper layer of the received packet. */ in rx_proc()
4957 struct ksz_hw *hw = &hw_priv->hw; in dev_rcv_packets()
4958 struct net_device *dev = hw->port_info[0].pdev; in dev_rcv_packets()
4959 struct ksz_desc_info *info = &hw->rx_desc_info; in dev_rcv_packets()
4960 int left = info->alloc; in dev_rcv_packets()
4964 next = info->next; in dev_rcv_packets()
4965 while (left--) { in dev_rcv_packets()
4967 desc = &info->ring[next]; in dev_rcv_packets()
4968 status.data = le32_to_cpu(desc->phw->ctrl.data); in dev_rcv_packets()
4982 next &= info->mask; in dev_rcv_packets()
4984 info->next = next; in dev_rcv_packets()
4993 struct ksz_hw *hw = &hw_priv->hw; in port_rcv_packets()
4994 struct net_device *dev = hw->port_info[0].pdev; in port_rcv_packets()
4995 struct ksz_desc_info *info = &hw->rx_desc_info; in port_rcv_packets()
4996 int left = info->alloc; in port_rcv_packets()
5000 next = info->next; in port_rcv_packets()
5001 while (left--) { in port_rcv_packets()
5003 desc = &info->ring[next]; in port_rcv_packets()
5004 status.data = le32_to_cpu(desc->phw->ctrl.data); in port_rcv_packets()
5008 if (hw->dev_count > 1) { in port_rcv_packets()
5012 dev = hw->port_info[p].pdev; in port_rcv_packets()
5027 next &= info->mask; in port_rcv_packets()
5029 info->next = next; in port_rcv_packets()
5038 struct ksz_hw *hw = &hw_priv->hw; in dev_rcv_special()
5039 struct net_device *dev = hw->port_info[0].pdev; in dev_rcv_special()
5040 struct ksz_desc_info *info = &hw->rx_desc_info; in dev_rcv_special()
5041 int left = info->alloc; in dev_rcv_special()
5045 next = info->next; in dev_rcv_special()
5046 while (left--) { in dev_rcv_special()
5048 desc = &info->ring[next]; in dev_rcv_special()
5049 status.data = le32_to_cpu(desc->phw->ctrl.data); in dev_rcv_special()
5053 if (hw->dev_count > 1) { in dev_rcv_special()
5057 dev = hw->port_info[p].pdev; in dev_rcv_special()
5067 * dropped, so no need to check the error bit. in dev_rcv_special()
5079 priv->port.counter[OID_COUNTER_RCV_ERROR]++; in dev_rcv_special()
5086 next &= info->mask; in dev_rcv_special()
5088 info->next = next; in dev_rcv_special()
5096 struct ksz_hw *hw = &hw_priv->hw; in rx_proc_task()
5098 if (!hw->enabled) in rx_proc_task()
5100 if (unlikely(!hw_priv->dev_rcv(hw_priv))) { in rx_proc_task()
5106 spin_lock_irq(&hw_priv->hwlock); in rx_proc_task()
5108 spin_unlock_irq(&hw_priv->hwlock); in rx_proc_task()
5111 tasklet_schedule(&hw_priv->rx_tasklet); in rx_proc_task()
5118 struct ksz_hw *hw = &hw_priv->hw; in tx_proc_task()
5125 spin_lock_irq(&hw_priv->hwlock); in tx_proc_task()
5127 spin_unlock_irq(&hw_priv->hwlock); in tx_proc_task()
5133 if (0 == hw->rx_stop) in handle_rx_stop()
5134 hw->intr_mask &= ~KS884X_INT_RX_STOPPED; in handle_rx_stop()
5135 else if (hw->rx_stop > 1) { in handle_rx_stop()
5136 if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) { in handle_rx_stop()
5139 hw->intr_mask &= ~KS884X_INT_RX_STOPPED; in handle_rx_stop()
5140 hw->rx_stop = 0; in handle_rx_stop()
5144 hw->rx_stop++; in handle_rx_stop()
5148 * netdev_intr - interrupt handling
5161 struct dev_info *hw_priv = priv->adapter; in netdev_intr()
5162 struct ksz_hw *hw = &hw_priv->hw; in netdev_intr()
5164 spin_lock(&hw_priv->hwlock); in netdev_intr()
5170 spin_unlock(&hw_priv->hwlock); in netdev_intr()
5176 int_enable &= hw->intr_mask; in netdev_intr()
5180 tasklet_schedule(&hw_priv->tx_tasklet); in netdev_intr()
5185 tasklet_schedule(&hw_priv->rx_tasklet); in netdev_intr()
5189 dev->stats.rx_fifo_errors++; in netdev_intr()
5194 struct ksz_port *port = &priv->port; in netdev_intr()
5196 hw->features |= LINK_INT_WORKING; in netdev_intr()
5208 hw->intr_mask &= ~KS884X_INT_TX_STOPPED; in netdev_intr()
5210 data = readl(hw->io + KS_DMA_TX_CTRL); in netdev_intr()
5219 spin_unlock(&hw_priv->hwlock); in netdev_intr()
5233 struct dev_info *hw_priv = priv->adapter; in netdev_netpoll()
5235 hw_dis_intr(&hw_priv->hw); in netdev_netpoll()
5236 netdev_intr(dev->irq, dev); in netdev_netpoll()
5244 struct ksz_switch *sw = hw->ksz_switch; in bridge_change()
5247 if (!sw->member) { in bridge_change()
5251 for (port = 0; port < SWITCH_PORT_NUM; port++) { in bridge_change()
5252 if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state) in bridge_change()
5253 member = HOST_MASK | sw->member; in bridge_change()
5256 if (member != sw->port_cfg[port].member) in bridge_change()
5262 * netdev_close - close network device
5273 struct dev_info *hw_priv = priv->adapter; in netdev_close()
5274 struct ksz_port *port = &priv->port; in netdev_close()
5275 struct ksz_hw *hw = &hw_priv->hw; in netdev_close()
5280 ksz_stop_timer(&priv->monitor_timer_info); in netdev_close()
5282 /* Need to shut down the port manually in multiple device interfaces mode. */ in netdev_close()
5283 if (hw->dev_count > 1) { in netdev_close()
5284 port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED); in netdev_close()
5286 /* The port is closed; update the bridge membership accordingly. */ in netdev_close()
5287 if (hw->features & STP_SUPPORT) { in netdev_close()
5288 pi = 1 << port->first_port; in netdev_close()
5289 if (hw->ksz_switch->member & pi) { in netdev_close()
5290 hw->ksz_switch->member &= ~pi; in netdev_close()
5295 if (port->first_port > 0) in netdev_close()
5296 hw_del_addr(hw, dev->dev_addr); in netdev_close()
5297 if (!hw_priv->wol_enable) in netdev_close()
5300 if (priv->multicast) in netdev_close()
5301 --hw->all_multi; in netdev_close()
5302 if (priv->promiscuous) in netdev_close()
5303 --hw->promiscuous; in netdev_close()
5305 hw_priv->opened--; in netdev_close()
5306 if (!(hw_priv->opened)) { in netdev_close()
5307 ksz_stop_timer(&hw_priv->mib_timer_info); in netdev_close()
5308 flush_work(&hw_priv->mib_read); in netdev_close()
5314 /* Give the receive task time to stop rescheduling itself. */ in netdev_close()
5317 tasklet_kill(&hw_priv->rx_tasklet); in netdev_close()
5318 tasklet_kill(&hw_priv->tx_tasklet); in netdev_close()
5319 free_irq(dev->irq, hw_priv->dev); in netdev_close()
5322 hw_reset_pkts(&hw->rx_desc_info); in netdev_close()
5323 hw_reset_pkts(&hw->tx_desc_info); in netdev_close()
5326 if (hw->features & STP_SUPPORT) in netdev_close()
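/*
 * Editorial note on the teardown order above: stop the timers first,
 * give the receive task time to stop rescheduling itself, kill both
 * tasklets, free the interrupt, and only then reclaim the descriptor
 * rings with hw_reset_pkts() -- reversing the order would let a live
 * tasklet touch descriptors that are being reset.
 */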
5335 if (hw->ksz_switch) { in hw_cfg_huge_frame()
5338 data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET); in hw_cfg_huge_frame()
5339 if (hw->features & RX_HUGE_FRAME) in hw_cfg_huge_frame()
5343 writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET); in hw_cfg_huge_frame()
5345 if (hw->features & RX_HUGE_FRAME) { in hw_cfg_huge_frame()
5346 hw->rx_cfg |= DMA_RX_ERROR; in hw_cfg_huge_frame()
5347 hw_priv->dev_rcv = dev_rcv_special; in hw_cfg_huge_frame()
5349 hw->rx_cfg &= ~DMA_RX_ERROR; in hw_cfg_huge_frame()
5350 if (hw->dev_count > 1) in hw_cfg_huge_frame()
5351 hw_priv->dev_rcv = port_rcv_packets; in hw_cfg_huge_frame()
5353 hw_priv->dev_rcv = dev_rcv_packets; in hw_cfg_huge_frame()
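/*
 * Editorial sketch: hw_cfg_huge_frame() above selects the receive
 * handler once, through the hw_priv->dev_rcv function pointer, instead
 * of branching per packet.  The pattern in isolation (demo_* names are
 * illustrative):
 */
struct demo_hw_info {
	int (*dev_rcv)(struct demo_hw_info *info);
	int huge_frame;
	int dev_count;
};

static int demo_rcv_packets(struct demo_hw_info *info)      { return 0; }
static int demo_rcv_special(struct demo_hw_info *info)      { return 0; }
static int demo_port_rcv_packets(struct demo_hw_info *info) { return 0; }

static void demo_pick_rcv(struct demo_hw_info *info)
{
	if (info->huge_frame)
		info->dev_rcv = demo_rcv_special;	/* error bit reused for size */
	else if (info->dev_count > 1)
		info->dev_rcv = demo_port_rcv_packets;	/* per-port demux */
	else
		info->dev_rcv = demo_rcv_packets;
}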
5360 struct dev_info *hw_priv = priv->adapter; in prepare_hardware()
5361 struct ksz_hw *hw = &hw_priv->hw; in prepare_hardware()
5365 hw_priv->dev = dev; in prepare_hardware()
5366 rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev); in prepare_hardware()
5369 tasklet_setup(&hw_priv->rx_tasklet, rx_proc_task); in prepare_hardware()
5370 tasklet_setup(&hw_priv->tx_tasklet, tx_proc_task); in prepare_hardware()
5372 hw->promiscuous = 0; in prepare_hardware()
5373 hw->all_multi = 0; in prepare_hardware()
5374 hw->multi_list_size = 0; in prepare_hardware()
5379 hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys); in prepare_hardware()
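/*
 * Editorial note: tasklet_setup(), used in prepare_hardware() above,
 * passes the callback a struct tasklet_struct *; the owning structure
 * is recovered with from_tasklet(), a container_of() wrapper.  Minimal
 * sketch (demo_ names are illustrative):
 */
#include <linux/interrupt.h>

struct demo_dev_info {
	struct tasklet_struct rx_tasklet;
};

static void demo_rx_proc_task(struct tasklet_struct *t)
{
	struct demo_dev_info *hw_priv = from_tasklet(hw_priv, t, rx_tasklet);

	/* drain the RX ring here, reschedule if more work remains */
	(void)hw_priv;
}

/* registration: tasklet_setup(&hw_priv->rx_tasklet, demo_rx_proc_task); */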
5390 if (media_state == priv->media_state) in set_media_state()
5395 media_state == priv->media_state ? "on" : "off"); in set_media_state()
5399 * netdev_open - open network device
5410 struct dev_info *hw_priv = priv->adapter; in netdev_open()
5411 struct ksz_hw *hw = &hw_priv->hw; in netdev_open()
5412 struct ksz_port *port = &priv->port; in netdev_open()
5419 priv->multicast = 0; in netdev_open()
5420 priv->promiscuous = 0; in netdev_open()
5423 memset(&dev->stats, 0, sizeof(struct net_device_stats)); in netdev_open()
5424 memset((void *) port->counter, 0, in netdev_open()
5427 if (!(hw_priv->opened)) { in netdev_open()
5431 for (i = 0; i < hw->mib_port_cnt; i++) { in netdev_open()
5433 hw_priv->counter[i].time = next_jiffies; in netdev_open()
5434 hw->port_mib[i].state = media_disconnected; in netdev_open()
5437 if (hw->ksz_switch) in netdev_open()
5438 hw->port_mib[HOST_PORT].state = media_connected; in netdev_open()
5442 hw_clr_wol_pme_status(&hw_priv->hw); in netdev_open()
5447 for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { in netdev_open()
5452 hw->port_info[p].partner = 0xFF; in netdev_open()
5453 hw->port_info[p].state = media_disconnected; in netdev_open()
5456 /* The port must be opened manually in multiple-device-interface mode. */ in netdev_open()
5457 if (hw->dev_count > 1) { in netdev_open()
5458 port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE); in netdev_open()
5459 if (port->first_port > 0) in netdev_open()
5460 hw_add_addr(hw, dev->dev_addr); in netdev_open()
5464 if (port->force_link) in netdev_open()
5469 if (!(hw_priv->opened)) { in netdev_open()
5474 if (hw->mib_port_cnt) in netdev_open()
5475 ksz_start_timer(&hw_priv->mib_timer_info, in netdev_open()
5476 hw_priv->mib_timer_info.period); in netdev_open()
5479 hw_priv->opened++; in netdev_open()
5481 ksz_start_timer(&priv->monitor_timer_info, in netdev_open()
5482 priv->monitor_timer_info.period); in netdev_open()
5484 priv->media_state = port->linked->state; in netdev_open()
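/*
 * Editorial sketch: netdev_open()/netdev_close() refcount the shared
 * controller with hw_priv->opened, so chip-wide setup runs only for the
 * first opener and teardown only for the last.  The skeleton (demo_
 * names are illustrative):
 */
struct demo_hw_state { int opened; };

static void demo_hw_start(struct demo_hw_state *hw) { /* enable DMA, timers */ }
static void demo_hw_stop(struct demo_hw_state *hw)  { /* disable DMA, timers */ }

static void demo_open(struct demo_hw_state *hw)
{
	if (!hw->opened++)
		demo_hw_start(hw);	/* first net_device opened */
}

static void demo_close(struct demo_hw_state *hw)
{
	if (!--hw->opened)
		demo_hw_stop(hw);	/* last net_device closed */
}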
5503 * netdev_query_statistics - query network device statistics
5514 struct ksz_port *port = &priv->port; in netdev_query_statistics()
5515 struct ksz_hw *hw = &priv->adapter->hw; in netdev_query_statistics()
5520 dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR]; in netdev_query_statistics()
5521 dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR]; in netdev_query_statistics()
5524 dev->stats.multicast = 0; in netdev_query_statistics()
5525 dev->stats.collisions = 0; in netdev_query_statistics()
5526 dev->stats.rx_length_errors = 0; in netdev_query_statistics()
5527 dev->stats.rx_crc_errors = 0; in netdev_query_statistics()
5528 dev->stats.rx_frame_errors = 0; in netdev_query_statistics()
5529 dev->stats.tx_window_errors = 0; in netdev_query_statistics()
5531 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) { in netdev_query_statistics()
5532 mib = &hw->port_mib[p]; in netdev_query_statistics()
5534 dev->stats.multicast += (unsigned long) in netdev_query_statistics()
5535 mib->counter[MIB_COUNTER_RX_MULTICAST]; in netdev_query_statistics()
5537 dev->stats.collisions += (unsigned long) in netdev_query_statistics()
5538 mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION]; in netdev_query_statistics()
5540 dev->stats.rx_length_errors += (unsigned long)( in netdev_query_statistics()
5541 mib->counter[MIB_COUNTER_RX_UNDERSIZE] + in netdev_query_statistics()
5542 mib->counter[MIB_COUNTER_RX_FRAGMENT] + in netdev_query_statistics()
5543 mib->counter[MIB_COUNTER_RX_OVERSIZE] + in netdev_query_statistics()
5544 mib->counter[MIB_COUNTER_RX_JABBER]); in netdev_query_statistics()
5545 dev->stats.rx_crc_errors += (unsigned long) in netdev_query_statistics()
5546 mib->counter[MIB_COUNTER_RX_CRC_ERR]; in netdev_query_statistics()
5547 dev->stats.rx_frame_errors += (unsigned long)( in netdev_query_statistics()
5548 mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] + in netdev_query_statistics()
5549 mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]); in netdev_query_statistics()
5551 dev->stats.tx_window_errors += (unsigned long) in netdev_query_statistics()
5552 mib->counter[MIB_COUNTER_TX_LATE_COLLISION]; in netdev_query_statistics()
5555 return &dev->stats; in netdev_query_statistics()
5559 * netdev_set_mac_address - set network device MAC address
5570 struct dev_info *hw_priv = priv->adapter; in netdev_set_mac_address()
5571 struct ksz_hw *hw = &hw_priv->hw; in netdev_set_mac_address()
5575 if (priv->port.first_port > 0) in netdev_set_mac_address()
5576 hw_del_addr(hw, dev->dev_addr); in netdev_set_mac_address()
5578 hw->mac_override = 1; in netdev_set_mac_address()
5579 memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); in netdev_set_mac_address()
5582 eth_hw_addr_set(dev, mac->sa_data); in netdev_set_mac_address()
5586 if (priv->port.first_port > 0) in netdev_set_mac_address()
5587 hw_add_addr(hw, dev->dev_addr); in netdev_set_mac_address()
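/*
 * Editorial sketch: the minimal ndo_set_mac_address() shape used above.
 * dev->dev_addr is const in recent kernels, so it must be written
 * through eth_hw_addr_set(); demo_program_mac_filter() is a
 * hypothetical stand-in for the hardware update.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void demo_program_mac_filter(struct net_device *dev) { }

static int demo_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *mac = addr;

	if (!is_valid_ether_addr(mac->sa_data))
		return -EADDRNOTAVAIL;
	eth_hw_addr_set(dev, mac->sa_data);
	demo_program_mac_filter(dev);	/* push the new address to hardware */
	return 0;
}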
5598 if (promiscuous != priv->promiscuous) { in dev_set_promiscuous()
5599 u8 prev_state = hw->promiscuous; in dev_set_promiscuous()
5602 ++hw->promiscuous; in dev_set_promiscuous()
5604 --hw->promiscuous; in dev_set_promiscuous()
5605 priv->promiscuous = promiscuous; in dev_set_promiscuous()
5608 if (hw->promiscuous <= 1 && prev_state <= 1) in dev_set_promiscuous()
5609 hw_set_promiscuous(hw, hw->promiscuous); in dev_set_promiscuous()
5615 if ((hw->features & STP_SUPPORT) && !promiscuous && in dev_set_promiscuous()
5617 struct ksz_switch *sw = hw->ksz_switch; in dev_set_promiscuous()
5618 int port = priv->port.first_port; in dev_set_promiscuous()
5622 if (sw->member & port) { in dev_set_promiscuous()
5623 sw->member &= ~port; in dev_set_promiscuous()
5633 if (multicast != priv->multicast) { in dev_set_multicast()
5634 u8 all_multi = hw->all_multi; in dev_set_multicast()
5637 ++hw->all_multi; in dev_set_multicast()
5639 --hw->all_multi; in dev_set_multicast()
5640 priv->multicast = multicast; in dev_set_multicast()
5643 if (hw->all_multi <= 1 && all_multi <= 1) in dev_set_multicast()
5644 hw_set_multicast(hw, hw->all_multi); in dev_set_multicast()
5658 struct dev_info *hw_priv = priv->adapter; in netdev_set_rx_mode()
5659 struct ksz_hw *hw = &hw_priv->hw; in netdev_set_rx_mode()
5661 int multicast = (dev->flags & IFF_ALLMULTI); in netdev_set_rx_mode()
5663 dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC)); in netdev_set_rx_mode()
5665 if (hw_priv->hw.dev_count > 1) in netdev_set_rx_mode()
5666 multicast |= (dev->flags & IFF_MULTICAST); in netdev_set_rx_mode()
5670 if (hw_priv->hw.dev_count > 1) in netdev_set_rx_mode()
5673 if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) { in netdev_set_rx_mode()
5678 if (MAX_MULTICAST_LIST != hw->multi_list_size) { in netdev_set_rx_mode()
5679 hw->multi_list_size = MAX_MULTICAST_LIST; in netdev_set_rx_mode()
5680 ++hw->all_multi; in netdev_set_rx_mode()
5681 hw_set_multicast(hw, hw->all_multi); in netdev_set_rx_mode()
5689 memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN); in netdev_set_rx_mode()
5691 hw->multi_list_size = (u8) i; in netdev_set_rx_mode()
5694 if (MAX_MULTICAST_LIST == hw->multi_list_size) { in netdev_set_rx_mode()
5695 --hw->all_multi; in netdev_set_rx_mode()
5696 hw_set_multicast(hw, hw->all_multi); in netdev_set_rx_mode()
5698 hw->multi_list_size = 0; in netdev_set_rx_mode()
5706 struct dev_info *hw_priv = priv->adapter; in netdev_change_mtu()
5707 struct ksz_hw *hw = &hw_priv->hw; in netdev_change_mtu()
5711 return -EBUSY; in netdev_change_mtu()
5714 if (hw->dev_count > 1) in netdev_change_mtu()
5715 if (dev != hw_priv->dev) in netdev_change_mtu()
5720 hw->features |= RX_HUGE_FRAME; in netdev_change_mtu()
5723 hw->features &= ~RX_HUGE_FRAME; in netdev_change_mtu()
5727 hw_priv->mtu = hw_mtu; in netdev_change_mtu()
5728 dev->mtu = new_mtu; in netdev_change_mtu()
5734 * netdev_ioctl - I/O control processing
5746 struct dev_info *hw_priv = priv->adapter; in netdev_ioctl()
5747 struct ksz_hw *hw = &hw_priv->hw; in netdev_ioctl()
5748 struct ksz_port *port = &priv->port; in netdev_ioctl()
5752 if (down_interruptible(&priv->proc_sem)) in netdev_ioctl()
5753 return -ERESTARTSYS; in netdev_ioctl()
5756 /* Get address of MII PHY in use. */ in netdev_ioctl()
5758 data->phy_id = priv->id; in netdev_ioctl()
5761 /* Read MII PHY register. */ in netdev_ioctl()
5763 if (data->phy_id != priv->id || data->reg_num >= 6) in netdev_ioctl()
5764 result = -EIO; in netdev_ioctl()
5766 hw_r_phy(hw, port->linked->port_id, data->reg_num, in netdev_ioctl()
5767 &data->val_out); in netdev_ioctl()
5770 /* Write MII PHY register. */ in netdev_ioctl()
5773 result = -EPERM; in netdev_ioctl()
5774 else if (data->phy_id != priv->id || data->reg_num >= 6) in netdev_ioctl()
5775 result = -EIO; in netdev_ioctl()
5777 hw_w_phy(hw, port->linked->port_id, data->reg_num, in netdev_ioctl()
5778 data->val_in); in netdev_ioctl()
5782 result = -EOPNOTSUPP; in netdev_ioctl()
5785 up(&priv->proc_sem); in netdev_ioctl()
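/*
 * Editorial sketch (userspace, not driver code): exercising the
 * SIOCGMIIPHY/SIOCGMIIREG path served by netdev_ioctl() above.  The
 * interface name "eth0" is an assumption; note the driver rejects
 * reg_num >= 6.
 */
#include <linux/mii.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {	/* fills mii->phy_id */
		mii->reg_num = MII_BMSR;		/* register 1: status */
		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
			printf("BMSR = 0x%04x\n", mii->val_out);
	}
	close(fd);
	return 0;
}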
5795 * mdio_read - read PHY register
5797 * @phy_id: The PHY id.
5800 * This function returns the PHY register value.
5807 struct ksz_port *port = &priv->port; in mdio_read()
5808 struct ksz_hw *hw = port->hw; in mdio_read()
5811 hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out); in mdio_read()
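/*
 * Editorial note: the shift above converts an MII register number into
 * a byte offset -- hw_r_phy()/hw_w_phy() address the per-port PHY
 * register bank in bytes, while MII registers are 16 bits wide, so
 * register n lives at byte offset n * 2.
 */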
5816 * mdio_write - set PHY register
5818 * @phy_id: The PHY id.
5822 * This procedure sets the PHY register value.
5827 struct ksz_port *port = &priv->port; in mdio_write()
5828 struct ksz_hw *hw = port->hw; in mdio_write()
5832 for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++) in mdio_write()
5853 * netdev_get_link_ksettings - get network device settings
5857 * This function queries the PHY and returns its state in the ethtool command.
5865 struct dev_info *hw_priv = priv->adapter; in netdev_get_link_ksettings()
5867 mutex_lock(&hw_priv->lock); in netdev_get_link_ksettings()
5868 mii_ethtool_get_link_ksettings(&priv->mii_if, cmd); in netdev_get_link_ksettings()
5870 mutex_unlock(&hw_priv->lock); in netdev_get_link_ksettings()
5872 /* Save the advertised settings for the workaround in netdev_set_link_ksettings(). */ in netdev_get_link_ksettings()
5873 ethtool_convert_link_mode_to_legacy_u32(&priv->advertising, in netdev_get_link_ksettings()
5874 cmd->link_modes.advertising); in netdev_get_link_ksettings()
5880 * netdev_set_link_ksettings - set network device settings
5884 * This function sets the PHY according to the ethtool command.
5892 struct dev_info *hw_priv = priv->adapter; in netdev_set_link_ksettings()
5893 struct ksz_port *port = &priv->port; in netdev_set_link_ksettings()
5895 u32 speed = cmd->base.speed; in netdev_set_link_ksettings()
5900 cmd->link_modes.advertising); in netdev_set_link_ksettings()
5906 if (cmd->base.autoneg && priv->advertising == advertising) { in netdev_set_link_ksettings()
5916 if (0 == cmd->base.duplex) in netdev_set_link_ksettings()
5920 else if (1 == cmd->base.duplex) in netdev_set_link_ksettings()
5925 mutex_lock(&hw_priv->lock); in netdev_set_link_ksettings()
5926 if (cmd->base.autoneg && in netdev_set_link_ksettings()
5928 port->duplex = 0; in netdev_set_link_ksettings()
5929 port->speed = 0; in netdev_set_link_ksettings()
5930 port->force_link = 0; in netdev_set_link_ksettings()
5932 port->duplex = cmd->base.duplex + 1; in netdev_set_link_ksettings()
5934 port->speed = speed; in netdev_set_link_ksettings()
5935 if (cmd->base.autoneg) in netdev_set_link_ksettings()
5936 port->force_link = 0; in netdev_set_link_ksettings()
5938 port->force_link = 1; in netdev_set_link_ksettings()
5945 &priv->mii_if, in netdev_set_link_ksettings()
5947 mutex_unlock(&hw_priv->lock); in netdev_set_link_ksettings()
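/*
 * Editorial note: port->duplex uses an encoding offset by one from
 * ethtool's (0 = auto-detect, DUPLEX_HALF + 1 = forced half,
 * DUPLEX_FULL + 1 = forced full), which is why the forced path above
 * stores cmd->base.duplex + 1.
 */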
5952 * netdev_nway_reset - restart auto-negotiation
5955 * This function restarts the PHY for auto-negotiation.
5962 struct dev_info *hw_priv = priv->adapter; in netdev_nway_reset()
5965 mutex_lock(&hw_priv->lock); in netdev_nway_reset()
5966 rc = mii_nway_restart(&priv->mii_if); in netdev_nway_reset()
5967 mutex_unlock(&hw_priv->lock); in netdev_nway_reset()
5972 * netdev_get_link - get network device link status
5975 * This function gets the link status from the PHY.
5977 * Return true if PHY is linked and false otherwise.
5984 rc = mii_link_ok(&priv->mii_if); in netdev_get_link()
5989 * netdev_get_drvinfo - get network driver information
5999 struct dev_info *hw_priv = priv->adapter; in netdev_get_drvinfo()
6001 strscpy(info->driver, DRV_NAME, sizeof(info->driver)); in netdev_get_drvinfo()
6002 strscpy(info->version, DRV_VERSION, sizeof(info->version)); in netdev_get_drvinfo()
6003 strscpy(info->bus_info, pci_name(hw_priv->pdev), in netdev_get_drvinfo()
6004 sizeof(info->bus_info)); in netdev_get_drvinfo()
6021 * netdev_get_regs_len - get length of register dump
6033 while (range->end > range->start) { in netdev_get_regs_len()
6034 regs_len += (range->end - range->start + 3) / 4 * 4; in netdev_get_regs_len()
6041 * netdev_get_regs - get register dump
6052 struct dev_info *hw_priv = priv->adapter; in netdev_get_regs()
6053 struct ksz_hw *hw = &hw_priv->hw; in netdev_get_regs()
6058 mutex_lock(&hw_priv->lock); in netdev_get_regs()
6059 regs->version = 0; in netdev_get_regs()
6060 for (len = 0; len < 0x40; len += 4) { in netdev_get_regs()
6061 pci_read_config_dword(hw_priv->pdev, len, buf); in netdev_get_regs()
6064 while (range->end > range->start) { in netdev_get_regs()
6065 for (len = range->start; len < range->end; len += 4) { in netdev_get_regs()
6066 *buf = readl(hw->io + len); in netdev_get_regs()
6071 mutex_unlock(&hw_priv->lock); in netdev_get_regs()
6080 * netdev_get_wol - get Wake-on-LAN support
6082 * @wol: Ethtool Wake-on-LAN data structure.
6084 * This procedure returns Wake-on-LAN support.
6090 struct dev_info *hw_priv = priv->adapter; in netdev_get_wol()
6092 wol->supported = hw_priv->wol_support; in netdev_get_wol()
6093 wol->wolopts = hw_priv->wol_enable; in netdev_get_wol()
6094 memset(&wol->sopass, 0, sizeof(wol->sopass)); in netdev_get_wol()
6098 * netdev_set_wol - set Wake-on-LAN support
6100 * @wol: Ethtool Wake-on-LAN data structure.
6102 * This function sets Wake-on-LAN support.
6110 struct dev_info *hw_priv = priv->adapter; in netdev_set_wol()
6112 /* TODO: find a way to retrieve the device IP address. */ in netdev_set_wol()
6115 if (wol->wolopts & ~hw_priv->wol_support) in netdev_set_wol()
6116 return -EINVAL; in netdev_set_wol()
6118 hw_priv->wol_enable = wol->wolopts; in netdev_set_wol()
6121 if (wol->wolopts) in netdev_set_wol()
6122 hw_priv->wol_enable |= WAKE_PHY; in netdev_set_wol()
6123 hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr); in netdev_set_wol()
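/*
 * Editorial sketch (userspace): querying the Wake-on-LAN hooks above
 * through the ethtool ioctl.  Interface name "eth0" is an assumption;
 * "ethtool -s eth0 wol g" drives the same netdev_set_wol() path.
 */
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&wol;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("WoL supported 0x%x, enabled 0x%x\n",
		       wol.supported, wol.wolopts);
	close(fd);
	return 0;
}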
6128 * netdev_get_msglevel - get debug message level
6139 return priv->msg_enable; in netdev_get_msglevel()
6143 * netdev_set_msglevel - set debug message level
6153 priv->msg_enable = value; in netdev_set_msglevel()
6157 * netdev_get_eeprom_len - get EEPROM length
6172 * netdev_get_eeprom - get EEPROM data
6185 struct dev_info *hw_priv = priv->adapter; in netdev_get_eeprom()
6190 len = (eeprom->offset + eeprom->len + 1) / 2; in netdev_get_eeprom()
6191 for (i = eeprom->offset / 2; i < len; i++) in netdev_get_eeprom()
6192 eeprom_data[i] = eeprom_read(&hw_priv->hw, i); in netdev_get_eeprom()
6193 eeprom->magic = EEPROM_MAGIC; in netdev_get_eeprom()
6194 memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len); in netdev_get_eeprom()
6200 * netdev_set_eeprom - write EEPROM data
6213 struct dev_info *hw_priv = priv->adapter; in netdev_set_eeprom()
6219 if (eeprom->magic != EEPROM_MAGIC) in netdev_set_eeprom()
6220 return -EINVAL; in netdev_set_eeprom()
6222 len = (eeprom->offset + eeprom->len + 1) / 2; in netdev_set_eeprom()
6223 for (i = eeprom->offset / 2; i < len; i++) in netdev_set_eeprom()
6224 eeprom_data[i] = eeprom_read(&hw_priv->hw, i); in netdev_set_eeprom()
6226 memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len); in netdev_set_eeprom()
6227 for (i = 0; i < EEPROM_SIZE; i++) in netdev_set_eeprom()
6230 eeprom_write(&hw_priv->hw, i, eeprom_data[i]); in netdev_set_eeprom()
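/*
 * Editorial sketch: the EEPROM is addressed in 16-bit words, so the
 * byte-granular ethtool write above becomes read-modify-write: fetch
 * the covered words, patch the byte view, write back the affected
 * words.  Reduced model (demo_eeprom_write_word() is a hypothetical
 * stand-in for the hardware write):
 */
#include <string.h>

static void demo_eeprom_write_word(unsigned int word_idx, unsigned short val)
{
	(void)word_idx; (void)val;	/* hardware access elided in sketch */
}

static void demo_eeprom_patch(unsigned short *words, unsigned int offset,
			      const unsigned char *data, unsigned int len)
{
	unsigned char *bytes = (unsigned char *)words;	/* LE byte view */
	unsigned int i;
	unsigned int first = offset / 2;
	unsigned int last = (offset + len + 1) / 2;

	memcpy(bytes + offset, data, len);
	for (i = first; i < last; i++)
		demo_eeprom_write_word(i, words[i]);
}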
6237 * netdev_get_pauseparam - get flow control parameters
6247 struct dev_info *hw_priv = priv->adapter; in netdev_get_pauseparam()
6248 struct ksz_hw *hw = &hw_priv->hw; in netdev_get_pauseparam()
6250 pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1; in netdev_get_pauseparam()
6251 if (!hw->ksz_switch) { in netdev_get_pauseparam()
6252 pause->rx_pause = in netdev_get_pauseparam()
6253 (hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0; in netdev_get_pauseparam()
6254 pause->tx_pause = in netdev_get_pauseparam()
6255 (hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0; in netdev_get_pauseparam()
6257 pause->rx_pause = in netdev_get_pauseparam()
6260 pause->tx_pause = in netdev_get_pauseparam()
6267 * netdev_set_pauseparam - set flow control parameters
6280 struct dev_info *hw_priv = priv->adapter; in netdev_set_pauseparam()
6281 struct ksz_hw *hw = &hw_priv->hw; in netdev_set_pauseparam()
6282 struct ksz_port *port = &priv->port; in netdev_set_pauseparam()
6284 mutex_lock(&hw_priv->lock); in netdev_set_pauseparam()
6285 if (pause->autoneg) { in netdev_set_pauseparam()
6286 if (!pause->rx_pause && !pause->tx_pause) in netdev_set_pauseparam()
6287 port->flow_ctrl = PHY_NO_FLOW_CTRL; in netdev_set_pauseparam()
6289 port->flow_ctrl = PHY_FLOW_CTRL; in netdev_set_pauseparam()
6290 hw->overrides &= ~PAUSE_FLOW_CTRL; in netdev_set_pauseparam()
6291 port->force_link = 0; in netdev_set_pauseparam()
6292 if (hw->ksz_switch) { in netdev_set_pauseparam()
6300 hw->overrides |= PAUSE_FLOW_CTRL; in netdev_set_pauseparam()
6301 if (hw->ksz_switch) { in netdev_set_pauseparam()
6303 SWITCH_RX_FLOW_CTRL, pause->rx_pause); in netdev_set_pauseparam()
6305 SWITCH_TX_FLOW_CTRL, pause->tx_pause); in netdev_set_pauseparam()
6307 set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause); in netdev_set_pauseparam()
6309 mutex_unlock(&hw_priv->lock); in netdev_set_pauseparam()
6315 * netdev_get_ringparam - get tx/rx ring parameters
6329 struct dev_info *hw_priv = priv->adapter; in netdev_get_ringparam()
6330 struct ksz_hw *hw = &hw_priv->hw; in netdev_get_ringparam()
6332 ring->tx_max_pending = (1 << 9); in netdev_get_ringparam()
6333 ring->tx_pending = hw->tx_desc_info.alloc; in netdev_get_ringparam()
6334 ring->rx_max_pending = (1 << 9); in netdev_get_ringparam()
6335 ring->rx_pending = hw->rx_desc_info.alloc; in netdev_get_ringparam()
6382 * netdev_get_strings - get statistics identity strings
6392 struct dev_info *hw_priv = priv->adapter; in netdev_get_strings()
6393 struct ksz_hw *hw = &hw_priv->hw; in netdev_get_strings()
6397 ETH_GSTRING_LEN * hw->mib_cnt); in netdev_get_strings()
6401 * netdev_get_sset_count - get statistics size
6412 struct dev_info *hw_priv = priv->adapter; in netdev_get_sset_count()
6413 struct ksz_hw *hw = &hw_priv->hw; in netdev_get_sset_count()
6417 return hw->mib_cnt; in netdev_get_sset_count()
6419 return -EOPNOTSUPP; in netdev_get_sset_count()
6424 * netdev_get_ethtool_stats - get network device statistics
6435 struct dev_info *hw_priv = priv->adapter; in netdev_get_ethtool_stats()
6436 struct ksz_hw *hw = &hw_priv->hw; in netdev_get_ethtool_stats()
6437 struct ksz_port *port = &priv->port; in netdev_get_ethtool_stats()
6438 int n_stats = stats->n_stats; in netdev_get_ethtool_stats()
6444 mutex_lock(&hw_priv->lock); in netdev_get_ethtool_stats()
6446 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) { in netdev_get_ethtool_stats()
6447 if (media_connected == hw->port_mib[p].state) { in netdev_get_ethtool_stats()
6448 hw_priv->counter[p].read = 1; in netdev_get_ethtool_stats()
6455 mutex_unlock(&hw_priv->lock); in netdev_get_ethtool_stats()
6458 schedule_work(&hw_priv->mib_read); in netdev_get_ethtool_stats()
6460 if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) { in netdev_get_ethtool_stats()
6463 hw_priv->counter[p].counter, in netdev_get_ethtool_stats()
6464 2 == hw_priv->counter[p].read, in netdev_get_ethtool_stats()
6467 for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) { in netdev_get_ethtool_stats()
6470 hw_priv->counter[p].counter, in netdev_get_ethtool_stats()
6471 2 == hw_priv->counter[p].read, in netdev_get_ethtool_stats()
6473 } else if (hw->port_mib[p].cnt_ptr) { in netdev_get_ethtool_stats()
6475 hw_priv->counter[p].counter, in netdev_get_ethtool_stats()
6476 2 == hw_priv->counter[p].read, in netdev_get_ethtool_stats()
6481 get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter); in netdev_get_ethtool_stats()
6482 n = hw->mib_cnt; in netdev_get_ethtool_stats()
6485 n_stats -= n; in netdev_get_ethtool_stats()
6486 for (i = 0; i < n; i++) in netdev_get_ethtool_stats()
6491 * netdev_set_features - set receive checksum support
6503 struct dev_info *hw_priv = priv->adapter; in netdev_set_features()
6504 struct ksz_hw *hw = &hw_priv->hw; in netdev_set_features()
6506 mutex_lock(&hw_priv->lock); in netdev_set_features()
6510 hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP; in netdev_set_features()
6512 hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP); in netdev_set_features()
6514 if (hw->enabled) in netdev_set_features()
6515 writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL); in netdev_set_features()
6517 mutex_unlock(&hw_priv->lock); in netdev_set_features()
6552 if (priv->media_state != port->linked->state) { in update_link()
6553 priv->media_state = port->linked->state; in update_link()
6563 struct ksz_hw *hw = &hw_priv->hw; in mib_read_work()
6569 for (i = 0; i < hw->mib_port_cnt; i++) { in mib_read_work()
6570 mib = &hw->port_mib[i]; in mib_read_work()
6573 if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) { in mib_read_work()
6575 /* Bail out so a pending receive interrupt can be processed. */ in mib_read_work()
6578 hw_priv->counter[i].read = 0; in mib_read_work()
6581 if (0 == mib->cnt_ptr) { in mib_read_work()
6582 hw_priv->counter[i].read = 2; in mib_read_work()
6584 &hw_priv->counter[i].counter); in mib_read_work()
6586 } else if (time_after_eq(jiffies, hw_priv->counter[i].time)) { in mib_read_work()
6588 if (media_connected == mib->state) in mib_read_work()
6589 hw_priv->counter[i].read = 1; in mib_read_work()
6590 next_jiffies += HZ * 1 * hw->mib_port_cnt; in mib_read_work()
6591 hw_priv->counter[i].time = next_jiffies; in mib_read_work()
6594 } else if (mib->link_down) { in mib_read_work()
6595 mib->link_down = 0; in mib_read_work()
6598 hw_priv->counter[i].read = 1; in mib_read_work()
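/*
 * Editorial note: mib_read_work() above staggers the per-port counter
 * reads.  Each visited port's next deadline advances by
 * HZ * mib_port_cnt jiffies, so roughly one port is refreshed per
 * second and every port gets read once per mib_port_cnt seconds --
 * often enough that the hardware MIB counters are not left to overflow
 * unobserved.
 */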
6607 mib_read_work(&hw_priv->mib_read); in mib_monitor()
6609 /* Used to verify that Wake-on-LAN is working. */ in mib_monitor()
6610 if (hw_priv->pme_wait) { in mib_monitor()
6611 if (time_is_before_eq_jiffies(hw_priv->pme_wait)) { in mib_monitor()
6612 hw_clr_wol_pme_status(&hw_priv->hw); in mib_monitor()
6613 hw_priv->pme_wait = 0; in mib_monitor()
6615 } else if (hw_chk_wol_pme_status(&hw_priv->hw)) { in mib_monitor()
6618 hw_priv->pme_wait = jiffies + HZ * 2; in mib_monitor()
6621 ksz_update_timer(&hw_priv->mib_timer_info); in mib_monitor()
6625 * dev_monitor - periodic monitoring
6633 struct net_device *dev = priv->mii_if.dev; in dev_monitor()
6634 struct dev_info *hw_priv = priv->adapter; in dev_monitor()
6635 struct ksz_hw *hw = &hw_priv->hw; in dev_monitor()
6636 struct ksz_port *port = &priv->port; in dev_monitor()
6638 if (!(hw->features & LINK_INT_WORKING)) in dev_monitor()
6642 ksz_update_timer(&priv->monitor_timer_info); in dev_monitor()
6657 * This enables multiple network device mode for KSZ8842, which contains a
6659 * ports for running Spanning Tree Protocol. The driver will create an
6660 * additional eth? device for the other port.
6671 * that need the host's attention are passed to it. This prevents the host
6690 * netdev_init - initialize network device.
6702 ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000, in netdev_init()
6706 dev->watchdog_timeo = HZ / 2; in netdev_init()
6708 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM; in netdev_init()
6714 dev->hw_features |= NETIF_F_IPV6_CSUM; in netdev_init()
6716 dev->features |= dev->hw_features; in netdev_init()
6718 sema_init(&priv->proc_sem, 1); in netdev_init()
6720 priv->mii_if.phy_id_mask = 0x1; in netdev_init()
6721 priv->mii_if.reg_num_mask = 0x7; in netdev_init()
6722 priv->mii_if.dev = dev; in netdev_init()
6723 priv->mii_if.mdio_read = mdio_read; in netdev_init()
6724 priv->mii_if.mdio_write = mdio_write; in netdev_init()
6725 priv->mii_if.phy_id = priv->port.first_port + 1; in netdev_init()
6727 priv->msg_enable = netif_msg_init(msg_enable, in netdev_init()
6753 if (dev->watchdog_timeo) in netdev_free()
6792 hw_priv->hw.override_addr[j++] = (u8) num; in get_mac_addr()
6793 hw_priv->hw.override_addr[5] += in get_mac_addr()
6794 hw_priv->hw.id; in get_mac_addr()
6796 hw_priv->hw.ksz_switch->other_addr[j++] = in get_mac_addr()
6798 hw_priv->hw.ksz_switch->other_addr[5] += in get_mac_addr()
6799 hw_priv->hw.id; in get_mac_addr()
6807 hw_priv->hw.mac_override = 1; in get_mac_addr()
6817 struct ksz_switch *sw = hw->ksz_switch; in read_other_addr()
6819 for (i = 0; i < 3; i++) in read_other_addr()
6822 sw->other_addr[5] = (u8) data[0]; in read_other_addr()
6823 sw->other_addr[4] = (u8)(data[0] >> 8); in read_other_addr()
6824 sw->other_addr[3] = (u8) data[1]; in read_other_addr()
6825 sw->other_addr[2] = (u8)(data[1] >> 8); in read_other_addr()
6826 sw->other_addr[1] = (u8) data[2]; in read_other_addr()
6827 sw->other_addr[0] = (u8)(data[2] >> 8); in read_other_addr()
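/*
 * Editorial sketch: read_other_addr() above unpacks three little-endian
 * 16-bit EEPROM words into a MAC address back to front (word 0 holds
 * the last two octets).  Equivalent standalone helper:
 */
static void demo_words_to_mac(const unsigned short data[3],
			      unsigned char mac[6])
{
	int i;

	for (i = 0; i < 3; i++) {
		mac[5 - 2 * i] = data[i] & 0xff;	/* low byte  */
		mac[4 - 2 * i] = data[i] >> 8;		/* high byte */
	}
}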
6858 result = -ENODEV; in pcidev_init()
6860 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) || in pcidev_init()
6861 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) in pcidev_init()
6873 result = -ENOMEM; in pcidev_init()
6879 hw_priv = &info->dev_info; in pcidev_init()
6880 hw_priv->pdev = pdev; in pcidev_init()
6882 hw = &hw_priv->hw; in pcidev_init()
6884 hw->io = ioremap(reg_base, reg_len); in pcidev_init()
6885 if (!hw->io) in pcidev_init()
6892 result = -ENODEV; in pcidev_init()
6898 dev_info(&hw_priv->pdev->dev, "%s\n", banner); in pcidev_init()
6899 dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq); in pcidev_init()
6902 hw->dev_count = 1; in pcidev_init()
6905 hw->addr_list_size = 0; in pcidev_init()
6906 hw->mib_cnt = PORT_COUNTER_NUM; in pcidev_init()
6907 hw->mib_port_cnt = 1; in pcidev_init()
6912 hw->overrides |= FAST_AGING; in pcidev_init()
6914 hw->mib_cnt = TOTAL_PORT_COUNTER_NUM; in pcidev_init()
6918 hw->dev_count = SWITCH_PORT_NUM; in pcidev_init()
6919 hw->addr_list_size = SWITCH_PORT_NUM - 1; in pcidev_init()
6923 if (1 == hw->dev_count) { in pcidev_init()
6927 hw->mib_port_cnt = TOTAL_PORT_NUM; in pcidev_init()
6928 hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL); in pcidev_init()
6929 if (!hw->ksz_switch) in pcidev_init()
6932 sw = hw->ksz_switch; in pcidev_init()
6934 for (i = 0; i < hw->mib_port_cnt; i++) in pcidev_init()
6935 hw->port_mib[i].mib_start = 0; in pcidev_init()
6937 hw->parent = hw_priv; in pcidev_init()
6940 hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3; in pcidev_init()
6945 hw_priv->hw.id = net_device_present; in pcidev_init()
6947 spin_lock_init(&hw_priv->hwlock); in pcidev_init()
6948 mutex_init(&hw_priv->lock); in pcidev_init()
6950 for (i = 0; i < TOTAL_PORT_NUM; i++) in pcidev_init()
6951 init_waitqueue_head(&hw_priv->counter[i].counter); in pcidev_init()
6960 if (hw->dev_count > 1) { in pcidev_init()
6961 memcpy(sw->other_addr, hw->override_addr, ETH_ALEN); in pcidev_init()
6968 if (hw->ksz_switch) in pcidev_init()
6971 hw_priv->wol_support = WOL_SUPPORT; in pcidev_init()
6972 hw_priv->wol_enable = 0; in pcidev_init()
6975 INIT_WORK(&hw_priv->mib_read, mib_read_work); in pcidev_init()
6978 ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000, in pcidev_init()
6981 for (i = 0; i < hw->dev_count; i++) { in pcidev_init()
6985 SET_NETDEV_DEV(dev, &pdev->dev); in pcidev_init()
6986 info->netdev[i] = dev; in pcidev_init()
6989 priv->adapter = hw_priv; in pcidev_init()
6990 priv->id = net_device_present++; in pcidev_init()
6992 port = &priv->port; in pcidev_init()
6993 port->port_cnt = port_count; in pcidev_init()
6994 port->mib_port_cnt = mib_port_count; in pcidev_init()
6995 port->first_port = i; in pcidev_init()
6996 port->flow_ctrl = PHY_FLOW_CTRL; in pcidev_init()
6998 port->hw = hw; in pcidev_init()
6999 port->linked = &hw->port_info[port->first_port]; in pcidev_init()
7001 for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) { in pcidev_init()
7002 hw->port_info[pi].port_id = pi; in pcidev_init()
7003 hw->port_info[pi].pdev = dev; in pcidev_init()
7004 hw->port_info[pi].state = media_disconnected; in pcidev_init()
7007 dev->mem_start = (unsigned long) hw->io; in pcidev_init()
7008 dev->mem_end = dev->mem_start + reg_len - 1; in pcidev_init()
7009 dev->irq = pdev->irq; in pcidev_init()
7011 eth_hw_addr_set(dev, hw_priv->hw.override_addr); in pcidev_init()
7015 ether_addr_copy(addr, sw->other_addr); in pcidev_init()
7016 if (ether_addr_equal(sw->other_addr, hw->override_addr)) in pcidev_init()
7017 addr[5] += port->first_port; in pcidev_init()
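/* Editorial note: adding the port index to the last octet keeps the
 * second device's MAC unique while staying in the same vendor OUI; the
 * u8 addition simply wraps mod 256.
 */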
7021 dev->netdev_ops = &netdev_ops; in pcidev_init()
7022 dev->ethtool_ops = &netdev_ethtool_ops; in pcidev_init()
7024 /* MTU range: 60 - 1894 */ in pcidev_init()
7025 dev->min_mtu = ETH_ZLEN; in pcidev_init()
7026 dev->max_mtu = MAX_RX_BUF_SIZE - in pcidev_init()
7034 pci_dev_get(hw_priv->pdev); in pcidev_init()
7039 for (i = 0; i < hw->dev_count; i++) { in pcidev_init()
7040 if (info->netdev[i]) { in pcidev_init()
7041 netdev_free(info->netdev[i]); in pcidev_init()
7042 info->netdev[i] = NULL; in pcidev_init()
7048 kfree(hw->ksz_switch); in pcidev_init()
7051 iounmap(hw->io); in pcidev_init()
7066 struct dev_info *hw_priv = &info->dev_info; in pcidev_exit()
7070 for (i = 0; i < hw_priv->hw.dev_count; i++) { in pcidev_exit()
7071 if (info->netdev[i]) in pcidev_exit()
7072 netdev_free(info->netdev[i]); in pcidev_exit()
7074 if (hw_priv->hw.io) in pcidev_exit()
7075 iounmap(hw_priv->hw.io); in pcidev_exit()
7077 kfree(hw_priv->hw.ksz_switch); in pcidev_exit()
7078 pci_dev_put(hw_priv->pdev); in pcidev_exit()
7086 struct dev_info *hw_priv = &info->dev_info; in pcidev_resume()
7087 struct ksz_hw *hw = &hw_priv->hw; in pcidev_resume()
7091 if (hw_priv->wol_enable) in pcidev_resume()
7093 for (i = 0; i < hw->dev_count; i++) { in pcidev_resume()
7094 if (info->netdev[i]) { in pcidev_resume()
7095 struct net_device *dev = info->netdev[i]; in pcidev_resume()
7110 struct dev_info *hw_priv = &info->dev_info; in pcidev_suspend()
7111 struct ksz_hw *hw = &hw_priv->hw; in pcidev_suspend()
7113 /* TODO: find a way to retrieve the device IP address. */ in pcidev_suspend()
7116 for (i = 0; i < hw->dev_count; i++) { in pcidev_suspend()
7117 if (info->netdev[i]) { in pcidev_suspend()
7118 struct net_device *dev = info->netdev[i]; in pcidev_suspend()
7126 if (hw_priv->wol_enable) { in pcidev_suspend()
7127 hw_enable_wol(hw, hw_priv->wol_enable, net_addr); in pcidev_suspend()