Lines Matching +full:ether +full:- +full:r8a7790 (device-tree compatible search, drivers/net/ethernet/renesas/sh_eth.c)

1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
6 * Copyright (C) 2008-2014 Renesas Solutions Corp.
7 * Copyright (C) 2013-2017 Cogent Embedded, Inc.
15 #include <linux/dma-mapping.h>
19 #include <linux/mdio-bitbang.h>
46 [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
55 __diag_ignore(GCC, 8, "-Woverride-init",
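The range designator a few lines up fills the whole register-offset table with SH_ETH_OFFSET_INVALID, and the per-register entries that follow override those defaults; that override is exactly what GCC's -Woverride-init complains about, hence the __diag_ignore(). A minimal sketch of the idiom (the offsets here are illustrative, not the driver's real values):

```c
#define SH_ETH_OFFSET_INVALID	((u16)~0)

/* Default every slot to "invalid", then override the registers this
 * variant actually implements; GCC >= 8 warns on each override.
 */
static const u16 example_reg_offset[SH_ETH_MAX_REGISTER_OFFSET] = {
	[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID,
	[EDMR]	= 0x0000,	/* hypothetical offsets for illustration */
	[EDTRR]	= 0x0008,
	[EDRRR]	= 0x0010,
};
```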
352 u16 offset = mdp->reg_offset[enum_index]; in sh_eth_write()
357 iowrite32(data, mdp->addr + offset); in sh_eth_write()
363 u16 offset = mdp->reg_offset[enum_index]; in sh_eth_read()
368 return ioread32(mdp->addr + offset); in sh_eth_read()
380 return mdp->reg_offset[enum_index]; in sh_eth_tsu_get_offset()
391 iowrite32(data, mdp->tsu_addr + offset); in sh_eth_tsu_write()
401 return ioread32(mdp->tsu_addr + offset); in sh_eth_tsu_read()
420 switch (mdp->phy_interface) { in sh_eth_select_mii()
447 sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0); in sh_eth_set_duplex()
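sh_eth_set_duplex() flips the duplex bit through sh_eth_modify(), a read-modify-write wrapper over the sh_eth_read()/sh_eth_write() accessors above. A sketch consistent with that call:

```c
/* Clear the bits in @clear, then set the bits in @set, in one RMW cycle */
static void sh_eth_modify(struct net_device *ndev, int enum_index,
			  u32 clear, u32 set)
{
	sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
		     enum_index);
}
```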
472 for (cnt = 100; cnt > 0; cnt--) { in sh_eth_check_soft_reset()
479 return -ETIMEDOUT; in sh_eth_check_soft_reset()
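The countdown loop with the -ETIMEDOUT fallback is a plain poll-with-timeout on the soft-reset bit, which the hardware clears by itself once the reset completes. A sketch of the helper, assuming GETHER-style self-clearing reset bits in EDMR and a 1 ms poll interval:

```c
static int sh_eth_check_soft_reset(struct net_device *ndev)
{
	int cnt;

	/* Wait up to ~100 ms for the self-clearing reset bit to drop */
	for (cnt = 100; cnt > 0; cnt--) {
		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
			return 0;
		mdelay(1);
	}

	netdev_err(ndev, "Device reset failed\n");
	return -ETIMEDOUT;
}
```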
505 if (mdp->cd->csmr) in sh_eth_soft_reset_gether()
509 if (mdp->cd->select_mii) in sh_eth_soft_reset_gether()
519 if (WARN_ON(!mdp->cd->gecmr)) in sh_eth_set_rate_gether()
522 switch (mdp->speed) { in sh_eth_set_rate_gether()
638 switch (mdp->speed) { in sh_eth_set_rate_rcar()
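On R-Car parts the rate change only has to flip the E-MAC link-speed bit, so the switch above likely reduces to two sh_eth_modify() calls. A sketch, assuming ECMR_ELB selects 100 Mbit/s:

```c
static void sh_eth_set_rate_rcar(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10:	/* 10BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
		break;
	case 100:	/* 100BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
		break;
	}
}
```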
648 /* R-Car Gen1 */
679 /* R-Car Gen2 and RZ/G1 */
801 switch (mdp->speed) { in sh_eth_set_rate_sh7724()
845 switch (mdp->speed) { in sh_eth_set_rate_sh7757()
916 if (WARN_ON(!mdp->cd->gecmr)) in sh_eth_set_rate_giga()
919 switch (mdp->speed) { in sh_eth_set_rate_giga()
1105 if (!cd->ecsr_value) in sh_eth_set_default_cpu_data()
1106 cd->ecsr_value = DEFAULT_ECSR_INIT; in sh_eth_set_default_cpu_data()
1108 if (!cd->ecsipr_value) in sh_eth_set_default_cpu_data()
1109 cd->ecsipr_value = DEFAULT_ECSIPR_INIT; in sh_eth_set_default_cpu_data()
1111 if (!cd->fcftr_value) in sh_eth_set_default_cpu_data()
1112 cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | in sh_eth_set_default_cpu_data()
1115 if (!cd->fdr_value) in sh_eth_set_default_cpu_data()
1116 cd->fdr_value = DEFAULT_FDR_INIT; in sh_eth_set_default_cpu_data()
1118 if (!cd->tx_check) in sh_eth_set_default_cpu_data()
1119 cd->tx_check = DEFAULT_TX_CHECK; in sh_eth_set_default_cpu_data()
1121 if (!cd->eesr_err_check) in sh_eth_set_default_cpu_data()
1122 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; in sh_eth_set_default_cpu_data()
1124 if (!cd->trscer_err_mask) in sh_eth_set_default_cpu_data()
1125 cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK; in sh_eth_set_default_cpu_data()
1130 uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1); in sh_eth_set_receive_align()
1133 skb_reserve(skb, SH_ETH_RX_ALIGN - reserve); in sh_eth_set_receive_align()
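Because the alignment is a power of two, the AND extracts how far skb->data sits past an aligned boundary and skb_reserve() pushes it up to the next one. A worked example, assuming SH_ETH_RX_ALIGN is 32:

```c
/* skb->data == ...0x1004:
 *   reserve = 0x1004 & (32 - 1) = 4
 *   skb_reserve(skb, 32 - 4)  ->  skb->data == ...0x1020 (32-byte aligned)
 * When skb->data is already aligned, reserve == 0 and no bytes are reserved.
 */
```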
1136 /* Program the hardware MAC address from dev->dev_addr. */
1140 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | in update_mac_address()
1141 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); in update_mac_address()
1143 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); in update_mac_address()
1149 * This driver gets the MAC address that was set by the bootloader (U-Boot or sh-ipl+g).
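update_mac_address() above packs bytes 0-3 of the address into MAHR and bytes 4-5 into the low half of MALR; recovering the bootloader-programmed address is the mirror operation. A minimal sketch, assuming an invalid platform-data address means "read back from the hardware":

```c
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (is_valid_ether_addr(mac)) {
		eth_hw_addr_set(ndev, mac);	/* platform data wins */
	} else {
		u32 mahr = sh_eth_read(ndev, MAHR);
		u32 malr = sh_eth_read(ndev, MALR);
		u8 addr[ETH_ALEN];

		addr[0] = (mahr >> 24) & 0xFF;	/* inverse of the packing above */
		addr[1] = (mahr >> 16) & 0xFF;
		addr[2] = (mahr >>  8) & 0xFF;
		addr[3] = (mahr >>  0) & 0xFF;
		addr[4] = (malr >>  8) & 0xFF;
		addr[5] = (malr >>  0) & 0xFF;
		eth_hw_addr_set(ndev, addr);
	}
}
```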
1183 if (bitbang->set_gate) in sh_mdio_ctrl()
1184 bitbang->set_gate(bitbang->addr); in sh_mdio_ctrl()
1186 pir = ioread32(bitbang->addr); in sh_mdio_ctrl()
1191 iowrite32(pir, bitbang->addr); in sh_mdio_ctrl()
1211 if (bitbang->set_gate) in sh_get_mdio()
1212 bitbang->set_gate(bitbang->addr); in sh_get_mdio()
1214 return (ioread32(bitbang->addr) & PIR_MDI) != 0; in sh_get_mdio()
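sh_mdio_ctrl() and sh_get_mdio() are the bit-banging primitives over the single PIR register; the mdio-bitbang core drives them through an mdiobb_ops table. A sketch of the glue, with the per-pin wrappers assumed to pass the matching PIR bit masks:

```c
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MDC, bit);	/* clock pin */
}

static const struct mdiobb_ops bb_ops = {
	.owner		= THIS_MODULE,
	.set_mdc	= sh_mdc_ctrl,	/* MDC clock out */
	.set_mdio_dir	= sh_mmd_ctrl,	/* data-pin direction (PIR_MMD), assumed */
	.set_mdio_data	= sh_set_mdio,	/* data out (PIR_MDO), assumed */
	.get_mdio_data	= sh_get_mdio,	/* data in (PIR_MDI), shown above */
};
```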
1241 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { in sh_eth_tx_free()
1242 entry = mdp->dirty_tx % mdp->num_tx_ring; in sh_eth_tx_free()
1243 txdesc = &mdp->tx_ring[entry]; in sh_eth_tx_free()
1244 sent = !(txdesc->status & cpu_to_le32(TD_TACT)); in sh_eth_tx_free()
1251 entry, le32_to_cpu(txdesc->status)); in sh_eth_tx_free()
1253 if (mdp->tx_skbuff[entry]) { in sh_eth_tx_free()
1254 dma_unmap_single(&mdp->pdev->dev, in sh_eth_tx_free()
1255 le32_to_cpu(txdesc->addr), in sh_eth_tx_free()
1256 le32_to_cpu(txdesc->len) >> 16, in sh_eth_tx_free()
1258 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); in sh_eth_tx_free()
1259 mdp->tx_skbuff[entry] = NULL; in sh_eth_tx_free()
1262 txdesc->status = cpu_to_le32(TD_TFP); in sh_eth_tx_free()
1263 if (entry >= mdp->num_tx_ring - 1) in sh_eth_tx_free()
1264 txdesc->status |= cpu_to_le32(TD_TDLE); in sh_eth_tx_free()
1267 ndev->stats.tx_packets++; in sh_eth_tx_free()
1268 ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16; in sh_eth_tx_free()
1280 if (mdp->rx_ring) { in sh_eth_ring_free()
1281 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_ring_free()
1282 if (mdp->rx_skbuff[i]) { in sh_eth_ring_free()
1283 struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i]; in sh_eth_ring_free()
1285 dma_unmap_single(&mdp->pdev->dev, in sh_eth_ring_free()
1286 le32_to_cpu(rxdesc->addr), in sh_eth_ring_free()
1287 ALIGN(mdp->rx_buf_sz, 32), in sh_eth_ring_free()
1291 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; in sh_eth_ring_free()
1292 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring, in sh_eth_ring_free()
1293 mdp->rx_desc_dma); in sh_eth_ring_free()
1294 mdp->rx_ring = NULL; in sh_eth_ring_free()
1298 if (mdp->rx_skbuff) { in sh_eth_ring_free()
1299 for (i = 0; i < mdp->num_rx_ring; i++) in sh_eth_ring_free()
1300 dev_kfree_skb(mdp->rx_skbuff[i]); in sh_eth_ring_free()
1302 kfree(mdp->rx_skbuff); in sh_eth_ring_free()
1303 mdp->rx_skbuff = NULL; in sh_eth_ring_free()
1305 if (mdp->tx_ring) { in sh_eth_ring_free()
1308 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; in sh_eth_ring_free()
1309 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring, in sh_eth_ring_free()
1310 mdp->tx_desc_dma); in sh_eth_ring_free()
1311 mdp->tx_ring = NULL; in sh_eth_ring_free()
1315 kfree(mdp->tx_skbuff); in sh_eth_ring_free()
1316 mdp->tx_skbuff = NULL; in sh_eth_ring_free()
1327 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; in sh_eth_ring_format()
1328 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; in sh_eth_ring_format()
1329 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; in sh_eth_ring_format()
1333 mdp->cur_rx = 0; in sh_eth_ring_format()
1334 mdp->cur_tx = 0; in sh_eth_ring_format()
1335 mdp->dirty_rx = 0; in sh_eth_ring_format()
1336 mdp->dirty_tx = 0; in sh_eth_ring_format()
1338 memset(mdp->rx_ring, 0, rx_ringsize); in sh_eth_ring_format()
1341 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_ring_format()
1343 mdp->rx_skbuff[i] = NULL; in sh_eth_ring_format()
1350 buf_len = ALIGN(mdp->rx_buf_sz, 32); in sh_eth_ring_format()
1351 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len, in sh_eth_ring_format()
1353 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { in sh_eth_ring_format()
1357 mdp->rx_skbuff[i] = skb; in sh_eth_ring_format()
1360 rxdesc = &mdp->rx_ring[i]; in sh_eth_ring_format()
1361 rxdesc->len = cpu_to_le32(buf_len << 16); in sh_eth_ring_format()
1362 rxdesc->addr = cpu_to_le32(dma_addr); in sh_eth_ring_format()
1363 rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP); in sh_eth_ring_format()
1367 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); in sh_eth_ring_format()
1368 if (mdp->cd->xdfar_rw) in sh_eth_ring_format()
1369 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); in sh_eth_ring_format()
1373 mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); in sh_eth_ring_format()
1377 rxdesc->status |= cpu_to_le32(RD_RDLE); in sh_eth_ring_format()
1379 memset(mdp->tx_ring, 0, tx_ringsize); in sh_eth_ring_format()
1382 for (i = 0; i < mdp->num_tx_ring; i++) { in sh_eth_ring_format()
1383 mdp->tx_skbuff[i] = NULL; in sh_eth_ring_format()
1384 txdesc = &mdp->tx_ring[i]; in sh_eth_ring_format()
1385 txdesc->status = cpu_to_le32(TD_TFP); in sh_eth_ring_format()
1386 txdesc->len = cpu_to_le32(0); in sh_eth_ring_format()
1389 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); in sh_eth_ring_format()
1390 if (mdp->cd->xdfar_rw) in sh_eth_ring_format()
1391 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); in sh_eth_ring_format()
1395 txdesc->status |= cpu_to_le32(TD_TDLE); in sh_eth_ring_format()
1409 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : in sh_eth_ring_init()
1410 (((ndev->mtu + 26 + 7) & ~7) + 2 + 16)); in sh_eth_ring_init()
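For an MTU above the 1492-byte cut-off, the expression rounds MTU plus L2 overhead up to a multiple of 8 and then adds a little slack. Worked numbers for a standard 1500-byte MTU:

```c
/* rx_buf_sz for ndev->mtu == 1500:
 *   ((1500 + 26 + 7) & ~7) + 2 + 16
 * = (1533 & ~7) + 18
 * = 1528 + 18
 * = 1546 bytes, later rounded again via ALIGN(mdp->rx_buf_sz, 32)
 *   for the DMA mapping (see sh_eth_ring_format() above).
 */
```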
1411 if (mdp->cd->rpadir) in sh_eth_ring_init()
1412 mdp->rx_buf_sz += NET_IP_ALIGN; in sh_eth_ring_init()
1415 mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff), in sh_eth_ring_init()
1417 if (!mdp->rx_skbuff) in sh_eth_ring_init()
1418 return -ENOMEM; in sh_eth_ring_init()
1420 mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff), in sh_eth_ring_init()
1422 if (!mdp->tx_skbuff) in sh_eth_ring_init()
1426 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; in sh_eth_ring_init()
1427 mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize, in sh_eth_ring_init()
1428 &mdp->rx_desc_dma, GFP_KERNEL); in sh_eth_ring_init()
1429 if (!mdp->rx_ring) in sh_eth_ring_init()
1432 mdp->dirty_rx = 0; in sh_eth_ring_init()
1435 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; in sh_eth_ring_init()
1436 mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize, in sh_eth_ring_init()
1437 &mdp->tx_desc_dma, GFP_KERNEL); in sh_eth_ring_init()
1438 if (!mdp->tx_ring) in sh_eth_ring_init()
1446 return -ENOMEM; in sh_eth_ring_init()
1455 ret = mdp->cd->soft_reset(ndev); in sh_eth_dev_init()
1459 if (mdp->cd->rmiimode) in sh_eth_dev_init()
1464 if (mdp->cd->rpadir) in sh_eth_dev_init()
1471 if (mdp->cd->hw_swap) in sh_eth_dev_init()
1478 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); in sh_eth_dev_init()
1481 /* Frame recv control (enable multiple-packets per rx irq) */ in sh_eth_dev_init()
1484 sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); in sh_eth_dev_init()
1487 if (mdp->cd->nbst) in sh_eth_dev_init()
1490 /* Burst cycle count upper-limit */ in sh_eth_dev_init()
1491 if (mdp->cd->bculr) in sh_eth_dev_init()
1494 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); in sh_eth_dev_init()
1496 if (!mdp->cd->no_trimd) in sh_eth_dev_init()
1500 sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, in sh_eth_dev_init()
1504 mdp->irq_enabled = true; in sh_eth_dev_init()
1505 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_dev_init()
1508 sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | in sh_eth_dev_init()
1509 (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) | in sh_eth_dev_init()
1512 if (mdp->cd->set_rate) in sh_eth_dev_init()
1513 mdp->cd->set_rate(ndev); in sh_eth_dev_init()
1515 /* E-MAC Status Register clear */ in sh_eth_dev_init()
1516 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); in sh_eth_dev_init()
1518 /* E-MAC Interrupt Enable register */ in sh_eth_dev_init()
1519 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); in sh_eth_dev_init()
1525 if (mdp->cd->apr) in sh_eth_dev_init()
1527 if (mdp->cd->mpr) in sh_eth_dev_init()
1529 if (mdp->cd->tpauser) in sh_eth_dev_init()
1546 for (i = 0; i < mdp->num_tx_ring; i++) in sh_eth_dev_exit()
1547 mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT); in sh_eth_dev_exit()
1562 mdp->cd->soft_reset(ndev); in sh_eth_dev_exit()
1565 if (mdp->cd->rmiimode) in sh_eth_dev_exit()
1577 if (unlikely(skb->len < sizeof(__sum16))) in sh_eth_rx_csum()
1579 hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); in sh_eth_rx_csum()
1580 skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); in sh_eth_rx_csum()
1581 skb->ip_summed = CHECKSUM_COMPLETE; in sh_eth_rx_csum()
1582 skb_trim(skb, skb->len - sizeof(__sum16)); in sh_eth_rx_csum()
1591 int entry = mdp->cur_rx % mdp->num_rx_ring; in sh_eth_rx()
1592 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; in sh_eth_rx()
1596 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; in sh_eth_rx()
1603 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1604 while (!(rxdesc->status & cpu_to_le32(RD_RACT))) { in sh_eth_rx()
1607 desc_status = le32_to_cpu(rxdesc->status); in sh_eth_rx()
1608 pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL; in sh_eth_rx()
1610 if (--boguscnt < 0) in sh_eth_rx()
1618 ndev->stats.rx_length_errors++; in sh_eth_rx()
1626 if (mdp->cd->csmr) in sh_eth_rx()
1629 skb = mdp->rx_skbuff[entry]; in sh_eth_rx()
1632 ndev->stats.rx_errors++; in sh_eth_rx()
1634 ndev->stats.rx_crc_errors++; in sh_eth_rx()
1636 ndev->stats.rx_frame_errors++; in sh_eth_rx()
1638 ndev->stats.rx_length_errors++; in sh_eth_rx()
1640 ndev->stats.rx_length_errors++; in sh_eth_rx()
1642 ndev->stats.rx_missed_errors++; in sh_eth_rx()
1644 ndev->stats.rx_over_errors++; in sh_eth_rx()
1646 dma_addr = le32_to_cpu(rxdesc->addr); in sh_eth_rx()
1647 if (!mdp->cd->hw_swap) in sh_eth_rx()
1651 mdp->rx_skbuff[entry] = NULL; in sh_eth_rx()
1652 if (mdp->cd->rpadir) in sh_eth_rx()
1654 dma_unmap_single(&mdp->pdev->dev, dma_addr, in sh_eth_rx()
1655 ALIGN(mdp->rx_buf_sz, 32), in sh_eth_rx()
1658 skb->protocol = eth_type_trans(skb, ndev); in sh_eth_rx()
1659 if (ndev->features & NETIF_F_RXCSUM) in sh_eth_rx()
1662 ndev->stats.rx_packets++; in sh_eth_rx()
1663 ndev->stats.rx_bytes += pkt_len; in sh_eth_rx()
1665 ndev->stats.multicast++; in sh_eth_rx()
1667 entry = (++mdp->cur_rx) % mdp->num_rx_ring; in sh_eth_rx()
1668 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1672 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { in sh_eth_rx()
1673 entry = mdp->dirty_rx % mdp->num_rx_ring; in sh_eth_rx()
1674 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1676 buf_len = ALIGN(mdp->rx_buf_sz, 32); in sh_eth_rx()
1677 rxdesc->len = cpu_to_le32(buf_len << 16); in sh_eth_rx()
1679 if (mdp->rx_skbuff[entry] == NULL) { in sh_eth_rx()
1684 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, in sh_eth_rx()
1686 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { in sh_eth_rx()
1690 mdp->rx_skbuff[entry] = skb; in sh_eth_rx()
1693 rxdesc->addr = cpu_to_le32(dma_addr); in sh_eth_rx()
1696 if (entry >= mdp->num_rx_ring - 1) in sh_eth_rx()
1697 rxdesc->status |= in sh_eth_rx()
1700 rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP); in sh_eth_rx()
1704 /* If we don't need to check status, don't. -KDU */ in sh_eth_rx()
1707 if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) { in sh_eth_rx()
1708 u32 count = (sh_eth_read(ndev, RDFAR) - in sh_eth_rx()
1711 mdp->cur_rx = count; in sh_eth_rx()
1712 mdp->dirty_rx = count; in sh_eth_rx()
1717 *quota -= limit - boguscnt - 1; in sh_eth_rx()
1734 /* E-MAC interrupt handler */
1744 ndev->stats.tx_carrier_errors++; in sh_eth_emac_interrupt()
1746 pm_wakeup_event(&mdp->pdev->dev, 0); in sh_eth_emac_interrupt()
1749 if (mdp->cd->no_psr || mdp->no_ether_link) in sh_eth_emac_interrupt()
1752 if (mdp->ether_link_active_low) in sh_eth_emac_interrupt()
1777 ndev->stats.tx_aborted_errors++; in sh_eth_error()
1786 ndev->stats.rx_frame_errors++; in sh_eth_error()
1792 ndev->stats.tx_fifo_errors++; in sh_eth_error()
1798 ndev->stats.tx_fifo_errors++; in sh_eth_error()
1804 ndev->stats.rx_over_errors++; in sh_eth_error()
1809 ndev->stats.rx_fifo_errors++; in sh_eth_error()
1812 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { in sh_eth_error()
1814 ndev->stats.tx_fifo_errors++; in sh_eth_error()
1819 if (mdp->cd->no_ade) in sh_eth_error()
1827 intr_status, mdp->cur_tx, mdp->dirty_tx, in sh_eth_error()
1828 (u32)ndev->state, edtrr); in sh_eth_error()
1833 if (edtrr ^ mdp->cd->edtrr_trns) { in sh_eth_error()
1835 sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); in sh_eth_error()
1846 struct sh_eth_cpu_data *cd = mdp->cd; in sh_eth_interrupt()
1850 spin_lock(&mdp->lock); in sh_eth_interrupt()
1862 if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI | in sh_eth_interrupt()
1863 cd->eesr_err_check)) in sh_eth_interrupt()
1868 if (unlikely(!mdp->irq_enabled)) { in sh_eth_interrupt()
1874 if (napi_schedule_prep(&mdp->napi)) { in sh_eth_interrupt()
1878 __napi_schedule(&mdp->napi); in sh_eth_interrupt()
1887 if (intr_status & cd->tx_check) { in sh_eth_interrupt()
1889 sh_eth_write(ndev, intr_status & cd->tx_check, EESR); in sh_eth_interrupt()
1895 /* E-MAC interrupt */ in sh_eth_interrupt()
1899 if (intr_status & cd->eesr_err_check) { in sh_eth_interrupt()
1901 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR); in sh_eth_interrupt()
1907 spin_unlock(&mdp->lock); in sh_eth_interrupt()
1916 struct net_device *ndev = napi->dev; in sh_eth_poll()
1934 if (mdp->irq_enabled) in sh_eth_poll()
1935 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_poll()
1937 return budget - quota; in sh_eth_poll()
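budget - quota is the NAPI contract: the poll callback returns how many packets it consumed, and only re-enables RX interrupts (the EESIPR write above) after completing below budget. A simplified sketch of the loop around sh_eth_rx(), with the exact interrupt masks elided:

```c
static int sh_eth_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int quota = budget;

	for (;;) {
		u32 intr_status = sh_eth_read(ndev, EESR);

		if (!(intr_status & EESR_RX_CHECK))
			break;
		/* Ack the Rx sources, then let sh_eth_rx() consume quota */
		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
		if (sh_eth_rx(ndev, intr_status, &quota))
			goto out;	/* budget exhausted: stay scheduled */
	}

	napi_complete(napi);

	if (mdp->irq_enabled)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
	return budget - quota;
}
```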
1944 struct phy_device *phydev = ndev->phydev; in sh_eth_adjust_link()
1948 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_adjust_link()
1950 /* Disable TX and RX right over here, if E-MAC change is ignored */ in sh_eth_adjust_link()
1951 if (mdp->cd->no_psr || mdp->no_ether_link) in sh_eth_adjust_link()
1954 if (phydev->link) { in sh_eth_adjust_link()
1955 if (phydev->duplex != mdp->duplex) { in sh_eth_adjust_link()
1957 mdp->duplex = phydev->duplex; in sh_eth_adjust_link()
1958 if (mdp->cd->set_duplex) in sh_eth_adjust_link()
1959 mdp->cd->set_duplex(ndev); in sh_eth_adjust_link()
1962 if (phydev->speed != mdp->speed) { in sh_eth_adjust_link()
1964 mdp->speed = phydev->speed; in sh_eth_adjust_link()
1965 if (mdp->cd->set_rate) in sh_eth_adjust_link()
1966 mdp->cd->set_rate(ndev); in sh_eth_adjust_link()
1968 if (!mdp->link) { in sh_eth_adjust_link()
1971 mdp->link = phydev->link; in sh_eth_adjust_link()
1973 } else if (mdp->link) { in sh_eth_adjust_link()
1975 mdp->link = 0; in sh_eth_adjust_link()
1976 mdp->speed = 0; in sh_eth_adjust_link()
1977 mdp->duplex = -1; in sh_eth_adjust_link()
1980 /* Enable TX and RX right over here, if E-MAC change is ignored */ in sh_eth_adjust_link()
1981 if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link) in sh_eth_adjust_link()
1984 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_adjust_link()
1993 struct device_node *np = ndev->dev.parent->of_node; in sh_eth_phy_init()
1997 mdp->link = 0; in sh_eth_phy_init()
1998 mdp->speed = 0; in sh_eth_phy_init()
1999 mdp->duplex = -1; in sh_eth_phy_init()
2005 pn = of_parse_phandle(np, "phy-handle", 0); in sh_eth_phy_init()
2008 mdp->phy_interface); in sh_eth_phy_init()
2012 phydev = ERR_PTR(-ENOENT); in sh_eth_phy_init()
2017 mdp->mii_bus->id, mdp->phy_id); in sh_eth_phy_init()
2020 mdp->phy_interface); in sh_eth_phy_init()
2029 if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) in sh_eth_phy_init()
2033 phydev->mac_managed_pm = true; in sh_eth_phy_init()
2048 phy_start(ndev->phydev); in sh_eth_phy_start()
2064 struct sh_eth_cpu_data *cd = mdp->cd; in __sh_eth_get_regs()
2088 if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) { \ in __sh_eth_get_regs()
2106 if (!cd->no_xdfar) in __sh_eth_get_regs()
2111 if (!cd->no_xdfar) in __sh_eth_get_regs()
2122 if (cd->rmiimode) in __sh_eth_get_regs()
2125 if (cd->rpadir) in __sh_eth_get_regs()
2127 if (!cd->no_trimd) in __sh_eth_get_regs()
2133 if (!cd->no_psr) in __sh_eth_get_regs()
2138 if (cd->apr) in __sh_eth_get_regs()
2140 if (cd->mpr) in __sh_eth_get_regs()
2144 if (cd->tpauser) in __sh_eth_get_regs()
2147 if (cd->gecmr) in __sh_eth_get_regs()
2149 if (cd->bculr) in __sh_eth_get_regs()
2153 if (!cd->no_tx_cntrs) { in __sh_eth_get_regs()
2163 if (cd->cexcr) { in __sh_eth_get_regs()
2168 if (cd->rtrate) in __sh_eth_get_regs()
2170 if (cd->csmr) in __sh_eth_get_regs()
2172 if (cd->select_mii) in __sh_eth_get_regs()
2174 if (cd->tsu) { in __sh_eth_get_regs()
2177 if (cd->dual_port) { in __sh_eth_get_regs()
2189 if (cd->dual_port) { in __sh_eth_get_regs()
2211 *buf++ = ioread32(mdp->tsu_addr + in __sh_eth_get_regs()
2212 mdp->reg_offset[TSU_ADRH0] + in __sh_eth_get_regs()
2236 regs->version = SH_ETH_REG_DUMP_VERSION; in sh_eth_get_regs()
2238 pm_runtime_get_sync(&mdp->pdev->dev); in sh_eth_get_regs()
2240 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_get_regs()
2246 return mdp->msg_enable; in sh_eth_get_msglevel()
2252 mdp->msg_enable = value; in sh_eth_set_msglevel()
2267 return -EOPNOTSUPP; in sh_eth_get_sset_count()
2277 /* device-specific stats */ in sh_eth_get_ethtool_stats()
2278 data[i++] = mdp->cur_rx; in sh_eth_get_ethtool_stats()
2279 data[i++] = mdp->cur_tx; in sh_eth_get_ethtool_stats()
2280 data[i++] = mdp->dirty_rx; in sh_eth_get_ethtool_stats()
2281 data[i++] = mdp->dirty_tx; in sh_eth_get_ethtool_stats()
2301 ring->rx_max_pending = RX_RING_MAX; in sh_eth_get_ringparam()
2302 ring->tx_max_pending = TX_RING_MAX; in sh_eth_get_ringparam()
2303 ring->rx_pending = mdp->num_rx_ring; in sh_eth_get_ringparam()
2304 ring->tx_pending = mdp->num_tx_ring; in sh_eth_get_ringparam()
2315 if (ring->tx_pending > TX_RING_MAX || in sh_eth_set_ringparam()
2316 ring->rx_pending > RX_RING_MAX || in sh_eth_set_ringparam()
2317 ring->tx_pending < TX_RING_MIN || in sh_eth_set_ringparam()
2318 ring->rx_pending < RX_RING_MIN) in sh_eth_set_ringparam()
2319 return -EINVAL; in sh_eth_set_ringparam()
2320 if (ring->rx_mini_pending || ring->rx_jumbo_pending) in sh_eth_set_ringparam()
2321 return -EINVAL; in sh_eth_set_ringparam()
2330 * won't be re-enabled. in sh_eth_set_ringparam()
2332 mdp->irq_enabled = false; in sh_eth_set_ringparam()
2333 synchronize_irq(ndev->irq); in sh_eth_set_ringparam()
2334 napi_synchronize(&mdp->napi); in sh_eth_set_ringparam()
2344 mdp->num_rx_ring = ring->rx_pending; in sh_eth_set_ringparam()
2345 mdp->num_tx_ring = ring->tx_pending; in sh_eth_set_ringparam()
2371 wol->supported = 0; in sh_eth_get_wol()
2372 wol->wolopts = 0; in sh_eth_get_wol()
2374 if (mdp->cd->magic) { in sh_eth_get_wol()
2375 wol->supported = WAKE_MAGIC; in sh_eth_get_wol()
2376 wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; in sh_eth_get_wol()
2384 if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC) in sh_eth_set_wol()
2385 return -EOPNOTSUPP; in sh_eth_set_wol()
2387 mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); in sh_eth_set_wol()
2389 device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled); in sh_eth_set_wol()
2418 pm_runtime_get_sync(&mdp->pdev->dev); in sh_eth_open()
2420 napi_enable(&mdp->napi); in sh_eth_open()
2422 ret = request_irq(ndev->irq, sh_eth_interrupt, in sh_eth_open()
2423 mdp->cd->irq_flags, ndev->name, ndev); in sh_eth_open()
2446 mdp->is_opened = 1; in sh_eth_open()
2451 free_irq(ndev->irq, ndev); in sh_eth_open()
2453 napi_disable(&mdp->napi); in sh_eth_open()
2454 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_open()
2472 ndev->stats.tx_errors++; in sh_eth_tx_timeout()
2475 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_tx_timeout()
2476 rxdesc = &mdp->rx_ring[i]; in sh_eth_tx_timeout()
2477 rxdesc->status = cpu_to_le32(0); in sh_eth_tx_timeout()
2478 rxdesc->addr = cpu_to_le32(0xBADF00D0); in sh_eth_tx_timeout()
2479 dev_kfree_skb(mdp->rx_skbuff[i]); in sh_eth_tx_timeout()
2480 mdp->rx_skbuff[i] = NULL; in sh_eth_tx_timeout()
2482 for (i = 0; i < mdp->num_tx_ring; i++) { in sh_eth_tx_timeout()
2483 dev_kfree_skb(mdp->tx_skbuff[i]); in sh_eth_tx_timeout()
2484 mdp->tx_skbuff[i] = NULL; in sh_eth_tx_timeout()
2503 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_start_xmit()
2504 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { in sh_eth_start_xmit()
2508 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_start_xmit()
2512 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_start_xmit()
2517 entry = mdp->cur_tx % mdp->num_tx_ring; in sh_eth_start_xmit()
2518 mdp->tx_skbuff[entry] = skb; in sh_eth_start_xmit()
2519 txdesc = &mdp->tx_ring[entry]; in sh_eth_start_xmit()
2521 if (!mdp->cd->hw_swap) in sh_eth_start_xmit()
2522 sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2); in sh_eth_start_xmit()
2523 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len, in sh_eth_start_xmit()
2525 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { in sh_eth_start_xmit()
2529 txdesc->addr = cpu_to_le32(dma_addr); in sh_eth_start_xmit()
2530 txdesc->len = cpu_to_le32(skb->len << 16); in sh_eth_start_xmit()
2533 if (entry >= mdp->num_tx_ring - 1) in sh_eth_start_xmit()
2534 txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE); in sh_eth_start_xmit()
2536 txdesc->status |= cpu_to_le32(TD_TACT); in sh_eth_start_xmit()
2539 mdp->cur_tx++; in sh_eth_start_xmit()
2541 if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns)) in sh_eth_start_xmit()
2542 sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); in sh_eth_start_xmit()
2547 /* The statistics registers have write-clear behaviour, which means we
2548  * will lose any increment between the read and write.  We mitigate
2549  * this by only clearing when we read a non-zero value, so we will
2550  * never falsely report a total of zero.
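A minimal sketch of the helper this comment describes: read the counter, accumulate it, and write-clear only when the read was non-zero, so a genuine zero total is never destroyed by the clearing write:

```c
static void sh_eth_update_stat(struct net_device *ndev, unsigned long *stat,
			       int reg)
{
	unsigned long delta = sh_eth_read(ndev, reg);

	if (delta) {
		*stat += delta;
		sh_eth_write(ndev, 0, reg);	/* write-clear, non-zero reads only */
	}
}
```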
2567 if (mdp->cd->no_tx_cntrs) in sh_eth_get_stats()
2568 return &ndev->stats; in sh_eth_get_stats()
2570 if (!mdp->is_opened) in sh_eth_get_stats()
2571 return &ndev->stats; in sh_eth_get_stats()
2573 sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR); in sh_eth_get_stats()
2574 sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR); in sh_eth_get_stats()
2575 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR); in sh_eth_get_stats()
2577 if (mdp->cd->cexcr) { in sh_eth_get_stats()
2578 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, in sh_eth_get_stats()
2580 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, in sh_eth_get_stats()
2583 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, in sh_eth_get_stats()
2587 return &ndev->stats; in sh_eth_get_stats()
2599 * ensure that interrupts won't be re-enabled. in sh_eth_close()
2601 mdp->irq_enabled = false; in sh_eth_close()
2602 synchronize_irq(ndev->irq); in sh_eth_close()
2603 napi_disable(&mdp->napi); in sh_eth_close()
2609 if (ndev->phydev) { in sh_eth_close()
2610 phy_stop(ndev->phydev); in sh_eth_close()
2611 phy_disconnect(ndev->phydev); in sh_eth_close()
2614 free_irq(ndev->irq, ndev); in sh_eth_close()
2619 mdp->is_opened = 0; in sh_eth_close()
2621 pm_runtime_put(&mdp->pdev->dev); in sh_eth_close()
2629 return -EBUSY; in sh_eth_change_mtu()
2631 ndev->mtu = new_mtu; in sh_eth_change_mtu()
2640 return 0x0f << (28 - ((entry % 8) * 4)); in sh_eth_tsu_get_post_mask()
2645 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4)); in sh_eth_tsu_get_post_bit()
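Each TSU_POSTn register packs eight CAM entries at four bits apiece (hence entry % 8 and the * 4 in the shift), and within each nibble one bit per port marks the entry as enabled (hence 0x08 >> (port << 1)). Worked values under that reading:

```c
/* sh_eth_tsu_get_post_mask(entry = 0)        == 0x0f << 28 == 0xF0000000
 * sh_eth_tsu_get_post_mask(entry = 1)        == 0x0f << 24 == 0x0F000000
 * sh_eth_tsu_get_post_bit(port 0, entry = 0) == 0x08 << 28 == 0x80000000
 * sh_eth_tsu_get_post_bit(port 1, entry = 0) == 0x02 << 28 == 0x20000000
 */
```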
2683 timeout--; in sh_eth_tsu_busy()
2686 return -ETIMEDOUT; in sh_eth_tsu_busy()
2700 iowrite32(val, mdp->tsu_addr + offset); in sh_eth_tsu_write_entry()
2702 return -EBUSY; in sh_eth_tsu_write_entry()
2705 iowrite32(val, mdp->tsu_addr + offset + 4); in sh_eth_tsu_write_entry()
2707 return -EBUSY; in sh_eth_tsu_write_entry()
2717 val = ioread32(mdp->tsu_addr + offset); in sh_eth_tsu_read_entry()
2722 val = ioread32(mdp->tsu_addr + offset + 4); in sh_eth_tsu_read_entry()
2741 return -ENOENT; in sh_eth_tsu_find_entry()
2751 return (entry < 0) ? -ENOMEM : entry; in sh_eth_tsu_find_empty()
2763 ~(1 << (31 - entry)), TSU_TEN); in sh_eth_tsu_disable_cam_entry_table()
2778 if (!mdp->cd->tsu) in sh_eth_tsu_add_entry()
2786 return -ENOMEM; in sh_eth_tsu_add_entry()
2793 (1 << (31 - i)), TSU_TEN); in sh_eth_tsu_add_entry()
2807 if (!mdp->cd->tsu) in sh_eth_tsu_del_entry()
2830 if (!mdp->cd->tsu) in sh_eth_tsu_purge_all()
2853 if (!mdp->cd->tsu) in sh_eth_tsu_purge_mcast()
2871 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_set_rx_mode()
2873 * Depending on ndev->flags, set PRM or clear MCT in sh_eth_set_rx_mode()
2876 if (mdp->cd->tsu) in sh_eth_set_rx_mode()
2879 if (!(ndev->flags & IFF_MULTICAST)) { in sh_eth_set_rx_mode()
2883 if (ndev->flags & IFF_ALLMULTI) { in sh_eth_set_rx_mode()
2889 if (ndev->flags & IFF_PROMISC) { in sh_eth_set_rx_mode()
2892 } else if (mdp->cd->tsu) { in sh_eth_set_rx_mode()
2895 if (mcast_all && is_multicast_ether_addr(ha->addr)) in sh_eth_set_rx_mode()
2898 if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) { in sh_eth_set_rx_mode()
2911 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_set_rx_mode()
2919 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_set_rx_csum()
2930 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_set_rx_csum()
2936 netdev_features_t changed = ndev->features ^ features; in sh_eth_set_features()
2939 if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum) in sh_eth_set_features()
2942 ndev->features = features; in sh_eth_set_features()
2949 if (!mdp->port) in sh_eth_get_vtag_index()
2961 if (unlikely(!mdp->cd->tsu)) in sh_eth_vlan_rx_add_vid()
2962 return -EPERM; in sh_eth_vlan_rx_add_vid()
2968 mdp->vlan_num_ids++; in sh_eth_vlan_rx_add_vid()
2973 if (mdp->vlan_num_ids > 1) { in sh_eth_vlan_rx_add_vid()
2991 if (unlikely(!mdp->cd->tsu)) in sh_eth_vlan_rx_kill_vid()
2992 return -EPERM; in sh_eth_vlan_rx_kill_vid()
2998 mdp->vlan_num_ids--; in sh_eth_vlan_rx_kill_vid()
3007 if (!mdp->cd->dual_port) { in sh_eth_tsu_init()
3014 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */ in sh_eth_tsu_init()
3015 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */ in sh_eth_tsu_init()
3016 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */ in sh_eth_tsu_init()
3024 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ in sh_eth_tsu_init()
3025 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ in sh_eth_tsu_init()
3029 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ in sh_eth_tsu_init()
3030 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ in sh_eth_tsu_init()
3031 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */ in sh_eth_tsu_init()
3032 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ in sh_eth_tsu_init()
3039 mdiobus_unregister(mdp->mii_bus); in sh_mdio_release()
3042 free_mdio_bitbang(mdp->mii_bus); in sh_mdio_release()
3051 pm_runtime_get_sync(bus->parent); in sh_mdiobb_read()
3053 pm_runtime_put(bus->parent); in sh_mdiobb_read()
3062 pm_runtime_get_sync(bus->parent); in sh_mdiobb_write()
3064 pm_runtime_put(bus->parent); in sh_mdiobb_write()
3075 struct platform_device *pdev = mdp->pdev; in sh_mdio_init()
3076 struct device *dev = &mdp->pdev->dev; in sh_mdio_init()
3081 return -ENOMEM; in sh_mdio_init()
3084 bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; in sh_mdio_init()
3085 bitbang->set_gate = pd->set_mdio_gate; in sh_mdio_init()
3086 bitbang->ctrl.ops = &bb_ops; in sh_mdio_init()
3089 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); in sh_mdio_init()
3090 if (!mdp->mii_bus) in sh_mdio_init()
3091 return -ENOMEM; in sh_mdio_init()
3093 /* Wrap accessors with Runtime PM-aware ops */ in sh_mdio_init()
3094 mdp->mii_bus->read = sh_mdiobb_read; in sh_mdio_init()
3095 mdp->mii_bus->write = sh_mdiobb_write; in sh_mdio_init()
3098 mdp->mii_bus->name = "sh_mii"; in sh_mdio_init()
3099 mdp->mii_bus->parent = dev; in sh_mdio_init()
3100 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in sh_mdio_init()
3101 pdev->name, pdev->id); in sh_mdio_init()
3104 if (pd->phy_irq > 0) in sh_mdio_init()
3105 mdp->mii_bus->irq[pd->phy] = pd->phy_irq; in sh_mdio_init()
3107 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); in sh_mdio_init()
3114 free_mdio_bitbang(mdp->mii_bus); in sh_mdio_init()
3173 struct device_node *np = dev->of_node; in sh_eth_parse_dt()
3185 pdata->phy_interface = interface; in sh_eth_parse_dt()
3187 of_get_mac_address(np, pdata->mac_addr); in sh_eth_parse_dt()
3189 pdata->no_ether_link = in sh_eth_parse_dt()
3190 of_property_read_bool(np, "renesas,no-ether-link"); in sh_eth_parse_dt()
3191 pdata->ether_link_active_low = in sh_eth_parse_dt()
3192 of_property_read_bool(np, "renesas,ether-link-active-low"); in sh_eth_parse_dt()
3198 { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
3199 { .compatible = "renesas,ether-r8a7743", .data = &rcar_gen2_data },
3200 { .compatible = "renesas,ether-r8a7745", .data = &rcar_gen2_data },
3201 { .compatible = "renesas,ether-r8a7778", .data = &rcar_gen1_data },
3202 { .compatible = "renesas,ether-r8a7779", .data = &rcar_gen1_data },
3203 { .compatible = "renesas,ether-r8a7790", .data = &rcar_gen2_data },
3204 { .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data },
3205 { .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data },
3206 { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
3207 { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
3208 { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
3209 { .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data },
3210 { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
3211 { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
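This match table is what the +full:ether +full:- +full:r8a7790 query in the title resolves against: an R-Car H2 (r8a7790) board binds through the SoC-specific string with the generic Gen2 fallback. A hypothetical node for illustration (address, reg range and PHY label are made up; the renesas,* properties are the ones parsed in sh_eth_parse_dt() above):

```dts
ethernet@ee700000 {
	compatible = "renesas,ether-r8a7790", "renesas,rcar-gen2-ether";
	reg = <0 0xee700000 0 0x400>;
	phy-mode = "rmii";
	phy-handle = <&phy1>;
	renesas,ether-link-active-low;
};
```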
3225 struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev); in sh_eth_drv_probe()
3233 return -ENOMEM; in sh_eth_drv_probe()
3235 pm_runtime_enable(&pdev->dev); in sh_eth_drv_probe()
3236 pm_runtime_get_sync(&pdev->dev); in sh_eth_drv_probe()
3241 ndev->irq = ret; in sh_eth_drv_probe()
3243 SET_NETDEV_DEV(ndev, &pdev->dev); in sh_eth_drv_probe()
3246 mdp->num_tx_ring = TX_RING_SIZE; in sh_eth_drv_probe()
3247 mdp->num_rx_ring = RX_RING_SIZE; in sh_eth_drv_probe()
3248 mdp->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); in sh_eth_drv_probe()
3249 if (IS_ERR(mdp->addr)) { in sh_eth_drv_probe()
3250 ret = PTR_ERR(mdp->addr); in sh_eth_drv_probe()
3254 ndev->base_addr = res->start; in sh_eth_drv_probe()
3256 spin_lock_init(&mdp->lock); in sh_eth_drv_probe()
3257 mdp->pdev = pdev; in sh_eth_drv_probe()
3259 if (pdev->dev.of_node) in sh_eth_drv_probe()
3260 pd = sh_eth_parse_dt(&pdev->dev); in sh_eth_drv_probe()
3262 dev_err(&pdev->dev, "no platform data\n"); in sh_eth_drv_probe()
3263 ret = -EINVAL; in sh_eth_drv_probe()
3268 mdp->phy_id = pd->phy; in sh_eth_drv_probe()
3269 mdp->phy_interface = pd->phy_interface; in sh_eth_drv_probe()
3270 mdp->no_ether_link = pd->no_ether_link; in sh_eth_drv_probe()
3271 mdp->ether_link_active_low = pd->ether_link_active_low; in sh_eth_drv_probe()
3275 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; in sh_eth_drv_probe()
3277 mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev); in sh_eth_drv_probe()
3279 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); in sh_eth_drv_probe()
3280 if (!mdp->reg_offset) { in sh_eth_drv_probe()
3281 dev_err(&pdev->dev, "Unknown register type (%d)\n", in sh_eth_drv_probe()
3282 mdp->cd->register_type); in sh_eth_drv_probe()
3283 ret = -EINVAL; in sh_eth_drv_probe()
3286 sh_eth_set_default_cpu_data(mdp->cd); in sh_eth_drv_probe()
3292 ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); in sh_eth_drv_probe()
3293 ndev->min_mtu = ETH_MIN_MTU; in sh_eth_drv_probe()
3295 if (mdp->cd->rx_csum) { in sh_eth_drv_probe()
3296 ndev->features = NETIF_F_RXCSUM; in sh_eth_drv_probe()
3297 ndev->hw_features = NETIF_F_RXCSUM; in sh_eth_drv_probe()
3301 if (mdp->cd->tsu) in sh_eth_drv_probe()
3302 ndev->netdev_ops = &sh_eth_netdev_ops_tsu; in sh_eth_drv_probe()
3304 ndev->netdev_ops = &sh_eth_netdev_ops; in sh_eth_drv_probe()
3305 ndev->ethtool_ops = &sh_eth_ethtool_ops; in sh_eth_drv_probe()
3306 ndev->watchdog_timeo = TX_TIMEOUT; in sh_eth_drv_probe()
3309 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; in sh_eth_drv_probe()
3312 read_mac_address(ndev, pd->mac_addr); in sh_eth_drv_probe()
3313 if (!is_valid_ether_addr(ndev->dev_addr)) { in sh_eth_drv_probe()
3314 dev_warn(&pdev->dev, in sh_eth_drv_probe()
3319 if (mdp->cd->tsu) { in sh_eth_drv_probe()
3320 int port = pdev->id < 0 ? 0 : pdev->id % 2; in sh_eth_drv_probe()
3325 dev_err(&pdev->dev, "no TSU resource\n"); in sh_eth_drv_probe()
3326 ret = -ENODEV; in sh_eth_drv_probe()
3333 !devm_request_mem_region(&pdev->dev, rtsu->start, in sh_eth_drv_probe()
3335 dev_name(&pdev->dev))) { in sh_eth_drv_probe()
3336 dev_err(&pdev->dev, "can't request TSU resource.\n"); in sh_eth_drv_probe()
3337 ret = -EBUSY; in sh_eth_drv_probe()
3341 mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, in sh_eth_drv_probe()
3343 if (!mdp->tsu_addr) { in sh_eth_drv_probe()
3344 dev_err(&pdev->dev, "TSU region ioremap() failed.\n"); in sh_eth_drv_probe()
3345 ret = -ENOMEM; in sh_eth_drv_probe()
3348 mdp->port = port; in sh_eth_drv_probe()
3349 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in sh_eth_drv_probe()
3353 if (mdp->cd->chip_reset) in sh_eth_drv_probe()
3354 mdp->cd->chip_reset(ndev); in sh_eth_drv_probe()
3361 if (mdp->cd->rmiimode) in sh_eth_drv_probe()
3367 dev_err_probe(&pdev->dev, ret, "MDIO init failed\n"); in sh_eth_drv_probe()
3371 netif_napi_add(ndev, &mdp->napi, sh_eth_poll); in sh_eth_drv_probe()
3378 if (mdp->cd->magic) in sh_eth_drv_probe()
3379 device_set_wakeup_capable(&pdev->dev, 1); in sh_eth_drv_probe()
3383 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); in sh_eth_drv_probe()
3385 pm_runtime_put(&pdev->dev); in sh_eth_drv_probe()
3391 netif_napi_del(&mdp->napi); in sh_eth_drv_probe()
3398 pm_runtime_put(&pdev->dev); in sh_eth_drv_probe()
3399 pm_runtime_disable(&pdev->dev); in sh_eth_drv_probe()
3409 netif_napi_del(&mdp->napi); in sh_eth_drv_remove()
3411 pm_runtime_disable(&pdev->dev); in sh_eth_drv_remove()
3424 synchronize_irq(ndev->irq); in sh_eth_wol_setup()
3425 napi_disable(&mdp->napi); in sh_eth_wol_setup()
3431 return enable_irq_wake(ndev->irq); in sh_eth_wol_setup()
3439 napi_enable(&mdp->napi); in sh_eth_wol_restore()
3454 return disable_irq_wake(ndev->irq); in sh_eth_wol_restore()
3468 if (mdp->wol_enabled) in sh_eth_suspend()
3485 if (mdp->wol_enabled) in sh_eth_resume()
3501 /* Runtime PM callback shared between ->runtime_suspend() in sh_eth_runtime_nop()
3502 * and ->runtime_resume(). Simply returns success. in sh_eth_runtime_nop()
3504 * This driver re-initializes all registers after in sh_eth_runtime_nop()
3521 { "sh7619-ether", (kernel_ulong_t)&sh7619_data },
3522 { "sh771x-ether", (kernel_ulong_t)&sh771x_data },
3523 { "sh7724-ether", (kernel_ulong_t)&sh7724_data },
3524 { "sh7734-gether", (kernel_ulong_t)&sh7734_data },
3525 { "sh7757-ether", (kernel_ulong_t)&sh7757_data },
3526 { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
3527 { "sh7763-gether", (kernel_ulong_t)&sh7763_data },