Lines Matching +full:sync +full:- +full:freq
(excerpted lines from drivers/net/ethernet/socionext/netsec.c; elided code is marked "/* ... */")
// SPDX-License-Identifier: GPL-2.0+

#define NETSEC_RX_BUF_SIZE	(PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)

	u32 freq;	/* member of struct netsec_priv */

/* in netsec_write() */
	writel(val, priv->ioaddr + reg_addr);

/* in netsec_read() */
	return readl(priv->ioaddr + reg_addr);

static u32 netsec_clk_type(u32 freq)
{
	if (freq < MHZ(35))
		/* ... */
	if (freq < MHZ(60))
		/* ... */
	if (freq < MHZ(100))
		/* ... */
	if (freq < MHZ(150))
		/* ... */
	if (freq < MHZ(250))
		/* ... */
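/*
 * The ladder above maps the PHY reference-clock rate to the GMAC's
 * MDIO clock-range code, one band per threshold, with an implicit
 * fallback for rates of 250 MHz and up (the per-band return values
 * are elided in this listing).  A stand-alone sketch of the same
 * selection; the band indices are illustrative, not the driver's
 * register values:
 */
static unsigned int mdio_clk_band(unsigned int freq_mhz)
{
	static const unsigned int upper[] = { 35, 60, 100, 150, 250 };
	unsigned int i;

	for (i = 0; i < 5; i++)
		if (freq_mhz < upper[i])
			return i;	/* band i: below upper[i] MHz */
	return 5;			/* 250 MHz and above */
}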
/* in netsec_wait_while_busy() */
	while (--timeout && netsec_read(priv, addr) & mask)
		/* ... first pass ... */
	while (--timeout && netsec_read(priv, addr) & mask)
		/* ... second pass ... */

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;

/* in netsec_mac_wait_while_busy() */
	} while (--timeout && (data & mask));
	/* ... */
	} while (--timeout && (data & mask));

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;
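/*
 * The two identical-looking polls in each helper above are two
 * separate passes over the same busy bit before the netdev_WARN()
 * and -ETIMEDOUT: if the first, fast pass exhausts its budget, a
 * second pass retries before giving up.  A minimal model of that
 * retry shape, with read_busy() standing in for the masked
 * register read:
 */
static int wait_two_phase(int (*read_busy)(void), unsigned int n)
{
	unsigned int timeout = n;

	while (--timeout && read_busy())
		;			/* first, fast pass */
	if (timeout)
		return 0;		/* bit cleared in time */

	timeout = n;
	while (--timeout && read_busy())
		;			/* second, slower pass */

	return timeout ? 0 : -1;	/* -ETIMEDOUT in the driver */
}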
/* in netsec_mac_update_to_phy_state() */
	struct phy_device *phydev = priv->ndev->phydev;
	/* ... */
	value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
		/* ... half-duplex variant ... */
	if (phydev->speed != SPEED_1000)
		/* ... */
	if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
	    phydev->speed == SPEED_100)
		/* ... */

	if (phy_interface_mode_is_rgmii(priv->phy_interface))
		/* ... */
		return -ETIMEDOUT;

/* in netsec_phy_write() */
	struct netsec_priv *priv = bus->priv;
	/* ... */
		return -ETIMEDOUT;
	/* ... */
			     (netsec_clk_type(priv->freq) <<
	/* ... */
		return -ETIMEDOUT;

/* in netsec_phy_read() */
	struct netsec_priv *priv = bus->priv;
	/* ... */
			     (netsec_clk_type(priv->freq) <<
	/* ... */
		return -ETIMEDOUT;
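/*
 * The (netsec_clk_type(priv->freq) << ...) fragments above slot the
 * clock-range code into the GMAC's MDIO command word next to the
 * PHY and register addresses.  An illustrative packing helper; the
 * field offsets below are made up for the example, not the
 * hardware's:
 */
#define MDIO_CMD_PHY(a)		((unsigned int)(a) << 11)
#define MDIO_CMD_REG(r)		((unsigned int)(r) << 6)
#define MDIO_CMD_CLK_RANGE(c)	((unsigned int)(c) << 2)
#define MDIO_CMD_BUSY		(1u << 0)

static unsigned int mdio_cmd_word(unsigned int phy, unsigned int reg,
				  unsigned int clk_range)
{
	return MDIO_CMD_PHY(phy) | MDIO_CMD_REG(reg) |
	       MDIO_CMD_CLK_RANGE(clk_range) | MDIO_CMD_BUSY;
}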
/* in netsec_et_get_drvinfo() */
	strscpy(info->driver, "netsec", sizeof(info->driver));
	strscpy(info->bus_info, dev_name(net_device->dev.parent),
		sizeof(info->bus_info));

/* in netsec_et_get_coalesce() */
	*et_coalesce = priv->et_coalesce;

/* in netsec_et_set_coalesce() */
	priv->et_coalesce = *et_coalesce;

	if (priv->et_coalesce.tx_coalesce_usecs < 50)
		priv->et_coalesce.tx_coalesce_usecs = 50;
	if (priv->et_coalesce.tx_max_coalesced_frames < 1)
		priv->et_coalesce.tx_max_coalesced_frames = 1;
	/* ... */
		     priv->et_coalesce.tx_max_coalesced_frames);
	/* ... */
		     priv->et_coalesce.tx_coalesce_usecs);
	/* ... */
	if (priv->et_coalesce.rx_coalesce_usecs < 50)
		priv->et_coalesce.rx_coalesce_usecs = 50;
	if (priv->et_coalesce.rx_max_coalesced_frames < 1)
		priv->et_coalesce.rx_max_coalesced_frames = 1;
	/* ... */
		     priv->et_coalesce.rx_max_coalesced_frames);
	/* ... */
		     priv->et_coalesce.rx_coalesce_usecs);
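/*
 * The clamps above are plain floors, at least 50 us and at least one
 * frame per interrupt in each direction, applied before the values
 * are written to the coalescing registers.  Equivalent form:
 */
static void clamp_coalesce(unsigned int *usecs, unsigned int *frames)
{
	*usecs = *usecs < 50 ? 50 : *usecs;	/* floor: 50 us */
	*frames = *frames < 1 ? 1 : *frames;	/* floor: 1 frame */
}
/* From userspace this is the ethtool -C path, e.g.:
 *	ethtool -C eth0 rx-usecs 500 rx-frames 8 tx-usecs 500 tx-frames 8
 * (values mirror the driver's probe-time defaults further below).
 */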
/* in netsec_et_get_msglevel() */
	return priv->msg_enable;

/* in netsec_et_set_msglevel() */
	priv->msg_enable = datum;

/* in netsec_set_rx_de() */
	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
	/* ... */
	if (idx == DESC_NUM - 1)
		/* ... */
	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = desc->len;
	de->attr = attr;
	/* ... */
	dring->desc[idx].dma_addr = desc->dma_addr;
	dring->desc[idx].addr = desc->addr;
	dring->desc[idx].len = desc->len;
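/*
 * Shape of the attr word being written above: an OWN bit hands the
 * slot to the DMA engine, and the last slot in the ring additionally
 * carries a ring-end flag so the engine wraps back to descriptor 0;
 * that is what the idx == DESC_NUM - 1 test selects.  A hedged
 * sketch (bit positions are illustrative):
 */
#define RXD_OWN		(1u << 31)	/* slot belongs to the NIC */
#define RXD_RING_END	(1u << 30)	/* wrap to descriptor 0 after this */

static unsigned int rx_attr(unsigned int idx, unsigned int ring_size)
{
	unsigned int attr = RXD_OWN;

	if (idx == ring_size - 1)
		attr |= RXD_RING_END;
	return attr;
}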
/* in netsec_clean_tx_dring() */
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	/* ... */
	int tail = dring->tail;
	/* ... */
	spin_lock(&dring->lock);
	/* ... */
	entry = dring->vaddr + DESC_SZ * tail;
	/* ... */
	while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
	       /* ... */) {
		/* ... */
		desc = &dring->desc[tail];
		eop = (entry->attr >> NETSEC_TX_LAST) & 1;
		/* ... */
		if (desc->buf_type != TYPE_NETSEC_XDP_TX)
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
					 DMA_TO_DEVICE);
		/* ... */
		if (desc->buf_type == TYPE_NETSEC_SKB) {
			bytes += desc->skb->len;
			dev_kfree_skb(desc->skb);
		} else {
			bytes += desc->xdpf->len;
			if (desc->buf_type == TYPE_NETSEC_XDP_TX)
				xdp_return_frame_rx_napi(desc->xdpf);
			else
				xdp_return_frame_bulk(desc->xdpf, &bq);
		}
		/* entry->attr is not going to be accessed by the NIC
		 * until netsec_set_tx_de() is ready
		 */
		entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;

		dring->tail = (tail + 1) % DESC_NUM;
		tail = dring->tail;
		entry = dring->vaddr + DESC_SZ * tail;
		/* ... */
	}
	/* ... */
	spin_unlock(&dring->lock);
	/* ... */
	priv->ndev->stats.tx_packets += cnt;
	priv->ndev->stats.tx_bytes += bytes;

	netdev_completed_queue(priv->ndev, cnt, bytes);
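/*
 * A stand-alone model of the reclaim loop above: walk from tail,
 * stop at the first slot whose OWN bit is set, account and free each
 * completed buffer, re-set OWN so the slot is not reclaimed a second
 * time, and advance tail modulo the ring size.  The byte and packet
 * counts feed the stats and netdev_completed_queue() calls.
 */
struct model_txd {
	unsigned int attr;
	unsigned int len;
};

static unsigned int tx_reclaim(struct model_txd *ring, unsigned int num,
			       unsigned int *tail, unsigned int own_bit,
			       unsigned long *bytes)
{
	unsigned int cnt = 0;

	while (cnt < num && !(ring[*tail].attr & own_bit)) {
		*bytes += ring[*tail].len;	/* unmap + free goes here */
		ring[*tail].attr = own_bit;	/* mark slot reclaimed */
		*tail = (*tail + 1) % num;
		cnt++;
	}
	return cnt;
}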
/* in netsec_process_tx() */
	struct net_device *ndev = priv->ndev;

/* in netsec_alloc_rx_data() */
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	/* ... */
	page = page_pool_dev_alloc_pages(dring->page_pool);
	/* ... */
	/* We allocate the same buffer length for XDP and non-XDP cases.
	 * ...
	 */
	/* Make sure the incoming payload fits in the page for XDP and non-XDP
	 * ...
	 */

/* in netsec_rx_fill() */
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	/* ... */
		netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
		/* ... */
		num--;

/* in netsec_set_tx_de() */
	int idx = dring->head;
	/* ... */
	de = dring->vaddr + (DESC_SZ * idx);
	/* ... the attr word also carries, among other flags: */
	       (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
	       (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
	       /* ... */
	if (idx == DESC_NUM - 1)
		/* ... ring-end flag, as on the RX side ... */

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
	de->attr = attr;
	/* ... */
	dring->desc[idx] = *desc;
	if (desc->buf_type == TYPE_NETSEC_SKB)
		dring->desc[idx].skb = buf;
	else if (desc->buf_type == TYPE_NETSEC_XDP_TX ||
		 desc->buf_type == TYPE_NETSEC_XDP_NDO)
		dring->desc[idx].xdpf = buf;
	/* ... */
	dring->head = (dring->head + 1) % DESC_NUM;

/* in netsec_xdp_queue_one() */
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	struct page *page = virt_to_page(xdpf->data);
	/* ... */
	if (tx_ring->head >= tx_ring->tail)
		filled = tx_ring->head - tx_ring->tail;
	else
		filled = tx_ring->head + DESC_NUM - tx_ring->tail;

	if (DESC_NUM - filled <= 1)
		/* ... ring full ... */

	/* ... ndo_xdp_xmit() frames need a fresh mapping: */
		dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len,
					    DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, dma_handle))
			/* ... */
	/* ... XDP_TX frames are the device's own page_pool RX buffers:
	 * no need to remap, just sync and send it
	 */
		struct netsec_desc_ring *rx_ring =
			&priv->desc_ring[NETSEC_RING_RX];
		/* ... */
			page_pool_get_dma_dir(rx_ring->page_pool);

		dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom +
			     /* ... */
		dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
					   /* ... */
	/* ... */
	tx_desc.addr = xdpf->data;
	tx_desc.len = xdpf->len;

	netdev_sent_queue(priv->ndev, xdpf->len);
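/*
 * The mapping asymmetry above, spelled out: ndo_xdp_xmit() frames
 * (TYPE_NETSEC_XDP_NDO) arrive in memory this device never mapped,
 * so they get a fresh dma_map_single() and a matching unmap on
 * completion; XDP_TX frames are recycled page_pool RX pages that are
 * already mapped, so the driver only offsets into the existing
 * mapping and syncs the frame bytes for the device.  The buf_type
 * test in netsec_clean_tx_dring() above skips the unmap for exactly
 * the second kind:
 */
enum model_buf_type { MODEL_XDP_TX, MODEL_XDP_NDO, MODEL_SKB };

static int tx_buf_needs_unmap(enum model_buf_type t)
{
	return t != MODEL_XDP_TX;	/* page_pool keeps its own mapping */
}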
/* in netsec_xdp_xmit_back() */
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	/* ... */
	spin_lock(&tx_ring->lock);
	/* ... */
	spin_unlock(&tx_ring->lock);

/* in netsec_run_xdp() */
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	unsigned int sync, len = xdp->data_end - xdp->data;
	/* ... */
	/* Because of xdp_adjust_tail: the for-device DMA sync must cover
	 * the maximum length the CPU may have touched
	 */
	sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM;
	sync = max(sync, len);
	/* ... */
	case XDP_TX:
		/* ... on failure, recycle the page: */
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(dring->page_pool, page, sync, true);
		/* ... */
	case XDP_REDIRECT:
		err = xdp_do_redirect(priv->ndev, xdp, prog);
		/* ... on error, recycle the page: */
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(dring->page_pool, page, sync, true);
		/* ... */
	default:
		bpf_warn_invalid_xdp_action(priv->ndev, prog, act);
		/* ... */
	case XDP_ABORTED:
		trace_xdp_exception(priv->ndev, prog, act);
		/* ... */
	case XDP_DROP:
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(dring->page_pool, page, sync, true);
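/*
 * Why sync = max(sync, len) above: an XDP program may have moved
 * data and data_end (xdp_adjust_head/xdp_adjust_tail), so when a
 * page goes back to the pool the for-device sync length must cover
 * everything the CPU may have written, whichever of the two spans
 * is larger.  Stand-alone restatement:
 */
static unsigned int xdp_recycle_sync_len(unsigned char *hard_start,
					 unsigned char *data,
					 unsigned char *data_end,
					 unsigned int headroom)
{
	unsigned int len = data_end - data;
	unsigned int sync = data_end - hard_start - headroom;

	return sync > len ? sync : len;
}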
/* in netsec_process_rx() */
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct net_device *ndev = priv->ndev;
	/* ... */
	xdp_init_buff(&xdp, PAGE_SIZE, &dring->xdp_rxq);
	/* ... */
	xdp_prog = READ_ONCE(priv->xdp_prog);
	dma_dir = page_pool_get_dma_dir(dring->page_pool);
	/* ... */
		u16 idx = dring->tail;
		struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
		struct netsec_desc *desc = &dring->desc[idx];
		struct page *page = virt_to_page(desc->addr);
		/* ... */
		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
			/* still owned by the NIC: nothing more to reap */
			/* ... */
		}
		/* ... */
		pkt_len = de->buf_len_info >> 16;
		rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
			/* ... */
		rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
		/* ... on error: */
			netif_err(priv, drv, priv->ndev,
				  /* ... */
			ndev->stats.rx_dropped++;
			dring->tail = (dring->tail + 1) % DESC_NUM;
		/* ... */
			(de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
		/* ... */
		dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
					dma_dir);
		prefetch(desc->addr);
		/* ... */
		xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
				 /* ... */
		/* ... */
		skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA);
		/* ... on failure: */
			page_pool_put_page(dring->page_pool, page, pkt_len,
					   /* ... */
			netif_err(priv, drv, priv->ndev,
				  /* ... */
		/* ... */
		skb_reserve(skb, xdp.data - xdp.data_hard_start);
		skb_put(skb, xdp.data_end - xdp.data);
		skb->protocol = eth_type_trans(skb, priv->ndev);
		/* ... */
		if (priv->rx_cksum_offload_flag &&
		    /* ... */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* ... */
			napi_gro_receive(&priv->napi, skb);
		/* ... */
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += xdp.data_end - xdp.data;
		/* ... install the replacement buffer in the slot: */
		desc->len = desc_len;
		desc->dma_addr = dma_handle;
		desc->addr = buf_addr;
		/* ... */
		dring->tail = (dring->tail + 1) % DESC_NUM;

/* in netsec_napi_poll() */
	spin_lock_irqsave(&priv->reglock, flags);
	/* ... */
	spin_unlock_irqrestore(&priv->reglock, flags);

/* in netsec_desc_used() */
	if (dring->head >= dring->tail)
		used = dring->head - dring->tail;
	else
		used = dring->head + DESC_NUM - dring->tail;
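/*
 * The head/tail arithmetic above (also used in netsec_xdp_queue_one())
 * computes ring occupancy without a separate counter: if head has not
 * wrapped past tail the fill level is head - tail, otherwise add
 * DESC_NUM to un-wrap.  Stand-alone form:
 */
static unsigned int ring_used(unsigned int head, unsigned int tail,
			      unsigned int num)
{
	return head >= tail ? head - tail : head + num - tail;
}
/* netsec_check_stop_tx() below then stops the queue while fewer than
 * two of the DESC_NUM slots remain free.
 */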
/* in netsec_check_stop_tx() */
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	/* ... */
	if (DESC_NUM - used < 2) {
		netif_stop_queue(priv->ndev);
		/* ... re-read the fill level, then: */
		if (DESC_NUM - used < 2)
			/* ... */

		netif_wake_queue(priv->ndev);
	}

/* in netsec_netdev_start_xmit() */
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	/* ... */
	spin_lock_bh(&dring->lock);
	/* ... if the ring is full: */
		spin_unlock_bh(&dring->lock);
		/* ... */
			   dev_name(priv->dev), ndev->name);
	/* ... */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* ... */
	/* ... */
		tso_seg_len = skb_shinfo(skb)->gso_size;
	/* ... */
		if (skb->protocol == htons(ETH_P_IP)) {
			ip_hdr(skb)->tot_len = 0;
			tcp_hdr(skb)->check =
				~tcp_v4_check(0, ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr, 0);
		}
	/* ... */
	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
					  /* ... */
	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
		spin_unlock_bh(&dring->lock);
		netif_err(priv, drv, priv->ndev,
			  /* ... */
		ndev->stats.tx_dropped++;
		/* ... */
	}
	/* ... */
	tx_desc.addr = skb->data;
	/* ... */
	netdev_sent_queue(priv->ndev, skb->len);
	/* ... */
	spin_unlock_bh(&dring->lock);
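/*
 * Why tot_len = 0 and a seeded check in the TSO path above: the NIC
 * splits the super-frame itself, so it must also finish the TCP
 * checksum per segment.  The stack therefore leaves tcp->check
 * holding only the folded pseudo-header sum (addresses + protocol,
 * zero length) in uncomplemented form, which is what the
 * ~tcp_v4_check(0, saddr, daddr, 0) above produces, and the hardware
 * adds the per-segment length and payload.  The same seed in plain C:
 */
static unsigned short tso_check_seed(unsigned int saddr, unsigned int daddr)
{
	unsigned long sum = (saddr >> 16) + (saddr & 0xffff) +
			    (daddr >> 16) + (daddr & 0xffff) +
			    6;			/* IPPROTO_TCP */

	while (sum >> 16)			/* fold the carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)sum;		/* what lands in tcp->check */
}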
/* in netsec_uninit_pkt_dring() */
	struct netsec_desc_ring *dring = &priv->desc_ring[id];
	/* ... */
	if (!dring->vaddr || !dring->desc)
		/* ... */
	/* ... for each descriptor: */
		desc = &dring->desc[idx];
		if (!desc->addr)
			/* ... */
		/* RX buffers go back to the page_pool: */
			struct page *page = virt_to_page(desc->addr);

			page_pool_put_full_page(dring->page_pool, page, false);
		/* TX buffers are unmapped and their skb freed: */
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
					 DMA_TO_DEVICE);
			dev_kfree_skb(desc->skb);
	/* ... */
	if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
		xdp_rxq_info_unreg(&dring->xdp_rxq);
	page_pool_destroy(dring->page_pool);
	/* ... */
	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
	memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);

	dring->head = 0;
	dring->tail = 0;
	/* ... */
	netdev_reset_queue(priv->ndev);

/* in netsec_free_dring() */
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	if (dring->vaddr) {
		dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
				  dring->vaddr, dring->desc_dma);
		dring->vaddr = NULL;
	}

	kfree(dring->desc);
	dring->desc = NULL;

/* in netsec_alloc_dring() */
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
					  &dring->desc_dma, GFP_KERNEL);
	if (!dring->vaddr)
		/* ... */

	dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
	if (!dring->desc)
		/* ... */
	/* ... on failure: */
	return -ENOMEM;

/* in netsec_setup_tx_dring() */
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	/* ... for each descriptor: */
		de = dring->vaddr + (DESC_SZ * i);
		/* de->attr is not going to be accessed by the NIC
		 * until netsec_set_tx_de() is ready
		 */
		de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;

/* in netsec_setup_rx_dring() */
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
	/* ... in the page_pool_params initializer: */
		.dev = priv->dev,
	/* ... */
	dring->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(dring->page_pool)) {
		err = PTR_ERR(dring->page_pool);
		dring->page_pool = NULL;
		/* ... */
	}

	err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0, priv->napi.napi_id);
	/* ... */
	err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 dring->page_pool);
	/* ... for each descriptor: */
		struct netsec_desc *desc = &dring->desc[i];
		/* ... on allocation failure: */
			err = -ENOMEM;
			/* ... */
		desc->dma_addr = dma_handle;
		desc->addr = buf;
		desc->len = len;
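/*
 * The RX ring sits on a page_pool: one full page per descriptor,
 * with the DMA mapping owned by the pool, which is what lets the
 * XDP paths above recycle pages without remapping.  A hedged sketch
 * of a pool of this shape; the field values are indicative, not the
 * driver's exact parameters:
 */
	struct page_pool_params pp = {
		.order		= 0,		/* one page per buffer */
		.pool_size	= DESC_NUM,
		.nid		= NUMA_NO_NODE,
		.dev		= priv->dev,
		.dma_dir	= xdp_prog ? DMA_BIDIRECTIONAL
					   : DMA_FROM_DEVICE,
		.offset		= NETSEC_RXBUF_HEADROOM,
		.max_len	= NETSEC_RX_BUF_SIZE,
	};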
/* in netsec_netdev_load_ucode_region() */
		return -ENOMEM;

/* in netsec_netdev_load_microcode() */
	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
	/* ... */
	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
	/* ... the packet engine image has a single 32-bit address: */
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);

/* in netsec_reset_hardware() */
	/* ... program the RX and TX descriptor ring base addresses: */
		     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
	/* ... */
		     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
	/* ... */
		     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
	/* ... */
		     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
	/* ... */
		netif_err(priv, probe, priv->ndev,
			  /* ... */
	/* ... scale the DMA timer to the bus clock: */
	netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
	/* ... */
		netif_err(priv, probe, priv->ndev,
			  /* ... */
		return -ENXIO;
	/* ... */
	if (priv->ndev->mtu > ETH_DATA_LEN)
		/* ... */

/* in netsec_start_gmac() */
	struct phy_device *phydev = priv->ndev->phydev;
	/* ... */
	if (phydev->speed != SPEED_1000)
		/* ... */
	/* a long sequence of MAC register writes and polls follows;
	 * each one bails out with -ETIMEDOUT on failure, and one
	 * mid-sequence check returns -EAGAIN:
	 */
		return -ETIMEDOUT;
	/* ... */
		return -EAGAIN;
	/* ... */
		return -ETIMEDOUT;
	/* ... */
	netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce, NULL, NULL);
	/* ... */
		return -ETIMEDOUT;

/* in netsec_phy_adjust_link() */
	if (ndev->phydev->link)
		/* ... */

	phy_print_status(ndev->phydev);

/* in netsec_irq_handler() */
	spin_lock_irqsave(&priv->reglock, flags);
	/* ... */
	spin_unlock_irqrestore(&priv->reglock, flags);

	napi_schedule(&priv->napi);

/* in netsec_netdev_open() */
	pm_runtime_get_sync(priv->dev);
	/* ... */
		netif_err(priv, probe, priv->ndev,
			  /* ... */

	ret = request_irq(priv->ndev->irq, netsec_irq_handler,
			  /* ... */
	if (ret) {
		netif_err(priv, drv, priv->ndev, "request_irq failed\n");
		/* ... */
	}

	if (dev_of_node(priv->dev)) {
		if (!of_phy_connect(priv->ndev, priv->phy_np,
				    /* ... */
				    priv->phy_interface)) {
			netif_err(priv, link, priv->ndev, "missing PHY\n");
			ret = -ENODEV;
			/* ... */
		}
	} else {
		ret = phy_connect_direct(priv->ndev, priv->phydev,
					 /* ... */
					 priv->phy_interface);
		if (ret) {
			netif_err(priv, link, priv->ndev,
				  /* ... */
		}
	}

	phy_start(ndev->phydev);
	/* ... */
	napi_enable(&priv->napi);
	/* ... error unwind: */
	free_irq(priv->ndev->irq, priv);
	/* ... */
	pm_runtime_put_sync(priv->dev);

/* in netsec_netdev_stop() */
	netif_stop_queue(priv->ndev);
	/* ... */
	napi_disable(&priv->napi);
	/* ... */
	free_irq(priv->ndev->irq, priv);
	/* ... */
	phy_stop(ndev->phydev);
	phy_disconnect(ndev->phydev);
	/* ... */
	pm_runtime_put_sync(priv->dev);

/* in netsec_netdev_init() */
	data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR);
	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR,
			 /* ... */
	/* ... */
	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);

	spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
	spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);

/* in netsec_netdev_set_features() */
	priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);

/* in netsec_xdp_xmit() */
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	/* ... */
		return -EINVAL;

	spin_lock(&tx_ring->lock);
	/* ... for each frame successfully queued: */
		tx_ring->xdp_xmit++;
	/* ... */
	spin_unlock(&tx_ring->lock);
	/* ... */
	netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit);
	tx_ring->xdp_xmit = 0;
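/*
 * ndo_xdp_xmit() batching, modelled stand-alone: queue what fits
 * under the TX lock, count the accepted frames, and ring the TX
 * doorbell once for the whole batch (netsec_xdp_ring_tx_db() above)
 * instead of once per frame:
 */
static int xmit_batch(int n, int (*queue_one)(int idx),
		      void (*kick_doorbell)(unsigned int pending))
{
	int i, nxmit = 0;

	for (i = 0; i < n; i++) {
		if (queue_one(i) != 0)
			break;			/* ring full: stop early */
		nxmit++;
	}
	if (nxmit)
		kick_doorbell(nxmit);		/* one doorbell per batch */
	return nxmit;				/* frames accepted */
}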
/* in netsec_xdp_setup() */
	struct net_device *dev = priv->ndev;
	/* ... */
	if (prog && dev->mtu > 1500) {
		/* ... */
		return -EOPNOTSUPP;
	}
	/* ... */
	old_prog = xchg(&priv->xdp_prog, prog);

/* in netsec_xdp() */
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return netsec_xdp_setup(priv, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}

/* in netsec_of_probe() */
	err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface);
	if (err) {
		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
		/* ... */
	}

	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_np) {
		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
		return -EINVAL;
	}

	*phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);

	priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
	if (IS_ERR(priv->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
				     /* ... */
	priv->freq = clk_get_rate(priv->clk);
/* in netsec_acpi_probe() */
	/* ... */
		return -ENODEV;
	/* ... */
	/* ... firmware in the field is known to configure the
	 * PHY correctly but passes the wrong mode string in the phy-mode
	 * device property, so the mode reported by ACPI is ignored:
	 */
	priv->phy_interface = PHY_INTERFACE_MODE_NA;

	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "missing required property 'phy-channel'\n");

	ret = device_property_read_u32(&pdev->dev,
				       "socionext,phy-clock-frequency",
				       &priv->freq);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "missing required property 'socionext,phy-clock-frequency'\n");

/* in netsec_unregister_mdio() */
	struct phy_device *phydev = priv->phydev;

	if (!dev_of_node(priv->dev) && phydev) {
		/* ... */
	}

	mdiobus_unregister(priv->mii_bus);
/* in netsec_register_mdio() */
	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "SNI NETSEC MDIO";
	bus->read = netsec_phy_read;
	bus->write = netsec_phy_write;
	bus->parent = priv->dev;
	priv->mii_bus = bus;

	if (dev_of_node(priv->dev)) {
		struct device_node *mdio_node, *parent = dev_of_node(priv->dev);
		/* ... */
			dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
		/* ... */
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
	} else {
		/* ... */
		bus->phy_mask = ~0;
		/* ... */
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
		/* ... */
		priv->phydev = get_phy_device(bus, phy_addr, false);
		if (IS_ERR(priv->phydev)) {
			ret = PTR_ERR(priv->phydev);
			dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
			priv->phydev = NULL;
			/* ... */
			return -ENODEV;
		}

		ret = phy_device_register(priv->phydev);
		if (ret) {
			phy_device_free(priv->phydev);
			/* ... */
			dev_err(priv->dev,
				/* ... */
		}
	}
/* in netsec_probe() */
	/* ... */
		dev_err(&pdev->dev, "No MMIO resource found.\n");
		return -ENODEV;
	/* ... */
		dev_info(&pdev->dev, "No EEPROM resource found.\n");
		return -ENODEV;
	/* ... */
		return -ENOMEM;
	/* ... */
	spin_lock_init(&priv->reglock);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	/* ... */
	ndev->irq = irq;
	priv->dev = &pdev->dev;
	priv->ndev = ndev;
	/* ... */
	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
			   /* ... */

	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
				    /* ... */
	if (!priv->ioaddr) {
		dev_err(&pdev->dev, "devm_ioremap() failed\n");
		ret = -ENXIO;
		/* ... */
	}

	priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
					 /* ... */
	if (!priv->eeprom_base) {
		dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
		ret = -ENXIO;
		/* ... */
	}

	ret = device_get_ethdev_address(&pdev->dev, ndev);
	if (ret && priv->eeprom_base) {
		void __iomem *macp = priv->eeprom_base +
				     /* ... */
	}
	/* ... */
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev, "No MAC address found, using random\n");
		/* ... */
	}

	if (dev_of_node(&pdev->dev))
		/* ... netsec_of_probe() ... */
	/* ... */
	priv->phy_addr = phy_addr;

	if (!priv->freq) {
		dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
		ret = -ENODEV;
		/* ... */
	}

	/* default for throughput */
	priv->et_coalesce.rx_coalesce_usecs = 500;
	priv->et_coalesce.rx_max_coalesced_frames = 8;
	priv->et_coalesce.tx_coalesce_usecs = 500;
	priv->et_coalesce.tx_max_coalesced_frames = 8;

	ret = device_property_read_u32(&pdev->dev, "max-frame-size",
				       &ndev->max_mtu);
	if (ret < 0)
		ndev->max_mtu = ETH_DATA_LEN;
	/* ... */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);
	/* ... */
		ret = -ENODEV;
	/* ... */
	dev_info(&pdev->dev, "hardware revision %d.%d\n",
		 /* ... */

	netif_napi_add(ndev, &priv->napi, netsec_napi_poll);

	ndev->netdev_ops = &netsec_netdev_ops;
	ndev->ethtool_ops = &netsec_ethtool_ops;

	ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
			  /* ... */
	ndev->hw_features = ndev->features;

	priv->rx_cksum_offload_flag = true;
	/* ... */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
		dev_warn(&pdev->dev, "Failed to set DMA mask\n");
	/* ... */
	pm_runtime_put_sync(&pdev->dev);
	/* ... error unwind: */
	netif_napi_del(&priv->napi);
	/* ... */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	/* ... */
	dev_err(&pdev->dev, "init failed\n");
/* in netsec_remove() */
	unregister_netdev(priv->ndev);
	/* ... */
	netif_napi_del(&priv->napi);
	/* ... */
	pm_runtime_disable(&pdev->dev);
	free_netdev(priv->ndev);

/* in netsec_runtime_suspend() */
	clk_disable_unprepare(priv->clk);

/* in netsec_runtime_resume() */
	clk_prepare_enable(priv->clk);

/* in the OF match table */
	{ .compatible = "socionext,synquacer-netsec" },