
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/dma-mapping.h>
52 static int debug = -1;
81 /* hns3_pci_tbl - PCI Device ID Table
387 napi_schedule_irqoff(&tqp_vector->napi); in hns3_irq_handle()
388 tqp_vector->event_cnt++; in hns3_irq_handle()
398 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_uninit_irq()
399 tqp_vectors = &priv->tqp_vector[i]; in hns3_nic_uninit_irq()
401 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) in hns3_nic_uninit_irq()
405 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); in hns3_nic_uninit_irq()
408 free_irq(tqp_vectors->vector_irq, tqp_vectors); in hns3_nic_uninit_irq()
409 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; in hns3_nic_uninit_irq()
422 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_init_irq()
423 tqp_vectors = &priv->tqp_vector[i]; in hns3_nic_init_irq()
425 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) in hns3_nic_init_irq()
428 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { in hns3_nic_init_irq()
429 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, in hns3_nic_init_irq()
430 "%s-%s-%s-%d", hns3_driver_name, in hns3_nic_init_irq()
431 pci_name(priv->ae_handle->pdev), in hns3_nic_init_irq()
434 } else if (tqp_vectors->rx_group.ring) { in hns3_nic_init_irq()
435 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, in hns3_nic_init_irq()
436 "%s-%s-%s-%d", hns3_driver_name, in hns3_nic_init_irq()
437 pci_name(priv->ae_handle->pdev), in hns3_nic_init_irq()
439 } else if (tqp_vectors->tx_group.ring) { in hns3_nic_init_irq()
440 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, in hns3_nic_init_irq()
441 "%s-%s-%s-%d", hns3_driver_name, in hns3_nic_init_irq()
442 pci_name(priv->ae_handle->pdev), in hns3_nic_init_irq()
449 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; in hns3_nic_init_irq()
451 irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN); in hns3_nic_init_irq()
452 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, in hns3_nic_init_irq()
453 tqp_vectors->name, tqp_vectors); in hns3_nic_init_irq()
455 netdev_err(priv->netdev, "request irq(%d) fail\n", in hns3_nic_init_irq()
456 tqp_vectors->vector_irq); in hns3_nic_init_irq()
461 irq_set_affinity_hint(tqp_vectors->vector_irq, in hns3_nic_init_irq()
462 &tqp_vectors->affinity_mask); in hns3_nic_init_irq()
464 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; in hns3_nic_init_irq()
473 writel(mask_en, tqp_vector->mask_addr); in hns3_mask_vector_irq()
478 napi_enable(&tqp_vector->napi); in hns3_vector_enable()
479 enable_irq(tqp_vector->vector_irq); in hns3_vector_enable()
490 disable_irq(tqp_vector->vector_irq); in hns3_vector_disable()
491 napi_disable(&tqp_vector->napi); in hns3_vector_disable()
492 cancel_work_sync(&tqp_vector->rx_group.dim.work); in hns3_vector_disable()
493 cancel_work_sync(&tqp_vector->tx_group.dim.work); in hns3_vector_disable()
502 * Rl defines rate of interrupts i.e. number of interrupts-per-second in hns3_set_vector_coalesce_rl()
505 if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable && in hns3_set_vector_coalesce_rl()
506 !tqp_vector->rx_group.coal.adapt_enable) in hns3_set_vector_coalesce_rl()
508 * 0-59 and the unit is 4. in hns3_set_vector_coalesce_rl()
512 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); in hns3_set_vector_coalesce_rl()
520 if (tqp_vector->rx_group.coal.unit_1us) in hns3_set_vector_coalesce_rx_gl()
525 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); in hns3_set_vector_coalesce_rx_gl()
533 if (tqp_vector->tx_group.coal.unit_1us) in hns3_set_vector_coalesce_tx_gl()
538 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); in hns3_set_vector_coalesce_tx_gl()
544 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET); in hns3_set_vector_coalesce_tx_ql()
550 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET); in hns3_set_vector_coalesce_rx_ql()
556 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); in hns3_vector_coalesce_init()
557 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; in hns3_vector_coalesce_init()
558 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; in hns3_vector_coalesce_init()
559 struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal; in hns3_vector_coalesce_init()
560 struct hns3_enet_coalesce *prx_coal = &priv->rx_coal; in hns3_vector_coalesce_init()
562 tx_coal->adapt_enable = ptx_coal->adapt_enable; in hns3_vector_coalesce_init()
563 rx_coal->adapt_enable = prx_coal->adapt_enable; in hns3_vector_coalesce_init()
565 tx_coal->int_gl = ptx_coal->int_gl; in hns3_vector_coalesce_init()
566 rx_coal->int_gl = prx_coal->int_gl; in hns3_vector_coalesce_init()
568 rx_coal->flow_level = prx_coal->flow_level; in hns3_vector_coalesce_init()
569 tx_coal->flow_level = ptx_coal->flow_level; in hns3_vector_coalesce_init()
574 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { in hns3_vector_coalesce_init()
575 tx_coal->unit_1us = 1; in hns3_vector_coalesce_init()
576 rx_coal->unit_1us = 1; in hns3_vector_coalesce_init()
579 if (ae_dev->dev_specs.int_ql_max) { in hns3_vector_coalesce_init()
580 tx_coal->ql_enable = 1; in hns3_vector_coalesce_init()
581 rx_coal->ql_enable = 1; in hns3_vector_coalesce_init()
582 tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; in hns3_vector_coalesce_init()
583 rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; in hns3_vector_coalesce_init()
584 tx_coal->int_ql = ptx_coal->int_ql; in hns3_vector_coalesce_init()
585 rx_coal->int_ql = prx_coal->int_ql; in hns3_vector_coalesce_init()
593 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; in hns3_vector_coalesce_init_hw()
594 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; in hns3_vector_coalesce_init_hw()
595 struct hnae3_handle *h = priv->ae_handle; in hns3_vector_coalesce_init_hw()
597 hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl); in hns3_vector_coalesce_init_hw()
598 hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl); in hns3_vector_coalesce_init_hw()
599 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); in hns3_vector_coalesce_init_hw()
601 if (tx_coal->ql_enable) in hns3_vector_coalesce_init_hw()
602 hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql); in hns3_vector_coalesce_init_hw()
604 if (rx_coal->ql_enable) in hns3_vector_coalesce_init_hw()
605 hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql); in hns3_vector_coalesce_init_hw()
611 struct hnae3_knic_private_info *kinfo = &h->kinfo; in hns3_nic_set_real_num_queue()
612 struct hnae3_tc_info *tc_info = &kinfo->tc_info; in hns3_nic_set_real_num_queue()
613 unsigned int queue_size = kinfo->num_tqps; in hns3_nic_set_real_num_queue()
616 if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) { in hns3_nic_set_real_num_queue()
619 ret = netdev_set_num_tc(netdev, tc_info->num_tc); in hns3_nic_set_real_num_queue()
626 for (i = 0; i < tc_info->num_tc; i++) in hns3_nic_set_real_num_queue()
627 netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i], in hns3_nic_set_real_num_queue()
628 tc_info->tqp_offset[i]); in hns3_nic_set_real_num_queue()
652 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); in hns3_get_max_available_channels()
653 rss_size = alloc_tqps / h->kinfo.tc_info.num_tc; in hns3_get_max_available_channels()
679 free_irq_cpu_rmap(netdev->rx_cpu_rmap); in hns3_free_rx_cpu_rmap()
680 netdev->rx_cpu_rmap = NULL; in hns3_free_rx_cpu_rmap()
691 if (!netdev->rx_cpu_rmap) { in hns3_set_rx_cpu_rmap()
692 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num); in hns3_set_rx_cpu_rmap()
693 if (!netdev->rx_cpu_rmap) in hns3_set_rx_cpu_rmap()
694 return -ENOMEM; in hns3_set_rx_cpu_rmap()
697 for (i = 0; i < priv->vector_num; i++) { in hns3_set_rx_cpu_rmap()
698 tqp_vector = &priv->tqp_vector[i]; in hns3_set_rx_cpu_rmap()
699 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap, in hns3_set_rx_cpu_rmap()
700 tqp_vector->vector_irq); in hns3_set_rx_cpu_rmap()
713 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_net_up()
721 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); in hns3_nic_net_up()
724 for (i = 0; i < priv->vector_num; i++) in hns3_nic_net_up()
725 hns3_vector_enable(&priv->tqp_vector[i]); in hns3_nic_net_up()
728 for (j = 0; j < h->kinfo.num_tqps; j++) in hns3_nic_net_up()
729 hns3_tqp_enable(h->kinfo.tqp[j]); in hns3_nic_net_up()
732 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; in hns3_nic_net_up()
734 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); in hns3_nic_net_up()
735 while (j--) in hns3_nic_net_up()
736 hns3_tqp_disable(h->kinfo.tqp[j]); in hns3_nic_net_up()
738 for (j = i - 1; j >= 0; j--) in hns3_nic_net_up()
739 hns3_vector_disable(&priv->tqp_vector[j]); in hns3_nic_net_up()
749 for (i = 0; i < priv->vector_num; i++) { in hns3_config_xps()
750 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; in hns3_config_xps()
751 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; in hns3_config_xps()
756 ret = netif_set_xps_queue(priv->netdev, in hns3_config_xps()
757 &tqp_vector->affinity_mask, in hns3_config_xps()
758 ring->tqp->tqp_index); in hns3_config_xps()
760 netdev_warn(priv->netdev, in hns3_config_xps()
763 ring = ring->next; in hns3_config_xps()
776 return -EBUSY; in hns3_nic_net_open()
778 if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { in hns3_nic_net_open()
795 kinfo = &h->kinfo; in hns3_nic_net_open()
797 netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]); in hns3_nic_net_open()
799 if (h->ae_algo->ops->set_timer_task) in hns3_nic_net_open()
800 h->ae_algo->ops->set_timer_task(priv->ae_handle, true); in hns3_nic_net_open()
811 struct net_device *ndev = h->kinfo.netdev; in hns3_reset_tx_queue()
816 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_reset_tx_queue()
818 priv->ring[i].queue_index); in hns3_reset_tx_queue()
831 for (i = 0; i < priv->vector_num; i++) in hns3_nic_net_down()
832 hns3_vector_disable(&priv->tqp_vector[i]); in hns3_nic_net_down()
835 for (i = 0; i < h->kinfo.num_tqps; i++) in hns3_nic_net_down()
836 hns3_tqp_disable(h->kinfo.tqp[i]); in hns3_nic_net_down()
839 ops = priv->ae_handle->ae_algo->ops; in hns3_nic_net_down()
840 if (ops->stop) in hns3_nic_net_down()
841 ops->stop(priv->ae_handle); in hns3_nic_net_down()
848 hns3_clear_all_ring(priv->ae_handle, false); in hns3_nic_net_down()
850 hns3_reset_tx_queue(priv->ae_handle); in hns3_nic_net_down()
858 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_nic_net_stop()
863 if (h->ae_algo->ops->set_timer_task) in hns3_nic_net_stop()
864 h->ae_algo->ops->set_timer_task(priv->ae_handle, false); in hns3_nic_net_stop()
879 if (h->ae_algo->ops->add_uc_addr) in hns3_nic_uc_sync()
880 return h->ae_algo->ops->add_uc_addr(h, addr); in hns3_nic_uc_sync()
894 if (ether_addr_equal(addr, netdev->dev_addr)) in hns3_nic_uc_unsync()
897 if (h->ae_algo->ops->rm_uc_addr) in hns3_nic_uc_unsync()
898 return h->ae_algo->ops->rm_uc_addr(h, addr); in hns3_nic_uc_unsync()
908 if (h->ae_algo->ops->add_mc_addr) in hns3_nic_mc_sync()
909 return h->ae_algo->ops->add_mc_addr(h, addr); in hns3_nic_mc_sync()
919 if (h->ae_algo->ops->rm_mc_addr) in hns3_nic_mc_unsync()
920 return h->ae_algo->ops->rm_mc_addr(h, addr); in hns3_nic_mc_unsync()
929 if (netdev->flags & IFF_PROMISC) in hns3_get_netdev_flags()
931 else if (netdev->flags & IFF_ALLMULTI) in hns3_get_netdev_flags()
950 h->netdev_flags = new_flags; in hns3_nic_set_rx_mode()
956 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; in hns3_request_update_promisc_mode()
958 if (ops->request_update_promisc_mode) in hns3_request_update_promisc_mode()
959 ops->request_update_promisc_mode(handle); in hns3_request_update_promisc_mode()
964 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_space()
970 ntc = smp_load_acquire(&tx_spare->last_to_clean); in hns3_tx_spare_space()
971 ntu = tx_spare->next_to_use; in hns3_tx_spare_space()
974 return ntc - ntu - 1; in hns3_tx_spare_space()
979 return max(ntc, tx_spare->len - ntu) - 1; in hns3_tx_spare_space()
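
The free-space calculation above treats the tx spare area as a circular buffer: when the clean pointer is ahead of the use pointer the free region is one contiguous span, otherwise it is split around the wrap point and only the larger half is usable, because an allocation cannot straddle the end. A minimal standalone sketch of that arithmetic, assuming a simplified struct that borrows the driver's field names (illustrative, not the driver's type):

    #include <stdio.h>

    struct tx_spare {
            unsigned int len;            /* total buffer length */
            unsigned int next_to_use;    /* producer offset (ntu) */
            unsigned int last_to_clean;  /* consumer offset (ntc) */
    };

    /* Largest contiguous chunk that can still be allocated.  When
     * ntc > ntu the free region is one span; otherwise it is split into
     * [ntu, len) and [0, ntc) and the larger half is reported.
     */
    static unsigned int tx_spare_space(const struct tx_spare *s)
    {
            unsigned int ntc = s->last_to_clean;
            unsigned int ntu = s->next_to_use;
            unsigned int tail = s->len - ntu;

            if (ntc > ntu)
                    return ntc - ntu - 1;

            return (ntc > tail ? ntc : tail) - 1;
    }

    int main(void)
    {
            struct tx_spare s = { .len = 4096, .next_to_use = 3000, .last_to_clean = 1000 };

            /* free space wraps: tail chunk is 1096, head chunk is 1000 */
            printf("%u\n", tx_spare_space(&s));   /* prints 1095 */
            return 0;
    }
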
984 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_update()
987 tx_spare->last_to_clean == tx_spare->next_to_clean) in hns3_tx_spare_update()
993 smp_store_release(&tx_spare->last_to_clean, in hns3_tx_spare_update()
994 tx_spare->next_to_clean); in hns3_tx_spare_update()
1001 u32 len = skb->len <= ring->tx_copybreak ? skb->len : in hns3_can_use_tx_bounce()
1004 if (len > ring->tx_copybreak) in hns3_can_use_tx_bounce()
1008 u64_stats_update_begin(&ring->syncp); in hns3_can_use_tx_bounce()
1009 ring->stats.tx_spare_full++; in hns3_can_use_tx_bounce()
1010 u64_stats_update_end(&ring->syncp); in hns3_can_use_tx_bounce()
1021 if (skb->len <= ring->tx_copybreak || !tx_sgl || in hns3_can_use_tx_sgl()
1023 skb_shinfo(skb)->nr_frags < tx_sgl)) in hns3_can_use_tx_sgl()
1027 u64_stats_update_begin(&ring->syncp); in hns3_can_use_tx_sgl()
1028 ring->stats.tx_spare_full++; in hns3_can_use_tx_sgl()
1029 u64_stats_update_end(&ring->syncp); in hns3_can_use_tx_sgl()
1045 ring->tqp->handle->kinfo.tx_spare_buf_size; in hns3_init_tx_spare_buffer()
1075 tx_spare->dma = dma; in hns3_init_tx_spare_buffer()
1076 tx_spare->buf = page_address(page); in hns3_init_tx_spare_buffer()
1077 tx_spare->len = PAGE_SIZE << order; in hns3_init_tx_spare_buffer()
1078 ring->tx_spare = tx_spare; in hns3_init_tx_spare_buffer()
1088 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_alloc()
1089 u32 ntu = tx_spare->next_to_use; in hns3_tx_spare_alloc()
1097 if (ntu + size > tx_spare->len) { in hns3_tx_spare_alloc()
1098 *cb_len += (tx_spare->len - ntu); in hns3_tx_spare_alloc()
1102 tx_spare->next_to_use = ntu + size; in hns3_tx_spare_alloc()
1103 if (tx_spare->next_to_use == tx_spare->len) in hns3_tx_spare_alloc()
1104 tx_spare->next_to_use = 0; in hns3_tx_spare_alloc()
1106 *dma = tx_spare->dma + ntu; in hns3_tx_spare_alloc()
1108 return tx_spare->buf + ntu; in hns3_tx_spare_alloc()
1113 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_rollback()
1115 if (len > tx_spare->next_to_use) { in hns3_tx_spare_rollback()
1116 len -= tx_spare->next_to_use; in hns3_tx_spare_rollback()
1117 tx_spare->next_to_use = tx_spare->len - len; in hns3_tx_spare_rollback()
1119 tx_spare->next_to_use -= len; in hns3_tx_spare_rollback()
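
hns3_tx_spare_alloc() is a bump allocator over the same area: when the tail left before the wrap point is too small for the request, the visible arithmetic implies the allocation restarts at offset 0 and *cb_len grows by the skipped tail so the cleaning side can step over the whole region at once, while hns3_tx_spare_rollback() undoes a reservation on failure. A compact sketch under those assumptions, with simplified types:

    #include <stdio.h>

    struct spare {
            unsigned int len;          /* total length of the spare buffer */
            unsigned int next_to_use;  /* current bump offset */
    };

    /* Reserve size bytes.  If the remaining tail is too small, skip it,
     * start at offset 0, and let *cb_len cover the skipped tail too.
     */
    static unsigned int spare_alloc(struct spare *s, unsigned int size, unsigned int *cb_len)
    {
            unsigned int ntu = s->next_to_use;

            *cb_len = size;
            if (ntu + size > s->len) {
                    *cb_len += s->len - ntu;   /* account for the skipped tail */
                    ntu = 0;
            }

            s->next_to_use = ntu + size;
            if (s->next_to_use == s->len)
                    s->next_to_use = 0;

            return ntu;                        /* offset of the reservation */
    }

    /* Undo a reservation of len bytes, handling the wrapped case. */
    static void spare_rollback(struct spare *s, unsigned int len)
    {
            if (len > s->next_to_use)
                    s->next_to_use = s->len - (len - s->next_to_use);
            else
                    s->next_to_use -= len;
    }

    int main(void)
    {
            struct spare s = { .len = 4096, .next_to_use = 4000 };
            unsigned int cb_len, off;

            off = spare_alloc(&s, 200, &cb_len);
            printf("off=%u cb_len=%u ntu=%u\n", off, cb_len, s.next_to_use); /* 0 296 200 */
            spare_rollback(&s, cb_len);
            printf("ntu=%u\n", s.next_to_use);                               /* back to 4000 */
            return 0;
    }
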
1126 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_reclaim_cb()
1127 u32 ntc = tx_spare->next_to_clean; in hns3_tx_spare_reclaim_cb()
1128 u32 len = cb->length; in hns3_tx_spare_reclaim_cb()
1130 tx_spare->next_to_clean += len; in hns3_tx_spare_reclaim_cb()
1132 if (tx_spare->next_to_clean >= tx_spare->len) { in hns3_tx_spare_reclaim_cb()
1133 tx_spare->next_to_clean -= tx_spare->len; in hns3_tx_spare_reclaim_cb()
1135 if (tx_spare->next_to_clean) { in hns3_tx_spare_reclaim_cb()
1137 len = tx_spare->next_to_clean; in hns3_tx_spare_reclaim_cb()
1143 * the tx buffer to do the dma sync or sg unmapping after in hns3_tx_spare_reclaim_cb()
1144 * tx_spare->next_to_clean is moved forward. in hns3_tx_spare_reclaim_cb()
1146 if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) { in hns3_tx_spare_reclaim_cb()
1147 dma_addr_t dma = tx_spare->dma + ntc; in hns3_tx_spare_reclaim_cb()
1152 struct sg_table *sgt = tx_spare->buf + ntc; in hns3_tx_spare_reclaim_cb()
1154 dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, in hns3_tx_spare_reclaim_cb()
1181 if (l3.v4->version == 4) in hns3_set_tso()
1182 l3.v4->check = 0; in hns3_set_tso()
1185 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | in hns3_set_tso()
1196 if (l3.v4->version == 4) in hns3_set_tso()
1197 l3.v4->check = 0; in hns3_set_tso()
1201 l4_offset = l4.hdr - skb->data; in hns3_set_tso()
1204 l4_paylen = skb->len - l4_offset; in hns3_set_tso()
1206 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in hns3_set_tso()
1208 csum_replace_by_diff(&l4.udp->check, in hns3_set_tso()
1211 hdr_len = (l4.tcp->doff << 2) + l4_offset; in hns3_set_tso()
1212 csum_replace_by_diff(&l4.tcp->check, in hns3_set_tso()
1216 *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len; in hns3_set_tso()
1219 *paylen_fdop_ol4cs = skb->len - hdr_len; in hns3_set_tso()
1223 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) in hns3_set_tso()
1227 *mss = skb_shinfo(skb)->gso_size; in hns3_set_tso()
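
The two assignments above encode the TSO byte accounting: the descriptor's payload length is the skb length minus one copy of the headers, while the bytes-sent statistic adds one extra header copy per additional segment, since the hardware replicates headers that the skb carries only once. A small worked example with hypothetical lengths:

    #include <stdio.h>

    int main(void)
    {
            unsigned int hdr_len  = 54;               /* Ethernet + IPv4 + TCP (hypothetical) */
            unsigned int mss      = 1000;
            unsigned int skb_len  = hdr_len + 30 * mss;
            unsigned int gso_segs = (skb_len - hdr_len + mss - 1) / mss;   /* 30 segments */

            /* payload handed to the BD: everything after the headers */
            unsigned int paylen = skb_len - hdr_len;

            /* bytes on the wire: the header goes out once per segment, but the
             * skb contains it once, so (gso_segs - 1) extra copies are added
             */
            unsigned int send_bytes = (gso_segs - 1) * hdr_len + skb_len;

            printf("segs=%u paylen=%u send_bytes=%u\n", gso_segs, paylen, send_bytes);
            return 0;
    }
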
1247 if (skb->protocol == htons(ETH_P_IPV6)) { in hns3_get_l4_protocol()
1249 l4_proto_tmp = l3.v6->nexthdr; in hns3_get_l4_protocol()
1251 ipv6_skip_exthdr(skb, exthdr - skb->data, in hns3_get_l4_protocol()
1253 } else if (skb->protocol == htons(ETH_P_IP)) { in hns3_get_l4_protocol()
1254 l4_proto_tmp = l3.v4->protocol; in hns3_get_l4_protocol()
1256 return -EINVAL; in hns3_get_l4_protocol()
1262 if (!skb->encapsulation) { in hns3_get_l4_protocol()
1271 if (l3.v6->version == 6) { in hns3_get_l4_protocol()
1273 l4_proto_tmp = l3.v6->nexthdr; in hns3_get_l4_protocol()
1275 ipv6_skip_exthdr(skb, exthdr - skb->data, in hns3_get_l4_protocol()
1277 } else if (l3.v4->version == 4) { in hns3_get_l4_protocol()
1278 l4_proto_tmp = l3.v4->protocol; in hns3_get_l4_protocol()
1286 /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
1294 struct hns3_nic_priv *priv = netdev_priv(skb->dev); in hns3_tunnel_csum_bug()
1295 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); in hns3_tunnel_csum_bug()
1301 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) in hns3_tunnel_csum_bug()
1306 if (!(!skb->encapsulation && in hns3_tunnel_csum_bug()
1307 (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || in hns3_tunnel_csum_bug()
1308 l4.udp->dest == htons(GENEVE_UDP_PORT) || in hns3_tunnel_csum_bug()
1309 l4.udp->dest == htons(4790)))) in hns3_tunnel_csum_bug()
1327 l2_len = l3.hdr - skb->data; in hns3_set_outer_l2l3l4()
1331 l3_len = l4.hdr - l3.hdr; in hns3_set_outer_l2l3l4()
1336 l4_len = il2_hdr - l4.hdr; in hns3_set_outer_l2l3l4()
1340 if (skb->protocol == htons(ETH_P_IP)) { in hns3_set_outer_l2l3l4()
1349 } else if (skb->protocol == htons(ETH_P_IPV6)) { in hns3_set_outer_l2l3l4()
1366 unsigned char *l2_hdr = skb->data; in hns3_set_l2l3l4()
1376 if (skb->encapsulation) { in hns3_set_l2l3l4()
1383 return -EDOM; in hns3_set_l2l3l4()
1400 if (l3.v4->version == 4) { in hns3_set_l2l3l4()
1409 } else if (l3.v6->version == 6) { in hns3_set_l2l3l4()
1415 l2_len = l3.hdr - l2_hdr; in hns3_set_l2l3l4()
1419 l3_len = l4.hdr - l3.hdr; in hns3_set_l2l3l4()
1429 l4.tcp->doff); in hns3_set_l2l3l4()
1456 return -EDOM; in hns3_set_l2l3l4()
1470 struct hnae3_handle *handle = tx_ring->tqp->handle; in hns3_handle_vtags()
1475 if (!(skb->protocol == htons(ETH_P_8021Q) || in hns3_handle_vtags()
1483 ae_dev = pci_get_drvdata(handle->pdev); in hns3_handle_vtags()
1485 ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && in hns3_handle_vtags()
1486 handle->port_base_vlan_state == in hns3_handle_vtags()
1488 return -EINVAL; in hns3_handle_vtags()
1490 if (skb->protocol == htons(ETH_P_8021Q) && in hns3_handle_vtags()
1491 !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in hns3_handle_vtags()
1496 skb->protocol = vlan_get_protocol(skb); in hns3_handle_vtags()
1504 if (skb->protocol == htons(ETH_P_8021Q) && in hns3_handle_vtags()
1505 handle->port_base_vlan_state == in hns3_handle_vtags()
1511 skb->protocol = vlan_get_protocol(skb); in hns3_handle_vtags()
1519 vhdr = (struct vlan_ethhdr *)skb->data; in hns3_handle_vtags()
1520 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) in hns3_handle_vtags()
1523 skb->protocol = vlan_get_protocol(skb); in hns3_handle_vtags()
1530 struct hns3_nic_priv *priv = netdev_priv(skb->dev); in hns3_check_hw_tx_csum()
1533 * HW checksum of the non-IP packets and GSO packets is handled at in hns3_check_hw_tx_csum()
1537 !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state)) in hns3_check_hw_tx_csum()
1548 u32 paylen_ol4cs = skb->len; in hns3_fill_skb_desc()
1557 u64_stats_update_begin(&ring->syncp); in hns3_fill_skb_desc()
1558 ring->stats.tx_vlan_err++; in hns3_fill_skb_desc()
1559 u64_stats_update_end(&ring->syncp); in hns3_fill_skb_desc()
1563 inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & in hns3_fill_skb_desc()
1568 out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & in hns3_fill_skb_desc()
1574 desc_cb->send_bytes = skb->len; in hns3_fill_skb_desc()
1576 if (skb->ip_summed == CHECKSUM_PARTIAL) { in hns3_fill_skb_desc()
1585 skb->csum_offset >> 1); in hns3_fill_skb_desc()
1594 u64_stats_update_begin(&ring->syncp); in hns3_fill_skb_desc()
1595 ring->stats.tx_l4_proto_err++; in hns3_fill_skb_desc()
1596 u64_stats_update_end(&ring->syncp); in hns3_fill_skb_desc()
1604 u64_stats_update_begin(&ring->syncp); in hns3_fill_skb_desc()
1605 ring->stats.tx_l2l3l4_err++; in hns3_fill_skb_desc()
1606 u64_stats_update_end(&ring->syncp); in hns3_fill_skb_desc()
1611 &type_cs_vlan_tso, &desc_cb->send_bytes); in hns3_fill_skb_desc()
1613 u64_stats_update_begin(&ring->syncp); in hns3_fill_skb_desc()
1614 ring->stats.tx_tso_err++; in hns3_fill_skb_desc()
1615 u64_stats_update_end(&ring->syncp); in hns3_fill_skb_desc()
1622 desc->tx.ol_type_vlan_len_msec = in hns3_fill_skb_desc()
1624 desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); in hns3_fill_skb_desc()
1625 desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs); in hns3_fill_skb_desc()
1626 desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum); in hns3_fill_skb_desc()
1627 desc->tx.vlan_tag = cpu_to_le16(inner_vtag); in hns3_fill_skb_desc()
1628 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); in hns3_fill_skb_desc()
1638 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; in hns3_fill_desc()
1643 desc->addr = cpu_to_le64(dma); in hns3_fill_desc()
1644 desc->tx.send_size = cpu_to_le16(size); in hns3_fill_desc()
1645 desc->tx.bdtp_fe_sc_vld_ra_ri = in hns3_fill_desc()
1648 trace_hns3_tx_desc(ring, ring->next_to_use); in hns3_fill_desc()
1660 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); in hns3_fill_desc()
1661 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? in hns3_fill_desc()
1663 desc->tx.bdtp_fe_sc_vld_ra_ri = in hns3_fill_desc()
1666 trace_hns3_tx_desc(ring, ring->next_to_use); in hns3_fill_desc()
1670 desc = &ring->desc[ring->next_to_use]; in hns3_fill_desc()
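
hns3_fill_desc() splits a mapped buffer that exceeds one BD into frag_buf_num descriptors, each pointing HNS3_MAX_BD_SIZE further into the DMA region, with the last one carrying the remainder, as the offset arithmetic implies. A standalone sketch of that split; the 65535 constant is illustrative and stands in for the driver's HNS3_MAX_BD_SIZE:

    #include <stdio.h>

    #define MAX_BD_SIZE 65535u   /* illustrative stand-in for HNS3_MAX_BD_SIZE */

    int main(void)
    {
            unsigned int size = 150000;   /* hypothetical mapped buffer length */
            unsigned int bd_num = (size + MAX_BD_SIZE - 1) / MAX_BD_SIZE;
            unsigned int last = size % MAX_BD_SIZE ? size % MAX_BD_SIZE : MAX_BD_SIZE;
            unsigned int k;

            /* each BD points at dma + k * MAX_BD_SIZE and carries a full-sized
             * chunk, except the final BD which carries the remainder
             */
            for (k = 0; k < bd_num; k++)
                    printf("bd %u: offset %u size %u\n", k, k * MAX_BD_SIZE,
                           k == bd_num - 1 ? last : MAX_BD_SIZE);
            return 0;
    }
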
1679 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_map_and_fill_desc()
1691 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); in hns3_map_and_fill_desc()
1708 u64_stats_update_begin(&ring->syncp); in hns3_map_and_fill_desc()
1709 ring->stats.sw_err_cnt++; in hns3_map_and_fill_desc()
1710 u64_stats_update_end(&ring->syncp); in hns3_map_and_fill_desc()
1711 return -ENOMEM; in hns3_map_and_fill_desc()
1714 desc_cb->priv = priv; in hns3_map_and_fill_desc()
1715 desc_cb->length = size; in hns3_map_and_fill_desc()
1716 desc_cb->dma = dma; in hns3_map_and_fill_desc()
1717 desc_cb->type = type; in hns3_map_and_fill_desc()
1731 size -= HNS3_MAX_BD_SIZE; in hns3_skb_bd_num()
1743 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in hns3_skb_bd_num()
1744 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in hns3_skb_bd_num()
1751 size -= HNS3_MAX_BD_SIZE; in hns3_skb_bd_num()
1774 if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level && in hns3_tx_bd_num()
1776 skb_shinfo(skb)->nr_frags < max_non_tso_bd_num)) in hns3_tx_bd_num()
1777 return skb_shinfo(skb)->nr_frags + 1U; in hns3_tx_bd_num()
1798 if (!skb->encapsulation) in hns3_gso_hdr_len()
1806 * max_non_tso_bd_num - 1 frags to be larger than gso header len + mss,
1807 * and the remaining continuous max_non_tso_bd_num - 1 frags to be larger
1808 * than MSS except the last max_non_tso_bd_num - 1 frags.
1816 for (i = 0; i < max_non_tso_bd_num - 1U; i++) in hns3_skb_need_linearized()
1822 if (tot_len + bd_size[max_non_tso_bd_num - 1U] < in hns3_skb_need_linearized()
1823 skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb)) in hns3_skb_need_linearized()
1826 /* ensure every continuous max_non_tso_bd_num - 1 buffer is greater in hns3_skb_need_linearized()
1829 for (i = 0; i < bd_num - max_non_tso_bd_num; i++) { in hns3_skb_need_linearized()
1830 tot_len -= bd_size[i]; in hns3_skb_need_linearized()
1831 tot_len += bd_size[i + max_non_tso_bd_num - 1U]; in hns3_skb_need_linearized()
1833 if (tot_len < skb_shinfo(skb)->gso_size) in hns3_skb_need_linearized()
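
The loop above is a sliding-window check: the first window must cover the GSO headers plus one MSS, and every later window of max_non_tso_bd_num - 1 consecutive buffers must cover at least one MSS, otherwise the skb is linearized before it is handed to the hardware. A standalone restatement of that check over plain arrays (the function name and the sample numbers are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static bool need_linearize(const unsigned int *bd_size, unsigned int bd_num,
                               unsigned int win, unsigned int mss, unsigned int hdr_len)
    {
            unsigned int tot_len = 0;
            unsigned int i;

            /* first window: headers plus one MSS must fit */
            for (i = 0; i < win - 1; i++)
                    tot_len += bd_size[i];

            if (tot_len + bd_size[win - 1] < mss + hdr_len)
                    return true;

            /* every later window of (win - 1) buffers must cover one MSS */
            for (i = 0; i + win < bd_num; i++) {
                    tot_len -= bd_size[i];
                    tot_len += bd_size[i + win - 1];
                    if (tot_len < mss)
                            return true;
            }

            return false;
    }

    int main(void)
    {
            unsigned int sizes[] = { 200, 200, 200, 200, 200, 200, 200, 200, 200, 200 };

            /* ten 200-byte frags with an 8-BD limit and a 1460-byte MSS:
             * a window of 7 frags only covers 1400 bytes, so linearize
             */
            printf("%d\n", need_linearize(sizes, 10, 8, 1460, 54));   /* prints 1 */
            return 0;
    }
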
1845 size[i] = skb_frag_size(&shinfo->frags[i]); in hns3_shinfo_pack()
1856 u64_stats_update_begin(&ring->syncp); in hns3_skb_linearize()
1857 ring->stats.over_max_recursion++; in hns3_skb_linearize()
1858 u64_stats_update_end(&ring->syncp); in hns3_skb_linearize()
1859 return -ENOMEM; in hns3_skb_linearize()
1862 /* The skb->len has exceeded the hw limitation, linearization in hns3_skb_linearize()
1865 if (skb->len > HNS3_MAX_TSO_SIZE || in hns3_skb_linearize()
1866 (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { in hns3_skb_linearize()
1867 u64_stats_update_begin(&ring->syncp); in hns3_skb_linearize()
1868 ring->stats.hw_limitation++; in hns3_skb_linearize()
1869 u64_stats_update_end(&ring->syncp); in hns3_skb_linearize()
1870 return -ENOMEM; in hns3_skb_linearize()
1874 u64_stats_update_begin(&ring->syncp); in hns3_skb_linearize()
1875 ring->stats.sw_err_cnt++; in hns3_skb_linearize()
1876 u64_stats_update_end(&ring->syncp); in hns3_skb_linearize()
1877 return -ENOMEM; in hns3_skb_linearize()
1888 u8 max_non_tso_bd_num = priv->max_non_tso_bd_num; in hns3_nic_maybe_stop_tx()
1902 return -ENOMEM; in hns3_nic_maybe_stop_tx()
1904 bd_num = hns3_tx_bd_count(skb->len); in hns3_nic_maybe_stop_tx()
1906 u64_stats_update_begin(&ring->syncp); in hns3_nic_maybe_stop_tx()
1907 ring->stats.tx_copy++; in hns3_nic_maybe_stop_tx()
1908 u64_stats_update_end(&ring->syncp); in hns3_nic_maybe_stop_tx()
1915 netif_stop_subqueue(netdev, ring->queue_index); in hns3_nic_maybe_stop_tx()
1923 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { in hns3_nic_maybe_stop_tx()
1924 netif_start_subqueue(netdev, ring->queue_index); in hns3_nic_maybe_stop_tx()
1928 u64_stats_update_begin(&ring->syncp); in hns3_nic_maybe_stop_tx()
1929 ring->stats.tx_busy++; in hns3_nic_maybe_stop_tx()
1930 u64_stats_update_end(&ring->syncp); in hns3_nic_maybe_stop_tx()
1932 return -EBUSY; in hns3_nic_maybe_stop_tx()
1940 for (i = 0; i < ring->desc_num; i++) { in hns3_clear_desc()
1941 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; in hns3_clear_desc()
1947 if (ring->next_to_use == next_to_use_orig) in hns3_clear_desc()
1953 desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_clear_desc()
1955 if (!desc_cb->dma) in hns3_clear_desc()
1959 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) in hns3_clear_desc()
1960 dma_unmap_single(dev, desc_cb->dma, desc_cb->length, in hns3_clear_desc()
1962 else if (desc_cb->type & in hns3_clear_desc()
1964 hns3_tx_spare_rollback(ring, desc_cb->length); in hns3_clear_desc()
1965 else if (desc_cb->length) in hns3_clear_desc()
1966 dma_unmap_page(dev, desc_cb->dma, desc_cb->length, in hns3_clear_desc()
1969 desc_cb->length = 0; in hns3_clear_desc()
1970 desc_cb->dma = 0; in hns3_clear_desc()
1971 desc_cb->type = DESC_TYPE_UNKNOWN; in hns3_clear_desc()
1987 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in hns3_fill_skb_to_desc()
1988 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in hns3_fill_skb_to_desc()
2012 ring->pending_buf += num; in hns3_tx_doorbell()
2015 u64_stats_update_begin(&ring->syncp); in hns3_tx_doorbell()
2016 ring->stats.tx_more++; in hns3_tx_doorbell()
2017 u64_stats_update_end(&ring->syncp); in hns3_tx_doorbell()
2021 if (!ring->pending_buf) in hns3_tx_doorbell()
2024 writel(ring->pending_buf, in hns3_tx_doorbell()
2025 ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); in hns3_tx_doorbell()
2026 ring->pending_buf = 0; in hns3_tx_doorbell()
2027 WRITE_ONCE(ring->last_to_use, ring->next_to_use); in hns3_tx_doorbell()
2035 if (!(h->ae_algo->ops->set_tx_hwts_info && in hns3_tsyn()
2036 h->ae_algo->ops->set_tx_hwts_info(h, skb))) in hns3_tsyn()
2039 desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B)); in hns3_tsyn()
2045 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_handle_tx_bounce()
2054 if (skb->len <= ring->tx_copybreak) { in hns3_handle_tx_bounce()
2055 size = skb->len; in hns3_handle_tx_bounce()
2067 u64_stats_update_begin(&ring->syncp); in hns3_handle_tx_bounce()
2068 ring->stats.copy_bits_err++; in hns3_handle_tx_bounce()
2069 u64_stats_update_end(&ring->syncp); in hns3_handle_tx_bounce()
2073 desc_cb->priv = skb; in hns3_handle_tx_bounce()
2074 desc_cb->length = cb_len; in hns3_handle_tx_bounce()
2075 desc_cb->dma = dma; in hns3_handle_tx_bounce()
2076 desc_cb->type = type; in hns3_handle_tx_bounce()
2092 u64_stats_update_begin(&ring->syncp); in hns3_handle_tx_bounce()
2093 ring->stats.tx_bounce++; in hns3_handle_tx_bounce()
2094 u64_stats_update_end(&ring->syncp); in hns3_handle_tx_bounce()
2101 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_handle_tx_sgl()
2102 u32 nfrag = skb_shinfo(skb)->nr_frags + 1; in hns3_handle_tx_sgl()
2119 sgt->sgl = (struct scatterlist *)(sgt + 1); in hns3_handle_tx_sgl()
2120 sg_init_table(sgt->sgl, nfrag); in hns3_handle_tx_sgl()
2121 nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); in hns3_handle_tx_sgl()
2124 u64_stats_update_begin(&ring->syncp); in hns3_handle_tx_sgl()
2125 ring->stats.skb2sgl_err++; in hns3_handle_tx_sgl()
2126 u64_stats_update_end(&ring->syncp); in hns3_handle_tx_sgl()
2127 return -ENOMEM; in hns3_handle_tx_sgl()
2130 sgt->orig_nents = nents; in hns3_handle_tx_sgl()
2131 sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, in hns3_handle_tx_sgl()
2133 if (unlikely(!sgt->nents)) { in hns3_handle_tx_sgl()
2135 u64_stats_update_begin(&ring->syncp); in hns3_handle_tx_sgl()
2136 ring->stats.map_sg_err++; in hns3_handle_tx_sgl()
2137 u64_stats_update_end(&ring->syncp); in hns3_handle_tx_sgl()
2138 return -ENOMEM; in hns3_handle_tx_sgl()
2141 desc_cb->priv = skb; in hns3_handle_tx_sgl()
2142 desc_cb->length = cb_len; in hns3_handle_tx_sgl()
2143 desc_cb->dma = dma; in hns3_handle_tx_sgl()
2144 desc_cb->type = DESC_TYPE_SGL_SKB; in hns3_handle_tx_sgl()
2146 for (i = 0; i < sgt->nents; i++) in hns3_handle_tx_sgl()
2147 bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i), in hns3_handle_tx_sgl()
2148 sg_dma_len(sgt->sgl + i)); in hns3_handle_tx_sgl()
2150 u64_stats_update_begin(&ring->syncp); in hns3_handle_tx_sgl()
2151 ring->stats.tx_sgl++; in hns3_handle_tx_sgl()
2152 u64_stats_update_end(&ring->syncp); in hns3_handle_tx_sgl()
2162 if (!ring->tx_spare) in hns3_handle_desc_filling()
2180 struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; in hns3_nic_net_xmit()
2181 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_nic_net_xmit()
2191 u64_stats_update_begin(&ring->syncp); in hns3_nic_net_xmit()
2192 ring->stats.sw_err_cnt++; in hns3_nic_net_xmit()
2193 u64_stats_update_end(&ring->syncp); in hns3_nic_net_xmit()
2199 prefetch(skb->data); in hns3_nic_net_xmit()
2203 if (ret == -EBUSY) { in hns3_nic_net_xmit()
2212 next_to_use_head = ring->next_to_use; in hns3_nic_net_xmit()
2214 ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use], in hns3_nic_net_xmit()
2219 /* 'ret < 0' means filling error, 'ret == 0' means skb->len is in hns3_nic_net_xmit()
2227 pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) : in hns3_nic_net_xmit()
2228 (ring->desc_num - 1); in hns3_nic_net_xmit()
2230 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) in hns3_nic_net_xmit()
2231 hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]); in hns3_nic_net_xmit()
2233 ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= in hns3_nic_net_xmit()
2240 dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); in hns3_nic_net_xmit()
2241 doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes, in hns3_nic_net_xmit()
2262 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) in hns3_nic_net_set_mac_address()
2263 return -EADDRNOTAVAIL; in hns3_nic_net_set_mac_address()
2265 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { in hns3_nic_net_set_mac_address()
2267 mac_addr->sa_data); in hns3_nic_net_set_mac_address()
2274 if (!hns3_is_phys_func(h->pdev) && in hns3_nic_net_set_mac_address()
2275 !is_zero_ether_addr(netdev->perm_addr)) { in hns3_nic_net_set_mac_address()
2277 netdev->perm_addr, mac_addr->sa_data); in hns3_nic_net_set_mac_address()
2278 return -EPERM; in hns3_nic_net_set_mac_address()
2281 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); in hns3_nic_net_set_mac_address()
2287 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); in hns3_nic_net_set_mac_address()
2298 return -EINVAL; in hns3_nic_do_ioctl()
2300 if (!h->ae_algo->ops->do_ioctl) in hns3_nic_do_ioctl()
2301 return -EOPNOTSUPP; in hns3_nic_do_ioctl()
2303 return h->ae_algo->ops->do_ioctl(h, ifr, cmd); in hns3_nic_do_ioctl()
2309 netdev_features_t changed = netdev->features ^ features; in hns3_nic_set_features()
2311 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_set_features()
2315 if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { in hns3_nic_set_features()
2317 ret = h->ae_algo->ops->set_gro_en(h, enable); in hns3_nic_set_features()
2323 h->ae_algo->ops->enable_hw_strip_rxvtag) { in hns3_nic_set_features()
2325 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); in hns3_nic_set_features()
2330 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { in hns3_nic_set_features()
2332 h->ae_algo->ops->enable_fd(h, enable); in hns3_nic_set_features()
2335 if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && in hns3_nic_set_features()
2336 h->ae_algo->ops->cls_flower_active(h)) { in hns3_nic_set_features()
2339 return -EINVAL; in hns3_nic_set_features()
2343 h->ae_algo->ops->enable_vlan_filter) { in hns3_nic_set_features()
2345 ret = h->ae_algo->ops->enable_vlan_filter(h, enable); in hns3_nic_set_features()
2350 netdev->features = features; in hns3_nic_set_features()
2363 if (skb->ip_summed != CHECKSUM_PARTIAL) in hns3_features_check()
2366 if (skb->encapsulation) in hns3_features_check()
2367 len = skb_inner_transport_header(skb) - skb->data; in hns3_features_check()
2369 len = skb_transport_header(skb) - skb->data; in hns3_features_check()
2389 int queue_num = priv->ae_handle->kinfo.num_tqps; in hns3_nic_get_stats64()
2390 struct hnae3_handle *handle = priv->ae_handle; in hns3_nic_get_stats64()
2406 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_nic_get_stats64()
2409 handle->ae_algo->ops->update_stats(handle, &netdev->stats); in hns3_nic_get_stats64()
2413 ring = &priv->ring[idx]; in hns3_nic_get_stats64()
2415 start = u64_stats_fetch_begin_irq(&ring->syncp); in hns3_nic_get_stats64()
2416 tx_bytes += ring->stats.tx_bytes; in hns3_nic_get_stats64()
2417 tx_pkts += ring->stats.tx_pkts; in hns3_nic_get_stats64()
2418 tx_drop += ring->stats.sw_err_cnt; in hns3_nic_get_stats64()
2419 tx_drop += ring->stats.tx_vlan_err; in hns3_nic_get_stats64()
2420 tx_drop += ring->stats.tx_l4_proto_err; in hns3_nic_get_stats64()
2421 tx_drop += ring->stats.tx_l2l3l4_err; in hns3_nic_get_stats64()
2422 tx_drop += ring->stats.tx_tso_err; in hns3_nic_get_stats64()
2423 tx_drop += ring->stats.over_max_recursion; in hns3_nic_get_stats64()
2424 tx_drop += ring->stats.hw_limitation; in hns3_nic_get_stats64()
2425 tx_drop += ring->stats.copy_bits_err; in hns3_nic_get_stats64()
2426 tx_drop += ring->stats.skb2sgl_err; in hns3_nic_get_stats64()
2427 tx_drop += ring->stats.map_sg_err; in hns3_nic_get_stats64()
2428 tx_errors += ring->stats.sw_err_cnt; in hns3_nic_get_stats64()
2429 tx_errors += ring->stats.tx_vlan_err; in hns3_nic_get_stats64()
2430 tx_errors += ring->stats.tx_l4_proto_err; in hns3_nic_get_stats64()
2431 tx_errors += ring->stats.tx_l2l3l4_err; in hns3_nic_get_stats64()
2432 tx_errors += ring->stats.tx_tso_err; in hns3_nic_get_stats64()
2433 tx_errors += ring->stats.over_max_recursion; in hns3_nic_get_stats64()
2434 tx_errors += ring->stats.hw_limitation; in hns3_nic_get_stats64()
2435 tx_errors += ring->stats.copy_bits_err; in hns3_nic_get_stats64()
2436 tx_errors += ring->stats.skb2sgl_err; in hns3_nic_get_stats64()
2437 tx_errors += ring->stats.map_sg_err; in hns3_nic_get_stats64()
2438 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); in hns3_nic_get_stats64()
2441 ring = &priv->ring[idx + queue_num]; in hns3_nic_get_stats64()
2443 start = u64_stats_fetch_begin_irq(&ring->syncp); in hns3_nic_get_stats64()
2444 rx_bytes += ring->stats.rx_bytes; in hns3_nic_get_stats64()
2445 rx_pkts += ring->stats.rx_pkts; in hns3_nic_get_stats64()
2446 rx_drop += ring->stats.l2_err; in hns3_nic_get_stats64()
2447 rx_errors += ring->stats.l2_err; in hns3_nic_get_stats64()
2448 rx_errors += ring->stats.l3l4_csum_err; in hns3_nic_get_stats64()
2449 rx_crc_errors += ring->stats.l2_err; in hns3_nic_get_stats64()
2450 rx_multicast += ring->stats.rx_multicast; in hns3_nic_get_stats64()
2451 rx_length_errors += ring->stats.err_pkt_len; in hns3_nic_get_stats64()
2452 } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); in hns3_nic_get_stats64()
2455 stats->tx_bytes = tx_bytes; in hns3_nic_get_stats64()
2456 stats->tx_packets = tx_pkts; in hns3_nic_get_stats64()
2457 stats->rx_bytes = rx_bytes; in hns3_nic_get_stats64()
2458 stats->rx_packets = rx_pkts; in hns3_nic_get_stats64()
2460 stats->rx_errors = rx_errors; in hns3_nic_get_stats64()
2461 stats->multicast = rx_multicast; in hns3_nic_get_stats64()
2462 stats->rx_length_errors = rx_length_errors; in hns3_nic_get_stats64()
2463 stats->rx_crc_errors = rx_crc_errors; in hns3_nic_get_stats64()
2464 stats->rx_missed_errors = netdev->stats.rx_missed_errors; in hns3_nic_get_stats64()
2466 stats->tx_errors = tx_errors; in hns3_nic_get_stats64()
2467 stats->rx_dropped = rx_drop; in hns3_nic_get_stats64()
2468 stats->tx_dropped = tx_drop; in hns3_nic_get_stats64()
2469 stats->collisions = netdev->stats.collisions; in hns3_nic_get_stats64()
2470 stats->rx_over_errors = netdev->stats.rx_over_errors; in hns3_nic_get_stats64()
2471 stats->rx_frame_errors = netdev->stats.rx_frame_errors; in hns3_nic_get_stats64()
2472 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; in hns3_nic_get_stats64()
2473 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; in hns3_nic_get_stats64()
2474 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; in hns3_nic_get_stats64()
2475 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; in hns3_nic_get_stats64()
2476 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; in hns3_nic_get_stats64()
2477 stats->tx_window_errors = netdev->stats.tx_window_errors; in hns3_nic_get_stats64()
2478 stats->rx_compressed = netdev->stats.rx_compressed; in hns3_nic_get_stats64()
2479 stats->tx_compressed = netdev->stats.tx_compressed; in hns3_nic_get_stats64()
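
The per-ring loops above take consistent snapshots with the u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair before summing the counters into the netdev stats. A rough userspace analogue of that snapshot-and-retry shape, with the kernel helpers replaced by a bare sequence counter (illustrative only; real code needs the kernel primitives or explicit barriers):

    #include <stdio.h>

    struct ring_stats {
            unsigned int seq;            /* even: stable, odd: update in flight */
            unsigned long long tx_bytes;
            unsigned long long tx_pkts;
    };

    /* Re-copy the counters until the sequence value is even and unchanged
     * across the copy, i.e. no writer ran in the middle of the read.
     */
    static void stats_snapshot(const struct ring_stats *s,
                               unsigned long long *bytes, unsigned long long *pkts)
    {
            unsigned int start;

            do {
                    start  = s->seq;
                    *bytes = s->tx_bytes;
                    *pkts  = s->tx_pkts;
            } while ((start & 1) || start != s->seq);
    }

    int main(void)
    {
            struct ring_stats ring[2] = {
                    { .seq = 0, .tx_bytes = 1500, .tx_pkts = 1 },
                    { .seq = 0, .tx_bytes = 9000, .tx_pkts = 6 },
            };
            unsigned long long tot_bytes = 0, tot_pkts = 0, b, p;
            int i;

            for (i = 0; i < 2; i++) {            /* one pass per tx ring */
                    stats_snapshot(&ring[i], &b, &p);
                    tot_bytes += b;
                    tot_pkts  += p;
            }
            printf("%llu bytes, %llu packets\n", tot_bytes, tot_pkts);
            return 0;
    }
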
2486 u8 tc = mqprio_qopt->qopt.num_tc; in hns3_setup_tc()
2487 u16 mode = mqprio_qopt->mode; in hns3_setup_tc()
2488 u8 hw = mqprio_qopt->qopt.hw; in hns3_setup_tc()
2493 return -EOPNOTSUPP; in hns3_setup_tc()
2496 return -EINVAL; in hns3_setup_tc()
2499 return -EINVAL; in hns3_setup_tc()
2502 kinfo = &h->kinfo; in hns3_setup_tc()
2506 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? in hns3_setup_tc()
2507 kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP; in hns3_setup_tc()
2513 int tc = tc_classid_to_hwtc(priv->netdev, flow->classid); in hns3_setup_tc_cls_flower()
2514 struct hnae3_handle *h = hns3_get_handle(priv->netdev); in hns3_setup_tc_cls_flower()
2516 switch (flow->command) { in hns3_setup_tc_cls_flower()
2518 if (h->ae_algo->ops->add_cls_flower) in hns3_setup_tc_cls_flower()
2519 return h->ae_algo->ops->add_cls_flower(h, flow, tc); in hns3_setup_tc_cls_flower()
2522 if (h->ae_algo->ops->del_cls_flower) in hns3_setup_tc_cls_flower()
2523 return h->ae_algo->ops->del_cls_flower(h, flow); in hns3_setup_tc_cls_flower()
2529 return -EOPNOTSUPP; in hns3_setup_tc_cls_flower()
2537 if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) in hns3_setup_tc_block_cb()
2538 return -EOPNOTSUPP; in hns3_setup_tc_block_cb()
2544 return -EOPNOTSUPP; in hns3_setup_tc_block_cb()
2567 return -EOPNOTSUPP; in hns3_nic_setup_tc()
2577 int ret = -EIO; in hns3_vlan_rx_add_vid()
2579 if (h->ae_algo->ops->set_vlan_filter) in hns3_vlan_rx_add_vid()
2580 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); in hns3_vlan_rx_add_vid()
2589 int ret = -EIO; in hns3_vlan_rx_kill_vid()
2591 if (h->ae_algo->ops->set_vlan_filter) in hns3_vlan_rx_kill_vid()
2592 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); in hns3_vlan_rx_kill_vid()
2601 int ret = -EIO; in hns3_ndo_set_vf_vlan()
2607 if (h->ae_algo->ops->set_vf_vlan_filter) in hns3_ndo_set_vf_vlan()
2608 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, in hns3_ndo_set_vf_vlan()
2619 return -EBUSY; in hns3_set_vf_spoofchk()
2621 if (!handle->ae_algo->ops->set_vf_spoofchk) in hns3_set_vf_spoofchk()
2622 return -EOPNOTSUPP; in hns3_set_vf_spoofchk()
2624 return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); in hns3_set_vf_spoofchk()
2631 if (!handle->ae_algo->ops->set_vf_trust) in hns3_set_vf_trust()
2632 return -EOPNOTSUPP; in hns3_set_vf_trust()
2634 return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); in hns3_set_vf_trust()
2643 return -EBUSY; in hns3_nic_change_mtu()
2645 if (!h->ae_algo->ops->set_mtu) in hns3_nic_change_mtu()
2646 return -EOPNOTSUPP; in hns3_nic_change_mtu()
2649 "change mtu from %u to %d\n", netdev->mtu, new_mtu); in hns3_nic_change_mtu()
2651 ret = h->ae_algo->ops->set_mtu(h, new_mtu); in hns3_nic_change_mtu()
2656 netdev->mtu = new_mtu; in hns3_nic_change_mtu()
2676 for (i = 0; i < ndev->num_tx_queues; i++) { in hns3_get_tx_timeo_queue_info()
2681 trans_start = q->trans_start; in hns3_get_tx_timeo_queue_info()
2684 (trans_start + ndev->watchdog_timeo))) { in hns3_get_tx_timeo_queue_info()
2687 q->state, in hns3_get_tx_timeo_queue_info()
2688 jiffies_to_msecs(jiffies - trans_start)); in hns3_get_tx_timeo_queue_info()
2693 if (i == ndev->num_tx_queues) { in hns3_get_tx_timeo_queue_info()
2696 priv->tx_timeout_count); in hns3_get_tx_timeo_queue_info()
2700 priv->tx_timeout_count++; in hns3_get_tx_timeo_queue_info()
2702 tx_ring = &priv->ring[timeout_queue]; in hns3_get_tx_timeo_queue_info()
2703 napi = &tx_ring->tqp_vector->napi; in hns3_get_tx_timeo_queue_info()
2707 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, in hns3_get_tx_timeo_queue_info()
2708 tx_ring->next_to_clean, napi->state); in hns3_get_tx_timeo_queue_info()
2712 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, in hns3_get_tx_timeo_queue_info()
2713 tx_ring->stats.sw_err_cnt, tx_ring->pending_buf); in hns3_get_tx_timeo_queue_info()
2717 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, in hns3_get_tx_timeo_queue_info()
2718 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); in hns3_get_tx_timeo_queue_info()
2723 if (h->ae_algo->ops->get_mac_stats) { in hns3_get_tx_timeo_queue_info()
2726 h->ae_algo->ops->get_mac_stats(h, &mac_stats); in hns3_get_tx_timeo_queue_info()
2731 hw_head = readl_relaxed(tx_ring->tqp->io_base + in hns3_get_tx_timeo_queue_info()
2733 hw_tail = readl_relaxed(tx_ring->tqp->io_base + in hns3_get_tx_timeo_queue_info()
2735 fbd_num = readl_relaxed(tx_ring->tqp->io_base + in hns3_get_tx_timeo_queue_info()
2737 fbd_oft = readl_relaxed(tx_ring->tqp->io_base + in hns3_get_tx_timeo_queue_info()
2739 ebd_num = readl_relaxed(tx_ring->tqp->io_base + in hns3_get_tx_timeo_queue_info()
2741 ebd_oft = readl_relaxed(tx_ring->tqp->io_base + in hns3_get_tx_timeo_queue_info()
2743 bd_num = readl_relaxed(tx_ring->tqp->io_base + in hns3_get_tx_timeo_queue_info()
2745 bd_err = readl_relaxed(tx_ring->tqp->io_base + in hns3_get_tx_timeo_queue_info()
2747 ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); in hns3_get_tx_timeo_queue_info()
2748 tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); in hns3_get_tx_timeo_queue_info()
2753 readl(tx_ring->tqp_vector->mask_addr)); in hns3_get_tx_timeo_queue_info()
2764 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_net_timeout()
2772 if (h->ae_algo->ops->reset_event) in hns3_nic_net_timeout()
2773 h->ae_algo->ops->reset_event(h->pdev, h); in hns3_nic_net_timeout()
2783 if (!h->ae_algo->ops->add_arfs_entry) in hns3_rx_flow_steer()
2784 return -EOPNOTSUPP; in hns3_rx_flow_steer()
2786 if (skb->encapsulation) in hns3_rx_flow_steer()
2787 return -EPROTONOSUPPORT; in hns3_rx_flow_steer()
2790 return -EPROTONOSUPPORT; in hns3_rx_flow_steer()
2796 return -EPROTONOSUPPORT; in hns3_rx_flow_steer()
2798 return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); in hns3_rx_flow_steer()
2807 if (!h->ae_algo->ops->get_vf_config) in hns3_nic_get_vf_config()
2808 return -EOPNOTSUPP; in hns3_nic_get_vf_config()
2810 return h->ae_algo->ops->get_vf_config(h, vf, ivf); in hns3_nic_get_vf_config()
2818 if (!h->ae_algo->ops->set_vf_link_state) in hns3_nic_set_vf_link_state()
2819 return -EOPNOTSUPP; in hns3_nic_set_vf_link_state()
2821 return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); in hns3_nic_set_vf_link_state()
2829 if (!h->ae_algo->ops->set_vf_rate) in hns3_nic_set_vf_rate()
2830 return -EOPNOTSUPP; in hns3_nic_set_vf_rate()
2832 return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, in hns3_nic_set_vf_rate()
2840 if (!h->ae_algo->ops->set_vf_mac) in hns3_nic_set_vf_mac()
2841 return -EOPNOTSUPP; in hns3_nic_set_vf_mac()
2847 return -EINVAL; in hns3_nic_set_vf_mac()
2850 return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); in hns3_nic_set_vf_mac()
2882 u32 dev_id = pdev->device; in hns3_is_phys_func()
2898 dev_warn(&pdev->dev, "un-recognized pci device-id %u", in hns3_is_phys_func()
2907 /* If our VFs are assigned we cannot shut down SR-IOV in hns3_disable_sriov()
2912 dev_warn(&pdev->dev, in hns3_disable_sriov()
2920 /* hns3_probe - Device initialization routine
2935 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); in hns3_probe()
2937 return -ENOMEM; in hns3_probe()
2939 ae_dev->pdev = pdev; in hns3_probe()
2940 ae_dev->flag = ent->driver_data; in hns3_probe()
2950 /* hns3_remove - Device removal routine
2977 dev_warn(&pdev->dev, "Can not config SRIOV\n"); in hns3_pci_sriov_configure()
2978 return -EINVAL; in hns3_pci_sriov_configure()
2984 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); in hns3_pci_sriov_configure()
2990 dev_warn(&pdev->dev, in hns3_pci_sriov_configure()
3012 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { in hns3_suspend()
3014 if (ae_dev->ops && ae_dev->ops->reset_prepare) in hns3_suspend()
3015 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET); in hns3_suspend()
3025 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { in hns3_resume()
3027 if (ae_dev->ops && ae_dev->ops->reset_done) in hns3_resume()
3028 ae_dev->ops->reset_done(ae_dev); in hns3_resume()
3040 dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state); in hns3_error_detected()
3045 if (!ae_dev || !ae_dev->ops) { in hns3_error_detected()
3046 dev_err(&pdev->dev, in hns3_error_detected()
3047 "Can't recover - error happened before device initialized\n"); in hns3_error_detected()
3051 if (ae_dev->ops->handle_hw_ras_error) in hns3_error_detected()
3052 ret = ae_dev->ops->handle_hw_ras_error(ae_dev); in hns3_error_detected()
3064 struct device *dev = &pdev->dev; in hns3_slot_reset()
3066 if (!ae_dev || !ae_dev->ops) in hns3_slot_reset()
3069 ops = ae_dev->ops; in hns3_slot_reset()
3071 if (ops->reset_event && ops->get_reset_level && in hns3_slot_reset()
3072 ops->set_default_reset_request) { in hns3_slot_reset()
3073 if (ae_dev->hw_err_reset_req) { in hns3_slot_reset()
3074 reset_type = ops->get_reset_level(ae_dev, in hns3_slot_reset()
3075 &ae_dev->hw_err_reset_req); in hns3_slot_reset()
3076 ops->set_default_reset_request(ae_dev, reset_type); in hns3_slot_reset()
3078 ops->reset_event(pdev, NULL); in hns3_slot_reset()
3091 dev_info(&pdev->dev, "FLR prepare\n"); in hns3_reset_prepare()
3092 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare) in hns3_reset_prepare()
3093 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET); in hns3_reset_prepare()
3100 dev_info(&pdev->dev, "FLR done\n"); in hns3_reset_done()
3101 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done) in hns3_reset_done()
3102 ae_dev->ops->reset_done(ae_dev); in hns3_reset_done()
3129 struct pci_dev *pdev = h->pdev; in hns3_set_default_feature()
3132 netdev->priv_flags |= IFF_UNICAST_FLT; in hns3_set_default_feature()
3134 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; in hns3_set_default_feature()
3136 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | in hns3_set_default_feature()
3143 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hns3_set_default_feature()
3144 netdev->features |= NETIF_F_GRO_HW; in hns3_set_default_feature()
3146 if (!(h->flags & HNAE3_SUPPORT_VF)) in hns3_set_default_feature()
3147 netdev->features |= NETIF_F_NTUPLE; in hns3_set_default_feature()
3150 if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) in hns3_set_default_feature()
3151 netdev->features |= NETIF_F_GSO_UDP_L4; in hns3_set_default_feature()
3153 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) in hns3_set_default_feature()
3154 netdev->features |= NETIF_F_HW_CSUM; in hns3_set_default_feature()
3156 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; in hns3_set_default_feature()
3158 if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) in hns3_set_default_feature()
3159 netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; in hns3_set_default_feature()
3161 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) in hns3_set_default_feature()
3162 netdev->features |= NETIF_F_HW_TC; in hns3_set_default_feature()
3164 netdev->hw_features |= netdev->features; in hns3_set_default_feature()
3165 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) in hns3_set_default_feature()
3166 netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; in hns3_set_default_feature()
3168 netdev->vlan_features |= netdev->features & in hns3_set_default_feature()
3173 netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID; in hns3_set_default_feature()
3182 if (ring->page_pool) { in hns3_alloc_buffer()
3183 p = page_pool_dev_alloc_frag(ring->page_pool, in hns3_alloc_buffer()
3184 &cb->page_offset, in hns3_alloc_buffer()
3187 return -ENOMEM; in hns3_alloc_buffer()
3189 cb->priv = p; in hns3_alloc_buffer()
3190 cb->buf = page_address(p); in hns3_alloc_buffer()
3191 cb->dma = page_pool_get_dma_addr(p); in hns3_alloc_buffer()
3192 cb->type = DESC_TYPE_PP_FRAG; in hns3_alloc_buffer()
3193 cb->reuse_flag = 0; in hns3_alloc_buffer()
3199 return -ENOMEM; in hns3_alloc_buffer()
3201 cb->priv = p; in hns3_alloc_buffer()
3202 cb->page_offset = 0; in hns3_alloc_buffer()
3203 cb->reuse_flag = 0; in hns3_alloc_buffer()
3204 cb->buf = page_address(p); in hns3_alloc_buffer()
3205 cb->length = hns3_page_size(ring); in hns3_alloc_buffer()
3206 cb->type = DESC_TYPE_PAGE; in hns3_alloc_buffer()
3207 page_ref_add(p, USHRT_MAX - 1); in hns3_alloc_buffer()
3208 cb->pagecnt_bias = USHRT_MAX; in hns3_alloc_buffer()
3216 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD | in hns3_free_buffer()
3218 napi_consume_skb(cb->priv, budget); in hns3_free_buffer()
3220 if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias) in hns3_free_buffer()
3221 __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); in hns3_free_buffer()
3222 else if (cb->type & DESC_TYPE_PP_FRAG) in hns3_free_buffer()
3223 page_pool_put_full_page(ring->page_pool, cb->priv, in hns3_free_buffer()
3231 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, in hns3_map_buffer()
3232 cb->length, ring_to_dma_dir(ring)); in hns3_map_buffer()
3234 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) in hns3_map_buffer()
3235 return -EIO; in hns3_map_buffer()
3243 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) in hns3_unmap_buffer()
3244 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, in hns3_unmap_buffer()
3246 else if ((cb->type & DESC_TYPE_PAGE) && cb->length) in hns3_unmap_buffer()
3247 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, in hns3_unmap_buffer()
3249 else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD | in hns3_unmap_buffer()
3256 hns3_unmap_buffer(ring, &ring->desc_cb[i]); in hns3_buffer_detach()
3257 ring->desc[i].addr = 0; in hns3_buffer_detach()
3258 ring->desc_cb[i].refill = 0; in hns3_buffer_detach()
3264 struct hns3_desc_cb *cb = &ring->desc_cb[i]; in hns3_free_buffer_detach()
3266 if (!ring->desc_cb[i].dma) in hns3_free_buffer_detach()
3277 for (i = 0; i < ring->desc_num; i++) in hns3_free_buffers()
3284 int size = ring->desc_num * sizeof(ring->desc[0]); in hns3_free_desc()
3288 if (ring->desc) { in hns3_free_desc()
3290 ring->desc, ring->desc_dma_addr); in hns3_free_desc()
3291 ring->desc = NULL; in hns3_free_desc()
3297 int size = ring->desc_num * sizeof(ring->desc[0]); in hns3_alloc_desc()
3299 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, in hns3_alloc_desc()
3300 &ring->desc_dma_addr, GFP_KERNEL); in hns3_alloc_desc()
3301 if (!ring->desc) in hns3_alloc_desc()
3302 return -ENOMEM; in hns3_alloc_desc()
3313 if (ret || ring->page_pool) in hns3_alloc_and_map_buffer()
3330 int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]); in hns3_alloc_and_attach_buffer()
3335 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + in hns3_alloc_and_attach_buffer()
3336 ring->desc_cb[i].page_offset); in hns3_alloc_and_attach_buffer()
3337 ring->desc_cb[i].refill = 1; in hns3_alloc_and_attach_buffer()
3347 for (i = 0; i < ring->desc_num; i++) { in hns3_alloc_ring_buffers()
3356 for (j = i - 1; j >= 0; j--) in hns3_alloc_ring_buffers()
3361 /* detach an in-use buffer and replace it with a reserved one */
3365 hns3_unmap_buffer(ring, &ring->desc_cb[i]); in hns3_replace_buffer()
3366 ring->desc_cb[i] = *res_cb; in hns3_replace_buffer()
3367 ring->desc_cb[i].refill = 1; in hns3_replace_buffer()
3368 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + in hns3_replace_buffer()
3369 ring->desc_cb[i].page_offset); in hns3_replace_buffer()
3370 ring->desc[i].rx.bd_base_info = 0; in hns3_replace_buffer()
3375 ring->desc_cb[i].reuse_flag = 0; in hns3_reuse_buffer()
3376 ring->desc_cb[i].refill = 1; in hns3_reuse_buffer()
3377 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + in hns3_reuse_buffer()
3378 ring->desc_cb[i].page_offset); in hns3_reuse_buffer()
3379 ring->desc[i].rx.bd_base_info = 0; in hns3_reuse_buffer()
3382 ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, in hns3_reuse_buffer()
3390 /* pair with ring->last_to_use update in hns3_tx_doorbell(), in hns3_nic_reclaim_desc()
3394 int ltu = smp_load_acquire(&ring->last_to_use); in hns3_nic_reclaim_desc()
3395 int ntc = ring->next_to_clean; in hns3_nic_reclaim_desc()
3401 desc = &ring->desc[ntc]; in hns3_nic_reclaim_desc()
3403 if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) & in hns3_nic_reclaim_desc()
3407 desc_cb = &ring->desc_cb[ntc]; in hns3_nic_reclaim_desc()
3409 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL | in hns3_nic_reclaim_desc()
3413 (*bytes) += desc_cb->send_bytes; in hns3_nic_reclaim_desc()
3419 if (++ntc == ring->desc_num) in hns3_nic_reclaim_desc()
3423 prefetch(&ring->desc_cb[ntc]); in hns3_nic_reclaim_desc()
3433 smp_store_release(&ring->next_to_clean, ntc); in hns3_nic_reclaim_desc()
3453 ring->tqp_vector->tx_group.total_bytes += bytes; in hns3_clean_tx_ring()
3454 ring->tqp_vector->tx_group.total_packets += pkts; in hns3_clean_tx_ring()
3456 u64_stats_update_begin(&ring->syncp); in hns3_clean_tx_ring()
3457 ring->stats.tx_bytes += bytes; in hns3_clean_tx_ring()
3458 ring->stats.tx_pkts += pkts; in hns3_clean_tx_ring()
3459 u64_stats_update_end(&ring->syncp); in hns3_clean_tx_ring()
3461 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); in hns3_clean_tx_ring()
3471 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { in hns3_clean_tx_ring()
3473 ring->stats.restart_queue++; in hns3_clean_tx_ring()
3480 int ntc = ring->next_to_clean; in hns3_desc_unused()
3481 int ntu = ring->next_to_use; in hns3_desc_unused()
3483 if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill)) in hns3_desc_unused()
3484 return ring->desc_num; in hns3_desc_unused()
3486 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; in hns3_desc_unused()
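
hns3_desc_unused() counts the rx descriptors between next_to_use and next_to_clean in ring order, i.e. the ones still waiting for a fresh buffer, with a special case for a ring that has never been refilled. A worked standalone form of the same expression:

    #include <stdio.h>

    static int desc_unused(int desc_num, int ntc, int ntu, int refill_at_ntc)
    {
            if (ntc == ntu && !refill_at_ntc)
                    return desc_num;                  /* nothing refilled at all */

            return ((ntc >= ntu) ? 0 : desc_num) + ntc - ntu;
    }

    int main(void)
    {
            /* 512-entry ring, clean pointer at 10, use pointer at 500:
             * the unused span wraps and covers 512 + 10 - 500 = 22 entries
             */
            printf("%d\n", desc_unused(512, 10, 500, 1));
            return 0;
    }
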
3498 desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_nic_alloc_rx_buffers()
3499 if (desc_cb->reuse_flag) { in hns3_nic_alloc_rx_buffers()
3500 u64_stats_update_begin(&ring->syncp); in hns3_nic_alloc_rx_buffers()
3501 ring->stats.reuse_pg_cnt++; in hns3_nic_alloc_rx_buffers()
3502 u64_stats_update_end(&ring->syncp); in hns3_nic_alloc_rx_buffers()
3504 hns3_reuse_buffer(ring, ring->next_to_use); in hns3_nic_alloc_rx_buffers()
3508 u64_stats_update_begin(&ring->syncp); in hns3_nic_alloc_rx_buffers()
3509 ring->stats.sw_err_cnt++; in hns3_nic_alloc_rx_buffers()
3510 u64_stats_update_end(&ring->syncp); in hns3_nic_alloc_rx_buffers()
3516 writel(i, ring->tqp->io_base + in hns3_nic_alloc_rx_buffers()
3520 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); in hns3_nic_alloc_rx_buffers()
3522 u64_stats_update_begin(&ring->syncp); in hns3_nic_alloc_rx_buffers()
3523 ring->stats.non_reuse_pg++; in hns3_nic_alloc_rx_buffers()
3524 u64_stats_update_end(&ring->syncp); in hns3_nic_alloc_rx_buffers()
3530 writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); in hns3_nic_alloc_rx_buffers()
3536 return page_count(cb->priv) == cb->pagecnt_bias; in hns3_can_reuse_page()
3543 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; in hns3_nic_reuse_page()
3544 u32 frag_offset = desc_cb->page_offset + pull_len; in hns3_nic_reuse_page()
3545 int size = le16_to_cpu(desc->rx.size); in hns3_nic_reuse_page()
3547 u32 frag_size = size - pull_len; in hns3_nic_reuse_page()
3550 if (ring->page_pool) { in hns3_nic_reuse_page()
3551 skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, in hns3_nic_reuse_page()
3556 /* Avoid re-using remote or pfmem page */ in hns3_nic_reuse_page()
3557 if (unlikely(!dev_page_is_reusable(desc_cb->priv))) in hns3_nic_reuse_page()
3569 * is non-zero, which means page_offset @ truesize will in hns3_nic_reuse_page()
3573 if ((!desc_cb->page_offset && reused) || in hns3_nic_reuse_page()
3574 ((desc_cb->page_offset + truesize + truesize) <= in hns3_nic_reuse_page()
3575 hns3_page_size(ring) && desc_cb->page_offset)) { in hns3_nic_reuse_page()
3576 desc_cb->page_offset += truesize; in hns3_nic_reuse_page()
3577 desc_cb->reuse_flag = 1; in hns3_nic_reuse_page()
3578 } else if (desc_cb->page_offset && reused) { in hns3_nic_reuse_page()
3579 desc_cb->page_offset = 0; in hns3_nic_reuse_page()
3580 desc_cb->reuse_flag = 1; in hns3_nic_reuse_page()
3581 } else if (frag_size <= ring->rx_copybreak) { in hns3_nic_reuse_page()
3585 u64_stats_update_begin(&ring->syncp); in hns3_nic_reuse_page()
3586 ring->stats.frag_alloc_err++; in hns3_nic_reuse_page()
3587 u64_stats_update_end(&ring->syncp); in hns3_nic_reuse_page()
3594 desc_cb->reuse_flag = 1; in hns3_nic_reuse_page()
3595 memcpy(frag, desc_cb->buf + frag_offset, frag_size); in hns3_nic_reuse_page()
3599 u64_stats_update_begin(&ring->syncp); in hns3_nic_reuse_page()
3600 ring->stats.frag_alloc++; in hns3_nic_reuse_page()
3601 u64_stats_update_end(&ring->syncp); in hns3_nic_reuse_page()
3606 desc_cb->pagecnt_bias--; in hns3_nic_reuse_page()
3608 if (unlikely(!desc_cb->pagecnt_bias)) { in hns3_nic_reuse_page()
3609 page_ref_add(desc_cb->priv, USHRT_MAX); in hns3_nic_reuse_page()
3610 desc_cb->pagecnt_bias = USHRT_MAX; in hns3_nic_reuse_page()
3613 skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, in hns3_nic_reuse_page()
3616 if (unlikely(!desc_cb->reuse_flag)) in hns3_nic_reuse_page()
3617 __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); in hns3_nic_reuse_page()
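/* Illustrative sketch, not driver code: the page-splitting reuse scheme
 * above. The driver keeps a per-buffer pagecnt_bias mirroring the extra
 * page references it holds; every fragment handed to the stack consumes
 * one bias count, the bias is topped back up with page_ref_add(page,
 * USHRT_MAX) when it runs out, and a half of the page may be reused only
 * while the driver is the page's sole owner. The helper name below is
 * hypothetical; the ownership test mirrors hns3_can_reuse_page().
 */
static inline bool rx_page_sole_owner(struct hns3_desc_cb *cb)
{
	return page_count(cb->priv) == cb->pagecnt_bias;
}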
3622 __be16 type = skb->protocol; in hns3_gro_complete()
3630 return -EFAULT; in hns3_gro_complete()
3632 vh = (struct vlan_hdr *)(skb->data + depth); in hns3_gro_complete()
3633 type = vh->h_vlan_encapsulated_proto; in hns3_gro_complete()
3645 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, in hns3_gro_complete()
3646 iph->daddr, 0); in hns3_gro_complete()
3653 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, in hns3_gro_complete()
3654 &iph->daddr, 0); in hns3_gro_complete()
3656 hns3_rl_err(skb->dev, in hns3_gro_complete()
3659 return -EFAULT; in hns3_gro_complete()
3662 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; in hns3_gro_complete()
3663 if (th->cwr) in hns3_gro_complete()
3664 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; in hns3_gro_complete()
3667 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; in hns3_gro_complete()
3669 skb->csum_start = (unsigned char *)th - skb->head; in hns3_gro_complete()
3670 skb->csum_offset = offsetof(struct tcphdr, check); in hns3_gro_complete()
3671 skb->ip_summed = CHECKSUM_PARTIAL; in hns3_gro_complete()
3685 u64_stats_update_begin(&ring->syncp); in hns3_checksum_complete()
3686 ring->stats.csum_complete++; in hns3_checksum_complete()
3687 u64_stats_update_end(&ring->syncp); in hns3_checksum_complete()
3688 skb->ip_summed = CHECKSUM_COMPLETE; in hns3_checksum_complete()
3689 skb->csum = csum_unfold((__force __sum16)csum); in hns3_checksum_complete()
3701 skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level; in hns3_rx_handle_csum()
3702 skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed; in hns3_rx_handle_csum()
3712 skb->csum_level = 1; in hns3_rx_handle_csum()
3725 skb->ip_summed = CHECKSUM_UNNECESSARY; in hns3_rx_handle_csum()
3740 skb->ip_summed = CHECKSUM_NONE; in hns3_rx_checksum()
3744 if (!(netdev->features & NETIF_F_RXCSUM)) in hns3_rx_checksum()
3747 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) in hns3_rx_checksum()
3761 u64_stats_update_begin(&ring->syncp); in hns3_rx_checksum()
3762 ring->stats.l3l4_csum_err++; in hns3_rx_checksum()
3763 u64_stats_update_end(&ring->syncp); in hns3_rx_checksum()
3774 napi_gro_flush(&ring->tqp_vector->napi, false); in hns3_rx_skb()
3776 napi_gro_receive(&ring->tqp_vector->napi, skb); in hns3_rx_skb()
3783 struct hnae3_handle *handle = ring->tqp->handle; in hns3_parse_vlan_tag()
3784 struct pci_dev *pdev = ring->tqp->handle->pdev; in hns3_parse_vlan_tag()
3787 if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) { in hns3_parse_vlan_tag()
3788 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); in hns3_parse_vlan_tag()
3790 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); in hns3_parse_vlan_tag()
3806 if (handle->port_base_vlan_state != in hns3_parse_vlan_tag()
3810 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); in hns3_parse_vlan_tag()
3813 if (handle->port_base_vlan_state != in hns3_parse_vlan_tag()
3817 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); in hns3_parse_vlan_tag()
3820 if (handle->port_base_vlan_state == in hns3_parse_vlan_tag()
3822 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); in hns3_parse_vlan_tag()
3824 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); in hns3_parse_vlan_tag()
3834 ring->desc[ring->next_to_clean].rx.bd_base_info &= in hns3_rx_ring_move_fw()
3836 ring->desc_cb[ring->next_to_clean].refill = 0; in hns3_rx_ring_move_fw()
3837 ring->next_to_clean += 1; in hns3_rx_ring_move_fw()
3839 if (unlikely(ring->next_to_clean == ring->desc_num)) in hns3_rx_ring_move_fw()
3840 ring->next_to_clean = 0; in hns3_rx_ring_move_fw()
3846 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; in hns3_alloc_skb()
3850 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); in hns3_alloc_skb()
3851 skb = ring->skb; in hns3_alloc_skb()
3855 u64_stats_update_begin(&ring->syncp); in hns3_alloc_skb()
3856 ring->stats.sw_err_cnt++; in hns3_alloc_skb()
3857 u64_stats_update_end(&ring->syncp); in hns3_alloc_skb()
3859 return -ENOMEM; in hns3_alloc_skb()
3863 prefetchw(skb->data); in hns3_alloc_skb()
3865 ring->pending_buf = 1; in hns3_alloc_skb()
3866 ring->frag_num = 0; in hns3_alloc_skb()
3867 ring->tail_skb = NULL; in hns3_alloc_skb()
3871 /* We can reuse buffer as-is, just make sure it is reusable */ in hns3_alloc_skb()
3872 if (dev_page_is_reusable(desc_cb->priv)) in hns3_alloc_skb()
3873 desc_cb->reuse_flag = 1; in hns3_alloc_skb()
3874 else if (desc_cb->type & DESC_TYPE_PP_FRAG) in hns3_alloc_skb()
3875 page_pool_put_full_page(ring->page_pool, desc_cb->priv, in hns3_alloc_skb()
3878 __page_frag_cache_drain(desc_cb->priv, in hns3_alloc_skb()
3879 desc_cb->pagecnt_bias); in hns3_alloc_skb()
3885 if (ring->page_pool) in hns3_alloc_skb()
3888 u64_stats_update_begin(&ring->syncp); in hns3_alloc_skb()
3889 ring->stats.seg_pkt_cnt++; in hns3_alloc_skb()
3890 u64_stats_update_end(&ring->syncp); in hns3_alloc_skb()
3892 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); in hns3_alloc_skb()
3893 __skb_put(skb, ring->pull_len); in hns3_alloc_skb()
3894 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, in hns3_alloc_skb()
3903 struct sk_buff *skb = ring->skb; in hns3_add_frag()
3911 desc = &ring->desc[ring->next_to_clean]; in hns3_add_frag()
3912 desc_cb = &ring->desc_cb[ring->next_to_clean]; in hns3_add_frag()
3913 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); in hns3_add_frag()
3917 return -ENXIO; in hns3_add_frag()
3919 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { in hns3_add_frag()
3920 new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); in hns3_add_frag()
3924 return -ENXIO; in hns3_add_frag()
3927 if (ring->page_pool) in hns3_add_frag()
3930 ring->frag_num = 0; in hns3_add_frag()
3932 if (ring->tail_skb) { in hns3_add_frag()
3933 ring->tail_skb->next = new_skb; in hns3_add_frag()
3934 ring->tail_skb = new_skb; in hns3_add_frag()
3936 skb_shinfo(skb)->frag_list = new_skb; in hns3_add_frag()
3937 ring->tail_skb = new_skb; in hns3_add_frag()
3941 if (ring->tail_skb) { in hns3_add_frag()
3942 head_skb->truesize += hns3_buf_size(ring); in hns3_add_frag()
3943 head_skb->data_len += le16_to_cpu(desc->rx.size); in hns3_add_frag()
3944 head_skb->len += le16_to_cpu(desc->rx.size); in hns3_add_frag()
3945 skb = ring->tail_skb; in hns3_add_frag()
3949 desc_cb->dma + desc_cb->page_offset, in hns3_add_frag()
3953 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); in hns3_add_frag()
3956 ring->pending_buf++; in hns3_add_frag()
3970 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, in hns3_set_gro_and_checksum()
3974 if (!skb_shinfo(skb)->gso_size) { in hns3_set_gro_and_checksum()
3980 NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, in hns3_set_gro_and_checksum()
3984 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { in hns3_set_gro_and_checksum()
3995 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in hns3_set_gro_and_checksum()
3997 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in hns3_set_gro_and_checksum()
3999 return -EFAULT; in hns3_set_gro_and_checksum()
4007 struct hnae3_handle *handle = ring->tqp->handle; in hns3_set_rx_skb_rss_type()
4011 rss_type = handle->kinfo.rss_type; in hns3_set_rx_skb_rss_type()
4029 * current packet, and ring->next_to_clean indicates the first in hns3_handle_bdinfo()
4030 * descriptor of the next packet, so we subtract 1 below. in hns3_handle_bdinfo()
4032 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : in hns3_handle_bdinfo()
4033 (ring->desc_num - 1); in hns3_handle_bdinfo()
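/* Worked example (illustrative): with desc_num == 1024, the last BD of the
 * packet just handled sits one slot behind next_to_clean, so
 * next_to_clean == 0 wraps back to pre_ntc == 1023 and any other value
 * simply maps to next_to_clean - 1.
 */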
4034 desc = &ring->desc[pre_ntc]; in hns3_handle_bdinfo()
4035 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); in hns3_handle_bdinfo()
4036 l234info = le32_to_cpu(desc->rx.l234_info); in hns3_handle_bdinfo()
4037 ol_info = le32_to_cpu(desc->rx.ol_info); in hns3_handle_bdinfo()
4038 csum = le16_to_cpu(desc->csum); in hns3_handle_bdinfo()
4042 u32 nsec = le32_to_cpu(desc->ts_nsec); in hns3_handle_bdinfo()
4043 u32 sec = le32_to_cpu(desc->ts_sec); in hns3_handle_bdinfo()
4045 if (h->ae_algo->ops->get_rx_hwts) in hns3_handle_bdinfo()
4046 h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec); in hns3_handle_bdinfo()
4053 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { in hns3_handle_bdinfo()
4061 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | in hns3_handle_bdinfo()
4063 u64_stats_update_begin(&ring->syncp); in hns3_handle_bdinfo()
4065 ring->stats.l2_err++; in hns3_handle_bdinfo()
4067 ring->stats.err_pkt_len++; in hns3_handle_bdinfo()
4068 u64_stats_update_end(&ring->syncp); in hns3_handle_bdinfo()
4070 return -EFAULT; in hns3_handle_bdinfo()
4073 len = skb->len; in hns3_handle_bdinfo()
4076 skb->protocol = eth_type_trans(skb, netdev); in hns3_handle_bdinfo()
4082 u64_stats_update_begin(&ring->syncp); in hns3_handle_bdinfo()
4083 ring->stats.rx_err_cnt++; in hns3_handle_bdinfo()
4084 u64_stats_update_end(&ring->syncp); in hns3_handle_bdinfo()
4091 u64_stats_update_begin(&ring->syncp); in hns3_handle_bdinfo()
4092 ring->stats.rx_pkts++; in hns3_handle_bdinfo()
4093 ring->stats.rx_bytes += len; in hns3_handle_bdinfo()
4096 ring->stats.rx_multicast++; in hns3_handle_bdinfo()
4098 u64_stats_update_end(&ring->syncp); in hns3_handle_bdinfo()
4100 ring->tqp_vector->rx_group.total_bytes += len; in hns3_handle_bdinfo()
4102 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); in hns3_handle_bdinfo()
4108 struct sk_buff *skb = ring->skb; in hns3_handle_rx_bd()
4115 desc = &ring->desc[ring->next_to_clean]; in hns3_handle_rx_bd()
4116 desc_cb = &ring->desc_cb[ring->next_to_clean]; in hns3_handle_rx_bd()
4121 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); in hns3_handle_rx_bd()
4124 return -ENXIO; in hns3_handle_rx_bd()
4127 length = le16_to_cpu(desc->rx.size); in hns3_handle_rx_bd()
4129 ring->va = desc_cb->buf + desc_cb->page_offset; in hns3_handle_rx_bd()
4132 desc_cb->dma + desc_cb->page_offset, in hns3_handle_rx_bd()
4143 net_prefetch(ring->va); in hns3_handle_rx_bd()
4145 ret = hns3_alloc_skb(ring, length, ring->va); in hns3_handle_rx_bd()
4146 skb = ring->skb; in hns3_handle_rx_bd()
4164 if (skb->len > HNS3_RX_HEAD_SIZE) in hns3_handle_rx_bd()
4165 memcpy(skb->data, ring->va, in hns3_handle_rx_bd()
4166 ALIGN(ring->pull_len, sizeof(long))); in hns3_handle_rx_bd()
4174 skb_record_rx_queue(skb, ring->tqp->tqp_index); in hns3_handle_rx_bd()
4187 unused_count -= ring->pending_buf; in hns3_clean_rx_ring()
4200 if (unlikely(!ring->skb || err == -ENXIO)) { in hns3_clean_rx_ring()
4203 rx_fn(ring, ring->skb); in hns3_clean_rx_ring()
4207 unused_count += ring->pending_buf; in hns3_clean_rx_ring()
4208 ring->skb = NULL; in hns3_clean_rx_ring()
4209 ring->pending_buf = 0; in hns3_clean_rx_ring()
4218 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; in hns3_update_rx_int_coalesce()
4221 if (!rx_group->coal.adapt_enable) in hns3_update_rx_int_coalesce()
4224 dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets, in hns3_update_rx_int_coalesce()
4225 rx_group->total_bytes, &sample); in hns3_update_rx_int_coalesce()
4226 net_dim(&rx_group->dim, sample); in hns3_update_rx_int_coalesce()
4231 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; in hns3_update_tx_int_coalesce()
4234 if (!tx_group->coal.adapt_enable) in hns3_update_tx_int_coalesce()
4237 dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets, in hns3_update_tx_int_coalesce()
4238 tx_group->total_bytes, &sample); in hns3_update_tx_int_coalesce()
4239 net_dim(&tx_group->dim, sample); in hns3_update_tx_int_coalesce()
4244 struct hns3_nic_priv *priv = netdev_priv(napi->dev); in hns3_nic_common_poll()
4253 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { in hns3_nic_common_poll()
4261 hns3_for_each_ring(ring, tqp_vector->tx_group) in hns3_nic_common_poll()
4265 if (tqp_vector->num_tqps > 1) in hns3_nic_common_poll()
4266 rx_budget = max(budget / tqp_vector->num_tqps, 1); in hns3_nic_common_poll()
4268 hns3_for_each_ring(ring, tqp_vector->rx_group) { in hns3_nic_common_poll()
4277 tqp_vector->rx_group.total_packets += rx_pkt_total; in hns3_nic_common_poll()
4283 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { in hns3_nic_common_poll()
4296 struct pci_dev *pdev = tqp_vector->handle->pdev; in hns3_get_vector_ring_chain()
4302 tx_ring = tqp_vector->tx_group.ring; in hns3_get_vector_ring_chain()
4304 cur_chain->tqp_index = tx_ring->tqp->tqp_index; in hns3_get_vector_ring_chain()
4305 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, in hns3_get_vector_ring_chain()
4307 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, in hns3_get_vector_ring_chain()
4310 cur_chain->next = NULL; in hns3_get_vector_ring_chain()
4312 while (tx_ring->next) { in hns3_get_vector_ring_chain()
4313 tx_ring = tx_ring->next; in hns3_get_vector_ring_chain()
4315 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), in hns3_get_vector_ring_chain()
4320 cur_chain->next = chain; in hns3_get_vector_ring_chain()
4321 chain->tqp_index = tx_ring->tqp->tqp_index; in hns3_get_vector_ring_chain()
4322 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, in hns3_get_vector_ring_chain()
4324 hnae3_set_field(chain->int_gl_idx, in hns3_get_vector_ring_chain()
4333 rx_ring = tqp_vector->rx_group.ring; in hns3_get_vector_ring_chain()
4335 cur_chain->next = NULL; in hns3_get_vector_ring_chain()
4336 cur_chain->tqp_index = rx_ring->tqp->tqp_index; in hns3_get_vector_ring_chain()
4337 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, in hns3_get_vector_ring_chain()
4339 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, in hns3_get_vector_ring_chain()
4342 rx_ring = rx_ring->next; in hns3_get_vector_ring_chain()
4346 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); in hns3_get_vector_ring_chain()
4350 cur_chain->next = chain; in hns3_get_vector_ring_chain()
4351 chain->tqp_index = rx_ring->tqp->tqp_index; in hns3_get_vector_ring_chain()
4352 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, in hns3_get_vector_ring_chain()
4354 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, in hns3_get_vector_ring_chain()
4359 rx_ring = rx_ring->next; in hns3_get_vector_ring_chain()
4365 cur_chain = head->next; in hns3_get_vector_ring_chain()
4367 chain = cur_chain->next; in hns3_get_vector_ring_chain()
4368 devm_kfree(&pdev->dev, cur_chain); in hns3_get_vector_ring_chain()
4371 head->next = NULL; in hns3_get_vector_ring_chain()
4373 return -ENOMEM; in hns3_get_vector_ring_chain()
4379 struct pci_dev *pdev = tqp_vector->handle->pdev; in hns3_free_vector_ring_chain()
4382 chain = head->next; in hns3_free_vector_ring_chain()
4385 chain_tmp = chain->next; in hns3_free_vector_ring_chain()
4386 devm_kfree(&pdev->dev, chain); in hns3_free_vector_ring_chain()
4394 ring->next = group->ring; in hns3_add_ring_to_group()
4395 group->ring = ring; in hns3_add_ring_to_group()
4397 group->count++; in hns3_add_ring_to_group()
4402 struct pci_dev *pdev = priv->ae_handle->pdev; in hns3_nic_set_cpumask()
4404 int num_vectors = priv->vector_num; in hns3_nic_set_cpumask()
4408 numa_node = dev_to_node(&pdev->dev); in hns3_nic_set_cpumask()
4411 tqp_vector = &priv->tqp_vector[vector_i]; in hns3_nic_set_cpumask()
4413 &tqp_vector->affinity_mask); in hns3_nic_set_cpumask()
4422 struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; in hns3_rx_dim_work()
4424 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); in hns3_rx_dim_work()
4426 hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec); in hns3_rx_dim_work()
4427 tqp_vector->rx_group.coal.int_gl = cur_moder.usec; in hns3_rx_dim_work()
4429 if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) { in hns3_rx_dim_work()
4431 tqp_vector->rx_group.coal.int_ql = cur_moder.pkts; in hns3_rx_dim_work()
4434 dim->state = DIM_START_MEASURE; in hns3_rx_dim_work()
4442 struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; in hns3_tx_dim_work()
4444 net_dim_get_tx_moderation(dim->mode, dim->profile_ix); in hns3_tx_dim_work()
4447 tqp_vector->tx_group.coal.int_gl = cur_moder.usec; in hns3_tx_dim_work()
4449 if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) { in hns3_tx_dim_work()
4451 tqp_vector->tx_group.coal.int_ql = cur_moder.pkts; in hns3_tx_dim_work()
4454 dim->state = DIM_START_MEASURE; in hns3_tx_dim_work()
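/* Illustrative note, not driver code: the dynamic interrupt moderation
 * (DIM) flow used above. The NAPI poll path feeds event/packet/byte
 * counters to net_dim() via dim_update_sample(); when the algorithm picks
 * a new profile it schedules dim.work, and the handlers above translate
 * the chosen profile (usec/pkts from net_dim_get_rx_moderation() or
 * net_dim_get_tx_moderation()) into the vector's GL/QL settings before
 * re-arming the state machine with DIM_START_MEASURE.
 */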
4459 INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work); in hns3_nic_init_dim()
4460 INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work); in hns3_nic_init_dim()
4465 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_init_vector_data()
4472 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_init_vector_data()
4473 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_init_vector_data()
4475 tqp_vector->num_tqps = 0; in hns3_nic_init_vector_data()
4479 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_nic_init_vector_data()
4480 u16 vector_i = i % priv->vector_num; in hns3_nic_init_vector_data()
4481 u16 tqp_num = h->kinfo.num_tqps; in hns3_nic_init_vector_data()
4483 tqp_vector = &priv->tqp_vector[vector_i]; in hns3_nic_init_vector_data()
4485 hns3_add_ring_to_group(&tqp_vector->tx_group, in hns3_nic_init_vector_data()
4486 &priv->ring[i]); in hns3_nic_init_vector_data()
4488 hns3_add_ring_to_group(&tqp_vector->rx_group, in hns3_nic_init_vector_data()
4489 &priv->ring[i + tqp_num]); in hns3_nic_init_vector_data()
4491 priv->ring[i].tqp_vector = tqp_vector; in hns3_nic_init_vector_data()
4492 priv->ring[i + tqp_num].tqp_vector = tqp_vector; in hns3_nic_init_vector_data()
4493 tqp_vector->num_tqps++; in hns3_nic_init_vector_data()
4496 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_init_vector_data()
4499 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_init_vector_data()
4501 tqp_vector->rx_group.total_bytes = 0; in hns3_nic_init_vector_data()
4502 tqp_vector->rx_group.total_packets = 0; in hns3_nic_init_vector_data()
4503 tqp_vector->tx_group.total_bytes = 0; in hns3_nic_init_vector_data()
4504 tqp_vector->tx_group.total_packets = 0; in hns3_nic_init_vector_data()
4505 tqp_vector->handle = h; in hns3_nic_init_vector_data()
4512 ret = h->ae_algo->ops->map_ring_to_vector(h, in hns3_nic_init_vector_data()
4513 tqp_vector->vector_irq, &vector_ring_chain); in hns3_nic_init_vector_data()
4520 netif_napi_add(priv->netdev, &tqp_vector->napi, in hns3_nic_init_vector_data()
4527 while (i--) in hns3_nic_init_vector_data()
4528 netif_napi_del(&priv->tqp_vector[i].napi); in hns3_nic_init_vector_data()
4535 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); in hns3_nic_init_coal_cfg()
4536 struct hns3_enet_coalesce *tx_coal = &priv->tx_coal; in hns3_nic_init_coal_cfg()
4537 struct hns3_enet_coalesce *rx_coal = &priv->rx_coal; in hns3_nic_init_coal_cfg()
4544 * Default: enable interrupt coalescing self-adaptive and GL in hns3_nic_init_coal_cfg()
4546 tx_coal->adapt_enable = 1; in hns3_nic_init_coal_cfg()
4547 rx_coal->adapt_enable = 1; in hns3_nic_init_coal_cfg()
4549 tx_coal->int_gl = HNS3_INT_GL_50K; in hns3_nic_init_coal_cfg()
4550 rx_coal->int_gl = HNS3_INT_GL_50K; in hns3_nic_init_coal_cfg()
4552 rx_coal->flow_level = HNS3_FLOW_LOW; in hns3_nic_init_coal_cfg()
4553 tx_coal->flow_level = HNS3_FLOW_LOW; in hns3_nic_init_coal_cfg()
4555 if (ae_dev->dev_specs.int_ql_max) { in hns3_nic_init_coal_cfg()
4556 tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; in hns3_nic_init_coal_cfg()
4557 rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; in hns3_nic_init_coal_cfg()
4563 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_alloc_vector_data()
4566 struct pci_dev *pdev = h->pdev; in hns3_nic_alloc_vector_data()
4567 u16 tqp_num = h->kinfo.num_tqps; in hns3_nic_alloc_vector_data()
4576 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), in hns3_nic_alloc_vector_data()
4579 return -ENOMEM; in hns3_nic_alloc_vector_data()
4582 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); in hns3_nic_alloc_vector_data()
4584 priv->vector_num = vector_num; in hns3_nic_alloc_vector_data()
4585 priv->tqp_vector = (struct hns3_enet_tqp_vector *) in hns3_nic_alloc_vector_data()
4586 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), in hns3_nic_alloc_vector_data()
4588 if (!priv->tqp_vector) { in hns3_nic_alloc_vector_data()
4589 ret = -ENOMEM; in hns3_nic_alloc_vector_data()
4593 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_alloc_vector_data()
4594 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_alloc_vector_data()
4595 tqp_vector->idx = i; in hns3_nic_alloc_vector_data()
4596 tqp_vector->mask_addr = vector[i].io_addr; in hns3_nic_alloc_vector_data()
4597 tqp_vector->vector_irq = vector[i].vector; in hns3_nic_alloc_vector_data()
4602 devm_kfree(&pdev->dev, vector); in hns3_nic_alloc_vector_data()
4608 group->ring = NULL; in hns3_clear_ring_group()
4609 group->count = 0; in hns3_clear_ring_group()
4615 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_uninit_vector_data()
4619 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_uninit_vector_data()
4620 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_uninit_vector_data()
4622 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) in hns3_nic_uninit_vector_data()
4630 dev_warn(priv->dev, "failed to get ring chain\n"); in hns3_nic_uninit_vector_data()
4632 h->ae_algo->ops->unmap_ring_from_vector(h, in hns3_nic_uninit_vector_data()
4633 tqp_vector->vector_irq, &vector_ring_chain); in hns3_nic_uninit_vector_data()
4637 hns3_clear_ring_group(&tqp_vector->rx_group); in hns3_nic_uninit_vector_data()
4638 hns3_clear_ring_group(&tqp_vector->tx_group); in hns3_nic_uninit_vector_data()
4639 netif_napi_del(&priv->tqp_vector[i].napi); in hns3_nic_uninit_vector_data()
4645 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_dealloc_vector_data()
4646 struct pci_dev *pdev = h->pdev; in hns3_nic_dealloc_vector_data()
4649 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_dealloc_vector_data()
4652 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_dealloc_vector_data()
4653 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); in hns3_nic_dealloc_vector_data()
4658 devm_kfree(&pdev->dev, priv->tqp_vector); in hns3_nic_dealloc_vector_data()
4664 int queue_num = priv->ae_handle->kinfo.num_tqps; in hns3_ring_get_cfg()
4669 ring = &priv->ring[q->tqp_index]; in hns3_ring_get_cfg()
4670 desc_num = priv->ae_handle->kinfo.num_tx_desc; in hns3_ring_get_cfg()
4671 ring->queue_index = q->tqp_index; in hns3_ring_get_cfg()
4672 ring->tx_copybreak = priv->tx_copybreak; in hns3_ring_get_cfg()
4673 ring->last_to_use = 0; in hns3_ring_get_cfg()
4675 ring = &priv->ring[q->tqp_index + queue_num]; in hns3_ring_get_cfg()
4676 desc_num = priv->ae_handle->kinfo.num_rx_desc; in hns3_ring_get_cfg()
4677 ring->queue_index = q->tqp_index; in hns3_ring_get_cfg()
4678 ring->rx_copybreak = priv->rx_copybreak; in hns3_ring_get_cfg()
4681 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); in hns3_ring_get_cfg()
4683 ring->tqp = q; in hns3_ring_get_cfg()
4684 ring->desc = NULL; in hns3_ring_get_cfg()
4685 ring->desc_cb = NULL; in hns3_ring_get_cfg()
4686 ring->dev = priv->dev; in hns3_ring_get_cfg()
4687 ring->desc_dma_addr = 0; in hns3_ring_get_cfg()
4688 ring->buf_size = q->buf_size; in hns3_ring_get_cfg()
4689 ring->desc_num = desc_num; in hns3_ring_get_cfg()
4690 ring->next_to_use = 0; in hns3_ring_get_cfg()
4691 ring->next_to_clean = 0; in hns3_ring_get_cfg()
4703 struct hnae3_handle *h = priv->ae_handle; in hns3_get_ring_config()
4704 struct pci_dev *pdev = h->pdev; in hns3_get_ring_config()
4707 priv->ring = devm_kzalloc(&pdev->dev, in hns3_get_ring_config()
4708 array3_size(h->kinfo.num_tqps, in hns3_get_ring_config()
4709 sizeof(*priv->ring), 2), in hns3_get_ring_config()
4711 if (!priv->ring) in hns3_get_ring_config()
4712 return -ENOMEM; in hns3_get_ring_config()
4714 for (i = 0; i < h->kinfo.num_tqps; i++) in hns3_get_ring_config()
4715 hns3_queue_to_ring(h->kinfo.tqp[i], priv); in hns3_get_ring_config()
4722 if (!priv->ring) in hns3_put_ring_config()
4725 devm_kfree(priv->dev, priv->ring); in hns3_put_ring_config()
4726 priv->ring = NULL; in hns3_put_ring_config()
4735 .pool_size = ring->desc_num * hns3_buf_size(ring) / in hns3_alloc_page_pool()
4744 ring->page_pool = page_pool_create(&pp_params); in hns3_alloc_page_pool()
4745 if (IS_ERR(ring->page_pool)) { in hns3_alloc_page_pool()
4747 PTR_ERR(ring->page_pool)); in hns3_alloc_page_pool()
4748 ring->page_pool = NULL; in hns3_alloc_page_pool()
4756 if (ring->desc_num <= 0 || ring->buf_size <= 0) in hns3_alloc_ring_memory()
4757 return -EINVAL; in hns3_alloc_ring_memory()
4759 ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, in hns3_alloc_ring_memory()
4760 sizeof(ring->desc_cb[0]), GFP_KERNEL); in hns3_alloc_ring_memory()
4761 if (!ring->desc_cb) { in hns3_alloc_ring_memory()
4762 ret = -ENOMEM; in hns3_alloc_ring_memory()
4786 devm_kfree(ring_to_dev(ring), ring->desc_cb); in hns3_alloc_ring_memory()
4787 ring->desc_cb = NULL; in hns3_alloc_ring_memory()
4795 devm_kfree(ring_to_dev(ring), ring->desc_cb); in hns3_fini_ring()
4796 ring->desc_cb = NULL; in hns3_fini_ring()
4797 ring->next_to_clean = 0; in hns3_fini_ring()
4798 ring->next_to_use = 0; in hns3_fini_ring()
4799 ring->last_to_use = 0; in hns3_fini_ring()
4800 ring->pending_buf = 0; in hns3_fini_ring()
4801 if (!HNAE3_IS_TX_RING(ring) && ring->skb) { in hns3_fini_ring()
4802 dev_kfree_skb_any(ring->skb); in hns3_fini_ring()
4803 ring->skb = NULL; in hns3_fini_ring()
4804 } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) { in hns3_fini_ring()
4805 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_fini_ring()
4807 dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len, in hns3_fini_ring()
4809 free_pages((unsigned long)tx_spare->buf, in hns3_fini_ring()
4810 get_order(tx_spare->len)); in hns3_fini_ring()
4812 ring->tx_spare = NULL; in hns3_fini_ring()
4815 if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) { in hns3_fini_ring()
4816 page_pool_destroy(ring->page_pool); in hns3_fini_ring()
4817 ring->page_pool = NULL; in hns3_fini_ring()
4847 dma_addr_t dma = ring->desc_dma_addr; in hns3_init_ring_hw()
4848 struct hnae3_queue *q = ring->tqp; in hns3_init_ring_hw()
4856 hns3_buf_size2type(ring->buf_size)); in hns3_init_ring_hw()
4858 ring->desc_num / 8 - 1); in hns3_init_ring_hw()
4866 ring->desc_num / 8 - 1); in hns3_init_ring_hw()
4872 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; in hns3_init_tx_ring_tc()
4873 struct hnae3_tc_info *tc_info = &kinfo->tc_info; in hns3_init_tx_ring_tc()
4876 for (i = 0; i < tc_info->num_tc; i++) { in hns3_init_tx_ring_tc()
4879 for (j = 0; j < tc_info->tqp_count[i]; j++) { in hns3_init_tx_ring_tc()
4882 q = priv->ring[tc_info->tqp_offset[i] + j].tqp; in hns3_init_tx_ring_tc()
4890 struct hnae3_handle *h = priv->ae_handle; in hns3_init_all_ring()
4891 int ring_num = h->kinfo.num_tqps * 2; in hns3_init_all_ring()
4896 ret = hns3_alloc_ring_memory(&priv->ring[i]); in hns3_init_all_ring()
4898 dev_err(priv->dev, in hns3_init_all_ring()
4903 u64_stats_init(&priv->ring[i].syncp); in hns3_init_all_ring()
4909 for (j = i - 1; j >= 0; j--) in hns3_init_all_ring()
4910 hns3_fini_ring(&priv->ring[j]); in hns3_init_all_ring()
4912 return -ENOMEM; in hns3_init_all_ring()
4917 struct hnae3_handle *h = priv->ae_handle; in hns3_uninit_all_ring()
4920 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_uninit_all_ring()
4921 hns3_fini_ring(&priv->ring[i]); in hns3_uninit_all_ring()
4922 hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); in hns3_uninit_all_ring()
4930 struct hnae3_handle *h = priv->ae_handle; in hns3_init_mac_addr()
4934 if (h->ae_algo->ops->get_mac_addr) in hns3_init_mac_addr()
4935 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); in hns3_init_mac_addr()
4940 dev_warn(priv->dev, "using random MAC address %pM\n", in hns3_init_mac_addr()
4941 netdev->dev_addr); in hns3_init_mac_addr()
4942 } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { in hns3_init_mac_addr()
4943 ether_addr_copy(netdev->dev_addr, mac_addr_temp); in hns3_init_mac_addr()
4944 ether_addr_copy(netdev->perm_addr, mac_addr_temp); in hns3_init_mac_addr()
4949 if (h->ae_algo->ops->set_mac_addr) in hns3_init_mac_addr()
4950 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); in hns3_init_mac_addr()
4960 if (h->ae_algo->ops->mac_connect_phy) in hns3_init_phy()
4961 ret = h->ae_algo->ops->mac_connect_phy(h); in hns3_init_phy()
4970 if (h->ae_algo->ops->mac_disconnect_phy) in hns3_uninit_phy()
4971 h->ae_algo->ops->mac_disconnect_phy(h); in hns3_uninit_phy()
4976 if (!handle->ae_algo->ops->client_start) in hns3_client_start()
4979 return handle->ae_algo->ops->client_start(handle); in hns3_client_start()
4984 if (!handle->ae_algo->ops->client_stop) in hns3_client_stop()
4987 handle->ae_algo->ops->client_stop(handle); in hns3_client_stop()
4992 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; in hns3_info_show()
4994 dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); in hns3_info_show()
4995 dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); in hns3_info_show()
4996 dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); in hns3_info_show()
4997 dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); in hns3_info_show()
4998 dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); in hns3_info_show()
4999 dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); in hns3_info_show()
5000 dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); in hns3_info_show()
5001 dev_info(priv->dev, "Total number of enabled TCs: %u\n", in hns3_info_show()
5002 kinfo->tc_info.num_tc); in hns3_info_show()
5003 dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); in hns3_info_show()
5009 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); in hns3_set_cq_period_mode()
5010 struct hnae3_handle *handle = priv->ae_handle; in hns3_set_cq_period_mode()
5014 priv->tx_cqe_mode = mode; in hns3_set_cq_period_mode()
5016 for (i = 0; i < priv->vector_num; i++) in hns3_set_cq_period_mode()
5017 priv->tqp_vector[i].tx_group.dim.mode = mode; in hns3_set_cq_period_mode()
5019 priv->rx_cqe_mode = mode; in hns3_set_cq_period_mode()
5021 for (i = 0; i < priv->vector_num; i++) in hns3_set_cq_period_mode()
5022 priv->tqp_vector[i].rx_group.dim.mode = mode; in hns3_set_cq_period_mode()
5028 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { in hns3_set_cq_period_mode()
5036 writel(new_mode, handle->kinfo.io_base + reg); in hns3_set_cq_period_mode()
5050 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); in hns3_state_init()
5051 struct net_device *netdev = handle->kinfo.netdev; in hns3_state_init()
5054 set_bit(HNS3_NIC_STATE_INITED, &priv->state); in hns3_state_init()
5056 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) in hns3_state_init()
5057 set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); in hns3_state_init()
5059 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) in hns3_state_init()
5060 set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); in hns3_state_init()
5063 set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); in hns3_state_init()
5068 struct pci_dev *pdev = handle->pdev; in hns3_client_init()
5075 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, in hns3_client_init()
5079 return -ENOMEM; in hns3_client_init()
5082 priv->dev = &pdev->dev; in hns3_client_init()
5083 priv->netdev = netdev; in hns3_client_init()
5084 priv->ae_handle = handle; in hns3_client_init()
5085 priv->tx_timeout_count = 0; in hns3_client_init()
5086 priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; in hns3_client_init()
5087 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); in hns3_client_init()
5089 handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); in hns3_client_init()
5091 handle->kinfo.netdev = netdev; in hns3_client_init()
5092 handle->priv = (void *)priv; in hns3_client_init()
5098 netdev->watchdog_timeo = HNS3_TX_TIMEOUT; in hns3_client_init()
5099 netdev->priv_flags |= IFF_UNICAST_FLT; in hns3_client_init()
5100 netdev->netdev_ops = &hns3_nic_netdev_ops; in hns3_client_init()
5101 SET_NETDEV_DEV(netdev, &pdev->dev); in hns3_client_init()
5109 ret = -ENOMEM; in hns3_client_init()
5117 ret = -ENOMEM; in hns3_client_init()
5123 ret = -ENOMEM; in hns3_client_init()
5129 ret = -ENOMEM; in hns3_client_init()
5143 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); in hns3_client_init()
5147 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); in hns3_client_init()
5154 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); in hns3_client_init()
5162 dev_err(priv->dev, "failed to init debugfs, ret = %d\n", in hns3_client_init()
5167 netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size); in hns3_client_init()
5173 dev_err(priv->dev, "probe register netdev fail!\n"); in hns3_client_init()
5196 priv->ring = NULL; in hns3_client_init()
5198 priv->ae_handle = NULL; in hns3_client_init()
5205 struct net_device *netdev = handle->kinfo.netdev; in hns3_client_uninit()
5208 if (netdev->reg_state != NETREG_UNINITIALIZED) in hns3_client_uninit()
5215 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { in hns3_client_uninit()
5241 struct net_device *netdev = handle->kinfo.netdev; in hns3_link_status_change()
5261 while (ring->next_to_clean != ring->next_to_use) { in hns3_clear_tx_ring()
5262 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; in hns3_clear_tx_ring()
5263 hns3_free_buffer_detach(ring, ring->next_to_clean, 0); in hns3_clear_tx_ring()
5267 ring->pending_buf = 0; in hns3_clear_tx_ring()
5275 while (ring->next_to_use != ring->next_to_clean) { in hns3_clear_rx_ring()
5280 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { in hns3_clear_rx_ring()
5283 u64_stats_update_begin(&ring->syncp); in hns3_clear_rx_ring()
5284 ring->stats.sw_err_cnt++; in hns3_clear_rx_ring()
5285 u64_stats_update_end(&ring->syncp); in hns3_clear_rx_ring()
5294 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); in hns3_clear_rx_ring()
5300 if (ring->skb) { in hns3_clear_rx_ring()
5301 dev_kfree_skb_any(ring->skb); in hns3_clear_rx_ring()
5302 ring->skb = NULL; in hns3_clear_rx_ring()
5303 ring->pending_buf = 0; in hns3_clear_rx_ring()
5311 while (ring->next_to_use != ring->next_to_clean) { in hns3_force_clear_rx_ring()
5316 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { in hns3_force_clear_rx_ring()
5318 &ring->desc_cb[ring->next_to_use]); in hns3_force_clear_rx_ring()
5319 ring->desc_cb[ring->next_to_use].dma = 0; in hns3_force_clear_rx_ring()
5328 struct net_device *ndev = h->kinfo.netdev; in hns3_clear_all_ring()
5332 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_clear_all_ring()
5335 ring = &priv->ring[i]; in hns3_clear_all_ring()
5338 ring = &priv->ring[i + h->kinfo.num_tqps]; in hns3_clear_all_ring()
5351 struct net_device *ndev = h->kinfo.netdev; in hns3_nic_reset_all_ring()
5357 ret = h->ae_algo->ops->reset_queue(h); in hns3_nic_reset_all_ring()
5361 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_nic_reset_all_ring()
5362 hns3_init_ring_hw(&priv->ring[i]); in hns3_nic_reset_all_ring()
5367 hns3_clear_tx_ring(&priv->ring[i]); in hns3_nic_reset_all_ring()
5368 priv->ring[i].next_to_clean = 0; in hns3_nic_reset_all_ring()
5369 priv->ring[i].next_to_use = 0; in hns3_nic_reset_all_ring()
5370 priv->ring[i].last_to_use = 0; in hns3_nic_reset_all_ring()
5372 rx_ring = &priv->ring[i + h->kinfo.num_tqps]; in hns3_nic_reset_all_ring()
5381 for (j = 0; j < rx_ring->desc_num; j++) in hns3_nic_reset_all_ring()
5384 rx_ring->next_to_clean = 0; in hns3_nic_reset_all_ring()
5385 rx_ring->next_to_use = 0; in hns3_nic_reset_all_ring()
5395 struct hnae3_knic_private_info *kinfo = &handle->kinfo; in hns3_reset_notify_down_enet()
5396 struct net_device *ndev = kinfo->netdev; in hns3_reset_notify_down_enet()
5399 if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) in hns3_reset_notify_down_enet()
5410 struct hnae3_knic_private_info *kinfo = &handle->kinfo; in hns3_reset_notify_up_enet()
5411 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); in hns3_reset_notify_up_enet()
5414 if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { in hns3_reset_notify_up_enet()
5415 netdev_err(kinfo->netdev, "device is not initialized yet\n"); in hns3_reset_notify_up_enet()
5416 return -EFAULT; in hns3_reset_notify_up_enet()
5419 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); in hns3_reset_notify_up_enet()
5421 if (netif_running(kinfo->netdev)) { in hns3_reset_notify_up_enet()
5422 ret = hns3_nic_net_open(kinfo->netdev); in hns3_reset_notify_up_enet()
5424 set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); in hns3_reset_notify_up_enet()
5425 netdev_err(kinfo->netdev, in hns3_reset_notify_up_enet()
5436 struct net_device *netdev = handle->kinfo.netdev; in hns3_reset_notify_init_enet()
5459 hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode); in hns3_reset_notify_init_enet()
5464 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); in hns3_reset_notify_init_enet()
5468 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); in hns3_reset_notify_init_enet()
5473 if (!hns3_is_phys_func(handle->pdev)) in hns3_reset_notify_init_enet()
5478 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); in hns3_reset_notify_init_enet()
5482 set_bit(HNS3_NIC_STATE_INITED, &priv->state); in hns3_reset_notify_init_enet()
5503 struct net_device *netdev = handle->kinfo.netdev; in hns3_reset_notify_uninit_enet()
5506 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { in hns3_reset_notify_uninit_enet()
5514 hns3_reset_tx_queue(priv->ae_handle); in hns3_reset_notify_uninit_enet()
5557 ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num, in hns3_change_channels()
5560 dev_err(&handle->pdev->dev, in hns3_change_channels()
5580 struct hnae3_knic_private_info *kinfo = &h->kinfo; in hns3_set_channels()
5582 u32 new_tqp_num = ch->combined_count; in hns3_set_channels()
5587 return -EBUSY; in hns3_set_channels()
5589 if (ch->rx_count || ch->tx_count) in hns3_set_channels()
5590 return -EINVAL; in hns3_set_channels()
5592 if (kinfo->tc_info.mqprio_active) { in hns3_set_channels()
5593 dev_err(&netdev->dev, in hns3_set_channels()
5595 return -EINVAL; in hns3_set_channels()
5600 dev_err(&netdev->dev, in hns3_set_channels()
5603 return -EINVAL; in hns3_set_channels()
5606 if (kinfo->rss_size == new_tqp_num) in hns3_set_channels()
5621 org_tqp_num = h->kinfo.num_tqps; in hns3_set_channels()
5645 .msg = "IMP CMDQ error" },
5659 dev_err(&handle->pdev->dev, "Detected %s!\n", in hns3_process_hw_error()
5674 /* hns3_init_module - Driver registration routine
5682 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); in hns3_init_module()
5713 /* hns3_exit_module - Driver exit cleanup routine
5728 MODULE_ALIAS("pci:hns-nic");