Lines matching +full:rx +full:- +full:eq
All matches below come from the Microsoft Azure Network Adapter (MANA) Ethernet driver, drivers/net/ethernet/microsoft/mana/mana_en.c; each entry gives the source line number, the matched line, and the enclosing function.
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
25 apc->port_is_up = true; in mana_open()
40 if (!apc->port_is_up) in mana_close()
53 if (skb->protocol == htons(ETH_P_IP)) { in mana_checksum_info()
56 if (ip->protocol == IPPROTO_TCP) in mana_checksum_info()
59 if (ip->protocol == IPPROTO_UDP) in mana_checksum_info()
61 } else if (skb->protocol == htons(ETH_P_IPV6)) { in mana_checksum_info()
64 if (ip6->nexthdr == IPPROTO_TCP) in mana_checksum_info()
67 if (ip6->nexthdr == IPPROTO_UDP) in mana_checksum_info()
78 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; in mana_map_skb()
79 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_map_skb()
86 gc = gd->gdma_context; in mana_map_skb()
87 dev = gc->dev; in mana_map_skb()
88 da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); in mana_map_skb()
91 return -ENOMEM; in mana_map_skb()
93 ash->dma_handle[0] = da; in mana_map_skb()
94 ash->size[0] = skb_headlen(skb); in mana_map_skb()
96 tp->wqe_req.sgl[0].address = ash->dma_handle[0]; in mana_map_skb()
97 tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey; in mana_map_skb()
98 tp->wqe_req.sgl[0].size = ash->size[0]; in mana_map_skb()
100 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in mana_map_skb()
101 frag = &skb_shinfo(skb)->frags[i]; in mana_map_skb()
108 ash->dma_handle[i + 1] = da; in mana_map_skb()
109 ash->size[i + 1] = skb_frag_size(frag); in mana_map_skb()
111 tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1]; in mana_map_skb()
112 tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey; in mana_map_skb()
113 tp->wqe_req.sgl[i + 1].size = ash->size[i + 1]; in mana_map_skb()
119 for (i = i - 1; i >= 0; i--) in mana_map_skb()
120 dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1], in mana_map_skb()
123 dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); in mana_map_skb()
125 return -ENOMEM; in mana_map_skb()
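The mana_map_skb() matches above follow the standard map-then-unwind DMA pattern for a TX skb: map the linear area, map each page fragment, and on failure unmap everything already mapped in reverse order. A minimal sketch of that pattern follows; struct tx_map and map_skb_for_tx() are hypothetical stand-ins for mana_skb_head and mana_map_skb(), and only the DMA/skb APIs visible in the matches (plus skb_frag_dma_map()) are used.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Hypothetical per-skb bookkeeping, standing in for struct mana_skb_head. */
struct tx_map {
        dma_addr_t dma[MAX_SKB_FRAGS + 1];
        u32 size[MAX_SKB_FRAGS + 1];
};

static int map_skb_for_tx(struct device *dev, struct sk_buff *skb,
                          struct tx_map *m)
{
        int i;

        /* Map the linear part first; slot 0 is reserved for it. */
        m->dma[0] = dma_map_single(dev, skb->data, skb_headlen(skb),
                                   DMA_TO_DEVICE);
        if (dma_mapping_error(dev, m->dma[0]))
                return -ENOMEM;
        m->size[0] = skb_headlen(skb);

        /* Then map each page fragment into slots 1..nr_frags. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                m->dma[i + 1] = skb_frag_dma_map(dev, frag, 0,
                                                 skb_frag_size(frag),
                                                 DMA_TO_DEVICE);
                if (dma_mapping_error(dev, m->dma[i + 1]))
                        goto frag_err;
                m->size[i + 1] = skb_frag_size(frag);
        }
        return 0;

frag_err:
        /* Unwind in reverse order, as the matched error path above does. */
        for (i = i - 1; i >= 0; i--)
                dma_unmap_page(dev, m->dma[i + 1], m->size[i + 1],
                               DMA_TO_DEVICE);
        dma_unmap_single(dev, m->dma[0], m->size[0], DMA_TO_DEVICE);
        return -ENOMEM;
}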
133 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_start_xmit()
144 if (unlikely(!apc->port_is_up)) in mana_start_xmit()
150 txq = &apc->tx_qp[txq_idx].txq; in mana_start_xmit()
151 gdma_sq = txq->gdma_sq; in mana_start_xmit()
152 cq = &apc->tx_qp[txq_idx].tx_cq; in mana_start_xmit()
154 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id; in mana_start_xmit()
155 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame; in mana_start_xmit()
157 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) { in mana_start_xmit()
158 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset; in mana_start_xmit()
161 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset; in mana_start_xmit()
175 pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags; in mana_start_xmit()
190 if (skb->protocol == htons(ETH_P_IP)) in mana_start_xmit()
192 else if (skb->protocol == htons(ETH_P_IPV6)) in mana_start_xmit()
203 pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size; in mana_start_xmit()
206 ip_hdr(skb)->tot_len = 0; in mana_start_xmit()
207 ip_hdr(skb)->check = 0; in mana_start_xmit()
208 tcp_hdr(skb)->check = in mana_start_xmit()
209 ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in mana_start_xmit()
210 ip_hdr(skb)->daddr, 0, in mana_start_xmit()
213 ipv6_hdr(skb)->payload_len = 0; in mana_start_xmit()
214 tcp_hdr(skb)->check = in mana_start_xmit()
215 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in mana_start_xmit()
216 &ipv6_hdr(skb)->daddr, 0, in mana_start_xmit()
219 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in mana_start_xmit()
244 skb_queue_tail(&txq->pending_skbs, skb); in mana_start_xmit()
246 len = skb->len; in mana_start_xmit()
250 (struct gdma_posted_wqe_info *)skb->cb); in mana_start_xmit()
253 apc->eth_stats.stop_queue++; in mana_start_xmit()
257 (void)skb_dequeue_tail(&txq->pending_skbs); in mana_start_xmit()
264 atomic_inc(&txq->pending_sends); in mana_start_xmit()
266 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq); in mana_start_xmit()
271 tx_stats = &txq->stats; in mana_start_xmit()
272 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
273 tx_stats->packets++; in mana_start_xmit()
274 tx_stats->bytes += len; in mana_start_xmit()
275 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
280 apc->eth_stats.wake_queue++; in mana_start_xmit()
289 ndev->stats.tx_dropped++; in mana_start_xmit()
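The mana_start_xmit() matches above include the usual TSO preparation: when the skb is GSO, the IP length fields are zeroed and tcp->check is seeded with the pseudo-header checksum so the NIC can finalize the checksum per segment. The argument lists are truncated in the listing; the sketch below shows the complete pattern under the same TCP-only assumption as the matched code, with a hypothetical helper name.

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

/* Hypothetical helper: zero the length fields and seed tcp->check with the
 * pseudo-header checksum so the NIC can compute final checksums per segment.
 */
static void prep_tso_pseudo_csum(struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP)) {
                ip_hdr(skb)->tot_len = 0;
                ip_hdr(skb)->check = 0;
                tcp_hdr(skb)->check =
                        ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                           ip_hdr(skb)->daddr, 0,
                                           IPPROTO_TCP, 0);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                ipv6_hdr(skb)->payload_len = 0;
                tcp_hdr(skb)->check =
                        ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                         &ipv6_hdr(skb)->daddr, 0,
                                         IPPROTO_TCP, 0);
        }
}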
299 unsigned int num_queues = apc->num_queues; in mana_get_stats64()
305 if (!apc->port_is_up) in mana_get_stats64()
308 netdev_stats_to_stats64(st, &ndev->stats); in mana_get_stats64()
311 stats = &apc->rxqs[q]->stats; in mana_get_stats64()
314 start = u64_stats_fetch_begin_irq(&stats->syncp); in mana_get_stats64()
315 packets = stats->packets; in mana_get_stats64()
316 bytes = stats->bytes; in mana_get_stats64()
317 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); in mana_get_stats64()
319 st->rx_packets += packets; in mana_get_stats64()
320 st->rx_bytes += bytes; in mana_get_stats64()
324 stats = &apc->tx_qp[q].txq.stats; in mana_get_stats64()
327 start = u64_stats_fetch_begin_irq(&stats->syncp); in mana_get_stats64()
328 packets = stats->packets; in mana_get_stats64()
329 bytes = stats->bytes; in mana_get_stats64()
330 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); in mana_get_stats64()
332 st->tx_packets += packets; in mana_get_stats64()
333 st->tx_bytes += bytes; in mana_get_stats64()
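The mana_get_stats64() matches above use the u64_stats seqcount API so 64-bit per-queue counters can be read consistently (notably on 32-bit hosts) without locking the datapath. A minimal sketch of both sides of that API, with a hypothetical struct q_stats in place of struct mana_stats:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-queue counters, mirroring struct mana_stats. */
struct q_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
};

/* Writer side, called from the datapath. */
static void q_stats_add(struct q_stats *s, u64 bytes)
{
        u64_stats_update_begin(&s->syncp);
        s->packets++;
        s->bytes += bytes;
        u64_stats_update_end(&s->syncp);
}

/* Reader side, e.g. from .ndo_get_stats64: retry the snapshot only if a
 * writer raced with the read.
 */
static void q_stats_read(const struct q_stats *s, u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(&s->syncp);
                *packets = s->packets;
                *bytes = s->bytes;
        } while (u64_stats_fetch_retry_irq(&s->syncp, start));
}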
342 struct sock *sk = skb->sk; in mana_get_tx_queue()
345 txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK]; in mana_get_tx_queue()
348 rcu_access_pointer(sk->sk_dst_cache)) in mana_get_tx_queue()
359 if (ndev->real_num_tx_queues == 1) in mana_select_queue()
362 txq = sk_tx_queue_get(skb->sk); in mana_select_queue()
364 if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) { in mana_select_queue()
385 kfree(apc->rxqs); in mana_cleanup_port_context()
386 apc->rxqs = NULL; in mana_cleanup_port_context()
391 apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *), in mana_init_port_context()
394 return !apc->rxqs ? -ENOMEM : 0; in mana_init_port_context()
400 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_send_request()
403 struct device *dev = gc->dev; in mana_send_request()
407 req->dev_id = gc->mana.dev_id; in mana_send_request()
408 req->activity_id = atomic_inc_return(&activity_id); in mana_send_request()
412 if (err || resp->status) { in mana_send_request()
414 err, resp->status); in mana_send_request()
415 return err ? err : -EPROTO; in mana_send_request()
418 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 || in mana_send_request()
419 req->activity_id != resp->activity_id) { in mana_send_request()
421 req->dev_id.as_uint32, resp->dev_id.as_uint32, in mana_send_request()
422 req->activity_id, resp->activity_id); in mana_send_request()
423 return -EPROTO; in mana_send_request()
433 if (resp_hdr->response.msg_type != expected_code) in mana_verify_resp_hdr()
434 return -EPROTO; in mana_verify_resp_hdr()
436 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1) in mana_verify_resp_hdr()
437 return -EPROTO; in mana_verify_resp_hdr()
439 if (resp_hdr->response.msg_size < min_size) in mana_verify_resp_hdr()
440 return -EPROTO; in mana_verify_resp_hdr()
449 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_query_device_cfg()
452 struct device *dev = gc->dev; in mana_query_device_cfg()
473 err = -EPROTO; in mana_query_device_cfg()
494 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_query_vport_cfg()
505 return -EPROTO; in mana_query_vport_cfg()
511 apc->port_handle = resp.vport; in mana_query_vport_cfg()
512 ether_addr_copy(apc->mac_addr, resp.mac_addr); in mana_query_vport_cfg()
526 req.vport = apc->port_handle; in mana_cfg_vport()
530 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_cfg_vport()
533 netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err); in mana_cfg_vport()
540 netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n", in mana_cfg_vport()
543 err = -EPROTO; in mana_cfg_vport()
548 apc->tx_shortform_allowed = resp.short_form_allowed; in mana_cfg_vport()
549 apc->tx_vp_offset = resp.tx_vport_offset; in mana_cfg_vport()
555 enum TRI_STATE rx, in mana_cfg_vport_steering() argument
562 struct net_device *ndev = apc->ndev; in mana_cfg_vport_steering()
570 return -ENOMEM; in mana_cfg_vport_steering()
572 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size, in mana_cfg_vport_steering()
575 req->vport = apc->port_handle; in mana_cfg_vport_steering()
576 req->num_indir_entries = num_entries; in mana_cfg_vport_steering()
577 req->indir_tab_offset = sizeof(*req); in mana_cfg_vport_steering()
578 req->rx_enable = rx; in mana_cfg_vport_steering()
579 req->rss_enable = apc->rss_state; in mana_cfg_vport_steering()
580 req->update_default_rxobj = update_default_rxobj; in mana_cfg_vport_steering()
581 req->update_hashkey = update_key; in mana_cfg_vport_steering()
582 req->update_indir_tab = update_tab; in mana_cfg_vport_steering()
583 req->default_rxobj = apc->default_rxobj; in mana_cfg_vport_steering()
586 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE); in mana_cfg_vport_steering()
590 memcpy(req_indir_tab, apc->rxobj_table, in mana_cfg_vport_steering()
591 req->num_indir_entries * sizeof(mana_handle_t)); in mana_cfg_vport_steering()
594 err = mana_send_request(apc->ac, req, req_buf_size, &resp, in mana_cfg_vport_steering()
597 netdev_err(ndev, "Failed to configure vPort RX: %d\n", err); in mana_cfg_vport_steering()
604 netdev_err(ndev, "vPort RX configuration failed: %d\n", err); in mana_cfg_vport_steering()
609 netdev_err(ndev, "vPort RX configuration failed: 0x%x\n", in mana_cfg_vport_steering()
611 err = -EPROTO; in mana_cfg_vport_steering()
626 struct net_device *ndev = apc->ndev; in mana_create_wq_obj()
633 req.wq_gdma_region = wq_spec->gdma_region; in mana_create_wq_obj()
634 req.cq_gdma_region = cq_spec->gdma_region; in mana_create_wq_obj()
635 req.wq_size = wq_spec->queue_size; in mana_create_wq_obj()
636 req.cq_size = cq_spec->queue_size; in mana_create_wq_obj()
637 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id; in mana_create_wq_obj()
638 req.cq_parent_qid = cq_spec->attached_eq; in mana_create_wq_obj()
640 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_create_wq_obj()
653 err = -EPROTO; in mana_create_wq_obj()
659 err = -EPROTO; in mana_create_wq_obj()
664 wq_spec->queue_index = resp.wq_id; in mana_create_wq_obj()
665 cq_spec->queue_index = resp.cq_id; in mana_create_wq_obj()
677 struct net_device *ndev = apc->ndev; in mana_destroy_wq_obj()
685 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_destroy_wq_obj()
701 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_destroy_eq()
702 struct gdma_queue *eq; in mana_destroy_eq() local
705 if (!ac->eqs) in mana_destroy_eq()
708 for (i = 0; i < gc->max_num_queues; i++) { in mana_destroy_eq()
709 eq = ac->eqs[i].eq; in mana_destroy_eq()
710 if (!eq) in mana_destroy_eq()
713 mana_gd_destroy_queue(gc, eq); in mana_destroy_eq()
716 kfree(ac->eqs); in mana_destroy_eq()
717 ac->eqs = NULL; in mana_destroy_eq()
722 struct gdma_dev *gd = ac->gdma_dev; in mana_create_eq()
723 struct gdma_context *gc = gd->gdma_context; in mana_create_eq()
728 ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq), in mana_create_eq()
730 if (!ac->eqs) in mana_create_eq()
731 return -ENOMEM; in mana_create_eq()
736 spec.eq.callback = NULL; in mana_create_eq()
737 spec.eq.context = ac->eqs; in mana_create_eq()
738 spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE; in mana_create_eq()
740 for (i = 0; i < gc->max_num_queues; i++) { in mana_create_eq()
741 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq); in mana_create_eq()
757 used_space_old = wq->head - wq->tail; in mana_move_wq_tail()
758 used_space_new = wq->head - (wq->tail + num_units); in mana_move_wq_tail()
761 return -ERANGE; in mana_move_wq_tail()
763 wq->tail += num_units; in mana_move_wq_tail()
769 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; in mana_unmap_skb()
770 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_unmap_skb()
771 struct device *dev = gc->dev; in mana_unmap_skb()
774 dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); in mana_unmap_skb()
776 for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) in mana_unmap_skb()
777 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], in mana_unmap_skb()
783 struct gdma_comp *completions = cq->gdma_comp_buf; in mana_poll_tx_cq()
787 struct mana_txq *txq = cq->txq; in mana_poll_tx_cq()
798 ndev = txq->ndev; in mana_poll_tx_cq()
801 comp_read = mana_gd_poll_cq(cq->gdma_cq, completions, in mana_poll_tx_cq()
814 if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type != in mana_poll_tx_cq()
818 switch (cqe_oob->cqe_hdr.cqe_type) { in mana_poll_tx_cq()
832 cqe_oob->cqe_hdr.cqe_type); in mana_poll_tx_cq()
840 cqe_oob->cqe_hdr.cqe_type); in mana_poll_tx_cq()
844 if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num)) in mana_poll_tx_cq()
847 skb = skb_dequeue(&txq->pending_skbs); in mana_poll_tx_cq()
851 wqe_info = (struct gdma_posted_wqe_info *)skb->cb; in mana_poll_tx_cq()
852 wqe_unit_cnt += wqe_info->wqe_size_in_bu; in mana_poll_tx_cq()
856 napi_consume_skb(skb, cq->budget); in mana_poll_tx_cq()
864 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt); in mana_poll_tx_cq()
866 gdma_wq = txq->gdma_sq; in mana_poll_tx_cq()
872 net_txq = txq->net_txq; in mana_poll_tx_cq()
875 /* Ensure checking txq_stopped before apc->port_is_up. */ in mana_poll_tx_cq()
878 if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) { in mana_poll_tx_cq()
880 apc->eth_stats.wake_queue++; in mana_poll_tx_cq()
883 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0) in mana_poll_tx_cq()
886 cq->work_done = pkt_transmitted; in mana_poll_tx_cq()
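The mana_poll_tx_cq() matches above show the completion-side half of TX flow control: after reclaiming send WQEs, the queue is woken only if it was stopped, the port is still up, and enough WQ space is free, with a read barrier ordering the stopped check against the port state (per the in-source comment). A sketch of that wake-up step, with hypothetical parameters standing in for the driver's txq/apc fields:

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/netdevice.h>

/* Hypothetical wake-up step run after reclaiming 'completed' TX WQEs. */
static void tx_cq_wake(struct netdev_queue *net_txq, bool port_is_up,
                       atomic_t *pending_sends, int completed,
                       u32 avail_space, u32 min_space)
{
        bool txq_stopped = netif_tx_queue_stopped(net_txq);

        /* Ensure checking txq_stopped before the port state, as the
         * in-source comment in the matches above requires.
         */
        smp_rmb();

        if (txq_stopped && port_is_up && avail_space >= min_space)
                netif_tx_wake_queue(net_txq);

        /* pending_sends was incremented once per posted skb in xmit. */
        WARN_ON_ONCE(atomic_sub_return(completed, pending_sends) < 0);
}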
895 curr_index = rxq->buf_index++; in mana_post_pkt_rxq()
896 if (rxq->buf_index == rxq->num_rx_buf) in mana_post_pkt_rxq()
897 rxq->buf_index = 0; in mana_post_pkt_rxq()
899 recv_buf_oob = &rxq->rx_oobs[curr_index]; in mana_post_pkt_rxq()
901 err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req, in mana_post_pkt_rxq()
902 &recv_buf_oob->wqe_inf); in mana_post_pkt_rxq()
906 WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1); in mana_post_pkt_rxq()
912 struct mana_stats *rx_stats = &rxq->stats; in mana_rx_skb()
913 struct net_device *ndev = rxq->ndev; in mana_rx_skb()
914 uint pkt_len = cqe->ppi[0].pkt_len; in mana_rx_skb()
915 u16 rxq_idx = rxq->rxq_idx; in mana_rx_skb()
920 rxq->rx_cq.work_done++; in mana_rx_skb()
921 napi = &rxq->rx_cq.napi; in mana_rx_skb()
924 ++ndev->stats.rx_dropped; in mana_rx_skb()
932 ++ndev->stats.rx_dropped; in mana_rx_skb()
937 skb->dev = napi->dev; in mana_rx_skb()
939 skb->protocol = eth_type_trans(skb, ndev); in mana_rx_skb()
943 if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) { in mana_rx_skb()
944 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) in mana_rx_skb()
945 skb->ip_summed = CHECKSUM_UNNECESSARY; in mana_rx_skb()
948 if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) { in mana_rx_skb()
949 hash_value = cqe->ppi[0].pkt_hash; in mana_rx_skb()
951 if (cqe->rx_hashtype & MANA_HASH_L4) in mana_rx_skb()
959 u64_stats_update_begin(&rx_stats->syncp); in mana_rx_skb()
960 rx_stats->packets++; in mana_rx_skb()
961 rx_stats->bytes += pkt_len; in mana_rx_skb()
962 u64_stats_update_end(&rx_stats->syncp); in mana_rx_skb()
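The mana_rx_skb() matches above translate the RX completion's checksum and hash bits into skb metadata (CHECKSUM_UNNECESSARY and skb_set_hash()). A minimal sketch of that translation; the hw_ and hash_ parameters are hypothetical stand-ins for the struct mana_rxcomp_oob fields.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical translation of completion bits into skb offload metadata. */
static void rx_report_offloads(struct sk_buff *skb, struct net_device *ndev,
                               bool hw_ip_ok, bool hw_l4_ok,
                               bool hw_has_hash, u32 hash_val, bool hash_is_l4)
{
        skb->protocol = eth_type_trans(skb, ndev);
        skb_checksum_none_assert(skb);

        if ((ndev->features & NETIF_F_RXCSUM) && hw_ip_ok && hw_l4_ok)
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        if ((ndev->features & NETIF_F_RXHASH) && hw_has_hash)
                skb_set_hash(skb, hash_val,
                             hash_is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}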
968 struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data; in mana_process_rx_cqe()
969 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; in mana_process_rx_cqe()
970 struct net_device *ndev = rxq->ndev; in mana_process_rx_cqe()
972 struct device *dev = gc->dev; in mana_process_rx_cqe()
978 switch (oob->cqe_hdr.cqe_type) { in mana_process_rx_cqe()
987 netdev_err(ndev, "RX coalescing is unsupported\n"); in mana_process_rx_cqe()
991 netdev_err(ndev, "RX Fencing is unsupported\n"); in mana_process_rx_cqe()
995 netdev_err(ndev, "Unknown RX CQE type = %d\n", in mana_process_rx_cqe()
996 oob->cqe_hdr.cqe_type); in mana_process_rx_cqe()
1000 if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY) in mana_process_rx_cqe()
1003 pktlen = oob->ppi[0].pkt_len; in mana_process_rx_cqe()
1007 netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n", in mana_process_rx_cqe()
1008 rxq->gdma_id, cq->gdma_id, rxq->rxobj); in mana_process_rx_cqe()
1012 curr = rxq->buf_index; in mana_process_rx_cqe()
1013 rxbuf_oob = &rxq->rx_oobs[curr]; in mana_process_rx_cqe()
1014 WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1); in mana_process_rx_cqe()
1019 da = dma_map_page(dev, new_page, 0, rxq->datasize, in mana_process_rx_cqe()
1031 dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize, in mana_process_rx_cqe()
1034 old_buf = rxbuf_oob->buf_va; in mana_process_rx_cqe()
1037 rxbuf_oob->buf_va = new_buf; in mana_process_rx_cqe()
1038 rxbuf_oob->buf_dma_addr = da; in mana_process_rx_cqe()
1039 rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr; in mana_process_rx_cqe()
1046 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu); in mana_process_rx_cqe()
1053 struct gdma_comp *comp = cq->gdma_comp_buf; in mana_poll_rx_cq()
1056 comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER); in mana_poll_rx_cq()
1064 if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id)) in mana_poll_rx_cq()
1067 mana_process_rx_cqe(cq->rxq, cq, &comp[i]); in mana_poll_rx_cq()
1076 WARN_ON_ONCE(cq->gdma_cq != gdma_queue); in mana_cq_handler()
1078 if (cq->type == MANA_CQ_TYPE_RX) in mana_cq_handler()
1083 if (cq->work_done < cq->budget && in mana_cq_handler()
1084 napi_complete_done(&cq->napi, cq->work_done)) { in mana_cq_handler()
1097 cq->work_done = 0; in mana_poll()
1098 cq->budget = budget; in mana_poll()
1100 mana_cq_handler(cq, cq->gdma_cq); in mana_poll()
1102 return min(cq->work_done, budget); in mana_poll()
1109 napi_schedule_irqoff(&cq->napi); in mana_schedule_napi()
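Together, the mana_poll(), mana_cq_handler() and mana_schedule_napi() matches above form the standard NAPI loop: the EQ callback schedules NAPI, the poll callback drains the CQ up to the budget, and the CQ is re-armed only after napi_complete_done() succeeds. A sketch of that flow, with hypothetical drain_cq()/rearm_cq() stubs in place of mana_poll_*_cq() and mana_gd_ring_cq(..., SET_ARM_BIT):

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Hypothetical stub: drain completions from the CQ, at most 'budget'. */
static int drain_cq(struct napi_struct *napi, int budget)
{
        return 0;
}

/* Hypothetical stub: ring the CQ doorbell with the arm bit set. */
static void rearm_cq(struct napi_struct *napi)
{
}

/* NAPI poll callback: only re-arm the CQ once NAPI is truly done. */
static int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = drain_cq(napi, budget);

        if (work_done < budget && napi_complete_done(napi, work_done))
                rearm_cq(napi);

        return min(work_done, budget);
}

/* The EQ/interrupt callback simply schedules NAPI. */
static void example_schedule(struct napi_struct *napi)
{
        napi_schedule_irqoff(napi);
}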
1114 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_cq()
1116 if (!cq->gdma_cq) in mana_deinit_cq()
1119 mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq); in mana_deinit_cq()
1124 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_txq()
1126 if (!txq->gdma_sq) in mana_deinit_txq()
1129 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq); in mana_deinit_txq()
1137 if (!apc->tx_qp) in mana_destroy_txq()
1140 for (i = 0; i < apc->num_queues; i++) { in mana_destroy_txq()
1141 napi = &apc->tx_qp[i].tx_cq.napi; in mana_destroy_txq()
1146 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); in mana_destroy_txq()
1148 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); in mana_destroy_txq()
1150 mana_deinit_txq(apc, &apc->tx_qp[i].txq); in mana_destroy_txq()
1153 kfree(apc->tx_qp); in mana_destroy_txq()
1154 apc->tx_qp = NULL; in mana_destroy_txq()
1160 struct mana_context *ac = apc->ac; in mana_create_txq()
1161 struct gdma_dev *gd = ac->gdma_dev; in mana_create_txq()
1173 apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp), in mana_create_txq()
1175 if (!apc->tx_qp) in mana_create_txq()
1176 return -ENOMEM; in mana_create_txq()
1189 gc = gd->gdma_context; in mana_create_txq()
1191 for (i = 0; i < apc->num_queues; i++) { in mana_create_txq()
1192 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE; in mana_create_txq()
1195 txq = &apc->tx_qp[i].txq; in mana_create_txq()
1197 u64_stats_init(&txq->stats.syncp); in mana_create_txq()
1198 txq->ndev = net; in mana_create_txq()
1199 txq->net_txq = netdev_get_tx_queue(net, i); in mana_create_txq()
1200 txq->vp_offset = apc->tx_vp_offset; in mana_create_txq()
1201 skb_queue_head_init(&txq->pending_skbs); in mana_create_txq()
1207 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq); in mana_create_txq()
1212 cq = &apc->tx_qp[i].tx_cq; in mana_create_txq()
1213 cq->type = MANA_CQ_TYPE_TX; in mana_create_txq()
1215 cq->txq = txq; in mana_create_txq()
1222 spec.cq.parent_eq = ac->eqs[i].eq; in mana_create_txq()
1224 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); in mana_create_txq()
1231 wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region; in mana_create_txq()
1232 wq_spec.queue_size = txq->gdma_sq->queue_size; in mana_create_txq()
1234 cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region; in mana_create_txq()
1235 cq_spec.queue_size = cq->gdma_cq->queue_size; in mana_create_txq()
1237 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; in mana_create_txq()
1239 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ, in mana_create_txq()
1241 &apc->tx_qp[i].tx_object); in mana_create_txq()
1246 txq->gdma_sq->id = wq_spec.queue_index; in mana_create_txq()
1247 cq->gdma_cq->id = cq_spec.queue_index; in mana_create_txq()
1249 txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; in mana_create_txq()
1250 cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; in mana_create_txq()
1252 txq->gdma_txq_id = txq->gdma_sq->id; in mana_create_txq()
1254 cq->gdma_id = cq->gdma_cq->id; in mana_create_txq()
1256 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { in mana_create_txq()
1257 err = -EINVAL; in mana_create_txq()
1261 gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_txq()
1263 netif_tx_napi_add(net, &cq->napi, mana_poll, NAPI_POLL_WEIGHT); in mana_create_txq()
1264 napi_enable(&cq->napi); in mana_create_txq()
1266 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); in mana_create_txq()
1279 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_destroy_rxq()
1281 struct device *dev = gc->dev; in mana_destroy_rxq()
1288 napi = &rxq->rx_cq.napi; in mana_destroy_rxq()
1296 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); in mana_destroy_rxq()
1298 mana_deinit_cq(apc, &rxq->rx_cq); in mana_destroy_rxq()
1300 for (i = 0; i < rxq->num_rx_buf; i++) { in mana_destroy_rxq()
1301 rx_oob = &rxq->rx_oobs[i]; in mana_destroy_rxq()
1303 if (!rx_oob->buf_va) in mana_destroy_rxq()
1306 dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize, in mana_destroy_rxq()
1309 free_page((unsigned long)rx_oob->buf_va); in mana_destroy_rxq()
1310 rx_oob->buf_va = NULL; in mana_destroy_rxq()
1313 if (rxq->gdma_rq) in mana_destroy_rxq()
1314 mana_gd_destroy_queue(gc, rxq->gdma_rq); in mana_destroy_rxq()
1325 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_alloc_rx_wqe()
1327 struct device *dev = gc->dev; in mana_alloc_rx_wqe()
1332 WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE); in mana_alloc_rx_wqe()
1337 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_alloc_rx_wqe()
1338 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_alloc_rx_wqe()
1343 return -ENOMEM; in mana_alloc_rx_wqe()
1345 da = dma_map_page(dev, page, 0, rxq->datasize, DMA_FROM_DEVICE); in mana_alloc_rx_wqe()
1349 return -ENOMEM; in mana_alloc_rx_wqe()
1352 rx_oob->buf_va = page_to_virt(page); in mana_alloc_rx_wqe()
1353 rx_oob->buf_dma_addr = da; in mana_alloc_rx_wqe()
1355 rx_oob->num_sge = 1; in mana_alloc_rx_wqe()
1356 rx_oob->sgl[0].address = rx_oob->buf_dma_addr; in mana_alloc_rx_wqe()
1357 rx_oob->sgl[0].size = rxq->datasize; in mana_alloc_rx_wqe()
1358 rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey; in mana_alloc_rx_wqe()
1360 rx_oob->wqe_req.sgl = rx_oob->sgl; in mana_alloc_rx_wqe()
1361 rx_oob->wqe_req.num_sge = rx_oob->num_sge; in mana_alloc_rx_wqe()
1362 rx_oob->wqe_req.inline_oob_size = 0; in mana_alloc_rx_wqe()
1363 rx_oob->wqe_req.inline_oob_data = NULL; in mana_alloc_rx_wqe()
1364 rx_oob->wqe_req.flags = 0; in mana_alloc_rx_wqe()
1365 rx_oob->wqe_req.client_data_unit = 0; in mana_alloc_rx_wqe()
1368 MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32); in mana_alloc_rx_wqe()
1381 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_push_wqe()
1382 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_push_wqe()
1384 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req, in mana_push_wqe()
1385 &rx_oob->wqe_inf); in mana_push_wqe()
1387 return -ENOSPC; in mana_push_wqe()
1394 u32 rxq_idx, struct mana_eq *eq, in mana_create_rxq() argument
1397 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_rxq()
1407 gc = gd->gdma_context; in mana_create_rxq()
1414 rxq->ndev = ndev; in mana_create_rxq()
1415 rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE; in mana_create_rxq()
1416 rxq->rxq_idx = rxq_idx; in mana_create_rxq()
1417 rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64); in mana_create_rxq()
1418 rxq->rxobj = INVALID_MANA_HANDLE; in mana_create_rxq()
1432 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq); in mana_create_rxq()
1437 cq = &rxq->rx_cq; in mana_create_rxq()
1438 cq->type = MANA_CQ_TYPE_RX; in mana_create_rxq()
1439 cq->rxq = rxq; in mana_create_rxq()
1446 spec.cq.parent_eq = eq->eq; in mana_create_rxq()
1448 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); in mana_create_rxq()
1454 wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region; in mana_create_rxq()
1455 wq_spec.queue_size = rxq->gdma_rq->queue_size; in mana_create_rxq()
1457 cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region; in mana_create_rxq()
1458 cq_spec.queue_size = cq->gdma_cq->queue_size; in mana_create_rxq()
1460 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; in mana_create_rxq()
1462 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ, in mana_create_rxq()
1463 &wq_spec, &cq_spec, &rxq->rxobj); in mana_create_rxq()
1467 rxq->gdma_rq->id = wq_spec.queue_index; in mana_create_rxq()
1468 cq->gdma_cq->id = cq_spec.queue_index; in mana_create_rxq()
1470 rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; in mana_create_rxq()
1471 cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; in mana_create_rxq()
1473 rxq->gdma_id = rxq->gdma_rq->id; in mana_create_rxq()
1474 cq->gdma_id = cq->gdma_cq->id; in mana_create_rxq()
1480 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { in mana_create_rxq()
1481 err = -EINVAL; in mana_create_rxq()
1485 gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_rxq()
1487 netif_napi_add(ndev, &cq->napi, mana_poll, 1); in mana_create_rxq()
1488 napi_enable(&cq->napi); in mana_create_rxq()
1490 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); in mana_create_rxq()
1508 struct mana_context *ac = apc->ac; in mana_add_rx_queues()
1513 for (i = 0; i < apc->num_queues; i++) { in mana_add_rx_queues()
1514 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); in mana_add_rx_queues()
1516 err = -ENOMEM; in mana_add_rx_queues()
1520 u64_stats_init(&rxq->stats.syncp); in mana_add_rx_queues()
1522 apc->rxqs[i] = rxq; in mana_add_rx_queues()
1525 apc->default_rxobj = apc->rxqs[0]->rxobj; in mana_add_rx_queues()
1535 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { in mana_destroy_vport()
1536 rxq = apc->rxqs[rxq_idx]; in mana_destroy_vport()
1541 apc->rxqs[rxq_idx] = NULL; in mana_destroy_vport()
1550 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_vport()
1553 apc->default_rxobj = INVALID_MANA_HANDLE; in mana_create_vport()
1555 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell); in mana_create_vport()
1567 apc->indir_table[i] = in mana_rss_table_init()
1568 ethtool_rxfh_indir_default(i, apc->num_queues); in mana_rss_table_init()
1571 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx, in mana_config_rss() argument
1579 queue_idx = apc->indir_table[i]; in mana_config_rss()
1580 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj; in mana_config_rss()
1584 return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab); in mana_config_rss()
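The mana_rss_table_init() and mana_config_rss() matches above fill the RSS indirection table with ethtool's round-robin default and then resolve each entry to the RX object of the queue it selects. A minimal sketch of that fill, with a hypothetical TABLE_SIZE and flat arrays in place of MANA_INDIRECT_TABLE_SIZE, apc->indir_table and apc->rxobj_table:

#include <linux/ethtool.h>
#include <linux/types.h>

#define TABLE_SIZE 64   /* hypothetical stand-in for MANA_INDIRECT_TABLE_SIZE */

/* Spread table entries round-robin across the active RX queues, then
 * resolve each entry to that queue's RX object handle.
 */
static void rss_table_defaults(u32 *indir_table, u64 *rxobj_table,
                               const u64 *rxq_objs, unsigned int num_queues)
{
        int i;

        for (i = 0; i < TABLE_SIZE; i++)
                indir_table[i] = ethtool_rxfh_indir_default(i, num_queues);

        for (i = 0; i < TABLE_SIZE; i++)
                rxobj_table[i] = rxq_objs[indir_table[i]];
}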
1591 int port_idx = apc->port_idx; in mana_init_port()
1607 if (apc->max_queues > max_queues) in mana_init_port()
1608 apc->max_queues = max_queues; in mana_init_port()
1610 if (apc->num_queues > apc->max_queues) in mana_init_port()
1611 apc->num_queues = apc->max_queues; in mana_init_port()
1613 ether_addr_copy(ndev->dev_addr, apc->mac_addr); in mana_init_port()
1618 kfree(apc->rxqs); in mana_init_port()
1619 apc->rxqs = NULL; in mana_init_port()
1632 err = netif_set_real_num_tx_queues(ndev, apc->num_queues); in mana_alloc_queues()
1640 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE; in mana_alloc_queues()
1642 err = netif_set_real_num_rx_queues(ndev, apc->num_queues); in mana_alloc_queues()
1672 kfree(apc->rxqs); in mana_attach()
1673 apc->rxqs = NULL; in mana_attach()
1679 apc->port_is_up = apc->port_st_save; in mana_attach()
1684 if (apc->port_is_up) { in mana_attach()
1698 if (apc->port_is_up) in mana_dealloc_queues()
1699 return -EINVAL; in mana_dealloc_queues()
1701 /* No packet can be transmitted now since apc->port_is_up is false. in mana_dealloc_queues()
1702 * There is still a tiny chance that mana_poll_tx_cq() can re-enable in mana_dealloc_queues()
1703 * a txq because it may not timely see apc->port_is_up being cleared in mana_dealloc_queues()
1705 * new packets due to apc->port_is_up being false. in mana_dealloc_queues()
1707 * Drain all the in-flight TX packets in mana_dealloc_queues()
1709 for (i = 0; i < apc->num_queues; i++) { in mana_dealloc_queues()
1710 txq = &apc->tx_qp[i].txq; in mana_dealloc_queues()
1712 while (atomic_read(&txq->pending_sends) > 0) in mana_dealloc_queues()
1720 apc->rss_state = TRI_STATE_FALSE; in mana_dealloc_queues()
1727 /* TODO: Implement RX fencing */ in mana_dealloc_queues()
1742 apc->port_st_save = apc->port_is_up; in mana_detach()
1743 apc->port_is_up = false; in mana_detach()
1751 if (apc->port_st_save) { in mana_detach()
1768 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_probe_port()
1774 gc->max_num_queues); in mana_probe_port()
1776 return -ENOMEM; in mana_probe_port()
1781 apc->ac = ac; in mana_probe_port()
1782 apc->ndev = ndev; in mana_probe_port()
1783 apc->max_queues = gc->max_num_queues; in mana_probe_port()
1784 apc->num_queues = gc->max_num_queues; in mana_probe_port()
1785 apc->port_handle = INVALID_MANA_HANDLE; in mana_probe_port()
1786 apc->port_idx = port_idx; in mana_probe_port()
1788 ndev->netdev_ops = &mana_devops; in mana_probe_port()
1789 ndev->ethtool_ops = &mana_ethtool_ops; in mana_probe_port()
1790 ndev->mtu = ETH_DATA_LEN; in mana_probe_port()
1791 ndev->max_mtu = ndev->mtu; in mana_probe_port()
1792 ndev->min_mtu = ndev->mtu; in mana_probe_port()
1793 ndev->needed_headroom = MANA_HEADROOM; in mana_probe_port()
1794 SET_NETDEV_DEV(ndev, gc->dev); in mana_probe_port()
1798 netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE); in mana_probe_port()
1806 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; in mana_probe_port()
1807 ndev->hw_features |= NETIF_F_RXCSUM; in mana_probe_port()
1808 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; in mana_probe_port()
1809 ndev->hw_features |= NETIF_F_RXHASH; in mana_probe_port()
1810 ndev->features = ndev->hw_features; in mana_probe_port()
1811 ndev->vlan_features = 0; in mana_probe_port()
1822 kfree(apc->rxqs); in mana_probe_port()
1823 apc->rxqs = NULL; in mana_probe_port()
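The mana_probe_port() matches above show the usual multi-queue netdev bring-up: allocate with alloc_etherdev_mq(), attach the ops tables, pin the MTU to ETH_DATA_LEN, declare the offload features, and register the device. A condensed sketch of that sequence; struct my_priv and the two ops tables are hypothetical placeholders for the driver's mana_port_context, mana_devops and mana_ethtool_ops.

#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical private data and ops tables. */
struct my_priv {
        unsigned int num_queues;
};

static const struct net_device_ops my_devops;
static const struct ethtool_ops my_ethtool_ops;

static int example_probe_port(struct device *parent, unsigned int num_queues,
                              struct net_device **out)
{
        struct net_device *ndev;
        int err;

        ndev = alloc_etherdev_mq(sizeof(struct my_priv), num_queues);
        if (!ndev)
                return -ENOMEM;

        SET_NETDEV_DEV(ndev, parent);
        ndev->netdev_ops = &my_devops;
        ndev->ethtool_ops = &my_ethtool_ops;

        /* MTU is fixed at the standard Ethernet payload size. */
        ndev->mtu = ETH_DATA_LEN;
        ndev->max_mtu = ndev->mtu;
        ndev->min_mtu = ndev->mtu;

        /* Offloads advertised in the matches above. */
        ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                            NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 |
                            NETIF_F_RXHASH;
        ndev->features = ndev->hw_features;

        err = register_netdev(ndev);
        if (err) {
                free_netdev(ndev);
                return err;
        }

        *out = ndev;
        return 0;
}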
1833 struct gdma_context *gc = gd->gdma_context; in mana_probe()
1834 struct device *dev = gc->dev; in mana_probe()
1849 return -ENOMEM; in mana_probe()
1851 ac->gdma_dev = gd; in mana_probe()
1852 ac->num_ports = 1; in mana_probe()
1853 gd->driver_data = ac; in mana_probe()
1860 MANA_MICRO_VERSION, &ac->num_ports); in mana_probe()
1864 if (ac->num_ports > MAX_PORTS_IN_MANA_DEV) in mana_probe()
1865 ac->num_ports = MAX_PORTS_IN_MANA_DEV; in mana_probe()
1867 for (i = 0; i < ac->num_ports; i++) { in mana_probe()
1868 err = mana_probe_port(ac, i, &ac->ports[i]); in mana_probe()
1881 struct gdma_context *gc = gd->gdma_context; in mana_remove()
1882 struct mana_context *ac = gd->driver_data; in mana_remove()
1883 struct device *dev = gc->dev; in mana_remove()
1887 for (i = 0; i < ac->num_ports; i++) { in mana_remove()
1888 ndev = ac->ports[i]; in mana_remove()
1913 gd->driver_data = NULL; in mana_remove()
1914 gd->gdma_context = NULL; in mana_remove()