Lines matching the full-word terms "rx", "-", "eq"
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
28 apc->port_is_up = true; in mana_open()
43 if (!apc->port_is_up) in mana_close()
56 if (skb->protocol == htons(ETH_P_IP)) { in mana_checksum_info()
59 if (ip->protocol == IPPROTO_TCP) in mana_checksum_info()
62 if (ip->protocol == IPPROTO_UDP) in mana_checksum_info()
64 } else if (skb->protocol == htons(ETH_P_IPV6)) { in mana_checksum_info()
67 if (ip6->nexthdr == IPPROTO_TCP) in mana_checksum_info()
70 if (ip6->nexthdr == IPPROTO_UDP) in mana_checksum_info()
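The mana_checksum_info() matches above show the driver deciding whether an skb carries TCP or UDP over IPv4/IPv6 before requesting checksum offload. A minimal, self-contained sketch of that dispatch (the helper name is illustrative, not the driver's API):

#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>

/* Returns true when the packet is TCP or UDP over IPv4 or IPv6. */
static bool skb_is_tcp_or_udp(const struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *ip = ip_hdr(skb);

                return ip->protocol == IPPROTO_TCP ||
                       ip->protocol == IPPROTO_UDP;
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6 = ipv6_hdr(skb);

                return ip6->nexthdr == IPPROTO_TCP ||
                       ip6->nexthdr == IPPROTO_UDP;
        }

        return false;
}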
81 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; in mana_map_skb()
82 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_map_skb()
89 gc = gd->gdma_context; in mana_map_skb()
90 dev = gc->dev; in mana_map_skb()
91 da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); in mana_map_skb()
94 return -ENOMEM; in mana_map_skb()
96 ash->dma_handle[0] = da; in mana_map_skb()
97 ash->size[0] = skb_headlen(skb); in mana_map_skb()
99 tp->wqe_req.sgl[0].address = ash->dma_handle[0]; in mana_map_skb()
100 tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey; in mana_map_skb()
101 tp->wqe_req.sgl[0].size = ash->size[0]; in mana_map_skb()
103 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in mana_map_skb()
104 frag = &skb_shinfo(skb)->frags[i]; in mana_map_skb()
111 ash->dma_handle[i + 1] = da; in mana_map_skb()
112 ash->size[i + 1] = skb_frag_size(frag); in mana_map_skb()
114 tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1]; in mana_map_skb()
115 tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey; in mana_map_skb()
116 tp->wqe_req.sgl[i + 1].size = ash->size[i + 1]; in mana_map_skb()
122 for (i = i - 1; i >= 0; i--) in mana_map_skb()
123 dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1], in mana_map_skb()
126 dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); in mana_map_skb()
128 return -ENOMEM; in mana_map_skb()
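mana_map_skb() above follows the usual map-then-unwind DMA pattern: map the linear part with dma_map_single(), each page fragment with skb_frag_dma_map(), and undo everything in reverse order if a later mapping fails. A sketch of that pattern, with plain arrays standing in for the driver's mana_skb_head bookkeeping:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int demo_map_skb_for_tx(struct device *dev, struct sk_buff *skb,
                               dma_addr_t *handles, u32 *sizes)
{
        const skb_frag_t *frag;
        dma_addr_t da;
        int i;

        /* Linear part first. */
        da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
        if (dma_mapping_error(dev, da))
                return -ENOMEM;

        handles[0] = da;
        sizes[0] = skb_headlen(skb);

        /* Then every page fragment. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
                                      DMA_TO_DEVICE);
                if (dma_mapping_error(dev, da))
                        goto frag_err;

                handles[i + 1] = da;
                sizes[i + 1] = skb_frag_size(frag);
        }

        return 0;

frag_err:
        /* Unwind in reverse order, then the linear mapping. */
        for (i = i - 1; i >= 0; i--)
                dma_unmap_page(dev, handles[i + 1], sizes[i + 1],
                               DMA_TO_DEVICE);
        dma_unmap_single(dev, handles[0], sizes[0], DMA_TO_DEVICE);
        return -ENOMEM;
}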
136 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_start_xmit()
147 if (unlikely(!apc->port_is_up)) in mana_start_xmit()
153 txq = &apc->tx_qp[txq_idx].txq; in mana_start_xmit()
154 gdma_sq = txq->gdma_sq; in mana_start_xmit()
155 cq = &apc->tx_qp[txq_idx].tx_cq; in mana_start_xmit()
157 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id; in mana_start_xmit()
158 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame; in mana_start_xmit()
160 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) { in mana_start_xmit()
161 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset; in mana_start_xmit()
164 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset; in mana_start_xmit()
178 pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags; in mana_start_xmit()
193 if (skb->protocol == htons(ETH_P_IP)) in mana_start_xmit()
195 else if (skb->protocol == htons(ETH_P_IPV6)) in mana_start_xmit()
206 pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size; in mana_start_xmit()
209 ip_hdr(skb)->tot_len = 0; in mana_start_xmit()
210 ip_hdr(skb)->check = 0; in mana_start_xmit()
211 tcp_hdr(skb)->check = in mana_start_xmit()
212 ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in mana_start_xmit()
213 ip_hdr(skb)->daddr, 0, in mana_start_xmit()
216 ipv6_hdr(skb)->payload_len = 0; in mana_start_xmit()
217 tcp_hdr(skb)->check = in mana_start_xmit()
218 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in mana_start_xmit()
219 &ipv6_hdr(skb)->daddr, 0, in mana_start_xmit()
222 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in mana_start_xmit()
247 skb_queue_tail(&txq->pending_skbs, skb); in mana_start_xmit()
249 len = skb->len; in mana_start_xmit()
253 (struct gdma_posted_wqe_info *)skb->cb); in mana_start_xmit()
256 apc->eth_stats.stop_queue++; in mana_start_xmit()
260 (void)skb_dequeue_tail(&txq->pending_skbs); in mana_start_xmit()
267 atomic_inc(&txq->pending_sends); in mana_start_xmit()
269 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq); in mana_start_xmit()
274 tx_stats = &txq->stats; in mana_start_xmit()
275 u64_stats_update_begin(&tx_stats->syncp); in mana_start_xmit()
276 tx_stats->packets++; in mana_start_xmit()
277 tx_stats->bytes += len; in mana_start_xmit()
278 u64_stats_update_end(&tx_stats->syncp); in mana_start_xmit()
283 apc->eth_stats.wake_queue++; in mana_start_xmit()
292 ndev->stats.tx_dropped++; in mana_start_xmit()
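The GSO branch of mana_start_xmit() above performs the standard TSO preparation: zero the IPv4 total length and checksum (or the IPv6 payload length) and seed the TCP checksum with the pseudo-header sum so the hardware can finish it per segment. A sketch of just that step:

#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

static void demo_tso_fix_pseudo_hdr(struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP)) {
                ip_hdr(skb)->tot_len = 0;
                ip_hdr(skb)->check = 0;
                tcp_hdr(skb)->check =
                        ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                           ip_hdr(skb)->daddr, 0,
                                           IPPROTO_TCP, 0);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                ipv6_hdr(skb)->payload_len = 0;
                tcp_hdr(skb)->check =
                        ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                         &ipv6_hdr(skb)->daddr, 0,
                                         IPPROTO_TCP, 0);
        }
}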
302 unsigned int num_queues = apc->num_queues; in mana_get_stats64()
309 if (!apc->port_is_up) in mana_get_stats64()
312 netdev_stats_to_stats64(st, &ndev->stats); in mana_get_stats64()
315 rx_stats = &apc->rxqs[q]->stats; in mana_get_stats64()
318 start = u64_stats_fetch_begin_irq(&rx_stats->syncp); in mana_get_stats64()
319 packets = rx_stats->packets; in mana_get_stats64()
320 bytes = rx_stats->bytes; in mana_get_stats64()
321 } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); in mana_get_stats64()
323 st->rx_packets += packets; in mana_get_stats64()
324 st->rx_bytes += bytes; in mana_get_stats64()
328 tx_stats = &apc->tx_qp[q].txq.stats; in mana_get_stats64()
331 start = u64_stats_fetch_begin_irq(&tx_stats->syncp); in mana_get_stats64()
332 packets = tx_stats->packets; in mana_get_stats64()
333 bytes = tx_stats->bytes; in mana_get_stats64()
334 } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); in mana_get_stats64()
336 st->tx_packets += packets; in mana_get_stats64()
337 st->tx_bytes += bytes; in mana_get_stats64()
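mana_get_stats64() above reads the per-queue counters with the u64_stats seqcount loop, retrying if a writer updated the counters mid-read. A sketch of one such read, using the same _irq helpers that appear in the listing (newer kernels fold these into u64_stats_fetch_begin()/u64_stats_fetch_retry()):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_rx_stats {                 /* illustrative layout */
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;
};

static void demo_read_rx_stats(const struct demo_rx_stats *rx_stats,
                               u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
                *packets = rx_stats->packets;
                *bytes = rx_stats->bytes;
        } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
}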
346 struct sock *sk = skb->sk; in mana_get_tx_queue()
349 txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK]; in mana_get_tx_queue()
352 rcu_access_pointer(sk->sk_dst_cache)) in mana_get_tx_queue()
363 if (ndev->real_num_tx_queues == 1) in mana_select_queue()
366 txq = sk_tx_queue_get(skb->sk); in mana_select_queue()
368 if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) { in mana_select_queue()
391 kfree(apc->rxqs); in mana_cleanup_port_context()
392 apc->rxqs = NULL; in mana_cleanup_port_context()
397 apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *), in mana_init_port_context()
400 return !apc->rxqs ? -ENOMEM : 0; in mana_init_port_context()
406 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_send_request()
409 struct device *dev = gc->dev; in mana_send_request()
413 req->dev_id = gc->mana.dev_id; in mana_send_request()
414 req->activity_id = atomic_inc_return(&activity_id); in mana_send_request()
418 if (err || resp->status) { in mana_send_request()
420 err, resp->status); in mana_send_request()
421 return err ? err : -EPROTO; in mana_send_request()
424 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 || in mana_send_request()
425 req->activity_id != resp->activity_id) { in mana_send_request()
427 req->dev_id.as_uint32, resp->dev_id.as_uint32, in mana_send_request()
428 req->activity_id, resp->activity_id); in mana_send_request()
429 return -EPROTO; in mana_send_request()
439 if (resp_hdr->response.msg_type != expected_code) in mana_verify_resp_hdr()
440 return -EPROTO; in mana_verify_resp_hdr()
442 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1) in mana_verify_resp_hdr()
443 return -EPROTO; in mana_verify_resp_hdr()
445 if (resp_hdr->response.msg_size < min_size) in mana_verify_resp_hdr()
446 return -EPROTO; in mana_verify_resp_hdr()
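mana_send_request() and mana_verify_resp_hdr() above show the request/response discipline: a transport error or a non-zero status fails the call, mismatched device/activity IDs are -EPROTO, and the response header must carry the expected message type, a supported version, and at least the minimum size. A sketch of the header checks, with an assumed layout (not the real gdma_resp_hdr):

#include <linux/errno.h>
#include <linux/types.h>

struct demo_resp_hdr {                 /* assumed layout for illustration */
        u32 msg_type;
        u32 msg_version;
        u32 msg_size;
        u32 status;
};

static int demo_verify_resp_hdr(const struct demo_resp_hdr *resp,
                                u32 expected_code, u32 min_size)
{
        if (resp->msg_type != expected_code)
                return -EPROTO;

        if (resp->msg_version < 1)     /* GDMA_MESSAGE_V1 in the listing */
                return -EPROTO;

        if (resp->msg_size < min_size)
                return -EPROTO;

        return 0;
}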
463 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_register_hw_vport()
466 netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err); in mana_pf_register_hw_vport()
473 netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n", in mana_pf_register_hw_vport()
475 return err ? err : -EPROTO; in mana_pf_register_hw_vport()
478 apc->port_handle = resp.hw_vport_handle; in mana_pf_register_hw_vport()
490 req.hw_vport_handle = apc->port_handle; in mana_pf_deregister_hw_vport()
492 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_deregister_hw_vport()
495 netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n", in mana_pf_deregister_hw_vport()
503 netdev_err(apc->ndev, in mana_pf_deregister_hw_vport()
516 req.vport = apc->port_handle; in mana_pf_register_filter()
517 memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN); in mana_pf_register_filter()
519 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_register_filter()
522 netdev_err(apc->ndev, "Failed to register filter: %d\n", err); in mana_pf_register_filter()
529 netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n", in mana_pf_register_filter()
531 return err ? err : -EPROTO; in mana_pf_register_filter()
534 apc->pf_filter_handle = resp.filter_handle; in mana_pf_register_filter()
546 req.filter_handle = apc->pf_filter_handle; in mana_pf_deregister_filter()
548 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_deregister_filter()
551 netdev_err(apc->ndev, "Failed to unregister filter: %d\n", in mana_pf_deregister_filter()
559 netdev_err(apc->ndev, in mana_pf_deregister_filter()
568 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_query_device_cfg()
571 struct device *dev = gc->dev; in mana_query_device_cfg()
592 err = -EPROTO; in mana_query_device_cfg()
613 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_query_vport_cfg()
624 return -EPROTO; in mana_query_vport_cfg()
630 apc->port_handle = resp.vport; in mana_query_vport_cfg()
631 ether_addr_copy(apc->mac_addr, resp.mac_addr); in mana_query_vport_cfg()
645 req.vport = apc->port_handle; in mana_cfg_vport()
649 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_cfg_vport()
652 netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err); in mana_cfg_vport()
659 netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n", in mana_cfg_vport()
662 err = -EPROTO; in mana_cfg_vport()
667 apc->tx_shortform_allowed = resp.short_form_allowed; in mana_cfg_vport()
668 apc->tx_vp_offset = resp.tx_vport_offset; in mana_cfg_vport()
674 enum TRI_STATE rx, in mana_cfg_vport_steering() argument
681 struct net_device *ndev = apc->ndev; in mana_cfg_vport_steering()
689 return -ENOMEM; in mana_cfg_vport_steering()
691 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size, in mana_cfg_vport_steering()
694 req->vport = apc->port_handle; in mana_cfg_vport_steering()
695 req->num_indir_entries = num_entries; in mana_cfg_vport_steering()
696 req->indir_tab_offset = sizeof(*req); in mana_cfg_vport_steering()
697 req->rx_enable = rx; in mana_cfg_vport_steering()
698 req->rss_enable = apc->rss_state; in mana_cfg_vport_steering()
699 req->update_default_rxobj = update_default_rxobj; in mana_cfg_vport_steering()
700 req->update_hashkey = update_key; in mana_cfg_vport_steering()
701 req->update_indir_tab = update_tab; in mana_cfg_vport_steering()
702 req->default_rxobj = apc->default_rxobj; in mana_cfg_vport_steering()
705 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE); in mana_cfg_vport_steering()
709 memcpy(req_indir_tab, apc->rxobj_table, in mana_cfg_vport_steering()
710 req->num_indir_entries * sizeof(mana_handle_t)); in mana_cfg_vport_steering()
713 err = mana_send_request(apc->ac, req, req_buf_size, &resp, in mana_cfg_vport_steering()
716 netdev_err(ndev, "Failed to configure vPort RX: %d\n", err); in mana_cfg_vport_steering()
723 netdev_err(ndev, "vPort RX configuration failed: %d\n", err); in mana_cfg_vport_steering()
728 netdev_err(ndev, "vPort RX configuration failed: 0x%x\n", in mana_cfg_vport_steering()
730 err = -EPROTO; in mana_cfg_vport_steering()
745 struct net_device *ndev = apc->ndev; in mana_create_wq_obj()
752 req.wq_gdma_region = wq_spec->gdma_region; in mana_create_wq_obj()
753 req.cq_gdma_region = cq_spec->gdma_region; in mana_create_wq_obj()
754 req.wq_size = wq_spec->queue_size; in mana_create_wq_obj()
755 req.cq_size = cq_spec->queue_size; in mana_create_wq_obj()
756 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id; in mana_create_wq_obj()
757 req.cq_parent_qid = cq_spec->attached_eq; in mana_create_wq_obj()
759 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_create_wq_obj()
772 err = -EPROTO; in mana_create_wq_obj()
778 err = -EPROTO; in mana_create_wq_obj()
783 wq_spec->queue_index = resp.wq_id; in mana_create_wq_obj()
784 cq_spec->queue_index = resp.cq_id; in mana_create_wq_obj()
796 struct net_device *ndev = apc->ndev; in mana_destroy_wq_obj()
804 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_destroy_wq_obj()
820 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_destroy_eq()
821 struct gdma_queue *eq; in mana_destroy_eq() local
824 if (!ac->eqs) in mana_destroy_eq()
827 for (i = 0; i < gc->max_num_queues; i++) { in mana_destroy_eq()
828 eq = ac->eqs[i].eq; in mana_destroy_eq()
829 if (!eq) in mana_destroy_eq()
832 mana_gd_destroy_queue(gc, eq); in mana_destroy_eq()
835 kfree(ac->eqs); in mana_destroy_eq()
836 ac->eqs = NULL; in mana_destroy_eq()
841 struct gdma_dev *gd = ac->gdma_dev; in mana_create_eq()
842 struct gdma_context *gc = gd->gdma_context; in mana_create_eq()
847 ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq), in mana_create_eq()
849 if (!ac->eqs) in mana_create_eq()
850 return -ENOMEM; in mana_create_eq()
855 spec.eq.callback = NULL; in mana_create_eq()
856 spec.eq.context = ac->eqs; in mana_create_eq()
857 spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE; in mana_create_eq()
859 for (i = 0; i < gc->max_num_queues; i++) { in mana_create_eq()
860 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq); in mana_create_eq()
877 init_completion(&rxq->fence_event); in mana_fence_rq()
881 req.wq_obj_handle = rxq->rxobj; in mana_fence_rq()
883 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_fence_rq()
886 netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n", in mana_fence_rq()
887 rxq->rxq_idx, err); in mana_fence_rq()
893 netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n", in mana_fence_rq()
894 rxq->rxq_idx, err, resp.hdr.status); in mana_fence_rq()
896 err = -EPROTO; in mana_fence_rq()
901 if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) { in mana_fence_rq()
902 netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n", in mana_fence_rq()
903 rxq->rxq_idx); in mana_fence_rq()
904 return -ETIMEDOUT; in mana_fence_rq()
916 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { in mana_fence_rqs()
917 rxq = apc->rxqs[rxq_idx]; in mana_fence_rqs()
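mana_fence_rq() above uses a completion to wait for the RQ fence: init_completion() before issuing the request, complete() from the RX CQE handler (mana_process_rx_cqe(), line 1216 in the listing), and wait_for_completion_timeout() with a 10-second bound. A sketch of that handshake (only the 10 * HZ timeout is taken from the listing; the rest is illustrative):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct demo_rxq {
        struct completion fence_event;
};

static int demo_fence_rxq(struct demo_rxq *rxq)
{
        init_completion(&rxq->fence_event);

        /* ...send the fence request to the device here... */

        if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0)
                return -ETIMEDOUT;

        return 0;
}

/* Called from the CQE handler when the fence completion arrives. */
static void demo_fence_done(struct demo_rxq *rxq)
{
        complete(&rxq->fence_event);
}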
931 used_space_old = wq->head - wq->tail; in mana_move_wq_tail()
932 used_space_new = wq->head - (wq->tail + num_units); in mana_move_wq_tail()
935 return -ERANGE; in mana_move_wq_tail()
937 wq->tail += num_units; in mana_move_wq_tail()
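mana_move_wq_tail() above relies on unsigned head/tail arithmetic: used space is head - tail even across wraparound, and the move is rejected when advancing the tail would make the "used" space grow, i.e. the tail would overshoot the head. A sketch of that check (the exact rejection condition is not shown in the listing, so it is inferred here):

#include <linux/errno.h>
#include <linux/types.h>

struct demo_wq {
        u32 head;
        u32 tail;
};

static int demo_move_wq_tail(struct demo_wq *wq, u32 num_units)
{
        u32 used_space_old = wq->head - wq->tail;
        u32 used_space_new = wq->head - (wq->tail + num_units);

        /* Unsigned wraparound: overshooting the head makes the new
         * "used" space larger than the old one.
         */
        if (used_space_new > used_space_old)
                return -ERANGE;

        wq->tail += num_units;
        return 0;
}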
943 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; in mana_unmap_skb()
944 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_unmap_skb()
945 struct device *dev = gc->dev; in mana_unmap_skb()
948 dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE); in mana_unmap_skb()
950 for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) in mana_unmap_skb()
951 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], in mana_unmap_skb()
957 struct gdma_comp *completions = cq->gdma_comp_buf; in mana_poll_tx_cq()
961 struct mana_txq *txq = cq->txq; in mana_poll_tx_cq()
972 ndev = txq->ndev; in mana_poll_tx_cq()
975 comp_read = mana_gd_poll_cq(cq->gdma_cq, completions, in mana_poll_tx_cq()
988 if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type != in mana_poll_tx_cq()
992 switch (cqe_oob->cqe_hdr.cqe_type) { in mana_poll_tx_cq()
1006 cqe_oob->cqe_hdr.cqe_type); in mana_poll_tx_cq()
1014 cqe_oob->cqe_hdr.cqe_type); in mana_poll_tx_cq()
1018 if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num)) in mana_poll_tx_cq()
1021 skb = skb_dequeue(&txq->pending_skbs); in mana_poll_tx_cq()
1025 wqe_info = (struct gdma_posted_wqe_info *)skb->cb; in mana_poll_tx_cq()
1026 wqe_unit_cnt += wqe_info->wqe_size_in_bu; in mana_poll_tx_cq()
1030 napi_consume_skb(skb, cq->budget); in mana_poll_tx_cq()
1038 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt); in mana_poll_tx_cq()
1040 gdma_wq = txq->gdma_sq; in mana_poll_tx_cq()
1046 net_txq = txq->net_txq; in mana_poll_tx_cq()
1049 /* Ensure checking txq_stopped before apc->port_is_up. */ in mana_poll_tx_cq()
1052 if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) { in mana_poll_tx_cq()
1054 apc->eth_stats.wake_queue++; in mana_poll_tx_cq()
1057 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0) in mana_poll_tx_cq()
1060 cq->work_done = pkt_transmitted; in mana_poll_tx_cq()
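The tail of mana_poll_tx_cq() above wakes a stopped TX queue once completions have freed enough work-queue space, with a read barrier ordering the txq_stopped check against the port-state check. A sketch of that stop/wake handshake (the space threshold parameter stands in for the driver's MAX_TX_WQE_SIZE):

#include <linux/netdevice.h>

static void demo_maybe_wake_txq(struct netdev_queue *net_txq,
                                bool port_is_up, u32 avail_space,
                                u32 min_space)
{
        bool txq_stopped = netif_tx_queue_stopped(net_txq);

        /* Ensure checking txq_stopped before the port state, pairing
         * with the xmit path that stops the queue.
         */
        smp_rmb();

        if (txq_stopped && port_is_up && avail_space >= min_space)
                netif_tx_wake_queue(net_txq);
}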
1069 curr_index = rxq->buf_index++; in mana_post_pkt_rxq()
1070 if (rxq->buf_index == rxq->num_rx_buf) in mana_post_pkt_rxq()
1071 rxq->buf_index = 0; in mana_post_pkt_rxq()
1073 recv_buf_oob = &rxq->rx_oobs[curr_index]; in mana_post_pkt_rxq()
1075 err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req, in mana_post_pkt_rxq()
1076 &recv_buf_oob->wqe_inf); in mana_post_pkt_rxq()
1080 WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1); in mana_post_pkt_rxq()
1091 if (xdp->data_hard_start) { in mana_build_skb()
1092 skb_reserve(skb, xdp->data - xdp->data_hard_start); in mana_build_skb()
1093 skb_put(skb, xdp->data_end - xdp->data); in mana_build_skb()
1105 struct mana_stats_rx *rx_stats = &rxq->stats; in mana_rx_skb()
1106 struct net_device *ndev = rxq->ndev; in mana_rx_skb()
1107 uint pkt_len = cqe->ppi[0].pkt_len; in mana_rx_skb()
1108 u16 rxq_idx = rxq->rxq_idx; in mana_rx_skb()
1115 rxq->rx_cq.work_done++; in mana_rx_skb()
1116 napi = &rxq->rx_cq.napi; in mana_rx_skb()
1119 ++ndev->stats.rx_dropped; in mana_rx_skb()
1125 if (act == XDP_REDIRECT && !rxq->xdp_rc) in mana_rx_skb()
1136 skb->dev = napi->dev; in mana_rx_skb()
1138 skb->protocol = eth_type_trans(skb, ndev); in mana_rx_skb()
1142 if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) { in mana_rx_skb()
1143 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) in mana_rx_skb()
1144 skb->ip_summed = CHECKSUM_UNNECESSARY; in mana_rx_skb()
1147 if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) { in mana_rx_skb()
1148 hash_value = cqe->ppi[0].pkt_hash; in mana_rx_skb()
1150 if (cqe->rx_hashtype & MANA_HASH_L4) in mana_rx_skb()
1156 u64_stats_update_begin(&rx_stats->syncp); in mana_rx_skb()
1157 rx_stats->packets++; in mana_rx_skb()
1158 rx_stats->bytes += pkt_len; in mana_rx_skb()
1161 rx_stats->xdp_tx++; in mana_rx_skb()
1162 u64_stats_update_end(&rx_stats->syncp); in mana_rx_skb()
1175 u64_stats_update_begin(&rx_stats->syncp); in mana_rx_skb()
1176 rx_stats->xdp_drop++; in mana_rx_skb()
1177 u64_stats_update_end(&rx_stats->syncp); in mana_rx_skb()
1180 WARN_ON_ONCE(rxq->xdp_save_page); in mana_rx_skb()
1181 rxq->xdp_save_page = virt_to_page(buf_va); in mana_rx_skb()
1183 ++ndev->stats.rx_dropped; in mana_rx_skb()
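mana_rx_skb() above delivers the hardware's RX offload results to the stack: CHECKSUM_UNNECESSARY when the IP and L4 checksums were verified, and an L3 or L4 flow hash via skb_set_hash() when RX hashing is enabled (the listed code additionally checks that cqe->rx_hashtype is non-zero). A sketch of that step with the CQE flags reduced to booleans:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_rx_offloads(struct sk_buff *skb, struct net_device *ndev,
                             bool iphdr_csum_ok, bool l4_csum_ok,
                             bool l4_hash, u32 hash_value)
{
        if ((ndev->features & NETIF_F_RXCSUM) && iphdr_csum_ok && l4_csum_ok)
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (ndev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, hash_value,
                             l4_hash ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}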
1191 struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data; in mana_process_rx_cqe()
1192 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context; in mana_process_rx_cqe()
1193 struct net_device *ndev = rxq->ndev; in mana_process_rx_cqe()
1195 struct device *dev = gc->dev; in mana_process_rx_cqe()
1201 switch (oob->cqe_hdr.cqe_type) { in mana_process_rx_cqe()
1206 ++ndev->stats.rx_dropped; in mana_process_rx_cqe()
1207 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index]; in mana_process_rx_cqe()
1212 netdev_err(ndev, "RX coalescing is unsupported\n"); in mana_process_rx_cqe()
1216 complete(&rxq->fence_event); in mana_process_rx_cqe()
1220 netdev_err(ndev, "Unknown RX CQE type = %d\n", in mana_process_rx_cqe()
1221 oob->cqe_hdr.cqe_type); in mana_process_rx_cqe()
1225 pktlen = oob->ppi[0].pkt_len; in mana_process_rx_cqe()
1229 netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n", in mana_process_rx_cqe()
1230 rxq->gdma_id, cq->gdma_id, rxq->rxobj); in mana_process_rx_cqe()
1234 curr = rxq->buf_index; in mana_process_rx_cqe()
1235 rxbuf_oob = &rxq->rx_oobs[curr]; in mana_process_rx_cqe()
1236 WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1); in mana_process_rx_cqe()
1239 if (rxq->xdp_save_page) { in mana_process_rx_cqe()
1240 new_page = rxq->xdp_save_page; in mana_process_rx_cqe()
1241 rxq->xdp_save_page = NULL; in mana_process_rx_cqe()
1247 da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize, in mana_process_rx_cqe()
1259 dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize, in mana_process_rx_cqe()
1262 old_buf = rxbuf_oob->buf_va; in mana_process_rx_cqe()
1265 rxbuf_oob->buf_va = new_buf; in mana_process_rx_cqe()
1266 rxbuf_oob->buf_dma_addr = da; in mana_process_rx_cqe()
1267 rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr; in mana_process_rx_cqe()
1275 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu); in mana_process_rx_cqe()
1282 struct gdma_comp *comp = cq->gdma_comp_buf; in mana_poll_rx_cq()
1283 struct mana_rxq *rxq = cq->rxq; in mana_poll_rx_cq()
1286 comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER); in mana_poll_rx_cq()
1289 rxq->xdp_flush = false; in mana_poll_rx_cq()
1296 if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id)) in mana_poll_rx_cq()
1302 if (rxq->xdp_flush) in mana_poll_rx_cq()
1312 WARN_ON_ONCE(cq->gdma_cq != gdma_queue); in mana_cq_handler()
1314 if (cq->type == MANA_CQ_TYPE_RX) in mana_cq_handler()
1319 w = cq->work_done; in mana_cq_handler()
1321 if (w < cq->budget && in mana_cq_handler()
1322 napi_complete_done(&cq->napi, w)) { in mana_cq_handler()
1338 cq->work_done = 0; in mana_poll()
1339 cq->budget = budget; in mana_poll()
1341 w = mana_cq_handler(cq, cq->gdma_cq); in mana_poll()
1350 napi_schedule_irqoff(&cq->napi); in mana_schedule_napi()
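mana_poll() and mana_cq_handler() above follow the standard NAPI contract: record the budget, drain completions, and only re-arm the completion queue after napi_complete_done() succeeds with work below budget. A sketch of that contract with a stub in place of the driver's CQ processing:

#include <linux/minmax.h>
#include <linux/netdevice.h>

/* Stub: in the real driver this drains TX/RX CQEs up to the budget. */
static int demo_process_cq(struct napi_struct *napi, int budget)
{
        return 0;
}

static int demo_napi_poll(struct napi_struct *napi, int budget)
{
        int work_done = demo_process_cq(napi, budget);

        if (work_done < budget && napi_complete_done(napi, work_done)) {
                /* Safe to re-arm the completion queue interrupt here. */
        }

        return min(work_done, budget);
}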
1355 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_cq()
1357 if (!cq->gdma_cq) in mana_deinit_cq()
1360 mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq); in mana_deinit_cq()
1365 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_txq()
1367 if (!txq->gdma_sq) in mana_deinit_txq()
1370 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq); in mana_deinit_txq()
1378 if (!apc->tx_qp) in mana_destroy_txq()
1381 for (i = 0; i < apc->num_queues; i++) { in mana_destroy_txq()
1382 napi = &apc->tx_qp[i].tx_cq.napi; in mana_destroy_txq()
1387 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); in mana_destroy_txq()
1389 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); in mana_destroy_txq()
1391 mana_deinit_txq(apc, &apc->tx_qp[i].txq); in mana_destroy_txq()
1394 kfree(apc->tx_qp); in mana_destroy_txq()
1395 apc->tx_qp = NULL; in mana_destroy_txq()
1401 struct mana_context *ac = apc->ac; in mana_create_txq()
1402 struct gdma_dev *gd = ac->gdma_dev; in mana_create_txq()
1414 apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp), in mana_create_txq()
1416 if (!apc->tx_qp) in mana_create_txq()
1417 return -ENOMEM; in mana_create_txq()
1430 gc = gd->gdma_context; in mana_create_txq()
1432 for (i = 0; i < apc->num_queues; i++) { in mana_create_txq()
1433 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE; in mana_create_txq()
1436 txq = &apc->tx_qp[i].txq; in mana_create_txq()
1438 u64_stats_init(&txq->stats.syncp); in mana_create_txq()
1439 txq->ndev = net; in mana_create_txq()
1440 txq->net_txq = netdev_get_tx_queue(net, i); in mana_create_txq()
1441 txq->vp_offset = apc->tx_vp_offset; in mana_create_txq()
1442 skb_queue_head_init(&txq->pending_skbs); in mana_create_txq()
1448 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq); in mana_create_txq()
1453 cq = &apc->tx_qp[i].tx_cq; in mana_create_txq()
1454 cq->type = MANA_CQ_TYPE_TX; in mana_create_txq()
1456 cq->txq = txq; in mana_create_txq()
1463 spec.cq.parent_eq = ac->eqs[i].eq; in mana_create_txq()
1465 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); in mana_create_txq()
1472 wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region; in mana_create_txq()
1473 wq_spec.queue_size = txq->gdma_sq->queue_size; in mana_create_txq()
1475 cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region; in mana_create_txq()
1476 cq_spec.queue_size = cq->gdma_cq->queue_size; in mana_create_txq()
1478 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; in mana_create_txq()
1480 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ, in mana_create_txq()
1482 &apc->tx_qp[i].tx_object); in mana_create_txq()
1487 txq->gdma_sq->id = wq_spec.queue_index; in mana_create_txq()
1488 cq->gdma_cq->id = cq_spec.queue_index; in mana_create_txq()
1490 txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; in mana_create_txq()
1491 cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; in mana_create_txq()
1493 txq->gdma_txq_id = txq->gdma_sq->id; in mana_create_txq()
1495 cq->gdma_id = cq->gdma_cq->id; in mana_create_txq()
1497 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { in mana_create_txq()
1498 err = -EINVAL; in mana_create_txq()
1502 gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_txq()
1504 netif_napi_add_tx(net, &cq->napi, mana_poll); in mana_create_txq()
1505 napi_enable(&cq->napi); in mana_create_txq()
1507 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); in mana_create_txq()
1520 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_destroy_rxq()
1522 struct device *dev = gc->dev; in mana_destroy_rxq()
1529 napi = &rxq->rx_cq.napi; in mana_destroy_rxq()
1536 xdp_rxq_info_unreg(&rxq->xdp_rxq); in mana_destroy_rxq()
1540 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); in mana_destroy_rxq()
1542 mana_deinit_cq(apc, &rxq->rx_cq); in mana_destroy_rxq()
1544 if (rxq->xdp_save_page) in mana_destroy_rxq()
1545 __free_page(rxq->xdp_save_page); in mana_destroy_rxq()
1547 for (i = 0; i < rxq->num_rx_buf; i++) { in mana_destroy_rxq()
1548 rx_oob = &rxq->rx_oobs[i]; in mana_destroy_rxq()
1550 if (!rx_oob->buf_va) in mana_destroy_rxq()
1553 dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize, in mana_destroy_rxq()
1556 free_page((unsigned long)rx_oob->buf_va); in mana_destroy_rxq()
1557 rx_oob->buf_va = NULL; in mana_destroy_rxq()
1560 if (rxq->gdma_rq) in mana_destroy_rxq()
1561 mana_gd_destroy_queue(gc, rxq->gdma_rq); in mana_destroy_rxq()
1572 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_alloc_rx_wqe()
1574 struct device *dev = gc->dev; in mana_alloc_rx_wqe()
1579 WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE); in mana_alloc_rx_wqe()
1584 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_alloc_rx_wqe()
1585 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_alloc_rx_wqe()
1590 return -ENOMEM; in mana_alloc_rx_wqe()
1592 da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize, in mana_alloc_rx_wqe()
1597 return -ENOMEM; in mana_alloc_rx_wqe()
1600 rx_oob->buf_va = page_to_virt(page); in mana_alloc_rx_wqe()
1601 rx_oob->buf_dma_addr = da; in mana_alloc_rx_wqe()
1603 rx_oob->num_sge = 1; in mana_alloc_rx_wqe()
1604 rx_oob->sgl[0].address = rx_oob->buf_dma_addr; in mana_alloc_rx_wqe()
1605 rx_oob->sgl[0].size = rxq->datasize; in mana_alloc_rx_wqe()
1606 rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey; in mana_alloc_rx_wqe()
1608 rx_oob->wqe_req.sgl = rx_oob->sgl; in mana_alloc_rx_wqe()
1609 rx_oob->wqe_req.num_sge = rx_oob->num_sge; in mana_alloc_rx_wqe()
1610 rx_oob->wqe_req.inline_oob_size = 0; in mana_alloc_rx_wqe()
1611 rx_oob->wqe_req.inline_oob_data = NULL; in mana_alloc_rx_wqe()
1612 rx_oob->wqe_req.flags = 0; in mana_alloc_rx_wqe()
1613 rx_oob->wqe_req.client_data_unit = 0; in mana_alloc_rx_wqe()
1616 MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32); in mana_alloc_rx_wqe()
1629 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_push_wqe()
1630 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_push_wqe()
1632 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req, in mana_push_wqe()
1633 &rx_oob->wqe_inf); in mana_push_wqe()
1635 return -ENOSPC; in mana_push_wqe()
1642 u32 rxq_idx, struct mana_eq *eq, in mana_create_rxq() argument
1645 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_rxq()
1655 gc = gd->gdma_context; in mana_create_rxq()
1662 rxq->ndev = ndev; in mana_create_rxq()
1663 rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE; in mana_create_rxq()
1664 rxq->rxq_idx = rxq_idx; in mana_create_rxq()
1665 rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64); in mana_create_rxq()
1666 rxq->rxobj = INVALID_MANA_HANDLE; in mana_create_rxq()
1680 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq); in mana_create_rxq()
1685 cq = &rxq->rx_cq; in mana_create_rxq()
1686 cq->type = MANA_CQ_TYPE_RX; in mana_create_rxq()
1687 cq->rxq = rxq; in mana_create_rxq()
1694 spec.cq.parent_eq = eq->eq; in mana_create_rxq()
1696 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); in mana_create_rxq()
1702 wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region; in mana_create_rxq()
1703 wq_spec.queue_size = rxq->gdma_rq->queue_size; in mana_create_rxq()
1705 cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region; in mana_create_rxq()
1706 cq_spec.queue_size = cq->gdma_cq->queue_size; in mana_create_rxq()
1708 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; in mana_create_rxq()
1710 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ, in mana_create_rxq()
1711 &wq_spec, &cq_spec, &rxq->rxobj); in mana_create_rxq()
1715 rxq->gdma_rq->id = wq_spec.queue_index; in mana_create_rxq()
1716 cq->gdma_cq->id = cq_spec.queue_index; in mana_create_rxq()
1718 rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; in mana_create_rxq()
1719 cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION; in mana_create_rxq()
1721 rxq->gdma_id = rxq->gdma_rq->id; in mana_create_rxq()
1722 cq->gdma_id = cq->gdma_cq->id; in mana_create_rxq()
1728 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { in mana_create_rxq()
1729 err = -EINVAL; in mana_create_rxq()
1733 gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_rxq()
1735 netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1); in mana_create_rxq()
1737 WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx, in mana_create_rxq()
1738 cq->napi.napi_id)); in mana_create_rxq()
1739 WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, in mana_create_rxq()
1742 napi_enable(&cq->napi); in mana_create_rxq()
1744 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); in mana_create_rxq()
1762 struct mana_context *ac = apc->ac; in mana_add_rx_queues()
1767 for (i = 0; i < apc->num_queues; i++) { in mana_add_rx_queues()
1768 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); in mana_add_rx_queues()
1770 err = -ENOMEM; in mana_add_rx_queues()
1774 u64_stats_init(&rxq->stats.syncp); in mana_add_rx_queues()
1776 apc->rxqs[i] = rxq; in mana_add_rx_queues()
1779 apc->default_rxobj = apc->rxqs[0]->rxobj; in mana_add_rx_queues()
1786 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_destroy_vport()
1790 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { in mana_destroy_vport()
1791 rxq = apc->rxqs[rxq_idx]; in mana_destroy_vport()
1796 apc->rxqs[rxq_idx] = NULL; in mana_destroy_vport()
1801 if (gd->gdma_context->is_pf) in mana_destroy_vport()
1808 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_vport()
1811 apc->default_rxobj = INVALID_MANA_HANDLE; in mana_create_vport()
1813 if (gd->gdma_context->is_pf) { in mana_create_vport()
1819 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell); in mana_create_vport()
1831 apc->indir_table[i] = in mana_rss_table_init()
1832 ethtool_rxfh_indir_default(i, apc->num_queues); in mana_rss_table_init()
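mana_rss_table_init() above seeds the RSS indirection table with the generic round-robin default, which mana_config_rss() then translates into per-queue RX object handles. A sketch of the seeding step (the table size is illustrative, not the driver's actual table-size constant):

#include <linux/ethtool.h>
#include <linux/types.h>

#define DEMO_INDIR_TABLE_SIZE 64       /* illustrative size */

static void demo_rss_table_init(u32 *indir_table, unsigned int num_queues)
{
        int i;

        for (i = 0; i < DEMO_INDIR_TABLE_SIZE; i++)
                indir_table[i] = ethtool_rxfh_indir_default(i, num_queues);
}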
1835 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx, in mana_config_rss() argument
1844 queue_idx = apc->indir_table[i]; in mana_config_rss()
1845 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj; in mana_config_rss()
1849 err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab); in mana_config_rss()
1862 int port_idx = apc->port_idx; in mana_init_port()
1879 if (apc->max_queues > max_queues) in mana_init_port()
1880 apc->max_queues = max_queues; in mana_init_port()
1882 if (apc->num_queues > apc->max_queues) in mana_init_port()
1883 apc->num_queues = apc->max_queues; in mana_init_port()
1885 eth_hw_addr_set(ndev, apc->mac_addr); in mana_init_port()
1890 kfree(apc->rxqs); in mana_init_port()
1891 apc->rxqs = NULL; in mana_init_port()
1898 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_alloc_queues()
1905 err = netif_set_real_num_tx_queues(ndev, apc->num_queues); in mana_alloc_queues()
1913 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE; in mana_alloc_queues()
1915 err = netif_set_real_num_rx_queues(ndev, apc->num_queues); in mana_alloc_queues()
1925 if (gd->gdma_context->is_pf) { in mana_alloc_queues()
1951 if (apc->port_st_save) { in mana_attach()
1959 apc->port_is_up = apc->port_st_save; in mana_attach()
1964 if (apc->port_is_up) in mana_attach()
1975 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_dealloc_queues()
1979 if (apc->port_is_up) in mana_dealloc_queues()
1980 return -EINVAL; in mana_dealloc_queues()
1984 if (gd->gdma_context->is_pf) in mana_dealloc_queues()
1987 /* No packet can be transmitted now since apc->port_is_up is false. in mana_dealloc_queues()
1988 * There is still a tiny chance that mana_poll_tx_cq() can re-enable in mana_dealloc_queues()
1989 * a txq because it may not timely see apc->port_is_up being cleared in mana_dealloc_queues()
1991 * new packets due to apc->port_is_up being false. in mana_dealloc_queues()
1993 * Drain all the in-flight TX packets in mana_dealloc_queues()
1995 for (i = 0; i < apc->num_queues; i++) { in mana_dealloc_queues()
1996 txq = &apc->tx_qp[i].txq; in mana_dealloc_queues()
1998 while (atomic_read(&txq->pending_sends) > 0) in mana_dealloc_queues()
2006 apc->rss_state = TRI_STATE_FALSE; in mana_dealloc_queues()
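mana_dealloc_queues() above drains in-flight TX before tearing the queues down: once apc->port_is_up is false no new packets can be posted, so it simply waits for each txq's pending_sends counter to reach zero. A sketch of that drain loop (the sleep interval is not shown in the listing and is assumed here):

#include <linux/atomic.h>
#include <linux/delay.h>

static void demo_drain_txq(atomic_t *pending_sends)
{
        while (atomic_read(pending_sends) > 0)
                usleep_range(1000, 2000);      /* interval assumed */
}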
2025 apc->port_st_save = apc->port_is_up; in mana_detach()
2026 apc->port_is_up = false; in mana_detach()
2034 if (apc->port_st_save) { in mana_detach()
2051 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_probe_port()
2057 gc->max_num_queues); in mana_probe_port()
2059 return -ENOMEM; in mana_probe_port()
2064 apc->ac = ac; in mana_probe_port()
2065 apc->ndev = ndev; in mana_probe_port()
2066 apc->max_queues = gc->max_num_queues; in mana_probe_port()
2067 apc->num_queues = gc->max_num_queues; in mana_probe_port()
2068 apc->port_handle = INVALID_MANA_HANDLE; in mana_probe_port()
2069 apc->pf_filter_handle = INVALID_MANA_HANDLE; in mana_probe_port()
2070 apc->port_idx = port_idx; in mana_probe_port()
2072 ndev->netdev_ops = &mana_devops; in mana_probe_port()
2073 ndev->ethtool_ops = &mana_ethtool_ops; in mana_probe_port()
2074 ndev->mtu = ETH_DATA_LEN; in mana_probe_port()
2075 ndev->max_mtu = ndev->mtu; in mana_probe_port()
2076 ndev->min_mtu = ndev->mtu; in mana_probe_port()
2077 ndev->needed_headroom = MANA_HEADROOM; in mana_probe_port()
2078 SET_NETDEV_DEV(ndev, gc->dev); in mana_probe_port()
2082 netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE); in mana_probe_port()
2090 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; in mana_probe_port()
2091 ndev->hw_features |= NETIF_F_RXCSUM; in mana_probe_port()
2092 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; in mana_probe_port()
2093 ndev->hw_features |= NETIF_F_RXHASH; in mana_probe_port()
2094 ndev->features = ndev->hw_features; in mana_probe_port()
2095 ndev->vlan_features = 0; in mana_probe_port()
2106 kfree(apc->rxqs); in mana_probe_port()
2107 apc->rxqs = NULL; in mana_probe_port()
2117 struct gdma_context *gc = gd->gdma_context; in mana_probe()
2118 struct mana_context *ac = gd->driver_data; in mana_probe()
2119 struct device *dev = gc->dev; in mana_probe()
2135 return -ENOMEM; in mana_probe()
2137 ac->gdma_dev = gd; in mana_probe()
2138 gd->driver_data = ac; in mana_probe()
2151 ac->num_ports = num_ports; in mana_probe()
2153 if (ac->num_ports != num_ports) { in mana_probe()
2154 dev_err(dev, "The number of vPorts changed: %d->%d\n", in mana_probe()
2155 ac->num_ports, num_ports); in mana_probe()
2156 err = -EPROTO; in mana_probe()
2161 if (ac->num_ports == 0) in mana_probe()
2164 if (ac->num_ports > MAX_PORTS_IN_MANA_DEV) in mana_probe()
2165 ac->num_ports = MAX_PORTS_IN_MANA_DEV; in mana_probe()
2168 for (i = 0; i < ac->num_ports; i++) { in mana_probe()
2169 err = mana_probe_port(ac, i, &ac->ports[i]); in mana_probe()
2174 for (i = 0; i < ac->num_ports; i++) { in mana_probe()
2176 err = mana_attach(ac->ports[i]); in mana_probe()
2191 struct gdma_context *gc = gd->gdma_context; in mana_remove()
2192 struct mana_context *ac = gd->driver_data; in mana_remove()
2193 struct device *dev = gc->dev; in mana_remove()
2198 for (i = 0; i < ac->num_ports; i++) { in mana_remove()
2199 ndev = ac->ports[i]; in mana_remove()
2237 gd->driver_data = NULL; in mana_remove()
2238 gd->gdma_context = NULL; in mana_remove()