Lines Matching +full:rx +full:- +full:eq in drivers/net/ethernet/microsoft/mana/hw_channel.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
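/*
 * Annotation: mana_hwc_get_msg_index() below reserves a slot for an inflight
 * HWC message. The semaphore bounds the number of concurrent messages, and
 * the bitmap (under r->lock) hands out the first free message index.
 */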
/* in mana_hwc_get_msg_index() */
        struct gdma_resource *r = &hwc->inflight_msg_res;

        down(&hwc->sema);

        spin_lock_irqsave(&r->lock, flags);

        index = find_first_zero_bit(hwc->inflight_msg_res.map,
                                    hwc->inflight_msg_res.size);

        bitmap_set(hwc->inflight_msg_res.map, index, 1);

        spin_unlock_irqrestore(&r->lock, flags);
/* in mana_hwc_put_msg_index() */
        struct gdma_resource *r = &hwc->inflight_msg_res;

        spin_lock_irqsave(&r->lock, flags);
        bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
        spin_unlock_irqrestore(&r->lock, flags);

        up(&hwc->sema);
/* in mana_hwc_verify_resp_msg() */
                return -EPROTO;

        if (resp_len > caller_ctx->output_buflen)
                return -EPROTO;
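/*
 * Annotation: mana_hwc_handle_resp() looks up the caller context by the
 * hwc_msg_id echoed in the response, copies the response into the caller's
 * output buffer, and wakes the waiter via complete().
 */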
/* in mana_hwc_handle_resp() */
        if (!test_bit(resp_msg->response.hwc_msg_id,
                      hwc->inflight_msg_res.map)) {
                dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
                        resp_msg->response.hwc_msg_id);
                return;
        }

        ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;

        ctx->status_code = resp_msg->status;

        memcpy(ctx->output_buf, resp_msg, resp_len);

        ctx->error = err;
        complete(&ctx->comp_event);
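/*
 * Annotation: a receive WQE is a single SGE pointing at one of the
 * preallocated, fixed-size DMA message buffers; mana_gd_post_and_ring()
 * queues it and rings the doorbell, handing the buffer (back) to hardware.
 */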
/* in mana_hwc_post_rx_wqe() */
        struct device *dev = hwc_rxq->hwc->dev;

        sge = &req->sge;
        sge->address = (u64)req->buf_sge_addr;
        sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
        sge->size = req->buf_len;

        memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
        req->wqe_req.sgl = sge;
        req->wqe_req.num_sge = 1;
        req->wqe_req.client_data_unit = 0;

        err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
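/*
 * Annotation: the lines below are the matched fragments of a larger switch.
 * During bring-up the hardware reports channel parameters via init EQEs:
 * the EQ id and doorbell, the CQ/RQ/SQ ids, queue depth, max request and
 * response sizes, max CQ count, PD id, the GPA memory key, and (for PFs)
 * the destination vRQ/vRCQ ids; a final EQE completes hwc_init_eqe_comp.
 */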
/* in mana_hwc_init_event_handler() */
        struct gdma_dev *gd = hwc->gdma_dev;

        switch (event->type) {

        eq_db.as_uint32 = event->details[0];
        hwc->cq->gdma_eq->id = eq_db.eq_id;
        gd->doorbell = eq_db.doorbell;

        type_data.as_uint32 = event->details[0];

        hwc->cq->gdma_cq->id = val;

        hwc->rxq->gdma_wq->id = val;

        hwc->txq->gdma_wq->id = val;

        hwc->hwc_init_q_depth_max = (u16)val;

        hwc->hwc_init_max_req_msg_size = val;

        hwc->hwc_init_max_resp_msg_size = val;

        gd->gdma_context->max_num_cqs = val;

        hwc->gdma_dev->pdid = val;

        hwc->rxq->msg_buf->gpa_mkey = val;
        hwc->txq->msg_buf->gpa_mkey = val;

        hwc->pf_dest_vrq_id = val;

        hwc->pf_dest_vrcq_id = val;

        complete(&hwc->hwc_init_eqe_comp);
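/*
 * Annotation: the RX path maps a completion back to its work request: the
 * SGE's DMA address minus the ring's base address, divided by the fixed
 * per-message buffer size, yields the request index, whose virtual address
 * holds the response header.
 */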
/* in mana_hwc_rx_event_handler() */
        struct hwc_wq *hwc_rxq = hwc->rxq;

        if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
                return;

        rq = hwc_rxq->gdma_wq;
        wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);

        sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

        /* Select the RX work request for virtual address and for reposting. */
        rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
        rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

        rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
        resp = (struct gdma_resp_hdr *)rx_req->buf_va;

        if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
                dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
                        resp->response.hwc_msg_id);
                return;
        }

        mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
/* in mana_hwc_tx_event_handler() */
        struct hwc_wq *hwc_txq = hwc->txq;

        WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
/* in mana_hwc_create_gdma_wq() */
                return -EINVAL;

        return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
/* in mana_hwc_create_gdma_cq() */
        return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
/* in mana_hwc_create_gdma_eq() */
        spec.eq.context = ctx;
        spec.eq.callback = cb;
        spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;

        return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
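/*
 * Annotation: the CQ callback drains up to queue_depth completions into
 * comp_buf via mana_gd_poll_cq() and dispatches each to the TX or RX event
 * handler registered in mana_hwc_create_cq().
 */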
/* in mana_hwc_comp_event() */
        WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);

        completions = hwc_cq->comp_buf;
        comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
        WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);

                        hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,

                        hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
/* in mana_hwc_destroy_cq() */
        kfree(hwc_cq->comp_buf);

        if (hwc_cq->gdma_cq)
                mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);

        if (hwc_cq->gdma_eq)
                mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);
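/*
 * Annotation: CQ setup is ordered: create the EQ, create the CQ bound to
 * that EQ, then allocate the completion buffer; on failure the partial
 * state is unwound through mana_hwc_destroy_cq() above.
 */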
/* in mana_hwc_create_cq() */
        struct gdma_queue *eq, *cq;

                return -ENOMEM;

        err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
                dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
        hwc_cq->gdma_eq = eq;

        err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
                                      eq, &cq);
                dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
        hwc_cq->gdma_cq = cq;

                err = -ENOMEM;

        hwc_cq->hwc = hwc;
        hwc_cq->comp_buf = comp_buf;
        hwc_cq->queue_depth = q_depth;
        hwc_cq->rx_event_handler = rx_ev_hdlr;
        hwc_cq->rx_event_ctx = rx_ev_ctx;
        hwc_cq->tx_event_handler = tx_ev_hdlr;
        hwc_cq->tx_event_ctx = tx_ev_ctx;

        mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
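/*
 * Annotation: one contiguous DMA region backs all message buffers; it is
 * sliced into q_depth chunks of max_msg_size bytes, and each work request
 * records its chunk's virtual address, DMA (SGE) address, and length.
 */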
/* in mana_hwc_alloc_dma_buf() */
        struct gdma_context *gc = hwc->gdma_dev->gdma_context;

                return -ENOMEM;

        dma_buf->num_reqs = q_depth;

        gmi = &dma_buf->mem_info;
                dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);

        virt_addr = dma_buf->mem_info.virt_addr;
        base_pa = (u8 *)dma_buf->mem_info.dma_handle;

                hwc_wr = &dma_buf->reqs[i];

                hwc_wr->buf_va = virt_addr + i * max_msg_size;
                hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;

                hwc_wr->buf_len = max_msg_size;
/* in mana_hwc_dealloc_dma_buf() */
        mana_gd_free_memory(&dma_buf->mem_info);
/* in mana_hwc_destroy_wq() */
        mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

        if (hwc_wq->gdma_wq)
                mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
                                      hwc_wq->gdma_wq);
/* in mana_hwc_create_wq() */
                return -ENOMEM;

        hwc_wq->hwc = hwc;
        hwc_wq->gdma_wq = queue;
        hwc_wq->queue_depth = q_depth;
        hwc_wq->hwc_cq = hwc_cq;

        err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
                                     &hwc_wq->msg_buf);
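/*
 * Annotation: a send WQE carries its routing as inline OOB data (struct
 * hwc_tx_oob: destination vRQ/vRCQ and the source SQ/SCQ ids) plus a single
 * SGE describing the request message.
 */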
/* in mana_hwc_post_tx_wqe() */
        struct device *dev = hwc_txq->hwc->dev;

        if (req->msg_size == 0 || req->msg_size > req->buf_len) {
                dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
                        req->msg_size, req->buf_len);
                return -EINVAL;
        }

        tx_oob = &req->tx_oob;

        tx_oob->vrq_id = dest_virt_rq_id;
        tx_oob->dest_vfid = 0;
        tx_oob->vrcq_id = dest_virt_rcq_id;
        tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
        tx_oob->loopback = false;
        tx_oob->lso_override = false;
        tx_oob->dest_pf = dest_pf;
        tx_oob->vsq_id = hwc_txq->gdma_wq->id;

        sge = &req->sge;
        sge->address = (u64)req->buf_sge_addr;
        sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
        sge->size = req->msg_size;

        memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
        req->wqe_req.sgl = sge;
        req->wqe_req.num_sge = 1;
        req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
        req->wqe_req.inline_oob_data = tx_oob;
        req->wqe_req.client_data_unit = 0;

        err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
/* in mana_hwc_init_inflight_msg() */
        sema_init(&hwc->sema, num_msg);

        err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
        if (err)
                dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
/* in mana_hwc_test_channel() */
        struct gdma_context *gc = hwc->gdma_dev->gdma_context;
        struct hwc_wq *hwc_rxq = hwc->rxq;

                req = &hwc_rxq->msg_buf->reqs[i];

                return -ENOMEM;

        hwc->caller_ctx = ctx;

        return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
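/*
 * Annotation: mana_hwc_establish_channel() hands the EQ/CQ/RQ/SQ DMA
 * addresses and the EQ's MSI-X index to the hardware over the shared-memory
 * channel, then waits up to 60 s for the init-done EQE signaled by
 * mana_hwc_init_event_handler(); cq_table is then sized from the reported
 * max_num_cqs.
 */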
/* in mana_hwc_establish_channel() */
        struct hw_channel_context *hwc = gc->hwc.driver_data;
        struct gdma_queue *rq = hwc->rxq->gdma_wq;
        struct gdma_queue *sq = hwc->txq->gdma_wq;
        struct gdma_queue *eq = hwc->cq->gdma_eq;
        struct gdma_queue *cq = hwc->cq->gdma_cq;

        init_completion(&hwc->hwc_init_eqe_comp);

        err = mana_smc_setup_hwc(&gc->shm_channel, false,
                                 eq->mem_info.dma_handle,
                                 cq->mem_info.dma_handle,
                                 rq->mem_info.dma_handle,
                                 sq->mem_info.dma_handle,
                                 eq->eq.msix_index);

        if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
                return -ETIMEDOUT;

        *q_depth = hwc->hwc_init_q_depth_max;
        *max_req_msg_size = hwc->hwc_init_max_req_msg_size;
        *max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

        if (WARN_ON(cq->id >= gc->max_num_cqs))
                return -EPROTO;

        gc->cq_table = vzalloc(gc->max_num_cqs * sizeof(struct gdma_queue *));
        if (!gc->cq_table)
                return -ENOMEM;

        gc->cq_table[cq->id] = cq;
/* in mana_hwc_init_queues() */
        err = mana_hwc_create_cq(hwc, q_depth * 2,
                                 mana_hwc_init_event_handler, hwc,
                                 mana_hwc_rx_event_handler, hwc,
                                 mana_hwc_tx_event_handler, hwc, &hwc->cq);
                dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);

        err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
                                 hwc->cq, &hwc->rxq);
                dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);

        err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
                                 hwc->cq, &hwc->txq);
                dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);

        hwc->num_inflight_msg = q_depth;
        hwc->max_req_msg_size = max_req_msg_size;
/* in mana_hwc_create_channel() */
        struct gdma_dev *gd = &gc->hwc;

                return -ENOMEM;

        gd->gdma_context = gc;
        gd->driver_data = hwc;
        hwc->gdma_dev = gd;
        hwc->dev = gc->dev;

        gd->dev_id.as_uint32 = 0;
        gd->dev_id.type = GDMA_DEVICE_HWC;

        gd->pdid = INVALID_PDID;
        gd->doorbell = INVALID_DOORBELL;

                dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);

                dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);

        err = mana_hwc_test_channel(gc->hwc.driver_data,
                dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
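/*
 * Annotation: teardown mirrors bring-up in reverse. gc->max_num_cqs doubles
 * as a "channel came up" flag: only then is the shared-memory teardown sent.
 * After that the caller contexts, TX/RX work queues, CQ/EQ pair,
 * inflight-message bitmap, and cq_table are released.
 */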
/* in mana_hwc_destroy_channel() */
        struct hw_channel_context *hwc = gc->hwc.driver_data;

        /* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
         * non-zero, the HWC worked and we should tear down the HWC here.
         */
        if (gc->max_num_cqs > 0) {
                mana_smc_teardown_hwc(&gc->shm_channel, false);
                gc->max_num_cqs = 0;
        }

        kfree(hwc->caller_ctx);
        hwc->caller_ctx = NULL;

        if (hwc->txq)
                mana_hwc_destroy_wq(hwc, hwc->txq);

        if (hwc->rxq)
                mana_hwc_destroy_wq(hwc, hwc->rxq);

        if (hwc->cq)
                mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);

        mana_gd_free_res_map(&hwc->inflight_msg_res);

        hwc->num_inflight_msg = 0;

        hwc->gdma_dev->doorbell = INVALID_DOORBELL;
        hwc->gdma_dev->pdid = INVALID_PDID;

        gc->hwc.driver_data = NULL;
        gc->hwc.gdma_context = NULL;

        vfree(gc->cq_table);
        gc->cq_table = NULL;
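/*
 * Annotation: a synchronous request/response: reserve a message index, stash
 * the caller's response buffer in the per-message context, stamp hwc_msg_id
 * into the request header, post the send WQE (PFs route to the reported
 * vRQ/vRCQ), then sleep up to 30 s on comp_event. ctx->error reports
 * transport failures; a non-zero ctx->status_code is a hardware-level NAK.
 */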
/* in mana_hwc_send_request() */
        struct gdma_context *gc = hwc->gdma_dev->gdma_context;
        struct hwc_wq *txq = hwc->txq;

        tx_wr = &txq->msg_buf->reqs[msg_id];

        if (req_len > tx_wr->buf_len) {
                dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len,
                        tx_wr->buf_len);
                err = -EINVAL;
                goto out;
        }

        ctx = hwc->caller_ctx + msg_id;
        ctx->output_buf = resp;
        ctx->output_buflen = resp_len;

        req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;

        req_msg->req.hwc_msg_id = msg_id;

        tx_wr->msg_size = req_len;

        if (gc->is_pf) {
                dest_vrq = hwc->pf_dest_vrq_id;
                dest_vrcq = hwc->pf_dest_vrcq_id;
        }

                dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);

        if (!wait_for_completion_timeout(&ctx->comp_event, 30 * HZ)) {
                dev_err(hwc->dev, "HWC: Request timed out!\n");
                err = -ETIMEDOUT;
                goto out;
        }

        if (ctx->error) {
                err = ctx->error;
                goto out;
        }

        if (ctx->status_code) {
                dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
                        ctx->status_code);
                err = -EPROTO;
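/*
 * For reference, a minimal caller sketch. This is an illustration, not one
 * of the matched lines: it assumes the mana_hwc_send_request() prototype
 * from the driver's hw_channel.h and mirrors how the GDMA layer wraps the
 * channel for other driver code.
 */
static int example_hwc_call(struct gdma_context *gc, u32 req_len,
                            const void *req, u32 resp_len, void *resp)
{
        /* The HWC context is stored in gc->hwc.driver_data by
         * mana_hwc_create_channel() (see above).
         */
        struct hw_channel_context *hwc = gc->hwc.driver_data;

        /* Blocks until the response is copied into resp or until the
         * 30 s completion timeout in mana_hwc_send_request() fires.
         */
        return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
}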