Lines Matching +full:cmdq +full:- +full:sync
2 * Broadcom NetXtreme-E RoCE driver.
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
58 struct bnxt_qplib_cmdq_ctx *cmdq; in __wait_for_resp() local
62 cmdq = &rcfw->cmdq; in __wait_for_resp()
63 cbit = cookie % rcfw->cmdq_depth; in __wait_for_resp()
64 rc = wait_event_timeout(cmdq->waitq, in __wait_for_resp()
65 !test_bit(cbit, cmdq->cmdq_bitmap), in __wait_for_resp()
67 return rc ? 0 : -ETIMEDOUT; in __wait_for_resp()
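/* Note on the __wait_for_resp() fragment above: completion is signalled by the
 * CREQ handler clearing this cookie's bit in cmdq->cmdq_bitmap (see
 * bnxt_qplib_process_qp_event() further down), so the wait condition is simply
 * "bit no longer set", with -ETIMEDOUT on expiry.
 */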
73 struct bnxt_qplib_cmdq_ctx *cmdq; in __block_for_resp() local
76 cmdq = &rcfw->cmdq; in __block_for_resp()
77 cbit = cookie % rcfw->cmdq_depth; in __block_for_resp()
78 if (!test_bit(cbit, cmdq->cmdq_bitmap)) in __block_for_resp()
82 bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet); in __block_for_resp()
83 } while (test_bit(cbit, cmdq->cmdq_bitmap) && --count); in __block_for_resp()
85 return count ? 0 : -ETIMEDOUT; in __block_for_resp()
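/* __block_for_resp() is the polling counterpart: instead of sleeping it calls
 * the CREQ service routine directly and re-tests the same cmdq_bitmap bit
 * until it clears or the retry count runs out.
 */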
91 struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq; in __send_message() local
92 struct bnxt_qplib_hwq *hwq = &cmdq->hwq; in __send_message()
102 pdev = rcfw->pdev; in __send_message()
104 opcode = req->opcode; in __send_message()
105 if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) && in __send_message()
109 dev_err(&pdev->dev, in __send_message()
111 return -EINVAL; in __send_message()
114 if (test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) && in __send_message()
116 dev_err(&pdev->dev, "RCFW already initialized!\n"); in __send_message()
117 return -EINVAL; in __send_message()
120 if (test_bit(FIRMWARE_TIMED_OUT, &cmdq->flags)) in __send_message()
121 return -ETIMEDOUT; in __send_message()
123 /* Cmdq entries are in 16-byte units; each request can consume 1 or more in __send_message()
126 spin_lock_irqsave(&hwq->lock, flags); in __send_message()
127 if (req->cmd_size >= HWQ_FREE_SLOTS(hwq)) { in __send_message()
128 dev_err(&pdev->dev, "RCFW: CMDQ is full!\n"); in __send_message()
129 spin_unlock_irqrestore(&hwq->lock, flags); in __send_message()
130 return -EAGAIN; in __send_message()
134 cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE; in __send_message()
135 cbit = cookie % rcfw->cmdq_depth; in __send_message()
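/* Worked example (illustrative, assuming BNXT_QPLIB_CMDQE_MAX_CNT_256 below is
 * 256 slots): a sequence number of 0x105 (assuming it survives the cookie
 * mask) lands in bitmap slot 0x105 % 256 = 5. The completion path recomputes
 * the same "cookie % cmdq_depth" to find the matching crsqe entry.
 */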
139 set_bit(cbit, cmdq->cmdq_bitmap); in __send_message()
140 req->cookie = cpu_to_le16(cookie); in __send_message()
141 crsqe = &rcfw->crsqe_tbl[cbit]; in __send_message()
142 if (crsqe->resp) { in __send_message()
143 spin_unlock_irqrestore(&hwq->lock, flags); in __send_message()
144 return -EBUSY; in __send_message()
147 size = req->cmd_size; in __send_message()
148 /* change the cmd_size to the number of 16-byte cmdq units. in __send_message()
149 * req->cmd_size is modified here in __send_message()
154 crsqe->resp = (struct creq_qp_event *)resp; in __send_message()
155 crsqe->resp->cookie = req->cookie; in __send_message()
156 crsqe->req_size = req->cmd_size; in __send_message()
157 if (req->resp_size && sb) { in __send_message()
160 req->resp_addr = cpu_to_le64(sbuf->dma_addr); in __send_message()
161 req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) / in __send_message()
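/* The rounding above is a ceiling division: assuming BNXT_QPLIB_CMDQE_UNITS is
 * the 16-byte slot size noted earlier, a 200-byte side buffer needs
 * (200 + 16 - 1) / 16 = 13 cmdq units.
 */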
167 /* Locate the next cmdq slot */ in __send_message()
168 sw_prod = HWQ_CMP(hwq->prod, hwq); in __send_message()
171 dev_err(&pdev->dev, in __send_message()
175 /* Copy a segment of the req cmd to the cmdq */ in __send_message()
179 size -= min_t(u32, size, sizeof(*cmdqe)); in __send_message()
180 hwq->prod++; in __send_message()
182 cmdq->seq_num++; in __send_message()
184 cmdq_prod = hwq->prod; in __send_message()
185 if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) { in __send_message()
192 clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags); in __send_message()
195 /* ring CMDQ DB */ in __send_message()
197 writel(cmdq_prod, cmdq->cmdq_mbox.prod); in __send_message()
198 writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db); in __send_message()
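/* Publishing the command: the (possibly flag-encoded) producer index is
 * written to the mailbox prod register first, then RCFW_CMDQ_TRIG_VAL is
 * written to the trigger register to ring the CMDQ doorbell.
 */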
200 spin_unlock_irqrestore(&hwq->lock, flags); in __send_message()
216 if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags)) in bnxt_qplib_rcfw_send_message()
220 opcode = req->opcode; in bnxt_qplib_rcfw_send_message()
222 cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE; in bnxt_qplib_rcfw_send_message()
226 if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) { in bnxt_qplib_rcfw_send_message()
228 dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x send failed\n", in bnxt_qplib_rcfw_send_message()
234 } while (retry_cnt--); in bnxt_qplib_rcfw_send_message()
242 dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x timedout (%d)msec\n", in bnxt_qplib_rcfw_send_message()
244 set_bit(FIRMWARE_TIMED_OUT, &rcfw->cmdq.flags); in bnxt_qplib_rcfw_send_message()
248 if (evnt->status) { in bnxt_qplib_rcfw_send_message()
250 dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n", in bnxt_qplib_rcfw_send_message()
251 cookie, opcode, evnt->status); in bnxt_qplib_rcfw_send_message()
252 rc = -EFAULT; in bnxt_qplib_rcfw_send_message()
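/* bnxt_qplib_rcfw_send_message() wraps __send_message(): it bails out early if
 * the device is detached, retries a bounded number of times on -EAGAIN/-EBUSY,
 * then waits (sleeping or polling, via the helpers above) for the matching
 * CREQ event. A timeout sets FIRMWARE_TIMED_OUT; a completion with a non-zero
 * status is reported and returned as -EFAULT.
 */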
263 switch (func_event->event) { in bnxt_qplib_process_func_event()
294 return -EINVAL; in bnxt_qplib_process_func_event()
297 rc = rcfw->creq.aeq_handler(rcfw, (void *)func_event, NULL); in bnxt_qplib_process_func_event()
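/* Function-level async events are validated by the switch above (unknown
 * events return -EINVAL) and then forwarded to the registered aeq_handler.
 */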
305 struct bnxt_qplib_hwq *hwq = &rcfw->cmdq.hwq; in bnxt_qplib_process_qp_event()
316 pdev = rcfw->pdev; in bnxt_qplib_process_qp_event()
317 switch (qp_event->event) { in bnxt_qplib_process_qp_event()
320 qp_id = le32_to_cpu(err_event->xid); in bnxt_qplib_process_qp_event()
322 qp = rcfw->qp_tbl[tbl_indx].qp_handle; in bnxt_qplib_process_qp_event()
323 dev_dbg(&pdev->dev, "Received QP error notification\n"); in bnxt_qplib_process_qp_event()
324 dev_dbg(&pdev->dev, in bnxt_qplib_process_qp_event()
326 qp_id, err_event->req_err_state_reason, in bnxt_qplib_process_qp_event()
327 err_event->res_err_state_reason); in bnxt_qplib_process_qp_event()
331 rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp); in bnxt_qplib_process_qp_event()
336 * cmdq->lock needs to be acquired to synchronize in bnxt_qplib_process_qp_event()
338 * is always called with creq->lock held. Using in bnxt_qplib_process_qp_event()
343 spin_lock_irqsave_nested(&hwq->lock, flags, in bnxt_qplib_process_qp_event()
345 cookie = le16_to_cpu(qp_event->cookie); in bnxt_qplib_process_qp_event()
346 mcookie = qp_event->cookie; in bnxt_qplib_process_qp_event()
349 cbit = cookie % rcfw->cmdq_depth; in bnxt_qplib_process_qp_event()
350 crsqe = &rcfw->crsqe_tbl[cbit]; in bnxt_qplib_process_qp_event()
351 if (crsqe->resp && in bnxt_qplib_process_qp_event()
352 crsqe->resp->cookie == mcookie) { in bnxt_qplib_process_qp_event()
353 memcpy(crsqe->resp, qp_event, sizeof(*qp_event)); in bnxt_qplib_process_qp_event()
354 crsqe->resp = NULL; in bnxt_qplib_process_qp_event()
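/* Matching path: the CREQ event payload is copied over the waiter's response
 * buffer and crsqe->resp is cleared. The lines below handle the non-matching
 * case by logging a cookie mismatch or collision instead.
 */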
356 if (crsqe->resp && crsqe->resp->cookie) in bnxt_qplib_process_qp_event()
357 dev_err(&pdev->dev, in bnxt_qplib_process_qp_event()
359 crsqe->resp ? "mismatch" : "collision", in bnxt_qplib_process_qp_event()
360 crsqe->resp ? crsqe->resp->cookie : 0, in bnxt_qplib_process_qp_event()
363 if (!test_and_clear_bit(cbit, rcfw->cmdq.cmdq_bitmap)) in bnxt_qplib_process_qp_event()
364 dev_warn(&pdev->dev, in bnxt_qplib_process_qp_event()
366 hwq->cons += crsqe->req_size; in bnxt_qplib_process_qp_event()
367 crsqe->req_size = 0; in bnxt_qplib_process_qp_event()
370 wake_up(&rcfw->cmdq.waitq); in bnxt_qplib_process_qp_event()
371 spin_unlock_irqrestore(&hwq->lock, flags); in bnxt_qplib_process_qp_event()
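/* On a successful match the cmdq_bitmap bit is cleared, the CMDQ consumer
 * index advances by the request size, and any sleeper in __wait_for_resp() is
 * woken. Below is an editor's sketch (not driver code) of that cookie/bitmap
 * handshake in miniature; all demo_* names are invented for illustration, and
 * the real driver does this across the submit, completion and waiter paths
 * under hwq->lock.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_DEPTH 8u

unsigned long demo_bitmap;                      /* one bit per outstanding cmd */

unsigned int demo_submit(uint16_t cookie)       /* __send_message() side       */
{
	unsigned int slot = cookie % DEMO_DEPTH;

	demo_bitmap |= 1ul << slot;             /* mark command outstanding    */
	return slot;
}

void demo_complete(uint16_t cookie)             /* CREQ completion side        */
{
	demo_bitmap &= ~(1ul << (cookie % DEMO_DEPTH));
}

bool demo_done(unsigned int slot)               /* __wait_for_resp() condition */
{
	return !(demo_bitmap & (1ul << slot));
}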
376 /* SP - CREQ Completion handlers */
380 struct bnxt_qplib_creq_ctx *creq = &rcfw->creq; in bnxt_qplib_service_creq()
382 struct bnxt_qplib_hwq *hwq = &creq->hwq; in bnxt_qplib_service_creq()
388 spin_lock_irqsave(&hwq->lock, flags); in bnxt_qplib_service_creq()
389 raw_cons = hwq->cons; in bnxt_qplib_service_creq()
393 if (!CREQ_CMP_VALID(creqe, raw_cons, hwq->max_elements)) in bnxt_qplib_service_creq()
400 type = creqe->type & CREQ_BASE_TYPE_MASK; in bnxt_qplib_service_creq()
405 creq->stats.creq_qp_event_processed++; in bnxt_qplib_service_creq()
410 creq->stats.creq_func_event_processed++; in bnxt_qplib_service_creq()
412 dev_warn(&rcfw->pdev->dev, in bnxt_qplib_service_creq()
417 dev_warn(&rcfw->pdev->dev, in bnxt_qplib_service_creq()
423 budget--; in bnxt_qplib_service_creq()
426 if (hwq->cons != raw_cons) { in bnxt_qplib_service_creq()
427 hwq->cons = raw_cons; in bnxt_qplib_service_creq()
428 bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, in bnxt_qplib_service_creq()
429 rcfw->res->cctx, true); in bnxt_qplib_service_creq()
431 spin_unlock_irqrestore(&hwq->lock, flags); in bnxt_qplib_service_creq()
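/* bnxt_qplib_service_creq() drains the CREQ under a budget: it consumes valid
 * entries, dispatches them by type (QP event vs. function event), and only
 * then writes the new consumer index and rings the doorbell once. Below is an
 * editor's sketch (not driver code) of that budgeted-polling shape; the
 * demo_* names are invented and the validity test stands in for
 * CREQ_CMP_VALID().
 */
#include <stdint.h>

struct demo_ring {
	uint32_t cons;                  /* consumer index               */
	uint32_t max_elements;          /* ring size                    */
	uint8_t *elems;                 /* 1 = valid entry, 0 = not yet */
};

/* Process at most 'budget' valid entries and return the new consumer value;
 * the caller stores it and rings the doorbell once, as the driver does.
 */
uint32_t demo_service_ring(struct demo_ring *ring, int budget)
{
	uint32_t raw_cons = ring->cons;

	while (budget > 0) {
		uint32_t idx = raw_cons % ring->max_elements;

		if (!ring->elems[idx])          /* no more valid entries */
			break;
		ring->elems[idx] = 0;           /* "handle" the entry    */
		raw_cons++;
		budget--;
	}
	return raw_cons;
}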
441 creq = &rcfw->creq; in bnxt_qplib_creq_irq()
442 hwq = &creq->hwq; in bnxt_qplib_creq_irq()
444 sw_cons = HWQ_CMP(hwq->cons, hwq); in bnxt_qplib_creq_irq()
447 tasklet_schedule(&creq->creq_tasklet); in bnxt_qplib_creq_irq()
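/* The hard IRQ handler itself does very little: it defers CREQ processing to
 * the tasklet scheduled here, so bnxt_qplib_service_creq() runs in softirq
 * context (it is wired up via tasklet_setup() in bnxt_qplib_rcfw_start_irq()).
 */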
466 clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags); in bnxt_qplib_deinit_rcfw()
480 /* Supply (log-base-2-of-host-page-size - base-page-shift) in bnxt_qplib_init_rcfw()
483 req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT - in bnxt_qplib_init_rcfw()
494 if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx)) in bnxt_qplib_init_rcfw()
497 lvl = ctx->qpc_tbl.level; in bnxt_qplib_init_rcfw()
498 pgsz = bnxt_qplib_base_pg_size(&ctx->qpc_tbl); in bnxt_qplib_init_rcfw()
501 lvl = ctx->mrw_tbl.level; in bnxt_qplib_init_rcfw()
502 pgsz = bnxt_qplib_base_pg_size(&ctx->mrw_tbl); in bnxt_qplib_init_rcfw()
505 lvl = ctx->srqc_tbl.level; in bnxt_qplib_init_rcfw()
506 pgsz = bnxt_qplib_base_pg_size(&ctx->srqc_tbl); in bnxt_qplib_init_rcfw()
509 lvl = ctx->cq_tbl.level; in bnxt_qplib_init_rcfw()
510 pgsz = bnxt_qplib_base_pg_size(&ctx->cq_tbl); in bnxt_qplib_init_rcfw()
513 lvl = ctx->tim_tbl.level; in bnxt_qplib_init_rcfw()
514 pgsz = bnxt_qplib_base_pg_size(&ctx->tim_tbl); in bnxt_qplib_init_rcfw()
517 lvl = ctx->tqm_ctx.pde.level; in bnxt_qplib_init_rcfw()
518 pgsz = bnxt_qplib_base_pg_size(&ctx->tqm_ctx.pde); in bnxt_qplib_init_rcfw()
522 cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]); in bnxt_qplib_init_rcfw()
524 cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]); in bnxt_qplib_init_rcfw()
526 cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]); in bnxt_qplib_init_rcfw()
528 cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]); in bnxt_qplib_init_rcfw()
530 cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]); in bnxt_qplib_init_rcfw()
532 cpu_to_le64(ctx->tqm_ctx.pde.pbl[PBL_LVL_0].pg_map_arr[0]); in bnxt_qplib_init_rcfw()
534 req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements); in bnxt_qplib_init_rcfw()
535 req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements); in bnxt_qplib_init_rcfw()
536 req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements); in bnxt_qplib_init_rcfw()
537 req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements); in bnxt_qplib_init_rcfw()
540 req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf); in bnxt_qplib_init_rcfw()
541 req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf); in bnxt_qplib_init_rcfw()
542 req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf); in bnxt_qplib_init_rcfw()
543 req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf); in bnxt_qplib_init_rcfw()
544 req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf); in bnxt_qplib_init_rcfw()
547 req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); in bnxt_qplib_init_rcfw()
552 set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags); in bnxt_qplib_init_rcfw()
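/* bnxt_qplib_init_rcfw() hands firmware the backing-store geometry for each
 * context table (QPC, MRW, SRQC, CQ, TIM, TQM): PBL level and base page size,
 * the level-0 page-table base address, element counts, the per-VF resource
 * limits and the stats context id, then marks the channel initialized with
 * FIRMWARE_INITIALIZED_FLAG.
 */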
558 bitmap_free(rcfw->cmdq.cmdq_bitmap); in bnxt_qplib_free_rcfw_channel()
559 kfree(rcfw->qp_tbl); in bnxt_qplib_free_rcfw_channel()
560 kfree(rcfw->crsqe_tbl); in bnxt_qplib_free_rcfw_channel()
561 bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq); in bnxt_qplib_free_rcfw_channel()
562 bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq); in bnxt_qplib_free_rcfw_channel()
563 rcfw->pdev = NULL; in bnxt_qplib_free_rcfw_channel()
573 struct bnxt_qplib_cmdq_ctx *cmdq; in bnxt_qplib_alloc_rcfw_channel() local
576 rcfw->pdev = res->pdev; in bnxt_qplib_alloc_rcfw_channel()
577 cmdq = &rcfw->cmdq; in bnxt_qplib_alloc_rcfw_channel()
578 creq = &rcfw->creq; in bnxt_qplib_alloc_rcfw_channel()
579 rcfw->res = res; in bnxt_qplib_alloc_rcfw_channel()
585 hwq_attr.res = rcfw->res; in bnxt_qplib_alloc_rcfw_channel()
590 if (bnxt_qplib_alloc_init_hwq(&creq->hwq, &hwq_attr)) { in bnxt_qplib_alloc_rcfw_channel()
591 dev_err(&rcfw->pdev->dev, in bnxt_qplib_alloc_rcfw_channel()
595 if (ctx->hwrm_intf_ver < HWRM_VERSION_RCFW_CMDQ_DEPTH_CHECK) in bnxt_qplib_alloc_rcfw_channel()
596 rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_256; in bnxt_qplib_alloc_rcfw_channel()
598 rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_8192; in bnxt_qplib_alloc_rcfw_channel()
600 sginfo.pgsize = bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth); in bnxt_qplib_alloc_rcfw_channel()
601 hwq_attr.depth = rcfw->cmdq_depth; in bnxt_qplib_alloc_rcfw_channel()
604 if (bnxt_qplib_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) { in bnxt_qplib_alloc_rcfw_channel()
605 dev_err(&rcfw->pdev->dev, in bnxt_qplib_alloc_rcfw_channel()
606 "HW channel CMDQ allocation failed\n"); in bnxt_qplib_alloc_rcfw_channel()
610 rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements, in bnxt_qplib_alloc_rcfw_channel()
611 sizeof(*rcfw->crsqe_tbl), GFP_KERNEL); in bnxt_qplib_alloc_rcfw_channel()
612 if (!rcfw->crsqe_tbl) in bnxt_qplib_alloc_rcfw_channel()
615 cmdq->cmdq_bitmap = bitmap_zalloc(rcfw->cmdq_depth, GFP_KERNEL); in bnxt_qplib_alloc_rcfw_channel()
616 if (!cmdq->cmdq_bitmap) in bnxt_qplib_alloc_rcfw_channel()
620 rcfw->qp_tbl_size = qp_tbl_sz + 1; in bnxt_qplib_alloc_rcfw_channel()
621 rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node), in bnxt_qplib_alloc_rcfw_channel()
623 if (!rcfw->qp_tbl) in bnxt_qplib_alloc_rcfw_channel()
630 return -ENOMEM; in bnxt_qplib_alloc_rcfw_channel()
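/* Allocation order in bnxt_qplib_alloc_rcfw_channel(): the CREQ hardware queue
 * first, then the CMDQ queue (its depth chosen from the HWRM interface
 * version), the crsqe tracking table, the command bitmap, and finally the QP
 * table; any failure unwinds to -ENOMEM.
 */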
637 creq = &rcfw->creq; in bnxt_qplib_rcfw_stop_irq()
638 tasklet_disable(&creq->creq_tasklet); in bnxt_qplib_rcfw_stop_irq()
640 bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false); in bnxt_qplib_rcfw_stop_irq()
641 /* Sync with the last running IRQ handler */ in bnxt_qplib_rcfw_stop_irq()
642 synchronize_irq(creq->msix_vec); in bnxt_qplib_rcfw_stop_irq()
644 tasklet_kill(&creq->creq_tasklet); in bnxt_qplib_rcfw_stop_irq()
646 if (creq->requested) { in bnxt_qplib_rcfw_stop_irq()
647 free_irq(creq->msix_vec, rcfw); in bnxt_qplib_rcfw_stop_irq()
648 creq->requested = false; in bnxt_qplib_rcfw_stop_irq()
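/* Teardown order matters in bnxt_qplib_rcfw_stop_irq(): disable the tasklet,
 * silence the CREQ doorbell, synchronize with a possibly in-flight IRQ
 * handler, kill the tasklet, and only then free the IRQ.
 */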
655 struct bnxt_qplib_cmdq_ctx *cmdq; in bnxt_qplib_disable_rcfw_channel() local
658 creq = &rcfw->creq; in bnxt_qplib_disable_rcfw_channel()
659 cmdq = &rcfw->cmdq; in bnxt_qplib_disable_rcfw_channel()
663 iounmap(cmdq->cmdq_mbox.reg.bar_reg); in bnxt_qplib_disable_rcfw_channel()
664 iounmap(creq->creq_db.reg.bar_reg); in bnxt_qplib_disable_rcfw_channel()
666 indx = find_first_bit(cmdq->cmdq_bitmap, rcfw->cmdq_depth); in bnxt_qplib_disable_rcfw_channel()
667 if (indx != rcfw->cmdq_depth) in bnxt_qplib_disable_rcfw_channel()
668 dev_err(&rcfw->pdev->dev, in bnxt_qplib_disable_rcfw_channel()
669 "disabling RCFW with pending cmd-bit %lx\n", indx); in bnxt_qplib_disable_rcfw_channel()
671 cmdq->cmdq_mbox.reg.bar_reg = NULL; in bnxt_qplib_disable_rcfw_channel()
672 creq->creq_db.reg.bar_reg = NULL; in bnxt_qplib_disable_rcfw_channel()
673 creq->aeq_handler = NULL; in bnxt_qplib_disable_rcfw_channel()
674 creq->msix_vec = 0; in bnxt_qplib_disable_rcfw_channel()
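/* After unmapping the mailbox and doorbell regions, any bit still set in
 * cmdq_bitmap is reported: it represents a command that was issued but never
 * completed by firmware before the channel was disabled.
 */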
683 creq = &rcfw->creq; in bnxt_qplib_rcfw_start_irq()
685 if (creq->requested) in bnxt_qplib_rcfw_start_irq()
686 return -EFAULT; in bnxt_qplib_rcfw_start_irq()
688 creq->msix_vec = msix_vector; in bnxt_qplib_rcfw_start_irq()
690 tasklet_setup(&creq->creq_tasklet, bnxt_qplib_service_creq); in bnxt_qplib_rcfw_start_irq()
692 tasklet_enable(&creq->creq_tasklet); in bnxt_qplib_rcfw_start_irq()
693 rc = request_irq(creq->msix_vec, bnxt_qplib_creq_irq, 0, in bnxt_qplib_rcfw_start_irq()
697 creq->requested = true; in bnxt_qplib_rcfw_start_irq()
699 bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, true); in bnxt_qplib_rcfw_start_irq()
712 pdev = rcfw->pdev; in bnxt_qplib_map_cmdq_mbox()
713 mbox = &rcfw->cmdq.cmdq_mbox; in bnxt_qplib_map_cmdq_mbox()
715 mbox->reg.bar_id = RCFW_COMM_PCI_BAR_REGION; in bnxt_qplib_map_cmdq_mbox()
716 mbox->reg.len = RCFW_COMM_SIZE; in bnxt_qplib_map_cmdq_mbox()
717 mbox->reg.bar_base = pci_resource_start(pdev, mbox->reg.bar_id); in bnxt_qplib_map_cmdq_mbox()
718 if (!mbox->reg.bar_base) { in bnxt_qplib_map_cmdq_mbox()
719 dev_err(&pdev->dev, in bnxt_qplib_map_cmdq_mbox()
720 "QPLIB: CMDQ BAR region %d resc start is 0!\n", in bnxt_qplib_map_cmdq_mbox()
721 mbox->reg.bar_id); in bnxt_qplib_map_cmdq_mbox()
722 return -ENOMEM; in bnxt_qplib_map_cmdq_mbox()
725 bar_reg = mbox->reg.bar_base + RCFW_COMM_BASE_OFFSET; in bnxt_qplib_map_cmdq_mbox()
726 mbox->reg.len = RCFW_COMM_SIZE; in bnxt_qplib_map_cmdq_mbox()
727 mbox->reg.bar_reg = ioremap(bar_reg, mbox->reg.len); in bnxt_qplib_map_cmdq_mbox()
728 if (!mbox->reg.bar_reg) { in bnxt_qplib_map_cmdq_mbox()
729 dev_err(&pdev->dev, in bnxt_qplib_map_cmdq_mbox()
730 "QPLIB: CMDQ BAR region %d mapping failed\n", in bnxt_qplib_map_cmdq_mbox()
731 mbox->reg.bar_id); in bnxt_qplib_map_cmdq_mbox()
732 return -ENOMEM; in bnxt_qplib_map_cmdq_mbox()
737 mbox->prod = (void __iomem *)(mbox->reg.bar_reg + prod_offt); in bnxt_qplib_map_cmdq_mbox()
738 mbox->db = (void __iomem *)(mbox->reg.bar_reg + RCFW_COMM_TRIG_OFFSET); in bnxt_qplib_map_cmdq_mbox()
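/* The CMDQ mailbox sits in a fixed PCI BAR window: the region is ioremap()ed
 * and the producer-index and trigger (doorbell) registers are simply offsets
 * from the mapped base.
 */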
748 pdev = rcfw->pdev; in bnxt_qplib_map_creq_db()
749 creq_db = &rcfw->creq.creq_db; in bnxt_qplib_map_creq_db()
751 creq_db->reg.bar_id = RCFW_COMM_CONS_PCI_BAR_REGION; in bnxt_qplib_map_creq_db()
752 creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id); in bnxt_qplib_map_creq_db()
753 if (!creq_db->reg.bar_id) in bnxt_qplib_map_creq_db()
754 dev_err(&pdev->dev, in bnxt_qplib_map_creq_db()
756 creq_db->reg.bar_id); in bnxt_qplib_map_creq_db()
758 bar_reg = creq_db->reg.bar_base + reg_offt; in bnxt_qplib_map_creq_db()
760 creq_db->reg.len = 8; in bnxt_qplib_map_creq_db()
761 creq_db->reg.bar_reg = ioremap(bar_reg, creq_db->reg.len); in bnxt_qplib_map_creq_db()
762 if (!creq_db->reg.bar_reg) { in bnxt_qplib_map_creq_db()
763 dev_err(&pdev->dev, in bnxt_qplib_map_creq_db()
765 creq_db->reg.bar_id); in bnxt_qplib_map_creq_db()
766 return -ENOMEM; in bnxt_qplib_map_creq_db()
768 creq_db->dbinfo.db = creq_db->reg.bar_reg; in bnxt_qplib_map_creq_db()
769 creq_db->dbinfo.hwq = &rcfw->creq.hwq; in bnxt_qplib_map_creq_db()
770 creq_db->dbinfo.xid = rcfw->creq.ring_id; in bnxt_qplib_map_creq_db()
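/* The CREQ doorbell mapping is only 8 bytes wide; it is wired into dbinfo
 * together with the CREQ queue and ring id so bnxt_qplib_ring_nq_db() can
 * ring the CREQ doorbell with the right ring id.
 */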
776 struct bnxt_qplib_cmdq_ctx *cmdq; in bnxt_qplib_start_rcfw() local
781 cmdq = &rcfw->cmdq; in bnxt_qplib_start_rcfw()
782 creq = &rcfw->creq; in bnxt_qplib_start_rcfw()
783 mbox = &cmdq->cmdq_mbox; in bnxt_qplib_start_rcfw()
785 init.cmdq_pbl = cpu_to_le64(cmdq->hwq.pbl[PBL_LVL_0].pg_map_arr[0]); in bnxt_qplib_start_rcfw()
787 cpu_to_le16(((rcfw->cmdq_depth << in bnxt_qplib_start_rcfw()
790 ((cmdq->hwq.level << in bnxt_qplib_start_rcfw()
793 init.creq_ring_id = cpu_to_le16(creq->ring_id); in bnxt_qplib_start_rcfw()
795 __iowrite32_copy(mbox->reg.bar_reg, &init, sizeof(init) / 4); in bnxt_qplib_start_rcfw()
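/* bnxt_qplib_start_rcfw() builds an init block (CMDQ PBL base address, size
 * and level, plus the CREQ ring id) and writes it word by word into the mapped
 * mailbox with __iowrite32_copy(); this tells firmware where the CMDQ lives.
 */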
803 struct bnxt_qplib_cmdq_ctx *cmdq; in bnxt_qplib_enable_rcfw_channel() local
807 cmdq = &rcfw->cmdq; in bnxt_qplib_enable_rcfw_channel()
808 creq = &rcfw->creq; in bnxt_qplib_enable_rcfw_channel()
812 cmdq->seq_num = 0; in bnxt_qplib_enable_rcfw_channel()
813 set_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags); in bnxt_qplib_enable_rcfw_channel()
814 init_waitqueue_head(&cmdq->waitq); in bnxt_qplib_enable_rcfw_channel()
816 creq->stats.creq_qp_event_processed = 0; in bnxt_qplib_enable_rcfw_channel()
817 creq->stats.creq_func_event_processed = 0; in bnxt_qplib_enable_rcfw_channel()
818 creq->aeq_handler = aeq_handler; in bnxt_qplib_enable_rcfw_channel()
830 dev_err(&rcfw->pdev->dev, in bnxt_qplib_enable_rcfw_channel()
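/* Channel bring-up in bnxt_qplib_enable_rcfw_channel(): reset the CMDQ
 * sequence number, set FIRMWARE_FIRST_FLAG (consumed on the first doorbell in
 * __send_message()), initialize the wait queue, zero the CREQ statistics and
 * register the async-event handler.
 */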
851 sbuf->size = size; in bnxt_qplib_rcfw_alloc_sbuf()
852 sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size, in bnxt_qplib_rcfw_alloc_sbuf()
853 &sbuf->dma_addr, GFP_KERNEL); in bnxt_qplib_rcfw_alloc_sbuf()
854 if (!sbuf->sb) in bnxt_qplib_rcfw_alloc_sbuf()
866 if (sbuf->sb) in bnxt_qplib_rcfw_free_sbuf()
867 dma_free_coherent(&rcfw->pdev->dev, sbuf->size, in bnxt_qplib_rcfw_free_sbuf()
868 sbuf->sb, sbuf->dma_addr); in bnxt_qplib_rcfw_free_sbuf()
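/* The side-buffer helpers are a thin DMA-coherent alloc/free pair; the size
 * recorded in sbuf is what __send_message() rounds up into 16-byte cmdq units
 * when a command carries a response buffer.
 */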