Lines Matching refs:inbox
692 struct mlx4_cmd_mailbox *inbox) in update_pkey_index() argument
694 u8 sched = *(u8 *)(inbox->buf + 64); in update_pkey_index()
695 u8 orig_index = *(u8 *)(inbox->buf + 35); in update_pkey_index()
703 *(u8 *)(inbox->buf + 35) = new_index; in update_pkey_index()
706 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, in update_gid() argument
709 struct mlx4_qp_context *qp_ctx = inbox->buf + 8; in update_gid()
710 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf); in update_gid()
750 struct mlx4_cmd_mailbox *inbox, in update_vport_qp_param() argument
753 struct mlx4_qp_context *qpc = inbox->buf + 8; in update_vport_qp_param()
779 *(__be32 *)inbox->buf = in update_vport_qp_param()
780 cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) | in update_vport_qp_param()
2296 struct mlx4_cmd_mailbox *inbox, in mlx4_ALLOC_RES_wrapper() argument
2610 struct mlx4_cmd_mailbox *inbox, in mlx4_FREE_RES_wrapper() argument
2756 struct mlx4_cmd_mailbox *inbox, in mlx4_SW2HW_MPT_wrapper() argument
2764 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz; in mlx4_SW2HW_MPT_wrapper()
2776 if (!mr_is_region(inbox->buf)) { in mlx4_SW2HW_MPT_wrapper()
2782 pd = mr_get_pd(inbox->buf); in mlx4_SW2HW_MPT_wrapper()
2789 if (mr_is_fmr(inbox->buf)) { in mlx4_SW2HW_MPT_wrapper()
2791 if (mr_is_bind_enabled(inbox->buf)) { in mlx4_SW2HW_MPT_wrapper()
2796 if (!mr_is_region(inbox->buf)) { in mlx4_SW2HW_MPT_wrapper()
2802 phys = mr_phys_mpt(inbox->buf); in mlx4_SW2HW_MPT_wrapper()
2809 mr_get_mtt_size(inbox->buf), mtt); in mlx4_SW2HW_MPT_wrapper()
2816 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_MPT_wrapper()
2839 struct mlx4_cmd_mailbox *inbox, in mlx4_HW2SW_MPT_wrapper() argument
2853 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_MPT_wrapper()
2871 struct mlx4_cmd_mailbox *inbox, in mlx4_QUERY_MPT_wrapper() argument
2907 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_MPT_wrapper()
2949 struct mlx4_cmd_mailbox *inbox);
2953 struct mlx4_cmd_mailbox *inbox, in mlx4_RST2INIT_QP_wrapper() argument
2961 struct mlx4_qp_context *qpc = inbox->buf + 8; in mlx4_RST2INIT_QP_wrapper()
2973 err = adjust_qp_sched_queue(dev, slave, qpc, inbox); in mlx4_RST2INIT_QP_wrapper()
3016 update_pkey_index(dev, slave, inbox); in mlx4_RST2INIT_QP_wrapper()
3017 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_RST2INIT_QP_wrapper()
3094 struct mlx4_cmd_mailbox *inbox, in mlx4_SW2HW_EQ_wrapper() argument
3101 struct mlx4_eq_context *eqc = inbox->buf; in mlx4_SW2HW_EQ_wrapper()
3122 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_EQ_wrapper()
3143 struct mlx4_cmd_mailbox *inbox, in mlx4_CONFIG_DEV_wrapper() argument
3153 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_CONFIG_DEV_wrapper()
3184 struct mlx4_cmd_mailbox *inbox, in verify_qp_parameters() argument
3194 qp_ctx = inbox->buf + 8; in verify_qp_parameters()
3196 optpar = be32_to_cpu(*(__be32 *) inbox->buf); in verify_qp_parameters()
3264 struct mlx4_cmd_mailbox *inbox, in mlx4_WRITE_MTT_wrapper() argument
3269 __be64 *page_list = inbox->buf; in mlx4_WRITE_MTT_wrapper()
3302 struct mlx4_cmd_mailbox *inbox, in mlx4_HW2SW_EQ_wrapper() argument
3319 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_EQ_wrapper()
3408 struct mlx4_cmd_mailbox *inbox, in mlx4_QUERY_EQ_wrapper() argument
3426 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_EQ_wrapper()
3435 struct mlx4_cmd_mailbox *inbox, in mlx4_SW2HW_CQ_wrapper() argument
3441 struct mlx4_cq_context *cqc = inbox->buf; in mlx4_SW2HW_CQ_wrapper()
3455 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_CQ_wrapper()
3473 struct mlx4_cmd_mailbox *inbox, in mlx4_HW2SW_CQ_wrapper() argument
3484 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_CQ_wrapper()
3498 struct mlx4_cmd_mailbox *inbox, in mlx4_QUERY_CQ_wrapper() argument
3513 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_CQ_wrapper()
3522 struct mlx4_cmd_mailbox *inbox, in handle_resize() argument
3530 struct mlx4_cq_context *cqc = inbox->buf; in handle_resize()
3549 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in handle_resize()
3570 struct mlx4_cmd_mailbox *inbox, in mlx4_MODIFY_CQ_wrapper() argument
3586 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); in mlx4_MODIFY_CQ_wrapper()
3590 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_MODIFY_CQ_wrapper()
3611 struct mlx4_cmd_mailbox *inbox, in mlx4_SW2HW_SRQ_wrapper() argument
3619 struct mlx4_srq_context *srqc = inbox->buf; in mlx4_SW2HW_SRQ_wrapper()
3636 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SW2HW_SRQ_wrapper()
3656 struct mlx4_cmd_mailbox *inbox, in mlx4_HW2SW_SRQ_wrapper() argument
3667 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_HW2SW_SRQ_wrapper()
3685 struct mlx4_cmd_mailbox *inbox, in mlx4_QUERY_SRQ_wrapper() argument
3700 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_SRQ_wrapper()
3708 struct mlx4_cmd_mailbox *inbox, in mlx4_ARM_SRQ_wrapper() argument
3725 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_ARM_SRQ_wrapper()
3733 struct mlx4_cmd_mailbox *inbox, in mlx4_GEN_QP_wrapper() argument
3749 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_GEN_QP_wrapper()
3757 struct mlx4_cmd_mailbox *inbox, in mlx4_INIT2INIT_QP_wrapper() argument
3761 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_INIT2INIT_QP_wrapper()
3763 update_pkey_index(dev, slave, inbox); in mlx4_INIT2INIT_QP_wrapper()
3764 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_INIT2INIT_QP_wrapper()
3769 struct mlx4_cmd_mailbox *inbox) in adjust_qp_sched_queue() argument
3771 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf); in adjust_qp_sched_queue()
3802 struct mlx4_cmd_mailbox *inbox) in roce_verify_mac() argument
3807 u8 sched = *(u8 *)(inbox->buf + 64); in roce_verify_mac()
3821 struct mlx4_cmd_mailbox *inbox, in mlx4_INIT2RTR_QP_wrapper() argument
3826 struct mlx4_qp_context *qpc = inbox->buf + 8; in mlx4_INIT2RTR_QP_wrapper()
3836 err = adjust_qp_sched_queue(dev, slave, qpc, inbox); in mlx4_INIT2RTR_QP_wrapper()
3839 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave); in mlx4_INIT2RTR_QP_wrapper()
3843 if (roce_verify_mac(dev, slave, qpc, inbox)) in mlx4_INIT2RTR_QP_wrapper()
3846 update_pkey_index(dev, slave, inbox); in mlx4_INIT2RTR_QP_wrapper()
3847 update_gid(dev, inbox, (u8)slave); in mlx4_INIT2RTR_QP_wrapper()
3859 err = update_vport_qp_param(dev, inbox, slave, qpn); in mlx4_INIT2RTR_QP_wrapper()
3863 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_INIT2RTR_QP_wrapper()
3883 struct mlx4_cmd_mailbox *inbox, in mlx4_RTR2RTS_QP_wrapper() argument
3888 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_RTR2RTS_QP_wrapper()
3890 err = adjust_qp_sched_queue(dev, slave, context, inbox); in mlx4_RTR2RTS_QP_wrapper()
3893 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave); in mlx4_RTR2RTS_QP_wrapper()
3897 update_pkey_index(dev, slave, inbox); in mlx4_RTR2RTS_QP_wrapper()
3898 update_gid(dev, inbox, (u8)slave); in mlx4_RTR2RTS_QP_wrapper()
3900 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_RTR2RTS_QP_wrapper()
3905 struct mlx4_cmd_mailbox *inbox, in mlx4_RTS2RTS_QP_wrapper() argument
3910 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_RTS2RTS_QP_wrapper()
3912 err = adjust_qp_sched_queue(dev, slave, context, inbox); in mlx4_RTS2RTS_QP_wrapper()
3915 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave); in mlx4_RTS2RTS_QP_wrapper()
3919 update_pkey_index(dev, slave, inbox); in mlx4_RTS2RTS_QP_wrapper()
3920 update_gid(dev, inbox, (u8)slave); in mlx4_RTS2RTS_QP_wrapper()
3922 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_RTS2RTS_QP_wrapper()
3928 struct mlx4_cmd_mailbox *inbox, in mlx4_SQERR2RTS_QP_wrapper() argument
3932 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_SQERR2RTS_QP_wrapper()
3933 int err = adjust_qp_sched_queue(dev, slave, context, inbox); in mlx4_SQERR2RTS_QP_wrapper()
3937 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SQERR2RTS_QP_wrapper()
3942 struct mlx4_cmd_mailbox *inbox, in mlx4_SQD2SQD_QP_wrapper() argument
3947 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_SQD2SQD_QP_wrapper()
3949 err = adjust_qp_sched_queue(dev, slave, context, inbox); in mlx4_SQD2SQD_QP_wrapper()
3952 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave); in mlx4_SQD2SQD_QP_wrapper()
3957 update_gid(dev, inbox, (u8)slave); in mlx4_SQD2SQD_QP_wrapper()
3958 update_pkey_index(dev, slave, inbox); in mlx4_SQD2SQD_QP_wrapper()
3959 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SQD2SQD_QP_wrapper()
3964 struct mlx4_cmd_mailbox *inbox, in mlx4_SQD2RTS_QP_wrapper() argument
3969 struct mlx4_qp_context *context = inbox->buf + 8; in mlx4_SQD2RTS_QP_wrapper()
3971 err = adjust_qp_sched_queue(dev, slave, context, inbox); in mlx4_SQD2RTS_QP_wrapper()
3974 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave); in mlx4_SQD2RTS_QP_wrapper()
3979 update_gid(dev, inbox, (u8)slave); in mlx4_SQD2RTS_QP_wrapper()
3980 update_pkey_index(dev, slave, inbox); in mlx4_SQD2RTS_QP_wrapper()
3981 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_SQD2RTS_QP_wrapper()
3986 struct mlx4_cmd_mailbox *inbox, in mlx4_2RST_QP_wrapper() argument
3997 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_2RST_QP_wrapper()
4139 struct mlx4_cmd_mailbox *inbox, in mlx4_QP_ATTACH_wrapper() argument
4144 u8 *gid = inbox->buf; in mlx4_QP_ATTACH_wrapper()
4225 struct mlx4_cmd_mailbox *inbox, in add_eth_header() argument
4237 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; in add_eth_header()
4285 struct mlx4_cmd_mailbox *inbox, in mlx4_UPDATE_QP_wrapper() argument
4298 cmd = (struct mlx4_update_qp_context *)inbox->buf; in mlx4_UPDATE_QP_wrapper()
4335 err = mlx4_cmd(dev, inbox->dma, in mlx4_UPDATE_QP_wrapper()
4367 struct mlx4_cmd_mailbox *inbox, in mlx4_QP_FLOW_STEERING_ATTACH_wrapper() argument
4388 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4418 if (add_eth_header(dev, slave, inbox, rlist, header_id)) { in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4431 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param, in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4449 mbox_size = qp_attach_mbox_size(inbox->buf); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4457 memcpy(rrule->mirr_mbox, inbox->buf, mbox_size); in mlx4_QP_FLOW_STEERING_ATTACH_wrapper()
4501 struct mlx4_cmd_mailbox *inbox, in mlx4_QP_FLOW_STEERING_DETACH_wrapper() argument
4565 struct mlx4_cmd_mailbox *inbox, in mlx4_QUERY_IF_STAT_wrapper() argument
4576 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); in mlx4_QUERY_IF_STAT_wrapper()
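
The matches above all appear to come from the mlx4 SR-IOV command wrappers (resource_tracker.c in the Linux tree), and they repeat one pattern: the slave's command payload arrives in inbox->buf with big-endian fields, the first 32-bit word is the optional-parameter mask, the QP context starts at offset 8, and helpers such as update_pkey_index() and roce_verify_mac() read or rewrite raw bytes (P_Key index at offset 35, sched_queue at offset 64) before the mailbox is forwarded to firmware via mlx4_DMA_wrapper()/mlx4_cmd(). The following is a minimal userspace sketch of that layout, not driver code: demo_cmd_mailbox, be32_load() and fixup_qp_mailbox() are simplified stand-ins for struct mlx4_cmd_mailbox, be32_to_cpu() and the wrapper logic shown in the listing.

/*
 * Sketch of the mailbox layout the matches above keep touching.
 * Everything here is a userspace illustration with hypothetical names;
 * the real definitions live in the mlx4 driver headers.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_cmd_mailbox {       /* stand-in for struct mlx4_cmd_mailbox */
	void     *buf;          /* command payload handed to firmware (big-endian fields) */
	uint64_t  dma;          /* bus address forwarded by mlx4_cmd()/mlx4_cmd_imm()     */
};

/* Stand-in for be32_to_cpu(): decode a big-endian 32-bit word. */
static uint32_t be32_load(const void *p)
{
	const uint8_t *b = p;
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

/*
 * Mimic what the QP-transition wrappers do before forwarding the command:
 * read the optpar mask from the first word, locate the QP context at
 * offset 8, inspect the sched_queue byte (offset 64) and patch the P_Key
 * index byte (offset 35), as update_pkey_index() does in the listing.
 */
static void fixup_qp_mailbox(struct demo_cmd_mailbox *inbox, uint8_t new_pkey_index)
{
	uint8_t  *buf    = inbox->buf;
	uint32_t  optpar = be32_load(buf);   /* be32_to_cpu(*(__be32 *)inbox->buf) */
	uint8_t  *qpc    = buf + 8;          /* struct mlx4_qp_context in the driver */
	uint8_t   sched  = buf[64];          /* sched_queue byte, encodes the port   */

	printf("optpar=%#x sched=%#x old pkey index=%u\n",
	       (unsigned)optpar, (unsigned)sched, (unsigned)buf[35]);
	buf[35] = new_pkey_index;            /* what update_pkey_index() rewrites    */
	(void)qpc;
	/* A real wrapper would now pass inbox->dma on via mlx4_DMA_wrapper(). */
}

int main(void)
{
	uint8_t payload[256] = {0};
	struct demo_cmd_mailbox inbox = { .buf = payload, .dma = 0 };

	payload[3]  = 0x01;   /* pretend one optpar bit is set        */
	payload[35] = 0x7f;   /* original P_Key index                 */
	payload[64] = 0x83;   /* sched_queue byte with a port bit set */
	fixup_qp_mailbox(&inbox, 0);
	return 0;
}

Compile with any C compiler (e.g. cc sketch.c && ./a.out); the program only decodes and patches the same byte offsets the listing highlights, whereas the real wrappers go on to validate the slave's resources and hand inbox->dma to the firmware command path.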