
Searched refs:wqe_size (Results 1 – 25 of 29) sorted by relevance


/Linux-v5.4/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.h
172 void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
176 unsigned int wqe_size, u16 *prod_idx);
178 void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size);
182 unsigned int wqe_size);
186 unsigned int wqe_size, u16 *cons_idx);
190 unsigned int *wqe_size, u16 *cons_idx);
192 void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size);
198 unsigned int wqe_size, u16 *prod_idx);
204 unsigned int wqe_size,
208 unsigned int wqe_size,
[all …]
hinic_hw_qp.c
634 void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size, in hinic_sq_write_db() argument
640 prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; in hinic_sq_write_db()
656 unsigned int wqe_size, u16 *prod_idx) in hinic_sq_get_wqe() argument
658 struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(sq->wq, wqe_size, in hinic_sq_get_wqe()
672 void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size) in hinic_sq_return_wqe() argument
674 hinic_return_wqe(sq->wq, wqe_size); in hinic_sq_return_wqe()
687 struct sk_buff *skb, unsigned int wqe_size) in hinic_sq_write_wqe() argument
694 hinic_cpu_to_be32(sq_wqe, wqe_size); in hinic_sq_write_wqe()
696 hinic_write_wqe(sq->wq, hw_wqe, wqe_size); in hinic_sq_write_wqe()
711 unsigned int *wqe_size, u16 *cons_idx) in hinic_sq_read_wqebb() argument
[all …]
hinic_hw_wq.h
96 struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
99 void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size);
101 void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size);
103 struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
109 unsigned int wqe_size);
hinic_tx.c
469 unsigned int wqe_size; in hinic_xmit_frame() local
501 wqe_size = HINIC_SQ_WQE_SIZE(nr_sges); in hinic_xmit_frame()
503 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_xmit_frame()
510 sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); in hinic_xmit_frame()
522 wqe_size = 0; in hinic_xmit_frame()
533 hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); in hinic_xmit_frame()
538 hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); in hinic_xmit_frame()
543 hinic_sq_return_wqe(txq->sq, wqe_size); in hinic_xmit_frame()
580 unsigned int wqe_size; in free_all_tx_skbs() local
585 while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) { in free_all_tx_skbs()
[all …]
hinic_hw_wq.c
742 struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, in hinic_get_wqe() argument
750 num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift; in hinic_get_wqe()
788 void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size) in hinic_return_wqe() argument
790 int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; in hinic_return_wqe()
802 void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size) in hinic_put_wqe() argument
804 int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) in hinic_put_wqe()
820 struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, in hinic_read_wqe() argument
823 int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) in hinic_read_wqe()
885 unsigned int wqe_size) in hinic_write_wqe() argument
895 num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; in hinic_write_wqe()
hinic_hw_cmdq.c
160 unsigned int wqe_size = 0; in cmdq_wqe_size_from_bdlen() local
164 wqe_size = WQE_LCMD_SIZE; in cmdq_wqe_size_from_bdlen()
167 wqe_size = WQE_SCMD_SIZE; in cmdq_wqe_size_from_bdlen()
171 return wqe_size; in cmdq_wqe_size_from_bdlen()
535 unsigned int bufdesc_len, wqe_size; in clear_wqe_complete_bit() local
539 wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len); in clear_wqe_complete_bit()
540 if (wqe_size == WQE_LCMD_SIZE) { in clear_wqe_complete_bit()
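
The hinic matches above repeatedly turn a WQE size in bytes into a count of fixed-size WQE building blocks (WQEBBs) with ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size, or the equivalent shift when the block size is a power of two. A minimal standalone sketch of that arithmetic, assuming a 64-byte WQEBB and a hypothetical 200-byte WQE:

/*
 * Standalone sketch of the hinic WQEBB arithmetic; ALIGN() mirrors the
 * kernel macro, and the sizes are illustrative assumptions.
 */
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))  /* a must be a power of two */

int main(void)
{
        unsigned int wqebb_size = 64;           /* bytes per WQE building block */
        unsigned int wqebb_size_shift = 6;      /* log2(wqebb_size) */
        unsigned int wqe_size = 200;            /* bytes requested for one WQE */

        /* Round the WQE up to whole building blocks, then count them. */
        unsigned int num_wqebbs = ALIGN(wqe_size, wqebb_size) / wqebb_size;

        /* Equivalent shift form, as in hinic_get_wqe(). */
        unsigned int num_wqebbs2 = ALIGN(wqe_size, wqebb_size) >> wqebb_size_shift;

        printf("%u-byte WQE occupies %u (= %u) WQEBBs\n",
               wqe_size, num_wqebbs, num_wqebbs2);
        return 0;
}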
/Linux-v5.4/drivers/infiniband/hw/i40iw/
i40iw_uk.c
60 qp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE; in i40iw_nop_1()
136 u8 wqe_size, in i40iw_qp_get_next_send_wqe() argument
155 if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) { in i40iw_qp_get_next_send_wqe()
169 if (((*wqe_idx & 3) == 1) && (wqe_size == I40IW_WQE_SIZE_64)) { in i40iw_qp_get_next_send_wqe()
179 wqe_size / I40IW_QP_WQE_MIN_SIZE, ret_code); in i40iw_qp_get_next_send_wqe()
195 qp->sq_wrtrk_array[*wqe_idx].wqe_size = wqe_size; in i40iw_qp_get_next_send_wqe()
256 u8 wqe_size; in i40iw_rdma_write() local
270 ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size); in i40iw_rdma_write()
274 wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id); in i40iw_rdma_write()
324 u8 wqe_size; in i40iw_rdma_read() local
[all …]
i40iw_user.h
328 u8 wqe_size; member
408 u8 wqe_size,
423 enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size);
424 enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size);
426 u8 *wqe_size);
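
i40iw_fragcnt_to_wqesize_sq()/..._rq() above map a fragment count to one of a few fixed WQE sizes, and the chosen size is remembered per WQE in sq_wrtrk_array. A hedged sketch of that kind of quantization; the size table, header size, and fragment size below are illustrative assumptions, not the driver's real values:

/*
 * Hypothetical fragment-count -> WQE-size quantization in the spirit of
 * i40iw_fragcnt_to_wqesize_sq(). The real mapping is dictated by hardware.
 */
#include <stdio.h>

static const unsigned int wqe_sizes[] = { 32, 64, 96, 128 };    /* assumed */

static int fragcnt_to_wqesize(unsigned int frag_cnt, unsigned int frag_bytes,
                              unsigned int hdr_bytes, unsigned char *wqe_size)
{
        unsigned int need = hdr_bytes + frag_cnt * frag_bytes;
        size_t i;

        for (i = 0; i < sizeof(wqe_sizes) / sizeof(wqe_sizes[0]); i++) {
                if (need <= wqe_sizes[i]) {
                        /* Pick the smallest quantized size that fits. */
                        *wqe_size = (unsigned char)wqe_sizes[i];
                        return 0;
                }
        }
        return -1;      /* too many fragments for any supported WQE size */
}

int main(void)
{
        unsigned char sz;

        if (!fragcnt_to_wqesize(3, 16, 32, &sz))
                printf("3 fragments -> %u-byte WQE\n", (unsigned int)sz);
        return 0;
}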
/Linux-v5.4/drivers/infiniband/hw/qedr/
qedr_hsi_rdma.h
310 u8 wqe_size; member
338 u8 wqe_size; member
374 u8 wqe_size; member
420 u8 wqe_size; member
475 u8 wqe_size; member
498 u8 wqe_size; member
548 u8 wqe_size; member
602 u8 wqe_size; member
628 u8 wqe_size; member
663 u8 wqe_size; member
[all …]
verbs.c
2903 struct qedr_qp *qp, u8 *wqe_size, in qedr_prepare_sq_inline_data() argument
2940 (*wqe_size)++; in qedr_prepare_sq_inline_data()
2988 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size, in qedr_prepare_sq_sges() argument
3003 if (wqe_size) in qedr_prepare_sq_sges()
3004 *wqe_size += wr->num_sge; in qedr_prepare_sq_sges()
3025 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr, in qedr_prepare_sq_rdma_data()
3029 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr); in qedr_prepare_sq_rdma_data()
3044 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr, in qedr_prepare_sq_send_data()
3048 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr); in qedr_prepare_sq_send_data()
3201 swqe->wqe_size = 2; in __qedr_post_send()
[all …]
qedr.h
419 u8 wqe_size; member
430 u8 wqe_size; member
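
In the qedr matches above, wqe_size is not a byte count: it counts WQE segments. A send WQE starts at 2 in __qedr_post_send(), and qedr_prepare_sq_sges() adds one per scatter/gather entry. A tiny sketch of that bookkeeping (reading the initial 2 as two header segments is an assumption):

#include <stdio.h>

int main(void)
{
        unsigned char wqe_size = 2;     /* swqe->wqe_size = 2; assumed header segments */
        unsigned int num_sge = 3;

        /* qedr_prepare_sq_sges(): *wqe_size += wr->num_sge */
        wqe_size += num_sge;

        printf("send WQE with %u SGEs -> %u segments\n",
               num_sge, (unsigned int)wqe_size);
        return 0;
}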
/Linux-v5.4/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
374 dev->attr.wqe_size) : 0; in _ocrdma_alloc_pd()
506 resp.wqe_size = dev->attr.wqe_size; in ocrdma_alloc_ucontext()
508 resp.dpp_wqe_size = dev->attr.wqe_size; in ocrdma_alloc_ucontext()
1935 const struct ib_send_wr *wr, u32 wqe_size) in ocrdma_build_inline_sges() argument
1956 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES); in ocrdma_build_inline_sges()
1958 wqe_size += sizeof(struct ocrdma_sge); in ocrdma_build_inline_sges()
1963 wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge)); in ocrdma_build_inline_sges()
1965 wqe_size += sizeof(struct ocrdma_sge); in ocrdma_build_inline_sges()
1968 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT); in ocrdma_build_inline_sges()
1977 u32 wqe_size = sizeof(*hdr); in ocrdma_build_send() local
[all …]
ocrdma.h
111 u32 wqe_size; member
ocrdma_hw.c
1201 attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs & in ocrdma_get_attr()
1210 attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) + in ocrdma_get_attr()
2199 dev->attr.wqe_size, &hw_pages, &hw_page_size); in ocrdma_set_create_qp_sq_cmd()
2213 qp->sq.entry_size = dev->attr.wqe_size; in ocrdma_set_create_qp_sq_cmd()
2230 cmd->wqe_rqe_size |= (dev->attr.wqe_size << in ocrdma_set_create_qp_sq_cmd()
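
ocrdma_build_inline_sges() above grows wqe_size from the header size by either the padded inline payload or one SGE descriptor per entry, then encodes the result into the WQE header control word in stride units. A sketch with assumed stand-in values for OCRDMA_WQE_ALIGN_BYTES, OCRDMA_WQE_STRIDE, and OCRDMA_WQE_SIZE_SHIFT:

#include <stdio.h>

#define WQE_ALIGN_BYTES 16u     /* stands in for OCRDMA_WQE_ALIGN_BYTES */
#define WQE_STRIDE      8u      /* stands in for OCRDMA_WQE_STRIDE */
#define WQE_SIZE_SHIFT  16u     /* stands in for OCRDMA_WQE_SIZE_SHIFT */

#define ROUNDUP(x, a)   ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
        unsigned int wqe_size = 32;     /* assumed sizeof(struct ocrdma_hdr_wqe) */
        unsigned int total_len = 40, num_sge = 2, sge_bytes = 16;
        unsigned int cw = 0;
        int inline_data = 0;

        if (inline_data)
                wqe_size += ROUNDUP(total_len, WQE_ALIGN_BYTES);        /* padded payload */
        else
                wqe_size += num_sge * sge_bytes;        /* one SGE descriptor per entry */

        /* The control word carries the size in strides, not bytes. */
        cw |= (wqe_size / WQE_STRIDE) << WQE_SIZE_SHIFT;

        printf("wqe_size=%u bytes -> cw=0x%08x\n", wqe_size, cw);
        return 0;
}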
/Linux-v5.4/drivers/infiniband/sw/rxe/
rxe_qp.c
224 int wqe_size; in rxe_qp_init_req() local
245 wqe_size = max_t(int, sizeof(struct rxe_send_wqe) + in rxe_qp_init_req()
252 wqe_size); in rxe_qp_init_req()
293 int wqe_size; in rxe_qp_init_resp() local
299 wqe_size = rcv_wqe_size(qp->rq.max_sge); in rxe_qp_init_resp()
302 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size); in rxe_qp_init_resp()
306 wqe_size); in rxe_qp_init_resp()
/Linux-v5.4/include/uapi/rdma/
ocrdma-abi.h
55 __u32 wqe_size; member
ib_user_verbs.h
829 __u32 wqe_size; member
848 __u32 wqe_size; member
861 __u32 wqe_size; member
/Linux-v5.4/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_qp.c
144 qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) + in pvrdma_set_rq_size()
147 qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) / in pvrdma_set_rq_size()
169 qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) + in pvrdma_set_sq_size()
174 (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) / in pvrdma_set_sq_size()
584 qp->sq.offset + n * qp->sq.wqe_size); in get_sq_wqe()
590 qp->rq.offset + n * qp->rq.wqe_size); in get_rq_wqe()
pvrdma.h
155 int wqe_size; member
170 int wqe_size; member
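
pvrdma_set_rq_size()/..._sq_size() above round the per-WQE size up to a power of two and then size the ring's backing store in whole pages, which also keeps get_sq_wqe()'s offset + n * wqe_size indexing cheap. A sketch of the page math with assumed sizes:

#include <stdio.h>

#define PAGE_SIZE               4096u
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int wqe_size = 64;     /* already rounded to a power of two */
        unsigned int wqe_cnt = 250;     /* requested number of WQEs, assumed */

        /* (wqe_cnt * wqe_size + PAGE_SIZE - 1) / PAGE_SIZE in the driver */
        unsigned int npages = DIV_ROUND_UP(wqe_cnt * wqe_size, PAGE_SIZE);

        printf("%u WQEs of %u bytes -> %u page(s)\n", wqe_cnt, wqe_size, npages);
        return 0;
}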
/Linux-v5.4/drivers/infiniband/hw/mlx5/
qp.c
333 int wqe_size; in set_rq_size() local
357 wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0; in set_rq_size()
358 wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg); in set_rq_size()
359 wqe_size = roundup_pow_of_two(wqe_size); in set_rq_size()
360 wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size; in set_rq_size()
362 qp->rq.wqe_cnt = wq_size / wqe_size; in set_rq_size()
363 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) { in set_rq_size()
365 wqe_size, in set_rq_size()
370 qp->rq.wqe_shift = ilog2(wqe_size); in set_rq_size()
453 static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size) in get_send_sge() argument
[all …]
odp.c
1090 int wqe_size = 1 << srq->msrq.wqe_shift; in mlx5_ib_mr_responder_pfault_handler_srq() local
1092 if (wqe_size > wqe_length) { in mlx5_ib_mr_responder_pfault_handler_srq()
1097 *wqe_end = *wqe + wqe_size; in mlx5_ib_mr_responder_pfault_handler_srq()
1109 int wqe_size = 1 << wq->wqe_shift; in mlx5_ib_mr_responder_pfault_handler_rq() local
1116 if (wqe_size > wqe_length) { in mlx5_ib_mr_responder_pfault_handler_rq()
1121 *wqe_end = wqe + wqe_size; in mlx5_ib_mr_responder_pfault_handler_rq()
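
The mlx5 set_rq_size() matches compute the RQ WQE size as an optional signature segment plus one data segment per SGE, round it to a power of two, and then keep only wqe_shift; the odp.c matches recover the size as 1 << wqe_shift. A sketch of that round trip, with assumed segment sizes:

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int x)
{
        unsigned int p = 1;

        while (p < x)
                p <<= 1;
        return p;
}

static unsigned int ilog2_u32(unsigned int x)
{
        unsigned int s = 0;

        while (x >>= 1)
                s++;
        return s;
}

int main(void)
{
        unsigned int sig_seg = 16, data_seg = 16;       /* assumed segment sizes */
        unsigned int max_recv_sge = 3, max_recv_wr = 100;
        int wq_sig = 1;

        unsigned int wqe_size = (wq_sig ? sig_seg : 0) + max_recv_sge * data_seg;

        wqe_size = roundup_pow_of_two(wqe_size);                        /* 64 */

        unsigned int wq_size = roundup_pow_of_two(max_recv_wr) * wqe_size;
        unsigned int wqe_cnt = wq_size / wqe_size;                      /* 128 */
        unsigned int wqe_shift = ilog2_u32(wqe_size);                   /* 6 */

        /* Later consumers only see the shift: the size comes back as 1 << shift. */
        printf("wqe_size=%u (shift %u, 1<<shift=%u), wqe_cnt=%u\n",
               wqe_size, wqe_shift, 1u << wqe_shift, wqe_cnt);
        return 0;
}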
/Linux-v5.4/drivers/infiniband/core/
uverbs_cmd.c
2028 wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count); in ib_uverbs_post_send()
2039 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL); in ib_uverbs_post_send()
2053 if (copy_from_user(user_wr, wqes + i * cmd.wqe_size, in ib_uverbs_post_send()
2054 cmd.wqe_size)) { in ib_uverbs_post_send()
2204 u32 wqe_size, u32 sge_count) in ib_uverbs_unmarshall_recv() argument
2214 if (wqe_size < sizeof (struct ib_uverbs_recv_wr)) in ib_uverbs_unmarshall_recv()
2217 wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count); in ib_uverbs_unmarshall_recv()
2228 user_wr = kmalloc(wqe_size, GFP_KERNEL); in ib_uverbs_unmarshall_recv()
2235 if (copy_from_user(user_wr, wqes + i * wqe_size, in ib_uverbs_unmarshall_recv()
2236 wqe_size)) { in ib_uverbs_unmarshall_recv()
[all …]
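
ib_uverbs_post_send() and ib_uverbs_unmarshall_recv() above treat the user buffer as wr_count records of wqe_size bytes each, rejecting strides smaller than the fixed work-request header and copying record i from wqes + i * wqe_size. A user-space sketch of the same variable-stride walk, with memcpy() standing in for copy_from_user() and a hypothetical header struct:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct recv_wr_hdr {            /* hypothetical fixed header of each record */
        uint64_t wr_id;
        uint32_t num_sge;
};

int main(void)
{
        unsigned int wr_count = 3, wqe_size = 32;       /* stride chosen by the caller */
        unsigned char *wqes = calloc(wr_count, wqe_size);
        struct recv_wr_hdr hdr;
        unsigned int i;

        /* Mirror of the kernel's check that the stride can hold the header. */
        if (!wqes || wqe_size < sizeof(hdr))
                return 1;

        for (i = 0; i < wr_count; i++) {
                /* copy_from_user(user_wr, wqes + i * wqe_size, wqe_size) analogue */
                memcpy(&hdr, wqes + (size_t)i * wqe_size, sizeof(hdr));
                printf("wr %u: wr_id=%llu num_sge=%u\n",
                       i, (unsigned long long)hdr.wr_id, (unsigned int)hdr.num_sge);
        }
        free(wqes);
        return 0;
}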
/Linux-v5.4/drivers/net/ethernet/ibm/ehea/
ehea_qmr.c
358 int nr_pages, int wqe_size, int act_nr_sges, in ehea_qp_alloc_register() argument
365 ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size); in ehea_qp_alloc_register()
/Linux-v5.4/drivers/infiniband/hw/bnxt_re/
roce_hsi.h
194 u8 wqe_size; member
223 u8 wqe_size; member
280 u8 wqe_size; member
426 u8 wqe_size; member
qplib_fp.c
694 srqe->wqe_size = wqe->num_sge + in bnxt_qplib_post_srq_recv()
1606 sqe->wqe_size = wqe_size16 + in bnxt_qplib_post_send()
1625 sqe->wqe_size = wqe_size16 + in bnxt_qplib_post_send()
1658 sqe->wqe_size = wqe_size16 + in bnxt_qplib_post_send()
1862 rqe->wqe_size = wqe->num_sge + in bnxt_qplib_post_recv()
1868 rqe->wqe_size++; in bnxt_qplib_post_recv()
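
In the bnxt_re matches, the wqe_size field written into the hardware descriptor is again a slot count rather than bytes: the number of SGEs plus the slots occupied by the fixed header, with an extra increment for padding in bnxt_qplib_post_recv(). A sketch under the assumption of 16-byte slots and a hypothetical descriptor layout:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define SLOT 16u                /* assumed slot size encoded by wqe_size */

struct rqe {                    /* hypothetical receive descriptor */
        uint8_t  wqe_type;
        uint8_t  flags;
        uint8_t  wqe_size;      /* total descriptor size in slots */
        uint8_t  rsvd[5];
        uint64_t wr_id;
        uint8_t  data[];        /* SGEs follow, one per slot */
};

int main(void)
{
        unsigned int num_sge = 2;

        /* Round the header up to whole slots, then add one slot per SGE. */
        unsigned int hdr_slots = (offsetof(struct rqe, data) + SLOT - 1) / SLOT;
        unsigned int wqe_size = num_sge + hdr_slots;

        printf("header=%u slot(s), %u SGEs -> wqe_size=%u slots\n",
               hdr_slots, num_sge, wqe_size);
        return 0;
}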
