
Searched refs:wqe_index (Results 1 – 19 of 19) sorted by relevance

/Linux-v5.4/drivers/infiniband/sw/rxe/
rxe_req.c
71 unsigned int wqe_index; in req_retry() local
76 qp->req.wqe_index = consumer_index(qp->sq.queue); in req_retry()
80 for (wqe_index = consumer_index(qp->sq.queue); in req_retry()
81 wqe_index != producer_index(qp->sq.queue); in req_retry()
82 wqe_index = next_index(qp->sq.queue, wqe_index)) { in req_retry()
83 wqe = addr_from_index(qp->sq.queue, wqe_index); in req_retry()
150 if (wqe && ((qp->req.wqe_index != in req_next_wqe()
174 if (qp->req.wqe_index == producer_index(qp->sq.queue)) in req_next_wqe()
177 wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index); in req_next_wqe()
185 (qp->req.wqe_index != consumer_index(qp->sq.queue)))) { in req_next_wqe()
[all …]
rxe_verbs.h
144 int wqe_index; member
rxe_qp.c
266 qp->req.wqe_index = producer_index(qp->sq.queue); in rxe_qp_init_req()
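
The rxe hits above show the software-RoCE retry path walking its send queue by index: req_retry() resets qp->req.wqe_index to the consumer index and steps it toward the producer index with next_index(), while rxe_qp_init_req() starts the index at the producer side. Below is a minimal userspace sketch of that consumer-to-producer walk; the struct and helper names are invented for illustration, a power-of-two ring is assumed, and this is not the kernel code.

    #include <stdio.h>

    /* Toy ring queue: slots are addressed modulo a power-of-two size. */
    struct toy_queue {
        unsigned int producer;     /* next slot to be filled */
        unsigned int consumer;     /* oldest outstanding slot */
        unsigned int index_mask;   /* size - 1 */
    };

    static unsigned int toy_next_index(const struct toy_queue *q, unsigned int i)
    {
        return (i + 1) & q->index_mask;
    }

    /* Visit every outstanding entry from consumer to producer, the way a
     * retry pass revisits unacknowledged send WQEs. */
    static void toy_walk(const struct toy_queue *q)
    {
        unsigned int wqe_index;

        for (wqe_index = q->consumer;
             wqe_index != q->producer;
             wqe_index = toy_next_index(q, wqe_index))
            printf("visit slot %u\n", wqe_index);
    }

    int main(void)
    {
        struct toy_queue q = { .producer = 2, .consumer = 6, .index_mask = 7 };

        toy_walk(&q);   /* prints slots 6, 7, 0, 1 -- the walk wraps */
        return 0;
    }
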
/Linux-v5.4/drivers/infiniband/hw/mthca/
mthca_cq.c
373 struct mthca_qp *qp, int wqe_index, int is_send, in handle_error_cqe() argument
461 mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe); in handle_error_cqe()
486 int wqe_index; in mthca_poll_one() local
535 wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset) in mthca_poll_one()
537 entry->wr_id = (*cur_qp)->wrid[wqe_index + in mthca_poll_one()
543 wqe_index = wqe >> srq->wqe_shift; in mthca_poll_one()
544 entry->wr_id = srq->wrid[wqe_index]; in mthca_poll_one()
550 wqe_index = wqe >> wq->wqe_shift; in mthca_poll_one()
556 if (unlikely(wqe_index < 0)) in mthca_poll_one()
557 wqe_index = wq->max - 1; in mthca_poll_one()
[all …]
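
In the mthca hits, mthca_poll_one() recovers wqe_index from the byte offset reported in the CQE: the send side subtracts send_wqe_offset before dividing by the stride, while the receive side shifts by wqe_shift and clamps an underflowed result to the last slot (wq->max - 1). A small sketch of that offset-to-index step, with illustrative names and a power-of-two stride assumed:

    #include <stdio.h>

    /* Map a byte offset inside a WQE buffer to a WQE index: the stride is
     * (1 << wqe_shift) bytes, and an offset that wrapped below the first WQE
     * falls back to the last slot. Names are illustrative, not the driver's. */
    static int offset_to_index(long byte_offset, unsigned int wqe_shift,
                               int max_entries)
    {
        if (byte_offset < 0)                     /* underflow: use last slot */
            return max_entries - 1;
        return (int)(byte_offset >> wqe_shift);
    }

    int main(void)
    {
        printf("%d\n", offset_to_index(0x180, 6, 64));   /* 0x180 / 64 -> 6  */
        printf("%d\n", offset_to_index(-64, 6, 64));     /* wrapped    -> 63 */
        return 0;
    }
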
/Linux-v5.4/include/linux/mlx4/
cq.h
57 __be16 wqe_index; member
66 __be16 wqe_index; member
82 __be16 wqe_index; member
/Linux-v5.4/drivers/net/ethernet/ibm/ehea/
ehea_qmr.h
308 int *wqe_index) in ehea_get_swqe() argument
313 *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ); in ehea_get_swqe()
325 static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index) in ehea_poll_rq1() argument
329 *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1); in ehea_poll_rq1()
ehea_main.c
589 int arr_len, int wqe_index) in get_skb_by_index_ll() argument
595 x = wqe_index + 1; in get_skb_by_index_ll()
608 skb = skb_array[wqe_index]; in get_skb_by_index_ll()
609 skb_array[wqe_index] = NULL; in get_skb_by_index_ll()
665 int wqe_index, last_wqe_index, rq, port_reset; in ehea_proc_rwqes() local
670 cqe = ehea_poll_rq1(qp, &wqe_index); in ehea_proc_rwqes()
678 last_wqe_index = wqe_index; in ehea_proc_rwqes()
685 wqe_index); in ehea_proc_rwqes()
737 cqe = ehea_poll_rq1(qp, &wqe_index); in ehea_proc_rwqes()
880 int wqe_index; in ehea_poll() local
[all …]
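
The ehea hits pair two steps: ehea_get_swqe() and ehea_poll_rq1() derive wqe_index by shifting the queue's current offset by a constant that encodes the WQE size, and get_skb_by_index_ll() then uses that index to pull the posted skb out of a parallel pointer array and clear the slot. A toy model of the second step, the index-keyed lookup, with invented names:

    #include <stdio.h>
    #include <stddef.h>

    #define RING_SIZE 8

    /* Per-slot cookies posted alongside the receive WQEs (stand-in for ehea's
     * skb_array): the completion's wqe_index selects the buffer posted there. */
    static void *cookie_ring[RING_SIZE];

    static void *claim_by_index(int wqe_index)
    {
        void *cookie = cookie_ring[wqe_index];

        cookie_ring[wqe_index] = NULL;    /* slot is free to be reposted */
        return cookie;
    }

    int main(void)
    {
        static int payload = 42;

        cookie_ring[3] = &payload;                     /* "post" slot 3 */
        printf("%d\n", *(int *)claim_by_index(3));     /* completion hits slot 3 */
        return 0;
    }
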
/Linux-v5.4/drivers/infiniband/hw/mlx5/
odp.c
61 u16 wqe_index; member
1030 u16 wqe_index = pfault->wqe.wqe_index; in mlx5_ib_mr_initiator_pfault_handler() local
1044 wqe_index, qpn); in mlx5_ib_mr_initiator_pfault_handler()
1169 u16 wqe_index = pfault->wqe.wqe_index; in mlx5_ib_mr_wqe_pfault_handler() local
1200 ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE, in mlx5_ib_mr_wqe_pfault_handler()
1207 ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE, in mlx5_ib_mr_wqe_pfault_handler()
1216 ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE, in mlx5_ib_mr_wqe_pfault_handler()
1244 ret, wqe_index, pfault->token); in mlx5_ib_mr_wqe_pfault_handler()
1418 pfault->wqe.wqe_index = in mlx5_ib_eq_pf_process()
1419 be16_to_cpu(pf_eqe->wqe.wqe_index); in mlx5_ib_eq_pf_process()
[all …]
srq.c
406 void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index) in mlx5_ib_free_srq_wqe() argument
414 next->next_wqe_index = cpu_to_be16(wqe_index); in mlx5_ib_free_srq_wqe()
415 srq->tail = wqe_index; in mlx5_ib_free_srq_wqe()
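
In mlx5_ib_free_srq_wqe() above, a completed receive WQE is returned to the SRQ by writing its index into a next_wqe_index link and recording it as the new tail, so the free list is threaded through the WQE slots themselves. A self-contained sketch of that idea; the field and helper names here are invented, not the driver's:

    #include <stdio.h>
    #include <stdint.h>

    #define SRQ_SIZE 8

    /* Each slot's header holds the index of the next free slot, so the free
     * list lives inside the WQE array itself. */
    struct toy_srq_wqe { uint16_t next_wqe_index; };

    static struct toy_srq_wqe wqes[SRQ_SIZE];
    static int tail;

    /* Return a completed WQE to the free list: chain it after the current
     * tail, then make it the new tail. */
    static void toy_free_srq_wqe(int wqe_index)
    {
        wqes[tail].next_wqe_index = (uint16_t)wqe_index;
        tail = wqe_index;
    }

    int main(void)
    {
        int i;

        /* Initially every slot is free, chained in order 0 -> 1 -> ... -> 7. */
        for (i = 0; i < SRQ_SIZE; i++)
            wqes[i].next_wqe_index = (uint16_t)(i + 1);
        tail = SRQ_SIZE - 1;

        toy_free_srq_wqe(5);   /* completion for slot 5: 7 -> 5, tail = 5 */
        printf("tail=%d next_of_7=%d\n", tail, (int)wqes[7].next_wqe_index);
        return 0;
    }
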
mlx5_ib.h
1104 void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
1133 int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
1135 int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
1137 int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
qp.c
135 int wqe_index, in mlx5_ib_read_user_wqe_common() argument
142 size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift); in mlx5_ib_read_user_wqe_common()
164 int wqe_index, in mlx5_ib_read_user_wqe_sq() argument
186 wqe_index, in mlx5_ib_read_user_wqe_sq()
230 int wqe_index, in mlx5_ib_read_user_wqe_rq() argument
244 wqe_index, in mlx5_ib_read_user_wqe_rq()
258 int wqe_index, in mlx5_ib_read_user_wqe_srq() argument
270 wqe_index, in mlx5_ib_read_user_wqe_srq()
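
The ODP page-fault handlers above pass the faulting wqe_index into mlx5_ib_read_user_wqe_sq/rq/srq(), and mlx5_ib_read_user_wqe_common() turns it into a byte offset inside the user-mapped buffer: the index is wrapped modulo the WQE count, scaled by the stride, and added to the queue's base offset. A standalone sketch of that arithmetic, with illustrative parameter names:

    #include <stdio.h>
    #include <stddef.h>

    /* Byte offset of a WQE inside a user-mapped queue buffer: wrap the index
     * modulo the queue depth, scale by the stride (1 << wqe_shift bytes),
     * and add the queue's base offset within the buffer. */
    static size_t wqe_byte_offset(size_t wq_offset, int wqe_index,
                                  int wqe_cnt, int wqe_shift)
    {
        return wq_offset + ((size_t)(wqe_index % wqe_cnt) << wqe_shift);
    }

    int main(void)
    {
        /* Queue of 256 WQEs with a 64-byte stride, starting at offset 0x1000:
         * index 260 wraps to slot 4 -> 0x1000 + 4 * 64 = 0x1100. */
        printf("0x%zx\n", wqe_byte_offset(0x1000, 260, 256, 6));
        return 0;
    }
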
/Linux-v5.4/drivers/infiniband/hw/mlx4/
srq.c
286 void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index) in mlx4_ib_free_srq_wqe() argument
294 next->next_wqe_index = cpu_to_be16(wqe_index); in mlx4_ib_free_srq_wqe()
295 srq->tail = wqe_index; in mlx4_ib_free_srq_wqe()
cq.c
517 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index), in mlx4_ib_handle_error_cqe()
739 wqe_ctr = be16_to_cpu(cqe->wqe_index); in mlx4_ib_poll_one()
746 wqe_ctr = be16_to_cpu(cqe->wqe_index); in mlx4_ib_poll_one()
751 wqe_ctr = be16_to_cpu(cqe->wqe_index); in mlx4_ib_poll_one()
948 mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index)); in __mlx4_ib_cq_clean()
mlx4_ib.h
767 void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
/Linux-v5.4/include/linux/mlx5/
device.h
633 __be16 wqe_index; member
mlx5_ifc.h
2478 u8 wqe_index[0x10]; member
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx4/
en_tx.c
440 new_index = be16_to_cpu(cqe->wqe_index) & size_mask; in mlx4_en_process_tx_cq()
en_netdev.c
1729 cq->buf->wqe_index = cpu_to_be16(0xffff); in mlx4_en_start_port()
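
mlx4_en_process_tx_cq() treats cqe->wqe_index as a free-running 16-bit counter and masks it with size_mask (ring size minus one) to find the TX ring slot, while mlx4_en_start_port() seeds the stored value with 0xffff before the ring starts. A short sketch of the masking step, assuming a power-of-two ring:

    #include <stdio.h>
    #include <stdint.h>

    /* A completion carries a free-running 16-bit WQE counter; with a
     * power-of-two ring, counter & (ring_size - 1) is the slot it refers to. */
    static uint32_t wqe_counter_to_slot(uint16_t wqe_counter, uint32_t ring_size)
    {
        uint32_t size_mask = ring_size - 1;

        return wqe_counter & size_mask;
    }

    int main(void)
    {
        /* 1024-entry ring: counter 0x1403 lands on slot 3. */
        printf("%u\n", wqe_counter_to_slot(0x1403, 1024));
        return 0;
    }
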
/Linux-v5.4/drivers/infiniband/hw/hns/
hns_roce_hw_v2.c
2473 static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index) in hns_roce_free_srq_wqe() argument
2478 bitmap_clear(srq->idx_que.bitmap, wqe_index, 1); in hns_roce_free_srq_wqe()
2495 int wqe_index; in __hns_roce_v2_cq_clean() local
2515 wqe_index = roce_get_field(cqe->byte_4, in __hns_roce_v2_cq_clean()
2518 hns_roce_free_srq_wqe(srq, wqe_index); in __hns_roce_v2_cq_clean()
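
Finally, the hns_roce path reads wqe_index out of the CQE with roce_get_field() and frees the SRQ slot by clearing its bit in the idx_que bitmap. A toy clear-on-free bitmap along those lines, using hand-rolled bit operations rather than the kernel bitmap helpers:

    #include <stdio.h>

    /* One bit per SRQ WQE slot: set while the slot is posted, cleared when the
     * completion for that wqe_index is consumed. Helpers are invented here. */
    static unsigned long long idx_bitmap;

    static void mark_posted(int wqe_index)
    {
        idx_bitmap |= 1ULL << wqe_index;
    }

    static void free_srq_wqe(int wqe_index)
    {
        idx_bitmap &= ~(1ULL << wqe_index);
    }

    int main(void)
    {
        mark_posted(5);
        mark_posted(9);
        free_srq_wqe(5);                      /* completion consumed slot 5 */
        printf("0x%llx\n", idx_bitmap);       /* only bit 9 remains: 0x200 */
        return 0;
    }
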