
Searched refs:cq (Results 1 – 25 of 250) sorted by relevance


/Linux-v4.19/drivers/net/ethernet/intel/ice/
ice_controlq.c
14 struct ice_ctl_q_info *cq = &hw->adminq; in ice_adminq_init_regs() local
16 cq->sq.head = PF_FW_ATQH; in ice_adminq_init_regs()
17 cq->sq.tail = PF_FW_ATQT; in ice_adminq_init_regs()
18 cq->sq.len = PF_FW_ATQLEN; in ice_adminq_init_regs()
19 cq->sq.bah = PF_FW_ATQBAH; in ice_adminq_init_regs()
20 cq->sq.bal = PF_FW_ATQBAL; in ice_adminq_init_regs()
21 cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M; in ice_adminq_init_regs()
22 cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M; in ice_adminq_init_regs()
23 cq->sq.head_mask = PF_FW_ATQH_ATQH_M; in ice_adminq_init_regs()
25 cq->rq.head = PF_FW_ARQH; in ice_adminq_init_regs()
[all …]
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx4/
en_cq.c
40 static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event) in mlx4_en_cq_event() argument
52 struct mlx4_en_cq *cq; in mlx4_en_create_cq() local
55 cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node); in mlx4_en_create_cq()
56 if (!cq) { in mlx4_en_create_cq()
57 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in mlx4_en_create_cq()
58 if (!cq) { in mlx4_en_create_cq()
64 cq->size = entries; in mlx4_en_create_cq()
65 cq->buf_size = cq->size * mdev->dev->caps.cqe_size; in mlx4_en_create_cq()
67 cq->ring = ring; in mlx4_en_create_cq()
68 cq->type = mode; in mlx4_en_create_cq()
[all …]
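
The mlx4_en_create_cq() excerpt above shows a common NUMA-aware allocation fallback: try to place the CQ on the device's home node first, then retry on any node before giving up. A minimal sketch of that pattern, assuming a hypothetical struct my_cq:

```c
#include <linux/slab.h>

struct my_cq { int size; };	/* hypothetical stand-in for struct mlx4_en_cq */

static struct my_cq *my_cq_alloc(int node)
{
	struct my_cq *cq;

	/* Prefer memory on the device's NUMA node for hot-path locality... */
	cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
	if (!cq) {
		/* ...but a remote-node CQ still beats failing the allocation. */
		cq = kzalloc(sizeof(*cq), GFP_KERNEL);
		if (!cq)
			return NULL;
	}
	return cq;
}
```
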
cq.c
82 static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq) in mlx4_add_cq_to_tasklet() argument
84 struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv; in mlx4_add_cq_to_tasklet()
94 if (list_empty_careful(&cq->tasklet_ctx.list)) { in mlx4_add_cq_to_tasklet()
95 refcount_inc(&cq->refcount); in mlx4_add_cq_to_tasklet()
97 list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); in mlx4_add_cq_to_tasklet()
106 struct mlx4_cq *cq; in mlx4_cq_completion() local
109 cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, in mlx4_cq_completion()
113 if (!cq) { in mlx4_cq_completion()
121 ++cq->arm_sn; in mlx4_cq_completion()
123 cq->comp(cq); in mlx4_cq_completion()
[all …]
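
mlx4_add_cq_to_tasklet() queues a CQ for deferred completion processing at most once, taking a reference that the tasklet drops after running the completion callback. A hedged sketch of the idea (names hypothetical; in the real code this runs under the tasklet list's spinlock):

```c
#include <linux/list.h>
#include <linux/refcount.h>

struct my_cq {
	refcount_t refcount;
	struct list_head tasklet_node;	/* empty while not queued */
};

/* Caller is assumed to hold the lock protecting tasklet_list. */
static void my_queue_cq(struct my_cq *cq, struct list_head *tasklet_list)
{
	if (list_empty_careful(&cq->tasklet_node)) {
		refcount_inc(&cq->refcount);	/* dropped after comp() runs */
		list_add_tail(&cq->tasklet_node, tasklet_list);
	}
}
```
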
/Linux-v4.19/drivers/infiniband/core/
cq.c
29 static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs, in __ib_process_cq() argument
39 while ((n = ib_poll_cq(cq, min_t(u32, batch, in __ib_process_cq()
45 wc->wr_cqe->done(cq, wc); in __ib_process_cq()
73 int ib_process_cq_direct(struct ib_cq *cq, int budget) in ib_process_cq_direct() argument
77 return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT); in ib_process_cq_direct()
81 static void ib_cq_completion_direct(struct ib_cq *cq, void *private) in ib_cq_completion_direct() argument
83 WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq); in ib_cq_completion_direct()
88 struct ib_cq *cq = container_of(iop, struct ib_cq, iop); in ib_poll_handler() local
91 completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH); in ib_poll_handler()
93 irq_poll_complete(&cq->iop); in ib_poll_handler()
[all …]
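
__ib_process_cq() drains completions in fixed-size batches against a budget, dispatching each one through its work request's done callback. A simplified sketch of that loop using the standard ib_poll_cq() verb (the helper name is hypothetical; the re-arm bookkeeping is omitted):

```c
#include <rdma/ib_verbs.h>

static int my_process_cq(struct ib_cq *cq, int budget,
			 struct ib_wc *wcs, int batch)
{
	int completed = 0, n, i;

	/* Poll up to 'batch' completions at a time, never exceeding budget. */
	while ((n = ib_poll_cq(cq, min_t(u32, batch, budget - completed),
			       wcs)) > 0) {
		for (i = 0; i < n; i++)
			if (wcs[i].wr_cqe)
				wcs[i].wr_cqe->done(cq, &wcs[i]);

		completed += n;
		/* A short batch means the CQ is empty; also stop at budget. */
		if (n != batch || completed >= budget)
			break;
	}
	return completed;
}
```
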
uverbs_std_types_cq.c
40 struct ib_cq *cq = uobject->object; in uverbs_free_cq() local
41 struct ib_uverbs_event_queue *ev_queue = cq->cq_context; in uverbs_free_cq()
46 ret = ib_destroy_cq(cq); in uverbs_free_cq()
71 struct ib_cq *cq; in UVERBS_HANDLER() local
117 cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context, &uhw); in UVERBS_HANDLER()
118 if (IS_ERR(cq)) { in UVERBS_HANDLER()
119 ret = PTR_ERR(cq); in UVERBS_HANDLER()
123 cq->device = ib_dev; in UVERBS_HANDLER()
124 cq->uobject = &obj->uobject; in UVERBS_HANDLER()
125 cq->comp_handler = ib_uverbs_comp_handler; in UVERBS_HANDLER()
[all …]
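
The UVERBS_HANDLER() excerpt relies on the kernel's ERR_PTR convention: create_cq() returns either a valid pointer or an errno encoded into the pointer value, decoded with IS_ERR()/PTR_ERR(). A minimal sketch of that convention (my_create()/my_obj are hypothetical):

```c
#include <linux/err.h>
#include <linux/slab.h>

struct my_obj { int id; };

static struct my_obj *my_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return ERR_PTR(-ENOMEM);	/* encode errno in the pointer */
	return obj;
}

static int my_caller(void)
{
	struct my_obj *obj = my_create();

	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* decode and propagate the errno */
	/* ... use obj ... */
	kfree(obj);
	return 0;
}
```
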
/Linux-v4.19/drivers/infiniband/sw/rxe/
rxe_cq.c
38 int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, in rxe_cq_chk_attr() argument
54 if (cq) { in rxe_cq_chk_attr()
55 count = queue_count(cq->queue); in rxe_cq_chk_attr()
71 struct rxe_cq *cq = (struct rxe_cq *)data; in rxe_send_complete() local
74 spin_lock_irqsave(&cq->cq_lock, flags); in rxe_send_complete()
75 if (cq->is_dying) { in rxe_send_complete()
76 spin_unlock_irqrestore(&cq->cq_lock, flags); in rxe_send_complete()
79 spin_unlock_irqrestore(&cq->cq_lock, flags); in rxe_send_complete()
81 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in rxe_send_complete()
84 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, in rxe_cq_from_init() argument
[all …]
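
rxe_send_complete() guards against a race with CQ destruction: the is_dying flag is tested under cq_lock, and the completion handler is only invoked after the lock has been dropped. A hedged sketch of the same structure (struct and names hypothetical):

```c
#include <linux/spinlock.h>

struct my_cq {
	spinlock_t lock;
	bool is_dying;
	void (*comp_handler)(struct my_cq *cq);
};

static void my_send_complete(struct my_cq *cq)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->lock, flags);
	if (cq->is_dying) {		/* destroy already started; bail out */
		spin_unlock_irqrestore(&cq->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	cq->comp_handler(cq);		/* invoked without the lock held */
}
```
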
/Linux-v4.19/drivers/infiniband/hw/mthca/
mthca_cq.c
169 static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry) in get_cqe() argument
171 return get_cqe_from_buf(&cq->buf, entry); in get_cqe()
179 static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq) in next_cqe_sw() argument
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw()
204 static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, in update_cons_index() argument
208 *cq->set_ci_db = cpu_to_be32(cq->cons_index); in update_cons_index()
211 mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1, in update_cons_index()
224 struct mthca_cq *cq; in mthca_cq_completion() local
226 cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); in mthca_cq_completion()
228 if (!cq) { in mthca_cq_completion()
[all …]
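
next_cqe_sw() above indexes the ring with cq->cons_index & cq->ibcq.cqe: these drivers size the ring as a power of two and advertise one less than that as cqe, so the entry count doubles as a wrap mask and the free-running consumer index never needs an explicit modulo. A standalone model of that indexing:

```c
#include <stdint.h>

struct model_cq {
	uint32_t cons_index;	/* free-running; never wrapped explicitly */
	uint32_t cqe_mask;	/* ring entries - 1, ring size a power of two */
};

/* Returns the ring slot holding the next entry to consume. */
static uint32_t model_next_slot(const struct model_cq *cq)
{
	return cq->cons_index & cq->cqe_mask;	/* cheap modulo */
}
```
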
/Linux-v4.19/drivers/infiniband/sw/rdmavt/
cq.c
64 void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited) in rvt_cq_enter() argument
71 spin_lock_irqsave(&cq->lock, flags); in rvt_cq_enter()
77 wc = cq->queue; in rvt_cq_enter()
79 if (head >= (unsigned)cq->ibcq.cqe) { in rvt_cq_enter()
80 head = cq->ibcq.cqe; in rvt_cq_enter()
87 spin_unlock_irqrestore(&cq->lock, flags); in rvt_cq_enter()
88 if (cq->ibcq.event_handler) { in rvt_cq_enter()
91 ev.device = cq->ibcq.device; in rvt_cq_enter()
92 ev.element.cq = &cq->ibcq; in rvt_cq_enter()
94 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rvt_cq_enter()
[all …]
/Linux-v4.19/drivers/infiniband/hw/mlx4/
cq.c
42 static void mlx4_ib_cq_comp(struct mlx4_cq *cq) in mlx4_ib_cq_comp() argument
44 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_comp()
48 static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type) in mlx4_ib_cq_event() argument
55 "on CQ %06x\n", type, cq->cqn); in mlx4_ib_cq_event()
59 ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_event()
63 event.element.cq = ibcq; in mlx4_ib_cq_event()
73 static void *get_cqe(struct mlx4_ib_cq *cq, int n) in get_cqe() argument
75 return get_cqe_from_buf(&cq->buf, n); in get_cqe()
78 static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n) in get_sw_cqe() argument
80 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
[all …]
/Linux-v4.19/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_cq.c
66 struct pvrdma_cq *cq = to_vcq(ibcq); in pvrdma_req_notify_cq() local
67 u32 val = cq->cq_handle; in pvrdma_req_notify_cq()
74 spin_lock_irqsave(&cq->cq_lock, flags); in pvrdma_req_notify_cq()
81 has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx, in pvrdma_req_notify_cq()
82 cq->ibcq.cqe, &head); in pvrdma_req_notify_cq()
87 spin_unlock_irqrestore(&cq->cq_lock, flags); in pvrdma_req_notify_cq()
109 struct pvrdma_cq *cq; in pvrdma_create_cq() local
129 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in pvrdma_create_cq()
130 if (!cq) { in pvrdma_create_cq()
135 cq->ibcq.cqe = entries; in pvrdma_create_cq()
[all …]
/Linux-v4.19/drivers/net/ethernet/cisco/enic/
vnic_cq.c
29 void vnic_cq_free(struct vnic_cq *cq) in vnic_cq_free() argument
31 vnic_dev_free_desc_ring(cq->vdev, &cq->ring); in vnic_cq_free()
33 cq->ctrl = NULL; in vnic_cq_free()
36 int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, in vnic_cq_alloc() argument
41 cq->index = index; in vnic_cq_alloc()
42 cq->vdev = vdev; in vnic_cq_alloc()
44 cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); in vnic_cq_alloc()
45 if (!cq->ctrl) { in vnic_cq_alloc()
50 err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); in vnic_cq_alloc()
57 void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, in vnic_cq_init() argument
[all …]
vnic_cq.h
72 static inline unsigned int vnic_cq_service(struct vnic_cq *cq, in vnic_cq_service() argument
83 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service()
84 cq->ring.desc_size * cq->to_clean); in vnic_cq_service()
88 while (color != cq->last_color) { in vnic_cq_service()
90 if ((*q_service)(cq->vdev, cq_desc, type, in vnic_cq_service()
94 cq->to_clean++; in vnic_cq_service()
95 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_service()
96 cq->to_clean = 0; in vnic_cq_service()
97 cq->last_color = cq->last_color ? 0 : 1; in vnic_cq_service()
100 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service()
[all …]
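
vnic_cq_service() polls with a color bit rather than a shared producer index: the hardware writes each descriptor with a color that flips on every ring wrap, so any descriptor whose color differs from last_color is new. The same loop recurs in the snic and fnic variants below. A standalone model of the consumer side (types and names hypothetical):

```c
#include <stdint.h>

struct model_desc {
	uint8_t color;		/* flipped by the producer on each ring wrap */
	/* payload omitted */
};

struct model_cq {
	struct model_desc *descs;
	unsigned int desc_count;
	unsigned int to_clean;	/* next slot to consume */
	uint8_t last_color;	/* color of entries already consumed */
};

static unsigned int model_cq_service(struct model_cq *cq,
				     unsigned int work_to_do)
{
	struct model_desc *desc = &cq->descs[cq->to_clean];
	unsigned int done = 0;

	while (desc->color != cq->last_color && done < work_to_do) {
		/* ... hand the descriptor to the queue service routine ... */
		if (++cq->to_clean == cq->desc_count) {
			cq->to_clean = 0;
			cq->last_color = !cq->last_color; /* we wrapped too */
		}
		desc = &cq->descs[cq->to_clean];
		done++;
	}
	return done;	/* number of descriptors serviced */
}
```
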
/Linux-v4.19/drivers/scsi/snic/
vnic_cq.c
24 void svnic_cq_free(struct vnic_cq *cq) in svnic_cq_free() argument
26 svnic_dev_free_desc_ring(cq->vdev, &cq->ring); in svnic_cq_free()
28 cq->ctrl = NULL; in svnic_cq_free()
31 int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, in svnic_cq_alloc() argument
36 cq->index = index; in svnic_cq_alloc()
37 cq->vdev = vdev; in svnic_cq_alloc()
39 cq->ctrl = svnic_dev_get_res(vdev, RES_TYPE_CQ, index); in svnic_cq_alloc()
40 if (!cq->ctrl) { in svnic_cq_alloc()
46 err = svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); in svnic_cq_alloc()
53 void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, in svnic_cq_init() argument
[all …]
vnic_cq_fw.h
24 vnic_cq_fw_service(struct vnic_cq *cq, in vnic_cq_fw_service() argument
35 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_fw_service()
36 cq->ring.desc_size * cq->to_clean); in vnic_cq_fw_service()
39 while (color != cq->last_color) { in vnic_cq_fw_service()
41 if ((*q_service)(cq->vdev, cq->index, desc)) in vnic_cq_fw_service()
44 cq->to_clean++; in vnic_cq_fw_service()
45 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_fw_service()
46 cq->to_clean = 0; in vnic_cq_fw_service()
47 cq->last_color = cq->last_color ? 0 : 1; in vnic_cq_fw_service()
50 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_fw_service()
[all …]
vnic_cq.h
60 static inline unsigned int svnic_cq_service(struct vnic_cq *cq, in svnic_cq_service() argument
71 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in svnic_cq_service()
72 cq->ring.desc_size * cq->to_clean); in svnic_cq_service()
76 while (color != cq->last_color) { in svnic_cq_service()
78 if ((*q_service)(cq->vdev, cq_desc, type, in svnic_cq_service()
82 cq->to_clean++; in svnic_cq_service()
83 if (cq->to_clean == cq->ring.desc_count) { in svnic_cq_service()
84 cq->to_clean = 0; in svnic_cq_service()
85 cq->last_color = cq->last_color ? 0 : 1; in svnic_cq_service()
88 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in svnic_cq_service()
[all …]
/Linux-v4.19/drivers/scsi/fnic/
vnic_cq.c
24 void vnic_cq_free(struct vnic_cq *cq) in vnic_cq_free() argument
26 vnic_dev_free_desc_ring(cq->vdev, &cq->ring); in vnic_cq_free()
28 cq->ctrl = NULL; in vnic_cq_free()
31 int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, in vnic_cq_alloc() argument
36 cq->index = index; in vnic_cq_alloc()
37 cq->vdev = vdev; in vnic_cq_alloc()
39 cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); in vnic_cq_alloc()
40 if (!cq->ctrl) { in vnic_cq_alloc()
45 err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); in vnic_cq_alloc()
52 void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, in vnic_cq_init() argument
[all …]
vnic_cq_copy.h
24 struct vnic_cq *cq, in vnic_cq_copy_service() argument
35 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_copy_service()
36 cq->ring.desc_size * cq->to_clean); in vnic_cq_copy_service()
39 while (color != cq->last_color) { in vnic_cq_copy_service()
41 if ((*q_service)(cq->vdev, cq->index, desc)) in vnic_cq_copy_service()
44 cq->to_clean++; in vnic_cq_copy_service()
45 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_copy_service()
46 cq->to_clean = 0; in vnic_cq_copy_service()
47 cq->last_color = cq->last_color ? 0 : 1; in vnic_cq_copy_service()
50 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_copy_service()
[all …]
vnic_cq.h
70 static inline unsigned int vnic_cq_service(struct vnic_cq *cq, in vnic_cq_service() argument
81 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service()
82 cq->ring.desc_size * cq->to_clean); in vnic_cq_service()
86 while (color != cq->last_color) { in vnic_cq_service()
88 if ((*q_service)(cq->vdev, cq_desc, type, in vnic_cq_service()
92 cq->to_clean++; in vnic_cq_service()
93 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_service()
94 cq->to_clean = 0; in vnic_cq_service()
95 cq->last_color = cq->last_color ? 0 : 1; in vnic_cq_service()
98 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service()
[all …]
/Linux-v4.19/drivers/infiniband/hw/mlx5/
cq.c
39 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq) in mlx5_ib_cq_comp() argument
41 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx5_ib_cq_comp()
48 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq); in mlx5_ib_cq_event() local
49 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_ib_cq_event()
50 struct ib_cq *ibcq = &cq->ibcq; in mlx5_ib_cq_event()
62 event.element.cq = ibcq; in mlx5_ib_cq_event()
67 static void *get_cqe(struct mlx5_ib_cq *cq, int n) in get_cqe() argument
69 return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n); in get_cqe()
77 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) in get_sw_cqe() argument
79 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
[all …]
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx5/core/
cq.c
70 static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq) in mlx5_add_cq_to_tasklet() argument
73 struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv; in mlx5_add_cq_to_tasklet()
81 if (list_empty_careful(&cq->tasklet_ctx.list)) { in mlx5_add_cq_to_tasklet()
82 mlx5_cq_hold(cq); in mlx5_add_cq_to_tasklet()
83 list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); in mlx5_add_cq_to_tasklet()
88 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, in mlx5_core_create_cq() argument
108 cq->cqn = MLX5_GET(create_cq_out, out, cqn); in mlx5_core_create_cq()
109 cq->cons_index = 0; in mlx5_core_create_cq()
110 cq->arm_sn = 0; in mlx5_core_create_cq()
111 cq->eq = eq; in mlx5_core_create_cq()
[all …]
/Linux-v4.19/drivers/infiniband/hw/cxgb4/
cq.c
35 static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, in destroy_cq() argument
55 res->u.cq.restype = FW_RI_RES_TYPE_CQ; in destroy_cq()
56 res->u.cq.op = FW_RI_RES_OP_RESET; in destroy_cq()
57 res->u.cq.iqid = cpu_to_be32(cq->cqid); in destroy_cq()
62 kfree(cq->sw_queue); in destroy_cq()
64 cq->memsize, cq->queue, in destroy_cq()
65 dma_unmap_addr(cq, mapping)); in destroy_cq()
66 c4iw_put_cqid(rdev, cq->cqid, uctx); in destroy_cq()
70 static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, in create_cq() argument
85 cq->cqid = c4iw_get_cqid(rdev, uctx); in create_cq()
[all …]
t4.h
717 static inline void write_gts(struct t4_cq *cq, u32 val) in write_gts() argument
719 if (cq->bar2_va) in write_gts()
720 writel(val | INGRESSQID_V(cq->bar2_qid), in write_gts()
721 cq->bar2_va + SGE_UDB_GTS); in write_gts()
723 writel(val | INGRESSQID_V(cq->cqid), cq->gts); in write_gts()
726 static inline int t4_clear_cq_armed(struct t4_cq *cq) in t4_clear_cq_armed() argument
728 return test_and_clear_bit(CQ_ARMED, &cq->flags); in t4_clear_cq_armed()
731 static inline int t4_arm_cq(struct t4_cq *cq, int se) in t4_arm_cq() argument
735 set_bit(CQ_ARMED, &cq->flags); in t4_arm_cq()
736 while (cq->cidx_inc > CIDXINC_M) { in t4_arm_cq()
[all …]
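
t4_arm_cq()/t4_clear_cq_armed() coordinate notification arming through an atomic flag bit: the arm path sets it before ringing the doorbell, and the consuming path claims it exactly once with test_and_clear_bit(). A hedged sketch (MY_CQ_ARMED and the struct are stand-ins):

```c
#include <linux/bitops.h>

#define MY_CQ_ARMED 0	/* bit number within flags */

struct my_cq {
	unsigned long flags;
};

static void my_arm_cq(struct my_cq *cq)
{
	set_bit(MY_CQ_ARMED, &cq->flags);	/* notification requested */
	/* ... write the GTS doorbell to arm the hardware ... */
}

static int my_clear_cq_armed(struct my_cq *cq)
{
	/* Nonzero only for the first caller after arming: atomically reads
	 * and clears the bit, so the wakeup is consumed exactly once. */
	return test_and_clear_bit(MY_CQ_ARMED, &cq->flags);
}
```
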
restrack.c
274 static int fill_cq(struct sk_buff *msg, struct t4_cq *cq) in fill_cq() argument
276 if (rdma_nl_put_driver_u32(msg, "cqid", cq->cqid)) in fill_cq()
278 if (rdma_nl_put_driver_u32(msg, "memsize", cq->memsize)) in fill_cq()
280 if (rdma_nl_put_driver_u32(msg, "size", cq->size)) in fill_cq()
282 if (rdma_nl_put_driver_u32(msg, "cidx", cq->cidx)) in fill_cq()
284 if (rdma_nl_put_driver_u32(msg, "cidx_inc", cq->cidx_inc)) in fill_cq()
286 if (rdma_nl_put_driver_u32(msg, "sw_cidx", cq->sw_cidx)) in fill_cq()
288 if (rdma_nl_put_driver_u32(msg, "sw_pidx", cq->sw_pidx)) in fill_cq()
290 if (rdma_nl_put_driver_u32(msg, "sw_in_use", cq->sw_in_use)) in fill_cq()
292 if (rdma_nl_put_driver_u32(msg, "vector", cq->vector)) in fill_cq()
[all …]
/Linux-v4.19/include/linux/mlx5/
cq.h
141 static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq) in mlx5_cq_set_ci() argument
143 *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff); in mlx5_cq_set_ci()
151 static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, in mlx5_cq_arm() argument
159 sn = cq->arm_sn & 3; in mlx5_cq_arm()
162 *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); in mlx5_cq_arm()
170 doorbell[1] = cpu_to_be32(cq->cqn); in mlx5_cq_arm()
175 static inline void mlx5_cq_hold(struct mlx5_core_cq *cq) in mlx5_cq_hold() argument
177 refcount_inc(&cq->refcount); in mlx5_cq_hold()
180 static inline void mlx5_cq_put(struct mlx5_core_cq *cq) in mlx5_cq_put() argument
182 if (refcount_dec_and_test(&cq->refcount)) in mlx5_cq_put()
[all …]
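
The mlx5_cq_set_ci()/mlx5_cq_arm() excerpt shows the doorbell encodings: the consumer index is truncated to 24 bits, and the arm word packs a 2-bit sequence number, the command, and the index before the big-endian doorbell write. A standalone model of just the bit layout (names hypothetical; the cpu_to_be32() conversion is omitted):

```c
#include <stdint.h>

#define MODEL_CI_MASK 0xffffff	/* consumer index is 24 bits wide */

static uint32_t model_set_ci_word(uint32_t cons_index)
{
	return cons_index & MODEL_CI_MASK;
}

static uint32_t model_arm_word(uint32_t arm_sn, uint32_t cmd,
			       uint32_t cons_index)
{
	uint32_t sn = arm_sn & 3;		/* 2-bit arm sequence number */
	uint32_t ci = cons_index & MODEL_CI_MASK;

	return sn << 28 | cmd | ci;		/* matches *cq->arm_db above */
}
```
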
/Linux-v4.19/drivers/infiniband/hw/cxgb3/
cxio_hal.c
71 int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq, in cxio_hal_cq_op() argument
79 setup.id = cq->cqid; in cxio_hal_cq_op()
92 if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) { in cxio_hal_cq_op()
95 rptr = cq->rptr; in cxio_hal_cq_op()
101 while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret) in cxio_hal_cq_op()
109 cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2); in cxio_hal_cq_op()
110 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) { in cxio_hal_cq_op()
156 int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel) in cxio_create_cq() argument
159 int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe); in cxio_create_cq()
162 cq->cqid = cxio_hal_get_cqid(rdev_p->rscp); in cxio_create_cq()
[all …]
