Lines matching refs:cq in drivers/infiniband/core/cq.c, grouped by the function each hit falls in

In __ib_process_cq(), the shared poll loop behind all three completion contexts:

static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
			   int batch)
	...
	/* budget may be -1 (unbounded), hence the unsigned min_t */
	while ((n = ib_poll_cq(cq, min_t(u32, batch,
					 budget - completed), wcs)) > 0) {
		...
		/* dispatch each reaped completion to its per-WR callback */
		wc->wr_cqe->done(cq, wc);
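
Each work request posted against a CQ managed by these helpers carries a struct ib_cqe whose .done callback is what the dispatch line above invokes. A minimal consumer-side sketch of that pattern; the names my_request and my_send_done are hypothetical, not part of cq.c:

	#include <linux/slab.h>
	#include <rdma/ib_verbs.h>

	/* A request object embedding the completion callback. */
	struct my_request {
		struct ib_cqe	cqe;	/* wr.wr_cqe points here when posting */
		void		*buf;
	};

	static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		/* Recover the containing request from the cqe handed back. */
		struct my_request *req =
			container_of(wc->wr_cqe, struct my_request, cqe);

		if (wc->status != IB_WC_SUCCESS)
			pr_err("send failed: %d\n", wc->status);
		kfree(req->buf);
	}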

In ib_process_cq_direct(), direct polling from the caller's own context:

int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
	struct ib_wc wcs[IB_POLL_BATCH_DIRECT];

	return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
}
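
With IB_POLL_DIRECT the consumer drives polling itself and no interrupt handler touches the CQ; ib_process_cq_direct() returns the number of completions it processed. A hedged usage sketch, where the 64-entry budget and the helper name are illustrative only:

	/* Drain everything currently queued on an IB_POLL_DIRECT CQ. */
	static void my_drain_cq(struct ib_cq *cq)
	{
		/* A return shorter than the budget means the CQ is empty. */
		while (ib_process_cq_direct(cq, 64) >= 64)
			;
	}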

In ib_cq_completion_direct():

static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
{
	/* Direct CQs are never armed; any event here signals a driver bug. */
	WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
}

In ib_poll_handler(), the softirq (irq_poll) path:

static int ib_poll_handler(struct irq_poll *iop, int budget)
{
	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
	int completed;

	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
	if (completed < budget) {
		irq_poll_complete(&cq->iop);
		/* re-arm; reschedule if completions slipped in meanwhile */
		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
			irq_poll_sched(&cq->iop);
	}
	return completed;
}
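
The notify-then-recheck above closes a race: a completion landing between the last poll and ib_req_notify_cq() would otherwise sit unprocessed until the next event. With IB_CQ_REPORT_MISSED_EVENTS (part of IB_POLL_FLAGS), ib_req_notify_cq() returns a positive value in exactly that window, so the handler reschedules itself. The same idiom in generic form; this is a sketch, not cq.c code, and poll_once() is a hypothetical stand-in for any drain routine:

	/* Drain, arm, re-drain until arming reports no missed events. */
	static void my_drain_and_rearm(struct ib_cq *cq)
	{
		do {
			poll_once(cq);	/* hypothetical: reap all queued CQEs */
		} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
					  IB_CQ_REPORT_MISSED_EVENTS) > 0);
	}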

In ib_cq_completion_softirq():

static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
{
	irq_poll_sched(&cq->iop);
}

In ib_cq_poll_work(), the workqueue path:

static void ib_cq_poll_work(struct work_struct *work)
{
	struct ib_cq *cq = container_of(work, struct ib_cq, work);
	int completed;

	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
				    IB_POLL_BATCH);
	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
		queue_work(ib_comp_wq, &cq->work);
}
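
Because ib_cq_poll_work() runs from the ib_comp_wq workqueue, i.e. process context, .done callbacks on IB_POLL_WORKQUEUE CQs may sleep, which the softirq path forbids. A sketch of a callback that is legal only on a workqueue CQ; my_recv_done and its buffer handling are hypothetical:

	#include <linux/sizes.h>
	#include <linux/slab.h>
	#include <rdma/ib_verbs.h>

	static void my_recv_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		void *copy;

		if (wc->status != IB_WC_SUCCESS)
			return;

		/* GFP_KERNEL may sleep: safe only because this CQ is
		 * polled from process context, never from softirq. */
		copy = kmalloc(SZ_4K, GFP_KERNEL);
		if (!copy)
			return;
		/* hand the buffer off to slower-path processing here */
		kfree(copy);
	}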

In ib_cq_completion_workqueue():

static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
	queue_work(ib_comp_wq, &cq->work);
}

In __ib_alloc_cq(), which creates the CQ and wires up the chosen poll context:

struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
			    int nr_cqe, int comp_vector,
			    enum ib_poll_context poll_ctx, const char *caller)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe		= nr_cqe,
		.comp_vector	= comp_vector,
	};
	struct ib_cq *cq;
	int ret = -ENOMEM;

	cq = dev->create_cq(dev, &cq_attr, NULL, NULL);
	if (IS_ERR(cq))
		return cq;

	cq->device = dev;
	cq->uobject = NULL;
	cq->event_handler = NULL;
	cq->cq_context = private;
	cq->poll_ctx = poll_ctx;
	atomic_set(&cq->usecnt, 0);

	cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
	if (!cq->wc)
		goto out_destroy_cq;

	/* register the CQ with the restrack resource tracker */
	cq->res.type = RDMA_RESTRACK_CQ;
	cq->res.kern_name = caller;
	rdma_restrack_add(&cq->res);

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		cq->comp_handler = ib_cq_completion_direct;
		break;
	case IB_POLL_SOFTIRQ:
		cq->comp_handler = ib_cq_completion_softirq;
		irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);	/* arm the CQ */
		break;
	case IB_POLL_WORKQUEUE:
		cq->comp_handler = ib_cq_completion_workqueue;
		INIT_WORK(&cq->work, ib_cq_poll_work);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	default:
		ret = -EINVAL;
		goto out_free_wc;
	}

	return cq;

out_free_wc:
	kfree(cq->wc);
	rdma_restrack_del(&cq->res);
out_destroy_cq:
	cq->device->destroy_cq(cq);
	return ERR_PTR(ret);
}

In ib_free_cq(), which quiesces the poll context before destroying the CQ:

void ib_free_cq(struct ib_cq *cq)
{
	int ret;

	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
		return;

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		break;
	case IB_POLL_SOFTIRQ:
		irq_poll_disable(&cq->iop);
		break;
	case IB_POLL_WORKQUEUE:
		cancel_work_sync(&cq->work);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	kfree(cq->wc);
	rdma_restrack_del(&cq->res);
	ret = cq->device->destroy_cq(cq);
	WARN_ON_ONCE(ret);
}
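
Taken together, a consumer's CQ lifecycle under these helpers might look like the sketch below. ib_alloc_cq() is the wrapper that supplies KBUILD_MODNAME as the caller string to __ib_alloc_cq(); the 128-CQE size, vector 0, and function name are illustrative assumptions:

	#include <linux/err.h>
	#include <rdma/ib_verbs.h>

	/* Hypothetical setup/teardown pairing for a workqueue-polled CQ. */
	static int my_cq_example(struct ib_device *dev)
	{
		struct ib_cq *cq;

		/* 128 CQEs on completion vector 0, polled from ib_comp_wq */
		cq = ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_WORKQUEUE);
		if (IS_ERR(cq))
			return PTR_ERR(cq);

		/* ... create QPs that use cq, post work requests ... */

		ib_free_cq(cq);		/* cancels poll work, then destroys */
		return 0;
	}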