Lines matching refs:cb — cross-reference hits for the callback pointer cb (struct mei_cl_cb *) in the MEI interrupt handling code. Each entry gives the source line number, the matched line, and the containing function; the trailing local/argument note marks how cb is declared at that site.
31 struct mei_cl_cb *cb, *next; in mei_irq_compl_handler() local
34 list_for_each_entry_safe(cb, next, cmpl_list, list) { in mei_irq_compl_handler()
35 cl = cb->cl; in mei_irq_compl_handler()
36 list_del_init(&cb->list); in mei_irq_compl_handler()
39 mei_cl_complete(cl, cb); in mei_irq_compl_handler()
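
The mei_irq_compl_handler() matches above show the completion-drain pattern: the handler walks the completion list with the _safe iterator because every cb is unlinked inside the loop body before being handed back to its client. A minimal sketch of just that pattern, assuming kernel context (struct mei_cl, struct mei_cl_cb and mei_cl_complete() come from the driver's own headers; only the fields visible in the matches, cb->list and cb->cl, are taken from the source):

/*
 * Sketch only: mirrors the drain loop in the mei_irq_compl_handler()
 * matches above, nothing beyond what those lines show.
 */
#include <linux/list.h>

static void compl_drain_sketch(struct list_head *cmpl_list)
{
	struct mei_cl_cb *cb, *next;
	struct mei_cl *cl;

	/* _safe iterator: cb is removed from the list inside the body */
	list_for_each_entry_safe(cb, next, cmpl_list, list) {
		cl = cb->cl;
		list_del_init(&cb->list);	/* detach before completing */
		mei_cl_complete(cl, cb);	/* hand the finished cb back to its client */
	}
}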
99 struct mei_cl_cb *cb; in mei_cl_irq_read_msg() local
112 cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); in mei_cl_irq_read_msg()
113 if (!cb) { in mei_cl_irq_read_msg()
118 cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp); in mei_cl_irq_read_msg()
119 if (!cb) in mei_cl_irq_read_msg()
121 list_add_tail(&cb->list, &cl->rd_pending); in mei_cl_irq_read_msg()
137 cb->status = -EPROTO; in mei_cl_irq_read_msg()
146 cb->status = -EPROTO; in mei_cl_irq_read_msg()
151 if (cb->vtag && cb->vtag != vtag_hdr->vtag) { in mei_cl_irq_read_msg()
153 cb->vtag, vtag_hdr->vtag); in mei_cl_irq_read_msg()
154 cb->status = -EPROTO; in mei_cl_irq_read_msg()
157 cb->vtag = vtag_hdr->vtag; in mei_cl_irq_read_msg()
162 cb->status = -ENODEV; in mei_cl_irq_read_msg()
169 buf_sz = length + cb->buf_idx; in mei_cl_irq_read_msg()
171 if (buf_sz < cb->buf_idx) { in mei_cl_irq_read_msg()
173 length, cb->buf_idx); in mei_cl_irq_read_msg()
174 cb->status = -EMSGSIZE; in mei_cl_irq_read_msg()
178 if (cb->buf.size < buf_sz) { in mei_cl_irq_read_msg()
180 cb->buf.size, length, cb->buf_idx); in mei_cl_irq_read_msg()
181 cb->status = -EMSGSIZE; in mei_cl_irq_read_msg()
186 mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length); in mei_cl_irq_read_msg()
188 mei_read_slots(dev, cb->buf.data + cb->buf_idx, 0); in mei_cl_irq_read_msg()
190 mei_read_slots(dev, cb->buf.data + cb->buf_idx, length); in mei_cl_irq_read_msg()
193 cb->buf_idx += length; in mei_cl_irq_read_msg()
196 cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx); in mei_cl_irq_read_msg()
197 list_move_tail(&cb->list, cmpl_list); in mei_cl_irq_read_msg()
206 if (cb) in mei_cl_irq_read_msg()
207 list_move_tail(&cb->list, cmpl_list); in mei_cl_irq_read_msg()
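
The mei_cl_irq_read_msg() matches revolve around one cb: it is looked up on cl->rd_pending (or allocated on the fly), data is accumulated into cb->buf at cb->buf_idx, and on completion or on any error (cb->status set to -EPROTO, -ENODEV or -EMSGSIZE) the cb is moved onto the completion list. A condensed sketch of the lookup-or-allocate step, the buffer guard and the discard path; the function signature is hypothetical, and the header/vtag validation and the actual slot/DMA copy are elided:

/*
 * Sketch only: the rd_pending reuse, the size guard and the discard
 * path visible in the mei_cl_irq_read_msg() matches above. Not the
 * full handler; kernel context assumed.
 */
static int irq_read_msg_sketch(struct mei_cl *cl, size_t length,
			       struct list_head *cmpl_list)
{
	struct mei_cl_cb *cb;
	size_t buf_sz;

	/* reuse a queued read request ... */
	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	if (!cb) {
		/* ... or allocate a fresh MTU-sized one */
		cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
		if (!cb)
			goto discard;
		list_add_tail(&cb->list, &cl->rd_pending);
	}

	/* guard the receive buffer before copying into it */
	buf_sz = length + cb->buf_idx;
	if (buf_sz < cb->buf_idx || cb->buf.size < buf_sz) {
		cb->status = -EMSGSIZE;		/* index overflow or undersized buffer */
		goto discard;
	}

	/* payload copy and cb->buf_idx += length elided */
	return 0;

discard:
	if (cb)
		list_move_tail(&cb->list, cmpl_list);	/* complete with the recorded error */
	return 0;
}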
221 static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, in mei_cl_irq_disconnect_rsp() argument
238 list_move_tail(&cb->list, cmpl_list); in mei_cl_irq_disconnect_rsp()
253 static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, in mei_cl_irq_read() argument
275 cb->buf_idx = 0; in mei_cl_irq_read()
276 list_move_tail(&cb->list, cmpl_list); in mei_cl_irq_read()
283 list_move_tail(&cb->list, &cl->rd_pending); in mei_cl_irq_read()
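
In mei_cl_irq_read() the matches show two outcomes for the cb: on the failure path the receive index is reset and the cb is completed; otherwise the cb is parked back on cl->rd_pending to wait for the incoming message. A tiny sketch of just those two moves (the request the real handler issues to the device, and the error it checks for, are not part of the matches and are elided):

/*
 * Sketch only: the two list moves visible in the mei_cl_irq_read()
 * matches above; "failed" stands in for the real handler's error check.
 */
static void irq_read_outcome_sketch(struct mei_cl *cl, struct mei_cl_cb *cb,
				    struct list_head *cmpl_list, bool failed)
{
	if (failed) {
		cb->buf_idx = 0;			/* nothing received */
		list_move_tail(&cb->list, cmpl_list);	/* complete the request */
		return;
	}

	/* success: park the cb until the message actually arrives */
	list_move_tail(&cb->list, &cl->rd_pending);
}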
479 struct mei_cl_cb *cb, *next; in mei_irq_write_handler() local
497 list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) { in mei_irq_write_handler()
498 cl = cb->cl; in mei_irq_write_handler()
503 list_move_tail(&cb->list, cmpl_list); in mei_irq_write_handler()
508 list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) { in mei_irq_write_handler()
509 cl = cb->cl; in mei_irq_write_handler()
510 switch (cb->fop_type) { in mei_irq_write_handler()
513 ret = mei_cl_irq_disconnect(cl, cb, cmpl_list); in mei_irq_write_handler()
520 ret = mei_cl_irq_read(cl, cb, cmpl_list); in mei_irq_write_handler()
527 ret = mei_cl_irq_connect(cl, cb, cmpl_list); in mei_irq_write_handler()
534 ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list); in mei_irq_write_handler()
541 ret = mei_cl_irq_notify(cl, cb, cmpl_list); in mei_irq_write_handler()
546 ret = mei_cl_irq_dma_map(cl, cb, cmpl_list); in mei_irq_write_handler()
551 ret = mei_cl_irq_dma_unmap(cl, cb, cmpl_list); in mei_irq_write_handler()
562 list_for_each_entry_safe(cb, next, &dev->write_list, list) { in mei_irq_write_handler()
563 cl = cb->cl; in mei_irq_write_handler()
564 ret = mei_cl_irq_write(cl, cb, cmpl_list); in mei_irq_write_handler()
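
mei_irq_write_handler() touches cb in three passes: it first drains dev->write_waiting_list into the completion list, then walks dev->ctrl_wr_list and dispatches each control request by cb->fop_type to the matching per-operation IRQ helper, and finally services dev->write_list via mei_cl_irq_write(). A trimmed sketch of the control-list dispatch, assuming kernel context; apart from MEI_FOP_READ (which appears in the matches) the MEI_FOP_* case labels are inferred from the helper names, and the notify/dma cases, slot accounting and error propagation of the real handler are omitted:

/*
 * Sketch only: the fop_type dispatch shape seen in the
 * mei_irq_write_handler() matches above.
 */
static void ctrl_wr_dispatch_sketch(struct mei_device *dev,
				    struct list_head *cmpl_list)
{
	struct mei_cl_cb *cb, *next;
	struct mei_cl *cl;
	int ret = 0;

	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
		cl = cb->cl;

		switch (cb->fop_type) {
		case MEI_FOP_DISCONNECT:
			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
			break;
		case MEI_FOP_READ:
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			break;
		case MEI_FOP_CONNECT:
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			break;
		case MEI_FOP_DISCONNECT_RSP:
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			break;
		default:
			/* notify / dma map / dma unmap cases elided */
			break;
		}
		if (ret)
			return;		/* sketch: the real handler propagates the error */
	}
}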