Lines Matching full:channel (whole-word identifier search; all hits appear to come from the Xen para-virtualized sound frontend, sound/xen/xen_snd_front_evtchnl.c)

23 struct xen_snd_front_evtchnl *channel = dev_id; in evtchnl_interrupt_req() local
24 struct xen_snd_front_info *front_info = channel->front_info; in evtchnl_interrupt_req()
28 if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED)) in evtchnl_interrupt_req()
31 mutex_lock(&channel->ring_io_lock); in evtchnl_interrupt_req()
34 rp = channel->u.req.ring.sring->rsp_prod; in evtchnl_interrupt_req()
43 for (i = channel->u.req.ring.rsp_cons; i != rp; i++) { in evtchnl_interrupt_req()
44 resp = RING_GET_RESPONSE(&channel->u.req.ring, i); in evtchnl_interrupt_req()
45 if (resp->id != channel->evt_id) in evtchnl_interrupt_req()
53 channel->u.req.resp_status = resp->status; in evtchnl_interrupt_req()
54 complete(&channel->u.req.completion); in evtchnl_interrupt_req()
57 channel->u.req.resp_status = resp->status; in evtchnl_interrupt_req()
58 channel->u.req.resp.hw_param = in evtchnl_interrupt_req()
60 complete(&channel->u.req.completion); in evtchnl_interrupt_req()
71 channel->u.req.ring.rsp_cons = i; in evtchnl_interrupt_req()
72 if (i != channel->u.req.ring.req_prod_pvt) { in evtchnl_interrupt_req()
75 RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring, in evtchnl_interrupt_req()
80 channel->u.req.ring.sring->rsp_event = i + 1; in evtchnl_interrupt_req()
83 mutex_unlock(&channel->ring_io_lock); in evtchnl_interrupt_req()
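
The matches above (lines 23-83) follow the standard Xen front-ring consumer pattern from include/xen/interface/io/ring.h: snapshot rsp_prod, walk the responses up to it, publish the new rsp_cons, then use RING_FINAL_CHECK_FOR_RESPONSES() to close the race with a backend that produced more responses in the meantime. A condensed sketch of what these fragments reconstruct to (the per-operation switch is collapsed to two cases here; the in-tree handler distinguishes the individual XENSND_OP_* codes and logs unknown ones):

    static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
    {
            struct xen_snd_front_evtchnl *channel = dev_id;
            struct xensnd_resp *resp;
            RING_IDX i, rp;

            if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
                    return IRQ_HANDLED;

            mutex_lock(&channel->ring_io_lock);

    again:
            rp = channel->u.req.ring.sring->rsp_prod;
            /* Make sure we see responses queued up to rp. */
            virt_rmb();

            for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
                    resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
                    /* Ignore responses that do not match the last request. */
                    if (resp->id != channel->evt_id)
                            continue;

                    switch (resp->operation) {
                    case XENSND_OP_HW_PARAM_QUERY:
                            /* This response carries a payload too. */
                            channel->u.req.resp_status = resp->status;
                            channel->u.req.resp.hw_param = resp->resp.hw_param;
                            complete(&channel->u.req.completion);
                            break;
                    default:
                            /* Status-only responses (open/close/trigger/...). */
                            channel->u.req.resp_status = resp->status;
                            complete(&channel->u.req.completion);
                            break;
                    }
            }

            channel->u.req.ring.rsp_cons = i;
            if (i != channel->u.req.ring.req_prod_pvt) {
                    int more_to_do;

                    /* Re-check: the backend may have produced more. */
                    RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
                                                   more_to_do);
                    if (more_to_do)
                            goto again;
            } else {
                    channel->u.req.ring.sring->rsp_event = i + 1;
            }

            mutex_unlock(&channel->ring_io_lock);
            return IRQ_HANDLED;
    }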
89 struct xen_snd_front_evtchnl *channel = dev_id; in evtchnl_interrupt_evt() local
90 struct xensnd_event_page *page = channel->u.evt.page; in evtchnl_interrupt_evt()
93 if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED)) in evtchnl_interrupt_evt()
96 mutex_lock(&channel->ring_io_lock); in evtchnl_interrupt_evt()
113 if (unlikely(event->id != channel->evt_id++)) in evtchnl_interrupt_evt()
118 xen_snd_front_alsa_handle_cur_pos(channel, in evtchnl_interrupt_evt()
129 mutex_unlock(&channel->ring_io_lock); in evtchnl_interrupt_evt()
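
The event channel handler (lines 89-129) does not use the ring.h macros: xensnd events travel over a plain shared page with in_cons/in_prod indices, as laid out in include/xen/interface/io/sndif.h. Note line 113: event ids must arrive strictly in sequence, because channel->evt_id is post-incremented on every comparison. A sketch, assuming the XENSND_IN_RING_REF() accessor from sndif.h:

    static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
    {
            struct xen_snd_front_evtchnl *channel = dev_id;
            struct xensnd_event_page *page = channel->u.evt.page;
            u32 cons, prod;

            if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
                    return IRQ_HANDLED;

            mutex_lock(&channel->ring_io_lock);

            prod = page->in_prod;
            /* Pair with the backend's producer-side barrier. */
            virt_rmb();
            if (prod == page->in_cons)
                    goto out;

            for (cons = page->in_cons; cons != prod; cons++) {
                    struct xensnd_evt *event;

                    event = &XENSND_IN_RING_REF(page, cons);
                    /* Events must arrive strictly in sequence. */
                    if (unlikely(event->id != channel->evt_id++))
                            continue;

                    if (event->type == XENSND_EVT_CUR_POS)
                            xen_snd_front_alsa_handle_cur_pos(channel,
                                            event->op.cur_pos.position);
            }

            page->in_cons = cons;
            /* Publish the new consumer index before leaving. */
            virt_wmb();
    out:
            mutex_unlock(&channel->ring_io_lock);
            return IRQ_HANDLED;
    }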
133 void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel) in xen_snd_front_evtchnl_flush() argument
137 channel->u.req.ring.req_prod_pvt++; in xen_snd_front_evtchnl_flush()
138 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify); in xen_snd_front_evtchnl_flush()
140 notify_remote_via_irq(channel->irq); in xen_snd_front_evtchnl_flush()
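
xen_snd_front_evtchnl_flush() (lines 133-140) is the producer-side counterpart: the caller has already filled one request slot, so the function advances req_prod_pvt, publishes it, and kicks the backend only when RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() reports that a notification is actually needed. The matches reconstruct to roughly:

    void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel)
    {
            int notify;

            channel->u.req.ring.req_prod_pvt++;
            /* Only notify if the backend is not already processing. */
            RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
            if (notify)
                    notify_remote_via_irq(channel->irq);
    }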
144 struct xen_snd_front_evtchnl *channel) in evtchnl_free() argument
148 if (channel->type == EVTCHNL_TYPE_REQ) in evtchnl_free()
149 page = channel->u.req.ring.sring; in evtchnl_free()
150 else if (channel->type == EVTCHNL_TYPE_EVT) in evtchnl_free()
151 page = channel->u.evt.page; in evtchnl_free()
156 channel->state = EVTCHNL_STATE_DISCONNECTED; in evtchnl_free()
157 if (channel->type == EVTCHNL_TYPE_REQ) { in evtchnl_free()
159 channel->u.req.resp_status = -EIO; in evtchnl_free()
160 complete_all(&channel->u.req.completion); in evtchnl_free()
163 if (channel->irq) in evtchnl_free()
164 unbind_from_irqhandler(channel->irq, channel); in evtchnl_free()
166 if (channel->port) in evtchnl_free()
167 xenbus_free_evtchn(front_info->xb_dev, channel->port); in evtchnl_free()
170 xenbus_teardown_ring(&page, 1, &channel->gref); in evtchnl_free()
172 memset(channel, 0, sizeof(*channel)); in evtchnl_free()
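
The teardown order in evtchnl_free() (lines 144-172) matters: the channel is marked disconnected and any sleeping requester is released with -EIO via complete_all() before the IRQ is unbound, so nothing can block forever on a completion whose interrupt will never fire. Only then are the event channel and the granted ring page given back. Approximately:

    static void evtchnl_free(struct xen_snd_front_info *front_info,
                             struct xen_snd_front_evtchnl *channel)
    {
            void *page = NULL;

            if (channel->type == EVTCHNL_TYPE_REQ)
                    page = channel->u.req.ring.sring;
            else if (channel->type == EVTCHNL_TYPE_EVT)
                    page = channel->u.evt.page;

            if (!page)
                    return;

            channel->state = EVTCHNL_STATE_DISCONNECTED;
            if (channel->type == EVTCHNL_TYPE_REQ) {
                    /* Release anyone still waiting for a response. */
                    channel->u.req.resp_status = -EIO;
                    complete_all(&channel->u.req.completion);
            }

            if (channel->irq)
                    unbind_from_irqhandler(channel->irq, channel);

            if (channel->port)
                    xenbus_free_evtchn(front_info->xb_dev, channel->port);

            /* End grant access and free the shared page. */
            xenbus_teardown_ring(&page, 1, &channel->gref);

            memset(channel, 0, sizeof(*channel));
    }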
192 struct xen_snd_front_evtchnl *channel, in evtchnl_alloc() argument
201 memset(channel, 0, sizeof(*channel)); in evtchnl_alloc()
202 channel->type = type; in evtchnl_alloc()
203 channel->index = index; in evtchnl_alloc()
204 channel->front_info = front_info; in evtchnl_alloc()
205 channel->state = EVTCHNL_STATE_DISCONNECTED; in evtchnl_alloc()
206 ret = xenbus_setup_ring(xb_dev, GFP_KERNEL, &page, 1, &channel->gref); in evtchnl_alloc()
219 mutex_init(&channel->ring_io_lock); in evtchnl_alloc()
224 init_completion(&channel->u.req.completion); in evtchnl_alloc()
225 mutex_init(&channel->u.req.req_io_lock); in evtchnl_alloc()
226 XEN_FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE); in evtchnl_alloc()
230 channel->u.evt.page = page; in evtchnl_alloc()
234 ret = xenbus_alloc_evtchn(xb_dev, &channel->port); in evtchnl_alloc()
238 ret = bind_evtchn_to_irq(channel->port); in evtchnl_alloc()
242 front_info->xb_dev->otherend_id, channel->port, ret); in evtchnl_alloc()
246 channel->irq = ret; in evtchnl_alloc()
248 ret = request_threaded_irq(channel->irq, NULL, handler, in evtchnl_alloc()
249 IRQF_ONESHOT, handler_name, channel); in evtchnl_alloc()
252 channel->irq, ret); in evtchnl_alloc()
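
evtchnl_alloc() (lines 192-252) builds a channel in the opposite order: xenbus_setup_ring() allocates one page and grants the backend access to it (producing channel->gref), the page is then initialized either as a request ring (XEN_FRONT_RING_INIT) or kept as a raw event page, and finally the notification path is wired up. A sketch of that last part, assuming handler and handler_name were chosen earlier in the function based on the channel type:

    ret = xenbus_alloc_evtchn(xb_dev, &channel->port);
    if (ret < 0)
            goto fail;

    ret = bind_evtchn_to_irq(channel->port);
    if (ret < 0) {
            dev_err(&xb_dev->dev,
                    "Failed to bind IRQ for domid %d port %u: %d\n",
                    front_info->xb_dev->otherend_id, channel->port, ret);
            goto fail;
    }

    channel->irq = ret;

    /*
     * Both handlers take ring_io_lock (a mutex), so they must run in
     * a thread; IRQF_ONESHOT keeps the line masked until the threaded
     * handler finishes.
     */
    ret = request_threaded_irq(channel->irq, NULL, handler,
                               IRQF_ONESHOT, handler_name, channel);
    if (ret < 0) {
            dev_err(&xb_dev->dev, "Failed to request IRQ %d: %d\n",
                    channel->irq, ret);
            goto fail;
    }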
293 dev_err(dev, "Error allocating control channel\n"); in xen_snd_front_evtchnl_create_all()
301 dev_err(dev, "Error allocating in-event channel\n"); in xen_snd_front_evtchnl_create_all()
313 dev_err(dev, "Error allocating control channel\n"); in xen_snd_front_evtchnl_create_all()
321 dev_err(dev, "Error allocating in-event channel\n"); in xen_snd_front_evtchnl_create_all()
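
The duplicated messages in xen_snd_front_evtchnl_create_all() (lines 293/313 and 301/321) are expected: the same allocation runs once for the playback streams and once for the capture streams, and every stream gets a request/event channel pair. A hedged sketch of one iteration (the evt_pairs field name follows the driver's front_info layout as I recall it and may differ):

    /* One request channel and one in-event channel per stream. */
    ret = evtchnl_alloc(front_info, index,
                        &front_info->evt_pairs[index].req,
                        EVTCHNL_TYPE_REQ);
    if (ret < 0) {
            dev_err(dev, "Error allocating control channel\n");
            goto fail;
    }

    ret = evtchnl_alloc(front_info, index,
                        &front_info->evt_pairs[index].evt,
                        EVTCHNL_TYPE_EVT);
    if (ret < 0) {
            dev_err(dev, "Error allocating in-event channel\n");
            goto fail;
    }

    index++;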
336 struct xen_snd_front_evtchnl *channel, in evtchnl_publish() argument
340 struct xenbus_device *xb_dev = channel->front_info->xb_dev; in evtchnl_publish()
343 /* Write control channel ring reference. */ in evtchnl_publish()
344 ret = xenbus_printf(xbt, path, node_ring, "%u", channel->gref); in evtchnl_publish()
350 /* Write event channel ring reference. */ in evtchnl_publish()
351 ret = xenbus_printf(xbt, path, node_chnl, "%u", channel->port); in evtchnl_publish()
353 dev_err(&xb_dev->dev, "Error writing event channel: %d\n", ret); in evtchnl_publish()
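
evtchnl_publish() (lines 336-353) is the XenStore half of the handshake: the grant reference of the shared page and the event channel port are written under the stream's path so the backend can map the ring and bind the port, and the caller passes a transaction handle so all per-stream entries become visible atomically. Note that the in-tree comment at line 350 calls the second value a "ring reference", but what is actually written is the event channel number. Reconstructed:

    static int evtchnl_publish(struct xenbus_transaction xbt,
                               struct xen_snd_front_evtchnl *channel,
                               const char *path, const char *node_ring,
                               const char *node_chnl)
    {
            struct xenbus_device *xb_dev = channel->front_info->xb_dev;
            int ret;

            /* Grant reference of the shared (ring or event) page. */
            ret = xenbus_printf(xbt, path, node_ring, "%u", channel->gref);
            if (ret < 0) {
                    dev_err(&xb_dev->dev, "Error writing ring-ref: %d\n", ret);
                    return ret;
            }

            /* Event channel port used for notifications. */
            ret = xenbus_printf(xbt, path, node_chnl, "%u", channel->port);
            if (ret < 0) {
                    dev_err(&xb_dev->dev,
                            "Error writing event channel: %d\n", ret);
                    return ret;
            }

            return 0;
    }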