Lines matching references to channel, grouped by function (numbers are the file's own source line numbers; declaration matches are marked local/argument).
In evtchnl_interrupt_req():
 23  struct xen_snd_front_evtchnl *channel = dev_id;  (local)
 24  struct xen_snd_front_info *front_info = channel->front_info;
 28  if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
 31  mutex_lock(&channel->ring_io_lock);
 34  rp = channel->u.req.ring.sring->rsp_prod;
 43  for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
 44  resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
 45  if (resp->id != channel->evt_id)
 57  channel->u.req.resp_status = resp->status;
 58  complete(&channel->u.req.completion);
 61  channel->u.req.resp_status = resp->status;
 62  channel->u.req.resp.hw_param =
 64  complete(&channel->u.req.completion);
 75  channel->u.req.ring.rsp_cons = i;
 76  if (i != channel->u.req.ring.req_prod_pvt) {
 79  RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
 84  channel->u.req.ring.sring->rsp_event = i + 1;
 87  mutex_unlock(&channel->ring_io_lock);
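These matches trace the canonical Xen shared-ring consumer pattern (the same shape as blkfront/netfront): snapshot rsp_prod, walk rsp_cons up to it, then close the race with the backend via RING_FINAL_CHECK_FOR_RESPONSES(). Below is a minimal sketch reconstructed around the matched lines (kernel context: linux/interrupt.h, xen/interface/io/ring.h, xen/interface/io/sndif.h); the elided lines, the XENSND_OP_HW_PARAM_QUERY case label, and the struct xensnd_resp field names are assumptions, not the verbatim driver code.

    static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
    {
            struct xen_snd_front_evtchnl *channel = dev_id;
            struct xensnd_resp *resp;
            RING_IDX i, rp;

            if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
                    return IRQ_HANDLED;

            mutex_lock(&channel->ring_io_lock);
    again:
            rp = channel->u.req.ring.sring->rsp_prod;
            virt_rmb();     /* ensure we see queued responses up to rp */

            for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
                    resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
                    if (resp->id != channel->evt_id)
                            continue;       /* stale response, skip it */

                    switch (resp->operation) {
                    case XENSND_OP_HW_PARAM_QUERY:
                            channel->u.req.resp_status = resp->status;
                            channel->u.req.resp.hw_param =
                                            resp->resp.hw_param;
                            complete(&channel->u.req.completion);
                            break;
                    default:
                            channel->u.req.resp_status = resp->status;
                            complete(&channel->u.req.completion);
                            break;
                    }
            }

            channel->u.req.ring.rsp_cons = i;
            if (i != channel->u.req.ring.req_prod_pvt) {
                    int more_to_do;

                    RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
                                                   more_to_do);
                    if (more_to_do)
                            goto again;
            } else {
                    /* No requests outstanding: re-arm response events. */
                    channel->u.req.ring.sring->rsp_event = i + 1;
            }

            mutex_unlock(&channel->ring_io_lock);
            return IRQ_HANDLED;
    }

Skipping responses whose id does not match channel->evt_id (line 45) lets the handler ignore late responses to requests that have already been abandoned.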
In evtchnl_interrupt_evt():
 93  struct xen_snd_front_evtchnl *channel = dev_id;  (local)
 94  struct xensnd_event_page *page = channel->u.evt.page;
 97  if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
100  mutex_lock(&channel->ring_io_lock);
117  if (unlikely(event->id != channel->evt_id++))
122  xen_snd_front_alsa_handle_cur_pos(channel,
133  mutex_unlock(&channel->ring_io_lock);
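The event channel does not use the request/response ring macros: the matches point at a plain producer/consumer page indexed by in_prod/in_cons. A sketch of that consumer, assuming the xensnd_event_page layout, the XENSND_IN_RING_REF() accessor, and the XENSND_EVT_CUR_POS event type from xen/interface/io/sndif.h; not the verbatim driver code:

    static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
    {
            struct xen_snd_front_evtchnl *channel = dev_id;
            struct xensnd_event_page *page = channel->u.evt.page;
            u32 cons, prod;

            if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
                    return IRQ_HANDLED;

            mutex_lock(&channel->ring_io_lock);

            prod = page->in_prod;
            virt_rmb();             /* see ring contents up to prod */

            for (cons = page->in_cons; cons != prod; cons++) {
                    struct xensnd_evt *event;

                    event = &XENSND_IN_RING_REF(page, cons);
                    /* Line 117: drop events that arrive out of sequence. */
                    if (unlikely(event->id != channel->evt_id++))
                            continue;

                    if (event->type == XENSND_EVT_CUR_POS)
                            xen_snd_front_alsa_handle_cur_pos(channel,
                                            event->op.cur_pos.position);
            }

            page->in_cons = cons;
            virt_wmb();             /* publish the new consumer index */

            mutex_unlock(&channel->ring_io_lock);
            return IRQ_HANDLED;
    }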
In xen_snd_front_evtchnl_flush():
137  void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel)  (argument)
141  channel->u.req.ring.req_prod_pvt++;
142  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
144  notify_remote_via_irq(channel->irq);
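RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() copies the privately staged producer index (req_prod_pvt) to the shared ring and sets notify only when the backend has asked to be woken, so the comparatively expensive event-channel kick is skipped while the backend is still actively processing. The matched lines fit the standard producer idiom; a sketch (the int notify local is implied by the macro's out-parameter):

    void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel)
    {
            int notify;

            channel->u.req.ring.req_prod_pvt++;
            RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
            if (notify)
                    notify_remote_via_irq(channel->irq);
    }

The caller is expected to have filled the request slot at req_prod_pvt (typically obtained via RING_GET_REQUEST()) before flushing.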
In evtchnl_free():
148  struct xen_snd_front_evtchnl *channel)  (argument)
152  if (channel->type == EVTCHNL_TYPE_REQ)
153  page = (unsigned long)channel->u.req.ring.sring;
154  else if (channel->type == EVTCHNL_TYPE_EVT)
155  page = (unsigned long)channel->u.evt.page;
160  channel->state = EVTCHNL_STATE_DISCONNECTED;
161  if (channel->type == EVTCHNL_TYPE_REQ) {
163  channel->u.req.resp_status = -EIO;
164  complete_all(&channel->u.req.completion);
167  if (channel->irq)
168  unbind_from_irqhandler(channel->irq, channel);
170  if (channel->port)
171  xenbus_free_evtchn(front_info->xb_dev, channel->port);
174  if (channel->gref != GRANT_INVALID_REF)
175  gnttab_end_foreign_access(channel->gref, 0, page);
179  memset(channel, 0, sizeof(*channel));
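Teardown runs in reverse order of setup, with one wrinkle visible at lines 163-164: a caller still blocked waiting for a response is released with -EIO before the ring goes away, so it cannot sleep forever on a dead channel. A sketch consistent with the matched lines; the early return for an unallocated page and the free_page() fallback are assumptions:

    static void evtchnl_free(struct xen_snd_front_info *front_info,
                             struct xen_snd_front_evtchnl *channel)
    {
            unsigned long page = 0;

            if (channel->type == EVTCHNL_TYPE_REQ)
                    page = (unsigned long)channel->u.req.ring.sring;
            else if (channel->type == EVTCHNL_TYPE_EVT)
                    page = (unsigned long)channel->u.evt.page;

            if (!page)
                    return;

            channel->state = EVTCHNL_STATE_DISCONNECTED;
            if (channel->type == EVTCHNL_TYPE_REQ) {
                    /* Release a waiter blocked on a pending request. */
                    channel->u.req.resp_status = -EIO;
                    complete_all(&channel->u.req.completion);
            }

            if (channel->irq)
                    unbind_from_irqhandler(channel->irq, channel);

            if (channel->port)
                    xenbus_free_evtchn(front_info->xb_dev, channel->port);

            /* End foreign access; this variant frees the page as well. */
            if (channel->gref != GRANT_INVALID_REF)
                    gnttab_end_foreign_access(channel->gref, 0, page);
            else
                    free_page(page);

            memset(channel, 0, sizeof(*channel));
    }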
In evtchnl_alloc():
199  struct xen_snd_front_evtchnl *channel,  (argument)
209  memset(channel, 0, sizeof(*channel));
210  channel->type = type;
211  channel->index = index;
212  channel->front_info = front_info;
213  channel->state = EVTCHNL_STATE_DISCONNECTED;
214  channel->gref = GRANT_INVALID_REF;
230  mutex_init(&channel->ring_io_lock);
235  init_completion(&channel->u.req.completion);
236  mutex_init(&channel->u.req.req_io_lock);
238  FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE);
242  channel->u.req.ring.sring = NULL;
253  channel->u.evt.page = (struct xensnd_event_page *)page;
258  channel->gref = gref;
260  ret = xenbus_alloc_evtchn(xb_dev, &channel->port);
264  ret = bind_evtchn_to_irq(channel->port);
268  front_info->xb_dev->otherend_id, channel->port, ret);
272  channel->irq = ret;
274  ret = request_threaded_irq(channel->irq, NULL, handler,
275  IRQF_ONESHOT, handler_name, channel);
278  channel->irq, ret);
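These matches outline the standard frontend bring-up sequence: zero the state, share one page with the backend (initialized as a shared ring for REQ channels, used as a raw event page for EVT channels), grant access to it, allocate an event channel, bind it to an IRQ, and install a threaded handler. IRQF_ONESHOT keeps the line masked until the threaded handler returns, which is what allows both handlers above to sleep on ring_io_lock. A sketch with error unwinding elided; the enum/type names (xen_snd_front_evtchnl_type, xen_sndif_sring), the gfn conversion, and the literal "xen_snd_front" name standing in for the driver's handler_name are assumptions:

    static int evtchnl_alloc(struct xen_snd_front_info *front_info, int index,
                             struct xen_snd_front_evtchnl *channel,
                             enum xen_snd_front_evtchnl_type type)
    {
            struct xenbus_device *xb_dev = front_info->xb_dev;
            irq_handler_t handler;
            unsigned long page;
            int ret;

            memset(channel, 0, sizeof(*channel));
            channel->type = type;
            channel->index = index;
            channel->front_info = front_info;
            channel->state = EVTCHNL_STATE_DISCONNECTED;
            channel->gref = GRANT_INVALID_REF;
            mutex_init(&channel->ring_io_lock);

            page = get_zeroed_page(GFP_KERNEL);
            if (!page)
                    return -ENOMEM;

            if (type == EVTCHNL_TYPE_REQ) {
                    struct xen_sndif_sring *sring;

                    sring = (struct xen_sndif_sring *)page;
                    init_completion(&channel->u.req.completion);
                    mutex_init(&channel->u.req.req_io_lock);
                    SHARED_RING_INIT(sring);
                    FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE);
                    handler = evtchnl_interrupt_req;
            } else {
                    channel->u.evt.page = (struct xensnd_event_page *)page;
                    handler = evtchnl_interrupt_evt;
            }

            /* Grant the backend read/write access to the shared page. */
            ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
                                              virt_to_gfn((void *)page), 0);
            if (ret < 0)
                    return ret;
            channel->gref = ret;

            ret = xenbus_alloc_evtchn(xb_dev, &channel->port);
            if (ret < 0)
                    return ret;

            ret = bind_evtchn_to_irq(channel->port);
            if (ret < 0)
                    return ret;
            channel->irq = ret;

            /* Threaded handler only (NULL hard handler); IRQF_ONESHOT
             * keeps the interrupt masked until the thread finishes. */
            return request_threaded_irq(channel->irq, NULL, handler,
                                        IRQF_ONESHOT, "xen_snd_front",
                                        channel);
    }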
In evtchnl_publish():
364  struct xen_snd_front_evtchnl *channel,  (argument)
368  struct xenbus_device *xb_dev = channel->front_info->xb_dev;
372  ret = xenbus_printf(xbt, path, node_ring, "%u", channel->gref);
379  ret = xenbus_printf(xbt, path, node_chnl, "%u", channel->port);
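Finally, the grant reference and event-channel port are advertised to the backend over XenStore inside the caller's transaction; node_ring and node_chnl carry the per-stream key names chosen by the caller. A sketch consistent with the matched lines (the dev_err() messages are placeholders):

    static int evtchnl_publish(struct xenbus_transaction xbt,
                               struct xen_snd_front_evtchnl *channel,
                               const char *path, const char *node_ring,
                               const char *node_chnl)
    {
            struct xenbus_device *xb_dev = channel->front_info->xb_dev;
            int ret;

            /* Grant reference of the shared ring/event page. */
            ret = xenbus_printf(xbt, path, node_ring, "%u", channel->gref);
            if (ret < 0) {
                    dev_err(&xb_dev->dev, "Error writing ring-ref: %d\n", ret);
                    return ret;
            }

            /* Event-channel port used for notifications. */
            ret = xenbus_printf(xbt, path, node_chnl, "%u", channel->port);
            if (ret < 0)
                    dev_err(&xb_dev->dev,
                            "Error writing event channel: %d\n", ret);

            return ret;
    }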