Lines Matching +full:1 +full:q (matched lines from sound/core/seq/seq_queue.c, the ALSA sequencer queue code; each entry gives the source line number, the matching line, and the enclosing function)
50 static int queue_list_add(struct snd_seq_queue *q) in queue_list_add() argument
58 queue_list[i] = q; in queue_list_add()
59 q->queue = i; in queue_list_add()
66 return -1; in queue_list_add()
71 struct snd_seq_queue *q; in queue_list_remove() local
75 q = queue_list[id]; in queue_list_remove()
76 if (q) { in queue_list_remove()
77 spin_lock(&q->owner_lock); in queue_list_remove()
78 if (q->owner == client) { in queue_list_remove()
80 q->klocked = 1; in queue_list_remove()
81 spin_unlock(&q->owner_lock); in queue_list_remove()
85 return q; in queue_list_remove()
87 spin_unlock(&q->owner_lock); in queue_list_remove()
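The two helpers above, queue_list_add() and queue_list_remove(), follow a common pattern: a fixed-size pointer table guarded by a lock, where adding a queue means claiming the first free slot and removal is only granted to the owning client, which is then marked klocked so its parameters cannot be touched while it is torn down. A minimal userspace sketch of that pattern, with a pthread mutex standing in for the kernel spinlocks and invented names (queue_table, table_add, table_remove, MAX_QUEUES):

    #include <pthread.h>
    #include <stddef.h>

    #define MAX_QUEUES 32                  /* stand-in for the real queue limit */

    struct queue {
        int id;                            /* slot index, -1 while unlisted */
        int owner;                         /* client that created the queue */
        int klocked;                       /* temporarily locked by the core */
    };

    static struct queue *queue_table[MAX_QUEUES];
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Claim the first free slot; return its index, or -1 if the table is full. */
    int table_add(struct queue *q)
    {
        pthread_mutex_lock(&table_lock);
        for (int i = 0; i < MAX_QUEUES; i++) {
            if (!queue_table[i]) {
                queue_table[i] = q;
                q->id = i;
                pthread_mutex_unlock(&table_lock);
                return i;
            }
        }
        pthread_mutex_unlock(&table_lock);
        return -1;
    }

    /* Detach a queue from the table, but only for the client that owns it. */
    struct queue *table_remove(int id, int client)
    {
        struct queue *q;

        pthread_mutex_lock(&table_lock);
        q = queue_table[id];
        if (q && q->owner == client) {
            q->klocked = 1;                /* freeze parameter changes during teardown */
            queue_table[id] = NULL;
        } else {
            q = NULL;                      /* not present, or not the owner */
        }
        pthread_mutex_unlock(&table_lock);
        return q;
    }

The kernel version also keeps a count of active queues and performs the owner check under the per-queue owner_lock; the sketch folds everything under a single lock for brevity.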
98 struct snd_seq_queue *q; in queue_new() local
100 q = kzalloc(sizeof(*q), GFP_KERNEL); in queue_new()
101 if (!q) in queue_new()
104 spin_lock_init(&q->owner_lock); in queue_new()
105 spin_lock_init(&q->check_lock); in queue_new()
106 mutex_init(&q->timer_mutex); in queue_new()
107 snd_use_lock_init(&q->use_lock); in queue_new()
108 q->queue = -1; in queue_new()
110 q->tickq = snd_seq_prioq_new(); in queue_new()
111 q->timeq = snd_seq_prioq_new(); in queue_new()
112 q->timer = snd_seq_timer_new(); in queue_new()
113 if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) { in queue_new()
114 snd_seq_prioq_delete(&q->tickq); in queue_new()
115 snd_seq_prioq_delete(&q->timeq); in queue_new()
116 snd_seq_timer_delete(&q->timer); in queue_new()
117 kfree(q); in queue_new()
121 q->owner = owner; in queue_new()
122 q->locked = locked; in queue_new()
123 q->klocked = 0; in queue_new()
125 return q; in queue_new()
129 static void queue_delete(struct snd_seq_queue *q) in queue_delete() argument
132 mutex_lock(&q->timer_mutex); in queue_delete()
133 snd_seq_timer_stop(q->timer); in queue_delete()
134 snd_seq_timer_close(q); in queue_delete()
135 mutex_unlock(&q->timer_mutex); in queue_delete()
137 snd_use_lock_sync(&q->use_lock); in queue_delete()
139 snd_seq_prioq_delete(&q->tickq); in queue_delete()
140 snd_seq_prioq_delete(&q->timeq); in queue_delete()
141 snd_seq_timer_delete(&q->timer); in queue_delete()
143 kfree(q); in queue_delete()
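queue_new() above allocates the queue object plus its two priority queues and its timer, and if any sub-allocation fails it releases whatever was already allocated before returning; queue_delete() mirrors the same releases on the normal teardown path. A hedged sketch of that all-or-nothing constructor and its matching destructor (the prioq and timer types here are trivial stand-ins, and the function names are invented, not the kernel code itself):

    #include <stdlib.h>

    struct prioq     { int cells; };       /* trivial stand-in for the priority queue */
    struct seq_timer { int running; };     /* trivial stand-in for the queue timer */

    struct queue {
        struct prioq *tickq;               /* events ordered by MIDI tick */
        struct prioq *timeq;               /* events ordered by real time */
        struct seq_timer *timer;
    };

    static struct prioq *prioq_new(void)           { return calloc(1, sizeof(struct prioq)); }
    static void prioq_delete(struct prioq **p)     { free(*p); *p = NULL; }
    static struct seq_timer *timer_new(void)       { return calloc(1, sizeof(struct seq_timer)); }
    static void timer_delete(struct seq_timer **t) { free(*t); *t = NULL; }

    /* Create a queue together with all of its sub-objects, or nothing at all. */
    struct queue *queue_create(void)
    {
        struct queue *q = calloc(1, sizeof(*q));
        if (!q)
            return NULL;

        q->tickq = prioq_new();
        q->timeq = prioq_new();
        q->timer = timer_new();
        if (!q->tickq || !q->timeq || !q->timer) {
            /* partial failure: release whatever did get allocated */
            prioq_delete(&q->tickq);
            prioq_delete(&q->timeq);
            timer_delete(&q->timer);
            free(q);
            return NULL;
        }
        return q;
    }

    /* The normal teardown path mirrors the error path above. */
    void queue_destroy(struct queue *q)
    {
        prioq_delete(&q->tickq);
        prioq_delete(&q->timeq);
        timer_delete(&q->timer);
        free(q);
    }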
165 * The new queue's use_lock is set to 1. It is the caller's responsibility to
166 * call snd_use_lock_free(&q->use_lock).
170 struct snd_seq_queue *q; in snd_seq_queue_alloc() local
172 q = queue_new(client, locked); in snd_seq_queue_alloc()
173 if (q == NULL) in snd_seq_queue_alloc()
175 q->info_flags = info_flags; in snd_seq_queue_alloc()
176 queue_use(q, client, 1); in snd_seq_queue_alloc()
177 snd_use_lock_use(&q->use_lock); in snd_seq_queue_alloc()
178 if (queue_list_add(q) < 0) { in snd_seq_queue_alloc()
179 snd_use_lock_free(&q->use_lock); in snd_seq_queue_alloc()
180 queue_delete(q); in snd_seq_queue_alloc()
183 return q; in snd_seq_queue_alloc()
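The comment before snd_seq_queue_alloc() (lines 165-166 above) states the ownership contract: the queue comes back with its use_lock already taken, and the caller must drop that reference with snd_use_lock_free() once it is done with the new queue. A rough userspace sketch of that "return the object already referenced" convention, using a C11 atomic counter and invented helper names (use_get, use_put, queue_alloc); the real snd_use_lock additionally lets snd_use_lock_sync() wait until every user is gone, which this sketch does not model:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct queue {
        atomic_int use;                    /* number of active users of this queue */
        int id;
    };

    static void use_get(struct queue *q) { atomic_fetch_add(&q->use, 1); }
    static void use_put(struct queue *q) { atomic_fetch_sub(&q->use, 1); }

    /*
     * Allocate a queue and hand it back with one use reference already held,
     * so it cannot be torn down between allocation and the caller's first access.
     */
    static struct queue *queue_alloc(int id)
    {
        struct queue *q = malloc(sizeof(*q));
        if (!q)
            return NULL;
        atomic_init(&q->use, 0);
        q->id = id;
        use_get(q);                        /* this reference now belongs to the caller */
        return q;
    }

    int main(void)
    {
        struct queue *q = queue_alloc(0);
        if (!q)
            return 1;
        /* ... copy q->id and friends into the reply for the client ... */
        use_put(q);                        /* the caller's duty, per the comment above */
        free(q);                           /* real code frees only once no user is left */
        return 0;
    }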
189 struct snd_seq_queue *q; in snd_seq_queue_delete() local
193 q = queue_list_remove(queueid, client); in snd_seq_queue_delete()
194 if (q == NULL) in snd_seq_queue_delete()
196 queue_delete(q); in snd_seq_queue_delete()
205 struct snd_seq_queue *q; in queueptr() local
211 q = queue_list[queueid]; in queueptr()
212 if (q) in queueptr()
213 snd_use_lock_use(&q->use_lock); in queueptr()
215 return q; in queueptr()
222 struct snd_seq_queue *q; in snd_seq_queue_find_name() local
225 q = queueptr(i); in snd_seq_queue_find_name()
226 if (q) { in snd_seq_queue_find_name()
227 if (strncmp(q->name, name, sizeof(q->name)) == 0) in snd_seq_queue_find_name()
228 return q; in snd_seq_queue_find_name()
229 queuefree(q); in snd_seq_queue_find_name()
238 void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) in snd_seq_check_queue() argument
245 if (q == NULL) in snd_seq_check_queue()
249 spin_lock_irqsave(&q->check_lock, flags); in snd_seq_check_queue()
250 if (q->check_blocked) { in snd_seq_check_queue()
251 q->check_again = 1; in snd_seq_check_queue()
252 spin_unlock_irqrestore(&q->check_lock, flags); in snd_seq_check_queue()
255 q->check_blocked = 1; in snd_seq_check_queue()
256 spin_unlock_irqrestore(&q->check_lock, flags); in snd_seq_check_queue()
260 cur_tick = snd_seq_timer_get_cur_tick(q->timer); in snd_seq_check_queue()
262 cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick); in snd_seq_check_queue()
269 cur_time = snd_seq_timer_get_cur_time(q->timer, false); in snd_seq_check_queue()
271 cell = snd_seq_prioq_cell_out(q->timeq, &cur_time); in snd_seq_check_queue()
278 spin_lock_irqsave(&q->check_lock, flags); in snd_seq_check_queue()
279 if (q->check_again) { in snd_seq_check_queue()
280 q->check_again = 0; in snd_seq_check_queue()
281 spin_unlock_irqrestore(&q->check_lock, flags); in snd_seq_check_queue()
284 q->check_blocked = 0; in snd_seq_check_queue()
285 spin_unlock_irqrestore(&q->check_lock, flags); in snd_seq_check_queue()
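snd_seq_check_queue() above guards its dispatch loop with a pair of flags rather than by holding a lock for the whole run: check_blocked says a pass is already in progress, and check_again records that another request arrived in the meantime, so the running pass loops back instead of dropping that request. A compact sketch of the same idiom with a pthread mutex in place of check_lock (process_pending() is a made-up stand-in for draining the two priority queues):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t check_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool check_blocked;             /* a dispatch pass is running right now */
    static bool check_again;               /* another pass was requested meanwhile */

    static void process_pending(void)
    {
        /* stand-in for draining expired cells from the tick and time queues */
        printf("draining expired events\n");
    }

    void check_queue(void)
    {
        pthread_mutex_lock(&check_lock);
        if (check_blocked) {
            /* someone else is dispatching: just ask for one more pass */
            check_again = true;
            pthread_mutex_unlock(&check_lock);
            return;
        }
        check_blocked = true;
        pthread_mutex_unlock(&check_lock);

        for (;;) {
            process_pending();             /* runs without the lock held */

            pthread_mutex_lock(&check_lock);
            if (check_again) {
                /* a request came in while we worked: go around once more */
                check_again = false;
                pthread_mutex_unlock(&check_lock);
                continue;
            }
            check_blocked = false;
            pthread_mutex_unlock(&check_lock);
            return;
        }
    }

    int main(void) { check_queue(); return 0; }

The second flag is what keeps a caller that finds the dispatcher busy from losing its request: it can return immediately, and the running pass will still pick the new work up before clearing check_blocked.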
293 struct snd_seq_queue *q; in snd_seq_enqueue_event() local
298 q = queueptr(dest); in snd_seq_enqueue_event()
299 if (q == NULL) in snd_seq_enqueue_event()
305 cell->event.time.tick += q->timer->tick.cur_tick; in snd_seq_enqueue_event()
310 &q->timer->cur_time); in snd_seq_enqueue_event()
319 err = snd_seq_prioq_cell_in(q->tickq, cell); in snd_seq_enqueue_event()
324 err = snd_seq_prioq_cell_in(q->timeq, cell); in snd_seq_enqueue_event()
329 queuefree(q); /* unlock */ in snd_seq_enqueue_event()
334 snd_seq_check_queue(q, atomic, hop); in snd_seq_enqueue_event()
336 queuefree(q); /* unlock */ in snd_seq_enqueue_event()
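snd_seq_enqueue_event() first normalizes the event's timestamp (a relative tick or real-time stamp becomes absolute by adding the queue timer's current position, as in lines 305 and 310 above) and then files the event into either the tick-ordered or the time-ordered priority queue before kicking a dispatch pass. A simplified sketch of that decision, with invented types and two stub insert functions in place of the real priority-queue insertion:

    #include <stdio.h>

    enum time_mode { MODE_TICK_REL, MODE_TICK_ABS, MODE_REAL_REL, MODE_REAL_ABS };

    struct event {
        enum time_mode mode;
        unsigned long tick;                /* used by the tick modes */
        unsigned long long ns;             /* used by the real-time modes */
    };

    struct queue_pos {
        unsigned long cur_tick;            /* queue timer's current tick */
        unsigned long long cur_ns;         /* queue timer's current real time */
    };

    /* Stubs standing in for inserting a cell into tickq or timeq. */
    static void tickq_insert(struct event *ev) { printf("tick queue <- %lu\n", ev->tick); }
    static void timeq_insert(struct event *ev) { printf("time queue <- %llu ns\n", ev->ns); }

    int enqueue_event(struct queue_pos *q, struct event *ev)
    {
        /* 1. make relative stamps absolute against the queue's current position */
        switch (ev->mode) {
        case MODE_TICK_REL:
            ev->tick += q->cur_tick;
            ev->mode = MODE_TICK_ABS;
            break;
        case MODE_REAL_REL:
            ev->ns += q->cur_ns;
            ev->mode = MODE_REAL_ABS;
            break;
        default:
            break;                         /* already absolute */
        }

        /* 2. file the event into the matching priority queue */
        if (ev->mode == MODE_TICK_ABS)
            tickq_insert(ev);
        else
            timeq_insert(ev);

        /* 3. the real code now kicks a dispatch pass over the queue */
        return 0;
    }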
344 static inline int check_access(struct snd_seq_queue *q, int client) in check_access() argument
346 return (q->owner == client) || (!q->locked && !q->klocked); in check_access()
352 static int queue_access_lock(struct snd_seq_queue *q, int client) in queue_access_lock() argument
357 spin_lock_irqsave(&q->owner_lock, flags); in queue_access_lock()
358 access_ok = check_access(q, client); in queue_access_lock()
360 q->klocked = 1; in queue_access_lock()
361 spin_unlock_irqrestore(&q->owner_lock, flags); in queue_access_lock()
366 static inline void queue_access_unlock(struct snd_seq_queue *q) in queue_access_unlock() argument
370 spin_lock_irqsave(&q->owner_lock, flags); in queue_access_unlock()
371 q->klocked = 0; in queue_access_unlock()
372 spin_unlock_irqrestore(&q->owner_lock, flags); in queue_access_unlock()
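check_access(), queue_access_lock() and queue_access_unlock() above implement a light-weight exclusivity rule for queue parameter changes: a client may proceed if it owns the queue, or if the queue is neither client-locked nor already kernel-locked, and while it proceeds klocked is raised so a second client cannot start a competing update. A sketch of that test-and-mark step under one lock, with a pthread mutex instead of owner_lock and invented function names:

    #include <pthread.h>
    #include <stdbool.h>

    struct queue {
        pthread_mutex_t owner_lock;        /* initialized by the queue constructor */
        int owner;                         /* owning client id */
        bool locked;                       /* queue restricted to its owner */
        bool klocked;                      /* a parameter update is in flight */
    };

    /* May 'client' modify this queue? Call with owner_lock held. */
    static bool may_access(const struct queue *q, int client)
    {
        return q->owner == client || (!q->locked && !q->klocked);
    }

    /* Try to begin an update: on success the queue is marked busy. */
    bool access_lock(struct queue *q, int client)
    {
        bool ok;

        pthread_mutex_lock(&q->owner_lock);
        ok = may_access(q, client);
        if (ok)
            q->klocked = true;
        pthread_mutex_unlock(&q->owner_lock);
        return ok;
    }

    /* Update finished: let other clients in again. */
    void access_unlock(struct queue *q)
    {
        pthread_mutex_lock(&q->owner_lock);
        q->klocked = false;
        pthread_mutex_unlock(&q->owner_lock);
    }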
378 struct snd_seq_queue *q = queueptr(queueid); in snd_seq_queue_check_access() local
382 if (! q) in snd_seq_queue_check_access()
384 spin_lock_irqsave(&q->owner_lock, flags); in snd_seq_queue_check_access()
385 access_ok = check_access(q, client); in snd_seq_queue_check_access()
386 spin_unlock_irqrestore(&q->owner_lock, flags); in snd_seq_queue_check_access()
387 queuefree(q); in snd_seq_queue_check_access()
398 struct snd_seq_queue *q = queueptr(queueid); in snd_seq_queue_set_owner() local
401 if (q == NULL) in snd_seq_queue_set_owner()
404 if (! queue_access_lock(q, client)) { in snd_seq_queue_set_owner()
405 queuefree(q); in snd_seq_queue_set_owner()
409 spin_lock_irqsave(&q->owner_lock, flags); in snd_seq_queue_set_owner()
410 q->locked = locked ? 1 : 0; in snd_seq_queue_set_owner()
411 q->owner = client; in snd_seq_queue_set_owner()
412 spin_unlock_irqrestore(&q->owner_lock, flags); in snd_seq_queue_set_owner()
413 queue_access_unlock(q); in snd_seq_queue_set_owner()
414 queuefree(q); in snd_seq_queue_set_owner()
423 * q->use mutex should be down before calling this function to avoid
446 * q->use mutex should be down before calling this function
465 struct snd_seq_queue *q = queueptr(queueid); in snd_seq_queue_timer_set_tempo() local
468 if (q == NULL) in snd_seq_queue_timer_set_tempo()
470 if (! queue_access_lock(q, client)) { in snd_seq_queue_timer_set_tempo()
471 queuefree(q); in snd_seq_queue_timer_set_tempo()
475 result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq); in snd_seq_queue_timer_set_tempo()
477 result = snd_seq_timer_set_skew(q->timer, info->skew_value, in snd_seq_queue_timer_set_tempo()
479 queue_access_unlock(q); in snd_seq_queue_timer_set_tempo()
480 queuefree(q); in snd_seq_queue_timer_set_tempo()
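The two comments at lines 423 and 446 above document a locking convention rather than enforcing it: the caller is expected to hold the queue's mutex around the timer open/close calls so they cannot race another user of the timer. A small sketch of such a caller-holds-the-lock contract, with hypothetical names (timer_open, timer_close, reopen_timer) and a mutex member named timer_mutex as in the structure init at line 106 above:

    #include <pthread.h>

    struct queue {
        pthread_mutex_t timer_mutex;       /* serializes timer open/close/use */
        int opened;
    };

    /* Caller must hold q->timer_mutex; the function itself takes no lock. */
    static int timer_open(struct queue *q)
    {
        q->opened = 1;                     /* attach the underlying timer here */
        return 0;
    }

    /* Caller must hold q->timer_mutex. */
    static void timer_close(struct queue *q)
    {
        q->opened = 0;                     /* detach the underlying timer here */
    }

    /* A typical caller brackets both calls with the mutex, for example when
     * switching the timer source of a queue. */
    int reopen_timer(struct queue *q)
    {
        int err;

        pthread_mutex_lock(&q->timer_mutex);
        timer_close(q);
        err = timer_open(q);
        pthread_mutex_unlock(&q->timer_mutex);
        return err;
    }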
495 if (use && queue->clients == 1) in queue_use()
524 * return 0 if not used, 1 if used.
528 struct snd_seq_queue *q; in snd_seq_queue_is_used() local
531 q = queueptr(queueid); in snd_seq_queue_is_used()
532 if (q == NULL) in snd_seq_queue_is_used()
534 result = test_bit(client, q->clients_bitmap) ? 1 : 0; in snd_seq_queue_is_used()
535 queuefree(q); in snd_seq_queue_is_used()
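snd_seq_queue_is_used() answers "does this client currently use this queue?" by testing the client's bit in a per-queue bitmap, and queue_use() sets or clears the same bit while keeping a user count next to it (line 495 above checks that count). A plain C sketch of such a client bitmap; the client limit and the helper names are assumptions, not the ALSA constants:

    #include <limits.h>

    #define MAX_CLIENTS   192              /* assumed client limit for the sketch */
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define BITMAP_LONGS  ((MAX_CLIENTS + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct queue {
        unsigned long clients_bitmap[BITMAP_LONGS];
        int clients;                       /* how many bits are currently set */
    };

    void client_set(struct queue *q, int client)
    {
        unsigned long mask = 1UL << (client % BITS_PER_LONG);
        if (!(q->clients_bitmap[client / BITS_PER_LONG] & mask)) {
            q->clients_bitmap[client / BITS_PER_LONG] |= mask;
            q->clients++;                  /* first use by this client */
        }
    }

    void client_clear(struct queue *q, int client)
    {
        unsigned long mask = 1UL << (client % BITS_PER_LONG);
        if (q->clients_bitmap[client / BITS_PER_LONG] & mask) {
            q->clients_bitmap[client / BITS_PER_LONG] &= ~mask;
            q->clients--;                  /* this client no longer uses the queue */
        }
    }

    /* 0 if the client does not use the queue, 1 if it does. */
    int client_is_used(const struct queue *q, int client)
    {
        return (q->clients_bitmap[client / BITS_PER_LONG]
                >> (client % BITS_PER_LONG)) & 1;
    }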
549 struct snd_seq_queue *q; in snd_seq_queue_client_leave() local
553 q = queue_list_remove(i, client); in snd_seq_queue_client_leave()
554 if (q) in snd_seq_queue_client_leave()
555 queue_delete(q); in snd_seq_queue_client_leave()
562 q = queueptr(i); in snd_seq_queue_client_leave()
563 if (!q) in snd_seq_queue_client_leave()
565 if (test_bit(client, q->clients_bitmap)) { in snd_seq_queue_client_leave()
566 snd_seq_prioq_leave(q->tickq, client, 0); in snd_seq_queue_client_leave()
567 snd_seq_prioq_leave(q->timeq, client, 0); in snd_seq_queue_client_leave()
568 snd_seq_queue_use(q->queue, client, 0); in snd_seq_queue_client_leave()
570 queuefree(q); in snd_seq_queue_client_leave()
582 struct snd_seq_queue *q; in snd_seq_queue_client_leave_cells() local
585 q = queueptr(i); in snd_seq_queue_client_leave_cells()
586 if (!q) in snd_seq_queue_client_leave_cells()
588 snd_seq_prioq_leave(q->tickq, client, 0); in snd_seq_queue_client_leave_cells()
589 snd_seq_prioq_leave(q->timeq, client, 0); in snd_seq_queue_client_leave_cells()
590 queuefree(q); in snd_seq_queue_client_leave_cells()
598 struct snd_seq_queue *q; in snd_seq_queue_remove_cells() local
601 q = queueptr(i); in snd_seq_queue_remove_cells()
602 if (!q) in snd_seq_queue_remove_cells()
604 if (test_bit(client, q->clients_bitmap) && in snd_seq_queue_remove_cells()
606 q->queue == info->queue)) { in snd_seq_queue_remove_cells()
607 snd_seq_prioq_remove_events(q->tickq, client, info); in snd_seq_queue_remove_cells()
608 snd_seq_prioq_remove_events(q->timeq, client, info); in snd_seq_queue_remove_cells()
610 queuefree(q); in snd_seq_queue_remove_cells()
619 static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev, in queue_broadcast_event() argument
627 sev.time.tick = q->timer->tick.cur_tick; in queue_broadcast_event()
628 sev.queue = q->queue; in queue_broadcast_event()
629 sev.data.queue.queue = q->queue; in queue_broadcast_event()
642 static void snd_seq_queue_process_event(struct snd_seq_queue *q, in snd_seq_queue_process_event() argument
648 snd_seq_prioq_leave(q->tickq, ev->source.client, 1); in snd_seq_queue_process_event()
649 snd_seq_prioq_leave(q->timeq, ev->source.client, 1); in snd_seq_queue_process_event()
650 if (! snd_seq_timer_start(q->timer)) in snd_seq_queue_process_event()
651 queue_broadcast_event(q, ev, atomic, hop); in snd_seq_queue_process_event()
655 if (! snd_seq_timer_continue(q->timer)) in snd_seq_queue_process_event()
656 queue_broadcast_event(q, ev, atomic, hop); in snd_seq_queue_process_event()
660 snd_seq_timer_stop(q->timer); in snd_seq_queue_process_event()
661 queue_broadcast_event(q, ev, atomic, hop); in snd_seq_queue_process_event()
665 snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value); in snd_seq_queue_process_event()
666 queue_broadcast_event(q, ev, atomic, hop); in snd_seq_queue_process_event()
670 if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) { in snd_seq_queue_process_event()
671 queue_broadcast_event(q, ev, atomic, hop); in snd_seq_queue_process_event()
676 if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) { in snd_seq_queue_process_event()
677 queue_broadcast_event(q, ev, atomic, hop); in snd_seq_queue_process_event()
681 if (snd_seq_timer_set_skew(q->timer, in snd_seq_queue_process_event()
684 queue_broadcast_event(q, ev, atomic, hop); in snd_seq_queue_process_event()
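snd_seq_queue_process_event() above is the dispatcher for queue-control events: START flushes the source client's pending events and starts the queue's timer, CONTINUE resumes it, STOP halts it, TEMPO and the position/skew events reprogram it, and every action that succeeds is re-broadcast to the queue's subscribers via queue_broadcast_event(). A reduced sketch of that switch; the event names and timer helpers are stand-ins, not the ALSA API:

    #include <stdio.h>

    enum ctrl_event { EV_START, EV_CONTINUE, EV_STOP, EV_SET_TEMPO };

    struct seq_timer { int running; int tempo; };

    static int  timer_start(struct seq_timer *t)    { t->running = 1; return 0; }
    static int  timer_resume(struct seq_timer *t)   { t->running = 1; return 0; }
    static void timer_stop(struct seq_timer *t)     { t->running = 0; }
    static void timer_set_tempo(struct seq_timer *t, int tempo) { t->tempo = tempo; }

    /* Stands in for queue_broadcast_event(): tell subscribed clients what happened. */
    static void broadcast(const char *what)
    {
        printf("broadcast: %s\n", what);
    }

    void process_event(struct seq_timer *tmr, enum ctrl_event ev, int value)
    {
        switch (ev) {
        case EV_START:
            if (timer_start(tmr) == 0)     /* announce only if it really started */
                broadcast("start");
            break;
        case EV_CONTINUE:
            if (timer_resume(tmr) == 0)
                broadcast("continue");
            break;
        case EV_STOP:
            timer_stop(tmr);
            broadcast("stop");
            break;
        case EV_SET_TEMPO:
            timer_set_tempo(tmr, value);
            broadcast("tempo");
            break;
        }
    }

    int main(void)
    {
        struct seq_timer t = { 0, 500000 };
        process_event(&t, EV_START, 0);
        process_event(&t, EV_SET_TEMPO, 600000);
        process_event(&t, EV_STOP, 0);
        return 0;
    }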
697 struct snd_seq_queue *q; in snd_seq_control_queue() local
701 q = queueptr(ev->data.queue.queue); in snd_seq_control_queue()
703 if (q == NULL) in snd_seq_control_queue()
706 if (! queue_access_lock(q, ev->source.client)) { in snd_seq_control_queue()
707 queuefree(q); in snd_seq_control_queue()
711 snd_seq_queue_process_event(q, ev, atomic, hop); in snd_seq_control_queue()
713 queue_access_unlock(q); in snd_seq_control_queue()
714 queuefree(q); in snd_seq_control_queue()
727 struct snd_seq_queue *q; in snd_seq_info_queues_read() local
733 q = queueptr(i); in snd_seq_info_queues_read()
734 if (!q) in snd_seq_info_queues_read()
737 tmr = q->timer; in snd_seq_info_queues_read()
743 spin_lock_irq(&q->owner_lock); in snd_seq_info_queues_read()
744 locked = q->locked; in snd_seq_info_queues_read()
745 owner = q->owner; in snd_seq_info_queues_read()
746 spin_unlock_irq(&q->owner_lock); in snd_seq_info_queues_read()
748 snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name); in snd_seq_info_queues_read()
751 snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq)); in snd_seq_info_queues_read()
752 snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq)); in snd_seq_info_queues_read()
760 queuefree(q); in snd_seq_info_queues_read()