
// SPDX-License-Identifier: GPL-2.0
/*
 * core.c - Implementation of core module of MOST Linux driver stack
 *
 * Copyright (C) 2013-2020 Microchip Technology Germany II GmbH & Co. KG
 */
static void most_free_mbo_coherent(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;
	u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	if (c->iface->dma_free)
		c->iface->dma_free(mbo, coherent_buf_size);
	else
		kfree(mbo->virt_address);
	kfree(mbo);
	if (atomic_sub_and_test(1, &c->mbo_ref))
		complete(&c->cleanup);
}
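
/*
 * The tail of most_free_mbo_coherent() uses the classic "last put wakes
 * the waiter" idiom. A minimal, self-contained sketch of that idiom
 * follows; struct pool and its functions are illustrative names, not
 * driver symbols.
 */
#include <linux/atomic.h>
#include <linux/completion.h>

struct pool {
	atomic_t refs;			/* outstanding buffers */
	struct completion done;		/* signaled when refs hits zero */
};

static void pool_put(struct pool *p)
{
	/* atomic_sub_and_test() returns true only for the final put */
	if (atomic_sub_and_test(1, &p->refs))
		complete(&p->done);
}

static void pool_teardown_wait(struct pool *p)
{
	wait_for_completion(&p->done);	/* blocks until the last pool_put() */
}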

/**
 * flush_channel_fifos - clear the channel fifos
 * @c: pointer to channel object
 */
static void flush_channel_fifos(struct most_channel *c)
{
	unsigned long flags, hf_flags;
	struct mbo *mbo, *tmp;

	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
		return;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	spin_lock_irqsave(&c->fifo_lock, hf_flags);
	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, hf_flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);

	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
		dev_warn(&c->dev, "Channel or trash fifo not empty\n");
}

/**
 * flush_trash_fifo - clear the trash fifo
 * @c: pointer to channel object
 */
static int flush_trash_fifo(struct most_channel *c)
{
	struct mbo *mbo, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return 0;
}
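
/*
 * Note the recurring drain pattern in both flush helpers above: the
 * spinlock is dropped around each most_free_mbo_coherent() call because
 * the release path may reach the interface's dma_free() hook, which must
 * not run with interrupts disabled. A generic sketch of the pattern;
 * struct my_item and drain_list() are illustrative names:
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_item {
	struct list_head list;
};

static void drain_list(struct list_head *head, spinlock_t *lock)
{
	struct my_item *it, *tmp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(it, tmp, head, list) {
		list_del(&it->list);
		/* drop the lock: the per-item release may need irqs on */
		spin_unlock_irqrestore(lock, flags);
		kfree(it);
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}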

/*
 * sysfs attribute callbacks (excerpts). Each *_show() resolves the
 * channel from its struct device via to_channel() and reports either a
 * hardware capability (iface->channel_vector) or a configured value
 * (c->cfg).
 */
static ssize_t available_directions_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].direction & MOST_CH_RX)
		strcat(buf, "rx ");
	if (c->iface->channel_vector[i].direction & MOST_CH_TX)
		strcat(buf, "tx ");
	strcat(buf, "\n");
	return strlen(buf);
}

static ssize_t available_datatypes_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
		strcat(buf, "control ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
		strcat(buf, "async ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
		strcat(buf, "sync ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
		strcat(buf, "isoc ");
	strcat(buf, "\n");
	return strlen(buf);
}

/*
 * The remaining *_show() callbacks share that shape; only the reported
 * field differs:
 */
	/* number_of_packet_buffers_show() */
	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_packet);
	/* number_of_stream_buffers_show() */
	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_streaming);
	/* size_of_packet_buffer_show() */
	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_packet);
	/* size_of_stream_buffer_show() */
	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_streaming);
	/* channel_starving_show() */
	return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
	/* set_number_of_buffers_show() */
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
	/* set_buffer_size_show() */
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
	/* set_subbuffer_size_show() */
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
	/* set_packets_per_xact_show() */
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
	/* set_dbr_size_show() */
	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size);

	/* set_direction_show() */
	if (c->cfg.direction & MOST_CH_TX)
		return snprintf(buf, PAGE_SIZE, "tx\n");
	else if (c->cfg.direction & MOST_CH_RX)
		return snprintf(buf, PAGE_SIZE, "rx\n");
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");

	/* set_datatype_show(): reports the name matching c->cfg.data_type */
	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
		if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
			return snprintf(buf, PAGE_SIZE, "%s",
					ch_data_type[i].name);
	}
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");

static umode_t channel_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int index)
{
	struct device_attribute *dev_attr =
		container_of(attr, struct device_attribute, attr);
	struct device *dev = kobj_to_dev(kobj);
	struct most_channel *c = to_channel(dev);

	/* hide DBR size unless the interface is MediaLB DIM2, and
	 * packets-per-xact unless it is USB */
	if (!strcmp(dev_attr->attr.name, "set_dbr_size") &&
	    (c->iface->interface != ITYPE_MEDIALB_DIM2))
		return 0;
	if (!strcmp(dev_attr->attr.name, "set_packets_per_xact") &&
	    (c->iface->interface != ITYPE_USB))
		return 0;

	return attr->mode;
}
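
/*
 * For context: show-callbacks like the ones above are typically wired
 * into sysfs through a named attribute group whose .is_visible hook is
 * channel_attr_is_visible(). A minimal sketch of that wiring, with an
 * illustrative subset of attributes (the full list in core.c differs):
 */
static DEVICE_ATTR_RO(available_directions);
static DEVICE_ATTR_RO(channel_starving);

static struct attribute *channel_attrs[] = {
	&dev_attr_available_directions.attr,
	&dev_attr_channel_starving.attr,
	NULL,
};

static const struct attribute_group channel_attr_group = {
	.attrs = channel_attrs,
	.is_visible = channel_attr_is_visible,
};

static const struct attribute_group *channel_attr_groups[] = {
	&channel_attr_group,
	NULL,
};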

/*
 * print_links(): excerpt; emits one "<component>:<channel>" line for
 * each established link of an interface.
 */
	struct most_channel *c;

	list_for_each_entry(c, &iface->p->channel_list, list) {
		if (c->pipe0.comp) {
			offs += snprintf(buf + offs, PAGE_SIZE - offs,
					 "%s:%s\n",
					 c->pipe0.comp->name,
					 dev_name(&c->dev));
		}
		if (c->pipe1.comp) {
			offs += snprintf(buf + offs, PAGE_SIZE - offs,
					 "%s:%s\n",
					 c->pipe1.comp->name,
					 dev_name(&c->dev));
		}
	}

static struct most_channel *get_channel(char *mdev, char *mdev_ch)
{
	struct most_channel *c, *tmp;
	struct most_interface *iface;
	/* ... look up the interface device named @mdev on the most bus ... */

	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
		if (!strcmp(dev_name(&c->dev), mdev_ch))
			return c;
	}
	return NULL;
}

inline int link_channel_to_component(struct most_channel *c,
				     struct most_component *comp,
				     char *name, char *comp_param)
{
	struct most_component **comp_ptr;
	int ret;

	if (!c->pipe0.comp)
		comp_ptr = &c->pipe0.comp;
	else if (!c->pipe1.comp)
		comp_ptr = &c->pipe1.comp;
	else
		return -ENOSPC;	/* both pipes already taken */

	*comp_ptr = comp;
	ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, name,
				  comp_param);
	if (ret)
		*comp_ptr = NULL;
	return ret;
}

/*
 * most_set_cfg_*(): configuration setters used by the configfs layer.
 * Each resolves the channel by device/channel name and stores the value
 * in c->cfg; the numeric setters share one shape:
 */
int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.buffer_size = val;
	return 0;
}

/* likewise for the remaining numeric parameters: */
	c->cfg.subbuffer_size = val;	/* most_set_cfg_subbuffer_size() */
	c->cfg.dbr_size = val;		/* most_set_cfg_dbr_size() */
	c->cfg.num_buffers = val;	/* most_set_cfg_num_buffers() */
	c->cfg.packets_per_xact = val;	/* most_set_cfg_packets_xact() */

int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf)
{
	int i;
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
		if (!strcmp(buf, ch_data_type[i].name)) {
			c->cfg.data_type = ch_data_type[i].most_ch_data_type;
			break;
		}
	}
	if (i == ARRAY_SIZE(ch_data_type))
		dev_warn(&c->dev, "Invalid attribute settings\n");
	return 0;
}

int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	if (!strcmp(buf, "dir_rx")) {
		c->cfg.direction = MOST_CH_RX;
	} else if (!strcmp(buf, "rx")) {
		c->cfg.direction = MOST_CH_RX;
	} else if (!strcmp(buf, "dir_tx")) {
		c->cfg.direction = MOST_CH_TX;
	} else if (!strcmp(buf, "tx")) {
		c->cfg.direction = MOST_CH_TX;
	} else {
		dev_err(&c->dev, "Invalid direction\n");
		return -ENODATA;
	}
	return 0;
}

int most_add_link(char *mdev, char *mdev_ch, char *comp_name,
		  char *link_name, char *comp_param)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);
	struct most_component *comp = match_component(comp_name);

	if (!c || !comp)
		return -ENODEV;

	return link_channel_to_component(c, comp, link_name, comp_param);
}

int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
{
	struct most_channel *c;
	struct most_component *comp;

	comp = match_component(comp_name);
	if (!comp)
		return -ENODEV;
	c = get_channel(mdev, mdev_ch);
	if (!c)
		return -ENODEV;

	if (comp->disconnect_channel(c->iface, c->channel_id))
		return -EIO;
	if (c->pipe0.comp == comp)
		c->pipe0.comp = NULL;
	if (c->pipe1.comp == comp)
		c->pipe1.comp = NULL;
	return 0;
}
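
/*
 * Taken together, the setters and most_add_link() let the configfs
 * layer bring a channel up. A hedged sketch of that call sequence; the
 * device name "mdev0", channel "ep8f" and component "cdev" are examples
 * only:
 */
static int example_configure_and_link(void)
{
	int ret;

	ret = most_set_cfg_buffer_size("mdev0", "ep8f", 1024);
	if (ret)
		return ret;
	ret = most_set_cfg_num_buffers("mdev0", "ep8f", 16);
	if (ret)
		return ret;
	ret = most_set_cfg_datatype("mdev0", "ep8f", "async");
	if (ret)
		return ret;
	ret = most_set_cfg_direction("mdev0", "ep8f", "rx");
	if (ret)
		return ret;
	/* finally, link the configured channel to the cdev component */
	return most_add_link("mdev0", "ep8f", "cdev", "my_link", NULL);
}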

static void trash_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add(&mbo->list, &c->trash_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
}

static bool hdm_mbo_ready(struct most_channel *c)
{
	bool empty;

	if (c->enqueue_halt)
		return false;

	spin_lock_irq(&c->fifo_lock);
	empty = list_empty(&c->halt_fifo);
	spin_unlock_irq(&c->fifo_lock);

	return !empty;
}

static void nq_hdm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add_tail(&mbo->list, &c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	wake_up_interruptible(&c->hdm_fifo_wq);
}

static int hdm_enqueue_thread(void *data)
{
	struct most_channel *c = data;
	struct mbo *mbo;
	int ret;
	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;

	while (likely(!kthread_should_stop())) {
		wait_event_interruptible(c->hdm_fifo_wq,
					 hdm_mbo_ready(c) ||
					 kthread_should_stop());

		mutex_lock(&c->nq_mutex);
		spin_lock_irq(&c->fifo_lock);
		if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
			spin_unlock_irq(&c->fifo_lock);
			mutex_unlock(&c->nq_mutex);
			continue;
		}

		mbo = list_pop_mbo(&c->halt_fifo);
		spin_unlock_irq(&c->fifo_lock);

		if (c->cfg.direction == MOST_CH_RX)
			mbo->buffer_length = c->cfg.buffer_size;

		ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
		mutex_unlock(&c->nq_mutex);

		if (unlikely(ret)) {
			dev_err(&c->dev, "Buffer enqueue failed\n");
			nq_hdm_mbo(mbo);
			c->hdm_enqueue_task = NULL;
			return 0;
		}
	}
	return 0;
}

static int run_enqueue_thread(struct most_channel *c, int channel_id)
{
	struct task_struct *task =
		kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
			    channel_id);

	if (IS_ERR(task))
		return PTR_ERR(task);

	c->hdm_enqueue_task = task;
	return 0;
}
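
/*
 * The enqueue feeder uses the standard kthread run/stop pattern:
 * kthread_run() starts the loop, and kthread_stop() (see
 * most_stop_channel() below) wakes the thread and waits for it to exit.
 * A self-contained sketch of that pattern; my_thread_fn and
 * start_worker are illustrative names:
 */
#include <linux/kthread.h>
#include <linux/err.h>

static int my_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* do one unit of work, then sleep or wait */
		schedule_timeout_interruptible(HZ);
	}
	return 0;	/* return value is reported by kthread_stop() */
}

static struct task_struct *start_worker(void *ctx)
{
	struct task_struct *task = kthread_run(my_thread_fn, ctx, "my_worker");

	return IS_ERR(task) ? NULL : task;
}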

static void arm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c = mbo->context;

	if (c->is_poisoned) {
		trash_mbo(mbo);
		return;
	}

	spin_lock_irqsave(&c->fifo_lock, flags);
	++*mbo->num_buffers_ptr;
	list_add_tail(&mbo->list, &c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	if (c->pipe0.refs && c->pipe0.comp->tx_completion)
		c->pipe0.comp->tx_completion(c->iface, c->channel_id);

	if (c->pipe1.refs && c->pipe1.comp->tx_completion)
		c->pipe1.comp->tx_completion(c->iface, c->channel_id);
}

/**
 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
 * @c: pointer to interface channel
 * @dir: direction of the channel
 * @compl: pointer to completion routine
 */
static int arm_mbo_chain(struct most_channel *c, int dir,
			 void (*compl)(struct mbo *))
{
	unsigned int i;
	struct mbo *mbo;
	unsigned long flags;
	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	atomic_set(&c->mbo_nq_level, 0);

	for (i = 0; i < c->cfg.num_buffers; i++) {
		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
		if (!mbo)
			goto flush_fifos;

		mbo->context = c;
		mbo->ifp = c->iface;
		mbo->hdm_channel_id = c->channel_id;
		if (c->iface->dma_alloc)
			mbo->virt_address =
				c->iface->dma_alloc(mbo, coherent_buf_size);
		else
			mbo->virt_address =
				kzalloc(coherent_buf_size, GFP_KERNEL);
		if (!mbo->virt_address)
			goto release_mbo;

		mbo->complete = compl;
		if (dir == MOST_CH_RX) {
			nq_hdm_mbo(mbo);
			atomic_inc(&c->mbo_nq_level);
		} else {
			spin_lock_irqsave(&c->fifo_lock, flags);
			list_add_tail(&mbo->list, &c->fifo);
			spin_unlock_irqrestore(&c->fifo_lock, flags);
		}
	}
	return c->cfg.num_buffers;

release_mbo:
	kfree(mbo);
flush_fifos:
	flush_channel_fifos(c);
	return 0;
}

static void most_write_completion(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;

	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
		trash_mbo(mbo);
	else
		arm_mbo(mbo);
}

int channel_has_mbo(struct most_interface *iface, int id,
		    struct most_component *comp)
{
	struct most_channel *c = iface->p->channel[id];
	unsigned long flags;
	int empty;

	if (unlikely(!c))
		return -EINVAL;

	if (c->pipe0.refs && c->pipe1.refs &&
	    ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
	     (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
		return 0;

	spin_lock_irqsave(&c->fifo_lock, flags);
	empty = list_empty(&c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return !empty;
}

struct mbo *most_get_mbo(struct most_interface *iface, int id,
			 struct most_component *comp)
{
	struct mbo *mbo;
	struct most_channel *c;
	unsigned long flags;
	int *num_buffers_ptr;

	c = iface->p->channel[id];
	if (unlikely(!c))
		return NULL;

	if (c->pipe0.refs && c->pipe1.refs &&
	    ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
	     (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
		return NULL;

	if (comp == c->pipe0.comp)
		num_buffers_ptr = &c->pipe0.num_buffers;
	else if (comp == c->pipe1.comp)
		num_buffers_ptr = &c->pipe1.num_buffers;
	else
		num_buffers_ptr = &dummy_num_buffers;

	spin_lock_irqsave(&c->fifo_lock, flags);
	if (list_empty(&c->fifo)) {
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		return NULL;
	}
	mbo = list_pop_mbo(&c->fifo);
	--*num_buffers_ptr;
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	mbo->num_buffers_ptr = num_buffers_ptr;
	mbo->buffer_length = c->cfg.buffer_size;
	return mbo;
}

void most_put_mbo(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;

	if (c->cfg.direction == MOST_CH_TX) {
		arm_mbo(mbo);
		return;
	}
	nq_hdm_mbo(mbo);
	atomic_inc(&c->mbo_nq_level);
}
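
/*
 * A hedged sketch of a component's transmit path built on most_get_mbo(),
 * most_put_mbo() and most_submit_mbo(); example_tx and its parameters
 * are illustrative, not core.c symbols:
 */
#include <linux/string.h>

static int example_tx(struct most_interface *iface, int id,
		      struct most_component *my_comp,
		      const void *data, size_t len)
{
	struct mbo *mbo = most_get_mbo(iface, id, my_comp);

	if (!mbo)
		return -EAGAIN;		/* no buffer; retry on tx_completion */

	if (len > mbo->buffer_length) {
		most_put_mbo(mbo);	/* hand the unused buffer back */
		return -EMSGSIZE;
	}

	memcpy(mbo->virt_address, data, len);
	mbo->buffer_length = len;
	most_submit_mbo(mbo);		/* ownership passes to the HDM */
	return 0;
}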

static void most_read_completion(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;

	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
		trash_mbo(mbo);
		return;
	}

	if (mbo->status == MBO_E_INVAL) {
		nq_hdm_mbo(mbo);
		atomic_inc(&c->mbo_nq_level);
		return;
	}

	if (atomic_sub_and_test(1, &c->mbo_nq_level))
		c->is_starving = 1;

	if (c->pipe0.refs && c->pipe0.comp->rx_completion &&
	    c->pipe0.comp->rx_completion(mbo) == 0)
		return;

	if (c->pipe1.refs && c->pipe1.comp->rx_completion &&
	    c->pipe1.comp->rx_completion(mbo) == 0)
		return;

	most_put_mbo(mbo);
}
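
/*
 * The calls above define the rx_completion contract: a component returns
 * 0 to take ownership of the MBO (releasing it later via most_put_mbo()),
 * or nonzero to decline so the core recycles the buffer itself. A sketch
 * with the illustrative name my_comp_rx:
 */
static int my_comp_rx(struct mbo *mbo)
{
	if (!mbo->processed_length)
		return -EINVAL;	/* decline; the core recycles the buffer */

	pr_info("received %d bytes\n", mbo->processed_length);
	most_put_mbo(mbo);	/* done with the data; return the buffer */
	return 0;
}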

int most_start_channel(struct most_interface *iface, int id,
		       struct most_component *comp)
{
	int num_buffer;
	int ret;
	struct most_channel *c = iface->p->channel[id];

	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->pipe0.refs + c->pipe1.refs > 0)
		goto out; /* already started by the other component */

	if (!try_module_get(iface->mod)) {
		dev_err(&c->dev, "Failed to acquire HDM lock\n");
		mutex_unlock(&c->start_mutex);
		return -ENOLCK;
	}

	c->cfg.extra_len = 0;
	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
		dev_err(&c->dev, "Channel configuration failed. Go check settings...\n");
		ret = -EINVAL;
		goto err_put_module;
	}

	init_waitqueue_head(&c->hdm_fifo_wq);

	if (c->cfg.direction == MOST_CH_RX)
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_read_completion);
	else
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_write_completion);
	if (unlikely(!num_buffer)) {
		ret = -ENOMEM;
		goto err_put_module;
	}

	ret = run_enqueue_thread(c, id);
	if (ret)
		goto err_put_module;

	c->is_starving = 0;
	c->pipe0.num_buffers = c->cfg.num_buffers / 2;
	c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers;
	atomic_set(&c->mbo_ref, num_buffer);

out:
	if (comp == c->pipe0.comp)
		c->pipe0.refs++;
	if (comp == c->pipe1.comp)
		c->pipe1.refs++;
	mutex_unlock(&c->start_mutex);
	return 0;

err_put_module:
	module_put(iface->mod);
	mutex_unlock(&c->start_mutex);
	return ret;
}

int most_stop_channel(struct most_interface *iface, int id,
		      struct most_component *comp)
{
	struct most_channel *c;

	c = iface->p->channel[id];
	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->pipe0.refs + c->pipe1.refs >= 2)
		goto out; /* the other component still uses the channel */

	if (c->hdm_enqueue_task)
		kthread_stop(c->hdm_enqueue_task);
	c->hdm_enqueue_task = NULL;

	if (iface->mod)
		module_put(iface->mod);

	c->is_poisoned = true;
	if (c->iface->poison_channel(c->iface, c->channel_id)) {
		dev_err(&c->dev, "Failed to stop channel %d of interface %s\n", c->channel_id,
			c->iface->description);
		mutex_unlock(&c->start_mutex);
		return -EAGAIN;
	}
	flush_trash_fifo(c);
	flush_channel_fifos(c);

#ifdef CMPL_INTERRUPTIBLE
	if (wait_for_completion_interruptible(&c->cleanup)) {
		dev_err(&c->dev, "Interrupted while cleaning up channel %d\n", c->channel_id);
		mutex_unlock(&c->start_mutex);
		return -EINTR;
	}
#else
	wait_for_completion(&c->cleanup);
#endif
	c->is_poisoned = false;

out:
	if (comp == c->pipe0.comp)
		c->pipe0.refs--;
	if (comp == c->pipe1.comp)
		c->pipe1.refs--;
	mutex_unlock(&c->start_mutex);
	return 0;
}
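
/*
 * Start/stop are reference counted per pipe: the first
 * most_start_channel() configures the hardware and arms buffers, and
 * only the final most_stop_channel() poisons the channel and waits for
 * cleanup. A minimal open/close pairing as a component might implement
 * it; the example_* names are illustrative:
 */
static int example_open(struct most_interface *iface, int id,
			struct most_component *my_comp)
{
	return most_start_channel(iface, id, my_comp);
}

static void example_close(struct most_interface *iface, int id,
			  struct most_component *my_comp)
{
	if (most_stop_channel(iface, id, my_comp))
		pr_warn("channel %d did not stop cleanly\n", id);
}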

static int disconnect_channels(struct device *dev, void *data)
{
	struct most_interface *iface;
	struct most_channel *c, *tmp;
	struct most_component *comp = data;

	iface = dev_get_drvdata(dev);
	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
		if (c->pipe0.comp == comp || c->pipe1.comp == comp)
			comp->disconnect_channel(c->iface, c->channel_id);
		if (c->pipe0.comp == comp)
			c->pipe0.comp = NULL;
		if (c->pipe1.comp == comp)
			c->pipe1.comp = NULL;
	}
	return 0;
}

static void release_channel(struct device *dev)
{
	struct most_channel *c = to_channel(dev);

	kfree(c);
}

int most_register_interface(struct most_interface *iface)
{
	unsigned int i;
	struct most_channel *c;
	/* ... validate iface, allocate iface->p, register iface->dev ... */

	for (i = 0; i < iface->num_channels; i++) {
		const char *name_suffix = iface->channel_vector[i].name_suffix;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c)
			goto err_free_resources;
		if (!name_suffix)
			snprintf(c->name, STRING_SIZE, "ch%d", i);
		else
			snprintf(c->name, STRING_SIZE, "%s", name_suffix);
		c->dev.init_name = c->name;
		c->dev.parent = iface->dev;
		c->dev.groups = channel_attr_groups;
		c->dev.release = release_channel;
		iface->p->channel[i] = c;
		c->is_starving = 0;
		c->iface = iface;
		c->channel_id = i;
		c->keep_mbo = false;
		c->enqueue_halt = false;
		c->is_poisoned = false;
		c->cfg.direction = 0;
		c->cfg.data_type = 0;
		c->cfg.num_buffers = 0;
		c->cfg.buffer_size = 0;
		c->cfg.subbuffer_size = 0;
		c->cfg.packets_per_xact = 0;
		spin_lock_init(&c->fifo_lock);
		INIT_LIST_HEAD(&c->fifo);
		INIT_LIST_HEAD(&c->trash_fifo);
		INIT_LIST_HEAD(&c->halt_fifo);
		init_completion(&c->cleanup);
		atomic_set(&c->mbo_ref, 0);
		mutex_init(&c->start_mutex);
		mutex_init(&c->nq_mutex);
		list_add_tail(&c->list, &iface->p->channel_list);
		if (device_register(&c->dev)) {
			dev_err(&c->dev, "Failed to register channel device\n");
			goto err_free_most_channel;
		}
	}
	/* ... */
	return 0;

err_free_most_channel:
	put_device(&c->dev);
err_free_resources:
	while (i > 0) {
		c = iface->p->channel[--i];
		device_unregister(&c->dev);
	}
	/* ... free iface->p and unregister iface->dev ... */
	return -ENOMEM;
}
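
/*
 * For context: a hardware-dependent module (HDM) fills in a struct
 * most_interface and hands it to most_register_interface(). A hedged
 * sketch; the my_hdm_* callbacks and capability table are invented for
 * illustration and would normally be populated from probed hardware:
 */
static int my_hdm_configure(struct most_interface *iface, int ch_id,
			    struct most_channel_config *cfg)
{
	return 0;	/* program DMA/endpoints for the requested config */
}

static int my_hdm_enqueue(struct most_interface *iface, int ch_id,
			  struct mbo *mbo)
{
	return 0;	/* hand the buffer to the hardware */
}

static int my_hdm_poison_channel(struct most_interface *iface, int ch_id)
{
	return 0;	/* abort I/O and fail all pending buffers */
}

static struct most_channel_capability my_caps[1]; /* filled at probe */

static struct most_interface my_iface = {
	.interface	= ITYPE_USB,
	.description	= "my-hdm",
	.num_channels	= 1,
	.channel_vector	= my_caps,
	.configure	= my_hdm_configure,
	.enqueue	= my_hdm_enqueue,
	.poison_channel	= my_hdm_poison_channel,
};

static int my_hdm_probe(void)
{
	int ret = most_register_interface(&my_iface);

	return ret < 0 ? ret : 0;
	/* on remove: most_deregister_interface(&my_iface); */
}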

void most_deregister_interface(struct most_interface *iface)
{
	int i;
	struct most_channel *c;

	for (i = 0; i < iface->num_channels; i++) {
		c = iface->p->channel[i];
		if (c->pipe0.comp)
			c->pipe0.comp->disconnect_channel(c->iface,
							  c->channel_id);
		if (c->pipe1.comp)
			c->pipe1.comp->disconnect_channel(c->iface,
							  c->channel_id);
		c->pipe0.comp = NULL;
		c->pipe1.comp = NULL;
		list_del(&c->list);
		device_unregister(&c->dev);
	}
	/* ... free private data and unregister iface->dev ... */
}

/**
 * most_stop_enqueue - prevents core from enqueueing MBOs
 * @iface: pointer to interface
 * @id: channel id
 */
int most_stop_enqueue(struct most_interface *iface, int id)
{
	struct most_channel *c = iface->p->channel[id];

	if (!c)
		return -EINVAL;

	mutex_lock(&c->nq_mutex);
	c->enqueue_halt = true;
	mutex_unlock(&c->nq_mutex);
	return 0;
}

/**
 * most_resume_enqueue - allow core to enqueue MBOs again
 * @iface: pointer to interface
 * @id: channel id
 */
int most_resume_enqueue(struct most_interface *iface, int id)
{
	struct most_channel *c = iface->p->channel[id];

	if (!c)
		return -EINVAL;

	mutex_lock(&c->nq_mutex);
	c->enqueue_halt = false;
	mutex_unlock(&c->nq_mutex);

	wake_up_interruptible(&c->hdm_fifo_wq);
	return 0;
}
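
/*
 * The two calls above are meant to be paired: an HDM quiesces the
 * enqueue thread, touches hardware, then resumes. A sketch with the
 * invented helper my_reprogram_hw():
 */
static void my_reprogram_hw(int id)
{
	/* hardware-specific reconfiguration goes here */
}

static int my_reconfigure(struct most_interface *iface, int id)
{
	int ret = most_stop_enqueue(iface, id);

	if (ret)
		return ret;
	my_reprogram_hw(id);	/* no new buffers reach the HDM here */
	return most_resume_enqueue(iface, id);
}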