1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * core.c - Implementation of core module of MOST Linux driver stack
4 *
5 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
6 */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/module.h>
10 #include <linux/fs.h>
11 #include <linux/slab.h>
12 #include <linux/init.h>
13 #include <linux/device.h>
14 #include <linux/list.h>
15 #include <linux/poll.h>
16 #include <linux/wait.h>
17 #include <linux/kobject.h>
18 #include <linux/mutex.h>
19 #include <linux/completion.h>
20 #include <linux/sysfs.h>
21 #include <linux/kthread.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/idr.h>
24 #include <most/core.h>
25
26 #define MAX_CHANNELS 64
27 #define STRING_SIZE 80
28
29 static struct ida mdev_id;
30 static int dummy_num_buffers;
31
32 static struct mostcore {
33 struct device dev;
34 struct device_driver drv;
35 struct bus_type bus;
36 struct list_head comp_list;
37 } mc;
38
39 #define to_driver(d) container_of(d, struct mostcore, drv)
40
41 struct pipe {
42 struct core_component *comp;
43 int refs;
44 int num_buffers;
45 };
46
47 struct most_channel {
48 struct device dev;
49 struct completion cleanup;
50 atomic_t mbo_ref;
51 atomic_t mbo_nq_level;
52 u16 channel_id;
53 char name[STRING_SIZE];
54 bool is_poisoned;
55 struct mutex start_mutex;
56 struct mutex nq_mutex; /* nq thread synchronization */
57 int is_starving;
58 struct most_interface *iface;
59 struct most_channel_config cfg;
60 bool keep_mbo;
61 bool enqueue_halt;
62 struct list_head fifo;
63 spinlock_t fifo_lock;
64 struct list_head halt_fifo;
65 struct list_head list;
66 struct pipe pipe0;
67 struct pipe pipe1;
68 struct list_head trash_fifo;
69 struct task_struct *hdm_enqueue_task;
70 wait_queue_head_t hdm_fifo_wq;
71
72 };
73
74 #define to_channel(d) container_of(d, struct most_channel, dev)
75
76 struct interface_private {
77 int dev_id;
78 char name[STRING_SIZE];
79 struct most_channel *channel[MAX_CHANNELS];
80 struct list_head channel_list;
81 };
82
83 static const struct {
84 int most_ch_data_type;
85 const char *name;
86 } ch_data_type[] = {
87 { MOST_CH_CONTROL, "control\n" },
88 { MOST_CH_ASYNC, "async\n" },
89 { MOST_CH_SYNC, "sync\n" },
90 { MOST_CH_ISOC, "isoc\n"},
91 { MOST_CH_ISOC, "isoc_avp\n"},
92 };
93
94 /**
95 * list_pop_mbo - retrieves the first MBO of the list and removes it
96 * @ptr: the list head to grab the MBO from.
97 */
98 #define list_pop_mbo(ptr) \
99 ({ \
100 struct mbo *_mbo = list_first_entry(ptr, struct mbo, list); \
101 list_del(&_mbo->list); \
102 _mbo; \
103 })
104
105 /**
106 * most_free_mbo_coherent - free an MBO and its coherent buffer
107 * @mbo: most buffer
108 */
109 static void most_free_mbo_coherent(struct mbo *mbo)
110 {
111 struct most_channel *c = mbo->context;
112 u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
113
114 if (c->iface->dma_free)
115 c->iface->dma_free(mbo, coherent_buf_size);
116 else
117 kfree(mbo->virt_address);
118 kfree(mbo);
119 if (atomic_sub_and_test(1, &c->mbo_ref))
120 complete(&c->cleanup);
121 }
122
123 /**
124 * flush_channel_fifos - clear the channel fifos
125 * @c: pointer to channel object
126 */
127 static void flush_channel_fifos(struct most_channel *c)
128 {
129 unsigned long flags, hf_flags;
130 struct mbo *mbo, *tmp;
131
132 if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
133 return;
134
135 spin_lock_irqsave(&c->fifo_lock, flags);
136 list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
137 list_del(&mbo->list);
138 spin_unlock_irqrestore(&c->fifo_lock, flags);
139 most_free_mbo_coherent(mbo);
140 spin_lock_irqsave(&c->fifo_lock, flags);
141 }
142 spin_unlock_irqrestore(&c->fifo_lock, flags);
143
144 spin_lock_irqsave(&c->fifo_lock, hf_flags);
145 list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
146 list_del(&mbo->list);
147 spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
148 most_free_mbo_coherent(mbo);
149 spin_lock_irqsave(&c->fifo_lock, hf_flags);
150 }
151 spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
152
153 if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
154 pr_info("WARN: fifo | trash fifo not empty\n");
155 }
156
157 /**
158 * flush_trash_fifo - clear the trash fifo
159 * @c: pointer to channel object
160 */
161 static int flush_trash_fifo(struct most_channel *c)
162 {
163 struct mbo *mbo, *tmp;
164 unsigned long flags;
165
166 spin_lock_irqsave(&c->fifo_lock, flags);
167 list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
168 list_del(&mbo->list);
169 spin_unlock_irqrestore(&c->fifo_lock, flags);
170 most_free_mbo_coherent(mbo);
171 spin_lock_irqsave(&c->fifo_lock, flags);
172 }
173 spin_unlock_irqrestore(&c->fifo_lock, flags);
174 return 0;
175 }
176
177 static ssize_t available_directions_show(struct device *dev,
178 struct device_attribute *attr,
179 char *buf)
180 {
181 struct most_channel *c = to_channel(dev);
182 unsigned int i = c->channel_id;
183
184 strcpy(buf, "");
185 if (c->iface->channel_vector[i].direction & MOST_CH_RX)
186 strcat(buf, "rx ");
187 if (c->iface->channel_vector[i].direction & MOST_CH_TX)
188 strcat(buf, "tx ");
189 strcat(buf, "\n");
190 return strlen(buf);
191 }
192
193 static ssize_t available_datatypes_show(struct device *dev,
194 struct device_attribute *attr,
195 char *buf)
196 {
197 struct most_channel *c = to_channel(dev);
198 unsigned int i = c->channel_id;
199
200 strcpy(buf, "");
201 if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
202 strcat(buf, "control ");
203 if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
204 strcat(buf, "async ");
205 if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
206 strcat(buf, "sync ");
207 if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
208 strcat(buf, "isoc ");
209 strcat(buf, "\n");
210 return strlen(buf);
211 }
212
213 static ssize_t number_of_packet_buffers_show(struct device *dev,
214 struct device_attribute *attr,
215 char *buf)
216 {
217 struct most_channel *c = to_channel(dev);
218 unsigned int i = c->channel_id;
219
220 return snprintf(buf, PAGE_SIZE, "%d\n",
221 c->iface->channel_vector[i].num_buffers_packet);
222 }
223
224 static ssize_t number_of_stream_buffers_show(struct device *dev,
225 struct device_attribute *attr,
226 char *buf)
227 {
228 struct most_channel *c = to_channel(dev);
229 unsigned int i = c->channel_id;
230
231 return snprintf(buf, PAGE_SIZE, "%d\n",
232 c->iface->channel_vector[i].num_buffers_streaming);
233 }
234
235 static ssize_t size_of_packet_buffer_show(struct device *dev,
236 struct device_attribute *attr,
237 char *buf)
238 {
239 struct most_channel *c = to_channel(dev);
240 unsigned int i = c->channel_id;
241
242 return snprintf(buf, PAGE_SIZE, "%d\n",
243 c->iface->channel_vector[i].buffer_size_packet);
244 }
245
246 static ssize_t size_of_stream_buffer_show(struct device *dev,
247 struct device_attribute *attr,
248 char *buf)
249 {
250 struct most_channel *c = to_channel(dev);
251 unsigned int i = c->channel_id;
252
253 return snprintf(buf, PAGE_SIZE, "%d\n",
254 c->iface->channel_vector[i].buffer_size_streaming);
255 }
256
257 static ssize_t channel_starving_show(struct device *dev,
258 struct device_attribute *attr,
259 char *buf)
260 {
261 struct most_channel *c = to_channel(dev);
262
263 return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
264 }
265
266 static ssize_t set_number_of_buffers_show(struct device *dev,
267 struct device_attribute *attr,
268 char *buf)
269 {
270 struct most_channel *c = to_channel(dev);
271
272 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
273 }
274
275 static ssize_t set_number_of_buffers_store(struct device *dev,
276 struct device_attribute *attr,
277 const char *buf,
278 size_t count)
279 {
280 struct most_channel *c = to_channel(dev);
281 int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
282
283 if (ret)
284 return ret;
285 return count;
286 }
287
288 static ssize_t set_buffer_size_show(struct device *dev,
289 struct device_attribute *attr,
290 char *buf)
291 {
292 struct most_channel *c = to_channel(dev);
293
294 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
295 }
296
297 static ssize_t set_buffer_size_store(struct device *dev,
298 struct device_attribute *attr,
299 const char *buf,
300 size_t count)
301 {
302 struct most_channel *c = to_channel(dev);
303 int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
304
305 if (ret)
306 return ret;
307 return count;
308 }
309
310 static ssize_t set_direction_show(struct device *dev,
311 struct device_attribute *attr,
312 char *buf)
313 {
314 struct most_channel *c = to_channel(dev);
315
316 if (c->cfg.direction & MOST_CH_TX)
317 return snprintf(buf, PAGE_SIZE, "tx\n");
318 else if (c->cfg.direction & MOST_CH_RX)
319 return snprintf(buf, PAGE_SIZE, "rx\n");
320 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
321 }
322
323 static ssize_t set_direction_store(struct device *dev,
324 struct device_attribute *attr,
325 const char *buf,
326 size_t count)
327 {
328 struct most_channel *c = to_channel(dev);
329
330 if (!strcmp(buf, "dir_rx\n")) {
331 c->cfg.direction = MOST_CH_RX;
332 } else if (!strcmp(buf, "rx\n")) {
333 c->cfg.direction = MOST_CH_RX;
334 } else if (!strcmp(buf, "dir_tx\n")) {
335 c->cfg.direction = MOST_CH_TX;
336 } else if (!strcmp(buf, "tx\n")) {
337 c->cfg.direction = MOST_CH_TX;
338 } else {
339 pr_info("WARN: invalid attribute settings\n");
340 return -EINVAL;
341 }
342 return count;
343 }
344
345 static ssize_t set_datatype_show(struct device *dev,
346 struct device_attribute *attr,
347 char *buf)
348 {
349 int i;
350 struct most_channel *c = to_channel(dev);
351
352 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
353 if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
354 return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
355 }
356 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
357 }
358
359 static ssize_t set_datatype_store(struct device *dev,
360 struct device_attribute *attr,
361 const char *buf,
362 size_t count)
363 {
364 int i;
365 struct most_channel *c = to_channel(dev);
366
367 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
368 if (!strcmp(buf, ch_data_type[i].name)) {
369 c->cfg.data_type = ch_data_type[i].most_ch_data_type;
370 break;
371 }
372 }
373
374 if (i == ARRAY_SIZE(ch_data_type)) {
375 pr_info("WARN: invalid attribute settings\n");
376 return -EINVAL;
377 }
378 return count;
379 }
380
381 static ssize_t set_subbuffer_size_show(struct device *dev,
382 struct device_attribute *attr,
383 char *buf)
384 {
385 struct most_channel *c = to_channel(dev);
386
387 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
388 }
389
390 static ssize_t set_subbuffer_size_store(struct device *dev,
391 struct device_attribute *attr,
392 const char *buf,
393 size_t count)
394 {
395 struct most_channel *c = to_channel(dev);
396 int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
397
398 if (ret)
399 return ret;
400 return count;
401 }
402
403 static ssize_t set_packets_per_xact_show(struct device *dev,
404 struct device_attribute *attr,
405 char *buf)
406 {
407 struct most_channel *c = to_channel(dev);
408
409 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
410 }
411
412 static ssize_t set_packets_per_xact_store(struct device *dev,
413 struct device_attribute *attr,
414 const char *buf,
415 size_t count)
416 {
417 struct most_channel *c = to_channel(dev);
418 int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
419
420 if (ret)
421 return ret;
422 return count;
423 }
424
425 static ssize_t set_dbr_size_show(struct device *dev,
426 struct device_attribute *attr, char *buf)
427 {
428 struct most_channel *c = to_channel(dev);
429
430 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size);
431 }
432
433 static ssize_t set_dbr_size_store(struct device *dev,
434 struct device_attribute *attr,
435 const char *buf, size_t count)
436 {
437 struct most_channel *c = to_channel(dev);
438 int ret = kstrtou16(buf, 0, &c->cfg.dbr_size);
439
440 if (ret)
441 return ret;
442 return count;
443 }
444
445 #define DEV_ATTR(_name) (&dev_attr_##_name.attr)
446
447 static DEVICE_ATTR_RO(available_directions);
448 static DEVICE_ATTR_RO(available_datatypes);
449 static DEVICE_ATTR_RO(number_of_packet_buffers);
450 static DEVICE_ATTR_RO(number_of_stream_buffers);
451 static DEVICE_ATTR_RO(size_of_stream_buffer);
452 static DEVICE_ATTR_RO(size_of_packet_buffer);
453 static DEVICE_ATTR_RO(channel_starving);
454 static DEVICE_ATTR_RW(set_buffer_size);
455 static DEVICE_ATTR_RW(set_number_of_buffers);
456 static DEVICE_ATTR_RW(set_direction);
457 static DEVICE_ATTR_RW(set_datatype);
458 static DEVICE_ATTR_RW(set_subbuffer_size);
459 static DEVICE_ATTR_RW(set_packets_per_xact);
460 static DEVICE_ATTR_RW(set_dbr_size);
461
462 static struct attribute *channel_attrs[] = {
463 DEV_ATTR(available_directions),
464 DEV_ATTR(available_datatypes),
465 DEV_ATTR(number_of_packet_buffers),
466 DEV_ATTR(number_of_stream_buffers),
467 DEV_ATTR(size_of_stream_buffer),
468 DEV_ATTR(size_of_packet_buffer),
469 DEV_ATTR(channel_starving),
470 DEV_ATTR(set_buffer_size),
471 DEV_ATTR(set_number_of_buffers),
472 DEV_ATTR(set_direction),
473 DEV_ATTR(set_datatype),
474 DEV_ATTR(set_subbuffer_size),
475 DEV_ATTR(set_packets_per_xact),
476 DEV_ATTR(set_dbr_size),
477 NULL,
478 };
479
480 static struct attribute_group channel_attr_group = {
481 .attrs = channel_attrs,
482 };
483
484 static const struct attribute_group *channel_attr_groups[] = {
485 &channel_attr_group,
486 NULL,
487 };
488
489 static ssize_t description_show(struct device *dev,
490 struct device_attribute *attr,
491 char *buf)
492 {
493 struct most_interface *iface = to_most_interface(dev);
494
495 return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
496 }
497
498 static ssize_t interface_show(struct device *dev,
499 struct device_attribute *attr,
500 char *buf)
501 {
502 struct most_interface *iface = to_most_interface(dev);
503
504 switch (iface->interface) {
505 case ITYPE_LOOPBACK:
506 return snprintf(buf, PAGE_SIZE, "loopback\n");
507 case ITYPE_I2C:
508 return snprintf(buf, PAGE_SIZE, "i2c\n");
509 case ITYPE_I2S:
510 return snprintf(buf, PAGE_SIZE, "i2s\n");
511 case ITYPE_TSI:
512 return snprintf(buf, PAGE_SIZE, "tsi\n");
513 case ITYPE_HBI:
514 return snprintf(buf, PAGE_SIZE, "hbi\n");
515 case ITYPE_MEDIALB_DIM:
516 return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
517 case ITYPE_MEDIALB_DIM2:
518 return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
519 case ITYPE_USB:
520 return snprintf(buf, PAGE_SIZE, "usb\n");
521 case ITYPE_PCIE:
522 return snprintf(buf, PAGE_SIZE, "pcie\n");
523 }
524 return snprintf(buf, PAGE_SIZE, "unknown\n");
525 }
526
527 static DEVICE_ATTR_RO(description);
528 static DEVICE_ATTR_RO(interface);
529
530 static struct attribute *interface_attrs[] = {
531 DEV_ATTR(description),
532 DEV_ATTR(interface),
533 NULL,
534 };
535
536 static struct attribute_group interface_attr_group = {
537 .attrs = interface_attrs,
538 };
539
540 static const struct attribute_group *interface_attr_groups[] = {
541 &interface_attr_group,
542 NULL,
543 };
544
545 static struct core_component *match_component(char *name)
546 {
547 struct core_component *comp;
548
549 list_for_each_entry(comp, &mc.comp_list, list) {
550 if (!strcmp(comp->name, name))
551 return comp;
552 }
553 return NULL;
554 }
555
556 struct show_links_data {
557 int offs;
558 char *buf;
559 };
560
561 static int print_links(struct device *dev, void *data)
562 {
563 struct show_links_data *d = data;
564 int offs = d->offs;
565 char *buf = d->buf;
566 struct most_channel *c;
567 struct most_interface *iface = to_most_interface(dev);
568
569 list_for_each_entry(c, &iface->p->channel_list, list) {
570 if (c->pipe0.comp) {
571 offs += snprintf(buf + offs,
572 PAGE_SIZE - offs,
573 "%s:%s:%s\n",
574 c->pipe0.comp->name,
575 dev_name(&iface->dev),
576 dev_name(&c->dev));
577 }
578 if (c->pipe1.comp) {
579 offs += snprintf(buf + offs,
580 PAGE_SIZE - offs,
581 "%s:%s:%s\n",
582 c->pipe1.comp->name,
583 dev_name(&iface->dev),
584 dev_name(&c->dev));
585 }
586 }
587 d->offs = offs;
588 return 0;
589 }
590
591 static ssize_t links_show(struct device_driver *drv, char *buf)
592 {
593 struct show_links_data d = { .buf = buf };
594
595 bus_for_each_dev(&mc.bus, NULL, &d, print_links);
596 return d.offs;
597 }
598
599 static ssize_t components_show(struct device_driver *drv, char *buf)
600 {
601 struct core_component *comp;
602 int offs = 0;
603
604 list_for_each_entry(comp, &mc.comp_list, list) {
605 offs += snprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
606 comp->name);
607 }
608 return offs;
609 }
610
611 /**
612 * split_string - parses buf and extracts ':' separated substrings.
613 *
614 * @buf: complete string from attribute 'add_channel'
615 * @a: storage for 1st substring (=interface name)
616 * @b: storage for 2nd substring (=channel name)
617 * @c: storage for 3rd substring (=component name)
618 * @d: storage for optional 4th substring (=user-defined name)
619 *
620 * Examples:
621 *
622 * Input: "mdev0:ch6:cdev:my_channel\n" or
623 * "mdev0:ch6:cdev:my_channel"
624 *
625 * Output: *a -> "mdev0", *b -> "ch6", *c -> "cdev" *d -> "my_channel"
626 *
627 * Input: "mdev1:ep81:cdev\n"
628 * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d -> ""
629 *
630 * Input: "mdev1:ep81"
631 * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d == NULL
632 */
633 static int split_string(char *buf, char **a, char **b, char **c, char **d)
634 {
635 *a = strsep(&buf, ":");
636 if (!*a)
637 return -EIO;
638
639 *b = strsep(&buf, ":\n");
640 if (!*b)
641 return -EIO;
642
643 *c = strsep(&buf, ":\n");
644 if (!*c)
645 return -EIO;
646
647 if (d)
648 *d = strsep(&buf, ":\n");
649
650 return 0;
651 }
652
653 static int match_bus_dev(struct device *dev, void *data)
654 {
655 char *mdev_name = data;
656
657 return !strcmp(dev_name(dev), mdev_name);
658 }
659
660 /**
661 * get_channel - get pointer to channel
662 * @mdev: name of the device interface
663 * @mdev_ch: name of channel
664 */
665 static struct most_channel *get_channel(char *mdev, char *mdev_ch)
666 {
667 struct device *dev = NULL;
668 struct most_interface *iface;
669 struct most_channel *c, *tmp;
670
671 dev = bus_find_device(&mc.bus, NULL, mdev, match_bus_dev);
672 if (!dev)
673 return NULL;
674 iface = to_most_interface(dev);
675 list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
676 if (!strcmp(dev_name(&c->dev), mdev_ch))
677 return c;
678 }
679 return NULL;
680 }
681
682 static
683 inline int link_channel_to_component(struct most_channel *c,
684 struct core_component *comp,
685 char *comp_param)
686 {
687 int ret;
688 struct core_component **comp_ptr;
689
690 if (!c->pipe0.comp)
691 comp_ptr = &c->pipe0.comp;
692 else if (!c->pipe1.comp)
693 comp_ptr = &c->pipe1.comp;
694 else
695 return -ENOSPC;
696
697 *comp_ptr = comp;
698 ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, comp_param);
699 if (ret) {
700 *comp_ptr = NULL;
701 return ret;
702 }
703 return 0;
704 }
705
706 /**
707 * add_link_store - store function for add_link attribute
708 * @drv: device driver
709 * @buf: buffer
710 * @len: buffer length
711 *
712 * This parses the string given by buf and splits it into
713 * four substrings. Note: the last substring is optional. In case a cdev
714 * component is loaded, the optional 4th substring makes up the name of the
715 * device node in the /dev directory. If omitted, the device node will
716 * inherit the channel's name within sysfs.
717 *
718 * Searches for (device, channel) pair and probes the component
719 *
720 * Example:
721 * (1) echo "mdev0:ch6:cdev:my_rxchannel" >add_link
722 * (2) echo "mdev1:ep81:cdev" >add_link
723 *
724 * (1) would create the device node /dev/my_rxchannel
725 * (2) would create the device node /dev/mdev1-ep81
726 */
727 static ssize_t add_link_store(struct device_driver *drv,
728 const char *buf,
729 size_t len)
730 {
731 struct most_channel *c;
732 struct core_component *comp;
733 char buffer[STRING_SIZE];
734 char *mdev;
735 char *mdev_ch;
736 char *comp_name;
737 char *comp_param;
738 char devnod_buf[STRING_SIZE];
739 int ret;
740 size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
741
742 strlcpy(buffer, buf, max_len);
743 ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, &comp_param);
744 if (ret)
745 return ret;
746 comp = match_component(comp_name);
747 if (!comp)
748 return -ENODEV;
749 if (!comp_param || *comp_param == 0) {
750 snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
751 mdev_ch);
752 comp_param = devnod_buf;
753 }
754
755 c = get_channel(mdev, mdev_ch);
756 if (!c)
757 return -ENODEV;
758
759 ret = link_channel_to_component(c, comp, comp_param);
760 if (ret)
761 return ret;
762 return len;
763 }
764
765 /**
766 * remove_link_store - store function for remove_link attribute
767 * @drv: device driver
768 * @buf: buffer
769 * @len: buffer length
770 *
771 * Example:
772 * echo "mdev0:ep81" >remove_link
773 */
774 static ssize_t remove_link_store(struct device_driver *drv,
775 const char *buf,
776 size_t len)
777 {
778 struct most_channel *c;
779 struct core_component *comp;
780 char buffer[STRING_SIZE];
781 char *mdev;
782 char *mdev_ch;
783 char *comp_name;
784 int ret;
785 size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
786
787 strlcpy(buffer, buf, max_len);
788 ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, NULL);
789 if (ret)
790 return ret;
791 comp = match_component(comp_name);
792 if (!comp)
793 return -ENODEV;
794 c = get_channel(mdev, mdev_ch);
795 if (!c)
796 return -ENODEV;
797
798 if (comp->disconnect_channel(c->iface, c->channel_id))
799 return -EIO;
800 if (c->pipe0.comp == comp)
801 c->pipe0.comp = NULL;
802 if (c->pipe1.comp == comp)
803 c->pipe1.comp = NULL;
804 return len;
805 }
806
807 #define DRV_ATTR(_name) (&driver_attr_##_name.attr)
808
809 static DRIVER_ATTR_RO(links);
810 static DRIVER_ATTR_RO(components);
811 static DRIVER_ATTR_WO(add_link);
812 static DRIVER_ATTR_WO(remove_link);
813
814 static struct attribute *mc_attrs[] = {
815 DRV_ATTR(links),
816 DRV_ATTR(components),
817 DRV_ATTR(add_link),
818 DRV_ATTR(remove_link),
819 NULL,
820 };
821
822 static struct attribute_group mc_attr_group = {
823 .attrs = mc_attrs,
824 };
825
826 static const struct attribute_group *mc_attr_groups[] = {
827 &mc_attr_group,
828 NULL,
829 };
830
831 static int most_match(struct device *dev, struct device_driver *drv)
832 {
833 if (!strcmp(dev_name(dev), "most"))
834 return 0;
835 else
836 return 1;
837 }
838
839 static inline void trash_mbo(struct mbo *mbo)
840 {
841 unsigned long flags;
842 struct most_channel *c = mbo->context;
843
844 spin_lock_irqsave(&c->fifo_lock, flags);
845 list_add(&mbo->list, &c->trash_fifo);
846 spin_unlock_irqrestore(&c->fifo_lock, flags);
847 }
848
849 static bool hdm_mbo_ready(struct most_channel *c)
850 {
851 bool empty;
852
853 if (c->enqueue_halt)
854 return false;
855
856 spin_lock_irq(&c->fifo_lock);
857 empty = list_empty(&c->halt_fifo);
858 spin_unlock_irq(&c->fifo_lock);
859
860 return !empty;
861 }
862
863 static void nq_hdm_mbo(struct mbo *mbo)
864 {
865 unsigned long flags;
866 struct most_channel *c = mbo->context;
867
868 spin_lock_irqsave(&c->fifo_lock, flags);
869 list_add_tail(&mbo->list, &c->halt_fifo);
870 spin_unlock_irqrestore(&c->fifo_lock, flags);
871 wake_up_interruptible(&c->hdm_fifo_wq);
872 }
873
874 static int hdm_enqueue_thread(void *data)
875 {
876 struct most_channel *c = data;
877 struct mbo *mbo;
878 int ret;
879 typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
880
881 while (likely(!kthread_should_stop())) {
882 wait_event_interruptible(c->hdm_fifo_wq,
883 hdm_mbo_ready(c) ||
884 kthread_should_stop());
885
886 mutex_lock(&c->nq_mutex);
887 spin_lock_irq(&c->fifo_lock);
888 if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
889 spin_unlock_irq(&c->fifo_lock);
890 mutex_unlock(&c->nq_mutex);
891 continue;
892 }
893
894 mbo = list_pop_mbo(&c->halt_fifo);
895 spin_unlock_irq(&c->fifo_lock);
896
897 if (c->cfg.direction == MOST_CH_RX)
898 mbo->buffer_length = c->cfg.buffer_size;
899
900 ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
901 mutex_unlock(&c->nq_mutex);
902
903 if (unlikely(ret)) {
904 pr_err("hdm enqueue failed\n");
905 nq_hdm_mbo(mbo);
906 c->hdm_enqueue_task = NULL;
907 return 0;
908 }
909 }
910
911 return 0;
912 }
913
914 static int run_enqueue_thread(struct most_channel *c, int channel_id)
915 {
916 struct task_struct *task =
917 kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
918 channel_id);
919
920 if (IS_ERR(task))
921 return PTR_ERR(task);
922
923 c->hdm_enqueue_task = task;
924 return 0;
925 }
926
927 /**
928 * arm_mbo - recycle MBO for further usage
929 * @mbo: most buffer
930 *
931 * This puts an MBO back on the list to have it ready for upcoming
932 * tx transactions.
933 *
934 * In case the MBO belongs to a channel that recently has been
935 * poisoned, the MBO is scheduled to be trashed.
936 * Calls the completion handler of an attached component.
937 */
938 static void arm_mbo(struct mbo *mbo)
939 {
940 unsigned long flags;
941 struct most_channel *c;
942
943 c = mbo->context;
944
945 if (c->is_poisoned) {
946 trash_mbo(mbo);
947 return;
948 }
949
950 spin_lock_irqsave(&c->fifo_lock, flags);
951 ++*mbo->num_buffers_ptr;
952 list_add_tail(&mbo->list, &c->fifo);
953 spin_unlock_irqrestore(&c->fifo_lock, flags);
954
955 if (c->pipe0.refs && c->pipe0.comp->tx_completion)
956 c->pipe0.comp->tx_completion(c->iface, c->channel_id);
957
958 if (c->pipe1.refs && c->pipe1.comp->tx_completion)
959 c->pipe1.comp->tx_completion(c->iface, c->channel_id);
960 }
961
962 /**
963 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
964 * @c: pointer to interface channel
965 * @dir: direction of the channel
966 * @compl: pointer to completion function
967 *
968 * This allocates buffer objects including the containing DMA coherent
969 * buffer and puts them in the fifo.
970 * Buffers of Rx channels are put in the kthread fifo, hence immediately
971 * submitted to the HDM.
972 *
973 * Returns the number of allocated and enqueued MBOs.
974 */
975 static int arm_mbo_chain(struct most_channel *c, int dir,
976 void (*compl)(struct mbo *))
977 {
978 unsigned int i;
979 struct mbo *mbo;
980 unsigned long flags;
981 u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
982
983 atomic_set(&c->mbo_nq_level, 0);
984
985 for (i = 0; i < c->cfg.num_buffers; i++) {
986 mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
987 if (!mbo)
988 goto flush_fifos;
989
990 mbo->context = c;
991 mbo->ifp = c->iface;
992 mbo->hdm_channel_id = c->channel_id;
993 if (c->iface->dma_alloc) {
994 mbo->virt_address =
995 c->iface->dma_alloc(mbo, coherent_buf_size);
996 } else {
997 mbo->virt_address =
998 kzalloc(coherent_buf_size, GFP_KERNEL);
999 }
1000 if (!mbo->virt_address)
1001 goto release_mbo;
1002
1003 mbo->complete = compl;
1004 mbo->num_buffers_ptr = &dummy_num_buffers;
1005 if (dir == MOST_CH_RX) {
1006 nq_hdm_mbo(mbo);
1007 atomic_inc(&c->mbo_nq_level);
1008 } else {
1009 spin_lock_irqsave(&c->fifo_lock, flags);
1010 list_add_tail(&mbo->list, &c->fifo);
1011 spin_unlock_irqrestore(&c->fifo_lock, flags);
1012 }
1013 }
1014 return c->cfg.num_buffers;
1015
1016 release_mbo:
1017 kfree(mbo);
1018
1019 flush_fifos:
1020 flush_channel_fifos(c);
1021 return 0;
1022 }
1023
1024 /**
1025 * most_submit_mbo - submits an MBO to fifo
1026 * @mbo: most buffer
1027 */
1028 void most_submit_mbo(struct mbo *mbo)
1029 {
1030 if (WARN_ONCE(!mbo || !mbo->context,
1031 "bad mbo or missing channel reference\n"))
1032 return;
1033
1034 nq_hdm_mbo(mbo);
1035 }
1036 EXPORT_SYMBOL_GPL(most_submit_mbo);
1037
1038 /**
1039 * most_write_completion - write completion handler
1040 * @mbo: most buffer
1041 *
1042 * This recycles the MBO for further usage. In case the channel has been
1043 * poisoned, the MBO is scheduled to be trashed.
1044 */
1045 static void most_write_completion(struct mbo *mbo)
1046 {
1047 struct most_channel *c;
1048
1049 c = mbo->context;
1050 if (mbo->status == MBO_E_INVAL)
1051 pr_info("WARN: Tx MBO status: invalid\n");
1052 if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
1053 trash_mbo(mbo);
1054 else
1055 arm_mbo(mbo);
1056 }
1057
1058 int channel_has_mbo(struct most_interface *iface, int id,
1059 struct core_component *comp)
1060 {
1061 struct most_channel *c = iface->p->channel[id];
1062 unsigned long flags;
1063 int empty;
1064
1065 if (unlikely(!c))
1066 return -EINVAL;
1067
1068 if (c->pipe0.refs && c->pipe1.refs &&
1069 ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
1070 (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
1071 return 0;
1072
1073 spin_lock_irqsave(&c->fifo_lock, flags);
1074 empty = list_empty(&c->fifo);
1075 spin_unlock_irqrestore(&c->fifo_lock, flags);
1076 return !empty;
1077 }
1078 EXPORT_SYMBOL_GPL(channel_has_mbo);
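/*
 * Usage sketch (illustrative, not taken from this file): a component may
 * poll channel_has_mbo() before pulling a buffer so it only enters the
 * buffer path when the fifo actually holds one; my_comp stands for the
 * caller's registered struct core_component.
 *
 *	if (channel_has_mbo(iface, channel_id, &my_comp) > 0)
 *		mbo = most_get_mbo(iface, channel_id, &my_comp);
 */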
1079
1080 /**
1081 * most_get_mbo - get a pointer to an MBO from the pool
1082 * @iface: pointer to interface instance
1083 * @id: channel ID
1084 * @comp: driver component
1085 *
1086 * This attempts to get a free buffer out of the channel fifo.
1087 * Returns a pointer to MBO on success or NULL otherwise.
1088 */
1089 struct mbo *most_get_mbo(struct most_interface *iface, int id,
1090 struct core_component *comp)
1091 {
1092 struct mbo *mbo;
1093 struct most_channel *c;
1094 unsigned long flags;
1095 int *num_buffers_ptr;
1096
1097 c = iface->p->channel[id];
1098 if (unlikely(!c))
1099 return NULL;
1100
1101 if (c->pipe0.refs && c->pipe1.refs &&
1102 ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
1103 (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
1104 return NULL;
1105
1106 if (comp == c->pipe0.comp)
1107 num_buffers_ptr = &c->pipe0.num_buffers;
1108 else if (comp == c->pipe1.comp)
1109 num_buffers_ptr = &c->pipe1.num_buffers;
1110 else
1111 num_buffers_ptr = &dummy_num_buffers;
1112
1113 spin_lock_irqsave(&c->fifo_lock, flags);
1114 if (list_empty(&c->fifo)) {
1115 spin_unlock_irqrestore(&c->fifo_lock, flags);
1116 return NULL;
1117 }
1118 mbo = list_pop_mbo(&c->fifo);
1119 --*num_buffers_ptr;
1120 spin_unlock_irqrestore(&c->fifo_lock, flags);
1121
1122 mbo->num_buffers_ptr = num_buffers_ptr;
1123 mbo->buffer_length = c->cfg.buffer_size;
1124 return mbo;
1125 }
1126 EXPORT_SYMBOL_GPL(most_get_mbo);
1127
1128 /**
1129 * most_put_mbo - return buffer to pool
1130 * @mbo: most buffer
1131 */
1132 void most_put_mbo(struct mbo *mbo)
1133 {
1134 struct most_channel *c = mbo->context;
1135
1136 if (c->cfg.direction == MOST_CH_TX) {
1137 arm_mbo(mbo);
1138 return;
1139 }
1140 nq_hdm_mbo(mbo);
1141 atomic_inc(&c->mbo_nq_level);
1142 }
1143 EXPORT_SYMBOL_GPL(most_put_mbo);
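/*
 * Usage sketch (illustrative assumption): a component's Tx path pulls a
 * buffer, fills it and submits it back to the core, while its Rx completion
 * handler returns the buffer with most_put_mbo() once the data has been
 * consumed. payload and payload_len are placeholders.
 *
 *	mbo = most_get_mbo(iface, channel_id, &my_comp);
 *	if (mbo) {
 *		memcpy(mbo->virt_address, payload, payload_len);
 *		mbo->buffer_length = payload_len;
 *		most_submit_mbo(mbo);
 *	}
 *
 *	most_put_mbo(mbo);	-- in the rx_completion handler when done
 */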
1144
1145 /**
1146 * most_read_completion - read completion handler
1147 * @mbo: most buffer
1148 *
1149 * This function is called by the HDM when data has been received from the
1150 * hardware and copied to the buffer of the MBO.
1151 *
1152 * In case the channel has been poisoned it puts the buffer in the trash queue.
1153 * Otherwise, it passes the buffer to a component for further processing.
1154 */
1155 static void most_read_completion(struct mbo *mbo)
1156 {
1157 struct most_channel *c = mbo->context;
1158
1159 if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
1160 trash_mbo(mbo);
1161 return;
1162 }
1163
1164 if (mbo->status == MBO_E_INVAL) {
1165 nq_hdm_mbo(mbo);
1166 atomic_inc(&c->mbo_nq_level);
1167 return;
1168 }
1169
1170 if (atomic_sub_and_test(1, &c->mbo_nq_level))
1171 c->is_starving = 1;
1172
1173 if (c->pipe0.refs && c->pipe0.comp->rx_completion &&
1174 c->pipe0.comp->rx_completion(mbo) == 0)
1175 return;
1176
1177 if (c->pipe1.refs && c->pipe1.comp->rx_completion &&
1178 c->pipe1.comp->rx_completion(mbo) == 0)
1179 return;
1180
1181 most_put_mbo(mbo);
1182 }
1183
1184 /**
1185 * most_start_channel - prepares a channel for communication
1186 * @iface: pointer to interface instance
1187 * @id: channel ID
1188 * @comp: driver component
1189 *
1190 * This prepares the channel for usage. Cross-checks whether the
1191 * channel's been properly configured.
1192 *
1193 * Returns 0 on success or error code otherwise.
1194 */
1195 int most_start_channel(struct most_interface *iface, int id,
1196 struct core_component *comp)
1197 {
1198 int num_buffer;
1199 int ret;
1200 struct most_channel *c = iface->p->channel[id];
1201
1202 if (unlikely(!c))
1203 return -EINVAL;
1204
1205 mutex_lock(&c->start_mutex);
1206 if (c->pipe0.refs + c->pipe1.refs > 0)
1207 goto out; /* already started by another component */
1208
1209 if (!try_module_get(iface->mod)) {
1210 pr_info("failed to acquire HDM lock\n");
1211 mutex_unlock(&c->start_mutex);
1212 return -ENOLCK;
1213 }
1214
1215 c->cfg.extra_len = 0;
1216 if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
1217 pr_info("channel configuration failed. Go check settings...\n");
1218 ret = -EINVAL;
1219 goto error;
1220 }
1221
1222 init_waitqueue_head(&c->hdm_fifo_wq);
1223
1224 if (c->cfg.direction == MOST_CH_RX)
1225 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1226 most_read_completion);
1227 else
1228 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1229 most_write_completion);
1230 if (unlikely(!num_buffer)) {
1231 ret = -ENOMEM;
1232 goto error;
1233 }
1234
1235 ret = run_enqueue_thread(c, id);
1236 if (ret)
1237 goto error;
1238
1239 c->is_starving = 0;
1240 c->pipe0.num_buffers = c->cfg.num_buffers / 2;
1241 c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers;
1242 atomic_set(&c->mbo_ref, num_buffer);
1243
1244 out:
1245 if (comp == c->pipe0.comp)
1246 c->pipe0.refs++;
1247 if (comp == c->pipe1.comp)
1248 c->pipe1.refs++;
1249 mutex_unlock(&c->start_mutex);
1250 return 0;
1251
1252 error:
1253 module_put(iface->mod);
1254 mutex_unlock(&c->start_mutex);
1255 return ret;
1256 }
1257 EXPORT_SYMBOL_GPL(most_start_channel);
1258
1259 /**
1260 * most_stop_channel - stops a running channel
1261 * @iface: pointer to interface instance
1262 * @id: channel ID
1263 * @comp: driver component
1264 */
1265 int most_stop_channel(struct most_interface *iface, int id,
1266 struct core_component *comp)
1267 {
1268 struct most_channel *c;
1269
1270 if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
1271 pr_err("Bad interface or index out of range\n");
1272 return -EINVAL;
1273 }
1274 c = iface->p->channel[id];
1275 if (unlikely(!c))
1276 return -EINVAL;
1277
1278 mutex_lock(&c->start_mutex);
1279 if (c->pipe0.refs + c->pipe1.refs >= 2)
1280 goto out;
1281
1282 if (c->hdm_enqueue_task)
1283 kthread_stop(c->hdm_enqueue_task);
1284 c->hdm_enqueue_task = NULL;
1285
1286 if (iface->mod)
1287 module_put(iface->mod);
1288
1289 c->is_poisoned = true;
1290 if (c->iface->poison_channel(c->iface, c->channel_id)) {
1291 pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
1292 c->iface->description);
1293 mutex_unlock(&c->start_mutex);
1294 return -EAGAIN;
1295 }
1296 flush_trash_fifo(c);
1297 flush_channel_fifos(c);
1298
1299 #ifdef CMPL_INTERRUPTIBLE
1300 if (wait_for_completion_interruptible(&c->cleanup)) {
1301 pr_info("Interrupted while clean up ch %d\n", c->channel_id);
1302 mutex_unlock(&c->start_mutex);
1303 return -EINTR;
1304 }
1305 #else
1306 wait_for_completion(&c->cleanup);
1307 #endif
1308 c->is_poisoned = false;
1309
1310 out:
1311 if (comp == c->pipe0.comp)
1312 c->pipe0.refs--;
1313 if (comp == c->pipe1.comp)
1314 c->pipe1.refs--;
1315 mutex_unlock(&c->start_mutex);
1316 return 0;
1317 }
1318 EXPORT_SYMBOL_GPL(most_stop_channel);
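/*
 * Usage sketch (illustrative assumption): a component typically brackets its
 * data path with these calls, e.g. in its open and close (or probe and
 * disconnect) handlers; both take the component pointer so the per-pipe
 * reference counts stay balanced.
 *
 *	ret = most_start_channel(iface, channel_id, &my_comp);
 *	if (ret)
 *		return ret;
 *	...
 *	most_stop_channel(iface, channel_id, &my_comp);
 */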
1319
1320 /**
1321 * most_register_component - registers a driver component with the core
1322 * @comp: driver component
1323 */
1324 int most_register_component(struct core_component *comp)
1325 {
1326 if (!comp) {
1327 pr_err("Bad component\n");
1328 return -EINVAL;
1329 }
1330 list_add_tail(&comp->list, &mc.comp_list);
1331 pr_info("registered new core component %s\n", comp->name);
1332 return 0;
1333 }
1334 EXPORT_SYMBOL_GPL(most_register_component);
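/*
 * Minimal component sketch (illustrative; only the callbacks this core
 * actually invokes are shown, the full struct layout lives in most/core.h):
 *
 *	static struct core_component my_comp = {
 *		.name = "my_comp",
 *		.probe_channel = my_probe_channel,
 *		.disconnect_channel = my_disconnect_channel,
 *		.rx_completion = my_rx_completion,
 *		.tx_completion = my_tx_completion,
 *	};
 *
 *	most_register_component(&my_comp);
 *	...
 *	most_deregister_component(&my_comp);
 */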
1335
1336 static int disconnect_channels(struct device *dev, void *data)
1337 {
1338 struct most_interface *iface;
1339 struct most_channel *c, *tmp;
1340 struct core_component *comp = data;
1341
1342 iface = to_most_interface(dev);
1343 list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
1344 if (c->pipe0.comp == comp || c->pipe1.comp == comp)
1345 comp->disconnect_channel(c->iface, c->channel_id);
1346 if (c->pipe0.comp == comp)
1347 c->pipe0.comp = NULL;
1348 if (c->pipe1.comp == comp)
1349 c->pipe1.comp = NULL;
1350 }
1351 return 0;
1352 }
1353
1354 /**
1355 * most_deregister_component - deregisters a driver component with the core
1356 * @comp: driver component
1357 */
1358 int most_deregister_component(struct core_component *comp)
1359 {
1360 if (!comp) {
1361 pr_err("Bad component\n");
1362 return -EINVAL;
1363 }
1364
1365 bus_for_each_dev(&mc.bus, NULL, comp, disconnect_channels);
1366 list_del(&comp->list);
1367 pr_info("deregistering component %s\n", comp->name);
1368 return 0;
1369 }
1370 EXPORT_SYMBOL_GPL(most_deregister_component);
1371
1372 static void release_interface(struct device *dev)
1373 {
1374 pr_info("releasing interface dev %s...\n", dev_name(dev));
1375 }
1376
1377 static void release_channel(struct device *dev)
1378 {
1379 pr_info("releasing channel dev %s...\n", dev_name(dev));
1380 }
1381
1382 /**
1383 * most_register_interface - registers an interface with core
1384 * @iface: device interface
1385 *
1386 * Allocates and initializes a new interface instance and all of its channels.
1387 * Returns 0 on success or a negative error code otherwise.
1388 */
1389 int most_register_interface(struct most_interface *iface)
1390 {
1391 unsigned int i;
1392 int id;
1393 struct most_channel *c;
1394
1395 if (!iface || !iface->enqueue || !iface->configure ||
1396 !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
1397 pr_err("Bad interface or channel overflow\n");
1398 return -EINVAL;
1399 }
1400
1401 id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
1402 if (id < 0) {
1403 pr_info("Failed to alloc mdev ID\n");
1404 return id;
1405 }
1406
1407 iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
1408 if (!iface->p) {
1409 ida_simple_remove(&mdev_id, id);
1410 return -ENOMEM;
1411 }
1412
1413 INIT_LIST_HEAD(&iface->p->channel_list);
1414 iface->p->dev_id = id;
1415 snprintf(iface->p->name, STRING_SIZE, "mdev%d", id);
1416 iface->dev.init_name = iface->p->name;
1417 iface->dev.bus = &mc.bus;
1418 iface->dev.parent = &mc.dev;
1419 iface->dev.groups = interface_attr_groups;
1420 iface->dev.release = release_interface;
1421 if (device_register(&iface->dev)) {
1422 pr_err("registering iface->dev failed\n");
1423 kfree(iface->p);
1424 ida_simple_remove(&mdev_id, id);
1425 return -ENOMEM;
1426 }
1427
1428 for (i = 0; i < iface->num_channels; i++) {
1429 const char *name_suffix = iface->channel_vector[i].name_suffix;
1430
1431 c = kzalloc(sizeof(*c), GFP_KERNEL);
1432 if (!c)
1433 goto free_instance;
1434 if (!name_suffix)
1435 snprintf(c->name, STRING_SIZE, "ch%d", i);
1436 else
1437 snprintf(c->name, STRING_SIZE, "%s", name_suffix);
1438 c->dev.init_name = c->name;
1439 c->dev.parent = &iface->dev;
1440 c->dev.groups = channel_attr_groups;
1441 c->dev.release = release_channel;
1442 if (device_register(&c->dev)) {
1443 pr_err("registering c->dev failed\n");
1444 goto free_instance_nodev;
1445 }
1446 iface->p->channel[i] = c;
1447 c->is_starving = 0;
1448 c->iface = iface;
1449 c->channel_id = i;
1450 c->keep_mbo = false;
1451 c->enqueue_halt = false;
1452 c->is_poisoned = false;
1453 c->cfg.direction = 0;
1454 c->cfg.data_type = 0;
1455 c->cfg.num_buffers = 0;
1456 c->cfg.buffer_size = 0;
1457 c->cfg.subbuffer_size = 0;
1458 c->cfg.packets_per_xact = 0;
1459 spin_lock_init(&c->fifo_lock);
1460 INIT_LIST_HEAD(&c->fifo);
1461 INIT_LIST_HEAD(&c->trash_fifo);
1462 INIT_LIST_HEAD(&c->halt_fifo);
1463 init_completion(&c->cleanup);
1464 atomic_set(&c->mbo_ref, 0);
1465 mutex_init(&c->start_mutex);
1466 mutex_init(&c->nq_mutex);
1467 list_add_tail(&c->list, &iface->p->channel_list);
1468 }
1469 pr_info("registered new device mdev%d (%s)\n",
1470 id, iface->description);
1471 return 0;
1472
1473 free_instance_nodev:
1474 kfree(c);
1475
1476 free_instance:
1477 while (i > 0) {
1478 c = iface->p->channel[--i];
1479 device_unregister(&c->dev);
1480 kfree(c);
1481 }
1482 kfree(iface->p);
1483 device_unregister(&iface->dev);
1484 ida_simple_remove(&mdev_id, id);
1485 return -ENOMEM;
1486 }
1487 EXPORT_SYMBOL_GPL(most_register_interface);
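/*
 * HDM registration sketch (illustrative): before calling
 * most_register_interface() an HDM fills in at least the members this
 * function checks and the core later uses -- enqueue, configure,
 * poison_channel, num_channels, channel_vector, description and mod
 * (dma_alloc/dma_free are optional).
 *
 *	iface->enqueue = my_enqueue;
 *	iface->configure = my_configure;
 *	iface->poison_channel = my_poison_channel;
 *	iface->channel_vector = my_channel_caps;
 *	iface->num_channels = ARRAY_SIZE(my_channel_caps);
 *	iface->description = "my hdm";
 *	iface->mod = THIS_MODULE;
 *	ret = most_register_interface(iface);
 */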
1488
1489 /**
1490 * most_deregister_interface - deregisters an interface with core
1491 * @iface: device interface
1492 *
1493 * Before removing an interface instance from the list, all running
1494 * channels are stopped and poisoned.
1495 */
1496 void most_deregister_interface(struct most_interface *iface)
1497 {
1498 int i;
1499 struct most_channel *c;
1500
1501 pr_info("deregistering device %s (%s)\n", dev_name(&iface->dev),
1502 iface->description);
1503 for (i = 0; i < iface->num_channels; i++) {
1504 c = iface->p->channel[i];
1505 if (c->pipe0.comp)
1506 c->pipe0.comp->disconnect_channel(c->iface,
1507 c->channel_id);
1508 if (c->pipe1.comp)
1509 c->pipe1.comp->disconnect_channel(c->iface,
1510 c->channel_id);
1511 c->pipe0.comp = NULL;
1512 c->pipe1.comp = NULL;
1513 list_del(&c->list);
1514 device_unregister(&c->dev);
1515 kfree(c);
1516 }
1517
1518 ida_simple_remove(&mdev_id, iface->p->dev_id);
1519 kfree(iface->p);
1520 device_unregister(&iface->dev);
1521 }
1522 EXPORT_SYMBOL_GPL(most_deregister_interface);
1523
1524 /**
1525 * most_stop_enqueue - prevents core from enqueueing MBOs
1526 * @iface: pointer to interface
1527 * @id: channel id
1528 *
1529 * This is called by an HDM that _cannot_ attend to its duties and
1530 * is about to be overrun by the core. The core is not going to
1531 * enqueue any further packets unless the flagging HDM calls
1532 * most_resume_enqueue().
1533 */
1534 void most_stop_enqueue(struct most_interface *iface, int id)
1535 {
1536 struct most_channel *c = iface->p->channel[id];
1537
1538 if (!c)
1539 return;
1540
1541 mutex_lock(&c->nq_mutex);
1542 c->enqueue_halt = true;
1543 mutex_unlock(&c->nq_mutex);
1544 }
1545 EXPORT_SYMBOL_GPL(most_stop_enqueue);
1546
1547 /**
1548 * most_resume_enqueue - allow core to enqueue MBOs again
1549 * @iface: pointer to interface
1550 * @id: channel id
1551 *
1552 * This clears the enqueue halt flag and enqueues all MBOs currently
1553 * sitting in the wait fifo.
1554 */
1555 void most_resume_enqueue(struct most_interface *iface, int id)
1556 {
1557 struct most_channel *c = iface->p->channel[id];
1558
1559 if (!c)
1560 return;
1561
1562 mutex_lock(&c->nq_mutex);
1563 c->enqueue_halt = false;
1564 mutex_unlock(&c->nq_mutex);
1565
1566 wake_up_interruptible(&c->hdm_fifo_wq);
1567 }
1568 EXPORT_SYMBOL_GPL(most_resume_enqueue);
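/*
 * Flow-control sketch (illustrative): an HDM that temporarily cannot take
 * buffers pairs the two calls around its busy period; resuming clears the
 * halt flag and wakes the enqueue thread so MBOs waiting in the halt fifo
 * are pushed to the hardware again.
 *
 *	most_stop_enqueue(iface, channel_id);
 *	...	recover or drain the hardware ...
 *	most_resume_enqueue(iface, channel_id);
 */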
1569
1570 static void release_most_sub(struct device *dev)
1571 {
1572 pr_info("releasing most_subsystem\n");
1573 }
1574
1575 static int __init most_init(void)
1576 {
1577 int err;
1578
1579 pr_info("init()\n");
1580 INIT_LIST_HEAD(&mc.comp_list);
1581 ida_init(&mdev_id);
1582
1583 mc.bus.name = "most",
1584 mc.bus.match = most_match,
1585 mc.drv.name = "most_core",
1586 mc.drv.bus = &mc.bus,
1587 mc.drv.groups = mc_attr_groups;
1588
1589 err = bus_register(&mc.bus);
1590 if (err) {
1591 pr_info("Cannot register most bus\n");
1592 return err;
1593 }
1594 err = driver_register(&mc.drv);
1595 if (err) {
1596 pr_info("Cannot register core driver\n");
1597 goto exit_bus;
1598 }
1599 mc.dev.init_name = "most_bus";
1600 mc.dev.release = release_most_sub;
1601 if (device_register(&mc.dev)) {
1602 err = -ENOMEM;
1603 goto exit_driver;
1604 }
1605
1606 return 0;
1607
1608 exit_driver:
1609 driver_unregister(&mc.drv);
1610 exit_bus:
1611 bus_unregister(&mc.bus);
1612 return err;
1613 }
1614
1615 static void __exit most_exit(void)
1616 {
1617 pr_info("exit core module\n");
1618 device_unregister(&mc.dev);
1619 driver_unregister(&mc.drv);
1620 bus_unregister(&mc.bus);
1621 ida_destroy(&mdev_id);
1622 }
1623
1624 module_init(most_init);
1625 module_exit(most_exit);
1626 MODULE_LICENSE("GPL");
1627 MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
1628 MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");
1629