1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * core.c - Implementation of core module of MOST Linux driver stack
4  *
5  * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/module.h>
10 #include <linux/fs.h>
11 #include <linux/slab.h>
12 #include <linux/init.h>
13 #include <linux/device.h>
14 #include <linux/list.h>
15 #include <linux/poll.h>
16 #include <linux/wait.h>
17 #include <linux/kobject.h>
18 #include <linux/mutex.h>
19 #include <linux/completion.h>
20 #include <linux/sysfs.h>
21 #include <linux/kthread.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/idr.h>
24 #include <most/core.h>
25 
26 #define MAX_CHANNELS	64
27 #define STRING_SIZE	80
28 
29 static struct ida mdev_id;
30 static int dummy_num_buffers;
31 
32 static struct mostcore {
33 	struct device dev;
34 	struct device_driver drv;
35 	struct bus_type bus;
36 	struct list_head comp_list;
37 } mc;
38 
39 #define to_driver(d) container_of(d, struct mostcore, drv)
40 
41 struct pipe {
42 	struct core_component *comp;
43 	int refs;
44 	int num_buffers;
45 };
46 
47 struct most_channel {
48 	struct device dev;
49 	struct completion cleanup;
50 	atomic_t mbo_ref;
51 	atomic_t mbo_nq_level;
52 	u16 channel_id;
53 	char name[STRING_SIZE];
54 	bool is_poisoned;
55 	struct mutex start_mutex;
56 	struct mutex nq_mutex; /* nq thread synchronization */
57 	int is_starving;
58 	struct most_interface *iface;
59 	struct most_channel_config cfg;
60 	bool keep_mbo;
61 	bool enqueue_halt;
62 	struct list_head fifo;
63 	spinlock_t fifo_lock;
64 	struct list_head halt_fifo;
65 	struct list_head list;
66 	struct pipe pipe0;
67 	struct pipe pipe1;
68 	struct list_head trash_fifo;
69 	struct task_struct *hdm_enqueue_task;
70 	wait_queue_head_t hdm_fifo_wq;
72 };
73 
74 #define to_channel(d) container_of(d, struct most_channel, dev)
75 
76 struct interface_private {
77 	int dev_id;
78 	char name[STRING_SIZE];
79 	struct most_channel *channel[MAX_CHANNELS];
80 	struct list_head channel_list;
81 };
82 
83 static const struct {
84 	int most_ch_data_type;
85 	const char *name;
86 } ch_data_type[] = {
87 	{ MOST_CH_CONTROL, "control\n" },
88 	{ MOST_CH_ASYNC, "async\n" },
89 	{ MOST_CH_SYNC, "sync\n" },
90 	{ MOST_CH_ISOC, "isoc\n"},
91 	{ MOST_CH_ISOC, "isoc_avp\n"},
92 };
93 
94 /**
95  * list_pop_mbo - retrieves the first MBO of the list and removes it
96  * @ptr: the list head to grab the MBO from.
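 * The caller must hold the spinlock that protects @ptr.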
97  */
98 #define list_pop_mbo(ptr)						\
99 ({									\
100 	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
101 	list_del(&_mbo->list);						\
102 	_mbo;								\
103 })
104 
105 /**
106  * most_free_mbo_coherent - free an MBO and its coherent buffer
107  * @mbo: most buffer
108  */
109 static void most_free_mbo_coherent(struct mbo *mbo)
110 {
111 	struct most_channel *c = mbo->context;
112 	u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
113 
114 	if (c->iface->dma_free)
115 		c->iface->dma_free(mbo, coherent_buf_size);
116 	else
117 		kfree(mbo->virt_address);
118 	kfree(mbo);
119 	if (atomic_sub_and_test(1, &c->mbo_ref))
120 		complete(&c->cleanup);
121 }
122 
123 /**
124  * flush_channel_fifos - clear the channel fifos
125  * @c: pointer to channel object
126  */
127 static void flush_channel_fifos(struct most_channel *c)
128 {
129 	unsigned long flags, hf_flags;
130 	struct mbo *mbo, *tmp;
131 
132 	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
133 		return;
134 
135 	spin_lock_irqsave(&c->fifo_lock, flags);
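	/*
	 * The lock is dropped while each MBO is freed so that
	 * most_free_mbo_coherent() never runs under the fifo spinlock.
	 */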
136 	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
137 		list_del(&mbo->list);
138 		spin_unlock_irqrestore(&c->fifo_lock, flags);
139 		most_free_mbo_coherent(mbo);
140 		spin_lock_irqsave(&c->fifo_lock, flags);
141 	}
142 	spin_unlock_irqrestore(&c->fifo_lock, flags);
143 
144 	spin_lock_irqsave(&c->fifo_lock, hf_flags);
145 	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
146 		list_del(&mbo->list);
147 		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
148 		most_free_mbo_coherent(mbo);
149 		spin_lock_irqsave(&c->fifo_lock, hf_flags);
150 	}
151 	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
152 
153 	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
154 		pr_info("WARN: fifo | halt fifo not empty\n");
155 }
156 
157 /**
158  * flush_trash_fifo - clear the trash fifo
159  * @c: pointer to channel object
160  */
161 static int flush_trash_fifo(struct most_channel *c)
162 {
163 	struct mbo *mbo, *tmp;
164 	unsigned long flags;
165 
166 	spin_lock_irqsave(&c->fifo_lock, flags);
167 	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
168 		list_del(&mbo->list);
169 		spin_unlock_irqrestore(&c->fifo_lock, flags);
170 		most_free_mbo_coherent(mbo);
171 		spin_lock_irqsave(&c->fifo_lock, flags);
172 	}
173 	spin_unlock_irqrestore(&c->fifo_lock, flags);
174 	return 0;
175 }
176 
177 static ssize_t available_directions_show(struct device *dev,
178 					 struct device_attribute *attr,
179 					 char *buf)
180 {
181 	struct most_channel *c = to_channel(dev);
182 	unsigned int i = c->channel_id;
183 
184 	strcpy(buf, "");
185 	if (c->iface->channel_vector[i].direction & MOST_CH_RX)
186 		strcat(buf, "rx ");
187 	if (c->iface->channel_vector[i].direction & MOST_CH_TX)
188 		strcat(buf, "tx ");
189 	strcat(buf, "\n");
190 	return strlen(buf);
191 }
192 
193 static ssize_t available_datatypes_show(struct device *dev,
194 					struct device_attribute *attr,
195 					char *buf)
196 {
197 	struct most_channel *c = to_channel(dev);
198 	unsigned int i = c->channel_id;
199 
200 	strcpy(buf, "");
201 	if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
202 		strcat(buf, "control ");
203 	if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
204 		strcat(buf, "async ");
205 	if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
206 		strcat(buf, "sync ");
207 	if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
208 		strcat(buf, "isoc ");
209 	strcat(buf, "\n");
210 	return strlen(buf);
211 }
212 
213 static ssize_t number_of_packet_buffers_show(struct device *dev,
214 					     struct device_attribute *attr,
215 					     char *buf)
216 {
217 	struct most_channel *c = to_channel(dev);
218 	unsigned int i = c->channel_id;
219 
220 	return snprintf(buf, PAGE_SIZE, "%d\n",
221 			c->iface->channel_vector[i].num_buffers_packet);
222 }
223 
224 static ssize_t number_of_stream_buffers_show(struct device *dev,
225 					     struct device_attribute *attr,
226 					     char *buf)
227 {
228 	struct most_channel *c = to_channel(dev);
229 	unsigned int i = c->channel_id;
230 
231 	return snprintf(buf, PAGE_SIZE, "%d\n",
232 			c->iface->channel_vector[i].num_buffers_streaming);
233 }
234 
235 static ssize_t size_of_packet_buffer_show(struct device *dev,
236 					  struct device_attribute *attr,
237 					  char *buf)
238 {
239 	struct most_channel *c = to_channel(dev);
240 	unsigned int i = c->channel_id;
241 
242 	return snprintf(buf, PAGE_SIZE, "%d\n",
243 			c->iface->channel_vector[i].buffer_size_packet);
244 }
245 
246 static ssize_t size_of_stream_buffer_show(struct device *dev,
247 					  struct device_attribute *attr,
248 					  char *buf)
249 {
250 	struct most_channel *c = to_channel(dev);
251 	unsigned int i = c->channel_id;
252 
253 	return snprintf(buf, PAGE_SIZE, "%d\n",
254 			c->iface->channel_vector[i].buffer_size_streaming);
255 }
256 
257 static ssize_t channel_starving_show(struct device *dev,
258 				     struct device_attribute *attr,
259 				     char *buf)
260 {
261 	struct most_channel *c = to_channel(dev);
262 
263 	return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
264 }
265 
266 static ssize_t set_number_of_buffers_show(struct device *dev,
267 					  struct device_attribute *attr,
268 					  char *buf)
269 {
270 	struct most_channel *c = to_channel(dev);
271 
272 	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
273 }
274 
275 static ssize_t set_buffer_size_show(struct device *dev,
276 				    struct device_attribute *attr,
277 				    char *buf)
278 {
279 	struct most_channel *c = to_channel(dev);
280 
281 	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
282 }
283 
284 static ssize_t set_direction_show(struct device *dev,
285 				  struct device_attribute *attr,
286 				  char *buf)
287 {
288 	struct most_channel *c = to_channel(dev);
289 
290 	if (c->cfg.direction & MOST_CH_TX)
291 		return snprintf(buf, PAGE_SIZE, "tx\n");
292 	else if (c->cfg.direction & MOST_CH_RX)
293 		return snprintf(buf, PAGE_SIZE, "rx\n");
294 	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
295 }
296 
297 static ssize_t set_datatype_show(struct device *dev,
298 				 struct device_attribute *attr,
299 				 char *buf)
300 {
301 	int i;
302 	struct most_channel *c = to_channel(dev);
303 
304 	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
305 		if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
306 			return snprintf(buf, PAGE_SIZE, "%s",
307 					ch_data_type[i].name);
308 	}
309 	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
310 }
311 
312 static ssize_t set_subbuffer_size_show(struct device *dev,
313 				       struct device_attribute *attr,
314 				       char *buf)
315 {
316 	struct most_channel *c = to_channel(dev);
317 
318 	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
319 }
320 
321 static ssize_t set_packets_per_xact_show(struct device *dev,
322 					 struct device_attribute *attr,
323 					 char *buf)
324 {
325 	struct most_channel *c = to_channel(dev);
326 
327 	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
328 }
329 
330 static ssize_t set_dbr_size_show(struct device *dev,
331 				 struct device_attribute *attr, char *buf)
332 {
333 	struct most_channel *c = to_channel(dev);
334 
335 	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size);
336 }
337 
338 #define to_dev_attr(a) container_of(a, struct device_attribute, attr)
339 static umode_t channel_attr_is_visible(struct kobject *kobj,
340 				       struct attribute *attr, int index)
341 {
342 	struct device_attribute *dev_attr = to_dev_attr(attr);
343 	struct device *dev = kobj_to_dev(kobj);
344 	struct most_channel *c = to_channel(dev);
345 
346 	if (!strcmp(dev_attr->attr.name, "set_dbr_size") &&
347 	    (c->iface->interface != ITYPE_MEDIALB_DIM2))
348 		return 0;
349 	if (!strcmp(dev_attr->attr.name, "set_packets_per_xact") &&
350 	    (c->iface->interface != ITYPE_USB))
351 		return 0;
352 
353 	return attr->mode;
354 }
355 
356 #define DEV_ATTR(_name)  (&dev_attr_##_name.attr)
357 
358 static DEVICE_ATTR_RO(available_directions);
359 static DEVICE_ATTR_RO(available_datatypes);
360 static DEVICE_ATTR_RO(number_of_packet_buffers);
361 static DEVICE_ATTR_RO(number_of_stream_buffers);
362 static DEVICE_ATTR_RO(size_of_stream_buffer);
363 static DEVICE_ATTR_RO(size_of_packet_buffer);
364 static DEVICE_ATTR_RO(channel_starving);
365 static DEVICE_ATTR_RO(set_buffer_size);
366 static DEVICE_ATTR_RO(set_number_of_buffers);
367 static DEVICE_ATTR_RO(set_direction);
368 static DEVICE_ATTR_RO(set_datatype);
369 static DEVICE_ATTR_RO(set_subbuffer_size);
370 static DEVICE_ATTR_RO(set_packets_per_xact);
371 static DEVICE_ATTR_RO(set_dbr_size);
372 
373 static struct attribute *channel_attrs[] = {
374 	DEV_ATTR(available_directions),
375 	DEV_ATTR(available_datatypes),
376 	DEV_ATTR(number_of_packet_buffers),
377 	DEV_ATTR(number_of_stream_buffers),
378 	DEV_ATTR(size_of_stream_buffer),
379 	DEV_ATTR(size_of_packet_buffer),
380 	DEV_ATTR(channel_starving),
381 	DEV_ATTR(set_buffer_size),
382 	DEV_ATTR(set_number_of_buffers),
383 	DEV_ATTR(set_direction),
384 	DEV_ATTR(set_datatype),
385 	DEV_ATTR(set_subbuffer_size),
386 	DEV_ATTR(set_packets_per_xact),
387 	DEV_ATTR(set_dbr_size),
388 	NULL,
389 };
390 
391 static struct attribute_group channel_attr_group = {
392 	.attrs = channel_attrs,
393 	.is_visible = channel_attr_is_visible,
394 };
395 
396 static const struct attribute_group *channel_attr_groups[] = {
397 	&channel_attr_group,
398 	NULL,
399 };
400 
401 static ssize_t description_show(struct device *dev,
402 				struct device_attribute *attr,
403 				char *buf)
404 {
405 	struct most_interface *iface = to_most_interface(dev);
406 
407 	return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
408 }
409 
410 static ssize_t interface_show(struct device *dev,
411 			      struct device_attribute *attr,
412 			      char *buf)
413 {
414 	struct most_interface *iface = to_most_interface(dev);
415 
416 	switch (iface->interface) {
417 	case ITYPE_LOOPBACK:
418 		return snprintf(buf, PAGE_SIZE, "loopback\n");
419 	case ITYPE_I2C:
420 		return snprintf(buf, PAGE_SIZE, "i2c\n");
421 	case ITYPE_I2S:
422 		return snprintf(buf, PAGE_SIZE, "i2s\n");
423 	case ITYPE_TSI:
424 		return snprintf(buf, PAGE_SIZE, "tsi\n");
425 	case ITYPE_HBI:
426 		return snprintf(buf, PAGE_SIZE, "hbi\n");
427 	case ITYPE_MEDIALB_DIM:
428 		return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
429 	case ITYPE_MEDIALB_DIM2:
430 		return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
431 	case ITYPE_USB:
432 		return snprintf(buf, PAGE_SIZE, "usb\n");
433 	case ITYPE_PCIE:
434 		return snprintf(buf, PAGE_SIZE, "pcie\n");
435 	}
436 	return snprintf(buf, PAGE_SIZE, "unknown\n");
437 }
438 
439 static DEVICE_ATTR_RO(description);
440 static DEVICE_ATTR_RO(interface);
441 
442 static struct attribute *interface_attrs[] = {
443 	DEV_ATTR(description),
444 	DEV_ATTR(interface),
445 	NULL,
446 };
447 
448 static struct attribute_group interface_attr_group = {
449 	.attrs = interface_attrs,
450 };
451 
452 static const struct attribute_group *interface_attr_groups[] = {
453 	&interface_attr_group,
454 	NULL,
455 };
456 
457 static struct core_component *match_component(char *name)
458 {
459 	struct core_component *comp;
460 
461 	list_for_each_entry(comp, &mc.comp_list, list) {
462 		if (!strcmp(comp->name, name))
463 			return comp;
464 	}
465 	return NULL;
466 }
467 
468 struct show_links_data {
469 	int offs;
470 	char *buf;
471 };
472 
473 static int print_links(struct device *dev, void *data)
474 {
475 	struct show_links_data *d = data;
476 	int offs = d->offs;
477 	char *buf = d->buf;
478 	struct most_channel *c;
479 	struct most_interface *iface = to_most_interface(dev);
480 
481 	list_for_each_entry(c, &iface->p->channel_list, list) {
482 		if (c->pipe0.comp) {
483 			offs += snprintf(buf + offs,
484 					 PAGE_SIZE - offs,
485 					 "%s:%s:%s\n",
486 					 c->pipe0.comp->name,
487 					 dev_name(&iface->dev),
488 					 dev_name(&c->dev));
489 		}
490 		if (c->pipe1.comp) {
491 			offs += snprintf(buf + offs,
492 					 PAGE_SIZE - offs,
493 					 "%s:%s:%s\n",
494 					 c->pipe1.comp->name,
495 					 dev_name(&iface->dev),
496 					 dev_name(&c->dev));
497 		}
498 	}
499 	d->offs = offs;
500 	return 0;
501 }
502 
503 static ssize_t links_show(struct device_driver *drv, char *buf)
504 {
505 	struct show_links_data d = { .buf = buf };
506 
507 	bus_for_each_dev(&mc.bus, NULL, &d, print_links);
508 	return d.offs;
509 }
510 
511 static ssize_t components_show(struct device_driver *drv, char *buf)
512 {
513 	struct core_component *comp;
514 	int offs = 0;
515 
516 	list_for_each_entry(comp, &mc.comp_list, list) {
517 		offs += snprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
518 				 comp->name);
519 	}
520 	return offs;
521 }
522 
523 /**
524  * split_string - parses buf and extracts ':' separated substrings.
525  *
526  * @buf: complete string from attribute 'add_channel'
527  * @a: storage for 1st substring (=interface name)
528  * @b: storage for 2nd substring (=channel name)
529  * @c: storage for 3rd substring (=component name)
530  * @d: storage for optional 4th substring (=user defined name)
531  *
532  * Examples:
533  *
534  * Input: "mdev0:ch6:cdev:my_channel\n" or
535  *        "mdev0:ch6:cdev:my_channel"
536  *
537  * Output: *a -> "mdev0", *b -> "ch6", *c -> "cdev", *d -> "my_channel"
538  *
539  * Input: "mdev1:ep81:cdev\n"
540  * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev", *d -> ""
541  *
542  * Input: "mdev1:ep81"
543  * Output: *a -> "mdev1", *b -> "ep81", *c == NULL; -EIO is returned
544  */
545 static int split_string(char *buf, char **a, char **b, char **c, char **d)
546 {
547 	*a = strsep(&buf, ":");
548 	if (!*a)
549 		return -EIO;
550 
551 	*b = strsep(&buf, ":\n");
552 	if (!*b)
553 		return -EIO;
554 
555 	*c = strsep(&buf, ":\n");
556 	if (!*c)
557 		return -EIO;
558 
559 	if (d)
560 		*d = strsep(&buf, ":\n");
561 
562 	return 0;
563 }
564 
565 /**
566  * get_channel - get pointer to channel
567  * @mdev: name of the device interface
568  * @mdev_ch: name of channel
569  */
570 static struct most_channel *get_channel(char *mdev, char *mdev_ch)
571 {
572 	struct device *dev = NULL;
573 	struct most_interface *iface;
574 	struct most_channel *c, *tmp;
575 
576 	dev = bus_find_device_by_name(&mc.bus, NULL, mdev);
577 	if (!dev)
578 		return NULL;
579 	iface = to_most_interface(dev);
580 	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
581 		if (!strcmp(dev_name(&c->dev), mdev_ch))
582 			return c;
583 	}
584 	return NULL;
585 }
586 
587 static
588 inline int link_channel_to_component(struct most_channel *c,
589 				     struct core_component *comp,
590 				     char *name,
591 				     char *comp_param)
592 {
593 	int ret;
594 	struct core_component **comp_ptr;
595 
596 	if (!c->pipe0.comp)
597 		comp_ptr = &c->pipe0.comp;
598 	else if (!c->pipe1.comp)
599 		comp_ptr = &c->pipe1.comp;
600 	else
601 		return -ENOSPC;
602 
603 	*comp_ptr = comp;
604 	ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, name,
605 				  comp_param);
606 	if (ret) {
607 		*comp_ptr = NULL;
608 		return ret;
609 	}
610 	return 0;
611 }
612 
613 int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val)
614 {
615 	struct most_channel *c = get_channel(mdev, mdev_ch);
616 
617 	if (!c)
618 		return -ENODEV;
619 	c->cfg.buffer_size = val;
620 	return 0;
621 }
622 
623 int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val)
624 {
625 	struct most_channel *c = get_channel(mdev, mdev_ch);
626 
627 	if (!c)
628 		return -ENODEV;
629 	c->cfg.subbuffer_size = val;
630 	return 0;
631 }
632 
633 int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val)
634 {
635 	struct most_channel *c = get_channel(mdev, mdev_ch);
636 
637 	if (!c)
638 		return -ENODEV;
639 	c->cfg.dbr_size = val;
640 	return 0;
641 }
642 
643 int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val)
644 {
645 	struct most_channel *c = get_channel(mdev, mdev_ch);
646 
647 	if (!c)
648 		return -ENODEV;
649 	c->cfg.num_buffers = val;
650 	return 0;
651 }
652 
653 int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf)
654 {
655 	int i;
656 	struct most_channel *c = get_channel(mdev, mdev_ch);
657 
658 	if (!c)
659 		return -ENODEV;
660 	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
661 		if (!strcmp(buf, ch_data_type[i].name)) {
662 			c->cfg.data_type = ch_data_type[i].most_ch_data_type;
663 			break;
664 		}
665 	}
666 
667 	if (i == ARRAY_SIZE(ch_data_type))
668 		pr_info("WARN: invalid attribute settings\n");
669 	return 0;
670 }
671 
672 int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
673 {
674 	struct most_channel *c = get_channel(mdev, mdev_ch);
675 
676 	if (!c)
677 		return -ENODEV;
678 	if (!strcmp(buf, "dir_rx\n")) {
679 		c->cfg.direction = MOST_CH_RX;
680 	} else if (!strcmp(buf, "rx\n")) {
681 		c->cfg.direction = MOST_CH_RX;
682 	} else if (!strcmp(buf, "dir_tx\n")) {
683 		c->cfg.direction = MOST_CH_TX;
684 	} else if (!strcmp(buf, "tx\n")) {
685 		c->cfg.direction = MOST_CH_TX;
686 	} else {
687 		pr_info("Invalid direction\n");
688 		return -ENODATA;
689 	}
690 	return 0;
691 }
692 
693 int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val)
694 {
695 	struct most_channel *c = get_channel(mdev, mdev_ch);
696 
697 	if (!c)
698 		return -ENODEV;
699 	c->cfg.packets_per_xact = val;
700 	return 0;
701 }
702 
703 int most_cfg_complete(char *comp_name)
704 {
705 	struct core_component *comp;
706 
707 	comp = match_component(comp_name);
708 	if (!comp)
709 		return -ENODEV;
710 
711 	return comp->cfg_complete();
712 }
713 
714 int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
715 		  char *comp_param)
716 {
717 	struct most_channel *c = get_channel(mdev, mdev_ch);
718 	struct core_component *comp = match_component(comp_name);
719 
720 	if (!c || !comp)
721 		return -ENODEV;
722 
723 	return link_channel_to_component(c, comp, link_name, comp_param);
724 }
725 
726 /**
727  * remove_link_store - store function for remove_link attribute
728  * @drv: device driver
729  * @buf: buffer
730  * @len: buffer length
731  *
732  * Example:
733  * echo "mdev0:ep81" >remove_link
734  */
735 static ssize_t remove_link_store(struct device_driver *drv,
736 				 const char *buf,
737 				 size_t len)
738 {
739 	struct most_channel *c;
740 	struct core_component *comp;
741 	char buffer[STRING_SIZE];
742 	char *mdev;
743 	char *mdev_ch;
744 	char *comp_name;
745 	int ret;
746 	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
747 
748 	strlcpy(buffer, buf, max_len);
749 	ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, NULL);
750 	if (ret)
751 		return ret;
752 	comp = match_component(comp_name);
753 	if (!comp)
754 		return -ENODEV;
755 	c = get_channel(mdev, mdev_ch);
756 	if (!c)
757 		return -ENODEV;
758 
759 	if (comp->disconnect_channel(c->iface, c->channel_id))
760 		return -EIO;
761 	if (c->pipe0.comp == comp)
762 		c->pipe0.comp = NULL;
763 	if (c->pipe1.comp == comp)
764 		c->pipe1.comp = NULL;
765 	return len;
766 }
767 
768 int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
769 {
770 	struct most_channel *c;
771 	struct core_component *comp;
772 
773 	comp = match_component(comp_name);
774 	if (!comp)
775 		return -ENODEV;
776 	c = get_channel(mdev, mdev_ch);
777 	if (!c)
778 		return -ENODEV;
779 
780 	if (comp->disconnect_channel(c->iface, c->channel_id))
781 		return -EIO;
782 	if (c->pipe0.comp == comp)
783 		c->pipe0.comp = NULL;
784 	if (c->pipe1.comp == comp)
785 		c->pipe1.comp = NULL;
786 	return 0;
787 }
788 
789 #define DRV_ATTR(_name)  (&driver_attr_##_name.attr)
790 
791 static DRIVER_ATTR_RO(links);
792 static DRIVER_ATTR_RO(components);
793 static DRIVER_ATTR_WO(remove_link);
794 
795 static struct attribute *mc_attrs[] = {
796 	DRV_ATTR(links),
797 	DRV_ATTR(components),
798 	DRV_ATTR(remove_link),
799 	NULL,
800 };
801 
802 static struct attribute_group mc_attr_group = {
803 	.attrs = mc_attrs,
804 };
805 
806 static const struct attribute_group *mc_attr_groups[] = {
807 	&mc_attr_group,
808 	NULL,
809 };
810 
811 static int most_match(struct device *dev, struct device_driver *drv)
812 {
813 	if (!strcmp(dev_name(dev), "most"))
814 		return 0;
815 	else
816 		return 1;
817 }
818 
819 static inline void trash_mbo(struct mbo *mbo)
820 {
821 	unsigned long flags;
822 	struct most_channel *c = mbo->context;
823 
824 	spin_lock_irqsave(&c->fifo_lock, flags);
825 	list_add(&mbo->list, &c->trash_fifo);
826 	spin_unlock_irqrestore(&c->fifo_lock, flags);
827 }
828 
829 static bool hdm_mbo_ready(struct most_channel *c)
830 {
831 	bool empty;
832 
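	/* a racy read is fine; the enqueue thread re-checks under fifo_lock */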
833 	if (c->enqueue_halt)
834 		return false;
835 
836 	spin_lock_irq(&c->fifo_lock);
837 	empty = list_empty(&c->halt_fifo);
838 	spin_unlock_irq(&c->fifo_lock);
839 
840 	return !empty;
841 }
842 
843 static void nq_hdm_mbo(struct mbo *mbo)
844 {
845 	unsigned long flags;
846 	struct most_channel *c = mbo->context;
847 
848 	spin_lock_irqsave(&c->fifo_lock, flags);
849 	list_add_tail(&mbo->list, &c->halt_fifo);
850 	spin_unlock_irqrestore(&c->fifo_lock, flags);
851 	wake_up_interruptible(&c->hdm_fifo_wq);
852 }
853 
854 static int hdm_enqueue_thread(void *data)
855 {
856 	struct most_channel *c = data;
857 	struct mbo *mbo;
858 	int ret;
859 	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
860 
861 	while (likely(!kthread_should_stop())) {
862 		wait_event_interruptible(c->hdm_fifo_wq,
863 					 hdm_mbo_ready(c) ||
864 					 kthread_should_stop());
865 
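		/* nq_mutex serializes this path with most_stop_enqueue() */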
866 		mutex_lock(&c->nq_mutex);
867 		spin_lock_irq(&c->fifo_lock);
868 		if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
869 			spin_unlock_irq(&c->fifo_lock);
870 			mutex_unlock(&c->nq_mutex);
871 			continue;
872 		}
873 
874 		mbo = list_pop_mbo(&c->halt_fifo);
875 		spin_unlock_irq(&c->fifo_lock);
876 
877 		if (c->cfg.direction == MOST_CH_RX)
878 			mbo->buffer_length = c->cfg.buffer_size;
879 
880 		ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
881 		mutex_unlock(&c->nq_mutex);
882 
883 		if (unlikely(ret)) {
884 			pr_err("hdm enqueue failed\n");
885 			nq_hdm_mbo(mbo);
886 			c->hdm_enqueue_task = NULL;
887 			return 0;
888 		}
889 	}
890 
891 	return 0;
892 }
893 
894 static int run_enqueue_thread(struct most_channel *c, int channel_id)
895 {
896 	struct task_struct *task =
897 		kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
898 			    channel_id);
899 
900 	if (IS_ERR(task))
901 		return PTR_ERR(task);
902 
903 	c->hdm_enqueue_task = task;
904 	return 0;
905 }
906 
907 /**
908  * arm_mbo - recycle MBO for further usage
909  * @mbo: most buffer
910  *
911  * This puts an MBO back to the list to have it ready for upcoming
912  * tx transactions.
913  *
914  * In case the MBO belongs to a channel that recently has been
915  * poisoned, the MBO is scheduled to be trashed.
916  * Calls the completion handler of an attached component.
917  */
918 static void arm_mbo(struct mbo *mbo)
919 {
920 	unsigned long flags;
921 	struct most_channel *c;
922 
923 	c = mbo->context;
924 
925 	if (c->is_poisoned) {
926 		trash_mbo(mbo);
927 		return;
928 	}
929 
930 	spin_lock_irqsave(&c->fifo_lock, flags);
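	/* give the buffer credit back to the pipe it was taken from */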
931 	++*mbo->num_buffers_ptr;
932 	list_add_tail(&mbo->list, &c->fifo);
933 	spin_unlock_irqrestore(&c->fifo_lock, flags);
934 
935 	if (c->pipe0.refs && c->pipe0.comp->tx_completion)
936 		c->pipe0.comp->tx_completion(c->iface, c->channel_id);
937 
938 	if (c->pipe1.refs && c->pipe1.comp->tx_completion)
939 		c->pipe1.comp->tx_completion(c->iface, c->channel_id);
940 }
941 
942 /**
943  * arm_mbo_chain - helper function that arms an MBO chain for the HDM
944  * @c: pointer to interface channel
945  * @dir: direction of the channel
946  * @compl: pointer to completion function
947  *
948  * This allocates buffer objects, including their DMA-coherent payload
949  * buffers, and puts them in the fifo.
950  * Buffers of Rx channels are put in the kthread fifo and are thus
951  * immediately submitted to the HDM.
952  *
953  * Returns the number of allocated and enqueued MBOs.
954  */
955 static int arm_mbo_chain(struct most_channel *c, int dir,
956 			 void (*compl)(struct mbo *))
957 {
958 	unsigned int i;
959 	struct mbo *mbo;
960 	unsigned long flags;
961 	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
962 
963 	atomic_set(&c->mbo_nq_level, 0);
964 
965 	for (i = 0; i < c->cfg.num_buffers; i++) {
966 		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
967 		if (!mbo)
968 			goto flush_fifos;
969 
970 		mbo->context = c;
971 		mbo->ifp = c->iface;
972 		mbo->hdm_channel_id = c->channel_id;
973 		if (c->iface->dma_alloc) {
974 			mbo->virt_address =
975 				c->iface->dma_alloc(mbo, coherent_buf_size);
976 		} else {
977 			mbo->virt_address =
978 				kzalloc(coherent_buf_size, GFP_KERNEL);
979 		}
980 		if (!mbo->virt_address)
981 			goto release_mbo;
982 
983 		mbo->complete = compl;
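		/* points at the dummy counter until most_get_mbo() assigns a pipe */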
984 		mbo->num_buffers_ptr = &dummy_num_buffers;
985 		if (dir == MOST_CH_RX) {
986 			nq_hdm_mbo(mbo);
987 			atomic_inc(&c->mbo_nq_level);
988 		} else {
989 			spin_lock_irqsave(&c->fifo_lock, flags);
990 			list_add_tail(&mbo->list, &c->fifo);
991 			spin_unlock_irqrestore(&c->fifo_lock, flags);
992 		}
993 	}
994 	return c->cfg.num_buffers;
995 
996 release_mbo:
997 	kfree(mbo);
998 
999 flush_fifos:
1000 	flush_channel_fifos(c);
1001 	return 0;
1002 }
1003 
1004 /**
1005  * most_submit_mbo - submits an MBO to fifo
1006  * @mbo: most buffer
1007  */
1008 void most_submit_mbo(struct mbo *mbo)
1009 {
1010 	if (WARN_ONCE(!mbo || !mbo->context,
1011 		      "bad mbo or missing channel reference\n"))
1012 		return;
1013 
1014 	nq_hdm_mbo(mbo);
1015 }
1016 EXPORT_SYMBOL_GPL(most_submit_mbo);
1017 
1018 /**
1019  * most_write_completion - write completion handler
1020  * @mbo: most buffer
1021  *
1022  * This recycles the MBO for further usage. In case the channel has been
1023  * poisoned, the MBO is scheduled to be trashed.
1024  */
1025 static void most_write_completion(struct mbo *mbo)
1026 {
1027 	struct most_channel *c;
1028 
1029 	c = mbo->context;
1030 	if (mbo->status == MBO_E_INVAL)
1031 		pr_info("WARN: Tx MBO status: invalid\n");
1032 	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
1033 		trash_mbo(mbo);
1034 	else
1035 		arm_mbo(mbo);
1036 }
1037 
1038 int channel_has_mbo(struct most_interface *iface, int id,
1039 		    struct core_component *comp)
1040 {
1041 	struct most_channel *c = iface->p->channel[id];
1042 	unsigned long flags;
1043 	int empty;
1044 
1045 	if (unlikely(!c))
1046 		return -EINVAL;
1047 
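	/* with two linked components each one is limited to its own share */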
1048 	if (c->pipe0.refs && c->pipe1.refs &&
1049 	    ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
1050 	     (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
1051 		return 0;
1052 
1053 	spin_lock_irqsave(&c->fifo_lock, flags);
1054 	empty = list_empty(&c->fifo);
1055 	spin_unlock_irqrestore(&c->fifo_lock, flags);
1056 	return !empty;
1057 }
1058 EXPORT_SYMBOL_GPL(channel_has_mbo);
1059 
1060 /**
1061  * most_get_mbo - get pointer to an MBO of pool
1062  * @iface: pointer to interface instance
1063  * @id: channel ID
1064  * @comp: driver component
1065  *
1066  * This attempts to get a free buffer out of the channel fifo.
1067  * Returns a pointer to MBO on success or NULL otherwise.
1068  */
1069 struct mbo *most_get_mbo(struct most_interface *iface, int id,
1070 			 struct core_component *comp)
1071 {
1072 	struct mbo *mbo;
1073 	struct most_channel *c;
1074 	unsigned long flags;
1075 	int *num_buffers_ptr;
1076 
1077 	c = iface->p->channel[id];
1078 	if (unlikely(!c))
1079 		return NULL;
1080 
1081 	if (c->pipe0.refs && c->pipe1.refs &&
1082 	    ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
1083 	     (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
1084 		return NULL;
1085 
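	/* charge the MBO against the requesting component's buffer share */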
1086 	if (comp == c->pipe0.comp)
1087 		num_buffers_ptr = &c->pipe0.num_buffers;
1088 	else if (comp == c->pipe1.comp)
1089 		num_buffers_ptr = &c->pipe1.num_buffers;
1090 	else
1091 		num_buffers_ptr = &dummy_num_buffers;
1092 
1093 	spin_lock_irqsave(&c->fifo_lock, flags);
1094 	if (list_empty(&c->fifo)) {
1095 		spin_unlock_irqrestore(&c->fifo_lock, flags);
1096 		return NULL;
1097 	}
1098 	mbo = list_pop_mbo(&c->fifo);
1099 	--*num_buffers_ptr;
1100 	spin_unlock_irqrestore(&c->fifo_lock, flags);
1101 
1102 	mbo->num_buffers_ptr = num_buffers_ptr;
1103 	mbo->buffer_length = c->cfg.buffer_size;
1104 	return mbo;
1105 }
1106 EXPORT_SYMBOL_GPL(most_get_mbo);
1107 
1108 /**
1109  * most_put_mbo - return buffer to pool
1110  * @mbo: most buffer
1111  */
1112 void most_put_mbo(struct mbo *mbo)
1113 {
1114 	struct most_channel *c = mbo->context;
1115 
1116 	if (c->cfg.direction == MOST_CH_TX) {
1117 		arm_mbo(mbo);
1118 		return;
1119 	}
1120 	nq_hdm_mbo(mbo);
1121 	atomic_inc(&c->mbo_nq_level);
1122 }
1123 EXPORT_SYMBOL_GPL(most_put_mbo);
1124 
1125 /**
1126  * most_read_completion - read completion handler
1127  * @mbo: most buffer
1128  *
1129  * This function is called by the HDM when data has been received from the
1130  * hardware and copied to the buffer of the MBO.
1131  *
1132  * In case the channel has been poisoned it puts the buffer in the trash queue.
1133  * Otherwise, it passes the buffer to a component for further processing.
1134  */
1135 static void most_read_completion(struct mbo *mbo)
1136 {
1137 	struct most_channel *c = mbo->context;
1138 
1139 	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
1140 		trash_mbo(mbo);
1141 		return;
1142 	}
1143 
1144 	if (mbo->status == MBO_E_INVAL) {
1145 		nq_hdm_mbo(mbo);
1146 		atomic_inc(&c->mbo_nq_level);
1147 		return;
1148 	}
1149 
1150 	if (atomic_sub_and_test(1, &c->mbo_nq_level))
1151 		c->is_starving = 1;
1152 
1153 	if (c->pipe0.refs && c->pipe0.comp->rx_completion &&
1154 	    c->pipe0.comp->rx_completion(mbo) == 0)
1155 		return;
1156 
1157 	if (c->pipe1.refs && c->pipe1.comp->rx_completion &&
1158 	    c->pipe1.comp->rx_completion(mbo) == 0)
1159 		return;
1160 
1161 	most_put_mbo(mbo);
1162 }
1163 
1164 /**
1165  * most_start_channel - prepares a channel for communication
1166  * @iface: pointer to interface instance
1167  * @id: channel ID
1168  * @comp: driver component
1169  *
1170  * This prepares the channel for use and cross-checks whether it
1171  * has been properly configured.
1172  *
1173  * Returns 0 on success or error code otherwise.
1174  */
1175 int most_start_channel(struct most_interface *iface, int id,
1176 		       struct core_component *comp)
1177 {
1178 	int num_buffer;
1179 	int ret;
1180 	struct most_channel *c = iface->p->channel[id];
1181 
1182 	if (unlikely(!c))
1183 		return -EINVAL;
1184 
1185 	mutex_lock(&c->start_mutex);
1186 	if (c->pipe0.refs + c->pipe1.refs > 0)
1187 		goto out; /* already started by another component */
1188 
1189 	if (!try_module_get(iface->mod)) {
1190 		pr_info("failed to acquire HDM lock\n");
1191 		mutex_unlock(&c->start_mutex);
1192 		return -ENOLCK;
1193 	}
1194 
1195 	c->cfg.extra_len = 0;
1196 	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
1197 		pr_info("channel configuration failed. Go check settings...\n");
1198 		ret = -EINVAL;
1199 		goto err_put_module;
1200 	}
1201 
1202 	init_waitqueue_head(&c->hdm_fifo_wq);
1203 
1204 	if (c->cfg.direction == MOST_CH_RX)
1205 		num_buffer = arm_mbo_chain(c, c->cfg.direction,
1206 					   most_read_completion);
1207 	else
1208 		num_buffer = arm_mbo_chain(c, c->cfg.direction,
1209 					   most_write_completion);
1210 	if (unlikely(!num_buffer)) {
1211 		ret = -ENOMEM;
1212 		goto err_put_module;
1213 	}
1214 
1215 	ret = run_enqueue_thread(c, id);
1216 	if (ret)
1217 		goto err_put_module;
1218 
1219 	c->is_starving = 0;
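	/* split the buffer budget between the two possible linked components */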
1220 	c->pipe0.num_buffers = c->cfg.num_buffers / 2;
1221 	c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers;
1222 	atomic_set(&c->mbo_ref, num_buffer);
1223 
1224 out:
1225 	if (comp == c->pipe0.comp)
1226 		c->pipe0.refs++;
1227 	if (comp == c->pipe1.comp)
1228 		c->pipe1.refs++;
1229 	mutex_unlock(&c->start_mutex);
1230 	return 0;
1231 
1232 err_put_module:
1233 	module_put(iface->mod);
1234 	mutex_unlock(&c->start_mutex);
1235 	return ret;
1236 }
1237 EXPORT_SYMBOL_GPL(most_start_channel);
1238 
1239 /**
1240  * most_stop_channel - stops a running channel
1241  * @iface: pointer to interface instance
1242  * @id: channel ID
1243  * @comp: driver component
1244  */
1245 int most_stop_channel(struct most_interface *iface, int id,
1246 		      struct core_component *comp)
1247 {
1248 	struct most_channel *c;
1249 
1250 	if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
1251 		pr_err("Bad interface or index out of range\n");
1252 		return -EINVAL;
1253 	}
1254 	c = iface->p->channel[id];
1255 	if (unlikely(!c))
1256 		return -EINVAL;
1257 
1258 	mutex_lock(&c->start_mutex);
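	/* only the last remaining user actually tears the channel down */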
1259 	if (c->pipe0.refs + c->pipe1.refs >= 2)
1260 		goto out;
1261 
1262 	if (c->hdm_enqueue_task)
1263 		kthread_stop(c->hdm_enqueue_task);
1264 	c->hdm_enqueue_task = NULL;
1265 
1266 	if (iface->mod)
1267 		module_put(iface->mod);
1268 
1269 	c->is_poisoned = true;
1270 	if (c->iface->poison_channel(c->iface, c->channel_id)) {
1271 		pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
1272 		       c->iface->description);
1273 		mutex_unlock(&c->start_mutex);
1274 		return -EAGAIN;
1275 	}
1276 	flush_trash_fifo(c);
1277 	flush_channel_fifos(c);
1278 
1279 #ifdef CMPL_INTERRUPTIBLE
1280 	if (wait_for_completion_interruptible(&c->cleanup)) {
1281 		pr_info("Interrupted while clean up ch %d\n", c->channel_id);
1282 		mutex_unlock(&c->start_mutex);
1283 		return -EINTR;
1284 	}
1285 #else
1286 	wait_for_completion(&c->cleanup);
1287 #endif
1288 	c->is_poisoned = false;
1289 
1290 out:
1291 	if (comp == c->pipe0.comp)
1292 		c->pipe0.refs--;
1293 	if (comp == c->pipe1.comp)
1294 		c->pipe1.refs--;
1295 	mutex_unlock(&c->start_mutex);
1296 	return 0;
1297 }
1298 EXPORT_SYMBOL_GPL(most_stop_channel);
1299 
1300 /**
1301  * most_register_component - registers a driver component with the core
1302  * @comp: driver component
1303  */
1304 int most_register_component(struct core_component *comp)
1305 {
1306 	if (!comp) {
1307 		pr_err("Bad component\n");
1308 		return -EINVAL;
1309 	}
1310 	list_add_tail(&comp->list, &mc.comp_list);
1311 	pr_info("registered new core component %s\n", comp->name);
1312 	return 0;
1313 }
1314 EXPORT_SYMBOL_GPL(most_register_component);
1315 
1316 static int disconnect_channels(struct device *dev, void *data)
1317 {
1318 	struct most_interface *iface;
1319 	struct most_channel *c, *tmp;
1320 	struct core_component *comp = data;
1321 
1322 	iface = to_most_interface(dev);
1323 	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
1324 		if (c->pipe0.comp == comp || c->pipe1.comp == comp)
1325 			comp->disconnect_channel(c->iface, c->channel_id);
1326 		if (c->pipe0.comp == comp)
1327 			c->pipe0.comp = NULL;
1328 		if (c->pipe1.comp == comp)
1329 			c->pipe1.comp = NULL;
1330 	}
1331 	return 0;
1332 }
1333 
1334 /**
1335  * most_deregister_component - deregisters a driver component with the core
1336  * @comp: driver component
1337  */
1338 int most_deregister_component(struct core_component *comp)
1339 {
1340 	if (!comp) {
1341 		pr_err("Bad component\n");
1342 		return -EINVAL;
1343 	}
1344 
1345 	bus_for_each_dev(&mc.bus, NULL, comp, disconnect_channels);
1346 	list_del(&comp->list);
1347 	pr_info("deregistering component %s\n", comp->name);
1348 	return 0;
1349 }
1350 EXPORT_SYMBOL_GPL(most_deregister_component);
1351 
1352 static void release_interface(struct device *dev)
1353 {
1354 	pr_info("releasing interface dev %s...\n", dev_name(dev));
1355 }
1356 
1357 static void release_channel(struct device *dev)
1358 {
1359 	pr_info("releasing channel dev %s...\n", dev_name(dev));
1360 }
1361 
1362 /**
1363  * most_register_interface - registers an interface with core
1364  * @iface: device interface
1365  *
1366  * Allocates and initializes a new interface instance and all of its channels.
1367  * Returns 0 on success or a negative error code otherwise.
1368  */
1369 int most_register_interface(struct most_interface *iface)
1370 {
1371 	unsigned int i;
1372 	int id;
1373 	struct most_channel *c;
1374 
1375 	if (!iface || !iface->enqueue || !iface->configure ||
1376 	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
1377 		pr_err("Bad interface or channel overflow\n");
1378 		return -EINVAL;
1379 	}
1380 
1381 	id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
1382 	if (id < 0) {
1383 		pr_info("Failed to alloc mdev ID\n");
1384 		return id;
1385 	}
1386 
1387 	iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
1388 	if (!iface->p) {
1389 		ida_simple_remove(&mdev_id, id);
1390 		return -ENOMEM;
1391 	}
1392 
1393 	INIT_LIST_HEAD(&iface->p->channel_list);
1394 	iface->p->dev_id = id;
1395 	strscpy(iface->p->name, iface->description, sizeof(iface->p->name));
1396 	iface->dev.init_name = iface->p->name;
1397 	iface->dev.bus = &mc.bus;
1398 	iface->dev.parent = &mc.dev;
1399 	iface->dev.groups = interface_attr_groups;
1400 	iface->dev.release = release_interface;
1401 	if (device_register(&iface->dev)) {
1402 		pr_err("registering iface->dev failed\n");
1403 		kfree(iface->p);
1404 		ida_simple_remove(&mdev_id, id);
1405 		return -ENOMEM;
1406 	}
1407 
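	/* create one child device per channel advertised by the interface */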
1408 	for (i = 0; i < iface->num_channels; i++) {
1409 		const char *name_suffix = iface->channel_vector[i].name_suffix;
1410 
1411 		c = kzalloc(sizeof(*c), GFP_KERNEL);
1412 		if (!c)
1413 			goto err_free_resources;
1414 		if (!name_suffix)
1415 			snprintf(c->name, STRING_SIZE, "ch%d", i);
1416 		else
1417 			snprintf(c->name, STRING_SIZE, "%s", name_suffix);
1418 		c->dev.init_name = c->name;
1419 		c->dev.parent = &iface->dev;
1420 		c->dev.groups = channel_attr_groups;
1421 		c->dev.release = release_channel;
1422 		iface->p->channel[i] = c;
1423 		c->is_starving = 0;
1424 		c->iface = iface;
1425 		c->channel_id = i;
1426 		c->keep_mbo = false;
1427 		c->enqueue_halt = false;
1428 		c->is_poisoned = false;
1429 		c->cfg.direction = 0;
1430 		c->cfg.data_type = 0;
1431 		c->cfg.num_buffers = 0;
1432 		c->cfg.buffer_size = 0;
1433 		c->cfg.subbuffer_size = 0;
1434 		c->cfg.packets_per_xact = 0;
1435 		spin_lock_init(&c->fifo_lock);
1436 		INIT_LIST_HEAD(&c->fifo);
1437 		INIT_LIST_HEAD(&c->trash_fifo);
1438 		INIT_LIST_HEAD(&c->halt_fifo);
1439 		init_completion(&c->cleanup);
1440 		atomic_set(&c->mbo_ref, 0);
1441 		mutex_init(&c->start_mutex);
1442 		mutex_init(&c->nq_mutex);
1443 		list_add_tail(&c->list, &iface->p->channel_list);
1444 		if (device_register(&c->dev)) {
1445 			pr_err("registering c->dev failed\n");
1446 			goto err_free_most_channel;
1447 		}
1448 	}
1449 	pr_info("registered new device mdev%d (%s)\n",
1450 		id, iface->description);
1451 	most_interface_register_notify(iface->description);
1452 	return 0;
1453 
1454 err_free_most_channel:
1455 	kfree(c);
1456 
1457 err_free_resources:
1458 	while (i > 0) {
1459 		c = iface->p->channel[--i];
1460 		device_unregister(&c->dev);
1461 		kfree(c);
1462 	}
1463 	kfree(iface->p);
1464 	device_unregister(&iface->dev);
1465 	ida_simple_remove(&mdev_id, id);
1466 	return -ENOMEM;
1467 }
1468 EXPORT_SYMBOL_GPL(most_register_interface);
1469 
1470 /**
1471  * most_deregister_interface - deregisters an interface with core
1472  * @iface: device interface
1473  *
1474  * Before removing an interface instance from the list, all running
1475  * channels are stopped and poisoned.
1476  */
1477 void most_deregister_interface(struct most_interface *iface)
1478 {
1479 	int i;
1480 	struct most_channel *c;
1481 
1482 	pr_info("deregistering device %s (%s)\n", dev_name(&iface->dev),
1483 		iface->description);
1484 	for (i = 0; i < iface->num_channels; i++) {
1485 		c = iface->p->channel[i];
1486 		if (c->pipe0.comp)
1487 			c->pipe0.comp->disconnect_channel(c->iface,
1488 							c->channel_id);
1489 		if (c->pipe1.comp)
1490 			c->pipe1.comp->disconnect_channel(c->iface,
1491 							c->channel_id);
1492 		c->pipe0.comp = NULL;
1493 		c->pipe1.comp = NULL;
1494 		list_del(&c->list);
1495 		device_unregister(&c->dev);
1496 		kfree(c);
1497 	}
1498 
1499 	ida_simple_remove(&mdev_id, iface->p->dev_id);
1500 	kfree(iface->p);
1501 	device_unregister(&iface->dev);
1502 }
1503 EXPORT_SYMBOL_GPL(most_deregister_interface);
1504 
1505 /**
1506  * most_stop_enqueue - prevents core from enqueueing MBOs
1507  * @iface: pointer to interface
1508  * @id: channel id
1509  *
1510  * This is called by an HDM that _cannot_ attend to its duties and
1511  * is about to be overrun by the core. The core will not enqueue
1512  * any further packets unless the flagging HDM calls
1513  * most_resume_enqueue().
1514  */
1515 void most_stop_enqueue(struct most_interface *iface, int id)
1516 {
1517 	struct most_channel *c = iface->p->channel[id];
1518 
1519 	if (!c)
1520 		return;
1521 
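	/* taking nq_mutex waits for any in-flight enqueue to complete */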
1522 	mutex_lock(&c->nq_mutex);
1523 	c->enqueue_halt = true;
1524 	mutex_unlock(&c->nq_mutex);
1525 }
1526 EXPORT_SYMBOL_GPL(most_stop_enqueue);
1527 
1528 /**
1529  * most_resume_enqueue - allow core to enqueue MBOs again
1530  * @iface: pointer to interface
1531  * @id: channel id
1532  *
1533  * This clears the enqueue halt flag and enqueues all MBOs currently
1534  * sitting in the wait fifo.
1535  */
1536 void most_resume_enqueue(struct most_interface *iface, int id)
1537 {
1538 	struct most_channel *c = iface->p->channel[id];
1539 
1540 	if (!c)
1541 		return;
1542 
1543 	mutex_lock(&c->nq_mutex);
1544 	c->enqueue_halt = false;
1545 	mutex_unlock(&c->nq_mutex);
1546 
1547 	wake_up_interruptible(&c->hdm_fifo_wq);
1548 }
1549 EXPORT_SYMBOL_GPL(most_resume_enqueue);
1550 
1551 static void release_most_sub(struct device *dev)
1552 {
1553 	pr_info("releasing most_subsystem\n");
1554 }
1555 
1556 static int __init most_init(void)
1557 {
1558 	int err;
1559 
1560 	pr_info("init()\n");
1561 	INIT_LIST_HEAD(&mc.comp_list);
1562 	ida_init(&mdev_id);
1563 
1564 	mc.bus.name = "most";
1565 	mc.bus.match = most_match;
1566 	mc.drv.name = "most_core";
1567 	mc.drv.bus = &mc.bus;
1568 	mc.drv.groups = mc_attr_groups;
1569 
1570 	err = bus_register(&mc.bus);
1571 	if (err) {
1572 		pr_info("Cannot register most bus\n");
1573 		return err;
1574 	}
1575 	err = driver_register(&mc.drv);
1576 	if (err) {
1577 		pr_info("Cannot register core driver\n");
1578 		goto err_unregister_bus;
1579 	}
1580 	mc.dev.init_name = "most_bus";
1581 	mc.dev.release = release_most_sub;
1582 	if (device_register(&mc.dev)) {
1583 		err = -ENOMEM;
1584 		goto err_unregister_driver;
1585 	}
1586 	configfs_init();
1587 	return 0;
1588 
1589 err_unregister_driver:
1590 	driver_unregister(&mc.drv);
1591 err_unregister_bus:
1592 	bus_unregister(&mc.bus);
1593 	return err;
1594 }
1595 
1596 static void __exit most_exit(void)
1597 {
1598 	pr_info("exit core module\n");
1599 	device_unregister(&mc.dev);
1600 	driver_unregister(&mc.drv);
1601 	bus_unregister(&mc.bus);
1602 	ida_destroy(&mdev_id);
1603 }
1604 
1605 module_init(most_init);
1606 module_exit(most_exit);
1607 MODULE_LICENSE("GPL");
1608 MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
1609 MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");
1610