1 // SPDX-License-Identifier: BSD-3-Clause
2 //
3 // Copyright 2019 NXP
4 //
5 // Author: Daniel Baluta <daniel.baluta@nxp.com>
6 // Author: Paul Olaru <paul.olaru@nxp.com>
7 
8 /* Dummy DMA driver (software-based DMA controller)
9  *
10  * This driver is usable on all platforms where the DSP can directly access
11  * all of the host physical memory (or at least the host buffers).
12  *
13  * The way this driver works is that it simply performs the copies
14  * synchronously within the dma_start() and dma_copy() calls.
15  *
16  * One of the drawbacks of this driver is that it doesn't actually have a true
17  * IRQ context, as the copy is done synchronously and the IRQ callbacks are
18  * called in process context.
19  *
20  * An actual hardware DMA driver may be preferable because of the above
21  * drawback which comes from a software implementation. But if there isn't any
22  * hardware DMA controller dedicated for the host this driver can be used.
23  *
24  * This driver requires physical addresses in the elems. This assumption only
25  * holds if you have CONFIG_HOST_PTABLE enabled, at least currently.
26  */
27 
28 #include <sof/atomic.h>
29 #include <sof/audio/component.h>
30 #include <sof/drivers/timer.h>
31 #include <sof/lib/alloc.h>
32 #include <sof/lib/cache.h>
33 #include <sof/lib/dma.h>
34 #include <sof/lib/notifier.h>
35 #include <sof/lib/uuid.h>
36 #include <sof/platform.h>
37 #include <sof/spinlock.h>
38 #include <sof/string.h>
39 #include <sof/trace/trace.h>
40 #include <sys/types.h>
41 #include <ipc/topology.h>
42 #include <user/trace.h>
43 
44 #include <errno.h>
45 #include <stdbool.h>
46 #include <stddef.h>
47 #include <stdint.h>
48 
49 /* f6d15ad3-b122-458c-ae9b-0ab0b5867aa0 */
50 DECLARE_SOF_UUID("dummy-dma", dummy_dma_uuid, 0xf6d15ad3, 0xb122, 0x458c,
51 		 0xae, 0x9b, 0x0a, 0xb0, 0xb5, 0x86, 0x7a, 0xa0);
52 
53 DECLARE_TR_CTX(ddma_tr, SOF_UUID(dummy_dma_uuid), LOG_LEVEL_INFO);
54 
/* Per-channel private state for the software DMA. */
struct dma_chan_pdata {
	/* SG elem array to copy; points into the client's dma_sg_config */
	struct dma_sg_elem_array *elems;
	/* Index of the elem currently being copied */
	int sg_elem_curr_idx;
	/* Read position reported via dummy_dma_status() */
	uintptr_t r_pos;
	/* Write position reported via dummy_dma_status() */
	uintptr_t w_pos;
	/* Bytes already copied within the current elem */
	uintptr_t elem_progress;
	/* True if copying wraps back to the first elem after the last */
	bool cyclic;
};
63 
64 #define DUMMY_DMA_BUFFER_PERIOD_COUNT	2
65 
66 /**
67  * \brief Copy the currently-in-progress DMA SG elem
68  * \param[in,out] pdata: Private data structure for this DMA channel
69  * \param[in] bytes: The amount of data requested for copying
70  * \return How many bytes have been copied, or -ENODATA if nothing can be
71  *	   copied. Will return 0 quickly if 0 bytes are requested.
72  *
73  * Perform the individual copy of the in-progress DMA SG elem. To copy more
74  * data, one should call this function repeatedly.
75  */
dummy_dma_copy_crt_elem(struct dma_chan_pdata * pdata,int bytes)76 static ssize_t dummy_dma_copy_crt_elem(struct dma_chan_pdata *pdata,
77 				       int bytes)
78 {
79 	int ret;
80 	uintptr_t rptr, wptr;
81 	size_t orig_size, remaining_size, copy_size;
82 
83 	if (bytes == 0)
84 		return 0;
85 
86 	/* Quick check, do we have a valid elem? */
87 	if (pdata->sg_elem_curr_idx >= pdata->elems->count)
88 		return -ENODATA;
89 
90 	/* We should copy whatever is left of the element, unless we have
91 	 * too little remaining for that to happen
92 	 */
93 
94 	/* Compute copy size and pointers */
95 	rptr = pdata->elems->elems[pdata->sg_elem_curr_idx].src;
96 	wptr = pdata->elems->elems[pdata->sg_elem_curr_idx].dest;
97 	orig_size = pdata->elems->elems[pdata->sg_elem_curr_idx].size;
98 	remaining_size = orig_size - pdata->elem_progress;
99 	copy_size = MIN(remaining_size, bytes);
100 
101 	/* On playback, invalidate host buffer (it may lie in a cached area).
102 	 * Otherwise we could be playing stale data.
103 	 * On capture this should be safe as host.c does a writeback before
104 	 * triggering the DMA.
105 	 */
106 	dcache_invalidate_region((void *)rptr, copy_size);
107 
108 	/* Perform the copy, being careful if we overflow the elem */
109 	ret = memcpy_s((void *)wptr, remaining_size, (void *)rptr, copy_size);
110 	assert(!ret);
111 
112 	/* On capture, writeback the host buffer (it may lie in a cached area).
113 	 * On playback, also writeback because host.c does an invalidate to
114 	 * be able to use the data transferred by the DMA.
115 	 */
116 	dcache_writeback_region((void *)wptr, copy_size);
117 
118 	pdata->elem_progress += copy_size;
119 
120 	if (remaining_size == copy_size) {
121 		/* Advance to next elem, if we can */
122 		pdata->sg_elem_curr_idx++;
123 		pdata->elem_progress = 0;
124 		/* Support cyclic copying */
125 		if (pdata->cyclic &&
126 		    pdata->sg_elem_curr_idx == pdata->elems->count)
127 			pdata->sg_elem_curr_idx = 0;
128 	}
129 
130 	return copy_size;
131 }
132 
dummy_dma_comp_avail_data_cyclic(struct dma_chan_pdata * pdata)133 static size_t dummy_dma_comp_avail_data_cyclic(struct dma_chan_pdata *pdata)
134 {
135 	/* Simple: just sum up all of the elements */
136 	size_t size = 0;
137 	int i;
138 
139 	for (i = 0; i < pdata->elems->count; i++)
140 		size += pdata->elems->elems[i].size;
141 
142 	return size;
143 }
144 
dummy_dma_comp_avail_data_noncyclic(struct dma_chan_pdata * pdata)145 static size_t dummy_dma_comp_avail_data_noncyclic(struct dma_chan_pdata *pdata)
146 {
147 	/* Slightly harder, take remainder of the current element plus
148 	 * all of the data in future elements
149 	 */
150 	size_t size = 0;
151 	int i;
152 
153 	for (i = pdata->sg_elem_curr_idx; i < pdata->elems->count; i++)
154 		size += pdata->elems->elems[i].size;
155 
156 	/* Account for partially copied current elem */
157 	size -= pdata->elem_progress;
158 
159 	return size;
160 }
161 
162 /**
163  * \brief Compute how much data is available for copying at this point
164  * \param[in] pdata: Private data structure for this DMA channel
165  * \return Number of available/free bytes for copying, possibly 0
166  *
167  * Returns how many bytes can be copied with one dma_copy() call.
168  */
dummy_dma_compute_avail_data(struct dma_chan_pdata * pdata)169 static size_t dummy_dma_compute_avail_data(struct dma_chan_pdata *pdata)
170 {
171 	if (pdata->cyclic)
172 		return dummy_dma_comp_avail_data_cyclic(pdata);
173 	else
174 		return dummy_dma_comp_avail_data_noncyclic(pdata);
175 }
176 
177 /**
178  * \brief Copy as many elems as required to copy @bytes bytes
179  * \param[in,out] pdata: Private data structure for this DMA channel
180  * \param[in] bytes: The amount of data requested for copying
181  * \return How many bytes have been copied, or -ENODATA if nothing can be
182  *	   copied.
183  *
184  * Perform as many elem copies as required to fulfill the request for copying
185  * @bytes bytes of data. Will copy exactly this much data if possible, however
186  * it will stop short if you try to copy more data than available.
187  */
static ssize_t dummy_dma_do_copies(struct dma_chan_pdata *pdata, int bytes)
{
	ssize_t total = 0;
	ssize_t chunk;

	/* Nothing at all to copy: report it to the caller */
	if (!dummy_dma_compute_avail_data(pdata))
		return -ENODATA;

	/* Keep copying elem by elem until the request is satisfied or we
	 * run out of data; a short result is returned if we stop early
	 * after having copied something.
	 */
	while (bytes) {
		chunk = dummy_dma_copy_crt_elem(pdata, bytes);
		if (chunk <= 0)
			return total > 0 ? total : chunk;
		total += chunk;
		bytes -= chunk;
	}

	return total;
}
211 
212 /**
213  * \brief Allocate next free DMA channel
214  * \param[in] dma: DMA controller
215  * \param[in] req_chan: Ignored, would have been a preference for a particular
216  *			channel
217  * \return A structure to be used with the other callbacks in this driver,
218  * or NULL in case no channel could be allocated.
219  *
220  * This function allocates a DMA channel for actual usage by any SOF client
221  * code.
222  */
dummy_dma_channel_get(struct dma * dma,unsigned int req_chan)223 static struct dma_chan_data *dummy_dma_channel_get(struct dma *dma,
224 						   unsigned int req_chan)
225 {
226 	uint32_t flags;
227 	int i;
228 
229 	spin_lock_irq(&dma->lock, flags);
230 	for (i = 0; i < dma->plat_data.channels; i++) {
231 		/* use channel if it's free */
232 		if (dma->chan[i].status == COMP_STATE_INIT) {
233 			dma->chan[i].status = COMP_STATE_READY;
234 
235 			atomic_add(&dma->num_channels_busy, 1);
236 
237 			/* return channel */
238 			spin_unlock_irq(&dma->lock, flags);
239 			return &dma->chan[i];
240 		}
241 	}
242 	spin_unlock_irq(&dma->lock, flags);
243 	tr_err(&ddma_tr, "dummy-dmac: %d no free channel",
244 	       dma->plat_data.id);
245 	return NULL;
246 }
247 
dummy_dma_channel_put_unlocked(struct dma_chan_data * channel)248 static void dummy_dma_channel_put_unlocked(struct dma_chan_data *channel)
249 {
250 	struct dma_chan_pdata *ch = dma_chan_get_data(channel);
251 
252 	/* Reset channel state */
253 	notifier_unregister_all(NULL, channel);
254 
255 	ch->elems = NULL;
256 	channel->desc_count = 0;
257 	ch->sg_elem_curr_idx = 0;
258 
259 	ch->r_pos = 0;
260 	ch->w_pos = 0;
261 
262 	channel->status = COMP_STATE_INIT;
263 	atomic_sub(&channel->dma->num_channels_busy, 1);
264 }
265 
266 /**
267  * \brief Free a DMA channel
268  * \param[in] channel: DMA channel
269  *
270  * Once a DMA channel is no longer needed it should be freed by calling this
271  * function.
272  */
dummy_dma_channel_put(struct dma_chan_data * channel)273 static void dummy_dma_channel_put(struct dma_chan_data *channel)
274 {
275 	uint32_t flags;
276 
277 	spin_lock_irq(&channel->dma->lock, flags);
278 	dummy_dma_channel_put_unlocked(channel);
279 	spin_unlock_irq(&channel->dma->lock, flags);
280 }
281 
282 /* Since copies are synchronous, the triggers do nothing */
static int dummy_dma_start(struct dma_chan_data *channel)
{
	/* No-op: data is moved synchronously inside dummy_dma_copy() */
	return 0;
}
287 
288 /* Since copies are synchronous, the triggers do nothing */
static int dummy_dma_release(struct dma_chan_data *channel)
{
	/* No-op: data is moved synchronously inside dummy_dma_copy() */
	return 0;
}
293 
294 /* Since copies are synchronous, the triggers do nothing */
static int dummy_dma_pause(struct dma_chan_data *channel)
{
	/* No-op: data is moved synchronously inside dummy_dma_copy() */
	return 0;
}
299 
300 /* Since copies are synchronous, the triggers do nothing */
static int dummy_dma_stop(struct dma_chan_data *channel)
{
	/* No-op: data is moved synchronously inside dummy_dma_copy() */
	return 0;
}
305 
306 /* fill in "status" with current DMA channel state and position */
dummy_dma_status(struct dma_chan_data * channel,struct dma_chan_status * status,uint8_t direction)307 static int dummy_dma_status(struct dma_chan_data *channel,
308 			    struct dma_chan_status *status,
309 			    uint8_t direction)
310 {
311 	struct dma_chan_pdata *ch = dma_chan_get_data(channel);
312 
313 	status->state = channel->status;
314 	status->flags = 0; /* TODO What flags should be put here? */
315 	status->r_pos = ch->r_pos;
316 	status->w_pos = ch->w_pos;
317 
318 	status->timestamp = timer_get_system(timer_get());
319 	return 0;
320 }
321 
322 /**
323  * \brief Set channel configuration
324  * \param[in] channel: The channel to configure
325  * \param[in] config: Configuration data
326  * \return 0 on success, -EINVAL if the config is invalid or unsupported.
327  *
328  * Sets the channel configuration. For this particular driver the config means
329  * the direction and the actual SG elems for copying.
330  */
dummy_dma_set_config(struct dma_chan_data * channel,struct dma_sg_config * config)331 static int dummy_dma_set_config(struct dma_chan_data *channel,
332 				struct dma_sg_config *config)
333 {
334 	struct dma_chan_pdata *ch = dma_chan_get_data(channel);
335 	uint32_t flags;
336 	int ret = 0;
337 
338 	spin_lock_irq(&channel->dma->lock, flags);
339 
340 	if (!config->elem_array.count) {
341 		tr_err(&ddma_tr, "dummy-dmac: %d channel %d no DMA descriptors",
342 		       channel->dma->plat_data.id,
343 		       channel->index);
344 
345 		ret = -EINVAL;
346 		goto out;
347 	}
348 
349 	channel->direction = config->direction;
350 
351 	if (config->direction != DMA_DIR_HMEM_TO_LMEM &&
352 	    config->direction != DMA_DIR_LMEM_TO_HMEM) {
353 		/* Shouldn't even happen though */
354 		tr_err(&ddma_tr, "dummy-dmac: %d channel %d invalid direction %d",
355 		       channel->dma->plat_data.id, channel->index,
356 		       config->direction);
357 		ret = -EINVAL;
358 		goto out;
359 	}
360 	channel->desc_count = config->elem_array.count;
361 	ch->elems = &config->elem_array;
362 	ch->sg_elem_curr_idx = 0;
363 	ch->cyclic = config->cyclic;
364 
365 	channel->status = COMP_STATE_PREPARE;
366 out:
367 	spin_unlock_irq(&channel->dma->lock, flags);
368 	return ret;
369 }
370 
/* restore DMA context after leaving D3 */
static int dummy_dma_pm_context_restore(struct dma *dma)
{
	/* Virtual device, no hardware registers to restore */
	return 0;
}
377 
/* store DMA context before entering D3 */
static int dummy_dma_pm_context_store(struct dma *dma)
{
	/* Virtual device, no hardware registers to store */
	return 0;
}
384 
385 /**
386  * \brief Perform the DMA copy itself
387  * \param[in] channel The channel to do the copying
388  * \param[in] bytes How many bytes are requested to be copied
389  * \param[in] flags Flags which may alter the copying (this driver ignores them)
390  * \return 0 on success (this driver always succeeds)
391  *
392  * The copying must be done synchronously within this function, then SOF (the
393  * host component) is notified via the callback that this number of bytes is
394  * available.
395  */
dummy_dma_copy(struct dma_chan_data * channel,int bytes,uint32_t flags)396 static int dummy_dma_copy(struct dma_chan_data *channel, int bytes,
397 			  uint32_t flags)
398 {
399 	ssize_t copied;
400 	struct dma_cb_data next = {
401 		.channel = channel,
402 	};
403 	struct dma_chan_pdata *pdata = dma_chan_get_data(channel);
404 
405 	copied = dummy_dma_do_copies(pdata, bytes);
406 	if (copied < 0)
407 		return copied;
408 
409 	next.elem.size = copied;
410 
411 	/* Let the user of the driver know how much we copied */
412 	notifier_event(channel, NOTIFIER_ID_DMA_COPY,
413 		       NOTIFIER_TARGET_CORE_LOCAL, &next, sizeof(next));
414 
415 	return 0;
416 }
417 
418 /**
419  * \brief Initialize the driver
420  * \param[in] dma The preallocated DMA controller structure
421  * \return 0 on success, a negative value on error
422  *
423  * This function must be called before any other will work. Calling functions
424  * such as dma_channel_get() without a successful dma_probe() is undefined
425  * behavior.
426  */
dummy_dma_probe(struct dma * dma)427 static int dummy_dma_probe(struct dma *dma)
428 {
429 	struct dma_chan_pdata *chanp;
430 	int i;
431 
432 	if (dma->chan) {
433 		tr_err(&ddma_tr, "dummy-dmac %d already created!",
434 		       dma->plat_data.id);
435 		return -EEXIST; /* already created */
436 	}
437 
438 	dma->chan = rzalloc(SOF_MEM_ZONE_RUNTIME_SHARED, 0, SOF_MEM_CAPS_RAM,
439 			    dma->plat_data.channels * sizeof(dma->chan[0]));
440 	if (!dma->chan) {
441 		tr_err(&ddma_tr, "dummy-dmac %d: Out of memory!",
442 		       dma->plat_data.id);
443 		return -ENOMEM;
444 	}
445 
446 	chanp = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM,
447 			dma->plat_data.channels * sizeof(chanp[0]));
448 	if (!chanp) {
449 		rfree(dma->chan);
450 		tr_err(&ddma_tr, "dummy-dmac %d: Out of memory!",
451 		       dma->plat_data.id);
452 		dma->chan = NULL;
453 		return -ENOMEM;
454 	}
455 
456 	for (i = 0; i < dma->plat_data.channels; i++) {
457 		dma->chan[i].dma = dma;
458 		dma->chan[i].index = i;
459 		dma->chan[i].status = COMP_STATE_INIT;
460 		dma_chan_set_data(&dma->chan[i], &chanp[i]);
461 	}
462 
463 	atomic_init(&dma->num_channels_busy, 0);
464 
465 	return 0;
466 }
467 
468 /**
469  * \brief Free up all memory and resources used by this driver
470  * \param[in] dma The DMA controller structure belonging to this driver
471  *
472  * This function undoes everything that probe() did. All channels that were
473  * returned via dma_channel_get() become invalid and further usage of them is
474  * undefined behavior. dma_channel_put() is automatically called on all
475  * channels.
476  *
477  * This function is idempotent, and safe to call multiple times in a row.
478  */
dummy_dma_remove(struct dma * dma)479 static int dummy_dma_remove(struct dma *dma)
480 {
481 	tr_dbg(&ddma_tr, "dummy_dma %d -> remove", dma->plat_data.id);
482 	if (!dma->chan)
483 		return 0;
484 
485 	rfree(dma_chan_get_data(&dma->chan[0]));
486 	rfree(dma->chan);
487 	dma->chan = NULL;
488 	return 0;
489 }
490 
491 /**
492  * \brief Get DMA copy data sizes
493  * \param[in] channel DMA channel on which we're interested of the sizes
494  * \param[out] avail How much data the channel can deliver if copy() is called
495  *		     now
496  * \param[out] free How much data can be copied to the host via this channel
497  *		    without going over the buffer size
498  * \return 0 on success, -EINVAL if a configuration error is detected
499  */
dummy_dma_get_data_size(struct dma_chan_data * channel,uint32_t * avail,uint32_t * free)500 static int dummy_dma_get_data_size(struct dma_chan_data *channel,
501 				   uint32_t *avail, uint32_t *free)
502 {
503 	struct dma_chan_pdata *pdata = dma_chan_get_data(channel);
504 	uint32_t size = dummy_dma_compute_avail_data(pdata);
505 
506 	switch (channel->direction) {
507 	case DMA_DIR_HMEM_TO_LMEM:
508 		*avail = size;
509 		break;
510 	case DMA_DIR_LMEM_TO_HMEM:
511 		*free = size;
512 		break;
513 	default:
514 		tr_err(&ddma_tr, "get_data_size direction: %d",
515 		       channel->direction);
516 		return -EINVAL;
517 	}
518 	return 0;
519 }
520 
/* No IRQ lines to manage: copies run synchronously in process context,
 * so every dma_irq_cmd is accepted as a no-op.
 */
static int dummy_dma_interrupt(struct dma_chan_data *channel,
			       enum dma_irq_cmd cmd)
{
	/* Software DMA doesn't need any interrupts */
	return 0;
}
527 
dummy_dma_get_attribute(struct dma * dma,uint32_t type,uint32_t * value)528 static int dummy_dma_get_attribute(struct dma *dma, uint32_t type,
529 				   uint32_t *value)
530 {
531 	switch (type) {
532 	case DMA_ATTR_BUFFER_ALIGNMENT:
533 	case DMA_ATTR_COPY_ALIGNMENT:
534 		*value = sizeof(void *);
535 		break;
536 	case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT:
537 		*value = PLATFORM_DCACHE_ALIGN;
538 		break;
539 	case DMA_ATTR_BUFFER_PERIOD_COUNT:
540 		*value = DUMMY_DMA_BUFFER_PERIOD_COUNT;
541 		break;
542 	default:
543 		return -ENOENT; /* Attribute not found */
544 	}
545 	return 0;
546 }
547 
/* Operations vtable exported to the platform DMA layer; every callback
 * is implemented above in this file.
 */
const struct dma_ops dummy_dma_ops = {
	.channel_get	= dummy_dma_channel_get,
	.channel_put	= dummy_dma_channel_put,
	.start		= dummy_dma_start,
	.stop		= dummy_dma_stop,
	.pause		= dummy_dma_pause,
	.release	= dummy_dma_release,
	.copy		= dummy_dma_copy,
	.status		= dummy_dma_status,
	.set_config	= dummy_dma_set_config,
	.pm_context_restore		= dummy_dma_pm_context_restore,
	.pm_context_store		= dummy_dma_pm_context_store,
	.probe		= dummy_dma_probe,
	.remove		= dummy_dma_remove,
	.get_data_size	= dummy_dma_get_data_size,
	.interrupt	= dummy_dma_interrupt,
	.get_attribute	= dummy_dma_get_attribute,
};
566