1 // SPDX-License-Identifier: BSD-3-Clause
2 //
3 // Copyright 2019 NXP
4 //
5 // Author: Daniel Baluta <daniel.baluta@nxp.com>
6 // Author: Paul Olaru <paul.olaru@nxp.com>
7
8 /* Dummy DMA driver (software-based DMA controller)
9 *
10 * This driver is usable on all platforms where the DSP can directly access
11 * all of the host physical memory (or at least the host buffers).
12 *
13 * The way this driver works is that it simply performs the copies
14 * synchronously within the dma_start() and dma_copy() calls.
15 *
16 * One of the drawbacks of this driver is that it doesn't actually have a true
17 * IRQ context, as the copy is done synchronously and the IRQ callbacks are
18 * called in process context.
19 *
20 * An actual hardware DMA driver may be preferable because of the above
21 * drawback which comes from a software implementation. But if there isn't any
22 * hardware DMA controller dedicated for the host this driver can be used.
23 *
24 * This driver requires physical addresses in the elems. This assumption only
25 * holds if you have CONFIG_HOST_PTABLE enabled, at least currently.
26 */
27
28 #include <rtos/atomic.h>
29 #include <sof/audio/component.h>
30 #include <rtos/timer.h>
31 #include <rtos/alloc.h>
32 #include <rtos/cache.h>
33 #include <sof/lib/dma.h>
34 #include <sof/lib/notifier.h>
35 #include <sof/lib/uuid.h>
36 #include <sof/platform.h>
37 #include <rtos/spinlock.h>
38 #include <rtos/string.h>
39 #include <sof/trace/trace.h>
40 #include <sys/types.h>
41 #include <ipc/topology.h>
42 #include <user/trace.h>
43
44 #include <errno.h>
45 #include <stdbool.h>
46 #include <stddef.h>
47 #include <stdint.h>
48
49 LOG_MODULE_REGISTER(dummy_dma, CONFIG_SOF_LOG_LEVEL);
50
51 /* f6d15ad3-b122-458c-ae9b-0ab0b5867aa0 */
52 DECLARE_SOF_UUID("dummy-dma", dummy_dma_uuid, 0xf6d15ad3, 0xb122, 0x458c,
53 0xae, 0x9b, 0x0a, 0xb0, 0xb5, 0x86, 0x7a, 0xa0);
54
55 DECLARE_TR_CTX(ddma_tr, SOF_UUID(dummy_dma_uuid), LOG_LEVEL_INFO);
56
/* Per-channel private state for the software DMA. */
struct dma_chan_pdata {
	struct dma_sg_elem_array *elems;	/**< SG elems to copy, set by set_config() */
	int sg_elem_curr_idx;			/**< Index of the elem currently being copied */
	uintptr_t r_pos;			/**< Read position reported by dma_status() */
	uintptr_t w_pos;			/**< Write position reported by dma_status() */
	uintptr_t elem_progress;		/**< Bytes already copied of the current elem */
	bool cyclic;				/**< Wrap back to elem 0 after the last elem */
};
65
66 #define DUMMY_DMA_BUFFER_PERIOD_COUNT 2
67
68 /**
69 * \brief Copy the currently-in-progress DMA SG elem
70 * \param[in,out] pdata: Private data structure for this DMA channel
71 * \param[in] bytes: The amount of data requested for copying
72 * \return How many bytes have been copied, or -ENODATA if nothing can be
73 * copied. Will return 0 quickly if 0 bytes are requested.
74 *
75 * Perform the individual copy of the in-progress DMA SG elem. To copy more
76 * data, one should call this function repeatedly.
77 */
dummy_dma_copy_crt_elem(struct dma_chan_pdata * pdata,int bytes)78 static ssize_t dummy_dma_copy_crt_elem(struct dma_chan_pdata *pdata,
79 int bytes)
80 {
81 int ret;
82 uintptr_t rptr, wptr;
83 size_t orig_size, remaining_size, copy_size;
84
85 if (bytes == 0)
86 return 0;
87
88 /* Quick check, do we have a valid elem? */
89 if (pdata->sg_elem_curr_idx >= pdata->elems->count)
90 return -ENODATA;
91
92 /* We should copy whatever is left of the element, unless we have
93 * too little remaining for that to happen
94 */
95
96 /* Compute copy size and pointers */
97 rptr = pdata->elems->elems[pdata->sg_elem_curr_idx].src;
98 wptr = pdata->elems->elems[pdata->sg_elem_curr_idx].dest;
99 orig_size = pdata->elems->elems[pdata->sg_elem_curr_idx].size;
100 remaining_size = orig_size - pdata->elem_progress;
101 copy_size = MIN(remaining_size, bytes);
102
103 /* On playback, invalidate host buffer (it may lie in a cached area).
104 * Otherwise we could be playing stale data.
105 * On capture this should be safe as host.c does a writeback before
106 * triggering the DMA.
107 */
108 dcache_invalidate_region((void *)rptr, copy_size);
109
110 /* Perform the copy, being careful if we overflow the elem */
111 ret = memcpy_s((void *)wptr, remaining_size, (void *)rptr, copy_size);
112 assert(!ret);
113
114 /* On capture, writeback the host buffer (it may lie in a cached area).
115 * On playback, also writeback because host.c does an invalidate to
116 * be able to use the data transferred by the DMA.
117 */
118 dcache_writeback_region((void *)wptr, copy_size);
119
120 pdata->elem_progress += copy_size;
121
122 if (remaining_size == copy_size) {
123 /* Advance to next elem, if we can */
124 pdata->sg_elem_curr_idx++;
125 pdata->elem_progress = 0;
126 /* Support cyclic copying */
127 if (pdata->cyclic &&
128 pdata->sg_elem_curr_idx == pdata->elems->count)
129 pdata->sg_elem_curr_idx = 0;
130 }
131
132 return copy_size;
133 }
134
dummy_dma_comp_avail_data_cyclic(struct dma_chan_pdata * pdata)135 static size_t dummy_dma_comp_avail_data_cyclic(struct dma_chan_pdata *pdata)
136 {
137 /* Simple: just sum up all of the elements */
138 size_t size = 0;
139 int i;
140
141 for (i = 0; i < pdata->elems->count; i++)
142 size += pdata->elems->elems[i].size;
143
144 return size;
145 }
146
dummy_dma_comp_avail_data_noncyclic(struct dma_chan_pdata * pdata)147 static size_t dummy_dma_comp_avail_data_noncyclic(struct dma_chan_pdata *pdata)
148 {
149 /* Slightly harder, take remainder of the current element plus
150 * all of the data in future elements
151 */
152 size_t size = 0;
153 int i;
154
155 for (i = pdata->sg_elem_curr_idx; i < pdata->elems->count; i++)
156 size += pdata->elems->elems[i].size;
157
158 /* Account for partially copied current elem */
159 size -= pdata->elem_progress;
160
161 return size;
162 }
163
164 /**
165 * \brief Compute how much data is available for copying at this point
166 * \param[in] pdata: Private data structure for this DMA channel
167 * \return Number of available/free bytes for copying, possibly 0
168 *
169 * Returns how many bytes can be copied with one dma_copy() call.
170 */
dummy_dma_compute_avail_data(struct dma_chan_pdata * pdata)171 static size_t dummy_dma_compute_avail_data(struct dma_chan_pdata *pdata)
172 {
173 if (pdata->cyclic)
174 return dummy_dma_comp_avail_data_cyclic(pdata);
175 else
176 return dummy_dma_comp_avail_data_noncyclic(pdata);
177 }
178
/**
 * \brief Copy as many elems as required to copy @bytes bytes
 * \param[in,out] pdata: Private data structure for this DMA channel
 * \param[in] bytes: The amount of data requested for copying
 * \return How many bytes have been copied, or -ENODATA if nothing can be
 *	   copied.
 *
 * Repeatedly copies individual elems until the request for @bytes bytes is
 * satisfied. Copies exactly that much when possible, but stops short when
 * less data is available than requested.
 */
static ssize_t dummy_dma_do_copies(struct dma_chan_pdata *pdata, int bytes)
{
	ssize_t total = 0;
	ssize_t chunk;

	if (!dummy_dma_compute_avail_data(pdata))
		return -ENODATA;

	while (bytes) {
		chunk = dummy_dma_copy_crt_elem(pdata, bytes);
		if (chunk <= 0) {
			/* Report partial progress if any, else the error */
			return total > 0 ? total : chunk;
		}
		bytes -= chunk;
		total += chunk;
	}

	return total;
}
213
214 /**
215 * \brief Allocate next free DMA channel
216 * \param[in] dma: DMA controller
217 * \param[in] req_chan: Ignored, would have been a preference for a particular
218 * channel
219 * \return A structure to be used with the other callbacks in this driver,
220 * or NULL in case no channel could be allocated.
221 *
222 * This function allocates a DMA channel for actual usage by any SOF client
223 * code.
224 */
dummy_dma_channel_get(struct dma * dma,unsigned int req_chan)225 static struct dma_chan_data *dummy_dma_channel_get(struct dma *dma,
226 unsigned int req_chan)
227 {
228 k_spinlock_key_t key;
229 int i;
230
231 key = k_spin_lock(&dma->lock);
232 for (i = 0; i < dma->plat_data.channels; i++) {
233 /* use channel if it's free */
234 if (dma->chan[i].status == COMP_STATE_INIT) {
235 dma->chan[i].status = COMP_STATE_READY;
236
237 atomic_add(&dma->num_channels_busy, 1);
238
239 /* return channel */
240 k_spin_unlock(&dma->lock, key);
241 return &dma->chan[i];
242 }
243 }
244 k_spin_unlock(&dma->lock, key);
245 tr_err(&ddma_tr, "dummy-dmac: %d no free channel",
246 dma->plat_data.id);
247 return NULL;
248 }
249
dummy_dma_channel_put_unlocked(struct dma_chan_data * channel)250 static void dummy_dma_channel_put_unlocked(struct dma_chan_data *channel)
251 {
252 struct dma_chan_pdata *ch = dma_chan_get_data(channel);
253
254 /* Reset channel state */
255 notifier_unregister_all(NULL, channel);
256
257 ch->elems = NULL;
258 channel->desc_count = 0;
259 ch->sg_elem_curr_idx = 0;
260
261 ch->r_pos = 0;
262 ch->w_pos = 0;
263
264 channel->status = COMP_STATE_INIT;
265 atomic_sub(&channel->dma->num_channels_busy, 1);
266 }
267
268 /**
269 * \brief Free a DMA channel
270 * \param[in] channel: DMA channel
271 *
272 * Once a DMA channel is no longer needed it should be freed by calling this
273 * function.
274 */
dummy_dma_channel_put(struct dma_chan_data * channel)275 static void dummy_dma_channel_put(struct dma_chan_data *channel)
276 {
277 k_spinlock_key_t key;
278
279 key = k_spin_lock(&channel->dma->lock);
280 dummy_dma_channel_put_unlocked(channel);
281 k_spin_unlock(&channel->dma->lock, key);
282 }
283
/* Since copies are synchronous, the triggers do nothing */
static int dummy_dma_start(struct dma_chan_data *channel)
{
	return 0; /* always succeeds */
}
289
/* Since copies are synchronous, the triggers do nothing */
static int dummy_dma_release(struct dma_chan_data *channel)
{
	return 0; /* always succeeds */
}
295
/* Since copies are synchronous, the triggers do nothing */
static int dummy_dma_pause(struct dma_chan_data *channel)
{
	return 0; /* always succeeds */
}
301
/* Since copies are synchronous, the triggers do nothing */
static int dummy_dma_stop(struct dma_chan_data *channel)
{
	return 0; /* always succeeds */
}
307
308 /* fill in "status" with current DMA channel state and position */
dummy_dma_status(struct dma_chan_data * channel,struct dma_chan_status * status,uint8_t direction)309 static int dummy_dma_status(struct dma_chan_data *channel,
310 struct dma_chan_status *status,
311 uint8_t direction)
312 {
313 struct dma_chan_pdata *ch = dma_chan_get_data(channel);
314
315 status->state = channel->status;
316 status->flags = 0; /* TODO What flags should be put here? */
317 status->r_pos = ch->r_pos;
318 status->w_pos = ch->w_pos;
319
320 status->timestamp = sof_cycle_get_64();
321 return 0;
322 }
323
324 /**
325 * \brief Set channel configuration
326 * \param[in] channel: The channel to configure
327 * \param[in] config: Configuration data
328 * \return 0 on success, -EINVAL if the config is invalid or unsupported.
329 *
330 * Sets the channel configuration. For this particular driver the config means
331 * the direction and the actual SG elems for copying.
332 */
dummy_dma_set_config(struct dma_chan_data * channel,struct dma_sg_config * config)333 static int dummy_dma_set_config(struct dma_chan_data *channel,
334 struct dma_sg_config *config)
335 {
336 struct dma_chan_pdata *ch = dma_chan_get_data(channel);
337 k_spinlock_key_t key;
338 int ret = 0;
339
340 key = k_spin_lock(&channel->dma->lock);
341
342 if (!config->elem_array.count) {
343 tr_err(&ddma_tr, "dummy-dmac: %d channel %d no DMA descriptors",
344 channel->dma->plat_data.id,
345 channel->index);
346
347 ret = -EINVAL;
348 goto out;
349 }
350
351 channel->direction = config->direction;
352
353 if (config->direction != DMA_DIR_HMEM_TO_LMEM &&
354 config->direction != DMA_DIR_LMEM_TO_HMEM) {
355 /* Shouldn't even happen though */
356 tr_err(&ddma_tr, "dummy-dmac: %d channel %d invalid direction %d",
357 channel->dma->plat_data.id, channel->index,
358 config->direction);
359 ret = -EINVAL;
360 goto out;
361 }
362 channel->desc_count = config->elem_array.count;
363 ch->elems = &config->elem_array;
364 ch->sg_elem_curr_idx = 0;
365 ch->cyclic = config->cyclic;
366
367 channel->status = COMP_STATE_PREPARE;
368 out:
369 k_spin_unlock(&channel->dma->lock, key);
370 return ret;
371 }
372
373 /**
374 * \brief Perform the DMA copy itself
375 * \param[in] channel The channel to do the copying
376 * \param[in] bytes How many bytes are requested to be copied
377 * \param[in] flags Flags which may alter the copying (this driver ignores them)
378 * \return 0 on success (this driver always succeeds)
379 *
380 * The copying must be done synchronously within this function, then SOF (the
381 * host component) is notified via the callback that this number of bytes is
382 * available.
383 */
dummy_dma_copy(struct dma_chan_data * channel,int bytes,uint32_t flags)384 static int dummy_dma_copy(struct dma_chan_data *channel, int bytes,
385 uint32_t flags)
386 {
387 ssize_t copied;
388 struct dma_cb_data next = {
389 .channel = channel,
390 };
391 struct dma_chan_pdata *pdata = dma_chan_get_data(channel);
392
393 copied = dummy_dma_do_copies(pdata, bytes);
394 if (copied < 0)
395 return copied;
396
397 next.elem.size = copied;
398
399 /* Let the user of the driver know how much we copied */
400 notifier_event(channel, NOTIFIER_ID_DMA_COPY,
401 NOTIFIER_TARGET_CORE_LOCAL, &next, sizeof(next));
402
403 return 0;
404 }
405
406 /**
407 * \brief Initialize the driver
408 * \param[in] dma The preallocated DMA controller structure
409 * \return 0 on success, a negative value on error
410 *
411 * This function must be called before any other will work. Calling functions
412 * such as dma_channel_get() without a successful dma_probe() is undefined
413 * behavior.
414 */
dummy_dma_probe(struct dma * dma)415 static int dummy_dma_probe(struct dma *dma)
416 {
417 struct dma_chan_pdata *chanp;
418 int i;
419
420 if (dma->chan) {
421 tr_err(&ddma_tr, "dummy-dmac %d already created!",
422 dma->plat_data.id);
423 return -EEXIST; /* already created */
424 }
425
426 dma->chan = rzalloc(SOF_MEM_ZONE_RUNTIME_SHARED, 0, SOF_MEM_CAPS_RAM,
427 dma->plat_data.channels * sizeof(dma->chan[0]));
428 if (!dma->chan) {
429 tr_err(&ddma_tr, "dummy-dmac %d: Out of memory!",
430 dma->plat_data.id);
431 return -ENOMEM;
432 }
433
434 chanp = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM,
435 dma->plat_data.channels * sizeof(chanp[0]));
436 if (!chanp) {
437 rfree(dma->chan);
438 tr_err(&ddma_tr, "dummy-dmac %d: Out of memory!",
439 dma->plat_data.id);
440 dma->chan = NULL;
441 return -ENOMEM;
442 }
443
444 for (i = 0; i < dma->plat_data.channels; i++) {
445 dma->chan[i].dma = dma;
446 dma->chan[i].index = i;
447 dma->chan[i].status = COMP_STATE_INIT;
448 dma_chan_set_data(&dma->chan[i], &chanp[i]);
449 }
450
451 atomic_init(&dma->num_channels_busy, 0);
452
453 return 0;
454 }
455
456 /**
457 * \brief Free up all memory and resources used by this driver
458 * \param[in] dma The DMA controller structure belonging to this driver
459 *
460 * This function undoes everything that probe() did. All channels that were
461 * returned via dma_channel_get() become invalid and further usage of them is
462 * undefined behavior. dma_channel_put() is automatically called on all
463 * channels.
464 *
465 * This function is idempotent, and safe to call multiple times in a row.
466 */
dummy_dma_remove(struct dma * dma)467 static int dummy_dma_remove(struct dma *dma)
468 {
469 tr_dbg(&ddma_tr, "dummy_dma %d -> remove", dma->plat_data.id);
470 if (!dma->chan)
471 return 0;
472
473 rfree(dma_chan_get_data(&dma->chan[0]));
474 rfree(dma->chan);
475 dma->chan = NULL;
476 return 0;
477 }
478
479 /**
480 * \brief Get DMA copy data sizes
481 * \param[in] channel DMA channel on which we're interested of the sizes
482 * \param[out] avail How much data the channel can deliver if copy() is called
483 * now
484 * \param[out] free How much data can be copied to the host via this channel
485 * without going over the buffer size
486 * \return 0 on success, -EINVAL if a configuration error is detected
487 */
dummy_dma_get_data_size(struct dma_chan_data * channel,uint32_t * avail,uint32_t * free)488 static int dummy_dma_get_data_size(struct dma_chan_data *channel,
489 uint32_t *avail, uint32_t *free)
490 {
491 struct dma_chan_pdata *pdata = dma_chan_get_data(channel);
492 uint32_t size = dummy_dma_compute_avail_data(pdata);
493
494 switch (channel->direction) {
495 case DMA_DIR_HMEM_TO_LMEM:
496 *avail = size;
497 break;
498 case DMA_DIR_LMEM_TO_HMEM:
499 *free = size;
500 break;
501 default:
502 tr_err(&ddma_tr, "get_data_size direction: %d",
503 channel->direction);
504 return -EINVAL;
505 }
506 return 0;
507 }
508
/* IRQ control hook of the dma_ops interface; a software DMA performs its
 * copies synchronously, so there is no interrupt to mask/unmask/clear.
 */
static int dummy_dma_interrupt(struct dma_chan_data *channel,
			       enum dma_irq_cmd cmd)
{
	/* Software DMA doesn't need any interrupts */
	return 0;
}
515
dummy_dma_get_attribute(struct dma * dma,uint32_t type,uint32_t * value)516 static int dummy_dma_get_attribute(struct dma *dma, uint32_t type,
517 uint32_t *value)
518 {
519 switch (type) {
520 case DMA_ATTR_BUFFER_ALIGNMENT:
521 case DMA_ATTR_COPY_ALIGNMENT:
522 *value = sizeof(void *);
523 break;
524 case DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT:
525 *value = PLATFORM_DCACHE_ALIGN;
526 break;
527 case DMA_ATTR_BUFFER_PERIOD_COUNT:
528 *value = DUMMY_DMA_BUFFER_PERIOD_COUNT;
529 break;
530 default:
531 return -ENOENT; /* Attribute not found */
532 }
533 return 0;
534 }
535
/* Public dma_ops vtable binding this software driver into the SOF DMA
 * framework; referenced by the platform DMA controller table.
 */
const struct dma_ops dummy_dma_ops = {
	.channel_get	= dummy_dma_channel_get,
	.channel_put	= dummy_dma_channel_put,
	.start		= dummy_dma_start,
	.stop		= dummy_dma_stop,
	.pause		= dummy_dma_pause,
	.release	= dummy_dma_release,
	.copy		= dummy_dma_copy,
	.status		= dummy_dma_status,
	.set_config	= dummy_dma_set_config,
	.probe		= dummy_dma_probe,
	.remove		= dummy_dma_remove,
	.get_data_size	= dummy_dma_get_data_size,
	.interrupt	= dummy_dma_interrupt,
	.get_attribute	= dummy_dma_get_attribute,
};
552