// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2016 Intel Corporation. All rights reserved.
//
// Author: Ranjani Sridharan <ranjani.sridharan@linux.intel.com>

#include <rtos/atomic.h>
#include <sof/audio/audio_stream.h>
#include <sof/audio/buffer.h>
#include <rtos/alloc.h>
#include <rtos/cache.h>
#include <sof/lib/dma.h>
#include <sof/lib/memory.h>
#include <sof/lib/uuid.h>
#include <rtos/spinlock.h>
#include <sof/trace/trace.h>
#include <ipc/topology.h>
#include <user/trace.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

LOG_MODULE_REGISTER(dma, CONFIG_SOF_LOG_LEVEL);

/* bc3526a7-9b86-4ab4-84a5-2e02ae70cc10 */
DECLARE_SOF_UUID("dma", dma_uuid, 0xbc3526a7, 0x9b86, 0x4ab4,
		 0x84, 0xa5, 0x2e, 0x02, 0xae, 0x70, 0xcc, 0x10);

DECLARE_TR_CTX(dma_tr, SOF_UUID(dma_uuid), LOG_LEVEL_INFO);

#if CONFIG_ZEPHYR_NATIVE_DRIVERS
static int dma_init(struct dma *dma);

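/*
 * Request a DMA controller matching @dir, @cap and @dev (each may be 0
 * for "don't care") and take a reference on it. With
 * DMA_ACCESS_EXCLUSIVE the first matching controller with no users is
 * taken; otherwise the matching controller with the fewest users is
 * shared. Returns the controller, or NULL if nothing matches or
 * first-use initialization fails.
 */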
struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags)
{
	const struct dma_info *info = dma_info_get();
	int users, ret = 0;
	int min_users = INT32_MAX;
	struct dma *d = NULL, *dmin = NULL;
	k_spinlock_key_t key;

	if (!info->num_dmas) {
		tr_err(&dma_tr, "dma_get(): No DMACs installed");
		return NULL;
	}

	/* find DMAC with free channels that matches request */
	for (d = info->dma_array; d < info->dma_array + info->num_dmas;
	     d++) {
		/* skip if this DMAC does not support the requested dir */
		if (dir && (d->plat_data.dir & dir) == 0)
			continue;

		/* skip if this DMAC does not support the requested caps */
		if (cap && (d->plat_data.caps & cap) == 0)
			continue;

		/* skip if this DMAC does not support the requested dev */
		if (dev && (d->plat_data.devs & dev) == 0)
			continue;

		/* if exclusive access is requested */
		if (flags & DMA_ACCESS_EXCLUSIVE) {
			/* ret DMA with no users */
			if (!d->sref) {
				dmin = d;
				break;
			}
		} else {
			/* get number of users for this DMAC */
			users = d->sref;

			/* pick DMAC with the least num of users */
			if (users < min_users) {
				dmin = d;
				min_users = users;
			}
		}
	}

	if (!dmin) {
		tr_err(&dma_tr, "No DMAC dir %d caps 0x%x dev 0x%x flags 0x%x",
		       dir, cap, dev, flags);

		for (d = info->dma_array;
		     d < info->dma_array + info->num_dmas;
		     d++) {
			tr_err(&dma_tr, " DMAC ID %d users %d busy channels %ld",
			       d->plat_data.id, d->sref,
			       atomic_read(&d->num_channels_busy));
			tr_err(&dma_tr, " caps 0x%x dev 0x%x",
			       d->plat_data.caps, d->plat_data.devs);
		}

		return NULL;
	}

	/* return DMAC */
	tr_dbg(&dma_tr, "dma_get(), dma-probe id = %d",
	       dmin->plat_data.id);

	/* Shared DMA controllers with multiple channels
	 * may be requested many times, let the probe()
	 * do on-first-use initialization.
	 */
	key = k_spin_lock(&dmin->lock);

	if (!dmin->sref) {
		ret = dma_init(dmin);
		if (ret < 0) {
			tr_err(&dma_tr, "dma_get(): dma-probe failed id = %d, ret = %d",
			       dmin->plat_data.id, ret);
			goto out;
		}
	}

	dmin->sref++;

	tr_info(&dma_tr, "dma_get() ID %d sref = %d busy channels %ld",
		dmin->plat_data.id, dmin->sref,
		atomic_read(&dmin->num_channels_busy));
out:
	k_spin_unlock(&dmin->lock, key);
	return !ret ? dmin : NULL;
}

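/*
 * Drop a reference taken by dma_get(). On the last put the channel
 * array allocated in dma_init() is freed. Callers are expected to pair
 * every successful dma_get() with a dma_put(); an illustrative sketch
 * only (not code from this file):
 *
 *	struct dma *d = dma_get(DMA_DIR_MEM_TO_DEV, 0, DMA_DEV_SSP,
 *				DMA_ACCESS_SHARED);
 *	if (!d)
 *		return -ENODEV;
 *	...
 *	dma_put(d);
 */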
void dma_put(struct dma *dma)
{
	k_spinlock_key_t key;

	key = k_spin_lock(&dma->lock);
	if (--dma->sref == 0) {
		rfree(dma->chan);
		dma->chan = NULL;
	}

	tr_info(&dma_tr, "dma_put(), dma = %p, sref = %d",
		dma, dma->sref);
	k_spin_unlock(&dma->lock, key);
}

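/*
 * On-first-use initialization, called under the controller lock from
 * dma_get(): allocate one dma_chan_data per hardware channel and point
 * each channel back at its controller.
 */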
static int dma_init(struct dma *dma)
{
	struct dma_chan_data *chan;
	int i;

	/* allocate dma channels */
	dma->chan = rzalloc(SOF_MEM_ZONE_RUNTIME_SHARED, 0, SOF_MEM_CAPS_RAM,
			    sizeof(struct dma_chan_data) * dma->plat_data.channels);

	if (!dma->chan) {
		tr_err(&dma_tr, "dma_init(): dma %d allocation of channels failed",
		       dma->plat_data.id);
		return -ENOMEM;
	}

	/* init work */
	for (i = 0, chan = dma->chan; i < dma->plat_data.channels;
	     i++, chan++) {
		chan->dma = dma;
		chan->index = i;
	}

	return 0;
}
#else
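/*
 * Legacy variant of dma_get()/dma_put(), used when Zephyr native
 * drivers are not configured. It additionally skips any controller that
 * already has one user per available channel, and probes/removes the
 * controller through the legacy driver ops.
 */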
struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags)
{
	const struct dma_info *info = dma_info_get();
	int users, ret;
	int min_users = INT32_MAX;
	struct dma *d = NULL, *dmin = NULL;
	k_spinlock_key_t key;

	if (!info->num_dmas) {
		tr_err(&dma_tr, "dma_get(): No DMACs installed");
		return NULL;
	}

	/* find DMAC with free channels that matches request */
	for (d = info->dma_array; d < info->dma_array + info->num_dmas;
	     d++) {
		/* skip if this DMAC does not support the requested dir */
		if (dir && (d->plat_data.dir & dir) == 0)
			continue;

		/* skip if this DMAC does not support the requested caps */
		if (cap && (d->plat_data.caps & cap) == 0)
			continue;

		/* skip if this DMAC does not support the requested dev */
		if (dev && (d->plat_data.devs & dev) == 0)
			continue;

		/* skip if this DMAC has 1 user per avail channel */
		/* TODO: this should be fixed in dai.c to allow more users */
		if (d->sref >= d->plat_data.channels)
			continue;

		/* if exclusive access is requested */
		if (flags & DMA_ACCESS_EXCLUSIVE) {
			/* ret DMA with no users */
			if (!d->sref) {
				dmin = d;
				break;
			}
		} else {
			/* get number of users for this DMAC */
			users = d->sref;

			/* pick DMAC with the least num of users */
			if (users < min_users) {
				dmin = d;
				min_users = users;
			}
		}
	}

	if (!dmin) {
		tr_err(&dma_tr, "No DMAC dir %d caps 0x%x dev 0x%x flags 0x%x",
		       dir, cap, dev, flags);

		for (d = info->dma_array;
		     d < info->dma_array + info->num_dmas;
		     d++) {
			tr_err(&dma_tr, " DMAC ID %d users %d busy channels %ld",
			       d->plat_data.id, d->sref,
			       atomic_read(&d->num_channels_busy));
			tr_err(&dma_tr, " caps 0x%x dev 0x%x",
			       d->plat_data.caps, d->plat_data.devs);
		}

		return NULL;
	}

	/* return DMAC */
	tr_dbg(&dma_tr, "dma_get(), dma-probe id = %d",
	       dmin->plat_data.id);

	/* Shared DMA controllers with multiple channels
	 * may be requested many times, let the probe()
	 * do on-first-use initialization.
	 */
	key = k_spin_lock(&dmin->lock);

	ret = 0;
	if (!dmin->sref) {
		ret = dma_probe_legacy(dmin);
		if (ret < 0) {
			tr_err(&dma_tr, "dma_get(): dma-probe failed id = %d, ret = %d",
			       dmin->plat_data.id, ret);
		}
	}
	if (!ret)
		dmin->sref++;

	tr_info(&dma_tr, "dma_get() ID %d sref = %d busy channels %ld",
		dmin->plat_data.id, dmin->sref,
		atomic_read(&dmin->num_channels_busy));

	k_spin_unlock(&dmin->lock, key);
	return !ret ? dmin : NULL;
}

void dma_put(struct dma *dma)
{
	k_spinlock_key_t key;
	int ret;

	key = k_spin_lock(&dma->lock);
	if (--dma->sref == 0) {
		ret = dma_remove_legacy(dma);
		if (ret < 0) {
			tr_err(&dma_tr, "dma_put(): dma_remove() failed id = %d, ret = %d",
			       dma->plat_data.id, ret);
		}
	}
	tr_info(&dma_tr, "dma_put(), dma = %p, sref = %d",
		dma, dma->sref);
	k_spin_unlock(&dma->lock, key);
}
#endif

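/*
 * Build a scatter-gather element list describing @buffer_count chunks
 * of @buffer_bytes each. Only the local DMA buffer address advances
 * from element to element; the external address is the same in every
 * element. For DMA_DIR_MEM_TO_DEV and DMA_DIR_LMEM_TO_HMEM the local
 * buffer is the source, otherwise it is the destination.
 */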
int dma_sg_alloc(struct dma_sg_elem_array *elem_array,
		 enum mem_zone zone,
		 uint32_t direction,
		 uint32_t buffer_count, uint32_t buffer_bytes,
		 uintptr_t dma_buffer_addr, uintptr_t external_addr)
{
	int i;

	elem_array->elems = rzalloc(zone, 0, SOF_MEM_CAPS_RAM,
				    sizeof(struct dma_sg_elem) * buffer_count);
	if (!elem_array->elems)
		return -ENOMEM;

	for (i = 0; i < buffer_count; i++) {
		elem_array->elems[i].size = buffer_bytes;
		/* TODO: may count offsets once */
		switch (direction) {
		case DMA_DIR_MEM_TO_DEV:
		case DMA_DIR_LMEM_TO_HMEM:
			elem_array->elems[i].src = dma_buffer_addr;
			elem_array->elems[i].dest = external_addr;
			break;
		default:
			elem_array->elems[i].src = external_addr;
			elem_array->elems[i].dest = dma_buffer_addr;
			break;
		}

		dma_buffer_addr += buffer_bytes;
	}
	elem_array->count = buffer_count;
	return 0;
}
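
/*
 * Illustrative sketch only (local_buf/host_buf and the sizes are made
 * up): allocate a two-period playback list, then release it again.
 *
 *	struct dma_sg_elem_array elems;
 *	int err = dma_sg_alloc(&elems, SOF_MEM_ZONE_RUNTIME,
 *			       DMA_DIR_MEM_TO_DEV, 2, 384,
 *			       (uintptr_t)local_buf, (uintptr_t)host_buf);
 *	if (!err)
 *		dma_sg_free(&elems);
 */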

void dma_sg_free(struct dma_sg_elem_array *elem_array)
{
	rfree(elem_array->elems);
	dma_sg_init(elem_array);
}

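/*
 * Copy-path helper for the capture direction: the DMA engine has just
 * written @source_bytes into @source, so its cached copy is invalidated
 * before @process converts the samples into @sink, whose cache is then
 * written back. Read/write pointers are updated without notifier
 * events, as these buffers do not appear in the topology.
 */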
int dma_buffer_copy_from(struct comp_buffer __sparse_cache *source,
			 struct comp_buffer __sparse_cache *sink,
			 dma_process_func process, uint32_t source_bytes)
{
	struct audio_stream __sparse_cache *istream = &source->stream;
	uint32_t samples = source_bytes /
		audio_stream_sample_bytes(istream);
	uint32_t sink_bytes = audio_stream_sample_bytes(&sink->stream) *
		samples;
	int ret;

	/* source buffer contains data copied by DMA */
	audio_stream_invalidate(istream, source_bytes);

	/* process data */
	ret = process(istream, 0, &sink->stream, 0, samples);

	buffer_stream_writeback(sink, sink_bytes);

	/*
	 * consume istream using audio_stream API because this buffer doesn't
	 * appear in topology so notifier event is not needed
	 */
	audio_stream_consume(istream, source_bytes);
	comp_update_buffer_produce(sink, sink_bytes);

	return ret;
}

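/*
 * Mirror of dma_buffer_copy_from() for the playback direction: @source
 * is invalidated, @process converts the samples into @sink, and the
 * sink cache is written back so the DMA engine reads fresh data.
 */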
int dma_buffer_copy_to(struct comp_buffer __sparse_cache *source,
		       struct comp_buffer __sparse_cache *sink,
		       dma_process_func process, uint32_t sink_bytes)
{
	struct audio_stream __sparse_cache *ostream = &sink->stream;
	uint32_t samples = sink_bytes /
		audio_stream_sample_bytes(ostream);
	uint32_t source_bytes = audio_stream_sample_bytes(&source->stream) *
		samples;
	int ret;

	buffer_stream_invalidate(source, source_bytes);

	/* process data */
	ret = process(&source->stream, 0, ostream, 0, samples);

	/* sink buffer contains data meant to be copied to DMA */
	audio_stream_writeback(ostream, sink_bytes);

	/*
	 * produce ostream using audio_stream API because this buffer doesn't
	 * appear in topology so notifier event is not needed
	 */
	audio_stream_produce(ostream, sink_bytes);
	comp_update_buffer_consume(source, source_bytes);

	return ret;
}