1 // SPDX-License-Identifier: BSD-3-Clause
2 //
3 // Copyright(c) 2019 Intel Corporation. All rights reserved.
4 //
5 // Author: Tomasz Lauda <tomasz.lauda@linux.intel.com>
6
7 #include <sof/audio/component.h>
8 #include <sof/bit.h>
9 #include <sof/drivers/interrupt.h>
10 #include <sof/drivers/timer.h>
11 #include <sof/lib/alloc.h>
12 #include <sof/lib/cpu.h>
13 #include <sof/lib/dma.h>
14 #include <sof/lib/memory.h>
15 #include <sof/lib/notifier.h>
16 #include <sof/platform.h>
17 #include <sof/schedule/ll_schedule.h>
18 #include <sof/schedule/ll_schedule_domain.h>
19 #include <sof/schedule/schedule.h>
20 #include <sof/schedule/task.h>
21 #include <ipc/topology.h>
22 #include <errno.h>
23 #include <limits.h>
24 #include <stdbool.h>
25 #include <stddef.h>
26 #include <stdint.h>
27
/* Sentinel core id: no core currently owns the scheduling channel */
#define DMA_DOMAIN_OWNER_INVALID 0xFFFFFFFF
29
/* Per-core bookkeeping for the scheduling DMA channel interrupt */
struct dma_domain_data {
	int irq;			/* IRQ registered on this core */
	struct dma_chan_data *channel;	/* scheduling DMA channel, NULL if none */
	void (*handler)(void *arg);	/* interrupt handler */
	void *arg;			/* argument passed to handler */
};
36
/* Private data of the DMA single-channel scheduling domain */
struct dma_domain {
	struct dma *dma_array;	/* pointer to scheduling DMAs */
	uint32_t num_dma;	/* number of scheduling DMAs */
	uint32_t owner;		/* core owning the scheduling channel,
				 * DMA_DOMAIN_OWNER_INVALID if none
				 */
	bool channel_changed;	/* true if we needed to re-register */

	/* data per core */
	struct dma_domain_data data[CONFIG_CORE_COUNT];
};
46
/* ops table is defined at the bottom of this file */
const struct ll_schedule_domain_ops dma_single_chan_domain_ops;

/* forward declarations for handlers used before their definitions */
static void dma_single_chan_domain_enable(struct ll_schedule_domain *domain,
					  int core);
static void dma_domain_changed(void *arg, enum notify_id type, void *data);
52
53 /**
54 * \brief Retrieves DMA channel with lowest period.
55 * \param[in,out] dma_domain Pointer to DMA domain.
56 * \return Pointer to DMA channel.
57 */
dma_chan_min_period(struct dma_domain * dma_domain)58 static struct dma_chan_data *dma_chan_min_period(struct dma_domain *dma_domain)
59 {
60 struct dma *dmas = dma_domain->dma_array;
61 struct dma_chan_data *channel = NULL;
62 int i;
63 int j;
64
65 /* get currently registered channel if exists */
66 if (dma_domain->owner != DMA_DOMAIN_OWNER_INVALID &&
67 dma_domain->data[dma_domain->owner].channel)
68 channel = dma_domain->data[dma_domain->owner].channel;
69
70 for (i = 0; i < dma_domain->num_dma; ++i) {
71 /* DMA not probed */
72 if (!dmas[i].sref)
73 continue;
74
75 for (j = 0; j < dmas[i].plat_data.channels; ++j) {
76 /* channel not set as scheduling source */
77 if (!dma_is_scheduling_source(&dmas[i].chan[j]))
78 continue;
79
80 /* channel not running */
81 if (dmas[i].chan[j].status != COMP_STATE_ACTIVE)
82 continue;
83
84 /* bigger period */
85 if (channel &&
86 channel->period <= dmas[i].chan[j].period)
87 continue;
88
89 channel = &dmas[i].chan[j];
90 }
91 }
92
93 return channel;
94 }
95
96 /**
97 * \brief Sends notification event about scheduling DMA channel change.
98 * \param[in,out] channel Pointer to new scheduling DMA channel.
99 */
dma_domain_notify_change(struct dma_chan_data * channel)100 static void dma_domain_notify_change(struct dma_chan_data *channel)
101 {
102 tr_info(&ll_tr, "dma_domain_notify_change()");
103
104 notifier_event(channel, NOTIFIER_ID_DMA_DOMAIN_CHANGE,
105 NOTIFIER_TARGET_CORE_ALL_MASK & ~BIT(cpu_get_id()),
106 channel, sizeof(*channel));
107 }
108
109 /**
110 * \brief Registers and enables selected DMA interrupt.
111 * \param[in,out] channel Pointer to DMA channel.
112 * \param[in,out] data Pointer to DMA domain data.
113 * \param[in,out] handler Pointer to DMA interrupt handler.
114 * \param[in,out] arg Pointer to DMA interrupt handler's argument.
115 * \return Error code.
116 */
dma_single_chan_domain_irq_register(struct dma_chan_data * channel,struct dma_domain_data * data,void (* handler)(void * arg),void * arg)117 static int dma_single_chan_domain_irq_register(struct dma_chan_data *channel,
118 struct dma_domain_data *data,
119 void (*handler)(void *arg),
120 void *arg)
121 {
122 int irq = dma_chan_irq(channel->dma, channel->index);
123 int ret;
124
125 tr_info(&ll_tr, "dma_single_chan_domain_irq_register()");
126
127 data->irq = interrupt_get_irq(irq, dma_irq_name(channel->dma));
128 if (data->irq < 0) {
129 ret = data->irq;
130 goto out;
131 }
132
133 ret = interrupt_register(data->irq, handler, arg);
134 if (ret < 0)
135 goto out;
136
137 interrupt_enable(data->irq, arg);
138
139 interrupt_mask(data->irq, cpu_get_id());
140
141 data->channel = channel;
142 data->handler = handler;
143 data->arg = arg;
144
145 out:
146 return ret;
147 }
148
149 /**
150 * \brief Unregisters and disables selected DMA interrupt.
151 * \param[in,out] data Pointer to DMA domain data.
152 */
dma_single_chan_domain_irq_unregister(struct dma_domain_data * data)153 static void dma_single_chan_domain_irq_unregister(struct dma_domain_data *data)
154 {
155 tr_info(&ll_tr, "dma_single_chan_domain_irq_unregister()");
156
157 interrupt_disable(data->irq, data->arg);
158 interrupt_unregister(data->irq, data->arg);
159 }
160
161 /**
162 * \brief Registers task to DMA domain.
163 *
164 * \param[in,out] domain Pointer to schedule domain.
165 * \param[in,out] task Task to be registered.
166 * \param[in,out] handler Pointer to DMA interrupt handler.
167 * \param[in,out] arg Pointer to DMA interrupt handler's argument.
168 * \return Error code.
169 *
170 * Every core registers for the same DMA channel, but only the one on which
171 * this channel is actually running is the owner. If there is already channel
172 * running we still need to verify, if the recently started channel doesn't
173 * have lower period. In such case change of channel is needed and all other
174 * cores need to be notified.
175 */
static int dma_single_chan_domain_register(struct ll_schedule_domain *domain,
					   struct task *task,
					   void (*handler)(void *arg),
					   void *arg)
{
	struct dma_domain *dma_domain = ll_sch_domain_get_pdata(domain);
	struct pipeline_task *pipe_task = pipeline_task_get(task);
	int core = cpu_get_id();
	struct dma_domain_data *data = &dma_domain->data[core];
	struct dma_chan_data *channel;
	bool register_needed = true;
	int ret = 0;

	tr_info(&ll_tr, "dma_single_chan_domain_register()");

	/* check if task should be registered; non-registrable tasks
	 * piggyback on the channel registered by the registrable one
	 */
	if (!pipe_task->registrable)
		goto out;

	/* get running channel with min period */
	channel = dma_chan_min_period(dma_domain);
	if (!channel) {
		ret = -EINVAL;
		goto out;
	}

	if (data->channel) {
		/* channel with min period already registered */
		if (data->channel->period == channel->period)
			goto out;

		tr_info(&ll_tr, "dma_single_chan_domain_register(): lower period detected, registering again");

		/* unregister from current channel: drop the IRQ first,
		 * then mask and clear the DMA-side interrupt
		 */
		dma_single_chan_domain_irq_unregister(data);
		dma_interrupt(data->channel, DMA_IRQ_MASK);
		dma_interrupt(data->channel, DMA_IRQ_CLEAR);

		/* makes domain_set() resync next_tick to the timer */
		dma_domain->channel_changed = true;

		/* already registered with the notifier */
		register_needed = false;
	}

	/* period may be wider than unsigned int; clamp for tracing only */
	if (channel->period <= UINT_MAX)
		tr_info(&ll_tr,
			"dma_single_chan_domain_register(): registering on channel with period %u",
			(unsigned int)channel->period);
	else
		tr_info(&ll_tr,
			"dma_single_chan_domain_register(): registering on channel with period > %u",
			UINT_MAX);

	/* register for interrupt; also stores channel/handler/arg in data */
	ret = dma_single_chan_domain_irq_register(channel, data, handler, arg);
	if (ret < 0)
		goto out;

	/* enable channel interrupt */
	dma_interrupt(data->channel, DMA_IRQ_UNMASK);

	/* unmask if we are the owner */
	if (dma_domain->owner == core)
		interrupt_unmask(data->irq, core);

	/* notify scheduling channel change to the other cores */
	if (dma_domain->owner != channel->core)
		dma_domain_notify_change(channel);

	/* register for source change notifications (only once per core) */
	if (register_needed)
		notifier_register(domain, NULL, NOTIFIER_ID_DMA_DOMAIN_CHANGE,
				  dma_domain_changed, 0);

	/* ownership follows the core the scheduling channel runs on */
	dma_domain->owner = channel->core;

out:
	return ret;
}
255
256 /**
257 * \brief Checks if any DMA channel is running on current core.
258 * \param[in,out] dmas Array of possible DMAs.
259 * \param[in] num Number of DMAs in the array.
260 * \return True if any DMA channel is running, false otherwise.
261 */
dma_chan_is_any_running(struct dma * dmas,uint32_t num)262 static bool dma_chan_is_any_running(struct dma *dmas, uint32_t num)
263 {
264 int core = cpu_get_id();
265 bool ret = false;
266 int i;
267 int j;
268
269 for (i = 0; i < num; ++i) {
270 /* DMA not probed */
271 if (!dmas[i].sref)
272 continue;
273
274 for (j = 0; j < dmas[i].plat_data.channels; ++j) {
275 /* channel not set as scheduling source */
276 if (!dma_is_scheduling_source(&dmas[i].chan[j]))
277 continue;
278
279 /* channel owned by different core */
280 if (core != dmas[i].chan[j].core)
281 continue;
282
283 if (dmas[i].chan[j].status == COMP_STATE_ACTIVE) {
284 ret = true;
285 goto out;
286 }
287 }
288 }
289
290 out:
291 return ret;
292 }
293
294 /**
295 * \brief Unregisters task from DMA domain if the current core is the owner.
296 * \param[in,out] domain Pointer to schedule domain.
297 * \param[in,out] data Pointer to DMA domain data.
298 *
299 * If the owner of scheduling DMA channel unregisters, it has to notify
300 * other cores about the change.
301 */
static void dma_domain_unregister_owner(struct ll_schedule_domain *domain,
					struct dma_domain_data *data)
{
	struct dma_domain *dma_domain = ll_sch_domain_get_pdata(domain);
	struct dma *dmas = dma_domain->dma_array;
	struct dma_chan_data *channel;

	tr_info(&ll_tr, "dma_domain_unregister_owner()");

	/* transfers still scheduled on this channel, keep everything as is */
	if (data->channel->status == COMP_STATE_ACTIVE)
		return;

	channel = dma_chan_min_period(dma_domain);
	if (channel && dma_chan_is_any_running(dmas, dma_domain->num_dma)) {
		/* another channel is running on this core: keep our IRQ
		 * registration, just switch to the new scheduling channel
		 */
		tr_info(&ll_tr, "dma_domain_unregister_owner(): domain in use, change owner");

		/* change owner */
		dma_domain->owner = channel->core;

		/* notify scheduling channel change */
		dma_domain_notify_change(channel);

		data->channel = channel;
		dma_domain->channel_changed = true;

		return;
	}

	/* no other channel is running on this core; tear down the IRQ and
	 * quiesce the DMA-side interrupt before releasing the channel
	 */
	dma_single_chan_domain_irq_unregister(data);
	dma_interrupt(data->channel, DMA_IRQ_MASK);
	dma_interrupt(data->channel, DMA_IRQ_CLEAR);
	data->channel = NULL;

	if (channel) {
		/* a channel runs on some other core: hand ownership over */
		dma_domain->owner = channel->core;

		/* notify scheduling channel change */
		dma_domain_notify_change(channel);

		return;
	}

	/* no scheduling channel left anywhere */
	dma_domain->owner = DMA_DOMAIN_OWNER_INVALID;

	notifier_unregister(domain, NULL, NOTIFIER_ID_DMA_DOMAIN_CHANGE);
}
352
353 /**
354 * \brief Unregisters task from DMA domain.
355 * \param[in,out] domain Pointer to schedule domain.
356 * \param[in,out] task Task to be unregistered from the domain.
357 * \param[in] num_tasks Number of currently scheduled tasks.
358 * \return Error code.
359 */
static int dma_single_chan_domain_unregister(struct ll_schedule_domain *domain,
					     struct task *task,
					     uint32_t num_tasks)
{
	struct dma_domain *dma_domain = ll_sch_domain_get_pdata(domain);
	struct pipeline_task *pipe_task = pipeline_task_get(task);
	struct dma *dmas = dma_domain->dma_array;
	int core = cpu_get_id();
	struct dma_domain_data *data = &dma_domain->data[core];

	tr_info(&ll_tr, "dma_single_chan_domain_unregister()");

	/* check if task should be unregistered; non-registrable tasks
	 * never registered a channel in the first place
	 */
	if (!pipe_task->registrable)
		return 0;

	/* channel not registered */
	if (!data->channel)
		return -EINVAL;

	/* unregister domain owner: may hand ownership to another core */
	if (dma_domain->owner == core) {
		dma_domain_unregister_owner(domain, data);
		return 0;
	}

	/* some channel still running on this core, so keep the IRQ */
	if (dma_chan_is_any_running(dmas, dma_domain->num_dma))
		return -EBUSY;

	/* no more transfers scheduled on this core */
	dma_single_chan_domain_irq_unregister(data);
	data->channel = NULL;

	notifier_unregister(domain, NULL, NOTIFIER_ID_DMA_DOMAIN_CHANGE);

	return 0;
}
398
399 /**
400 * \brief Unmasks scheduling DMA channel's interrupt.
401 * \param[in,out] domain Pointer to schedule domain.
402 * \param[in] core Core on which interrupt should be unmasked.
403 */
dma_single_chan_domain_enable(struct ll_schedule_domain * domain,int core)404 static void dma_single_chan_domain_enable(struct ll_schedule_domain *domain,
405 int core)
406 {
407 struct dma_domain *dma_domain = ll_sch_domain_get_pdata(domain);
408 struct dma_domain_data *data = &dma_domain->data[core];
409
410 /* channel not registered */
411 if (!data->channel)
412 return;
413
414 dma_interrupt(data->channel, DMA_IRQ_UNMASK);
415 interrupt_unmask(data->irq, core);
416 }
417
418 /**
419 * \brief Masks scheduling DMA channel's interrupt.
420 * \param[in,out] domain Pointer to schedule domain.
421 * \param[in] core Core on which interrupt should be masked.
422 */
dma_single_chan_domain_disable(struct ll_schedule_domain * domain,int core)423 static void dma_single_chan_domain_disable(struct ll_schedule_domain *domain,
424 int core)
425 {
426 struct dma_domain *dma_domain = ll_sch_domain_get_pdata(domain);
427 struct dma_domain_data *data = &dma_domain->data[core];
428
429 /* channel not registered */
430 if (!data->channel)
431 return;
432
433 interrupt_mask(data->irq, core);
434 }
435
436 /**
437 * \brief Calculates domain's next tick.
438 * \param[in,out] domain Pointer to schedule domain.
439 * \param[in] start Offset of last calculated tick.
440 */
static void dma_single_chan_domain_set(struct ll_schedule_domain *domain,
				       uint64_t start)
{
	struct dma_domain *dma_domain = ll_sch_domain_get_pdata(domain);
	struct dma_domain_data *data = &dma_domain->data[cpu_get_id()];
	uint64_t ticks;

	/* channel not registered */
	if (!data->channel)
		return;

	if (dma_domain->channel_changed) {
		/* scheduling channel just changed: resync to current time */
		domain->next_tick = platform_timer_get_atomic(timer_get());

		dma_domain->channel_changed = false;
	} else {
		/* one channel period (in ms) past the last tick offset */
		ticks = domain->ticks_per_ms * data->channel->period / 1000 +
			start;

		/* NOTE(review): this picks `ticks` when next_tick is already
		 * set (!= UINT64_MAX) and bare `start` otherwise — confirm
		 * the condition isn't meant to be inverted
		 */
		domain->next_tick = domain->next_tick != UINT64_MAX ?
				    ticks : start;
	}
}
464
465 /**
466 * \brief Clears scheduling DMA channel's interrupt.
467 * \param[in,out] domain Pointer to schedule domain.
468 */
dma_single_chan_domain_clear(struct ll_schedule_domain * domain)469 static void dma_single_chan_domain_clear(struct ll_schedule_domain *domain)
470 {
471 struct dma_domain *dma_domain = ll_sch_domain_get_pdata(domain);
472 struct dma_domain_data *data = &dma_domain->data[cpu_get_id()];
473
474 /* channel not registered */
475 if (!data->channel)
476 return;
477
478 dma_interrupt(data->channel, DMA_IRQ_CLEAR);
479 }
480
481 /**
482 * \brief Checks if given task should be executed.
483 * \param[in,out] domain Pointer to schedule domain.
484 * \param[in,out] task Task to be checked.
485 * \return True is task should be executed, false otherwise.
486 */
dma_single_chan_domain_is_pending(struct ll_schedule_domain * domain,struct task * task,struct comp_dev ** comp)487 static bool dma_single_chan_domain_is_pending(struct ll_schedule_domain *domain,
488 struct task *task, struct comp_dev **comp)
489 {
490 return task->start <= platform_timer_get_atomic(timer_get());
491 }
492
493 /**
494 * \brief Scheduling DMA channel change notification handling.
495 * \param[in,out] arg Pointer to self.
496 * \param[in] type Id of the notification.
497 * \param[in,out] data Pointer to notification event data.
498 */
static void dma_domain_changed(void *arg, enum notify_id type, void *data)
{
	struct ll_schedule_domain *domain = arg;
	struct dma_domain *dma_domain = ll_sch_domain_get_pdata(domain);
	int core = cpu_get_id();
	struct dma_domain_data *domain_data = &dma_domain->data[core];

	tr_info(&ll_tr, "dma_domain_changed()");

	/* unregister from current DMA channel */
	dma_single_chan_domain_irq_unregister(domain_data);

	/* quiesce the old channel's DMA-side interrupt, but only on the
	 * core the channel actually runs on
	 */
	if (domain_data->channel->core == core) {
		dma_interrupt(domain_data->channel, DMA_IRQ_MASK);
		dma_interrupt(domain_data->channel, DMA_IRQ_CLEAR);
	}

	/* register to the new DMA channel; `data` is the new channel
	 * pointer sent by dma_domain_notify_change()
	 */
	if (dma_single_chan_domain_irq_register(data, domain_data,
						domain_data->handler,
						domain_data->arg) < 0)
		return;

	dma_single_chan_domain_enable(domain, core);
}
524
525 /**
526 * \brief Initializes DMA single channel scheduling domain.
527 * \param[in,out] dma_array Array of DMAs to be scheduled on.
528 * \param[in] num_dma Number of DMAs passed as dma_array.
529 * \param[in] clk Platform clock to base calculations on.
530 * \return Pointer to initialized scheduling domain object.
531 */
dma_single_chan_domain_init(struct dma * dma_array,uint32_t num_dma,int clk)532 struct ll_schedule_domain *dma_single_chan_domain_init(struct dma *dma_array,
533 uint32_t num_dma,
534 int clk)
535 {
536 struct ll_schedule_domain *domain;
537 struct dma_domain *dma_domain;
538
539 tr_info(&ll_tr, "dma_single_chan_domain_init(): num_dma %d, clk %d",
540 num_dma, clk);
541
542 domain = domain_init(SOF_SCHEDULE_LL_DMA, clk, false,
543 &dma_single_chan_domain_ops);
544
545 dma_domain = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*dma_domain));
546 dma_domain->dma_array = dma_array;
547 dma_domain->num_dma = num_dma;
548 dma_domain->owner = DMA_DOMAIN_OWNER_INVALID;
549
550 ll_sch_domain_set_pdata(domain, dma_domain);
551
552 return domain;
553 }
554
/* Operations table for the DMA single-channel scheduling domain */
const struct ll_schedule_domain_ops dma_single_chan_domain_ops = {
	.domain_register	= dma_single_chan_domain_register,
	.domain_unregister	= dma_single_chan_domain_unregister,
	.domain_enable		= dma_single_chan_domain_enable,
	.domain_disable		= dma_single_chan_domain_disable,
	.domain_set		= dma_single_chan_domain_set,
	.domain_clear		= dma_single_chan_domain_clear,
	.domain_is_pending	= dma_single_chan_domain_is_pending,
};
564