1 // SPDX-License-Identifier: BSD-3-Clause
2 //
3 // Copyright(c) 2019 Intel Corporation. All rights reserved.
4 //
5 // Author: Tomasz Lauda <tomasz.lauda@linux.intel.com>
6
7 #include <sof/audio/component.h>
8 #include <rtos/bit.h>
9 #include <rtos/interrupt.h>
10 #include <rtos/timer.h>
11 #include <rtos/alloc.h>
12 #include <sof/lib/cpu.h>
13 #include <sof/lib/dma.h>
14 #include <sof/lib/memory.h>
15 #include <sof/lib/notifier.h>
16 #include <sof/platform.h>
17 #include <sof/schedule/ll_schedule.h>
18 #include <sof/schedule/ll_schedule_domain.h>
19 #include <sof/schedule/schedule.h>
20 #include <rtos/task.h>
21 #include <ipc/topology.h>
22 #include <stdbool.h>
23 #include <stddef.h>
24 #include <stdint.h>
25
26 LOG_MODULE_DECLARE(ll_schedule, CONFIG_SOF_LOG_LEVEL);
27
28 /* For i.MX, when building SOF with Zephyr, we use wrapper.c,
29 * interrupt.c and interrupt-irqsteer.c which causes name
30 * collisions.
31 * In order to avoid this and make any second level interrupt
32 * handling go through interrupt-irqsteer.c define macros to
33 * rename the duplicated functions.
34 */
35 #if defined(__ZEPHYR__) && defined(CONFIG_IMX)
36 #define interrupt_get_irq mux_interrupt_get_irq
37 #define interrupt_register mux_interrupt_register
38 #define interrupt_unregister mux_interrupt_unregister
39 #define interrupt_enable mux_interrupt_enable
40 #define interrupt_disable mux_interrupt_disable
41 #endif
42
/* Per-channel registration record for the DMA scheduling domain */
struct dma_domain_data {
	int irq;			/* interrupt number resolved for this DMA channel */
	struct pipeline_task *task;	/* task registered on this channel, NULL if free */
	void (*handler)(void *arg);	/* client interrupt handler to dispatch to */
	void *arg;			/* opaque argument passed to handler */
};
49
/* Private data of the DMA multi-channel scheduling domain,
 * attached to the generic ll_schedule_domain via pdata.
 */
struct dma_domain {
	struct dma *dma_array;	/* pointer to scheduling DMAs */
	uint32_t num_dma;	/* number of scheduling DMAs */
	bool aggregated_irq;	/* true if aggregated interrupts */

	/* mask of currently running channels */
	uint32_t channel_mask[PLATFORM_NUM_DMACS][CONFIG_CORE_COUNT];
	/* array of arguments for aggregated mode */
	struct dma_domain_data *arg[PLATFORM_NUM_DMACS][CONFIG_CORE_COUNT];
	/* array of registered channels data */
	struct dma_domain_data data[PLATFORM_NUM_DMACS][PLATFORM_MAX_DMA_CHAN];
};
62
63 const struct ll_schedule_domain_ops dma_multi_chan_domain_ops;
64
65 /**
66 * \brief Generic DMA interrupt handler.
67 * \param[in,out] data Pointer to DMA domain data.
68 */
dma_multi_chan_domain_irq_handler(void * data)69 static void dma_multi_chan_domain_irq_handler(void *data)
70 {
71 struct dma_domain_data *domain_data = data;
72
73 /* just call registered handler */
74 domain_data->handler(domain_data->arg);
75
76 }
77
78 /**
79 * \brief Registers and enables selected DMA interrupt.
80 * \param[in,out] data Pointer to DMA domain data.
81 * \param[in,out] handler Pointer to DMA interrupt handler.
82 * \return Error code.
83 */
dma_multi_chan_domain_irq_register(struct dma_domain_data * data,void (* handler)(void * arg))84 static int dma_multi_chan_domain_irq_register(struct dma_domain_data *data,
85 void (*handler)(void *arg))
86 {
87 int ret;
88
89 tr_info(&ll_tr, "dma_multi_chan_domain_irq_register()");
90
91 /* always go through dma_multi_chan_domain_irq_handler,
92 * so we have different arg registered for every channel
93 */
94 ret = interrupt_register(data->irq, dma_multi_chan_domain_irq_handler,
95 data);
96 if (ret < 0)
97 return ret;
98
99 interrupt_enable(data->irq, data);
100
101 return 0;
102 }
103
104 /**
105 * \brief Registers task to DMA domain.
106 *
107 * \param[in,out] domain Pointer to schedule domain.
108 * \param[in,out] task Task to be registered.
109 * \param[in,out] handler Pointer to DMA interrupt handler.
110 * \param[in,out] arg Pointer to DMA interrupt handler's argument.
111 * \return Error code.
112 *
113 * Keeps track of potential double registrations and handles non aggregated
114 * DMA interrupts (different irq number per DMA channel).
115 */
static int dma_multi_chan_domain_register(struct ll_schedule_domain *domain,
					  struct task *task,
					  void (*handler)(void *arg), void *arg)
{
	struct dma_domain *dma_domain = ll_sch_domain_get_pdata(domain);
	struct pipeline_task *pipe_task = pipeline_task_get(task);
	struct dma *dmas = dma_domain->dma_array;
	int core = cpu_get_id();
	int ret = 0;
	int i;
	int j;

	tr_info(&ll_tr, "dma_multi_chan_domain_register()");

	/* check if task should be registered */
	if (!pipe_task->registrable)
		goto out;

	/* Scan all scheduling DMAs for the first active channel on this
	 * core that is not yet tracked; at most ONE channel is claimed
	 * per call (note the goto out at the bottom of the loop body).
	 */
	for (i = 0; i < dma_domain->num_dma; ++i) {
		for (j = 0; j < dmas[i].plat_data.channels; ++j) {
			/* channel not set as scheduling source */
			if (!dma_is_scheduling_source(&dmas[i].chan[j]))
				continue;

			/* channel not running */
			if (dmas[i].chan[j].status != COMP_STATE_ACTIVE)
				continue;

			/* channel owned by different core */
			if (core != dmas[i].chan[j].core)
				continue;

			/* channel has been already running */
			if (dma_domain->channel_mask[i][core] & BIT(j))
				continue;

			/* drop any stale pending IRQ before arming */
			dma_interrupt_legacy(&dmas[i].chan[j], DMA_IRQ_CLEAR);

			/* register only if not aggregated or not registered:
			 * with an aggregated (shared) IRQ line, only the
			 * first channel of a DMAC on this core installs the
			 * handler; subsequent channels reuse it.
			 */
			if (!dma_domain->aggregated_irq ||
			    !dma_domain->channel_mask[i][core]) {
				ret = dma_multi_chan_domain_irq_register(
						&dma_domain->data[i][j],
						handler);
				if (ret < 0)
					goto out;

				dma_domain->data[i][j].handler = handler;
				dma_domain->data[i][j].arg = arg;

				/* needed to unregister aggregated interrupts */
				dma_domain->arg[i][core] =
					&dma_domain->data[i][j];
			}

			interrupt_clear_mask(dma_domain->data[i][j].irq,
					     BIT(j));

			/* let this channel's IRQ fire from now on */
			dma_interrupt_legacy(&dmas[i].chan[j], DMA_IRQ_UNMASK);

			dma_domain->data[i][j].task = pipe_task;
			dma_domain->channel_mask[i][core] |= BIT(j);

			goto out;
		}
	}

out:
	return ret;
}
186
187 /**
188 * \brief Unregisters and disables selected DMA interrupt.
 * \param[in,out] data Pointer to DMA domain data.
190 */
dma_multi_chan_domain_irq_unregister(struct dma_domain_data * data)191 static void dma_multi_chan_domain_irq_unregister(struct dma_domain_data *data)
192 {
193 tr_info(&ll_tr, "dma_multi_chan_domain_irq_unregister()");
194
195 interrupt_disable(data->irq, data);
196
197 interrupt_unregister(data->irq, data);
198 }
199
200 /**
201 * \brief Unregisters task from DMA domain.
202 * \param[in,out] domain Pointer to schedule domain.
 * \param[in,out] task Task to be unregistered from the domain.
204 * \param[in] num_tasks Number of currently scheduled tasks.
205 * \return Error code.
206 */
static int dma_multi_chan_domain_unregister(struct ll_schedule_domain *domain,
					    struct task *task,
					    uint32_t num_tasks)
{
	struct dma_domain *dma_domain = ll_sch_domain_get_pdata(domain);
	struct pipeline_task *pipe_task = pipeline_task_get(task);
	struct dma *dmas = dma_domain->dma_array;
	int core = cpu_get_id();
	int i;
	int j;

	tr_info(&ll_tr, "dma_multi_chan_domain_unregister()");

	/* check if task should be unregistered */
	if (!task || !pipe_task->registrable)
		return 0;

	/* Find the first tracked channel on this core that has stopped
	 * running and tear it down; only one channel is released per call.
	 */
	for (i = 0; i < dma_domain->num_dma; ++i) {
		for (j = 0; j < dmas[i].plat_data.channels; ++j) {
			/* channel not set as scheduling source */
			if (!dma_is_scheduling_source(&dmas[i].chan[j]))
				continue;

			/* channel still running */
			if (dmas[i].chan[j].status == COMP_STATE_ACTIVE)
				continue;

			/* channel owned by different core */
			if (core != dmas[i].chan[j].core)
				continue;

			/* channel hasn't been running */
			if (!(dma_domain->channel_mask[i][core] & BIT(j)))
				continue;

			/* mask first, then clear any pending state */
			dma_interrupt_legacy(&dmas[i].chan[j], DMA_IRQ_MASK);
			dma_interrupt_legacy(&dmas[i].chan[j], DMA_IRQ_CLEAR);
			interrupt_clear_mask(dma_domain->data[i][j].irq,
					     BIT(j));

			dma_domain->data[i][j].task = NULL;
			dma_domain->channel_mask[i][core] &= ~BIT(j);

			/* unregister interrupt: per-channel IRQ directly,
			 * shared (aggregated) IRQ only once the last channel
			 * of this DMAC on this core is gone
			 */
			if (!dma_domain->aggregated_irq)
				dma_multi_chan_domain_irq_unregister(
						&dma_domain->data[i][j]);
			else if (!dma_domain->channel_mask[i][core])
				dma_multi_chan_domain_irq_unregister(
						dma_domain->arg[i][core]);
			return 0;
		}
	}

	/* task is still running or was never registered, can't unregister it */
	return -EINVAL;
}
264
265 /**
266 * \brief Checks if given task should be executed.
267 * \param[in,out] domain Pointer to schedule domain.
268 * \param[in,out] task Task to be checked.
 * \return True if the task should be executed, false otherwise.
270 */
dma_multi_chan_domain_is_pending(struct ll_schedule_domain * domain,struct task * task,struct comp_dev ** comp)271 static bool dma_multi_chan_domain_is_pending(struct ll_schedule_domain *domain,
272 struct task *task, struct comp_dev **comp)
273 {
274 struct dma_domain *dma_domain = ll_sch_domain_get_pdata(domain);
275 struct pipeline_task *pipe_task = pipeline_task_get(task);
276 struct dma *dmas = dma_domain->dma_array;
277 struct ll_task_pdata *pdata;
278 uint32_t status;
279 int i;
280 int j;
281
282 for (i = 0; i < dma_domain->num_dma; ++i) {
283 for (j = 0; j < dmas[i].plat_data.channels; ++j) {
284 if (!*comp) {
285 status = dma_interrupt_legacy(&dmas[i].chan[j],
286 DMA_IRQ_STATUS_GET);
287 if (!status)
288 continue;
289
290 *comp = dma_domain->data[i][j].task->sched_comp;
291 } else if (!dma_domain->data[i][j].task ||
292 dma_domain->data[i][j].task->sched_comp != *comp) {
293 continue;
294 }
295
296 /* not the same scheduling component */
297 if (dma_domain->data[i][j].task->sched_comp !=
298 pipe_task->sched_comp)
299 continue;
300
301 /* Schedule task based on the frequency they
302 * were configured with, not time (task.start)
303 *
304 * There are cases when a DMA transfer from a DAI
305 * is finished earlier than task.start and,
306 * without full_sync mode, this task will not
307 * be scheduled
308 */
309 if (domain->full_sync) {
310 pdata = ll_sch_get_pdata(&pipe_task->task);
311 pdata->skip_cnt++;
312 if (pdata->skip_cnt == pdata->ratio)
313 pdata->skip_cnt = 0;
314
315 if (pdata->skip_cnt != 0)
316 continue;
317 } else {
318 /* it's too soon for this task */
319 if (!pipe_task->registrable &&
320 pipe_task->task.start >
321 sof_cycle_get_64_atomic())
322 continue;
323 }
324
325 notifier_event(&dmas[i].chan[j], NOTIFIER_ID_DMA_IRQ,
326 NOTIFIER_TARGET_CORE_LOCAL,
327 &dmas[i].chan[j],
328 sizeof(struct dma_chan_data));
329
330 /* clear interrupt */
331 if (pipe_task->registrable) {
332 dma_interrupt_legacy(&dmas[i].chan[j], DMA_IRQ_CLEAR);
333 interrupt_clear_mask(dma_domain->data[i][j].irq,
334 BIT(j));
335 }
336
337 return true;
338 }
339 }
340
341 return false;
342 }
343
344 /**
345 * \brief Initializes DMA multichannel scheduling domain.
346 * \param[in,out] dma_array Array of DMAs to be scheduled on.
347 * \param[in] num_dma Number of DMAs passed as dma_array.
348 * \param[in] clk Platform clock to base calculations on.
349 * \param[in] aggregated_irq True if all DMAs share the same interrupt line.
350 * \return Pointer to initialized scheduling domain object.
351 */
dma_multi_chan_domain_init(struct dma * dma_array,uint32_t num_dma,int clk,bool aggregated_irq)352 struct ll_schedule_domain *dma_multi_chan_domain_init(struct dma *dma_array,
353 uint32_t num_dma, int clk,
354 bool aggregated_irq)
355 {
356 struct ll_schedule_domain *domain;
357 struct dma_domain *dma_domain;
358 struct dma *dma;
359 int i;
360 int j;
361
362 tr_info(&ll_tr, "dma_multi_chan_domain_init(): num_dma %d, clk %d, aggregated_irq %d",
363 num_dma, clk, aggregated_irq);
364
365 domain = domain_init(SOF_SCHEDULE_LL_DMA, clk, true,
366 &dma_multi_chan_domain_ops);
367
368 dma_domain = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*dma_domain));
369 dma_domain->dma_array = dma_array;
370 dma_domain->num_dma = num_dma;
371 dma_domain->aggregated_irq = aggregated_irq;
372
373 /* retrieve IRQ numbers for each DMA channel */
374 for (i = 0; i < num_dma; ++i) {
375 dma = &dma_array[i];
376 for (j = 0; j < dma->plat_data.channels; ++j)
377 dma_domain->data[i][j].irq = interrupt_get_irq(
378 dma_chan_irq(dma, j),
379 dma_chan_irq_name(dma, j));
380 }
381
382 ll_sch_domain_set_pdata(domain, dma_domain);
383
384 return domain;
385 }
386
387 const struct ll_schedule_domain_ops dma_multi_chan_domain_ops = {
388 .domain_register = dma_multi_chan_domain_register,
389 .domain_unregister = dma_multi_chan_domain_unregister,
390 .domain_is_pending = dma_multi_chan_domain_is_pending,
391 .domain_set = NULL,
392 .domain_enable = NULL,
393 .domain_disable = NULL,
394 .domain_clear = NULL,
395 };
396