// SPDX-License-Identifier: GPL-2.0
//
// Renesas R-Car Audio DMAC support
//
// Copyright (C) 2015 Renesas Electronics Corp.
// Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>

#include <linux/delay.h>
#include <linux/of_dma.h>
#include "rsnd.h"

/*
 * Audio DMAC peri peri register
 */
#define PDMASAR		0x00
#define PDMADAR		0x04
#define PDMACHCR	0x0c

/* PDMACHCR */
#define PDMACHCR_DE	(1 << 0)


struct rsnd_dmaen {
        struct dma_chan *chan;
        dma_cookie_t cookie;
        unsigned int dma_len;
};

struct rsnd_dmapp {
        int dmapp_id;
        u32 chcr;
};

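/*
 * One rsnd_dma is allocated per DMA hop.  Only one member of the union
 * below is ever used for a given hop: .en when the hop is serviced by a
 * DMAEngine channel (Audio DMAC), .pp when it is serviced by Audio DMAC
 * peri peri.  The choice is made in rsnd_dma_alloc().
 */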
struct rsnd_dma {
        struct rsnd_mod mod;
        struct rsnd_mod *mod_from;
        struct rsnd_mod *mod_to;
        dma_addr_t src_addr;
        dma_addr_t dst_addr;
        union {
                struct rsnd_dmaen en;
                struct rsnd_dmapp pp;
        } dma;
};

struct rsnd_dma_ctrl {
        void __iomem *base;
        int dmaen_num;
        int dmapp_num;
};

#define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
#define rsnd_mod_to_dma(_mod)	container_of((_mod), struct rsnd_dma, mod)
#define rsnd_dma_to_dmaen(dma)	(&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma)	(&(dma)->dma.pp)

/* for DEBUG */
static struct rsnd_mod_ops mem_ops = {
        .name = "mem",
};

static struct rsnd_mod mem = {
};

/*
 * Audio DMAC
 */
static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
                                  struct rsnd_dai_stream *io)
{
        if (rsnd_io_is_working(io))
                rsnd_dai_period_elapsed(io);
}

static void rsnd_dmaen_complete(void *data)
{
        struct rsnd_mod *mod = data;

        rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
}

static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
                                                   struct rsnd_mod *mod_from,
                                                   struct rsnd_mod *mod_to)
{
        if ((!mod_from && !mod_to) ||
            (mod_from && mod_to))
                return NULL;

        if (mod_from)
                return rsnd_mod_dma_req(io, mod_from);
        else
                return rsnd_mod_dma_req(io, mod_to);
}

static int rsnd_dmaen_stop(struct rsnd_mod *mod,
                           struct rsnd_dai_stream *io,
                           struct rsnd_priv *priv)
{
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

        if (dmaen->chan)
                dmaengine_terminate_all(dmaen->chan);

        return 0;
}

static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
                                  struct rsnd_dai_stream *io,
                                  struct rsnd_priv *priv)
{
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

        /*
         * DMAEngine release uses a mutex lock.
         * Thus, it shouldn't be called under a spinlock.
         * Let's call it under nolock_stop.
         */
        if (dmaen->chan)
                dma_release_channel(dmaen->chan);

        dmaen->chan = NULL;

        return 0;
}

static int rsnd_dmaen_nolock_start(struct rsnd_mod *mod,
                                   struct rsnd_dai_stream *io,
                                   struct rsnd_priv *priv)
{
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
        struct device *dev = rsnd_priv_to_dev(priv);

        if (dmaen->chan) {
                dev_err(dev, "it already has a DMA channel\n");
                return -EIO;
        }

        /*
         * DMAEngine request uses a mutex lock.
         * Thus, it shouldn't be called under a spinlock.
         * Let's call it under nolock_start.
         */
        dmaen->chan = rsnd_dmaen_request_channel(io,
                                                 dma->mod_from,
                                                 dma->mod_to);
        if (IS_ERR_OR_NULL(dmaen->chan)) {
                dmaen->chan = NULL;
                dev_err(dev, "can't get dma channel\n");
                return -EIO;
        }

        return 0;
}

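/*
 * Program the DMAEngine channel for one stream: 32-bit (4 byte) FIFO
 * accesses on both ends, then a single cyclic descriptor covering the
 * whole runtime buffer split into period-sized chunks.  DMA_PREP_INTERRUPT
 * gives one callback per period, which ends up in
 * rsnd_dai_period_elapsed() via rsnd_dmaen_complete().
 */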
static int rsnd_dmaen_start(struct rsnd_mod *mod,
                            struct rsnd_dai_stream *io,
                            struct rsnd_priv *priv)
{
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
        struct snd_pcm_substream *substream = io->substream;
        struct device *dev = rsnd_priv_to_dev(priv);
        struct dma_async_tx_descriptor *desc;
        struct dma_slave_config cfg = {};
        int is_play = rsnd_io_is_play(io);
        int ret;

        cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
        cfg.src_addr = dma->src_addr;
        cfg.dst_addr = dma->dst_addr;
        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

        dev_dbg(dev, "%s[%d] %pad -> %pad\n",
                rsnd_mod_name(mod), rsnd_mod_id(mod),
                &cfg.src_addr, &cfg.dst_addr);

        ret = dmaengine_slave_config(dmaen->chan, &cfg);
        if (ret < 0)
                return ret;

        desc = dmaengine_prep_dma_cyclic(dmaen->chan,
                                         substream->runtime->dma_addr,
                                         snd_pcm_lib_buffer_bytes(substream),
                                         snd_pcm_lib_period_bytes(substream),
                                         is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

        if (!desc) {
                dev_err(dev, "dmaengine_prep_dma_cyclic() fail\n");
                return -EIO;
        }

        desc->callback = rsnd_dmaen_complete;
        desc->callback_param = rsnd_mod_get(dma);

        dmaen->dma_len = snd_pcm_lib_buffer_bytes(substream);

        dmaen->cookie = dmaengine_submit(desc);
        if (dmaen->cookie < 0) {
                dev_err(dev, "dmaengine_submit() fail\n");
                return -EIO;
        }

        dma_async_issue_pending(dmaen->chan);

        return 0;
}

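/*
 * Walk the children of @of_node and request the slave channel named
 * @name from the child whose index matches rsnd_mod_id(mod).
 */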
struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
                                          struct rsnd_mod *mod, char *name)
{
        struct dma_chan *chan = NULL;
        struct device_node *np;
        int i = 0;

        for_each_child_of_node(of_node, np) {
                if (i == rsnd_mod_id(mod) && (!chan))
                        chan = of_dma_request_slave_channel(np, name);
                i++;
        }

        /*
         * of_node_put() is needed here, since of_node came from
         * rsnd_xxx_of_node() and carries a reference.
         */
        of_node_put(of_node);

        return chan;
}

static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
                             struct rsnd_dma *dma,
                             struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
        struct rsnd_priv *priv = rsnd_io_to_priv(io);
        struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
        struct dma_chan *chan;

        /* try to get DMAEngine channel */
        chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
        if (IS_ERR_OR_NULL(chan)) {
                /* pass the error through in the -EPROBE_DEFER case */
                if (PTR_ERR(chan) == -EPROBE_DEFER)
                        return PTR_ERR(chan);

                /*
                 * DMA failed. Fall back to PIO mode.
                 * see
                 *	rsnd_ssi_fallback()
                 *	rsnd_rdai_continuance_probe()
                 */
                return -EAGAIN;
        }

        /*
         * use it for IPMMU if needed
         * see
         *	rsnd_preallocate_pages()
         */
        io->dmac_dev = chan->device->dev;

        dma_release_channel(chan);

        dmac->dmaen_num++;

        return 0;
}

static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
                              struct rsnd_dai_stream *io,
                              snd_pcm_uframes_t *pointer)
{
        struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
        struct dma_tx_state state;
        enum dma_status status;
        unsigned int pos = 0;

        status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
        if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
                if (state.residue > 0 && state.residue <= dmaen->dma_len)
                        pos = dmaen->dma_len - state.residue;
        }
        *pointer = bytes_to_frames(runtime, pos);

        return 0;
}

static struct rsnd_mod_ops rsnd_dmaen_ops = {
        .name		= "audmac",
        .nolock_start	= rsnd_dmaen_nolock_start,
        .nolock_stop	= rsnd_dmaen_nolock_stop,
        .start		= rsnd_dmaen_start,
        .stop		= rsnd_dmaen_stop,
        .pointer	= rsnd_dmaen_pointer,
};

/*
 * Audio DMAC peri peri
 */
static const u8 gen2_id_table_ssiu[] = {
        0x00, /* SSI00 */
        0x04, /* SSI10 */
        0x08, /* SSI20 */
        0x0c, /* SSI3  */
        0x0d, /* SSI4  */
        0x0e, /* SSI5  */
        0x0f, /* SSI6  */
        0x10, /* SSI7  */
        0x11, /* SSI8  */
        0x12, /* SSI90 */
};
static const u8 gen2_id_table_scu[] = {
        0x2d, /* SCU_SRCI0 */
        0x2e, /* SCU_SRCI1 */
        0x2f, /* SCU_SRCI2 */
        0x30, /* SCU_SRCI3 */
        0x31, /* SCU_SRCI4 */
        0x32, /* SCU_SRCI5 */
        0x33, /* SCU_SRCI6 */
        0x34, /* SCU_SRCI7 */
        0x35, /* SCU_SRCI8 */
        0x36, /* SCU_SRCI9 */
};
static const u8 gen2_id_table_cmd[] = {
        0x37, /* SCU_CMD0 */
        0x38, /* SCU_CMD1 */
};

static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
                             struct rsnd_mod *mod)
{
        struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
        struct rsnd_mod *src = rsnd_io_to_mod_src(io);
        struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
        const u8 *entry = NULL;
        int id = rsnd_mod_id(mod);
        int size = 0;

        if (mod == ssi) {
                entry = gen2_id_table_ssiu;
                size = ARRAY_SIZE(gen2_id_table_ssiu);
        } else if (mod == src) {
                entry = gen2_id_table_scu;
                size = ARRAY_SIZE(gen2_id_table_scu);
        } else if (mod == dvc) {
                entry = gen2_id_table_cmd;
                size = ARRAY_SIZE(gen2_id_table_cmd);
        }

        if ((!entry) || (size <= id)) {
                struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));

                dev_err(dev, "unknown connection (%s[%d])\n",
                        rsnd_mod_name(mod), rsnd_mod_id(mod));

                /* use non-prohibited SRS number as error */
                return 0x00; /* SSI00 */
        }

        return entry[id];
}

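/*
 * PDMACHCR carries the peri peri resource IDs of both ends of the
 * transfer: bits [31:24] hold the source (mod_from) ID and bits [23:16]
 * the destination (mod_to) ID.  PDMACHCR_DE is OR-ed in later, when
 * rsnd_dmapp_attach() stores the value.
 */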
static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
                               struct rsnd_mod *mod_from,
                               struct rsnd_mod *mod_to)
{
        return (rsnd_dmapp_get_id(io, mod_from) << 24) +
               (rsnd_dmapp_get_id(io, mod_to) << 16);
}

#define rsnd_dmapp_addr(dmac, dma, reg)					\
        (dmac->base + 0x20 + reg +					\
         (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
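/*
 * Each peri peri channel thus has a 0x10 byte register window starting
 * at base + 0x20.  For example, PDMACHCR (0x0c) of dmapp_id 1 is at
 * base + 0x20 + 0x0c + (0x10 * 1) = base + 0x3c.
 */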
static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
{
        struct rsnd_mod *mod = rsnd_mod_get(dma);
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
        struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
        struct device *dev = rsnd_priv_to_dev(priv);

        dev_dbg(dev, "w %p : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);

        iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}

static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
{
        struct rsnd_mod *mod = rsnd_mod_get(dma);
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
        struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);

        return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
}

static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
{
        struct rsnd_mod *mod = rsnd_mod_get(dma);
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
        struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
        void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
        u32 val = ioread32(addr);

        val &= ~mask;
        val |= (data & mask);

        iowrite32(val, addr);
}

static int rsnd_dmapp_stop(struct rsnd_mod *mod,
                           struct rsnd_dai_stream *io,
                           struct rsnd_priv *priv)
{
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        int i;

        rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);

        for (i = 0; i < 1024; i++) {
                if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
                        return 0;
                udelay(1);
        }

        return -EIO;
}

static int rsnd_dmapp_start(struct rsnd_mod *mod,
                            struct rsnd_dai_stream *io,
                            struct rsnd_priv *priv)
{
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

        rsnd_dmapp_write(dma, dma->src_addr, PDMASAR);
        rsnd_dmapp_write(dma, dma->dst_addr, PDMADAR);
        rsnd_dmapp_write(dma, dmapp->chcr, PDMACHCR);

        return 0;
}

static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
                             struct rsnd_dma *dma,
                             struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
        struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
        struct rsnd_priv *priv = rsnd_io_to_priv(io);
        struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
        struct device *dev = rsnd_priv_to_dev(priv);

        dmapp->dmapp_id = dmac->dmapp_num;
        dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;

        dmac->dmapp_num++;

        dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
                dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);

        return 0;
}

static struct rsnd_mod_ops rsnd_dmapp_ops = {
        .name	= "audmac-pp",
        .start	= rsnd_dmapp_start,
        .stop	= rsnd_dmapp_stop,
        .quit	= rsnd_dmapp_stop,
};

/*
 * Common DMAC Interface
 */

/*
 * DMA read/write register offset
 *
 *	RSND_xxx_I_N	for Audio DMAC input
 *	RSND_xxx_O_N	for Audio DMAC output
 *	RSND_xxx_I_P	for Audio DMAC peri peri input
 *	RSND_xxx_O_P	for Audio DMAC peri peri output
 *
 * ex) R-Car H2 case
 *	      mod        / DMAC in    / DMAC out   / DMAC PP in / DMAC pp out
 *	SSI : 0xec541000 / 0xec241008 / 0xec24100c
 *	SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
 *	SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
 *	CMD : 0xec500000 /            / 0xec008000 /            / 0xec308000
 */
#define RDMA_SSI_I_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)

#define RDMA_SSIU_I_N(addr, i)	(addr ##_reg - 0x00441000 + (0x1000 * i))
#define RDMA_SSIU_O_N(addr, i)	(addr ##_reg - 0x00441000 + (0x1000 * i))

#define RDMA_SSIU_I_P(addr, i)	(addr ##_reg - 0x00141000 + (0x1000 * i))
#define RDMA_SSIU_O_P(addr, i)	(addr ##_reg - 0x00141000 + (0x1000 * i))

#define RDMA_SRC_I_N(addr, i)	(addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i)	(addr ##_reg - 0x004fc000 + (0x400 * i))

#define RDMA_SRC_I_P(addr, i)	(addr ##_reg - 0x00200000 + (0x400 * i))
#define RDMA_SRC_O_P(addr, i)	(addr ##_reg - 0x001fc000 + (0x400 * i))

#define RDMA_CMD_O_N(addr, i)	(addr ##_reg - 0x004f8000 + (0x400 * i))
#define RDMA_CMD_O_P(addr, i)	(addr ##_reg - 0x001f8000 + (0x400 * i))

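/*
 * Example, taking the R-Car H2 numbers from the table above:
 *	RDMA_SSI_I_N(ssi, 0) = 0xec541000 - 0x00300000 + (0x40 * 0) + 0x8
 *			     = 0xec241008
 * which is the SSI "DMAC in" address listed for SSI0.
 */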
static dma_addr_t
rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
                   struct rsnd_mod *mod,
                   int is_play, int is_from)
{
        struct rsnd_priv *priv = rsnd_io_to_priv(io);
        struct device *dev = rsnd_priv_to_dev(priv);
        phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
        phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
        int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
        int use_src = !!rsnd_io_to_mod_src(io);
        int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
                      !!rsnd_io_to_mod_mix(io) ||
                      !!rsnd_io_to_mod_ctu(io);
        int id = rsnd_mod_id(mod);
        struct dma_addr {
                dma_addr_t out_addr;
                dma_addr_t in_addr;
        } dma_addrs[3][2][3] = {
                /* SRC */
                /* Capture */
                {{{ 0,                          0 },
                  { RDMA_SRC_O_N(src, id),      RDMA_SRC_I_P(src, id) },
                  { RDMA_CMD_O_N(src, id),      RDMA_SRC_I_P(src, id) } },
                 /* Playback */
                 {{ 0,                          0, },
                  { RDMA_SRC_O_P(src, id),      RDMA_SRC_I_N(src, id) },
                  { RDMA_CMD_O_P(src, id),      RDMA_SRC_I_N(src, id) } }
                },
                /* SSI */
                /* Capture */
                {{{ RDMA_SSI_O_N(ssi, id),      0 },
                  { RDMA_SSIU_O_P(ssi, id),     0 },
                  { RDMA_SSIU_O_P(ssi, id),     0 } },
                 /* Playback */
                 {{ 0,                          RDMA_SSI_I_N(ssi, id) },
                  { 0,                          RDMA_SSIU_I_P(ssi, id) },
                  { 0,                          RDMA_SSIU_I_P(ssi, id) } }
                },
                /* SSIU */
                /* Capture */
                {{{ RDMA_SSIU_O_N(ssi, id),     0 },
                  { RDMA_SSIU_O_P(ssi, id),     0 },
                  { RDMA_SSIU_O_P(ssi, id),     0 } },
                 /* Playback */
                 {{ 0,                          RDMA_SSIU_I_N(ssi, id) },
                  { 0,                          RDMA_SSIU_I_P(ssi, id) },
                  { 0,                          RDMA_SSIU_I_P(ssi, id) } } },
        };
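        /*
         * Indexing: dma_addrs[is_ssi][is_play][use_src + use_cmd].
         * is_ssi selects the SRC (0), SSI (1) or SSIU (2) rows; it is
         * bumped to 2 below when the SSIU BUSIF is used.  The last index
         * counts how many of SRC/CMD sit in the path (0, 1 or 2).
         */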

        /* it shouldn't happen */
        if (use_cmd && !use_src)
                dev_err(dev, "DVC is selected without SRC\n");

        /* use SSIU or SSI ? */
        if (is_ssi && rsnd_ssi_use_busif(io))
                is_ssi++;

        return (is_from) ?
                dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
                dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
}

static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
                                struct rsnd_mod *mod,
                                int is_play, int is_from)
{
        struct rsnd_priv *priv = rsnd_io_to_priv(io);

        /*
         * gen1 uses default DMA addr
         */
        if (rsnd_is_gen1(priv))
                return 0;

        if (!mod)
                return 0;

        return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
}

#define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
static void rsnd_dma_of_path(struct rsnd_mod *this,
                             struct rsnd_dai_stream *io,
                             int is_play,
                             struct rsnd_mod **mod_from,
                             struct rsnd_mod **mod_to)
{
        struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
        struct rsnd_mod *src = rsnd_io_to_mod_src(io);
        struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
        struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
        struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
        struct rsnd_mod *mod[MOD_MAX];
        struct rsnd_mod *mod_start, *mod_end;
        struct rsnd_priv *priv = rsnd_mod_to_priv(this);
        struct device *dev = rsnd_priv_to_dev(priv);
        int nr, i, idx;

        if (!ssi)
                return;

        nr = 0;
        for (i = 0; i < MOD_MAX; i++) {
                mod[i] = NULL;
                nr += !!rsnd_io_to_mod(io, i);
        }

        /*
         * [S] -*-> [E]
         * [S] -*-> SRC -o-> [E]
         * [S] -*-> SRC -> DVC -o-> [E]
         * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
         *
         * playback	[S] = mem
         *		[E] = SSI
         *
         * capture	[S] = SSI
         *		[E] = mem
         *
         * -*-> Audio DMAC
         * -o-> Audio DMAC peri peri
         */
        mod_start = (is_play) ? NULL : ssi;
        mod_end   = (is_play) ? ssi  : NULL;

        idx = 0;
        mod[idx++] = mod_start;
        for (i = 1; i < nr; i++) {
                if (src) {
                        mod[idx++] = src;
                        src = NULL;
                } else if (ctu) {
                        mod[idx++] = ctu;
                        ctu = NULL;
                } else if (mix) {
                        mod[idx++] = mix;
                        mix = NULL;
                } else if (dvc) {
                        mod[idx++] = dvc;
                        dvc = NULL;
                }
        }
        mod[idx] = mod_end;

        /*
         *		| SSI | SRC |
         * -------------+-----+-----+
         *  is_play	|  o  |  *  |
         * !is_play	|  *  |  o  |
         */
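        /*
         * i.e. when "this" is SSI, pick the hop adjacent to SSI (the last
         * one for playback, the first one for capture); otherwise pick the
         * hop adjacent to memory.
         */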
        if ((this == ssi) == (is_play)) {
                *mod_from = mod[idx - 1];
                *mod_to   = mod[idx];
        } else {
                *mod_from = mod[0];
                *mod_to   = mod[1];
        }

        dev_dbg(dev, "module connection (this is %s[%d])\n",
                rsnd_mod_name(this), rsnd_mod_id(this));
        for (i = 0; i <= idx; i++) {
                dev_dbg(dev, "  %s[%d]%s\n",
                        rsnd_mod_name(mod[i] ? mod[i] : &mem),
                        rsnd_mod_id  (mod[i] ? mod[i] : &mem),
                        (mod[i] == *mod_from) ? " from" :
                        (mod[i] == *mod_to)   ? " to" : "");
        }
}

static int rsnd_dma_alloc(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
                          struct rsnd_mod **dma_mod)
{
        struct rsnd_mod *mod_from = NULL;
        struct rsnd_mod *mod_to = NULL;
        struct rsnd_priv *priv = rsnd_io_to_priv(io);
        struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
        struct device *dev = rsnd_priv_to_dev(priv);
        struct rsnd_dma *dma;
        struct rsnd_mod_ops *ops;
        enum rsnd_mod_type type;
        int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
                      struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
        int is_play = rsnd_io_is_play(io);
        int ret, dma_id;

        /*
         * DMA failed. Fall back to PIO mode.
         * see
         *	rsnd_ssi_fallback()
         *	rsnd_rdai_continuance_probe()
         */
        if (!dmac)
                return -EAGAIN;

        rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);

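        /*
         * When both ends of the hop are on-chip modules (mod_from && mod_to),
         * the data never touches memory and Audio DMAC peri peri is used;
         * when one end is memory, a DMAEngine channel (Audio DMAC) is used.
         * Gen1 always uses DMAEngine.
         */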
        /* for Gen2 or later */
        if (mod_from && mod_to) {
                ops = &rsnd_dmapp_ops;
                attach = rsnd_dmapp_attach;
                dma_id = dmac->dmapp_num;
                type = RSND_MOD_AUDMAPP;
        } else {
                ops = &rsnd_dmaen_ops;
                attach = rsnd_dmaen_attach;
                dma_id = dmac->dmaen_num;
                type = RSND_MOD_AUDMA;
        }

        /* for Gen1, overwrite */
        if (rsnd_is_gen1(priv)) {
                ops = &rsnd_dmaen_ops;
                attach = rsnd_dmaen_attach;
                dma_id = dmac->dmaen_num;
                type = RSND_MOD_AUDMA;
        }

        dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
        if (!dma)
                return -ENOMEM;

        *dma_mod = rsnd_mod_get(dma);

        ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
                            rsnd_mod_get_status, type, dma_id);
        if (ret < 0)
                return ret;

        dev_dbg(dev, "%s[%d] %s[%d] -> %s[%d]\n",
                rsnd_mod_name(*dma_mod), rsnd_mod_id(*dma_mod),
                rsnd_mod_name(mod_from ? mod_from : &mem),
                rsnd_mod_id  (mod_from ? mod_from : &mem),
                rsnd_mod_name(mod_to   ? mod_to   : &mem),
                rsnd_mod_id  (mod_to   ? mod_to   : &mem));

        ret = attach(io, dma, mod_from, mod_to);
        if (ret < 0)
                return ret;

        dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
        dma->dst_addr = rsnd_dma_addr(io, mod_to,   is_play, 0);
        dma->mod_from = mod_from;
        dma->mod_to   = mod_to;

        return 0;
}

int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
                    struct rsnd_mod **dma_mod)
{
        if (!(*dma_mod)) {
                int ret = rsnd_dma_alloc(io, mod, dma_mod);

                if (ret < 0)
                        return ret;
        }

        return rsnd_dai_connect(*dma_mod, io, (*dma_mod)->type);
}

int rsnd_dma_probe(struct rsnd_priv *priv)
{
        struct platform_device *pdev = rsnd_priv_to_pdev(priv);
        struct device *dev = rsnd_priv_to_dev(priv);
        struct rsnd_dma_ctrl *dmac;
        struct resource *res;

        /*
         * for Gen1
         */
        if (rsnd_is_gen1(priv))
                return 0;

        /*
         * for Gen2 or later
         */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
        dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
        if (!dmac || !res) {
                dev_err(dev, "dma allocate failed\n");
                return 0; /* it will be PIO mode */
        }

        dmac->dmapp_num = 0;
        dmac->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(dmac->base))
                return PTR_ERR(dmac->base);

        priv->dma = dmac;

        /* dummy mem mod for debug */
        return rsnd_mod_init(NULL, &mem, &mem_ops, NULL, NULL, 0, 0);
}